I'm just getting started with scikit-learn and I'm banging my head against the wall. I've used both real-world and test data, and none of the scikit-learn algorithms predict anything above chance level. I've tried well-known algorithms such as decision trees, SVC, and naive Bayes.
Basically, I made a test dataset consisting of a column of 0s and 1s, where all the 0s have feature values between 0 and 0.5 and all the 1s have feature values between 0.5 and 1. This should be extremely easy to learn and give close to 100% accuracy, yet none of the algorithms perform above chance level: accuracy ranges from 45% to 55%. I've tried tuning a whole bunch of parameters for every algorithm, but nothing helps, so I think my implementation is fundamentally broken.
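For reference, test data like this can be generated with something like the following sketch (my real data lives in Test.xlsx; the integer column keys 1 and 2 match how I index the dataframe below):

import numpy as np
import pandas

# Sketch: 399 samples; label 0 -> feature in [0, 0.5), label 1 -> feature in [0.5, 1)
rng = np.random.RandomState(0)
labels = rng.randint(0, 2, size=399)
features = np.where(labels == 0,
                    rng.uniform(0.0, 0.5, size=399),
                    rng.uniform(0.5, 1.0, size=399))
pandas.DataFrame({1: labels, 2: features}).to_excel('Test.xlsx')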
Please help me. Here is my code:
from sklearn.cross_validation import train_test_split
from sklearn import preprocessing
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import accuracy_score
import sklearn
import pandas
import numpy as np
df=pandas.read_excel('Test.xlsx')
# Make data into np arrays
y = np.array(df[1]).astype(float).reshape(399)      # labels: the column of 0s and 1s
x = np.array(df[2]).astype(float).reshape(399, 1)   # features: one value per sample
# Creating training and test data
labels_train, labels_test = train_test_split(y)
features_train, features_test = train_test_split(x)
#####################################################################
# PERCEPTRON
#####################################################################
from sklearn import linear_model
perceptron=linear_model.Perceptron()
perceptron.fit(features_train, labels_train)
perc_pred=perceptron.predict(features_test)
print sklearn.metrics.accuracy_score(labels_test, perc_pred, normalize=True, sample_weight=None)
print 'perceptron'
#####################################################################
# KNN classifier
#####################################################################
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(features_train, labels_train)
knn_pred = knn.predict(features_test)
# Accuracy
print sklearn.metrics.accuracy_score(labels_test, knn_pred, normalize=True, sample_weight=None)
print 'knn'
#####################################################################
# SVC
#####################################################################
from sklearn.svm import SVC
svm2 = SVC(kernel="linear")
svm2.fit(features_train, labels_train)
svc_pred = svm2.predict(features_test)
print sklearn.metrics.accuracy_score(labels_test, svc_pred, normalize=True, sample_weight=None)
#####################################################################
# Decision tree
#####################################################################
from sklearn import tree
clf = tree.DecisionTreeClassifier()
clf = clf.fit(features_train, labels_train)
tree_pred=clf.predict(features_test)
# Accuracy
print sklearn.metrics.accuracy_score(labels_test, tree_pred, normalize=True, sample_weight=None)
print 'tree'
#####################################################################
# Naive bayes
#####################################################################
from sklearn.naive_bayes import GaussianNB
clf = GaussianNB()
from time import time
t0 = time()
clf.fit(features_train, labels_train)
print "training time:", round(time() - t0, 3), "s"
bayes_pred = clf.predict(features_test)
print sklearn.metrics.accuracy_score(labels_test, bayes_pred, normalize=True, sample_weight=None)
You seem to be using train_test_split incorrectly:
labels_train, labels_test = train_test_split(y) #WRONG
features_train, features_test = train_test_split(x) #WRONG
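Each call to train_test_split shuffles with its own random permutation, so two separate calls scramble the features and the labels relative to each other. A tiny demonstration of the misalignment (a sketch with toy arrays; the seeds are chosen only so the two shuffles differ):

import numpy as np
from sklearn.cross_validation import train_test_split

x = np.arange(10).reshape(10, 1)   # sample i has feature value i
y = np.arange(10)                  # ... and label i

x_tr, x_te = train_test_split(x, random_state=1)
y_tr, y_te = train_test_split(y, random_state=2)

print x_tr.ravel()   # one random subset of the samples
print y_tr           # a *different* random subset: the pairs are destroyed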
Because of this, row i of features_train no longer belongs to row i of labels_train, so every classifier is trained on effectively random labels, which is exactly why your accuracy hovers around 50%. Splitting labels and features in separate calls is also unnecessary. A simple way to split the data manually while keeping each sample paired with its label:
randomvec = np.random.rand(len(data)) > 0.5        # random boolean mask, roughly 50/50 split
train_data = data[randomvec]                       # here data/labels correspond to your x/y
train_label = labels[randomvec]
test_data = data[np.logical_not(randomvec)]
test_label = labels[np.logical_not(randomvec)]
Because the same mask indexes both arrays, every sample stays paired with its own label. Or use the scikit-learn helper correctly, passing both arrays in one call:
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.5, random_state=42)
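Putting it together, a minimal corrected version of your pipeline could look like this (a sketch: it reuses your file and reshapes, and picks the decision tree only as an example; any of your classifiers would work the same way):

from sklearn.cross_validation import train_test_split   # sklearn.model_selection in newer versions
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
import pandas
import numpy as np

df = pandas.read_excel('Test.xlsx')
y = np.array(df[1]).astype(float).reshape(399)      # labels
x = np.array(df[2]).astype(float).reshape(399, 1)   # features

# One call splits x and y together, so rows and labels stay aligned
features_train, features_test, labels_train, labels_test = train_test_split(
    x, y, test_size=0.5, random_state=42)

clf = DecisionTreeClassifier()
clf.fit(features_train, labels_train)
print accuracy_score(labels_test, clf.predict(features_test))

With the aligned split, accuracy on your synthetic 0/1 data should be close to the ~100% you expected.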