I am trying to do neural network classification with scikit-learn in Python.
I generated the data, split it into training and test sets, and fit the MLPClassifier() model on it.
What I plan to do next is to evaluate the parameters used in this model with sklearn.model_selection.GridSearchCV.
Here is my code:
import matplotlib.pyplot as plt
import numpy as np
import itertools
from sklearn.neural_network import MLPClassifier
from sklearn.datasets.samples_generator import make_blobs, make_moons
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
X, y = make_blobs(n_samples=500, centers=5, n_features=2, random_state=10, cluster_std=2.5)
y[y==0] = -1
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=10)
X_train and X_test are arrays with 2 features.
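For reference, a quick shape check (the exact row counts assume the 500-sample, 0.33 test split above) confirms this:
# both splits should be 2-D arrays with 2 feature columns
print(X_train.shape)   # (335, 2)
print(X_test.shape)    # (165, 2)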
model_MLP_RAW = MLPClassifier()
model_MLP_RAW.fit(X_train, y_train)
model_MLP_RAW.predict(X_test) == y_test
model_MLP_RAW.score(X_test, y_test)
model_MLP_RAW = MLPClassifier()
param_gridMLPC = {
'learning_rate': ["constant", "invscaling", "adaptive"],
'hidden_layer_sizes': [x for x in itertools.product((10,20,30,40,50,100),repeat=3)],
'alpha': [10.0 ** -np.arange(1, 7)],
'activation': ["logistic", "relu", "tanh"]
}
CV_unknwnMLPC = GridSearchCV(estimator=model_MLP_RAW, param_grid=param_gridMLPC, cv= 5)
CV_unknwnMLPC.fit(X_train, y_train)
print(CV_unknwnMLPC.best_params_)
Everything works fine, but at the line CV_unknwnMLPC.fit(X_train, y_train) I get the following error:
ValueError Traceback (most recent call last)
<ipython-input-30-90faf7e56738> in <module>()
10
11 CV_unknwnMLPC = GridSearchCV(estimator=model_MLP_RAW, param_grid=param_gridMLPC, cv= 5)
---> 12 CV_unknwnMLPC.fit(X_train, y_train)
13
14 print(CV_unknwnMLPC.best_params_)
~\Anaconda3\lib\site-packages\sklearn\model_selection\_search.py in fit(self, X, y, groups, **fit_params)
638 error_score=self.error_score)
639 for parameters, (train, test) in product(candidate_params,
--> 640 cv.split(X, y, groups)))
641
642 # if one choose to see train score, "out" will contain train score info
~\Anaconda3\lib\site-packages\sklearn\externals\joblib\parallel.py in __call__(self, iterable)
777 # was dispatched. In particular this covers the edge
778 # case of Parallel used with an exhausted iterator.
--> 779 while self.dispatch_one_batch(iterator):
780 self._iterating = True
781 else:
~\Anaconda3\lib\site-packages\sklearn\externals\joblib\parallel.py in dispatch_one_batch(self, iterator)
623 return False
624 else:
--> 625 self._dispatch(tasks)
626 return True
627
~\Anaconda3\lib\site-packages\sklearn\externals\joblib\parallel.py in _dispatch(self, batch)
586 dispatch_timestamp = time.time()
587 cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
--> 588 job = self._backend.apply_async(batch, callback=cb)
589 self._jobs.append(job)
590
~\Anaconda3\lib\site-packages\sklearn\externals\joblib\_parallel_backends.py in apply_async(self, func, callback)
109 def apply_async(self, func, callback=None):
110 """Schedule a func to be run"""
--> 111 result = ImmediateResult(func)
112 if callback:
113 callback(result)
~\Anaconda3\lib\site-packages\sklearn\externals\joblib\_parallel_backends.py in __init__(self, batch)
330 # Don't delay the application, to avoid keeping the input
331 # arguments in memory
--> 332 self.results = batch()
333
334 def get(self):
~\Anaconda3\lib\site-packages\sklearn\externals\joblib\parallel.py in __call__(self)
129
130 def __call__(self):
--> 131 return [func(*args, **kwargs) for func, args, kwargs in self.items]
132
133 def __len__(self):
~\Anaconda3\lib\site-packages\sklearn\externals\joblib\parallel.py in <listcomp>(.0)
129
130 def __call__(self):
--> 131 return [func(*args, **kwargs) for func, args, kwargs in self.items]
132
133 def __len__(self):
~\Anaconda3\lib\site-packages\sklearn\model_selection\_validation.py in _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, return_train_score, return_parameters, return_n_test_samples, return_times, error_score)
456 estimator.fit(X_train, **fit_params)
457 else:
--> 458 estimator.fit(X_train, y_train, **fit_params)
459
460 except Exception as e:
~\Anaconda3\lib\site-packages\sklearn\neural_network\multilayer_perceptron.py in fit(self, X, y)
971 """
972 return self._fit(X, y, incremental=(self.warm_start and
--> 973 hasattr(self, "classes_")))
974
975 @property
~\Anaconda3\lib\site-packages\sklearn\neural_network\multilayer_perceptron.py in _fit(self, X, y, incremental)
324
325 # Validate input parameters.
--> 326 self._validate_hyperparameters()
327 if np.any(np.array(hidden_layer_sizes) <= 0):
328 raise ValueError("hidden_layer_sizes must be > 0, got %s." %
~\Anaconda3\lib\site-packages\sklearn\neural_network\multilayer_perceptron.py in _validate_hyperparameters(self)
390 if self.max_iter <= 0:
391 raise ValueError("max_iter must be > 0, got %s." % self.max_iter)
--> 392 if self.alpha < 0.0:
393 raise ValueError("alpha must be >= 0, got %s." % self.alpha)
394 if (self.learning_rate in ["constant", "invscaling", "adaptive"] and
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
I checked some answers online and double-checked the parameters in param_gridMLPC to make sure they were specified correctly, but the error persists.
What am I doing wrong?
Thanks in advance.
'alpha': [10.0 ** -np.arange(1, 7)]
From the documentation of MLPClassifier:
alpha : float, optional, default 0.0001
L2 penalty (regularization term) parameter.
"alpha" should be a single float. So in the parameter grid it can be a list of different floats.
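For example, a plain list of floats is a valid grid entry (the specific values below are just illustrative):
'alpha': [0.1, 0.01, 0.001, 0.0001]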
But when you do:
'alpha': [10.0 ** -np.arange(1, 7)]
this becomes a list of numpy arrays, i.e. a sequence of sequences (a list of lists, an array of arrays, a 2-D array, and so on). That means the first element of the list, an entire numpy array, is what gets passed as "alpha" to the inner MLPClassifier, and that is what raises the error.
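You can see the difference with a quick check (not part of your pipeline, just an illustration):
import numpy as np
print([10.0 ** -np.arange(1, 7)])       # a list with ONE element: the whole array
print(list(10.0 ** -np.arange(1, 7)))   # six separate float values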
You can do the following instead:
'alpha': 10.0 ** -np.arange(1, 7)
This is a plain 1-D array, and GridSearchCV will pick individual elements (float values) from it to send into the model.
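Putting it together, a minimal corrected grid search could look like this (same parameters as yours, reusing X_train and y_train from your snippet; wrapping the array in list(...) would work equally well):
import numpy as np
import itertools
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import GridSearchCV

param_gridMLPC = {
    'learning_rate': ["constant", "invscaling", "adaptive"],
    'hidden_layer_sizes': [x for x in itertools.product((10, 20, 30, 40, 50, 100), repeat=3)],
    'alpha': 10.0 ** -np.arange(1, 7),   # array of floats; one value is tried per candidate
    'activation': ["logistic", "relu", "tanh"]
}

CV_unknwnMLPC = GridSearchCV(estimator=MLPClassifier(), param_grid=param_gridMLPC, cv=5)
CV_unknwnMLPC.fit(X_train, y_train)
print(CV_unknwnMLPC.best_params_)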