I am trying to learn how to use scikit-learn's GridSearchCV() method to find the optimal hyperparameters for a decision tree classifier.
The problem is that it works fine when I only specify options for a single parameter, as in the following:
print(__doc__)
# Code source: Gael Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
from sklearn import datasets
from sklearn.grid_search import GridSearchCV
from sklearn.tree import DecisionTreeClassifier
# define classifier
dt = DecisionTreeClassifier()
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
y = iris.target
# define parameter values that should be searched
min_samples_split_options = range(2, 4)
# create a parameter grid: map the parameter names to the values that should be searched
param_grid_dt = dict(min_samples_split=min_samples_split_options) # for DT
# instantiate the grid
grid = GridSearchCV(dt, param_grid_dt, cv=10, scoring='accuracy')
# fit the grid with param
grid.fit(X, y)
# view complete results
grid.grid_scores_
'''# examine results from first tuple
print grid.grid_scores_[0].parameters
print grid.grid_scores_[0].cv_validation_scores
print grid.grid_scores_[0].mean_validation_score'''
# examine the best model
print '*******Final results*********'
print grid.best_score_
print grid.best_params_
print grid.best_estimator_
Result:
None
*******Final results*********
0.68
{'min_samples_split': 3}
DecisionTreeClassifier(class_weight=None, criterion='gini', max_depth=None,
max_features=None, max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=3, min_weight_fraction_leaf=0.0,
presort=False, random_state=None, splitter='best')
But when I add options for a second parameter into the mix, it gives me an "Invalid parameter" error, as shown below:
print(__doc__)
# Code source: Gael Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
from sklearn import datasets
from sklearn.grid_search import GridSearchCV
from sklearn.tree import DecisionTreeClassifier
# define classifier
dt = DecisionTreeClassifier()
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
y = iris.target
# define parameter values that should be searched
max_depth_options = range(10, 251) # for DT
min_samples_split_options = range(2, 4)
# create a parameter grid: map the parameter names to the values that should be searched
param_grid_dt = dict(max_depth=max_depth_options, min_sample_split=min_samples_split_options) # for DT
# instantiate the grid
grid = GridSearchCV(dt, param_grid_dt, cv=10, scoring='accuracy')
# fit the grid with param
grid.fit(X, y)
'''# view complete results
grid.grid_scores_
# examine results from first tuple
print grid.grid_scores_[0].parameters
print grid.grid_scores_[0].cv_validation_scores
print grid.grid_scores_[0].mean_validation_score
# examine the best model
print '*******Final results*********'
print grid.best_score_
print grid.best_params_
print grid.best_estimator_'''
Result:
None
Traceback (most recent call last):
File "C:UsersKubiKDesktopGridSearch_ex6.py", line 31, in <module>
grid.fit(X, y)
File "C:UsersKubiKAnaconda2libsite-packagessklearngrid_search.py", line 804, in fit
return self._fit(X, y, ParameterGrid(self.param_grid))
File "C:UsersKubiKAnaconda2libsite-packagessklearngrid_search.py", line 553, in _fit
for parameters in parameter_iterable
File "C:UsersKubiKAnaconda2libsite-packagessklearnexternalsjoblibparallel.py", line 800, in __call__
while self.dispatch_one_batch(iterator):
File "C:UsersKubiKAnaconda2libsite-packagessklearnexternalsjoblibparallel.py", line 658, in dispatch_one_batch
self._dispatch(tasks)
File "C:UsersKubiKAnaconda2libsite-packagessklearnexternalsjoblibparallel.py", line 566, in _dispatch
job = ImmediateComputeBatch(batch)
File "C:UsersKubiKAnaconda2libsite-packagessklearnexternalsjoblibparallel.py", line 180, in __init__
self.results = batch()
File "C:UsersKubiKAnaconda2libsite-packagessklearnexternalsjoblibparallel.py", line 72, in __call__
return [func(*args, **kwargs) for func, args, kwargs in self.items]
File "C:UsersKubiKAnaconda2libsite-packagessklearncross_validation.py", line 1520, in _fit_and_score
estimator.set_params(**parameters)
File "C:UsersKubiKAnaconda2libsite-packagessklearnbase.py", line 270, in set_params
(key, self.__class__.__name__))
ValueError: Invalid parameter min_sample_split for estimator DecisionTreeClassifier. Check the list of available parameters with `estimator.get_params().keys()`.
[Finished in 0.3s]
There is a typo in your code: the parameter should be min_samples_split,
not min_sample_split.
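For reference, here is a minimal sketch of the corrected parameter grid, reusing dt, X, y and the same old sklearn.grid_search API from the code in the question:
# define parameter values that should be searched
max_depth_options = range(10, 251)
min_samples_split_options = range(2, 4)
# create a parameter grid: note the corrected key name min_samples_split
param_grid_dt = dict(max_depth=max_depth_options, min_samples_split=min_samples_split_options)
# instantiate and fit the grid as before
grid = GridSearchCV(dt, param_grid_dt, cv=10, scoring='accuracy')
grid.fit(X, y)
print grid.best_score_
print grid.best_params_
If you are ever unsure of a parameter name, dt.get_params().keys() lists every parameter the estimator accepts, as the error message suggests. Also note that range(10, 251) produces 249 candidate depths, so this grid fits 249 * 2 * 10 = 4980 models with 10-fold cross-validation.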