I have the following code, using the Keras Scikit-Learn Wrapper:
from keras.models import Sequential
from sklearn import datasets
from keras.layers import Dense
from sklearn.model_selection import train_test_split
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_score
from sklearn import preprocessing
import pickle
import numpy as np
import json
def classifier(X, y):
    """
    Description of classifier
    """
    NOF_ROW, NOF_COL = X.shape

    def create_model():
        # create model
        model = Sequential()
        model.add(Dense(12, input_dim=NOF_COL, init='uniform', activation='relu'))
        model.add(Dense(6, init='uniform', activation='relu'))
        model.add(Dense(1, init='uniform', activation='sigmoid'))
        # Compile model
        model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
        return model

    # evaluate using 10-fold cross validation
    seed = 7
    np.random.seed(seed)
    model = KerasClassifier(build_fn=create_model, nb_epoch=150, batch_size=10, verbose=0)
    return model
def main():
    """
    Description of main
    """
    iris = datasets.load_iris()
    X, y = iris.data, iris.target
    X = preprocessing.scale(X)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)
    model_tt = classifier(X_train, y_train)
    model_tt.fit(X_train, y_train)

    #--------------------------------------------------
    # This fails
    #--------------------------------------------------
    filename = 'finalized_model.sav'
    pickle.dump(model_tt, open(filename, 'wb'))

    # load the model from disk
    loaded_model = pickle.load(open(filename, 'rb'))
    result = loaded_model.score(X_test, y_test)
    print(result)

    #--------------------------------------------------
    # This also fails
    #--------------------------------------------------
    # from keras.models import load_model
    # model_tt.save('test_model.h5')

    #--------------------------------------------------
    # This works OK
    #--------------------------------------------------
    # print(model_tt.score(X_test, y_test))
    # print(model_tt.predict_proba(X_test))
    # print(model_tt.predict(X_test))

    # Output of predict_proba
    # 2nd column is the probability that the prediction is 1
    # this value is used as the final score, which can be used
    # with other methods as a comparison
    # [[ 0.25311464  0.74688536]
    #  [ 0.84401423  0.15598579]
    #  [ 0.96047372  0.03952631]
    #  ...,
    #  [ 0.25518912  0.74481088]
    #  [ 0.91467732  0.08532269]
    #  [ 0.25473493  0.74526507]]

    # Output of predict
    # [[1]
    #  [0]
    #  [0]
    #  ...,
    #  [1]
    #  [0]
    #  [1]]


if __name__ == '__main__':
    main()
As noted in the code, it fails at this line:
pickle.dump(model_tt, open(filename, 'wb'))
and raises this error:
pickle.PicklingError: Can't pickle <function create_model at 0x101c09320>: it's not found as __main__.create_model
How can I get around it?
Edit 1: original answer about saving the model
With HDF5:
# saving model
json_model = model_tt.model.to_json()
open('model_architecture.json', 'w').write(json_model)
# saving weights
model_tt.model.save_weights('model_weights.h5', overwrite=True)
# loading model
from keras.models import model_from_json
model = model_from_json(open('model_architecture.json').read())
model.load_weights('model_weights.h5')
# dont forget to compile your model
model.compile(loss='binary_crossentropy', optimizer='adam')
Edit 2: a full code example with the iris dataset
# Train model and make predictions
import numpy
import pandas
from keras.models import Sequential, model_from_json
from keras.layers import Dense
from keras.utils import np_utils
from sklearn import datasets
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)
# load dataset
iris = datasets.load_iris()
X, Y, labels = iris.data, iris.target, iris.target_names
X = preprocessing.scale(X)
# encode class values as integers
encoder = LabelEncoder()
encoder.fit(Y)
encoded_Y = encoder.transform(Y)
# convert integers to dummy variables (i.e. one hot encoded)
y = np_utils.to_categorical(encoded_Y)
def build_model():
    # create model
    model = Sequential()
    model.add(Dense(4, input_dim=4, init='normal', activation='relu'))
    model.add(Dense(3, init='normal', activation='sigmoid'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

def save_model(model):
    # saving model
    json_model = model.to_json()
    open('model_architecture.json', 'w').write(json_model)
    # saving weights
    model.save_weights('model_weights.h5', overwrite=True)

def load_model():
    # loading model
    model = model_from_json(open('model_architecture.json').read())
    model.load_weights('model_weights.h5')
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    return model
X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.3, random_state=seed)
# build
model = build_model()
model.fit(X_train, Y_train, nb_epoch=200, batch_size=5, verbose=0)
# save
save_model(model)
# load
model = load_model()
# predictions
predictions = model.predict_classes(X_test, verbose=0)
print(predictions)
# reverse encoding
for pred in predictions:
    print(labels[pred])
Note that I only used Keras here, not the wrapper. The wrapper just adds complexity to something simple. Also, the code is deliberately left unrefactored so you can see the whole picture.
You also said you want to output 1 or 0. That is not possible with this dataset, because you have 3 output classes (Iris-setosa, Iris-versicolor, Iris-virginica). If you had only 2 classes, then your output dim and classes would be 0 or 1 using a sigmoid output function; a sketch of that variant follows below.
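For reference, here is a minimal sketch of what that 2-class variant could look like, using the same old-style Keras API as the example above (layer sizes are illustrative, not from the original answer):

# hypothetical binary variant: a single sigmoid unit, so predictions come out as 0 or 1
def build_binary_model():
    model = Sequential()
    model.add(Dense(4, input_dim=4, init='normal', activation='relu'))
    model.add(Dense(1, init='normal', activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model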
Just to complement gaarv's answer - if you don't need the separation between the model structure (model.to_json()) and the weights (model.save_weights()), you can use one of the following:
- Use the built-in keras.models.save_model and keras.models.load_model, which store everything together in a single hdf5 file, as sketched below.
- Use pickle to serialize the Model object (or any class that contains references to it) to a file/network/whatever.
Unfortunately, Keras doesn't support pickle by default. You can use my patched solution that adds this missing feature. Working code is here: http://zachmoshe.com/2017/04/03/pickling-keras-models.html
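For the first option, a minimal sketch (it assumes you already have a trained Keras model object; the file name is only an illustration):

from keras.models import save_model, load_model

# store architecture, weights, training config and optimizer state in one HDF5 file
save_model(model, 'full_model.h5')  # equivalent to model.save('full_model.h5')

# later: restore a ready-to-use, compiled model from that file
restored_model = load_model('full_model.h5')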
Another great alternative is to use callbacks when you fit your model. Specifically the ModelCheckpoint callback, like this:
from keras.callbacks import ModelCheckpoint

# Create instance of ModelCheckpoint
chk = ModelCheckpoint("myModel.h5", monitor='val_loss', save_best_only=False)

# add that callback to the list of callbacks to pass
callbacks_list = [chk]

# create your model
model_tt = KerasClassifier(build_fn=create_model, nb_epoch=150, batch_size=10)

# fit your model with your data. Pass the callback(s) here
model_tt.fit(X_train, y_train, callbacks=callbacks_list)
This saves your training at each epoch to the myModel.h5 file. This is a great benefit, because you can stop your training whenever you want (for example, when you see it has started to overfit), and still retain the previous training.
Note that this saves both the structure and the weights in the same hdf5 file (as shown by Zach), so you can then load the model using keras.models.load_model.
If you want to save only your weights separately, you can then use the save_weights_only=True argument when instantiating your ModelCheckpoint, enabling you to load your model as explained by Gaarv. Extracting from the docs:
save_weights_only: if True, then only the model's weights will be saved (model.save_weights(filepath)), else the full model is saved (model.save(filepath)).
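A minimal sketch of that weights-only variant (file name is illustrative; it reuses the create_model build function from the question):

from keras.callbacks import ModelCheckpoint

# save only the weights after each epoch
chk = ModelCheckpoint("myWeights.h5", monitor='val_loss', save_weights_only=True)
model_tt = KerasClassifier(build_fn=create_model, nb_epoch=150, batch_size=10)
model_tt.fit(X_train, y_train, callbacks=[chk])

# to restore: rebuild the same architecture, then load the saved weights into it
new_model = create_model()
new_model.load_weights("myWeights.h5")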
The accepted answer is too complicated. You can fully save and restore every aspect of your model in a .h5 file. Straight from the Keras FAQ:
You can use model.save(filepath) to save a Keras model into a single HDF5 file which will contain:
- the architecture of the model, allowing to re-create the model
- the weights of the model
- the training configuration (loss, optimizer)
- the state of the optimizer, allowing to resume training exactly where you left off.
You can then use keras.models.load_model(filepath) to reinstantiate your model. load_model will also take care of compiling the model using the saved training configuration (unless the model was never compiled in the first place).

And the corresponding code:
from keras.models import load_model
model.save('my_model.h5') # creates a HDF5 file 'my_model.h5'
del model # deletes the existing model
# returns a compiled model identical to the previous one
model = load_model('my_model.h5')
In case your keras wrapper model is in a scikit pipeline, you can save the steps of the pipeline separately.
import joblib
from sklearn.pipeline import Pipeline
from tensorflow import keras
# pass the create_cnn_model function into wrapper
cnn_model = keras.wrappers.scikit_learn.KerasClassifier(build_fn=create_cnn_model)
# create pipeline
cnn_model_pipeline_estimator = Pipeline([
    ('preprocessing_pipeline', pipeline_estimator),  # your preprocessing pipeline, defined elsewhere
    ('clf', cnn_model)
])

# train model
final_model = cnn_model_pipeline_estimator.fit(
    X, y, clf__batch_size=32, clf__epochs=15)

# collect the preprocessing pipeline & model separately
pipeline_estimator = final_model.named_steps['preprocessing_pipeline']
clf = final_model.named_steps['clf']

# store pipeline and model separately
joblib.dump(pipeline_estimator, 'path/to/pipeline.pkl')
clf.model.save('path/to/model.h5')

# load pipeline and model
pipeline_estimator = joblib.load('path/to/pipeline.pkl')
model = keras.models.load_model('path/to/model.h5')
new_example = [[...]]
# transform new data with pipeline & use model for prediction
transformed_data = pipeline_estimator.transform(new_example)
prediction = model.predict(transformed_data)