我正在尝试使用 Keras Tuner 来调优有状态(stateful)的 LSTM。我的代码可以运行并训练模型,但我仍然不知道如何让模型在各个 epoch 之间重置状态。通常我会在循环中每次只训练 1 个 epoch,并在 epoch 之间手动重置状态;然而我不确定在 Keras Tuner 中是否可以做到这一点。有什么参数可以实现这一点吗?下面是我当前的调谐器代码:
def build_model(hp):
    """Build and compile a stateful stacked-LSTM model for KerasTuner.

    Args:
        hp: ``keras_tuner.HyperParameters`` instance supplied by the tuner;
            every ``hp.Int``/``hp.Choice``/``hp.Float`` call below registers
            one dimension of the search space.

    Returns:
        A compiled ``Sequential`` model.

    Relies on module-level globals: ``batch_size``, ``train_X``, ``train_y``.
    """
    model = Sequential()
    # stateful=True requires a fixed batch size, hence batch_shape here.
    model.add(layers.Input(
        batch_shape=(batch_size, train_X.shape[1], train_X.shape[2])))

    for i in range(hp.Int('num_LSTM_layers', 1, 3)):
        model.add(layers.LSTM(
            units=hp.Int('units_' + str(i), min_value=32, max_value=512, step=4),
            batch_input_shape=(batch_size, train_X.shape[1], train_X.shape[2]),
            activation=hp.Choice('LSTM_activation_' + str(i),
                                 values=['relu', 'softplus',
                                         'tanh', 'sigmoid', 'softsign',
                                         'selu', 'elu', 'linear'],
                                 default='elu'),
            return_sequences=True,
            stateful=True))

    # BUG FIX: the original indexed these hyperparameters with `i` (the LSTM
    # loop variable, frozen at its final value), so all dense layers in a
    # trial shared a single hyperparameter, and 'units_<i>' collided with the
    # LSTM hyperparameter of the same name but a different range (KerasTuner
    # keeps the first registration, silently capping dense units at 512).
    # Index with `j` and use a distinct name for the dense unit counts.
    for j in range(hp.Int('num_dense_layers', 1, 3)):
        model.add(layers.Dense(
            units=hp.Int('dense_units_' + str(j),
                         min_value=64, max_value=1024, step=4),
            activation=hp.Choice('dense_activation_' + str(j),
                                 values=['relu', 'softplus',
                                         'tanh', 'sigmoid', 'softsign',
                                         'selu', 'elu', 'linear'],
                                 default='elu')))
        model.add(layers.Dropout(
            rate=hp.Float('rate_' + str(j),
                          min_value=0.01, max_value=0.50, step=0.01)))

    # NOTE(review): the last LSTM keeps return_sequences=True, so this head
    # is applied per time step and the output is 3-D — confirm that matches
    # the shape of train_y (looks like it expects 2-D targets).
    model.add(layers.Dense(train_y.shape[1], activation='linear'))
    model.compile(
        optimizer=hp.Choice('optimizers',
                            values=['rmsprop', 'adam', 'adadelta', 'Nadam']),
        loss='mse', metrics=['mse'])
    return model
# Bayesian-optimisation tuner over the search space declared in build_model.
# Results and checkpoints go under model_path/LSTM_KT_2001 (overwritten on
# each run); each candidate is trained 3 times and scored on val_loss.
tuner_bo = BayesianOptimization(
    build_model,
    objective='val_loss',
    max_trials=50,
    num_initial_points=10,
    executions_per_trial=3,
    overwrite=True,
    directory=model_path,
    project_name='LSTM_KT_2001',
    allow_new_entries=True,
    tune_new_entries=True,
)

tuner_bo.search_space_summary()

# NOTE(review): batch_size=1 here must agree with the `batch_size` global
# baked into the stateful model's batch_shape — confirm they match.
tuner_bo.search(
    train_X,
    train_y,
    epochs=100,
    batch_size=1,
    validation_data=(test_X, test_y),
    verbose=2,
)
我重写了 tuner 类中的 on_epoch_end 方法,但不确定这样做是否正确。
class MyBayesianOptimization(BayesianOptimization):
    """BayesianOptimization tuner that resets recurrent states every epoch.

    Overrides the tuner's per-epoch hook so that stateful LSTM layers start
    each epoch with cleared states, reproducing the usual manual
    train-one-epoch / reset loop inside the tuner's own training.
    """

    def on_epoch_end(self, trial, my_hyper_model, epoch, logs=None):
        """Reset the model's recurrent states, then defer to the base hook.

        Args:
            trial: the current KerasTuner trial.
            my_hyper_model: the model being trained for this trial.
            epoch: zero-based index of the epoch that just finished.
            logs: optional dict of epoch metrics forwarded to the base class.
        """
        # Clear the states of every stateful layer before the next epoch.
        my_hyper_model.reset_states()
        # NOTE(review): the base hook records epoch metrics for the search
        # objective — keep calling it. Python-3 zero-argument super() replaces
        # the redundant super(MyBayesianOptimization, self) form.
        super().on_epoch_end(trial, my_hyper_model, epoch, logs=logs)