How to find the optimal hyperparameters of a convolutional network



I have started exploring the scikit-optimize package and I am relatively new to Bayesian optimization, which I would like to use for my current convolutional neural network. However, my attempts to find the best hyperparameters of the CNN with Bayesian optimization have not worked so far.

This is what I have tried so far, but my code does not work correctly and I cannot tell which part is still wrong. Could someone point out how to fix it? Is there any working implementation of Bayesian optimization on a convolutional neural network for finding its best hyperparameters? Any possible thoughts?

Update

I tried GridSearchCV and RandomSearchCV for my convolutional neural network, which has very deep layers. With GridSearchCV the search takes far too long: even after 2-3 whole days the optimization does not finish. I would like to use a newer optimization framework such as Bayesian optimization (i.e., skopt or optuna) to find the best parameters and hyperparameters of the CNN. Can anyone suggest a possible remedy and an efficient approach for my attempt 1 in colab and my attempt 2 in colab? Any thoughts?

My current attempt

This is my current attempt at Bayesian optimization with the scikit-optimize package. It is the attempt from this colab, where I ran all my experiments on implementing Bayesian optimization on the CNN to find its best hyperparameters:

### objective function passed to Bayesian optimization
@use_named_args(dimensions=dimensions)
def bayes_opt(cnn_num_steps, cnn_init_epoch, cnn_max_epoch,
              cnn_learning_rate_decay, cnn_batch_size, cnn_dropout_rate, cnn_init_learning_rate):
    global iteration, num_steps, init_epoch, max_epoch, learning_rate_decay, dropout_rate, init_learning_rate, batch_size
    num_steps = np.int32(cnn_num_steps)
    batch_size = np.int32(cnn_batch_size)
    learning_rate_decay = np.float32(cnn_learning_rate_decay)
    init_epoch = np.int32(cnn_init_epoch)
    max_epoch = np.int32(cnn_max_epoch)
    dropout_rate = np.float32(cnn_dropout_rate)
    init_learning_rate = np.float32(cnn_init_learning_rate)

    tf.reset_default_graph()
    tf.set_random_seed(randomState)
    sess = tf.Session()

    (train_X, train_y), (test_X, test_y) = cifar10.load_data()
    train_X = train_X.astype('float32') / 255.0
    test_X = test_X.astype('float32') / 255.0

    targets = tf.placeholder(tf.float32, [None, input_size], name="targets")
    model_learning_rate = tf.placeholder(tf.float32, None, name="learning_rate")
    model_dropout_rate = tf.placeholder_with_default(0.0, shape=())
    global_step = tf.Variable(0, trainable=False)

    prediction = cnn(model_dropout_rate, model_learning_rate)

    model_learning_rate = tf.train.exponential_decay(learning_rate=model_learning_rate, global_step=global_step,
                                                     decay_rate=learning_rate_decay, decay_steps=init_epoch, staircase=False)

    with tf.name_scope('loss'):
        model_loss = tf.losses.mean_squared_error(targets, prediction)
    with tf.name_scope('adam_optimizer'):
        train_step = tf.train.AdamOptimizer(model_learning_rate).minimize(model_loss, global_step=global_step)

    sess.run(tf.global_variables_initializer())
    for epoch_step in range(max_epoch):
        for batch_X, batch_y in generate_batches(train_X, train_y, batch_size):
            train_data_feed = {
                inputs: batch_X,
                targets: batch_y,
                model_learning_rate: init_learning_rate,
                model_dropout_rate: dropout_rate
            }
            sess.run(train_step, train_data_feed)

    ## how to return validation error, any idea?
    ## return validation error
    ## return val_error

My current attempt in the colab still has various problems and is not finished yet. Can anyone suggest a workable way of using Bayesian optimization to find the best hyperparameters of a deep convolutional neural network? Any ideas? Thanks!
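For reference, here is a minimal, self-contained sketch (not the colab code above) of how an objective function can train a small Keras stand-in CNN and hand the validation error back to skopt's gp_minimize, which minimises whatever the objective returns; the tiny network, the data subset and the search space below are placeholders only:

import tensorflow as tf
from skopt import gp_minimize
from skopt.space import Real, Integer
from skopt.utils import use_named_args

# Small CIFAR-10 subset so the sketch runs quickly.
(train_X, train_y), _ = tf.keras.datasets.cifar10.load_data()
train_X = train_X[:10000].astype('float32') / 255.0
train_y = tf.keras.utils.to_categorical(train_y[:10000], 10)

dimensions = [
    Real(1e-4, 1e-2, prior='log-uniform', name='learning_rate'),
    Real(0.1, 0.5, name='dropout_rate'),
    Integer(32, 128, name='batch_size'),
]

@use_named_args(dimensions=dimensions)
def objective(learning_rate, dropout_rate, batch_size):
    # Build and train a small stand-in CNN with the sampled hyperparameters.
    model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dropout(dropout_rate),
        tf.keras.layers.Dense(10, activation='softmax'),
    ])
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate),
                  loss='categorical_crossentropy', metrics=['accuracy'])
    history = model.fit(train_X, train_y, batch_size=int(batch_size),
                        epochs=3, validation_split=0.2, verbose=0)
    # gp_minimize minimises the returned value, so return the validation loss.
    return history.history['val_loss'][-1]

result = gp_minimize(func=objective, dimensions=dimensions, n_calls=12, random_state=42)
print(result.x, result.fun)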

I suggest you use the Keras Tuner package for Bayesian Optimization.

Below is just a small example of how you can do this.

from kerastuner import HyperModel, Objective
import tensorflow as tf
from kerastuner.tuners import BayesianOptimization

# Create the keras tuner model.
# (tokenizer, embedding_dim and the custom f1 metric are defined elsewhere in the
#  notebook this example comes from.)
class MyHyperModel(HyperModel):

    def build(self, hp):
        model = tf.keras.Sequential()
        model.add(tf.keras.layers.Embedding(len(tokenizer.word_index) + 1, embedding_dim))
        for i in range(hp.Int('num_layers', 1, 3)):
            model.add(tf.keras.layers.Conv1D(filters=hp.Choice('num_filters', values=[32, 64], default=64),
                                             activation='relu',
                                             kernel_size=3,
                                             bias_initializer='glorot_uniform'))
            model.add(tf.keras.layers.MaxPool1D())

        model.add(tf.keras.layers.GlobalMaxPool1D())

        for i in range(hp.Int('num_layers_rnn', 1, 3)):
            model.add(tf.keras.layers.Dense(units=hp.Int('units', min_value=32, max_value=512, step=32), activation='relu'))
            model.add(tf.keras.layers.Dropout(0.2))

        model.add(tf.keras.layers.Dense(1, activation='sigmoid'))

        model.compile(
            optimizer=hp.Choice('optimizer', values=['Adam', 'Adadelta', 'Adamax']),
            loss='binary_crossentropy',
            metrics=[f1])
        return model

Then, once it is created, you can start training the model with the following code.

hypermodel = MyHyperModel()

tuner = BayesianOptimization(
    hypermodel,
    objective=Objective('val_f1', direction="max"),
    num_initial_points=50,
    max_trials=15,
    directory='./',
    project_name='real_or_not')

# train_dataset / validation_dataset are prepared elsewhere (e.g. tf.data datasets).
tuner.search(train_dataset,
             epochs=10, validation_data=validation_dataset)
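
Once the search finishes, the best settings can be read back from the tuner; a short continuation of the snippet above using the standard keras-tuner calls:

best_hp = tuner.get_best_hyperparameters(num_trials=1)[0]   # winning hyperparameter set
print(best_hp.values)                                        # dict of name -> value
best_model = tuner.get_best_models(num_models=1)[0]          # model built with those values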

You can check the documentation at this link. I have also attached a link to a Kaggle notebook that shows a Bayesian Optimization I wrote myself, so that you can actually try the example. Feel free to ask any further questions.

Update: 16/08

You commented that you would like to tune the hyperparameters below with Bayesian Optimization. I would approach it in the following way.

import tensorflow as tf
from kerastuner import HyperModel, Objective
from kerastuner.tuners import BayesianOptimization

class MyHyperModel(HyperModel):

    def build(self, hp):
        model = tf.keras.Sequential()
        model.add(tf.keras.layers.Conv2D(filters=hp.Choice('num_filters', values=[32, 64], default=64),
                                         activation='relu',
                                         kernel_size=(3, 3),
                                         bias_initializer='glorot_uniform',
                                         input_shape=(32, 32, 3)))
        model.add(tf.keras.layers.MaxPooling2D())

        for i in range(hp.Int('num_layers', 1, 3)):
            model.add(tf.keras.layers.Conv2D(filters=hp.Choice('num_filters', values=[32, 64], default=64),
                                             activation='relu',
                                             kernel_size=(3, 3),
                                             bias_initializer='glorot_uniform'))
            model.add(tf.keras.layers.MaxPooling2D())

        model.add(tf.keras.layers.Flatten())

        for i in range(hp.Int('num_layers_rnn', 1, 3)):
            model.add(tf.keras.layers.Dense(units=hp.Int('units', min_value=32, max_value=512, step=32), activation='relu'))
            model.add(tf.keras.layers.Dropout(rate=hp.Choice('dropout_rate', values=[0.2, 0.4, 0.5], default=0.2)))

        model.add(tf.keras.layers.Dense(1, activation='sigmoid'))

        model.compile(
            optimizer=tf.keras.optimizers.Adam(
                hp.Choice('learning_rate', [1e-2, 1e-3, 1e-4])),
            loss='binary_crossentropy',
            metrics=['accuracy'])
        return model

class MyTuner(BayesianOptimization):
    def run_trial(self, trial, *args, **kwargs):
        # You can add additional HyperParameters for preprocessing and custom training loops
        # via overriding `run_trial`
        kwargs['batch_size'] = trial.hyperparameters.Int('batch_size', 32, 256, step=32)
        kwargs['epochs'] = trial.hyperparameters.Int('epochs', 10, 30)
        super(MyTuner, self).run_trial(trial, *args, **kwargs)

hypermodel = MyHyperModel()

# Note: depending on the TF/Keras version the accuracy metric may be logged as 'val_accuracy'.
tuner = MyTuner(
    hypermodel,
    objective=Objective('val_acc', direction="max"),
    num_initial_points=50,
    max_trials=15,
    directory='./',
    project_name='cnn_bayesian_opt')

tuner.search(train_dataset, validation_data=validation_dataset)

You can also look at the github issue here, which explains how to tune epochs and batch_size.

The code above will tune the following parameters, as you requested.

  1. number_of_convolutional_filter
  2. number_of_hidden_layer
  3. drop_rate
  4. learning_rate
  5. batch_size
  6. epochs

The Ax platform is a very powerful tool for applying Bayesian optimization to deep neural networks. Here is how I use ax:

Build the CNN model

!pip install ax-platform

from tensorflow.keras import models
from ax.service.managed_loop import optimize
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense

(X_train, y_train), (X_test, y_test) = cifar10.load_data()
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)

def build_model(opt, dropout, n_hidden=64):  # n_hidden: width of the fully connected layer
    model = models.Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), input_shape=(32, 32, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(n_hidden))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))
    model.add(Dense(10))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
    return model

Train the CNN model

The next step is to train the CNN model and return its accuracy, which is what the Bayesian optimization will use:

def train_evaluate(param):
    acc = 0
    mymodel = build_model(opt=param["opt"], dropout=param["dropout"])
    mymodel.fit(X_train, y_train, epochs=param["epochs"], batch_size=param["batch_size"],
                verbose=1, validation_data=(X_test, y_test))
    acc = mymodel.evaluate(X_test, y_test)[1]
    print(param, acc)
    del mymodel
    return acc

Run Bayesian optimization

best_parameters, values, experiment, model = optimize(
    parameters=[
        {"name": "opt", "type": "choice", "values": ['adam', 'rmsprop', 'sgd']},
        {"name": "dropout", "type": "choice", "values": [0.0, 0.25, 0.50, 0.75, 0.99]},
        {"name": "epochs", "type": "choice", "values": [10, 50, 100]},
        {"name": "batch_size", "type": "choice", "values": [32, 64, 100, 128]}
    ],
    evaluation_function=train_evaluate,
    objective_name="acc",
    total_trials=10,
)

Return the best parameters

data = experiment.fetch_data()
df = data.df
best_arm_name = df.arm_name[df["mean"] == df["mean"].max()].values[0]
best_arm = experiment.arms_by_name[best_arm_name]
print(best_parameters)
print(best_arm)

Note that you can add any other parameters you would like to optimize, such as learning_rate and num_hidden_layer, in the same way I showed above; see the sketch right after this paragraph. I hope this meets your needs. Let me know if you have more questions. Good luck!
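
For instance, a minimal sketch of what that extension could look like. build_model_v2 and train_evaluate_v2 are hypothetical variants of the functions above, and the "range"/"log_scale" fields follow the Ax parameter spec:

from tensorflow.keras.optimizers import Adam, RMSprop, SGD

def build_model_v2(opt, dropout, lr, n_hidden):
    # Same network as build_model above, but with a tunable hidden-layer width
    # and an explicit learning rate attached to the chosen optimizer.
    model = models.Sequential([
        Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(32, 32, 3)),
        MaxPooling2D(pool_size=(2, 2)),
        Flatten(),
        Dense(n_hidden, activation='relu'),
        Dropout(dropout),
        Dense(10, activation='softmax'),
    ])
    optimizer = {'adam': Adam, 'rmsprop': RMSprop, 'sgd': SGD}[opt](learning_rate=lr)
    model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    return model

def train_evaluate_v2(param):
    model = build_model_v2(param["opt"], param["dropout"], param["lr"], param["n_hidden"])
    model.fit(X_train, y_train, epochs=param["epochs"], batch_size=param["batch_size"], verbose=0)
    return model.evaluate(X_test, y_test, verbose=0)[1]

best_parameters, values, experiment, model = optimize(
    parameters=[
        {"name": "opt", "type": "choice", "values": ['adam', 'rmsprop', 'sgd']},
        {"name": "dropout", "type": "choice", "values": [0.0, 0.25, 0.5]},
        {"name": "lr", "type": "range", "bounds": [1e-4, 1e-2], "log_scale": True},
        {"name": "n_hidden", "type": "choice", "values": [32, 64, 128]},
        {"name": "epochs", "type": "choice", "values": [10, 30]},
        {"name": "batch_size", "type": "choice", "values": [32, 64, 128]},
    ],
    evaluation_function=train_evaluate_v2,
    objective_name="acc",
    total_trials=10,
)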

Latest update