How to reuse the layers of a Keras functional model


import tensorflow as tf
import keras

def get_model():
    x1 = keras.layers.Dense(6, activation='relu', input_shape=(10,))
    x2 = keras.layers.Dense(3, activation='relu')(x1)
    output_ = keras.layers.Dense(10, activation='sigmoid')(x2)
    model = keras.Model(inputs=[x1], outputs=[output_])
    return model

model = get_model()
model.compile(...)
chk_point = keras.callbacks.ModelCheckpoint('./best_model.h5',
                                             monitor='val_loss', save_best_only=True, mode='min')
model.fit(..., callbacks=[chk_point])

def new_model():
    old = '../best_model.h5'  # using old model for training new model

(I am trying to do transfer learning with a pretrained functional model.)

Now I want to get all the layers of best_model. If possible, I would like to remove the last layer of best_model, freeze best_model (i.e. set trainable=False), and add new layers to it.

I am trying to train a denoising autoencoder on the training dataset, where the input and the output of best_model.h5 have the same shape (e.g. input_shape=(100,), output_shape=(100,)). Then I want to freeze all the layers, remove the last layer of that model, and add new layers to it. After that I want to train the model on X and y as usual.
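For reference, loading the saved checkpoint, listing its layers, and freezing them could look roughly like this (a minimal sketch, assuming ./best_model.h5 was written by the ModelCheckpoint callback above):

import keras

# load the checkpointed model (assumes ./best_model.h5 exists from the training run above)
best_model = keras.models.load_model('./best_model.h5')

# "get all the layers": list every layer with its name and trainable flag
for layer in best_model.layers:
    print(layer.name, layer.trainable)

# freeze the loaded model layer by layer
for layer in best_model.layers:
    layer.trainable = False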

One approach is to define a new model, copy the layer weights over from the old model (except the last layer), and set trainable to False. For example, say you want to remove the last layer and add two dense layers (this is just an example). Note that the input and output size of the current model is (10,). Also note that the first layer in the functional API is an Input layer. Here is the code:

import tensorflow as tf
from tensorflow import keras
from sklearn.model_selection import train_test_split
import numpy as np

def get_model():
    inputs = keras.Input(shape=(10,))
    x1 = keras.layers.Dense(6, activation='relu')(inputs)
    x2 = keras.layers.Dense(3, activation='relu')(x1)
    output_ = keras.layers.Dense(10, activation='sigmoid')(x2)
    model = keras.Model(inputs=inputs, outputs=[output_])
    return model

def get_new_model():
    inputs = keras.Input(shape=(10,))
    x1 = keras.layers.Dense(6, activation='relu')(inputs)
    x2 = keras.layers.Dense(3, activation='relu')(x1)
    # new layers
    x3 = keras.layers.Dense(15, activation='relu')(x2)
    output_ = keras.layers.Dense(10, activation='sigmoid')(x3)
    model = keras.Model(inputs=inputs, outputs=[output_])
    return model

model = get_model()
model.compile(optimizer='adam', loss='mse')
batch_size = 16
# run one forward pass on random data as a sanity check (the functional model is already built)
_ = model.call(inputs=tf.random.normal(shape=(batch_size, 10)))
model.summary()
# create x data using two normal distributions with different mean
# y data is unused in auto encoder
x0 = np.random.normal(loc=0.0, size=(100, 10))
x1 = np.random.normal(loc=0.3, size=(100, 10))
x = np.concatenate((x0, x1), axis=0)
# y is unused
y0 = np.zeros((100, 10))
y1 = np.ones((100, 10))
y = np.concatenate((y0, y1), axis=0)
# split train/validation data
x_train, x_val, y_train, y_val = train_test_split(x, y, train_size=0.7) 
print(x_train.shape)
print(y_train.shape)
chk_point = keras.callbacks.ModelCheckpoint(f'./best_model.h5', 
monitor='val_loss', save_best_only=True, mode='min')
history = model.fit(x=x_train, y=x_train, batch_size=batch_size, epochs=100, callbacks=[chk_point], validation_data=(x_val, x_val))
# reload old model
model_old = keras.models.load_model('./best_model.h5')
model_old.summary()
# get new model
model_new = get_new_model()
model_new.compile(optimizer='adam', loss='mse')
_ = model_new.call(inputs=tf.random.normal(shape=(batch_size, 10)))
model_new.summary()
# copy the two dense layer weights and set trainable to False
# skip the first layer which is an InputLayer
for layer_old, layer_new in zip(model_old.layers[1:3], model_new.layers[1:3]):
    layer_new.trainable = False
    # the layer objects are modified in place, so nothing needs to be assigned back into model_new.layers
    layer_new.set_weights(layer_old.get_weights())
model_new.summary()
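An alternative is to reuse the old model's graph directly instead of copying weights layer by layer: load best_model.h5, drop its last layer by taking the output of the second-to-last layer, freeze the old layers, and stack new layers on top. Below is a minimal sketch (it assumes ./best_model.h5 was saved by the training run above; the new layer sizes 15 and 10 are just examples):

base = keras.models.load_model('./best_model.h5')
# freeze every layer of the old model
for layer in base.layers:
    layer.trainable = False
# take the output of the second-to-last layer, i.e. drop the old output layer
x = base.layers[-2].output
x = keras.layers.Dense(15, activation='relu')(x)               # new hidden layer (example size)
new_output = keras.layers.Dense(10, activation='sigmoid')(x)   # new output layer
model_alt = keras.Model(inputs=base.input, outputs=new_output)
model_alt.compile(optimizer='adam', loss='mse')
model_alt.summary()

Because the new model shares the old layer objects, there is no weight copying at all, and the frozen layers stay frozen during the subsequent fit.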
