TypeError when saving a TensorFlow Keras model to an h5 file



I'm trying to build an autoencoder so I can get a vector representation of an image (x: 640, y: 480). However, when I call encoder.save("encoder.h5"), I get the following error:

TypeError: ('Not JSON Serializable:', <tf.Variable 'batch_normalization/gamma:0' shape=(32,) dtype=float32, numpy=
array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
dtype=float32)>)

I'm fairly sure the model itself works: the fit call runs, and after fitting I can call encoder.predict() and get the encoded vector (length 60). My code is:

import os
import tensorflow as tf
from tensorflow.python.keras.layers import Input, UpSampling2D, Add, Conv2D, MaxPooling2D, LeakyReLU
import cv2
from tensorflow.python.keras import Model
from tensorflow.python.keras import layers, losses
from tensorflow.python.keras.models import Model
import h5py

def get_encoder(shape=(640, 480, 3)):
    def res_block(x, n_features):
        _x = x
        x = tf.keras.layers.BatchNormalization()(x)
        x = LeakyReLU()(x)
        x = Conv2D(n_features, kernel_size=(3, 3), strides=(1, 1), padding='same')(x)
        x = Add()([_x, x])
        return x

    inp = Input(shape=shape)
    # 640 x 480
    x = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding='same')(inp)
    x = tf.keras.layers.BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding='same')(x)
    x = tf.keras.layers.BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
    # 320 x 240
    x = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding='same')(x)
    for _ in range(2):
        x = res_block(x, 32)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
    # 160 x 120
    x = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding='same')(x)
    for _ in range(2):
        x = res_block(x, 32)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
    # 80 x 60
    x = Conv2D(1, kernel_size=(1, 1), strides=(1, 1), padding='same')(x)
    return Model(inp, x)

def get_decoder(shape=(240, 180, 3)):
    inp = Input(shape=shape)
    # 60 x 80
    x = UpSampling2D((2, 2))(inp)
    x = Conv2D(64, kernel_size=(3, 3), strides=(1, 1), padding='same')(x)
    x = tf.keras.layers.BatchNormalization()(x)
    x = LeakyReLU()(x)
    # 120 x 160
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(64, kernel_size=(3, 3), strides=(1, 1), padding='same')(x)
    x = tf.keras.layers.BatchNormalization()(x)
    x = LeakyReLU()(x)
    # 240 x 320
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(8, kernel_size=(3, 3), strides=(1, 1), padding='same')(x)
    x = tf.keras.layers.BatchNormalization()(x)
    x = LeakyReLU()(x)
    # 480 x 640
    x = Conv2D(1, kernel_size=(1, 1), strides=(1, 1), padding='same')(x)
    return Model(inp, x)

encoder = get_encoder((480, 640, 3))
decoder = get_decoder((60, 80, 1))
inp = Input((480, 640, 3))
e = encoder(inp)
d = decoder(e)
autoencoder = Model(inp, d)
autoencoder.compile(optimizer='adam', loss=losses.MeanSquaredError())
batch_size = 8
SHAPE = (480, 640)
IMAGES = "pathToImages"
image_generator = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
train_gen = image_generator.flow_from_directory(
    os.path.join(IMAGES, "train"),
    class_mode="input", target_size=SHAPE, batch_size=batch_size,
)
val_gen = image_generator.flow_from_directory(
    os.path.join(IMAGES, "test"),
    class_mode="input", target_size=SHAPE, batch_size=batch_size,
)
for i in range(10):
    autoencoder.fit(train_gen, validation_data=val_gen, epochs=1, steps_per_epoch=10, validation_steps=5, batch_size=batch_size)
encoder.save('encoder.h5')
decoder.save('decoder.h5')

I based this on https://www.kaggle.com/code/miklgr500/image2vec-autoencoder/notebook

Any ideas on how to fix this, or an alternative way to save the model weights?

Edit: using encoder.save_weights('encoder.h5') works.
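
For anyone landing here, a minimal sketch of that workaround, assuming the get_encoder/get_decoder functions above are available to rebuild the models: save_weights stores only the weights, not the model config, so the architecture has to be reconstructed in code before loading.

# Save only the weights; this sidesteps serializing the model config to JSON,
# which is where the "Not JSON Serializable" error occurs.
encoder.save_weights('encoder.h5')
decoder.save_weights('decoder.h5')

# Later, in a fresh session: rebuild the same architectures, then load the weights.
encoder = get_encoder((480, 640, 3))
decoder = get_decoder((60, 80, 1))
encoder.load_weights('encoder.h5')
decoder.load_weights('decoder.h5')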

