Tensorflow模型.fit()再现性


# Reproducibility experiment: build a small CNN, save its initial weights,
# then call model.fit() three times from the same starting weights to see
# whether the three runs produce identical metrics.
# NOTE(review): within one process, TF's stateful random-op kernels keep an
# internal counter that advances across calls (e.g. dropout/shuffle RNG), so
# Run2/Run3 are not expected to match Run1 even with a fixed seed — see the
# discussion that follows this snippet.
import tensorflow as tf
RANDOM_SEED_CONSTANT = 42  # FOR_REPRODUCIBILITY
tf.random.set_seed(RANDOM_SEED_CONSTANT)  # seeds TF's global RNG only (not Python's random or NumPy)
# Prevent NHWC errors https://www.nuomiphp.com/eplan/en/50125.html
from tensorflow.keras import backend as K
K.set_image_data_format("channels_last")  # force NHWC layout regardless of backend default
from tensorflow import keras
from tensorflow.keras import datasets, layers, models
# CIFAR-10: 50k train / 10k test RGB images of shape (32, 32, 3), labels 0-9.
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
train_images, test_images = train_images / 255.0, test_images / 255.0 # Normalize pixel values to be between 0 and 1
# Create a simple CNN
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
# Dense layers get explicitly seeded initializers; NOTE(review): the Conv2D
# layers above do NOT — their kernels draw from the seeded global RNG instead.
model.add(layers.Dense(64, 
activation='relu', 
kernel_initializer=tf.keras.initializers.HeNormal(seed=RANDOM_SEED_CONSTANT)))
model.add(layers.Dense(10, 
kernel_initializer=tf.keras.initializers.HeNormal(seed=RANDOM_SEED_CONSTANT)))
print(model.summary())
# from_logits=True because the final Dense(10) layer has no softmax activation.
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
# Snapshot the freshly initialized weights so each run can restart from them.
model.save_weights('myweights.h5')
# Run1
history = model.fit(train_images, train_labels, epochs=1, 
shuffle=False,
validation_data=(test_images, test_labels))
# Run2 — weights are restored, but the optimizer state and TF's internal RNG
# counters are NOT reset, so this run can still diverge from Run1.
model.load_weights('myweights.h5')
history = model.fit(train_images, train_labels, epochs=1, 
shuffle=False,
validation_data=(test_images, test_labels))
# Run3 — same caveat as Run2.
model.load_weights('myweights.h5')
history = model.fit(train_images, train_labels, epochs=1, 
shuffle=False,
validation_data=(test_images, test_labels))

上面的3个model.fit()调用给出了以下结果:

1563/1563 [==============================] - 7s 4ms/step - loss: 1.4939 - accuracy: 0.4543 - val_loss: 1.2516 - val_accuracy: 0.5567
1563/1563 [==============================] - 6s 4ms/step - loss: 1.6071 - accuracy: 0.4092 - val_loss: 1.3857 - val_accuracy: 0.4951
1563/1563 [==============================] - 7s 4ms/step - loss: 1.5538 - accuracy: 0.4325 - val_loss: 1.3187 - val_accuracy: 0.5294

造成这种差异的原因是什么?我正在努力了解可能阻碍从模型中复制结果的来源。除了随机种子、密集层初始化,我还缺少什么?

测试再现性的方法不正确。您需要关闭程序并重新运行它,看看结果是否相同。否则,运行2取决于在运行1期间发生的事件,运行3取决于运行1和2期间发生的事情。

原因是TensorFlow为随机数生成维护了一个内部计数器,如`tf.random.set_seed`的文档中所述(强调是我加的):

In [1]: run program.py
1563/1563 [==============================] - 13s 8ms/step - loss: 1.4997 - accuracy: 0.4540 - val_loss: 1.2528 - val_accuracy: 0.5494
{'loss': [1.4996991157531738], 'accuracy': [0.4540199935436249], 'val_loss': [1.2527965307235718], 'val_accuracy': [0.5493999719619751]}
In [2]: run program.py
1563/1563 [==============================] - 12s 8ms/step - loss: 1.4997 - accuracy: 0.4540 - val_loss: 1.2528 - val_accuracy: 0.5494
{'loss': [1.4996991157531738], 'accuracy': [0.4540199935436249], 'val_loss': [1.2527965307235718], 'val_accuracy': [0.5493999719619751]}

我们在上面的tf.random.uniform的第二次调用中得到"A2"而不是"A1"的原因是,TensorFlow在所有具有相同参数的调用中都使用相同的tf.random.uniform内核(即内部表示),并且该内核维护一个内部计数器,该计数器每次执行时都会递增,从而生成不同的结果。

如果我只运行程序的第一次运行两次,在每次运行之间关闭程序(在这种情况下是IPython),我得到:

除去执行计算所需的时间(根据机器上的负载可能会有所不同),结果完全相同。

最新更新