Error when training a deep learning model



So I designed a CNN and compiled it with the following parameters,

import csv
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import RMSprop

training_file_loc = "8-SignLanguageMNIST/sign_mnist_train.csv"
testing_file_loc = "8-SignLanguageMNIST/sign_mnist_test.csv"

def getData(filename):
    images = []
    labels = []
    with open(filename) as csv_file:
        file = csv.reader(csv_file, delimiter = ",")
        next(file, None)

        for row in file:
            label = row[0]
            data = row[1:]
            img = np.array(data).reshape(28,28)

            images.append(img)
            labels.append(label)

    images = np.array(images).astype("float64")
    labels = np.array(labels).astype("float64")

    return images, labels

training_images, training_labels = getData(training_file_loc)
testing_images, testing_labels = getData(testing_file_loc)

print(training_images.shape, training_labels.shape)
print(testing_images.shape, testing_labels.shape)

training_images = np.expand_dims(training_images, axis = 3)
testing_images = np.expand_dims(testing_images, axis = 3)

training_datagen = ImageDataGenerator(
    rescale = 1/255,
    rotation_range = 45,
    width_shift_range = 0.2,
    height_shift_range = 0.2,
    shear_range = 0.2,
    zoom_range = 0.2,
    horizontal_flip = True,
    fill_mode = "nearest"
)
training_generator = training_datagen.flow(
    training_images,
    training_labels,
    batch_size = 64,
)

validation_datagen = ImageDataGenerator(
    rescale = 1/255,
    rotation_range = 45,
    width_shift_range = 0.2,
    height_shift_range = 0.2,
    shear_range = 0.2,
    zoom_range = 0.2,
    horizontal_flip = True,
    fill_mode = "nearest"
)
validation_generator = training_datagen.flow(
    testing_images,
    testing_labels,
    batch_size = 64,
)

model = tf.keras.Sequential([
    keras.layers.Conv2D(16, (3, 3), input_shape = (28, 28, 1), activation = "relu"),
    keras.layers.MaxPooling2D(2, 2),
    keras.layers.Conv2D(32, (3, 3), activation = "relu"),
    keras.layers.MaxPooling2D(2, 2),
    keras.layers.Flatten(),
    keras.layers.Dense(256, activation = "relu"),
    keras.layers.Dropout(0.25),
    keras.layers.Dense(512, activation = "relu"),
    keras.layers.Dropout(0.25),
    keras.layers.Dense(26, activation = "softmax")
])

model.compile(
    loss = "categorical_crossentropy",
    optimizer = RMSprop(lr = 0.001),
    metrics = ["accuracy"]
)

However, when I run model.fit(), I get the following error,

ValueError: Shapes (None, 1) and (None, 24) are incompatible

After changing the loss function to sparse_categorical_crossentropy, the program runs fine.

I don't understand why this happens.

Can someone explain this, and the difference between these loss functions?

The problem is that categorical_crossentropy expects one-hot encoded labels, meaning that for each sample it expects a tensor of length num_classes in which the label-th element is set to 1 and everything else is 0.
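For example, with 26 classes an integer label of 2 becomes a vector of 26 entries with a 1 at index 2. A quick sketch (the label values are made up) using tf.keras.utils.to_categorical:

import tensorflow as tf

labels = [0, 2, 25]                                             # integer labels
onehot = tf.keras.utils.to_categorical(labels, num_classes=26)  # one-hot labels
print(onehot.shape)   # (3, 26)
print(onehot[1])      # 1.0 at index 2, zeros everywhere else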

sparse_categorical_crossentropy, on the other hand, uses integer labels directly (its use case is problems with a large number of classes, where one-hot encoded labels would waste a lot of memory on zeros). I believe, but cannot confirm, that categorical_crossentropy runs faster than its sparse counterpart.
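As a minimal sketch of the difference (the predictions below are made up, purely for illustration): both losses compute the same cross-entropy, they only differ in the label format they accept:

import numpy as np
import tensorflow as tf

y_int = np.array([3, 0, 1])                                      # integer labels
y_onehot = tf.keras.utils.to_categorical(y_int, num_classes=4)   # same labels, one-hot

# made-up softmax outputs for 3 samples over 4 classes
y_pred = np.array([[0.1, 0.1, 0.1, 0.7],
                   [0.8, 0.1, 0.05, 0.05],
                   [0.2, 0.6, 0.1, 0.1]])

print(tf.keras.losses.sparse_categorical_crossentropy(y_int, y_pred).numpy())
print(tf.keras.losses.categorical_crossentropy(y_onehot, y_pred).numpy())
# both print the same per-sample loss values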

In your case, with 26 classes, I would suggest using the non-sparse version and converting your labels to one-hot encoded ones, like this:

def getData(filename):
    images = []
    labels = []
    with open(filename) as csv_file:
        file = csv.reader(csv_file, delimiter = ",")
        next(file, None)

        for row in file:
            label = row[0]
            data = row[1:]
            img = np.array(data).reshape(28,28)

            images.append(img)
            labels.append(label)

    images = np.array(images).astype("float64")
    labels = np.array(labels).astype("float64")

    return images, tf.keras.utils.to_categorical(labels, num_classes=26) # you can omit num_classes to have it computed from the data

Side note: unless you have a reason to use float64 for the images, I would switch to float32 (it halves the memory required for the dataset, and the model probably converts them to float32 as its first operation anyway).
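Concretely, that would only change the cast inside getData:

images = np.array(images).astype("float32")   # instead of "float64"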

Simply put: for classification problems where the output classes are integers, use sparse_categorical_crossentropy; for problems where the labels have been converted to one-hot encoded labels, use categorical_crossentropy.
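A minimal sketch of the two pairings, reusing the model from the question (only the loss and the label format change):

# integer labels, e.g. training_labels.shape == (N,)
model.compile(loss="sparse_categorical_crossentropy",
              optimizer="rmsprop",
              metrics=["accuracy"])

# one-hot labels, e.g. training_labels.shape == (N, 26)
model.compile(loss="categorical_crossentropy",
              optimizer="rmsprop",
              metrics=["accuracy"])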
