I have this CNN:
import numpy as np
import tensorflow
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPool2D, Flatten, Dense
from tensorflow.keras.optimizers import Adam
from sklearn import metrics

def cnn(trainImages, trainLabels, testImages, testLabels):
    trainImages = np.array(trainImages)
    trainLabels = np.array(trainLabels)
    testImages = np.array(testImages)
    testLabels = np.array(testLabels)

    # Scale pixel values to [0, 1]
    trainImages = trainImages / 255
    testImages = testImages / 255

    model = Sequential()
    model.add(Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu', input_shape=(224, 224, 3)))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Conv2D(filters=128, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dense(9))  # raw logits for the 9 classes

    opt = Adam(learning_rate=0.001)
    model.compile(optimizer=opt,
                  loss=tensorflow.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    model.fit(trainImages, trainLabels, epochs=20, batch_size=64)

    predictionResult = model.predict(testImages)
    pred = []
    for i in range(len(predictionResult)):
        pred.append(np.argmax(predictionResult[i], axis=-1))

    vehicles = ['Black Vehicles', 'Blue Vehicles', 'Brown Vehicles', 'Green Vehicles', 'Pink Vehicles',
                'Red Vehicles', 'Silver Vehicles', 'White Vehicles', 'Yellow Vehicles']
    print('Accuracy: ', metrics.accuracy_score(testLabels, pred))
    print(metrics.classification_report(testLabels, pred, target_names=vehicles))
    print(metrics.confusion_matrix(testLabels, pred))
Locally I get 93%, but on Google Colab only 10%. What could be going on? I see that on Google Colab the accuracy in the first training epoch is poor (10-15%); it then suddenly jumps to 35% in one epoch, to 60% in the second, and to over 90% in the third.
You need to use an activation function on the last layer of the model - 'softmax' (for multi-class probabilities):

model.add(Dense(9, activation='softmax'))

This will improve the model's accuracy.
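One companion detail: the compile call in the question uses SparseCategoricalCrossentropy(from_logits=True), which expects raw logits. If the final Dense layer applies softmax as suggested here, the loss should be built with from_logits=False so it is not given probabilities while expecting logits. A minimal sketch of how the two lines fit together, assuming everything else in the model stays as in the question:

# Final layer now outputs class probabilities rather than raw logits
model.add(Dense(9, activation='softmax'))

# Since the model already applies softmax, the loss must not expect logits
model.compile(
    optimizer=Adam(learning_rate=0.001),
    loss=tensorflow.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
    metrics=['accuracy'],
)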