My training and test data have the following shapes:
(9, 28, 28, 1), (3, 28, 28, 1), (9, 2, 1), (3, 2, 1)
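For reference, arrays of those shapes can be stubbed out like this (random placeholders only, not the real data):

import numpy as np

# Placeholder arrays with the shapes quoted above (random values, not the real data)
X_train = np.random.rand(9, 28, 28, 1).astype("float32")
X_test = np.random.rand(3, 28, 28, 1).astype("float32")
y_train = np.random.randint(0, 2, size=(9, 2, 1)).astype("float32")
y_test = np.random.randint(0, 2, size=(3, 2, 1)).astype("float32")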
My code is as follows:
import tensorflow as tf
import keras
from keras.layers import Conv2D, MaxPooling2D
from keras.models import Sequential,Input,Model
from keras.layers import Dense, Dropout
batch_size = 28
epochs = 10
num_classes = 1
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), padding='same', input_shape=(9, 784, 1),
                 activation='linear'))
model.add(MaxPooling2D((2, 2), padding='same'))
model.add(Conv2D(64, (3, 3), activation='linear', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
model.add(Conv2D(128, (3, 3), activation='linear', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
model.add(Dense(num_classes, activation='sigmoid'))
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
model.fit(X_train, y_train,
          batch_size=batch_size, epochs=epochs, verbose=1,
          validation_data=(X_test, y_test))
It raises this error:
ValueError: Shapes (None, 2, 1) and (None, 4, 4) are incompatible
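A quick way to see where the mismatch comes from is to inspect the model's output shape before calling fit; a minimal diagnostic, assuming the model above has already been built:

# The final output shape must match the label shape; printing it
# makes the disagreement with y_train's shape visible before training.
model.summary()
print(model.output_shape)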
Besides the mismatch with your data shape, you never flatten the feature maps before the final Dense layer, and you do not pass the proper input shape. Here is the code with those fixes added:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Flatten
from tensorflow.keras.models import Sequential

batch_size = 28
epochs = 10
num_classes = 2

model = Sequential()
# Input shape matches a single sample: 28 x 28 pixels, 1 channel
model.add(Conv2D(32, kernel_size=(3, 3), padding='same', input_shape=(28, 28, 1),
                 activation='linear'))
model.add(MaxPooling2D((2, 2), padding='same'))
model.add(Conv2D(64, (3, 3), activation='linear', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
model.add(Conv2D(128, (3, 3), activation='linear', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
# Flatten the 4x4x128 feature maps before the fully connected layers
model.add(Flatten())
model.add(Dense(12))

# Print the output shape of every layer added so far
for layer in model.layers:
    print(layer.get_output_at(0).get_shape().as_list())

model.add(Dense(2))
model.add(Dense(num_classes, activation='sigmoid'))

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs,
          verbose=1)  # validation_data=(X_test, y_test)
Output:
[None, 28, 28, 32]
[None, 14, 14, 32]
[None, 14, 14, 64]
[None, 7, 7, 64]
[None, 7, 7, 128]
[None, 4, 4, 128]
[None, 2048]
[None, 12]
Epoch 1/10
1/1 [==============================] - 7s 7s/step - loss: 0.7242 - accuracy: 0.5556
Epoch 2/10
1/1 [==============================] - 0s 6ms/step - loss: 0.7474 - accuracy: 0.4444
Epoch 3/10
1/1 [==============================] - 0s 6ms/step - loss: 0.7997 - accuracy: 0.4444
Epoch 4/10
1/1 [==============================] - 0s 6ms/step - loss: 0.8712 - accuracy: 0.4444
Epoch 5/10
1/1 [==============================] - 0s 5ms/step - loss: 0.9182 - accuracy: 0.4444
Epoch 6/10
1/1 [==============================] - 0s 5ms/step - loss: 0.9220 - accuracy: 0.4444
Epoch 7/10
1/1 [==============================] - 0s 5ms/step - loss: 0.9011 - accuracy: 0.4444
Epoch 8/10
1/1 [==============================] - 0s 5ms/step - loss: 0.8745 - accuracy: 0.4444
Epoch 9/10
1/1 [==============================] - 0s 5ms/step - loss: 0.8506 - accuracy: 0.4444
Epoch 10/10
1/1 [==============================] - 0s 7ms/step - loss: 0.8342 - accuracy: 0.4444
<keras.callbacks.History at 0x7fcf40808050>
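One detail worth checking on the label side (an assumption based on the shapes you quoted, (9, 2, 1) and (3, 2, 1)): the model's final Dense layer outputs (None, 2), so dropping the trailing singleton axis keeps the labels aligned with the predictions. A minimal sketch:

import numpy as np

# Drop the trailing singleton axis so labels become (9, 2) / (3, 2),
# matching the model's (None, 2) output.
y_train = np.squeeze(y_train, axis=-1)
y_test = np.squeeze(y_test, axis=-1)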