在构建图像分割模型时,我面临着获得维度不相等的问题



我正在研究一个图像分割问题，其中训练图像 = 50，测试图像 = 51。我遇到了一个维度不相等的错误。input_shape = (256, 256, 3)。模型代码：

import tensorflow as tf
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Conv2DTranspose, Concatenate, Input
from tensorflow.keras.layers import GlobalAveragePooling2D, Reshape, Dense, Multiply, AveragePooling2D, UpSampling2D
from tensorflow.keras.models import Model
from tensorflow.keras.applications import VGG19
def squeeze_excite_block(inputs, ratio=8):
    """Apply channel-wise squeeze-and-excitation recalibration to `inputs`.

    Args:
        inputs: 4-D feature tensor (batch, H, W, channels).
        ratio: channel-reduction factor for the bottleneck Dense layer.

    Returns:
        Tensor of the same shape as `inputs`, rescaled per channel.
    """
    num_channels = inputs.shape[-1]
    # Squeeze: global spatial average, reshaped to (1, 1, C) so the gates
    # broadcast over the spatial dimensions in the final multiply.
    attn = GlobalAveragePooling2D()(inputs)
    attn = Reshape((1, 1, num_channels))(attn)
    # Excite: bottleneck MLP producing per-channel gates in (0, 1).
    attn = Dense(num_channels // ratio, activation='relu',
                 kernel_initializer='he_normal', use_bias=False)(attn)
    attn = Dense(num_channels, activation='sigmoid',
                 kernel_initializer='he_normal', use_bias=False)(attn)
    return Multiply()([inputs, attn])
def ASPP(x, filter):
    """Atrous Spatial Pyramid Pooling over feature map `x`.

    Builds five parallel branches — image-level pooling plus a 1x1 conv and
    three 3x3 atrous convs (rates 6, 12, 18) — concatenates them, and
    projects back to `filter` channels.

    Args:
        x: 4-D feature tensor with static spatial dims (batch, H, W, C).
        filter: number of output channels per branch.  NOTE: this parameter
            name shadows the `filter` builtin; kept for caller compatibility.

    Returns:
        4-D tensor of shape (batch, H, W, filter).
    """
    def _conv_bn_relu(tensor, kernel_size, dilation):
        # Shared Conv -> BatchNorm -> ReLU branch; the conv bias is omitted
        # because BatchNormalization makes it redundant.
        t = Conv2D(filter, kernel_size, dilation_rate=dilation,
                   padding="same", use_bias=False)(tensor)
        t = BatchNormalization()(t)
        return Activation("relu")(t)

    height, width = x.shape[1], x.shape[2]

    # Branch 1: image-level pooling, 1x1 conv, then bilinear upsample back
    # to (H, W).  This conv keeps its bias, matching the original code.
    pooled = AveragePooling2D(pool_size=(height, width))(x)
    pooled = Conv2D(filter, 1, padding="same")(pooled)
    pooled = BatchNormalization()(pooled)
    pooled = Activation("relu")(pooled)
    pooled = UpSampling2D((height, width), interpolation="bilinear")(pooled)

    # Branches 2-5: 1x1 conv plus 3x3 atrous convs at increasing rates.
    branches = [pooled]
    for kernel_size, rate in ((1, 1), (3, 6), (3, 12), (3, 18)):
        branches.append(_conv_bn_relu(x, kernel_size, rate))

    # Fuse all branches and project back down to `filter` channels.
    y = Concatenate()(branches)
    return _conv_bn_relu(y, 1, 1)
def conv_block(x, filters):
    """Two 3x3 Conv->BatchNorm->ReLU layers followed by squeeze-and-excitation.

    Args:
        x: 4-D input feature tensor.
        filters: number of output channels for both convolutions.

    Returns:
        4-D tensor with `filters` channels.
    """
    out = x
    for _ in range(2):
        out = Conv2D(filters, 3, padding="same")(out)
        out = BatchNormalization()(out)
        out = Activation("relu")(out)
    return squeeze_excite_block(out)
def encoder1(inputs):
    """VGG19 (ImageNet-pretrained) backbone encoder.

    Args:
        inputs: the model's Input tensor.

    Returns:
        Tuple of (deepest feature map from block5_conv4,
        list of four skip tensors from shallow to deep).
    """
    backbone = VGG19(include_top=False, weights="imagenet", input_tensor=inputs)
    skip_layer_names = ("block1_conv2", "block2_conv2",
                        "block3_conv4", "block4_conv4")
    skips = [backbone.get_layer(n).output for n in skip_layer_names]
    return backbone.get_layer("block5_conv4").output, skips
def decoder1(inputs, skip_connections):
    """First decoder: four bilinear-upsample + skip-concat + conv_block stages.

    Args:
        inputs: bottleneck feature tensor (output of ASPP).
        skip_connections: list of four encoder skip tensors, shallow-to-deep.

    Returns:
        Decoded feature tensor at the input resolution.

    NOTE(review): `skip_connections.reverse()` below mutates the CALLER'S
    list in place.  In `build_model` the same (now reversed) list `skip_1`
    is afterwards passed to `decoder2`, which indexes it assuming this
    deep-to-shallow order — removing this side effect would break decoder2.
    """
    num_filters = [256, 128, 64, 32]
    # In-place reverse: deepest skip first, to pair with the first upsample.
    skip_connections.reverse()
    x = inputs
    for i, f in enumerate(num_filters):
        x = UpSampling2D((2, 2), interpolation="bilinear")(x)
        x = Concatenate()([x, skip_connections[i]])
        x = conv_block(x, f)
    return x
def output_block(inputs):
    """Project features to a single-channel sigmoid mask in [0, 1]."""
    logits = Conv2D(1, 1, padding="same")(inputs)
    return Activation("sigmoid")(logits)
def encoder2(inputs):
    """Plain convolutional encoder: four conv blocks with 2x2 max-pooling.

    Args:
        inputs: 4-D input tensor.

    Returns:
        Tuple of (bottleneck tensor after four poolings,
        list of four pre-pooling skip tensors, shallow-to-deep).
    """
    skips = []
    x = inputs
    for depth in (32, 64, 128, 256):
        x = conv_block(x, depth)
        skips.append(x)
        x = MaxPool2D((2, 2))(x)
    return x, skips
def decoder2(inputs, skip_1, skip_2):
    """Second decoder: upsample and fuse skips from BOTH encoders per stage.

    Args:
        inputs: bottleneck tensor from the second ASPP.
        skip_1: VGG19 encoder skips — expected ALREADY deep-to-shallow,
            because `decoder1` reversed this same list in place earlier
            in `build_model`.  NOTE(review): fragile cross-function
            coupling; verify call order before reusing these functions.
        skip_2: encoder2 skips, shallow-to-deep (reversed in place below).

    Returns:
        Decoded feature tensor at the input resolution.
    """
    num_filters = [256, 128, 64, 32]
    # In-place reverse (mutates the caller's list): deepest skip first.
    skip_2.reverse()
    x = inputs
    for i, f in enumerate(num_filters):
        x = UpSampling2D((2, 2), interpolation="bilinear")(x)
        x = Concatenate()([x, skip_1[i], skip_2[i]])
        x = conv_block(x, f)
    return x
def build_model(input_shape):
    """Build a two-stage (DoubleU-Net-style) segmentation model.

    Stage 1: VGG19 encoder -> ASPP -> decoder1 -> 1-channel mask `output1`.
    Stage 2: the input gated by `output1` -> plain encoder -> ASPP ->
    decoder2 (fusing skips from both encoders) -> 1-channel mask `output2`.

    Args:
        input_shape: e.g. (256, 256, 3); H and W must be divisible by 16.

    Returns:
        tf.keras Model mapping the input image to a (H, W, 2) tensor.

    NOTE(review): the final Concatenate stacks output1 and output2 into a
    2-channel output.  With a 1-channel ground-truth mask this is exactly
    the reported dice error: 131072 = 2*256*256 (prediction) vs
    65536 = 256*256 (mask).  Either train against a 2-channel target, or
    return only `output2` / index one channel inside the loss.
    """
    inputs = Input(input_shape)
    x, skip_1 = encoder1(inputs)
    x = ASPP(x, 64)
    # decoder1 reverses skip_1 IN PLACE; decoder2 below depends on that.
    x = decoder1(x, skip_1)
    output1 = output_block(x)
    # Gate the original image by the stage-1 mask before the second pass.
    x = inputs * output1
    x, skip_2 = encoder2(x)
    x = ASPP(x, 64)
    x = decoder2(x, skip_1, skip_2)
    output2 = output_block(x)
    outputs = Concatenate()([output1, output2])
    model = Model(inputs, outputs)
    return model

if __name__ == "__main__":
    # Smoke test: build the model for 256x256 RGB inputs and print its layers.
    model = build_model((256, 256, 3))
    model.summary()

在训练我的模型时得到以下错误:ValueError: in user code:

/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/engine/training.py:855 train_function  *
return step_function(self, iterator)
<ipython-input-10-88ab9377d655>:15 dice_coef  *
intersection = tf.reduce_sum(y_true * y_pred)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/ops/math_ops.py:1250 binary_op_wrapper
raise e
/usr/local/lib/python3.7/dist-packages/tensorflow/python/ops/math_ops.py:1234 binary_op_wrapper
return func(x, y, name=name)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/ops/math_ops.py:1575 _mul_dispatch
return multiply(x, y, name=name)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/util/dispatch.py:206 wrapper
return target(*args, **kwargs)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/ops/math_ops.py:530 multiply
return gen_math_ops.mul(x, y, name)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/ops/gen_math_ops.py:6250 mul
"Mul", x=x, y=y, name=name)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/op_def_library.py:750 _apply_op_helper
attrs=attr_protos, op_def=op_def)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/func_graph.py:601 _create_op_internal
compute_device)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/ops.py:3565 _create_op_internal
op_def=op_def)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/ops.py:2042 __init__
control_input_ops, op_def)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/ops.py:1883 _create_c_op
raise ValueError(str(e))
ValueError: Dimensions must be equal, but are 65536 and 131072 for '{{node mul_1}} = Mul[T=DT_FLOAT](flatten/Reshape, flatten_1/Reshape)' with input shapes: [?,65536], [?,131072]

如果有任何帮助,我将不胜感激。

提前感谢您的帮助。

错误发生在这一行

x = Multiply()([init, se])

错误的原因是 `Multiply()` 执行逐元素乘法，因此 `init` 和 `se` 的维度应当相同。在您的情况下，`init` 是 `Conv2D` 后接 `BatchNormalization` 的输出，形状类似 `(1, x, y, filters)`；而 `se` 是 `Dense` 层的输出，形状类似 `(1, filters)`，这里的 `filters` 是最后一个 `Dense` 层中的神经元数量。现在您必须先重塑（Reshape）`Dense` 层的输出以匹配 `init`。此外，如果 x 和 y 不等于 1，则会出现维度不匹配，您将得到该错误。

最新更新