3D Convolutional Neural Network input shape



I am having trouble with a 3D CNN in Keras and Python for classifying 3D shapes. I have a folder with some models in JSON format, which I read into a NumPy array. Each model is 25*25*25 and represents the occupancy grid of the voxelized model (each position indicates whether the voxel at position (i,j,k) contains a point), so I only have 1 input channel, like a grayscale 2D image. My code is the following:

import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution3D, MaxPooling3D
from keras.optimizers import SGD
from keras.utils import np_utils
from keras import backend as K

# Number of Classes and Epochs of Training
nb_classes = 3  # cube, cone or sphere
nb_epoch = 100
batch_size = 2

# Input Image Dimensions
img_rows, img_cols, img_depth = 25, 25, 25

# Number of Convolutional Filters to use
nb_filters = 32

# Convolution Kernel Size
kernel_size = [5, 5, 5]

X_train, Y_train = [], []

# Read from File
import os
import json

i = 0
for filename in os.listdir(os.path.join(os.getcwd(), 'models')):
    with open(os.path.join(os.getcwd(), 'models', filename)) as f:
        file = f.readlines()
        json_file = '\n'.join(file)
        content = json.loads(json_file)
        occupancy = content['model']['occupancy']
        form = []
        for value in occupancy:
            form.append(int(value))
        final_model = [[[0 for i in range(img_rows)]
                        for j in range(img_cols)]
                       for k in range(img_depth)]
        a = 0
        for i in range(img_rows):
            for j in range(img_cols):
                for k in range(img_depth):
                    final_model[i][j][k] = form[a]
                    a = a + 1
        X_train.append(final_model)
        Y_train.append(content['model']['label'])

X_train = np.array(X_train)
Y_train = np.array(Y_train)

# (1 channel, 25 rows, 25 cols, 25 of depth)
input_shape = (1, img_rows, img_cols, img_depth)

# Init
model = Sequential()

# 3D Convolution layer
model.add(Convolution3D(nb_filters, kernel_size[0], kernel_size[1], kernel_size[2],
                        input_shape=input_shape,
                        activation='relu'))

# Fully Connected layer
model.add(Flatten())
model.add(Dense(128,
                init='normal',
                activation='relu'))
model.add(Dropout(0.5))

# Softmax Layer
model.add(Dense(nb_classes,
                init='normal'))
model.add(Activation('softmax'))

# Compile
model.compile(loss='categorical_crossentropy',
              optimizer=SGD())

# Fit network
model.fit(X_train, Y_train, nb_epoch=nb_epoch,
          verbose=1)

After this, I get the following error:

Using TensorFlow backend.
Traceback (most recent call last):
  File "/usr/local/lib/python3.6/site-packages/tensorflow/python/framework/common_shapes.py", line 670, in _call_cpp_shape_fn_impl
    status)
  File "/usr/local/Cellar/python3/3.6.0/Frameworks/Python.framework/Versions/3.6/lib/python3.6/contextlib.py", line 89, in __exit__
    next(self.gen)
  File "/usr/local/lib/python3.6/site-packages/tensorflow/python/framework/errors_impl.py", line 469, in raise_exception_on_not_ok_status
    pywrap_tensorflow.TF_GetCode(status))
tensorflow.python.framework.errors_impl.InvalidArgumentError: Negative dimension size caused by subtracting 5 from 1 for 'Conv3D' (op: 'Conv3D') with input shapes: [?,1,25,25,25], [5,5,5,25,32].

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "CNN_3D.py", line 76, in <module>
    activation='relu'))
  File "/usr/local/lib/python3.6/site-packages/keras/models.py", line 299, in add
    layer.create_input_layer(batch_input_shape, input_dtype)
  File "/usr/local/lib/python3.6/site-packages/keras/engine/topology.py", line 401, in create_input_layer
    self(x)
  File "/usr/local/lib/python3.6/site-packages/keras/engine/topology.py", line 572, in __call__
    self.add_inbound_node(inbound_layers, node_indices, tensor_indices)
  File "/usr/local/lib/python3.6/site-packages/keras/engine/topology.py", line 635, in add_inbound_node
    Node.create_node(self, inbound_layers, node_indices, tensor_indices)
  File "/usr/local/lib/python3.6/site-packages/keras/engine/topology.py", line 166, in create_node
    output_tensors = to_list(outbound_layer.call(input_tensors[0], mask=input_masks[0]))
  File "/usr/local/lib/python3.6/site-packages/keras/layers/convolutional.py", line 1234, in call
    filter_shape=self.W_shape)
  File "/usr/local/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py", line 2831, in conv3d
    x = tf.nn.conv3d(x, kernel, strides, padding)
  File "/usr/local/lib/python3.6/site-packages/tensorflow/python/ops/gen_nn_ops.py", line 522, in conv3d
    strides=strides, padding=padding, name=name)
  File "/usr/local/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py", line 763, in apply_op
    op_def=op_def)
  File "/usr/local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 2397, in create_op
    set_shapes_for_outputs(ret)
  File "/usr/local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 1757, in set_shapes_for_outputs
    shapes = shape_func(op)
  File "/usr/local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 1707, in call_with_requiring
    return call_cpp_shape_fn(op, require_shape_fn=True)
  File "/usr/local/lib/python3.6/site-packages/tensorflow/python/framework/common_shapes.py", line 610, in call_cpp_shape_fn
    debug_python_shape_fn, require_shape_fn)
  File "/usr/local/lib/python3.6/site-packages/tensorflow/python/framework/common_shapes.py", line 675, in _call_cpp_shape_fn_impl
    raise ValueError(err.message)
ValueError: Negative dimension size caused by subtracting 5 from 1 for 'Conv3D' (op: 'Conv3D') with input shapes: [?,1,25,25,25], [5,5,5,25,32].

What am I doing wrong to get this error?

I think the problem is that you are setting the input shape in Theano ordering, but you are using Keras with the TensorFlow backend and TensorFlow image ordering. In addition, the Y_train array has to be converted to categorical labels.

Updated code:

from keras.utils import np_utils
from keras import backend as K

if K.image_dim_ordering() == 'th':
    X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols, img_depth)
    input_shape = (1, img_rows, img_cols, img_depth)
else:
    X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, img_depth, 1)
    input_shape = (img_rows, img_cols, img_depth, 1)

Y_train = np_utils.to_categorical(Y_train, nb_classes)

Adding these lines should fix it.
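For reference, here is a minimal end-to-end sketch of how those lines slot into the question's script, assuming the TensorFlow backend with channels-last ordering and the same Keras 1-style API the question uses (Convolution3D, init=, nb_epoch; in Keras 2 these would be Conv3D(32, (5, 5, 5), ...), kernel_initializer= and epochs=). The random arrays are only stand-ins for the voxel data loaded from the JSON files:

import numpy as np
from keras.models import Sequential
from keras.layers import Convolution3D, Flatten, Dense, Dropout, Activation
from keras.optimizers import SGD
from keras.utils import np_utils

nb_classes, nb_epoch, batch_size = 3, 100, 2
img_rows, img_cols, img_depth = 25, 25, 25

# Stand-in data: 10 random occupancy grids and integer labels 0..2
# (in the real script these come from the JSON models).
X_train = np.random.randint(0, 2, size=(10, img_rows, img_cols, img_depth)).astype('float32')
Y_train = np.random.randint(0, nb_classes, size=(10,))

# Channels-last: the single channel goes at the end, not at the front
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, img_depth, 1)
input_shape = (img_rows, img_cols, img_depth, 1)

# One-hot encode the labels, e.g. 2 -> [0, 0, 1]
Y_train = np_utils.to_categorical(Y_train, nb_classes)

model = Sequential()
model.add(Convolution3D(32, 5, 5, 5, input_shape=input_shape, activation='relu'))
model.add(Flatten())
model.add(Dense(128, init='normal', activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes, init='normal'))
model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy', optimizer=SGD(), metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1)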

If you use a 3D CNN for frame prediction, you have to use input = (number of frames, height, width, channels); a short sketch of that input layout follows after the code below. My code:

def cnn_network():
    model = Sequential()

# # Layer 1
# model.add(Conv3D(512, kernel_size=(3, 3, 3), strides=(1, 1, 1), input_shape=(6, 14, 14, 512),
#                  use_bias=512, padding='SAME', activation='relu', name='conv3D_1_1'))
# model.add(Conv3D(512, kernel_size=(3, 3, 3), strides=(1, 1, 1), use_bias=512, padding='SAME',
#                  activation='relu', name='conv3D_1_5'))
# model.add(MaxPooling3D(pool_size=(4, 2, 2), strides=(1, 2, 2)))
# model.add(BatchNormalization())
# # Layer 2
# model.add(Conv3DTranspose(512, kernel_size=(1, 3, 3), strides=(1, 2, 2), use_bias=512, padding='SAME',
#                           activation='relu', name='Deconv3D_16'))
# model.add(Conv3D(512, kernel_size=(3, 3, 3), strides=(1, 1, 1), use_bias=512,
#                  padding='SAME', activation='relu', name='conv3D_2__19'))
# # model.add(Conv3D(512, kernel_size=(3, 3, 3), strides=(1, 1, 1), use_bias=512,
# #                  padding='SAME', activation='relu', name='conv3D_2__77'))
# model.add(Conv3DTranspose(256, kernel_size=(3, 3, 3), strides=(1, 2, 2), use_bias=256, padding='SAME',
#                           activation='relu', name='Deconv3D_1'))
# model.add(Conv3D(256, kernel_size=(3, 3, 3), strides=(1, 1, 1), use_bias=256,
#                  padding='SAME', activation='relu', name='conv3D_2__1'))
# model.add(Conv3D(256, kernel_size=(3, 3, 3), strides=(1, 1, 1), use_bias=256, padding='SAME',
#                  activation='relu', name='conv3D_2__2'))
# model.add(MaxPooling3D(pool_size=(3, 1, 1), strides=(1, 1, 1)))
# # model.add(Conv3D(256, kernel_size=(1, 3, 3), strides=(1, 1, 1), use_bias=256, padding='SAME',
# #                  activation='relu', name='conv3D_18'))
# # model.add(MaxPooling3D(pool_size=(2, 1, 1), strides=(2, 1, 1)))
# model.add(BatchNormalization())
# # Layer 3
# model.add(Conv3DTranspose(128, kernel_size=(1, 3, 3), strides=(1, 2, 2), use_bias=128, padding='SAME',
#                           activation='relu', name='Deconv3D_2'))
# model.add(Conv3D(128, kernel_size=(1, 3, 3), strides=(1, 1, 1), use_bias=128, padding='SAME',
#                  activation='relu', name='conv3D__3__1'))
# model.add(Conv3D(128, kernel_size=(1, 3, 3), strides=(1, 1, 1), use_bias=128, padding='SAME',
#                  activation='relu', name='conv3D__3__2'))
# model.add(BatchNormalization())
# # Layer 4
# model.add(Conv3DTranspose(64, kernel_size=(1, 3, 3), strides=(1, 2, 2), use_bias=64, padding='SAME',
#                           activation='relu', name='Deconv3D_3'))
# model.add(Conv3D(64, kernel_size=(1, 3, 3), strides=(1, 1, 1), use_bias=64, padding='SAME',
#                  activation='relu', name='conv3D__4__1'))
# model.add(Conv3D(64, kernel_size=(1, 3, 3), strides=(1, 1, 1), use_bias=64, padding='SAME',
#                  activation='relu', name='conv3D__4__2'))
# model.add(BatchNormalization())
# # Layer 5
# model.add(Conv3DTranspose(32, kernel_size=(1, 3, 3), strides=(1, 2, 2), use_bias=32, padding='SAME',
#                           activation='relu', name='Deconv3D_4'))
# model.add(Conv3D(32, kernel_size=(1, 3, 3), strides=(1, 1, 1), use_bias=32, padding='SAME',
#                  activation='relu', name='conv3D__5__1'))
# model.add(Conv3D(16, kernel_size=(1, 3, 3), strides=(1, 1, 1), use_bias=16, padding='SAME',
#                  activation='relu', name='conv3D__5__2'))
# # model.add(Conv3D(8, kernel_size=(1, 3, 3), strides=(1, 1, 1), use_bias=8, padding='SAME',
# #                  activation='relu', name='conv3D2_23'))
# model.add(Conv3D(1, kernel_size=(1, 3, 3), strides=(1, 1, 1), use_bias=1, padding='SAME',
#                  activation='sigmoid', name='conv3D__5__3'))
# # model.add(BatchNormalization())
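To make the (number of frames, height, width, channels) layout above concrete, here is a minimal sketch using the Keras 2 Conv3D API as in the commented code; the filter count (64) is a made-up placeholder, and the clip dimensions are borrowed from the input_shape=(6, 14, 14, 512) used above:

import numpy as np
from keras.models import Sequential
from keras.layers import Conv3D

# Placeholder clip dimensions: 6 frames of 14x14 feature maps with 512 channels
frames, height, width, channels = 6, 14, 14, 512

model = Sequential()
model.add(Conv3D(64, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding='same',
                 activation='relu', input_shape=(frames, height, width, channels)))

# A batch of clips is then a 5D array: (batch, frames, height, width, channels)
dummy_batch = np.random.rand(2, frames, height, width, channels).astype('float32')
print(model.predict(dummy_batch).shape)  # (2, 6, 14, 14, 64) with 'same' padding and stride 1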

Latest update