Why does my PyTorch model summary show no parameters?



I am a beginner with PyTorch.
I want a U-Net-like autoencoder model.
So I wrote the code below and checked it with pytorch_model_summary, but the result tells me the model has no parameters...

Why does my model have no parameters?

import torch
import torch.nn as nn
import pytorch_model_summary

class unet_like(nn.Module):
    def __init__(self):
        super(unet_like, self).__init__()

    def conv2d_block(self, in_channels, out_channels, x):
        x = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, padding="same")(x)
        x = nn.BatchNorm2d(out_channels)(x)
        x = nn.ReLU()(x)
        x = nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, padding="same")(x)
        x = nn.BatchNorm2d(out_channels)(x)
        x = nn.ReLU()(x)
        return x

    def forward(self, x):
        c1 = self.conv2d_block(3, 16, x)
        p1 = nn.MaxPool2d(kernel_size=2)(c1)
        p1 = nn.Dropout2d(0.1)(p1)
        c2 = self.conv2d_block(16, 32, p1)
        p2 = nn.MaxPool2d(kernel_size=2)(c2)
        p2 = nn.Dropout(0.1)(p2)
        c3 = self.conv2d_block(32, 64, p2)
        p3 = nn.MaxPool2d(kernel_size=2)(c3)
        p3 = nn.Dropout(0.1)(p3)
        c4 = self.conv2d_block(64, 128, p3)
        p4 = nn.MaxPool2d(kernel_size=2)(c4)
        p4 = nn.Dropout(0.1)(p4)
        c5 = self.conv2d_block(128, 256, p4)
        # nn.ConvTranspose2d(in_channels=16, out_channels=64, kernel_size=3, stride=1, padding=(1, 1)),
        # nn.ReLU(),

        u6 = nn.ConvTranspose2d(in_channels=256, out_channels=128, kernel_size=2, stride=2, output_padding=(0, 1))(c5)
        print(u6.shape)
        print(c4.shape)
        u6 = torch.cat([u6, c4], 1)  # u6: 128, c4: 128
        print(u6.shape)
        u6 = nn.Dropout(0.1)(u6)
        c6 = self.conv2d_block(256, 128, u6)
        u7 = nn.ConvTranspose2d(in_channels=128, out_channels=64, kernel_size=2, stride=2, output_padding=(1, 0))(c6)
        u7 = torch.cat([u7, c3], 1)
        u7 = nn.Dropout(0.1)(u7)
        c7 = self.conv2d_block(128, 64, u7)

        u8 = nn.ConvTranspose2d(in_channels=64, out_channels=32, kernel_size=2, stride=2, output_padding=(0, 1))(c7)
        u8 = torch.cat([u8, c2], 1)
        u8 = nn.Dropout(0.1)(u8)
        c8 = self.conv2d_block(64, 32, u8)
        u9 = nn.ConvTranspose2d(in_channels=32, out_channels=16, kernel_size=2, stride=2, output_padding=(0, 1))(c8)
        u9 = torch.cat([u9, c1], 1)
        u9 = nn.Dropout(0.1)(u9)
        c9 = self.conv2d_block(32, 16, u9)
        # in_channels, kernel_size
        # outputs = Conv2D(1, (1, 1), activation="sigmoid")(c9)
        c9 = nn.Conv2d(in_channels=16, out_channels=1, kernel_size=3, padding=(1, 1))(c9)
        outputs = nn.Sigmoid()(c9)
        return outputs
model = unet_like().to("cpu")
print(pytorch_model_summary.summary(model, torch.tensor(train_images[:1], dtype = torch.float32).to("cpu"), show_input=True))
torch.Size([1, 128, 12, 9])
torch.Size([1, 128, 12, 9])
torch.Size([1, 256, 12, 9])
-----------------------------------------------------------------------
Layer (type)         Input Shape         Param #     Tr. Param #
=======================================================================
unet_like-1     [1, 3, 100, 75]               0               0
=======================================================================
Total params: 0
Trainable params: 0
Non-trainable params: 0
-----------------------------------------------------------------------

Your model definition must live in your module class's initializer, __init__, while forward handles the inference logic of that module. The PyTorch layers you are using are modules themselves, and they need to be instantiated before they can be used for inference. Unlike frameworks such as Keras, PyTorch has no compilation step; the logic is defined declaratively. Because you construct new layers inside forward on every call, those layers are never registered as submodules of unet_like: their parameters are invisible to model.parameters() and to pytorch_model_summary, and they would be re-initialized with fresh random weights on each forward pass, so nothing could ever be trained.
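To see the difference concretely, here is a minimal, self-contained sketch (the class names and layer sizes are arbitrary, chosen only for illustration): a layer built inside forward is a brand-new, untracked module on every call, while a layer assigned to an attribute in __init__ is registered and its parameters are counted.

import torch
import torch.nn as nn

class Unregistered(nn.Module):
    def forward(self, x):
        # A fresh Conv2d is created on every call; its parameters are never
        # registered with this module and are re-initialized each time.
        return nn.Conv2d(3, 16, kernel_size=3, padding=1)(x)

class Registered(nn.Module):
    def __init__(self):
        super().__init__()
        # Assigning the layer to an attribute in __init__ registers it as a
        # submodule, so its parameters are tracked.
        self.conv = nn.Conv2d(3, 16, kernel_size=3, padding=1)

    def forward(self, x):
        return self.conv(x)

print(sum(p.numel() for p in Unregistered().parameters()))  # 0
print(sum(p.numel() for p in Registered().parameters()))    # 448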

Here is something to get you started:

class UNetlike(nn.Module):
    def __init__(self):
        super().__init__()

        self.c1 = nn.Sequential(self.conv2d_block(3, 16),
                                nn.MaxPool2d(kernel_size=2),
                                nn.Dropout2d(0.1))
        self.c2 = nn.Sequential(self.conv2d_block(16, 32),
                                nn.MaxPool2d(kernel_size=2),
                                nn.Dropout2d(0.1))
        self.c3 = nn.Sequential(self.conv2d_block(32, 64),
                                nn.MaxPool2d(kernel_size=2),
                                nn.Dropout2d(0.1))
        self.c4 = nn.Sequential(self.conv2d_block(64, 128),
                                nn.MaxPool2d(kernel_size=2),
                                nn.Dropout2d(0.1))

    def conv2d_block(self, in_channels, out_channels):
        return nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                      kernel_size=3, padding="same"),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.Conv2d(in_channels=out_channels, out_channels=out_channels,
                      kernel_size=3, padding="same"),
            nn.BatchNorm2d(out_channels),
            nn.ReLU())

    def forward(self, x):
        p1 = self.c1(x)
        p2 = self.c2(p1)
        p3 = self.c3(p2)
        p4 = self.c4(p3)
        # so on and so forth
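With the layers registered as submodules in __init__, their parameters become visible. A quick sanity check on the partial sketch above (the 1x3x100x75 input size is taken from the question's summary output; the decoder blocks and a return statement still need to be added before the summary call will run end to end):

model = UNetlike()
# Every layer assigned to self in __init__ is a registered submodule,
# so its weights now show up in .parameters().
print(sum(p.numel() for p in model.parameters()))  # non-zero

# Once forward is completed (decoder, output layer, return statement),
# pytorch_model_summary will list each submodule with its parameter count:
# print(pytorch_model_summary.summary(model,
#                                     torch.zeros(1, 3, 100, 75),
#                                     show_input=True))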
