How do I fix this error: Calculated padded input size per channel: (3 x 3). Kernel size: (4 x 4). Kernel size can't be greater than actual input size



I have this problem: `Calculated padded input size per channel: (3 x 3). Kernel size: (4 x 4). Kernel size can't be greater than actual input size`. Here is the model code:

```python
def conv(c_in, c_out, batch_norm=True, activation="lrelu"):
    return conv_block(c_in, c_out, kernel=4, stride=2, pad=1, bias=False,
                      batch_norm=batch_norm, activation=activation, pool_type=None)


def tconv(c_in, c_out, batch_norm=True, activation="lrelu"):
    return tconv_block(c_in, c_out, kernel=4, stride=2, pad=1, bias=False,
                       batch_norm=batch_norm, activation=activation, pool_type=None)


class Critic(nn.Module):  # class header was missing from the post; a WGAN-style critic is assumed
    def __init__(self):
        super().__init__()
        # Downsampling stack: each 4/2/1 conv halves the spatial size.
        self.conv = nn.Sequential(
            conv(3, 32, batch_norm=False),
            conv(32, 64),
            conv(64, 128),
            conv(128, 256),
            conv_block(256, 1, kernel=4, stride=1, pad=0, bias=False,
                       activation=None, pool_type=None),
            nn.Flatten()
        )

    def forward(self, x):
        x = self.conv(x)
        return x

    def clip_weights(self, vmin=-0.01, vmax=0.01):
        for p in self.parameters():
            p.data.clamp_(vmin, vmax)


class Generator(nn.Module):
    def __init__(self, z_dim):
        super().__init__()
        self.z_dim = z_dim
        # Upsampling stack: each 4/2/1 transposed conv doubles the spatial size.
        self.tconv = nn.Sequential(
            tconv_block(z_dim, 512, kernel=4, stride=2, pad=1, bias=False,
                        activation="lrelu", pool_type=None),
            tconv(512, 256),
            tconv(256, 128),
            tconv(128, 64),
            tconv(64, 32),
            tconv(32, 3, activation="tanh", batch_norm=False)
        )

    def forward(self, x):
        return self.tconv(x)

    def generate(self, n, device):
        z = torch.randn((n, self.z_dim, 1, 1), device=device)
        return self.tconv(z)
```
```python
z = torch.randn((n, self.z_dim, 1, 1), device=device)
```

The line above creates the input noise tensor with a spatial size of (1, 1), which is too small for the model.
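
For reference, this is exactly the situation PyTorch is complaining about: a `Conv2d` whose padded input is smaller than its kernel. A minimal sketch that reproduces the same message (the channel counts here are arbitrary, chosen only for illustration):

```python
import torch
import torch.nn as nn

# A 4x4 convolution with padding 1 applied to a 1x1 feature map:
# the padded input is 3x3, which is smaller than the 4x4 kernel.
layer = nn.Conv2d(in_channels=100, out_channels=8, kernel_size=4, stride=2, padding=1)
x = torch.randn(1, 100, 1, 1)

try:
    layer(x)
except RuntimeError as err:
    # Calculated padded input size per channel: (3 x 3). Kernel size: (4 x 4).
    # Kernel size can't be greater than actual input size
    print(err)
```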

```python
z = torch.randn((n, self.z_dim, 10, 10), device=device)
```

Increasing the spatial size of the input tensor, as in the line above, should resolve this error.
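
A quick, self-contained way to sanity-check how the noise size propagates is to work through the output-size formula for the transposed-conv blocks above (this sketch assumes `tconv_block` wraps a plain `ConvTranspose2d` with the stated kernel/stride/pad):

```python
def tconv_out(h, kernel=4, stride=2, pad=1):
    # ConvTranspose2d output size: (h - 1) * stride - 2 * pad + kernel
    return (h - 1) * stride - 2 * pad + kernel

for start in (1, 10):            # spatial size of the noise tensor
    sizes, h = [start], start
    for _ in range(6):           # the Generator stacks six kernel=4, stride=2, pad=1 blocks
        h = tconv_out(h)
        sizes.append(h)
    print(start, "->", sizes)
# 1  -> [1, 2, 4, 8, 16, 32, 64]
# 10 -> [10, 20, 40, 80, 160, 320, 640]
```

Each 4/2/1 block exactly doubles the spatial size, so the spatial size of `z` directly determines every feature-map size further down the network.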
