Cannot apply weight initialization to Conv2d, ConvTranspose2d



I am currently studying DCGAN and LSGAN, and I am using weight initialization.



import torch.nn as nn

def weights_init(m):
    # Conv / deconv weights from N(0, 0.02); BatchNorm weights from N(1, 0.02), biases zero
    if isinstance(m, nn.Conv2d):
        nn.init.normal_(m.weight.data, 0.0, 0.02)
        print('Conv')
    elif isinstance(m, nn.ConvTranspose2d):
        nn.init.normal_(m.weight.data, 0.0, 0.02)
        print('Trans')
    elif isinstance(m, nn.BatchNorm2d):
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias, 0)
        print('Batch')

This is my initialization function.

This is my generator.

class Generator(nn.Module):
    def __init__(self, nz, ngf, channels):
        super(Generator, self).__init__()
        self.layer1 = nn.Sequential(
            nn.ConvTranspose2d(in_channels=nz, out_channels=ngf * 4, kernel_size=4, stride=1,
                               padding=0, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True)
        )
        self.layer2 = nn.Sequential(
            nn.ConvTranspose2d(in_channels=ngf * 4, out_channels=ngf * 2, kernel_size=4, stride=2,
                               padding=1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True)
        )
        self.layer3 = nn.Sequential(
            nn.ConvTranspose2d(in_channels=ngf * 2, out_channels=ngf * 1, kernel_size=4, stride=2,
                               padding=1, bias=False),
            nn.BatchNorm2d(ngf * 1),
            nn.ReLU(True)
        )
        self.layer4 = nn.Sequential(
            nn.ConvTranspose2d(in_channels=ngf * 1, out_channels=channels, kernel_size=4, stride=2,
                               padding=1, bias=False),
            nn.Tanh()
        )

generator = Generator(100, 64, 1).to(device)
generator.apply(weights_init).state_dict()
OrderedDict([('layer1.0.weight',
              tensor([[[[ 2.8698e-03,  5.4211e-03, -1.2506e-02, -9.0855e-03],
                        [-1.3270e-02,  6.4097e-03, -3.0736e-03,  7.3850e-03],
                        [-8.1306e-03, -1.4132e-05, -1.0484e-02, -1.5072e-02],
                        [-1.0174e-02, -4.2638e-03, -8.8196e-03,  4.4663e-03]],

                       [[ 1.8402e-03, -1.3164e-02, -1.4002e-02, -1.2906e-02],
                        [ 1.2970e-02,  1.0097e-02,  1.1278e-02,  1.3000e-02],
                        [-1.0825e-02,  1.3762e-03,  5.6415e-03,  9.2425e-03],
                        [-9.3556e-03,  4.9029e-03, -3.5206e-03,  1.0317e-02]],

The result is that it was not applied...

How can I apply this initialization?

I also tried the following function,

def weights_init_normal(m):
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
        print('layer success')
    elif classname.find("BatchNorm") != -1:
        torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
        torch.nn.init.constant_(m.bias.data, 0.0)
        print('batch success')

but it was not applied either.

I noticed that weight initialization is applied to some layers, but it looks random... I have tried to fix this, but without success.

Please help me.

After a quick check, I found no evidence that the weights and biases are not initialized exactly as the initialization function intends. Keep in mind that a value drawn from N(0, 0.02) lies mostly within ±0.06 (three standard deviations), so entries on the order of 1e-2, like the ones in your state_dict dump, are exactly what a successful initialization looks like.
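As a quick sanity check (a minimal sketch, independent of your model), sampling a tensor from the same distribution that weights_init uses produces values of the same magnitude as your dump:

import torch

torch.manual_seed(0)

# Draw a 4x4 tensor from N(0, 0.02), the distribution weights_init uses
# for Conv2d / ConvTranspose2d weights.
samples = torch.empty(4, 4).normal_(mean=0.0, std=0.02)
print(samples)        # entries on the order of 1e-2, like the dump above
print(samples.std())  # roughly 0.02

The following comparison of the state dict before and after applying weights_init confirms this on the actual model: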

from copy import deepcopy

import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
generator = Generator(100, 64, 1).to(device)
before = deepcopy(generator.state_dict())
generator.apply(weights_init)
after = generator.state_dict()
for k, v in before.items():
    if not k.endswith(("weight", "bias")):
        continue  # skip BatchNorm running stats and counters
    if k.endswith("weight"):
        print(torch.std(after[k]).item())      # ~0.02 for every weight
        print(k, torch.allclose(v, after[k]))  # False: the tensor changed
    if k.endswith("bias"):
        # BatchNorm biases default to zero and are re-set to zero,
        # so both prints show True.
        print(torch.all(before[k] == 0.).item(), torch.all(after[k] == 0.).item())
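Every weight should print a standard deviation close to 0.02, and allclose should print False, i.e. apply did overwrite the default initialization. As a complementary check (a sketch assuming the imports above), you can also inspect the conv weights directly; their empirical mean and std should come out close to the targets 0.0 and 0.02:

import torch.nn as nn

# Inspect every Conv / ConvTranspose weight directly.
for name, module in generator.named_modules():
    if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d)):
        w = module.weight.data
        print(name, round(w.mean().item(), 4), round(w.std().item(), 4))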
