Expected tensor for argument #1 'input' to have the same dimension



Using the code below, I create 10 instances of training data for each class; each instance has 100 dimensions, and each of those 100 dimensions contains 3 values. Its shape is therefore (3, 100, 10). This emulates 10 instances of 100 pixels each, with 3 channels to emulate RGB values.

I have set this model up to classify only between 1 and 0.

When the softmax layer is applied, I receive the error:

RuntimeError: Expected tensor for argument #1 'input' to have the same dimension as tensor for 'result'; but 4 does not equal 3 (while checking arguments for cudnn_convolution)

I'm using PyTorch 0.4.0 (checked with print(torch.__version__)). How do I correctly set the dimensions for the softmax layer? I believe my dimensions are correct.

%reset -f
import os
import torch
import torchvision
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data_utils
from skimage import io, transform
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from sklearn.preprocessing import scale
from random import randint

batch_size_value = 10

train_dataset = []
mu, sigma = 0, 0.1  # mean and standard deviation
num_instances = 10

# Create 3000 values and reshape to (3, 100, 10); this emulates 10 instances
# of 100 pixels, each with 3 channels to emulate an RGB value
for i in range(num_instances):
    image_x = np.random.normal(mu, sigma, 3000).reshape((3, 100, 10))
    train_dataset.append(image_x)

mu, sigma = 100, 0.80  # mean and standard deviation
for i in range(num_instances):
    image_x = np.random.normal(mu, sigma, 3000).reshape((3, 100, 10))
    train_dataset.append(image_x)

labels_1 = [1 for i in range(num_instances)]
labels_0 = [0 for i in range(num_instances)]
labels = labels_1 + labels_0
print(labels)

x2 = torch.tensor(train_dataset).float()
y2 = torch.tensor(labels).long()

my_train2 = data_utils.TensorDataset(x2, y2)
train_loader2 = data_utils.DataLoader(my_train2, batch_size=batch_size_value, shuffle=False)

# Device configuration
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('device', device)

# Hyper-parameters
num_epochs = 10
num_classes = 2
batch_size = 5
learning_rate = 0.001

# Convolutional neural network (two convolutional layers)
class ConvNet(nn.Module):
    def __init__(self):
        super(ConvNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, kernel_size=5)
        self.conv2 = nn.Conv2d(6, 16, kernel_size=5)
        self.fc1 = nn.Linear(864, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, num_classes)

    def forward(self, x):
        out = F.relu(self.conv1(x))
        out = F.max_pool2d(out, 2)
        out = F.relu(self.conv2(out))
        out = F.max_pool2d(out, 2)
        out = out.view(out.size(0), -1)
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        out = self.fc3(out)

model = ConvNet().to(device)

# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Train the model
total_step = len(train_loader2)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader2):
        images = images.to(device)
        labels = labels.to(device)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (i % 10) == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                  .format(epoch + 1, num_epochs, i + 1, total_step, loss.item()))
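
A useful way to locate an error like this is to push a single batch through the layers one at a time on the CPU and print the intermediate shapes. A small debugging sketch, using the model and loader defined above (not part of the original code):

debug_model = ConvNet()                   # run on CPU for clearer errors
images, _ = next(iter(train_loader2))     # images: (10, 3, 100, 10)
out = F.relu(debug_model.conv1(images))
print('after conv1:', out.shape)          # (10, 6, 96, 6)
out = F.max_pool2d(out, 2)
print('after pool1:', out.shape)          # (10, 6, 48, 3)
# conv2 has kernel_size=5, but the remaining width is only 3 (3 < 5),
# so applying conv2 here is what triggers the RuntimeError.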

Update:

Removing these lines:

out = F.relu(self.conv2(out))
out = F.max_pool2d(out, 2)

fixed the issue of the spatial dimensions becoming smaller than the kernel.
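
To make the arithmetic explicit: with no padding and stride 1, nn.Conv2d shrinks each spatial dimension by kernel_size - 1, and max_pool2d(out, 2) roughly halves it. A minimal sketch of that trace (conv_out is a hypothetical helper implementing the standard output-size formula, not part of the code above):

def conv_out(size, kernel, stride=1, padding=0):
    # Standard output-size formula for Conv2d / MaxPool2d (dilation 1)
    return (size + 2 * padding - kernel) // stride + 1

h, w = 100, 10                               # input spatial size
h, w = conv_out(h, 5), conv_out(w, 5)        # conv1, kernel 5 -> (96, 6)
h, w = conv_out(h, 2, 2), conv_out(w, 2, 2)  # max_pool2d(., 2) -> (48, 3)
print(h, w)       # 48 3: the width (3) is now smaller than conv2's kernel (5)
print(6 * h * w)  # 864: with conv2/pool removed, this matches nn.Linear(864, 120)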

There is a structural problem and a bug in the code. Here is the solution:

class ConvNet(nn.Module):
    def __init__(self):
        super(ConvNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, kernel_size=5)
        self.conv2 = nn.Conv2d(6, 16, kernel_size=1)  # kernel_size 5 ----> 1
        self.fc1 = nn.Linear(384, 120)                # FC node count 864 ----> 384
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, num_classes)

    def forward(self, x):
        out = F.relu(self.conv1(x))
        out = F.max_pool2d(out, 2)
        out = F.relu(self.conv2(out))
        out = F.max_pool2d(out, 2)
        out = out.view(out.size(0), -1)
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        out = self.fc3(out)
        return out                                    # don't forget to return the output
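
As a quick sanity check of the fixed architecture on random CPU data (assuming num_classes = 2 as in the question):

model = ConvNet()
x = torch.randn(10, 3, 100, 10)  # one batch shaped like the training data
out = model(x)
print(out.shape)                 # torch.Size([10, 2])
# Shape trace: conv1 (k=5) -> (10, 6, 96, 6); pool -> (10, 6, 48, 3);
# conv2 (k=1) -> (10, 16, 48, 3); pool -> (10, 16, 24, 1);
# flatten -> 16 * 24 * 1 = 384, matching nn.Linear(384, 120).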
