Binary classification in PyTorch



Below is the code I have written for binary classification in PyTorch:

%reset -f
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import numpy as np
import matplotlib.pyplot as plt
import torch.utils.data as data_utils
import torch.nn as nn
import torch.nn.functional as F

device = 'cpu'
num_epochs = 10
hidden_size = 500
num_classes = 2
learning_rate = .001
torch.manual_seed(24)

x1 = np.array([0,0])
x2 = np.array([0,1])
x3 = np.array([1,0])
x4 = np.array([1,1])
x = torch.tensor([x1,x2,x3,x4]).float()
y = torch.tensor([1,0,1,1]).long()
train = data_utils.TensorDataset(x,y)
train_loader = data_utils.DataLoader(train , batch_size=2 , shuffle=True)

input_size = len(x[0])

def weights_init(m):
    if type(m) == nn.Linear:
        m.weight.data.normal_(0.0, 1)

class NeuralNet(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super(NeuralNet, self).__init__()
        self.fc1 = nn.Linear(input_size , hidden_size)
        self.fc2 = nn.Linear(hidden_size , 100)
        self.fc3 = nn.Linear(100 , num_classes)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        out = self.fc1(x)
        out = self.sigmoid(out)
        out = self.fc2(out)
        out = self.sigmoid(out)
        out = self.fc3(out)
        out = self.sigmoid(out)
        return out

model = NeuralNet(input_size, hidden_size, num_classes).to(device)
model.apply(weights_init)
criterionCE = nn.BCELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
loss_values = []

for i in range(0 , 140):
    print('in i' , i)
    total_step = len(train_loader)
    for epoch in range(num_epochs):
        for i,(images , labels) in enumerate(train_loader):
            images = images.to(device)
            labels = labels.to(device)

            outputs = model(images)
            print('outputs' , outputs)

            loss = criterionCE(outputs , labels)
            loss_values.append(loss)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

outputs = model(x)
print(outputs.data.max(1)[1])

This code throws the following error:

~/opt/miniconda3/envs/ds1/lib/python3.8/site-packages/torch/nn/functional.py in binary_cross_entropy(input, target, weight, size_average, reduce, reduction)
2067                       stacklevel=2)
2068     if input.numel() != target.numel():
-> 2069         raise ValueError("Target and input must have the same number of elements. target nelement ({}) "
2070                          "!= input nelement ({})".format(target.numel(), input.numel()))
2071 
ValueError: Target and input must have the same number of elements. target nelement (2) != input nelement (4)

I printed out what I believe is the cause of the error:

print('outputs' , outputs)

which prints:
outputs tensor([[9.9988e-01, 1.4011e-05],
[9.9325e-01, 1.2087e-05]], grad_fn=<SigmoidBackward>)

So the output is 2x2, when I expected a 2x1 result. Have I not set up the model correctly? Since I apply a sigmoid, shouldn't it produce a 2x1 output rather than a 2x2 one?

If you are working on a binary classification task, your model should output a single logit. Since you have defined self.fc3 with 2 output neurons, you get 2 logits per sample, which is why the output is 2x2 instead of 2x1. You should therefore set self.fc3 to nn.Linear(100, 1).
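A minimal sketch of that change, assuming the same data and training setup as in the question (the class name BinaryNet, the shorter training loop, and the 0.5 decision threshold are just illustrative): fc3 now has a single output unit, and the targets are converted to floats and reshaped to (batch, 1) so that nn.BCELoss sees the same number of elements as the model output.

    import torch
    import torch.nn as nn
    import torch.utils.data as data_utils

    class BinaryNet(nn.Module):
        def __init__(self, input_size, hidden_size):
            super(BinaryNet, self).__init__()
            self.fc1 = nn.Linear(input_size, hidden_size)
            self.fc2 = nn.Linear(hidden_size, 100)
            self.fc3 = nn.Linear(100, 1)        # single output unit for binary classification
            self.sigmoid = nn.Sigmoid()

        def forward(self, x):
            out = self.sigmoid(self.fc1(x))
            out = self.sigmoid(self.fc2(out))
            return self.sigmoid(self.fc3(out))  # probability in [0, 1], shape (batch, 1)

    x = torch.tensor([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])
    y = torch.tensor([1., 0., 1., 1.]).unsqueeze(1)   # float targets, shape (batch, 1) to match the output
    train_loader = data_utils.DataLoader(data_utils.TensorDataset(x, y), batch_size=2, shuffle=True)

    model = BinaryNet(input_size=2, hidden_size=500)
    criterion = nn.BCELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

    for epoch in range(10):
        for images, labels in train_loader:
            outputs = model(images)             # shape (2, 1) -- matches the (2, 1) labels
            loss = criterion(outputs, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

    # predicted class: threshold the probability at 0.5
    print((model(x) > 0.5).long().squeeze(1))

Equivalently, you could drop the final sigmoid and use nn.BCEWithLogitsLoss on the raw single logit, which is numerically more stable; either way, the output and target must have the same shape.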

Latest update