如何修复ValueError:太多的值解包(预期2)?



最近遇到这样一个问题:ValueError: too many values to unpack (expected 2).

import os
import natsort
from PIL import Image
import torchvision
import torch
import torch.optim as optim
from torchvision import transforms, models
from torch.utils.data import DataLoader, Dataset
import torch.nn as nn
import torch.nn.functional as F
root_dir = './images/'
class Col(Dataset):
def __init__(self, main_dir, transform):
self.main_dir = main_dir
self.transform = transform
all_images = self.all_img(main_dir = main_dir)
self.total_imges = natsort.natsorted(all_images)

def __len__(self):
return len(self.total_imges)

def __getitem__(self, idx):
img_loc = os.path.join(self.total_imges[idx])
image = Image.open(img_loc).convert("RGB")
tensor_image = self.transform(image)
return tensor_image

def all_img(self, main_dir):
img = []     
for path, subdirs, files in os.walk(main_dir):
for name in files:
img.append(os.path.join(path, name))
return img
model = models.resnet18(pretrained=False)
model.fc = nn.Sequential(nn.Linear(model.fc.in_features, 256),
nn.ReLU(),
nn.Dropout(p=0.3),
nn.Linear(256, 100),
nn.ReLU(),
nn.Dropout(p=0.4),
nn.Linear(100,9))
# model.load_state_dict(torch.load('model.pth'))
for name, param in model.named_parameters():
if("bn" not in name):
param.requires_grad = False
transform = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5457, 0.5457, 0.5457], std=[0.2342, 0.2342, 0.2342])    
])

data = Col(main_dir=root_dir, transform=transform)

dataset = torch.utils.data.DataLoader(data, batch_size=130)

train_set, validate_set= torch.utils.data.random_split(dataset, [round(len(dataset)*0.7), (len(dataset) - round(len(dataset)*0.7))])
if torch.cuda.is_available():
device = torch.device("cuda") 
else:
device = torch.device("cpu")

model.to(device)
def train(model, optimizer, loss_fn, train_set, validate_set, epochs=20, device="cpu"):
for epoch in range(1, epochs+1):
training_loss = 0.0
valid_loss = 0.0
model.train()
for batch in train_set:
optimizer.zero_grad()
inputs, targets = batch
inputs = inputs.to(device)
targets = targets.to(device)
output = model(inputs)
loss = loss_fn(output, targets)
loss.backward()
optimizer.step()
training_loss += loss.data.item() * inputs.size(0)
training_loss /= len(train_set.dataset)

model.eval()
num_correct = 0 
num_examples = 0
for batch in validate_set:
inputs, targets = batch
inputs = inputs.to(device)
output = model(inputs)
targets = targets.to(device)
loss = loss_fn(output,targets) 
valid_loss += loss.data.item() * inputs.size(0)
correct = torch.eq(torch.max(F.softmax(output, dim=1), dim=1)[1], targets)
num_correct += torch.sum(correct).item()
num_examples += correct.shape[0]
valid_loss /= len(validate_set.dataset)

print('Epoch: {}, Training Loss: {:.2f}, Validation Loss: {:.2f}, accuracy = {:.2f}'.format(epoch, training_loss,
valid_loss, num_correct / num_examples))

optimizer = optim.Adam(model.parameters(), lr=0.0001)

但是调用这个函数

train(model, optimizer,torch.nn.CrossEntropyLoss(), train_set.dataset, validate_set.dataset, epochs=100, device=device)

给出这个错误

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
/tmp/ipykernel_4828/634509595.py in <module>
----> 1 train(model, optimizer,torch.nn.CrossEntropyLoss(), train_set.dataset, validate_set.dataset, epochs=100, device=device)

/tmp/ipykernel_4828/1473922939.py in train(model, optimizer, loss_fn, train_set, validate_set, epochs, device)
6         for batch in train_set:
7             optimizer.zero_grad()
----> 8             inputs, targets = batch
9             inputs = inputs.to(device)
10             targets = targets.to(device)

ValueError: too many values to unpack (expected 2)

Batch不同时包含输入和目标。你的问题只是getitem只返回tensor_image(这可能是输入),而不是目标。

相关内容

最新更新