Error: RuntimeError: An attempt has been made to start a new process before the current process has finished its bootstrapping phase



The error appears after running the following script:

# -*- coding: utf-8 -*-

# Import stuff

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils import data
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import cv2
import numpy as np
import csv

Step 1: Read from the log file

samples = []
with open('data/driving_log.csv') as csvfile:
    reader = csv.reader(csvfile)
    next(reader, None)
    for line in reader:
        samples.append(line)

Step 2: Divide the data into a training set and a validation set

train_len = int(0.8*len(samples))
valid_len = len(samples) - train_len
train_samples, validation_samples = data.random_split(samples, lengths=[train_len, valid_len])

Step 3a: Define the augmentation, the transformation process, the parameters and the dataset for the DataLoader

def augment(imgName, angle):
    name = 'data/IMG/' + imgName.split('/')[-1]
    current_image = cv2.imread(name)
    current_image = current_image[65:-25, :, :]
    if np.random.rand() < 0.5:
        current_image = cv2.flip(current_image, 1)
        angle = angle * -1.0
    return current_image, angle

class Dataset(data.Dataset):

    def __init__(self, samples, transform=None):
        self.samples = samples
        self.transform = transform

    def __getitem__(self, index):
        batch_samples = self.samples[index]

        steering_angle = float(batch_samples[3])

        center_img, steering_angle_center = augment(batch_samples[0], steering_angle)
        left_img, steering_angle_left = augment(batch_samples[1], steering_angle + 0.4)
        right_img, steering_angle_right = augment(batch_samples[2], steering_angle - 0.4)

        center_img = self.transform(center_img)
        left_img = self.transform(left_img)
        right_img = self.transform(right_img)

        return (center_img, steering_angle_center), (left_img, steering_angle_left), (right_img, steering_angle_right)

    def __len__(self):
        return len(self.samples)

Step 3b: Create generators with the DataLoader to process the data in parallel

def _my_normalization(x):
    return x / 255.0 - 0.5

transformations = transforms.Compose([transforms.Lambda(_my_normalization)])

params = {'batch_size': 32,
          'shuffle': True,
          'num_workers': 4}

training_set = Dataset(train_samples, transformations)
training_generator = data.DataLoader(training_set, **params)

validation_set = Dataset(validation_samples, transformations)
validation_generator = data.DataLoader(validation_set, **params)

Step 4: Define the network

class NetworkDense(nn.Module):

    def __init__(self):
        super(NetworkDense, self).__init__()
        self.conv_layers = nn.Sequential(
            nn.Conv2d(3, 24, 5, stride=2),
            nn.ELU(),
            nn.Conv2d(24, 36, 5, stride=2),
            nn.ELU(),
            nn.Conv2d(36, 48, 5, stride=2),
            nn.ELU(),
            nn.Conv2d(48, 64, 3),
            nn.ELU(),
            nn.Conv2d(64, 64, 3),
            nn.Dropout(0.25)
        )
        self.linear_layers = nn.Sequential(
            nn.Linear(in_features=64 * 2 * 33, out_features=100),
            nn.ELU(),
            nn.Linear(in_features=100, out_features=50),
            nn.ELU(),
            nn.Linear(in_features=50, out_features=10),
            nn.Linear(in_features=10, out_features=1)
        )

    def forward(self, input):
        input = input.view(input.size(0), 3, 70, 320)
        output = self.conv_layers(input)
        output = output.view(output.size(0), -1)
        output = self.linear_layers(output)
        return output

class NetworkLight(nn.Module):

    def __init__(self):
        super(NetworkLight, self).__init__()
        self.conv_layers = nn.Sequential(
            nn.Conv2d(3, 24, 3, stride=2),
            nn.ELU(),
            nn.Conv2d(24, 48, 3, stride=2),
            nn.MaxPool2d(4, stride=4),
            nn.Dropout(p=0.25)
        )
        self.linear_layers = nn.Sequential(
            nn.Linear(in_features=48*4*19, out_features=50),
            nn.ELU(),
            nn.Linear(in_features=50, out_features=10),
            nn.Linear(in_features=10, out_features=1)
        )

    def forward(self, input):
        input = input.view(input.size(0), 3, 70, 320)
        output = self.conv_layers(input)
        output = output.view(output.size(0), -1)
        output = self.linear_layers(output)
        return output

Step 5: Define the optimizer

model = NetworkLight()
optimizer = optim.Adam(model.parameters(), lr=0.0001)
criterion = nn.MSELoss()

Step 6: Check the device and define a function that moves tensors to that device

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('device is: ', device)
def toDevice(datas, device):
    imgs, angles = datas
    return imgs.float().to(device), angles.float().to(device)

Step 7: Train and validate the network for the defined maximum number of epochs

max_epochs = 22

for epoch in range(max_epochs):

    model.to(device)

    # Training
    train_loss = 0
    model.train()
    for local_batch, (centers, lefts, rights) in enumerate(training_generator):
        # Transfer to GPU
        centers, lefts, rights = toDevice(centers, device), toDevice(lefts, device), toDevice(rights, device)

        # Model computations
        optimizer.zero_grad()
        datas = [centers, lefts, rights]
        for data in datas:
            imgs, angles = data
            # print("training image: ", imgs.shape)
            outputs = model(imgs)
            loss = criterion(outputs, angles.unsqueeze(1))
            loss.backward()
            optimizer.step()

            train_loss += loss.data[0].item()

        if local_batch % 100 == 0:
            print('Loss: %.3f '
                  % (train_loss / (local_batch + 1)))

    # Validation
    model.eval()
    valid_loss = 0
    with torch.set_grad_enabled(False):
        for local_batch, (centers, lefts, rights) in enumerate(validation_generator):
            # Transfer to GPU
            centers, lefts, rights = toDevice(centers, device), toDevice(lefts, device), toDevice(rights, device)

            # Model computations
            optimizer.zero_grad()
            datas = [centers, lefts, rights]
            for data in datas:
                imgs, angles = data
                # print("Validation image: ", imgs.shape)
                outputs = model(imgs)
                loss = criterion(outputs, angles.unsqueeze(1))

                valid_loss += loss.data[0].item()

            if local_batch % 100 == 0:
                print('Valid Loss: %.3f '
                      % (valid_loss / (local_batch + 1)))

Step 8: Define the state and save the model to it

state = {
    'model': model.module if device == 'cuda' else model,
}

torch.save(state, 'model.h5')

This is the error message:

"D: \VICO\备份\venv\Scripts\python.exe"quot;D:/VICO/Back-up/venv/Scripts/self-driving_car.py";设备是:cpu设备是:cpu追踪(最近一次通话):文件"&";,第1行,在文件";C: \Users\isonata\AppData\Local\Programs\Python37\lib\multiprocessing\power.py",第105行,spawn_mainexitcode=_main(fd)文件";C: \Users\isonata\AppData\Local\Programs\Python37\lib\multiprocessing\power.py",第114行,in _mainprepare(preparation_data)文件";C: \Users\isonata\AppData\Local\Programs\Python37\lib\multiprocessing\power.py",第225行,准备中_fixup_main_from_path(数据"init_main_from _path"])文件";C: \Users\isonata\AppData\Local\Programs\Python37\lib\multiprocessing\power.py",第277行,在_fixup_main_from_path中run_name=">mp_main")文件";C: \Users\isonata\AppData\Local\Programs\Python37\lib\runpy.py",第263行,在run_path中追踪(最近一次通话):文件";D:/VICO/Back-up/venv/Scripts/self-driving_car.py";,第165行,inpkg_name=pkg_name,script_name=fname)文件";C: \Users\isonata\AppData\Local\Programs\Python37\lib\runpy.py",第96行,在_run_module_code中对于local_batch,enumerate(training_generator)中的(center,lefts,rights):文件";D: \VICO\Back-up\venv\lib\site-packages\torch\utils\data\dataloader.py",第291行,在itermod_name、mod_spec、pkg_name、script_name中)文件";C: \Users\isonata\AppData\Local\Programs\Python37\lib\runpy.py",第85行,在_run_code中exec(代码,run_globals)文件";D: \VICO\Back-up\venv\Scripts\self_driving_car.py";,第165行,inreturn _MultiProcessingDataLoaderIter(自身)文件";D: \VICO\Back-up\venv\lib\site-packages\torch\utils\data\dataloader.py",第737行,在local_batch的init中,枚举(training_generator)中的(center,lefts,right):文件";D: \VICO\Back-up\venv\lib\site-packages\torch\utils\data\dataloader.py",第291行,iterreturn _MultiProcessingDataLoaderIter(self)文件";D: \VICO\Back-up\venv\lib\site-packages\torch\utils\data\dataloader.py",第737行,在initw.start()中文件";C: \Users\isonata\AppData\Local\Programs\Python37\lib\multiprocessing\process.py";,第112行,起始自我_popen=自我_Popen(自我)文件";C: \Users\isonata\AppData\Local\Programs\Python37\lib\multiprocessing\context.py";,第223行,在_Popen中w.start()文件";C: \Users\isonata\AppData\Local\Programs\Python37\lib\multiprocessing\process.py";,第112行,起始return _default_context.get_context().Process_Popen(Process_obj)文件";C: \Users\isonata\AppData\Local\Programs\Python37\lib\multiprocessing\context.py";,第322行,在_Popen中自我_popen=自我_Popen(自我)文件";C: \Users\isonata\AppData\Local\Programs\Python37\lib\multiprocessing\context.py";,第223行,在_Popen中return Popen(process_obj)文件";C: \Users\isonata\AppData\Local\Programs\Python37\lib\multiprocessing\popen_spawn_win32.py";,第89行,在initreturn _default_context.get_context().Process_Popen(Process_obj)中文件";C: \Users\isonata\AppData\Local\Programs\Python37\lib\multiprocessing\context.py";,第322行,在_Popen中reduction.dump(process_obj,to_child)文件";C: \Users\isonata\AppData\Local\Programs\Python37\lib\multiprocessing\reduction.py";,第60行,倾卸return Popen(process_obj)文件";C: \Users\isonata\AppData\Local\Programs\Python37\lib\multiprocessing\popen_spawn_win32.py";,第46行,在initForkingPickler(文件,协议).dump(obj)中BrokenPipeError:[Erno 32]管道破裂prep_data=生成.get_preparation_data(process_obj_name)文件";C: \Users\isonata\AppData\Local\Programs\Python37\lib\multiprocessing\power.py",第143行,在get_preparation_data中_check_not_importing_main()文件";C: \Users\isonata\AppData\Local\Programs\Python37\lib\multiprocessing\power.py",第136行,在_check_not_importing_main中不会冻结以生成可执行文件。")运行时错误:已尝试在当前进程已完成其引导阶段。

        This probably means that you are not using fork to start your
        child processes and you have forgotten to use the proper idiom
        in the main module:

            if __name__ == '__main__':
                freeze_support()
                ...

        The "freeze_support()" line can be omitted if the program
        is not going to be frozen to produce an executable.

Process finished with exit code 1

I am not sure what the next step is to resolve the problem.

Solved. Simply use:

if __name__ == "__main__":
    main()

so that the training code only runs when the script is executed directly and is not re-run each time a spawned DataLoader worker process re-imports the module, as sketched below.
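On Windows, multiprocessing starts workers with spawn, so every DataLoader worker re-imports the main module; without the guard, that re-import runs the top-level training code and tries to spawn workers again, which is exactly what the RuntimeError complains about. Here is a minimal, self-contained sketch of the idiom; the TensorDataset and the loop body are only stand-ins for the question's Dataset class and training loop, not the original code:

import torch
from torch.utils.data import DataLoader, TensorDataset

def main():
    # everything that builds DataLoaders or trains the model lives in main()
    dataset = TensorDataset(torch.randn(64, 3, 70, 320), torch.randn(64, 1))
    loader = DataLoader(dataset, batch_size=32, shuffle=True, num_workers=4)
    for imgs, angles in loader:
        print(imgs.shape, angles.shape)

if __name__ == "__main__":
    # runs only when the script is executed directly; a spawned worker that
    # re-imports this module skips it, so no recursive worker creation
    main()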

I ran into a similar problem and fixed it by setting the `num_workers` parameter of the DataLoader back to zero:

DataLoader(num_workers=0)
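Applied to the question's script, that would just mean changing the `params` dict that feeds both loaders; with zero workers the batches are loaded in the main process, so no child process is spawned at all (at the cost of slower data loading):

params = {'batch_size': 32,
          'shuffle': True,
          'num_workers': 0}   # load data in the main process, nothing is spawned

training_generator = data.DataLoader(training_set, **params)
validation_generator = data.DataLoader(validation_set, **params)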
