Fastai V1 custom PyTorch model



I have been trying to use fastai with a custom PyTorch model. My code is as follows:

import numpy as np

X_train = np.load(dirpath + 'X_train.npy')
X_valid = np.load(dirpath + 'X_valid.npy')
Y_train = np.load(dirpath + 'Y_train.npy')
Y_valid = np.load(dirpath + 'Y_valid.npy')

X_train's shape is (240, 122, 96), and Y_train's shape is (240, 1).

Then I convert them to torch tensors:

# Converting data to torch tensors
import torch

def to_torch_data(x,np_type,tch_type):
    return torch.from_numpy(x.astype(np_type)).to(tch_type)

X_train = to_torch_data(X_train,float,torch.float32)
X_valid = to_torch_data(X_valid,float,torch.float32)
Y_train = to_torch_data(Y_train,float,torch.float32)
Y_valid = to_torch_data(Y_valid,float,torch.float32)
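
As a quick sanity check (illustrative, not from the original post), the helper should yield float32 tensors with the shapes noted above:

# Illustrative check: dtypes are float32 and shapes are unchanged
print(X_train.dtype, X_train.shape)  # torch.float32 torch.Size([240, 122, 96])
print(Y_train.dtype, Y_train.shape)  # torch.float32 torch.Size([240, 1])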

Then I create TensorDatasets for the fastai DataBunch wrapper:

# Creating torch tensor datasets so that data can be used
# with the DataBunch.create function from fastai
import torch.utils.data as tdatautils          # alias assumed from the usage below
from fastai.basic_data import DataBunch        # fastai v1 import path

train_ds = tdatautils.TensorDataset(X_train,Y_train)
valid_ds = tdatautils.TensorDataset(X_valid,Y_valid)

# Creating DataBunch object to be used as data in fastai methods.
batch_size = 24
my_data_bunch = DataBunch.create(train_ds,valid_ds,bs=batch_size)
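
An aside that is not in the original post: DataBunch.create also takes a num_workers argument, which defaults to the machine's CPU count. On Windows, setting it to 0 keeps data loading in the main process, a common way to rule out worker-process crashes like the one in the traceback below:

# Hedged variant: num_workers=0 disables multiprocessing DataLoader workers,
# a frequent workaround for "DataLoader worker ... exited unexpectedly" on Windows.
my_data_bunch = DataBunch.create(train_ds, valid_ds, bs=batch_size, num_workers=0)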

Here is my custom PyTorch model:

# Creating corresponding torch model
import torch.nn as nn
import torch.nn.functional as F

class Net(nn.Module):
    def __init__(self,droprate=0,activationF=None):
        super(Net, self).__init__()
        self.lstm_0 = nn.LSTM(96, 720) 
        self.activation_0 = nn.ELU()
        self.dropout_0 = nn.Dropout(p=droprate)
        self.lstm_1 = nn.LSTM(720,480)        
        self.activation_1 = nn.ELU()
        self.batch_norm_1 = nn.BatchNorm1d(122)
        self.fc_2 = nn.Linear(480,128)
        self.dropout_2 = nn.Dropout(p=droprate)
        self.last = nn.Linear(128,1)
        self.last_act = nn.ReLU()
    def forward(self, x):
        out,hid1 = self.lstm_0(x)
        out = self.dropout_0(self.activation_0(out))
        out,hid2 = self.lstm_1(out)
        out = out[:,-1,:]
        out = self.batch_norm_1(self.activation_1(out))
        out = self.dropout_2(self.fc_2(out))
        out = self.last_act(self.last(out))
        return out
# Create an instance of the model (train_droprate and train_activation
# are defined elsewhere in the original notebook)
net = Net(droprate=train_droprate,activationF=train_activation) #.cuda()
print(net)  
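
The learner itself (my_learner in the traceback below) is not shown in the question. It was presumably created along these lines; this is a minimal sketch, and the loss function (nn.MSELoss, matching the single regression output) is an assumption:

# Hypothetical reconstruction of the missing Learner creation.
# The MSE loss is an assumption based on the single-unit regression head.
from fastai.basic_train import Learner

my_learner = Learner(my_data_bunch, net, loss_func=nn.MSELoss())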

With all of this in place, I ran the learner's lr_find method and got this error:

Empty                                     Traceback (most recent call last)
C:\Anaconda3\envs\fastai\lib\site-packages\torch\utils\data\dataloader.py in _try_get_batch(self, timeout)
    510         try:
--> 511             data = self.data_queue.get(timeout=timeout)
    512             return (True, data)

C:\Anaconda3\envs\fastai\lib\queue.py in get(self, block, timeout)
    171                     if remaining <= 0.0:
--> 172                         raise Empty
    173                     self.not_empty.wait(remaining)

Empty:

During handling of the above exception, another exception occurred:

RuntimeError                              Traceback (most recent call last)
<ipython-input-35-e4b7603c0a82> in <module>
----> 1 my_learner.lr_find()

~\Desktop\fastai\fastai\fastai\train.py in lr_find(learn, start_lr, end_lr, num_it, stop_div, wd)
     30     cb = LRFinder(learn, start_lr, end_lr, num_it, stop_div)
     31     epochs = int(np.ceil(num_it/len(learn.data.train_dl)))
---> 32     learn.fit(epochs, start_lr, callbacks=[cb], wd=wd)
     33
     34 def to_fp16(learn:Learner, loss_scale:float=None, max_noskip:int=1000, dynamic:bool=True, clip:float=None,

~\Desktop\fastai\fastai\fastai\basic_train.py in fit(self, epochs, lr, wd, callbacks)
    197         callbacks = [cb(self) for cb in self.callback_fns + listify(defaults.extra_callback_fns)] + listify(callbacks)
    198         if defaults.extra_callbacks is not None: callbacks += defaults.extra_callbacks
--> 199         fit(epochs, self, metrics=self.metrics, callbacks=self.callbacks+callbacks)
    200
    201     def create_opt(self, lr:Floats, wd:Floats=0.)->None:

~\Desktop\fastai\fastai\fastai\basic_train.py in fit(epochs, learn, callbacks, metrics)
     97             cb_handler.set_dl(learn.data.train_dl)
     98             cb_handler.on_epoch_begin()
---> 99             for xb,yb in progress_bar(learn.data.train_dl, parent=pbar):
    100                 xb, yb = cb_handler.on_batch_begin(xb, yb)
    101                 loss = loss_batch(learn.model, xb, yb, learn.loss_func, learn.opt, cb_handler)

C:\Anaconda3\envs\fastai\lib\site-packages\fastprogress\fastprogress.py in __iter__(self)
     70         self.update(0)
     71         try:
---> 72             for i,o in enumerate(self._gen):
     73                 if i >= self.total: break
     74                 yield o

~\Desktop\fastai\fastai\fastai\basic_data.py in __iter__(self)
     73     def __iter__(self):
     74         "Process and returns items from `DataLoader`."
---> 75         for b in self.dl: yield self.proc_batch(b)
     76
     77     @classmethod

C:\Anaconda3\envs\fastai\lib\site-packages\torch\utils\data\dataloader.py in __next__(self)
    574         while True:
    575             assert (not self.shutdown and self.batches_outstanding > 0)
--> 576             idx, batch = self._get_batch()
    577             self.batches_outstanding -= 1
    578             if idx != self.rcvd_idx:

C:\Anaconda3\envs\fastai\lib\site-packages\torch\utils\data\dataloader.py in _get_batch(self)
    541         elif self.pin_memory:
    542             while self.pin_memory_thread.is_alive():
--> 543                 success, data = self._try_get_batch()
    544                 if success:
    545                     return data

C:\Anaconda3\envs\fastai\lib\site-packages\torch\utils\data\dataloader.py in _try_get_batch(self, timeout)
    517             if not all(w.is_alive() for w in self.workers):
    518                 pids_str = ', '.join(str(w.pid) for w in self.workers if not w.is_alive())
--> 519                 raise RuntimeError('DataLoader worker (pid(s) {}) exited unexpectedly'.format(pids_str))
    520             if isinstance(e, queue.Empty):
    521                 return (False, None)

RuntimeError: DataLoader worker (pid(s) 9584, 7236, 5108, 932, 13228, 13992, 4576, 13204) exited unexpectedly

I have dug into the DataLoader code but could not find anything helpful.

Although I don't understand the error message you posted, I can see one problem in your code. In the forward pass you have:

out = out[:,-1,:] # batch_size x 480
out = self.batch_norm_1(self.activation_1(out))

but you declared batch_norm_1 as:

self.batch_norm_1 = nn.BatchNorm1d(122)

It should be:

self.batch_norm_1 = nn.BatchNorm1d(480)
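
A quick way to see why: taking the last timestep of the second LSTM's output leaves a tensor of size batch_size x 480, so BatchNorm1d must be built with 480 features, not the sequence length 122. A minimal standalone check (illustrative; it uses batch_first=True so the tensor layout matches the batch_size x 480 comment above):

# Illustrative shape check, assuming batch-first tensors as the
# "batch_size x 480" comment implies.
import torch
import torch.nn as nn

x = torch.randn(24, 122, 96)                   # (batch, seq_len, features)
out, _ = nn.LSTM(96, 720, batch_first=True)(x)
out, _ = nn.LSTM(720, 480, batch_first=True)(out)
out = out[:, -1, :]                            # last timestep -> (24, 480)
print(out.shape)                               # torch.Size([24, 480])
print(nn.BatchNorm1d(480)(out).shape)          # works: torch.Size([24, 480])
# nn.BatchNorm1d(122)(out)                     # would raise a size-mismatch error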
