RuntimeError:期望的标量类型为Double,但发现为Float



我在GCNN工作。我的输入数据是float64。但是每当我运行代码时,就会显示这个错误。我试着把所有张量都转换成二重张量,但没有成功。我的数据主要是在numpy数组中,然后我把它们转换成pytorch张量。

这是我的数据。这里我将numpy数组转换为张量,并将张量转换为几何数据来运行gcnn。
e_index1 = torch.tensor(edge_index)
x1 = torch.tensor(x)
y1 = torch.tensor(y)
print(x.dtype)
print(y.dtype)
print(edge_index.dtype)
from torch_geometric.data import Data
data = Data(x=x1, edge_index=e_index1, y=y1)

输出:

float64
float64
int64

这是我的gcnn类的代码和其余的代码。

import torch
import torch.nn.functional as F
from torch_geometric.nn import GCNConv

class GCN(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = GCNConv(data.num_node_features, 16)
self.conv2 = GCNConv(16, data.num_node_features)
def forward(self, data):
x, edge_index = data.x, data.edge_index
x = self.conv1(x, edge_index)
x = F.relu(x)
x = F.dropout(x, training=self.training)
x = self.conv2(x, edge_index)
return F.log_softmax(x, dim=1)
device = torch.device('cpu')
model = GCN().to(device)
data = data.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)
model.train()
for epoch in range(10):
optimizer.zero_grad()
out = model(data)
loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
loss.backward()
optimizer.step()

错误日志

---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-148-e816c251670b> in <module>
7 for epoch in range(10):
8     optimizer.zero_grad()
----> 9     out = model(data)
10     loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
11     loss.backward()
5 frames
/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1188         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1189                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1190             return forward_call(*input, **kwargs)
1191         # Do not call functions when jit is used
1192         full_backward_hooks, non_full_backward_hooks = [], []
<ipython-input-147-c1bfee724570> in forward(self, data)
13         x, edge_index = data.x.type(torch.DoubleTensor), data.edge_index
14 
---> 15         x = self.conv1(x, edge_index)
16         x = F.relu(x)
17         x = F.dropout(x, training=self.training)
/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1188         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1189                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1190             return forward_call(*input, **kwargs)
1191         # Do not call functions when jit is used
1192         full_backward_hooks, non_full_backward_hooks = [], []
/usr/local/lib/python3.8/dist-packages/torch_geometric/nn/conv/gcn_conv.py in forward(self, x, edge_index, edge_weight)
193                     edge_index = cache
194 
--> 195         x = self.lin(x)
196 
197         # propagate_type: (x: Tensor, edge_weight: OptTensor)
/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1188         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1189                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1190             return forward_call(*input, **kwargs)
1191         # Do not call functions when jit is used
1192         full_backward_hooks, non_full_backward_hooks = [], []
/usr/local/lib/python3.8/dist-packages/torch_geometric/nn/dense/linear.py in forward(self, x)
134             x (Tensor): The features.
135         """
--> 136         return F.linear(x, self.weight, self.bias)
137 
138     @torch.no_grad()
RuntimeError: expected scalar type Double but found Float

我也尝试了stackover flow博客中给出的解决方案。但没有成功。重复显示相同的错误

您可以使用model.double()将所有模型参数转换为double类型。这应该给出一个兼容的模型,因为您的输入数据是双重的。请记住,由于双类型具有更高的精度性质,因此通常比单类型慢。

最新更新