Feeding a classifier data from an LSTM autoencoder

Goal:

I have built an LSTM autoencoder with the goal of reducing the number of features. My plan is to encode some inputs and later feed the encodings to a classifier. The encoder takes data of shape [batch_size, timesteps, features_of_timesteps]. However, in the output layer of the encoder part I only return the last hidden state, which has the form [1, timesteps, features_of_timesteps].

class Encoder(nn.Module):
    def __init__(self, input_size, first_layer, second_layer, n_layers):
        super(Encoder, self).__init__()
        self.n_layers = n_layers
        # getSequence() and getLast() are custom helper modules (definitions not shown)
        # that presumably unpack the (output, (h_n, c_n)) tuple returned by nn.LSTM:
        # the first keeps the full output sequence, the second keeps only the last hidden state.
        self.encode = nn.Sequential(nn.LSTM(input_size, first_layer, batch_first=True),
                                    getSequence(),
                                    nn.ReLU(True),
                                    nn.LSTM(first_layer, second_layer),
                                    getLast())
        self.decode = nn.Sequential(nn.LSTM(second_layer, first_layer, batch_first=True),
                                    getSequence(),
                                    nn.ReLU(True),
                                    nn.LSTM(first_layer, input_size),
                                    getSequence())

    def forward(self, x):
        x = x.float()
        x = self.encode(x)
        x = x.repeat(batch_size, 1, 1)  # batch_size is a global here
        x = self.decode(x)
        return x

Concern:

I am worried that the last hidden state of the second LSTM layer in the encoding part of the model summarizes the entire batch while reducing the feature dimensionality. This feels wrong, because I am trying to reduce a single time series to a smaller vector, not an entire batch of time series to a single vector. Is my concern justified?
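For reference, what the last hidden state actually contains can be checked directly from the shapes nn.LSTM returns; a minimal sketch with made-up sizes:

import torch
from torch import nn

batch_size, timesteps, n_features, hidden = 4, 10, 8, 5
x = torch.randn(batch_size, timesteps, n_features)

# With batch_first=True the input is read as (batch, time, features);
# h_n is always (num_layers * num_directions, batch, hidden_size).
lstm_bf = nn.LSTM(n_features, hidden, batch_first=True)
out, (h_n, _) = lstm_bf(x)
print(out.shape, h_n.shape)  # torch.Size([4, 10, 5]) torch.Size([1, 4, 5])

# Without batch_first the same tensor is read as (time, batch, features),
# so the first two dimensions swap roles and h_n becomes (1, 10, 5).
lstm = nn.LSTM(n_features, hidden)
out, (h_n, _) = lstm(x)
print(out.shape, h_n.shape)  # torch.Size([4, 10, 5]) torch.Size([1, 10, 5])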

There are several issues in your code. To keep things simple, here is a well-defined model instead: the code below builds an LSTM autoencoder that reconstructs inputs of shape (batch_size, timesteps, number_of_features_at_each_timestep):

import torch
from torch import nn

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


class Encoder(nn.Module):
    def __init__(self, seq_len, n_features, embedding_dim=64):
        super(Encoder, self).__init__()
        self.seq_len, self.n_features = seq_len, n_features
        self.embedding_dim, self.hidden_dim = embedding_dim, 2 * embedding_dim
        self.rnn1 = nn.LSTM(
            input_size=n_features,
            hidden_size=self.hidden_dim,
            num_layers=1,
            batch_first=True
        )
        self.rnn2 = nn.LSTM(
            input_size=self.hidden_dim,
            hidden_size=self.embedding_dim,
            num_layers=1,
            batch_first=True
        )

    def forward(self, x):
        x, (_, _) = self.rnn1(x)
        x, (hidden_n, _) = self.rnn2(x)
        # hidden_n is the last hidden state of rnn2: shape (1, batch_size, embedding_dim)
        return hidden_n


class Decoder(nn.Module):
    def __init__(self, seq_len, input_dim=64, n_features=1):
        super(Decoder, self).__init__()
        self.seq_len, self.input_dim = seq_len, input_dim
        self.hidden_dim, self.n_features = 2 * input_dim, n_features
        self.rnn1 = nn.LSTM(
            input_size=input_dim,
            hidden_size=input_dim,
            num_layers=1,
            batch_first=True
        )
        self.rnn2 = nn.LSTM(
            input_size=input_dim,
            hidden_size=self.hidden_dim,
            num_layers=1,
            batch_first=True
        )
        self.output_layer = nn.Linear(self.hidden_dim, n_features)

    def forward(self, x):
        # repeat the embedding seq_len times and move the batch dim first:
        # (1, batch_size, input_dim) -> (batch_size, seq_len, input_dim)
        x = x.repeat(self.seq_len, 1, 1)
        x = x.permute(1, 0, 2)
        x, (hidden_n, cell_n) = self.rnn1(x)
        x, (hidden_n, cell_n) = self.rnn2(x)
        return self.output_layer(x)


class RecurrentAutoencoder(nn.Module):
    def __init__(self, seq_len, n_features, embedding_dim=64):
        super(RecurrentAutoencoder, self).__init__()
        self.encoder = Encoder(seq_len, n_features, embedding_dim).to(device)
        self.decoder = Decoder(seq_len, embedding_dim, n_features).to(device)

    def forward(self, x):
        print("Inputs size:", x.size())
        x = self.encoder(x)
        print("Representation size: ", x.size())
        x = self.decoder(x)
        print("Outputs size: ", x.size())
        return x


batch_n = 5
seq_len = 10
n_features = 3
inputs = torch.randn(batch_n, seq_len, n_features).to(device)

model = RecurrentAutoencoder(seq_len, n_features).to(device)
y = model(inputs)

Output:

Inputs size: torch.Size([5, 10, 3])
Representation size:  torch.Size([1, 5, 64])
Outputs size:  torch.Size([5, 10, 3])

Note that the representation (i.e. the output of the encoder) has shape (1, batch_size, embedding_dim).
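To feed this representation to a downstream classifier, as described in the goal above, the leading dimension can simply be dropped so that each sequence maps to one embedding vector. A minimal sketch, assuming a plain linear classifier and the model and inputs defined above (n_classes is a hypothetical number of target classes):

n_classes = 4  # hypothetical number of target classes

with torch.no_grad():
    z = model.encoder(inputs)  # (1, batch_n, embedding_dim) -> torch.Size([1, 5, 64])
    z = z.squeeze(0)           # (batch_n, embedding_dim)    -> torch.Size([5, 64])

classifier = nn.Linear(64, n_classes).to(device)  # 64 = embedding_dim
logits = classifier(z)
print(logits.shape)  # torch.Size([5, 4])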
