I tried this model on several datasets and it works for univariate time series. However, I ran into a problem when applying it to a multivariate time series; I suspect the TimeDistributed layer is the cause, but I am not sure. I read several posts on the same problem, without success.
trainx shape: (381, 100, 4) | trainy shape: (381, 4)
testx shape: (122, 100, 4) | testy shape: (122, 4)
(samples, timesteps, features)
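For context, arrays of this shape are typically built by sliding a window over the raw series. A minimal sketch of one common way to do that (the helper make_windows, the window length of 100, and the raw array data are my assumptions for illustration, not from the original post):

import numpy as np

def make_windows(data, window=100):
    # Slice a (n, features) series into overlapping windows:
    # x -> (samples, window, features), y -> (samples, features),
    # where each y is the step immediately after its window.
    x, y = [], []
    for i in range(len(data) - window):
        x.append(data[i:i + window])
        y.append(data[i + window])
    return np.array(x), np.array(y)

# e.g. a (481, 4) training series would yield trainx (381, 100, 4) and trainy (381, 4)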
The model is as follows:
from tensorflow import keras

def build_model(X):
    '''
    Builds an autoencoder model.
    @params: X input array
    @return: autoencoder full model, encoder model part
    '''
    encoder_inputs = keras.layers.Input(shape=(X.shape[1], X.shape[2]), name='Input_Layer')
    L1 = keras.layers.LSTM(64, return_sequences=True, name='Encoder_1')(encoder_inputs)
    L2 = keras.layers.LSTM(32, return_sequences=True, name='Encoder_2')(L1)
    code = keras.layers.LSTM(2, return_sequences=False, name='code_vector')(L2)
    L3 = keras.layers.RepeatVector(X.shape[1], name='Repeat_Vector')(code)
    L4 = keras.layers.LSTM(32, return_sequences=True, name='Decoder_1')(L3)
    L5 = keras.layers.LSTM(64, return_sequences=True, name='Decoder_2')(L4)
    decoder_outputs = keras.layers.TimeDistributed(keras.layers.Dense(X.shape[2]), name='Time_Distrubted')(L5)
    encoder = keras.Model(inputs=encoder_inputs, outputs=code, name='Encoder')
    autoencoder = keras.Model(inputs=encoder_inputs, outputs=decoder_outputs, name='Autoencoder')
    return autoencoder, encoder
I then build the model and compile and fit it as follows:
model, encoder = build_model(trainx)
model.compile('adam', loss='mae')
history = model.fit(x=trainx, y=trainy, epochs=100, validation_split=0.1, batch_size=32, callbacks=callbacks, shuffle=False)
I get the following error trace:
<ipython-input-246-e01fa31bc39d> in <module>
----> 1 history = model.fit(x=trainx, y=trainy, epochs=100, validation_split=0.1, batch_size=32, callbacks=callbacks, shuffle=False)
~\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
1181 _r=1):
1182 callbacks.on_train_batch_begin(step)
-> 1183 tmp_logs = self.train_function(iterator)
1184 if data_handler.should_sync:
1185 context.async_wait()
~\Anaconda3\lib\site-packages\tensorflow\python\eager\def_function.py in __call__(self, *args, **kwds)
887
888 with OptionalXlaContext(self._jit_compile):
--> 889 result = self._call(*args, **kwds)
890
891 new_tracing_count = self.experimental_get_tracing_count()
~\Anaconda3\lib\site-packages\tensorflow\python\eager\def_function.py in _call(self, *args, **kwds)
948 # Lifting succeeded, so variables are initialized and we can run the
949 # stateless function.
--> 950 return self._stateless_fn(*args, **kwds)
951 else:
952         _, _, _, filtered_flat_args = \
~\Anaconda3\lib\site-packages\tensorflow\python\eager\function.py in __call__(self, *args, **kwargs)
3021 (graph_function,
3022 filtered_flat_args) = self._maybe_define_function(args, kwargs)
-> 3023 return graph_function._call_flat(
3024 filtered_flat_args, captured_inputs=graph_function.captured_inputs) # pylint: disable=protected-access
3025
~\Anaconda3\lib\site-packages\tensorflow\python\eager\function.py in _call_flat(self, args, captured_inputs, cancellation_manager)
1958 and executing_eagerly):
1959 # No tape is watching; skip to running the function.
-> 1960 return self._build_call_outputs(self._inference_function.call(
1961 ctx, args, cancellation_manager=cancellation_manager))
1962 forward_backward = self._select_forward_and_backward_functions(
~\Anaconda3\lib\site-packages\tensorflow\python\eager\function.py in call(self, ctx, args, cancellation_manager)
589 with _InterpolateFunctionError(self):
590 if cancellation_manager is None:
--> 591 outputs = execute.execute(
592 str(self.signature.name),
593 num_outputs=self._num_outputs,
~\Anaconda3\lib\site-packages\tensorflow\python\eager\execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
57 try:
58 ctx.ensure_initialized()
---> 59 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
60 inputs, attrs, num_outputs)
61 except core._NotOkStatusException as e:
InvalidArgumentError: Incompatible shapes: [32,100,4] vs. [32,4]
[[node gradient_tape/mean_absolute_error/BroadcastGradientArgs (defined at <ipython-input-246-e01fa31bc39d>:1) ]] [Op:__inference_train_function_110609]
Function call stack:
train_function
As mentioned, I suspect this is related to the TimeDistributed layer. In case it helps: the model does run with batch_size=1, but with anything else it does not.
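Why batch_size=1 appears to work comes down to broadcasting: a (1, 4) target silently broadcasts against a (1, 100, 4) output, while (32, 4) against (32, 100, 4) cannot. A minimal NumPy reproduction of the clash (my illustration, not from the original post):

import numpy as np

y_pred = np.zeros((32, 100, 4))  # model output: (batch, timesteps, features)
y_true = np.zeros((32, 4))       # target: (batch, features)

try:
    # Trailing axes align (4 vs 4), but 100 vs 32 cannot broadcast --
    # the same "Incompatible shapes: [32,100,4] vs. [32,4]" as above.
    np.abs(y_pred - y_true)
except ValueError as err:
    print(err)

# With batch_size=1 the 2D target broadcasts over the 100 timesteps,
# so the loss computes, but against a tiled, unintended target.
print(np.abs(np.zeros((1, 100, 4)) - np.zeros((1, 4))).shape)  # (1, 100, 4)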
From the comments:
The network output must match the shape of your target. If you have a 2D target, your network must produce a 2D output, not a 3D one. Simply setting return_sequences=False in the last decoder LSTM produces a 2D output. In the fixed model below, Decoder_2 then returns only its final state of shape (None, 64), and a plain Dense layer (no TimeDistributed wrapper) maps it to (None, 4), matching the target.
def build_model(X):
    '''
    Builds an autoencoder model.
    @params: X input array
    @return: autoencoder full model, encoder model part
    '''
    encoder_inputs = keras.layers.Input(shape=(X.shape[1], X.shape[2]), name='Input_Layer')
    L1 = keras.layers.LSTM(64, return_sequences=True, name='Encoder_1')(encoder_inputs)
    L2 = keras.layers.LSTM(32, return_sequences=True, name='Encoder_2')(L1)
    code = keras.layers.LSTM(2, return_sequences=False, name='code_vector')(L2)
    L3 = keras.layers.RepeatVector(X.shape[1], name='Repeat_Vector')(code)
    L4 = keras.layers.LSTM(32, return_sequences=True, name='Decoder_1')(L3)
    L5 = keras.layers.LSTM(64, name='Decoder_2')(L4)  # return_sequences defaults to False -> 2D output
    decoder_outputs = keras.layers.Dense(X.shape[2], name='Time_Distrubted')(L5)
    encoder = keras.Model(inputs=encoder_inputs, outputs=code, name='Encoder')
    autoencoder = keras.Model(inputs=encoder_inputs, outputs=decoder_outputs, name='Autoencoder')
    return autoencoder, encoder
model, encoder = build_model(trainx)
model.compile('adam', loss='mae')
history = model.fit(x=trainx, y=trainy, epochs=100, validation_split=0.1, batch_size=32, callbacks=callbacks, shuffle=False)
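As a quick sanity check (my addition, not part of the original answer), the fixed model's output shape is now 2D and matches the target:

print(model.output_shape)  # (None, 4)
print(trainy.shape)        # (381, 4)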
(Paraphrased from Marco Cerliani)