ValueError: layer dense的Input 0与layer不兼容:期望轴-1的值为8,但接收到的输入形状



我正在为OpenAI lunarLander-v2环境训练一个模型。我已经使用顺序模型成功地做到了这一点,但是当尝试使用功能模型时,我得到了一些张形状不兼容的错误。以下是代理类的代码,我认为问题与done_list和next_states的形状不兼容有关,但我不确定如何重塑这些张量以使其工作。

class DQAgent(Agent):
def __init__(self, env, config):
Agent.__init__(self, env, config)

self.memory = deque(maxlen=self.config.memory_size)
self.model = self.initialize()
def initialize(self):

inputs = Input(shape=(8,))

dense = Dense(self.config.layer_size * self.config.input_layer_mult, activation = relu)
x = dense(inputs)
x = Dense(self.config.layer_size, activation = relu)(x)

outputs = layers.Dense(self.action_space_size, activation = linear)(x)

model = keras.Model(inputs = inputs, outputs = outputs, name = self.name)
model.compile(loss = mean_squared_error, optimizer = Adam(lr = self.config.learning_rate))
model.summary()
return model
def policyAct(self, state):
predicted_actions = self.model.predict(state)
return np.argmax(predicted_actions[0])
def addToMemory(self, state, action, reward, next_state, done):
self.memory.append((self, state, action, reward, next_state, done))

def sampleFromMemory(self):
sample = np.random.sample(self.memory, self.config.batch_size)
return sample
def extractFromSample(self, sample):
states = np.array([i[0] for i in sample])
actions = np.array([i[1] for i in sample])
rewards = np.array([i[2] for i in sample])
next_states = np.array([i[3] for i in sample])
done_list = np.array([i[4] for i in sample])
states = np.squeeze(states)
next_states = np.squeeze(next_states)


return np.squeeze(states), actions, rewards, next_states, done_list

def updateReplayCount(self):
self.config.replay_counter += 1
self.config.replay_counter = self.replay_counter % self.config.replay_step_size
def learnFromMemory(self):
if len(self.memory) < self.config.batch_size or self.config.replay_counter != 0:
return
if np.mean(self.training_episode_rewards[-10:]) > 100:
return
sample = self.sampleFromMemory()
states, actions, rewards, next_states, done_list = self.extractFromSample(sample)
targets = rewards + self.config.gamma * (np.amax(self.model.predict_on_batch(next_states), 
axis=1)) * (1 - (done_list))

target_vec = self.model.predict_on_batch(states)
indexes = np.array([i for i in range(self.config.batch_size)])
target_vec[[indexes], [actions]] = targets
self.model.fit(states, target_vec, epochs=1, verbose=0)

def save(self, name):
self.model.save(name)
当使用Sequential API而不是function创建模型时,类似的代码可以很好地工作。我对这个很陌生,对SO也是,任何帮助都是非常感谢的。

WARNING:tensorflow:Model是为input Tensor("input_10:0", shape=(None, 8), dtype=float32)用shape (None, 8)构造的,但是在一个不兼容的shape (None, 1)的input上调用。ValueError:层dense_72的输入0与层不兼容:期望输入形状的轴-1的值为8,但接收到的输入形状为[None, 1]

顺序实现的模型,运行时没有问题(代码的其余部分是相同的)

def initialize_model(self):
model = Sequential()


model.add(Dense(self.config.layer_size*self.config.input_layer_mult, input_dim = self.observation_space_dim, activation=relu))


for i in range(self.config.deep_layers):
model.add(Dense(self.config.layer_size, activation=relu))


model.add(Dense(self.action_space_dim, activation=linear))


model.compile(loss=mean_squared_error, optimizer=Adam(lr=self.config.learning_rate))
print(model.summary())

return model

从这里,输入形状应为(1,)

最新更新