TensorFlow - InvalidArgumentError: You must feed a value for a placeholder tensor



Edit: I have identified the following line as the source of the error:

unstacked_observation_outputs = tf.nn.relu(tf.nn.xw_plus_b(unstacked_observation_outputs[
    tf.reduce_max(length(observation_history)).eval(session = self.sess) - 1],
    weights['second_observation'], biases['second_observation']))

Specifically, the .eval() call in tf.reduce_max(length(observation_history)).eval(session = self.sess) - 1 is the problem: it forces the session to run that subgraph while the graph is still being constructed, before any value has been fed to the placeholder.
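If the goal is to pick out the last relevant LSTM output, one way to avoid the construction-time .eval() entirely is to keep the index symbolic and index the stacked lstm_observation_outputs tensor directly instead of the unstacked Python list. A sketch, assuming TensorFlow >= 1.3 (for the axis argument of tf.gather) and that length() returns per-example sequence lengths:

last_index = tf.reduce_max(length(observation_history)) - 1              # stays a tensor
last_output = tf.gather(lstm_observation_outputs, last_index, axis = 1)  # [batch, lstm_observation_units]
unstacked_observation_outputs = tf.nn.relu(tf.nn.xw_plus_b(
    last_output, weights['second_observation'], biases['second_observation']))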

I have two very similar classes, one called Actor() and the other called Critic(). When I instantiate an Actor object as follows:

class Actor(object):
    def __init__(self, sess, observation_dimension, action_dimension, learning_rate, tau):
        self.sess = sess
        self.observation_dimension = observation_dimension
        self.action_dimension = action_dimension
        self.learning_rate = learning_rate
        self.tau = tau
        self.lstm_cell = rnn.BasicLSTMCell(lstm_hidden_units)
        with tf.variable_scope('lstm') as scope:
            self.observation_history, self.action_history = self.depthless_lstm()
            self.network_parameters = tf.trainable_variables()
            scope.reuse_variables()
            self.target_observation_history, self.target_action_history = self.depthless_lstm()
            self.target_network_parameters = tf.trainable_variables()[len(self.network_parameters):]

    def depthless_lstm(self):
        state = tf.placeholder(tf.float32, [None, 3600, self.observation_dimension])
        weights = {
            '0': tf.get_variable('weights_0', [lstm_hidden_units, hidden_units_01],
                initializer = tf.random_normal_initializer()),
            '1': tf.get_variable('weights_1', [hidden_units_01, 1],
                initializer = tf.random_normal_initializer())
        }
        biases = {
            '0': tf.get_variable('biases_0', [hidden_units_01],
                initializer = tf.random_normal_initializer()),
            '1': tf.get_variable('biases_1', [1],
                initializer = tf.random_normal_initializer())
        }
        lstm_outputs, learned_state = tf.nn.dynamic_rnn(self.lstm_cell, state, dtype = tf.float32)
        unstacked_lstm_outputs = tf.unstack(lstm_outputs, axis = 1)
        list_of_hidden_01_outputs = [tf.nn.relu(tf.nn.xw_plus_b(matrix, weights['0'], biases['0']))
            for matrix in unstacked_lstm_outputs]
        actions = [tf.nn.softmax(tf.nn.xw_plus_b(matrix, weights['1'], biases['1']))
            for matrix in list_of_hidden_01_outputs]
        actions = [actions[n] for n in range(len(actions)) if (n % frequency) == 1]
        return state, actions

actor = Actor(sess, observation_dimension, action_dimension, learning_rate, tau)

I do not get any errors. However, when I instantiate a Critic object:

class Critic(object):
    def __init__(self, sess, observation_dimension, action_dimension, learning_rate, tau,
                 model, number_actor_variables):
        self.sess = sess
        self.observation_dimension = observation_dimension
        self.action_dimension = action_dimension
        self.learning_rate = learning_rate
        self.tau = tau
        self.lstm_observation_cell = rnn.BasicLSTMCell(lstm_observation_units)
        with tf.variable_scope('next_lstm') as scope:
            self.observation_history, self.action, self.critique = self.depthless_lstm_critic()
            self.network_parameters = tf.trainable_variables()[number_actor_variables:]
            scope.reuse_variables()
            self.target_observation_history, self.target_action, self.target_critique = self.depthless_lstm_critic()
            self.target_network_parameters = tf.trainable_variables()[(len(self.network_parameters)
                + number_actor_variables):]

    def depthless_lstm_critic(self):
        observation_history = tf.placeholder(tf.float32, [None, 3600, self.observation_dimension])
        action = tf.placeholder(tf.float32, [None, self.action_dimension])
        weights = {
            'first_observation': tf.get_variable('first_observation',
                [self.observation_dimension, observation_hidden_units01],
                initializer = tf.random_normal_initializer()),
            'first_action': tf.get_variable('first_action',
                [self.action_dimension, action_hidden_units01],
                initializer = tf.random_normal_initializer()),
            'second_observation': tf.get_variable('second_observation',
                [lstm_observation_units, observation_hidden_units02],
                initializer = tf.random_normal_initializer()),
            'second_action': tf.get_variable('second_action',
                [action_hidden_units01, action_hidden_units02],
                initializer = tf.random_normal_initializer()),
            'combined_observation': tf.get_variable('combined_observation',
                [observation_hidden_units02, combined_units],
                initializer = tf.random_normal_initializer()),
            'combined_action': tf.get_variable('combined_action',
                [action_hidden_units02, combined_units],
                initializer = tf.random_normal_initializer()),
            'final': tf.get_variable('final', [combined_units, 1],
                initializer = tf.random_normal_initializer())
        }
        biases = {
            'first_observation': tf.get_variable('_first_observation', [observation_hidden_units01],
                initializer = tf.random_normal_initializer()),
            'first_action': tf.get_variable('_first_action', [action_hidden_units01],
                initializer = tf.random_normal_initializer()),
            'second_observation': tf.get_variable('_second_observation', [observation_hidden_units02],
                initializer = tf.random_normal_initializer()),
            'second_action': tf.get_variable('_second_action', [action_hidden_units01],
                initializer = tf.random_normal_initializer()),
            'combined': tf.get_variable('_combined', [combined_units],
                initializer = tf.random_normal_initializer()),
            'final': tf.get_variable('_final', [1],
                initializer = tf.random_normal_initializer())
        }
        unstacked_observation_history = tf.unstack(observation_history, axis = 1)
        unstacked_observation_history = [tf.nn.relu(tf.nn.xw_plus_b(vector,
            weights['first_observation'], biases['first_observation']))
            for vector in unstacked_observation_history]
        learned_action = tf.nn.relu(tf.nn.xw_plus_b(action, weights['first_action'], biases['first_action']))
        stacked_observation_history = tf.stack(unstacked_observation_history, axis = 1)
        lstm_observation_outputs, observation_states = tf.nn.dynamic_rnn(self.lstm_observation_cell,
            stacked_observation_history, dtype = tf.float32,
            sequence_length = length(stacked_observation_history))
        unstacked_observation_outputs = tf.unstack(lstm_observation_outputs, axis = 1)
        # The line identified in the edit above as the source of the error:
        unstacked_observation_outputs = tf.nn.relu(tf.nn.xw_plus_b(unstacked_observation_outputs[
            tf.reduce_max(length(observation_history)).eval(session = self.sess) - 1],
            weights['second_observation'], biases['second_observation']))
        learned_action = tf.nn.relu(tf.nn.xw_plus_b(learned_action, weights['second_action'],
            biases['second_action']))
        combined_results = tf.nn.relu(tf.matmul(weights['combined_observation'],
            unstacked_observation_outputs) + tf.nn.xw_plus_b(learned_action, weights['combined_action'],
            biases['combined']))
        critique = tf.nn.xw_plus_b(combined_results, weights['final'], biases['final'])
        return observation_history, action, critique

critic = Critic(sess, observation_dimension, action_dimension, learning_rate, tau,
                model, number_actor_variables)

I get the following error message:

InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'next_lstm/Placeholder' with dtype float
[[Node: next_lstm/Placeholder = Placeholder[dtype=DT_FLOAT, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]

and the traceback:

Caused by op 'next_lstm/Placeholder', defined at:
  File "test_critic.py", line 89, in <module>
    model, number_actor_variables)
  File "test_critic.py", line 15, in __init__
    self.observation_history, self.action, self.critique = self.depthless_lstm_critic()
  File "test_critic.py", line 23, in depthless_lstm_critic
    observation_history = tf.placeholder(tf.float32, [None, 3600, self.observation_dimension])

The traceback points to the placeholder observation_history. However, that part of the code seems identical to what I wrote for the Actor() class, which produces no error message.

observation_history is a placeholder, which means that while the graph is being built it is an empty node waiting for actual values. You therefore need to feed a value to this placeholder whenever anything that depends on it is evaluated:

Tensor.eval(feed_dict = {self.placeholderA : someValueA, self.placeholderB : someValueB, ...}, session = self.sess)
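For example, a minimal sketch, independent of the classes in the question, showing both the failure and the fix:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 3])
y = tf.reduce_sum(x)

with tf.Session() as sess:
    # y.eval(session = sess)  # fails: You must feed a value for placeholder tensor 'Placeholder'
    print(y.eval(session = sess, feed_dict = {x: [[1.0, 2.0, 3.0]]}))  # prints 6.0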

In your case, you need to feed actual values for observation_history.
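Applied to the line you identified in depthless_lstm_critic, the construction-time .eval() itself needs a feed_dict. A sketch, where some_real_history is a hypothetical numpy array of shape [batch, 3600, observation_dimension] that you would have to supply at construction time:

max_length = tf.reduce_max(length(observation_history)).eval(
    session = self.sess,
    feed_dict = {observation_history: some_real_history})  # some_real_history is hypothetical
unstacked_observation_outputs = tf.nn.relu(tf.nn.xw_plus_b(
    unstacked_observation_outputs[max_length - 1],
    weights['second_observation'], biases['second_observation']))

Whether feeding a real history at graph-construction time makes sense for your training loop is a separate design question; the symbolic-index version sketched under your edit avoids the .eval() entirely.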
