How to use the placeholder function with TensorFlow 2.0 in Python 3.7.9



I am following this guy's tutorial: https://www.youtube.com/watch?v=PwAGxqrXSCs&list=PLQVvvaa0QuDfKTOs3Keq_kaG2P55YRn5v&index=47, and I have now run into an error. The program tells me: "AttributeError: module 'tensorflow' has no attribute 'placeholder'". I know the placeholder function was removed in TensorFlow 2.0 and later, but I cannot find another way to make my script work, since I have only just started learning deep learning! Any help is much appreciated!

Here is my code (I use PyCharm as my IDE; I don't know whether that is the source of the error):

import tensorflow as tf

mnist = tf.keras.datasets.mnist

# Hidden layers
n_nodes_hl1 = 500  # Can be different from the others (they do not HAVE to be the same number)
n_nodes_hl2 = 500
n_nodes_hl3 = 500
n_classes = 10
batch_size = 100

# Input data
# Height x width
x = tf.placeholder('float', [None, 784])
y = tf.placeholder('float')

def neural_network_model(data):
    # (input_data * weights) + biases
    # Why do we need biases?
    # If input_data = 0 and weights = (ANYTHING) and there were no bias, no neuron would ever fire,
    # because of course (0 * (ANYTHING)) = 0

    hidden_1_layer = {"weights": tf.Variable(tf.random.normal([784, n_nodes_hl1])),
                      "biases": tf.Variable(tf.random.normal([n_nodes_hl1]))}
    hidden_2_layer = {"weights": tf.Variable(tf.random.normal([n_nodes_hl1, n_nodes_hl2])),
                      "biases": tf.Variable(tf.random.normal([n_nodes_hl2]))}
    hidden_3_layer = {"weights": tf.Variable(tf.random.normal([n_nodes_hl2, n_nodes_hl3])),
                      "biases": tf.Variable(tf.random.normal([n_nodes_hl3]))}
    output_layer = {"weights": tf.Variable(tf.random.normal([n_nodes_hl3, n_classes])),
                    "biases": tf.Variable(tf.random.normal([n_classes]))}

    # (input_data * weights) + biases
    # This is the weighted sum for l1 (layer 1)
    l1 = tf.add(tf.matmul(data, hidden_1_layer["weights"]), hidden_1_layer["biases"])
    # Threshold function (will the neuron fire?) [relu] = rectified linear
    l1 = tf.nn.relu(l1)
    # Weighted sum for l2
    l2 = tf.add(tf.matmul(l1, hidden_2_layer["weights"]), hidden_2_layer["biases"])
    l2 = tf.nn.relu(l2)
    # Weighted sum for l3
    l3 = tf.add(tf.matmul(l2, hidden_3_layer["weights"]), hidden_3_layer["biases"])
    l3 = tf.nn.relu(l3)
    # Weighted sum for the output layer
    output = tf.matmul(l3, output_layer["weights"]) + output_layer["biases"]
    return output

def train_neural_network(x):
    prediction = neural_network_model(x)
    # Calculates the difference between the prediction and the known label
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    # Now we know what the cost is, LET'S MINIMIZE IT!
    #                                   learning_rate = 0.001
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    # How many epochs do we want? (how many cycles of feed forward + backprop)
    hm_epochs = 100
    with tf.compat.v1.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # This trains:
        for epoch in range(hm_epochs):
            epoch_loss = 0
            for _ in range(int(mnist.train.num_examples / batch_size)):
                epoch_x, epoch_y = mnist.train.next_batch(batch_size)
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c
            print("Epoch", epoch, "completed out of", hm_epochs, "loss:", epoch_loss)

        # This checks whether the machine gets things right:
        # is the index of the maximum value identical to the machine's prediction?
        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, "float"))
        print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))


train_neural_network(x)

In TensorFlow 2 you can use tf.compat.v1.placeholder(), but it is not compatible with eager execution or tf.function.
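A minimal sketch of that compatibility route (disabling v2 behavior is one common way to keep TF1-style graph code running; the rest of the question's script is assumed unchanged):

import tensorflow as tf

# Disable TF2 behavior (this also turns off eager execution,
# so sessions, placeholders, and feed_dicts work again).
tf.compat.v1.disable_v2_behavior()

x = tf.compat.v1.placeholder('float', [None, 784])
y = tf.compat.v1.placeholder('float')

# The rest of the script can then keep its tf.compat.v1.Session() block,
# with tf.train.AdamOptimizer written as tf.compat.v1.train.AdamOptimizer and
# tf.global_variables_initializer as tf.compat.v1.global_variables_initializer.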

If you want to define the inputs explicitly, see the Keras functional API for how to replace tf.compat.v1.placeholder with tf.keras.Input.

For example,

x = tf.keras.Input(shape=(32,))
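Applied to the network in the question (the 500-unit hidden layers, 10 classes, and batch size of 100 come from the question's code; the epoch count here is arbitrary), a rough sketch of the same model in the functional API could look like this:

import tensorflow as tf

# Load MNIST through Keras instead of the removed TF1 input pipeline.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784).astype("float32") / 255.0
x_test = x_test.reshape(-1, 784).astype("float32") / 255.0

# tf.keras.Input plays the role of the old placeholder.
inputs = tf.keras.Input(shape=(784,))
l1 = tf.keras.layers.Dense(500, activation="relu")(inputs)
l2 = tf.keras.layers.Dense(500, activation="relu")(l1)
l3 = tf.keras.layers.Dense(500, activation="relu")(l2)
outputs = tf.keras.layers.Dense(10)(l3)  # raw logits, as in the question

model = tf.keras.Model(inputs=inputs, outputs=outputs)
model.compile(optimizer=tf.keras.optimizers.Adam(),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=["accuracy"])

# compile/fit replace the manual cost, optimizer, and session loop.
model.fit(x_train, y_train, batch_size=100, epochs=10)
model.evaluate(x_test, y_test)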
