Cannot interpret feed_dict key as Tensor when batching and testing



I am new to TensorFlow and trying to do the MNIST example in Python 3.6.

I built the code to watch how the error rate changes over the iterations, but it gives me an error on the feed_dict inside sess.run.

Here is my code:

import tensorflow as tf
import numpy as np
import functools
import sys
sys.path.append('./utils')
from mnist import MNIST

def lazy_property(function):
    attribute = '_cache_' + function.__name__
    @property
    @functools.wraps(function)
    def decorator(self):
        if not hasattr(self, attribute):
            setattr(self, attribute, function(self))
        return getattr(self, attribute)
    return decorator

class Model:
    def __init__(self, image, label):
        self.image = image
        self.label = label
        self.logits
        self.prediction
        self.optimize
        self.error

    @lazy_property
    def logits(self):
        weight = tf.Variable(tf.zeros([img_size_flat, num_classes]))
        #print (img_size_flat)
        biases = tf.Variable(tf.zeros([num_classes]))
        #print (num_classes)
        equation = tf.matmul(self.image, weight) + biases
        return equation

    @lazy_property
    def prediction(self):
        return tf.nn.softmax(self.logits)

    @lazy_property
    def optimize(self):
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.logits, labels=self.label)
        cost = tf.reduce_mean(cross_entropy)
        return tf.train.GradientDescentOptimizer(learning_rate=0.5).minimize(cost)

    @lazy_property
    def error(self):
        y_pred_cls = tf.argmax(self.prediction, axis=1)
        mistakes = tf.not_equal(y_true_cls, y_pred_cls)
        #print(mistakes)
        error_from_acc = tf.reduce_mean(tf.cast(mistakes, tf.float32))
        return error_from_acc

batch_size = 100
num_steps = 1000

tf.reset_default_graph()
data = MNIST(data_dir="data/MNIST/")
img_size_flat = data.img_size_flat
img_shape = data.img_shape
num_classes = data.num_classes

image = tf.placeholder(tf.float32, [None, img_size_flat])
label = tf.placeholder(tf.float32, [None, num_classes])
y_true_cls = tf.placeholder(tf.int64, [None])
#print (y_true_cls)

model = Model(image, label)

with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    for step in range(num_steps):
        error = session.run(model.error, {x: data.x_test, y_true: data.y_test}) # Gives me an error message from HERE!!!!
        x_batch, y_true_batch, _ = data.random_batch(batch_size=batch_size)
        session.run(model.optimize, {x: x_batch, y_true: y_true_batch})
        if (step % 100 == 0):
            print("Error rate @ iter %d : %f" % (step, error))

What am I doing wrong?

Where should I put the feed_dicts ({x: data.x_test, y_true: data.y_test} and {x: x_batch, y_true: y_true_batch})?

Also, is there anything else wrong in my code?

Please help me out here.

Thanks.

You have defined your placeholders like this:

image = tf.placeholder(tf.float32, [None, img_size_flat])
label = tf.placeholder(tf.float32, [None, num_classes])

but then you pass x and y_true as the feed_dict keys to session.run:

session.run(model.error, {x: data.x_test, y_true: data.y_test})

So you need to replace x and y_true with image and label, and you should be fine:

session.run(model.error, {image: data.x_test, label: data.y_test})
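
For completeness, here is a minimal sketch of the corrected training loop. Note that model.error also compares against the y_true_cls placeholder, so the test-set class labels have to be fed as well; the sketch assumes your MNIST helper exposes them as data.y_test_cls (an assumed attribute name, check your utils/mnist.py):

# Minimal sketch of the corrected loop.
# Assumption: the MNIST helper also provides data.y_test_cls (integer class
# labels), which model.error needs via the y_true_cls placeholder.
with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    for step in range(num_steps):
        # Evaluate the test error: feed the placeholders that were actually
        # defined (image, label, y_true_cls), not x / y_true.
        error = session.run(model.error,
                            {image: data.x_test,
                             label: data.y_test,
                             y_true_cls: data.y_test_cls})
        # Train on a random batch.
        x_batch, y_true_batch, _ = data.random_batch(batch_size=batch_size)
        session.run(model.optimize, {image: x_batch, label: y_true_batch})
        if step % 100 == 0:
            print("Error rate @ iter %d : %f" % (step, error))

The key point is that feed_dict keys must be the placeholder tensors themselves (image, label, y_true_cls), not variable names that were never defined.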
