训练卷积自动编码器时'No gradients provided for any variable'



我试图创建一个卷积自动编码器,但我遇到了一个问题 代码如下:

import tensorflow as tf
import numpy as np
import matplotlib.image as mpimg
import matplotlib.pyplot as plt

# Load one image from disk; train on standardized pixels (x) and try to
# reconstruct the raw pixels (y).
img = mpimg.imread('data.jpg')
x = (img-np.mean(img))/np.std(img)
y = img
epochs = 500

def autoencoder(x, weights):
    """Build the conv encoder/decoder graph.

    NOTE(review): the result is published through a `global output` and the
    function never returns it — returning the final tensor would be far
    less fragile.
    """
    global output
    # [x] adds a batch dimension around the single (H, W, 3) image.
    output = tf.nn.conv2d([x], weights[0], strides=[1,1,1,1],padding='SAME')
    output = tf.nn.relu(output)
    # Encoder: three stride-2 convolutions halve H and W each time.
    output = tf.nn.conv2d(output, weights[1], strides=[1,2,2,1],padding='SAME')
    output = tf.nn.relu(output)
    output = tf.nn.conv2d(output, weights[2], strides=[1,2,2,1],padding='SAME')
    output = tf.nn.relu(output)
    output = tf.nn.conv2d(output, weights[3], strides=[1,2,2,1],padding='SAME')
    output = tf.nn.relu(output)
    output = tf.nn.conv2d(output, weights[4], strides=[1,1,1,1],padding='SAME')
    output = tf.nn.relu(output)
    # Decoder: bilinear upsampling interleaved with stride-1 convolutions.
    output = tf.image.resize_images(output, [50, 38])
    output = tf.nn.conv2d(output, weights[5], strides=[1,1,1,1],padding='SAME')
    output = tf.nn.relu(output)
    output = tf.image.resize_images(output, [100, 76])
    output = tf.nn.conv2d(output, weights[6], strides=[1,1,1,1],padding='SAME')
    output = tf.nn.relu(output)
    output = tf.image.resize_images(output, [200, 152])
    output = tf.nn.conv2d(output, weights[7], strides=[1,1,1,1],padding='SAME')

# Eight 5x5 convolution kernels, 3 channels in / 3 channels out each.
weights = [tf.Variable(tf.random_normal([5,5,3,3])),
           tf.Variable(tf.random_normal([5,5,3,3])),
           tf.Variable(tf.random_normal([5,5,3,3])),
           tf.Variable(tf.random_normal([5,5,3,3])),
           tf.Variable(tf.random_normal([5,5,3,3])),
           tf.Variable(tf.random_normal([5,5,3,3])),
           tf.Variable(tf.random_normal([5,5,3,3])),
           tf.Variable(tf.random_normal([5,5,3,3]))]
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for e in range(epochs):
        print('epoch:',e+1)
        # NOTE(review): this rebuilds the whole graph every iteration
        # instead of building it once before the loop.
        autoencoder(tf.cast(x,tf.float32), weights)
        plt.imshow(output.eval()[0])
        plt.savefig(str(e+1)+'.png')
        # BUG: output.eval()[0] is a plain NumPy array, so `cost` has no
        # graph path back to the tf.Variables — this is the direct cause of
        # the "No gradients provided for any variable" error.
        cost = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output.eval()[0],y)))
        # BUG: a fresh AdamOptimizer (with uninitialized slot variables) is
        # created each epoch, and the returned train op is never sess.run().
        tf.train.AdamOptimizer().minimize(cost)

这是错误:

Traceback (most recent call last):
File "D:\Kay\Tensorflow\Session 3\Autoencoder.py", line 56, in <module>
tf.train.AdamOptimizer().minimize(cost)
File "C:\Users\Katharina\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\training\optimizer.py", line 276, in minimize
([str(v) for _, v in grads_and_vars], loss))
ValueError: No gradients provided for any variable, check your graph for ops that do not support gradients, between variables ['Tensor("Variable/read:0", shape=(5, 5, 3, 3), dtype=float32)', 'Tensor("Variable_1/read:0", shape=(5, 5, 3, 3), dtype=float32)', 'Tensor("Variable_2/read:0", shape=(5, 5, 3, 3), dtype=float32)', 'Tensor("Variable_3/read:0", shape=(5, 5, 3, 3), dtype=float32)', 'Tensor("Variable_4/read:0", shape=(5, 5, 3, 3), dtype=float32)', 'Tensor("Variable_5/read:0", shape=(5, 5, 3, 3), dtype=float32)', 'Tensor("Variable_6/read:0", shape=(5, 5, 3, 3), dtype=float32)', 'Tensor("Variable_7/read:0", shape=(5, 5, 3, 3), dtype=float32)'] and loss Tensor("Mean_1:0", shape=(), dtype=float32).

谁能帮我?

你的代码甚至不能按原样运行,但通过一些重写,你会得到实际运行的代码。

import tensorflow as tf
import numpy as np
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import scipy

# Use SciPy's built-in sample image instead of reading from disk;
# [None, :] prepends the batch axis, so img has shape (1, 200, 152, 3).
# NOTE(review): scipy.misc.imresize was removed in SciPy 1.3 and
# scipy.misc.face moved to scipy.datasets in 1.10 — confirm the installed
# SciPy version still provides them.
img = scipy.misc.imresize(scipy.misc.face(), [200, 152])[None, :]
# Standardize the network input; keep raw pixels as the reconstruction target.
x = (img-np.mean(img))/np.std(img)
y = img
epochs = 500
def autoencoder(x, weights):
    """Forward pass of the convolutional autoencoder.

    Args:
        x: float32 image batch of shape (N, 200, 152, 3) — `img` above
           already carries the batch axis via `[None, :]`.
        weights: list of eight 5x5x3x3 convolution kernels.

    Returns:
        The reconstructed batch, shape (N, 200, 152, 3).
    """
    # BUG FIX: the original called tf.nn.conv2d([x], ...), wrapping the
    # already-4-D input in a list and producing a rank-5 tensor that
    # conv2d rejects; pass x through unchanged.
    output = tf.nn.conv2d(x, weights[0], strides=[1,1,1,1], padding='SAME')
    output = tf.nn.relu(output)
    # Encoder: three stride-2 convs, 200x152 -> 100x76 -> 50x38 -> 25x19.
    output = tf.nn.conv2d(output, weights[1], strides=[1,2,2,1], padding='SAME')
    output = tf.nn.relu(output)
    output = tf.nn.conv2d(output, weights[2], strides=[1,2,2,1], padding='SAME')
    output = tf.nn.relu(output)
    output = tf.nn.conv2d(output, weights[3], strides=[1,2,2,1], padding='SAME')
    output = tf.nn.relu(output)
    output = tf.nn.conv2d(output, weights[4], strides=[1,1,1,1], padding='SAME')
    output = tf.nn.relu(output)
    # Decoder: bilinear upsampling interleaved with stride-1 convs.
    output = tf.image.resize_images(output, [50, 38])
    output = tf.nn.conv2d(output, weights[5], strides=[1,1,1,1], padding='SAME')
    output = tf.nn.relu(output)
    output = tf.image.resize_images(output, [100, 76])
    output = tf.nn.conv2d(output, weights[6], strides=[1,1,1,1], padding='SAME')
    output = tf.nn.relu(output)
    output = tf.image.resize_images(output, [200, 152])
    # Final conv is linear (no ReLU) so the reconstruction is unconstrained.
    output = tf.nn.conv2d(output, weights[7], strides=[1,1,1,1], padding='SAME')
    return output
# Eight 5x5 kernels with 3 input/output channels each, random-normal init.
weights = [tf.Variable(tf.random_normal([5,5,3,3])),
           tf.Variable(tf.random_normal([5,5,3,3])),
           tf.Variable(tf.random_normal([5,5,3,3])),
           tf.Variable(tf.random_normal([5,5,3,3])),
           tf.Variable(tf.random_normal([5,5,3,3])),
           tf.Variable(tf.random_normal([5,5,3,3])),
           tf.Variable(tf.random_normal([5,5,3,3])),
           tf.Variable(tf.random_normal([5,5,3,3]))]
# Build the graph ONCE, before the loop: forward pass, MSE loss, train op.
output = autoencoder(tf.cast(x, tf.float32), weights)
# Loss stays a symbolic tensor (not .eval()'d), so gradients can flow from
# it back to the weight Variables — this is what fixes the original
# "No gradients provided" error.
cost = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, y)))
train_op = tf.train.AdamOptimizer().minimize(cost)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for e in range(epochs):
        print('epoch:',e+1)
        # One sess.run both evaluates the reconstruction and applies
        # a single optimizer step.
        output_result, _ = sess.run([output, train_op])

即便如此,它返回的仍是没有意义的值,而且效果也好不到哪里去。

您有几个错误:

  • 在每次迭代中创建新的优化器
  • 您使用的是eval而不是sess.run
  • 你的卷积核非常小,而且每层只有 3 个通道,容量太小了。
  • 你没有加偏置(bias)项

无论如何,下面给出了代码的简短工作版本,但还有很多改进。

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import scipy

# SciPy's sample image, resized; [None, :] prepends the batch axis, so
# img has shape (1, 200, 152, 3).
# NOTE(review): scipy.misc.imresize was removed in SciPy 1.3 — confirm
# the installed SciPy version still provides it.
img = scipy.misc.imresize(scipy.misc.face(), [200, 152])[None, :]
# Standardized input; raw pixels as the reconstruction target.
x = (img-np.mean(img))/np.std(img)
y = img
epochs = 50000

def apply_conv(x, strides=1, filters=32, activation=tf.nn.relu):
    """3x3 'SAME'-padded conv layer with Xavier-initialized kernel.

    Args:
        x: 4-D input tensor (NHWC).
        strides: spatial stride (1 keeps size, 2 halves H and W).
        filters: number of output channels.
        activation: layer activation; pass None for a linear layer
            (used for the final reconstruction layer).

    Returns:
        The conv layer's output tensor.
    """
    # BUG FIX: the original hard-coded activation=tf.nn.relu and ignored
    # the `activation` parameter, so the caller's activation=None on the
    # last layer silently still applied a ReLU. Pass the parameter through.
    return tf.layers.conv2d(x, strides=strides, filters=filters, kernel_size=3,
                            padding='SAME',
                            kernel_initializer=tf.contrib.layers.xavier_initializer(),
                            activation=activation)
def autoencoder(x):
    """Convolutional autoencoder forward pass.

    Encoder: three stride-2 convs downsample 200x152 -> 100x76 -> 50x38
    -> 25x19. Decoder: three bilinear resizes upsample back to 200x152,
    each followed by a stride-1 conv. The final layer projects back to
    3 channels and is requested with activation=None so the output is an
    unconstrained pixel reconstruction.
    """
    output = apply_conv(x, strides=1)
    output = apply_conv(output, strides=2)
    output = apply_conv(output, strides=2)
    output = apply_conv(output, strides=2)
    output = apply_conv(output, strides=1)
    output = tf.image.resize_images(output, [50, 38])
    output = apply_conv(output, strides=1)
    output = tf.image.resize_images(output, [100, 76])
    output = apply_conv(output, strides=1)
    output = tf.image.resize_images(output, [200, 152])
    output = apply_conv(output, strides=1, filters=3, activation=None)
    return output
# Build the graph once: forward pass, MSE loss against raw pixels, Adam step.
output = autoencoder(tf.cast(x, tf.float32))
cost = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, y)))
train_op = tf.train.AdamOptimizer().minimize(cost)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for e in range(epochs):
        print('epoch:',e+1)
        # Fetch the loss alongside the train op to monitor progress.
        output_result, cost_result, _ = sess.run([output, cost, train_op])
        print('cost = {}'.format(cost_result))
        # Show the current reconstruction every 20 epochs.
        if e % 20 == 0:
            plt.imshow(output_result[0].astype('uint8'))
            plt.pause(0.0001)  # wait for plot to show

相关内容

  • 没有找到相关文章

最新更新