类内部的张量变量与外部一个不同



我正在尝试使用TensorFlow训练一个ANN模型。目前,我能够以一连串的脚本语句运行该程序。但是,现在我想把代码转换成更易于使用的形式,因此我把代码封装成了一个类。这是我所做的(基本上是把整套代码复制到类里面)。

import os

import numpy as np
import tensorflow as tf
class NNmodel:
    """A simple fully-connected feed-forward network on TensorFlow 1.x.

    The whole graph (input/output placeholders, per-layer weights and
    biases, MSE cost and Adam optimizer) is built once in ``__init__``.
    ``findVal`` evaluates an arbitrary tensor of the graph, ``optTF``
    runs the training loop.

    Callers must feed the placeholders that live *inside* the model
    (``model.d`` and ``model.dOut``); creating fresh external
    ``tf.placeholder``s produces unconnected graph nodes.
    """

    def __init__(self,
                 layers, inpShape, outShape,
                 features,
                 learning_rate=0.1, nSteps=100,
                 saveFolder='models'):
        """Build the computation graph.

        Parameters
        ----------
        layers : list of dict
            One dict per layer with keys ``'size'`` (int) and
            ``'activation'`` (``'tanh'``, ``'relu'``, ``'sigmoid'``;
            any other value, e.g. ``'linear'``, means no activation).
        inpShape, outShape : tuple
            Static shapes of the input / output placeholders.
        features : int
            Number of input features (columns of the input matrix).
        learning_rate : float
            Adam learning rate.
        nSteps : int
            Number of optimization steps performed by ``optTF``.
        saveFolder : str
            Directory used when saving model checkpoints.
        """
        self.layers = layers
        self.features = features
        self.learning_rate = learning_rate
        self.saveFolder = saveFolder
        # BUG FIX: the original hard-coded 100 here, silently ignoring
        # the nSteps argument.
        self.nSteps = nSteps

        # Placeholders owned by the model: feed {self.d: ..., self.dOut: ...}.
        self.d    = tf.placeholder(shape=inpShape, dtype=tf.float32, name='d')     # input layer
        self.dOut = tf.placeholder(shape=outShape, dtype=tf.float32, name='dOut')  # output layer

        self.weights = []
        self.biases  = []
        self.compute = [self.d]

        # Chain of layer widths: features -> layer sizes.
        layerSizes = [self.features] + [l['size'] for l in self.layers]
        for i, (v1, v2) in enumerate(zip(layerSizes, layerSizes[1:])):
            self.weights.append(tf.Variable(
                np.random.randn(v1, v2) * 0.1,
                dtype=tf.float32, name='W{}'.format(i)))
            # NOTE(review): a (1, 1) bias broadcasts one shared scalar over
            # all units of the layer; kept for behavioral compatibility —
            # a (1, v2) bias would give every unit its own bias.
            self.biases.append(tf.Variable(
                np.zeros((1, 1)), dtype=tf.float32, name='b{}'.format(i)))
            self.compute.append(
                tf.matmul(self.compute[-1], self.weights[i]) + self.biases[i])

            activation = self.layers[i]['activation']
            if activation == 'tanh':
                self.compute.append(tf.tanh(self.compute[-1]))
            elif activation == 'relu':
                self.compute.append(tf.nn.relu(self.compute[-1]))
            elif activation == 'sigmoid':
                self.compute.append(tf.sigmoid(self.compute[-1]))
            # any other activation string leaves the layer linear

        self.result = self.compute[-1]
        self.delta  = self.dOut - self.result
        self.cost   = tf.reduce_mean(self.delta ** 2)
        self.optimizer = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate).minimize(self.cost)

    def findVal(self, func, inpDict, restorePt=None):
        """Evaluate tensor ``func`` feeding ``inpDict`` into the graph.

        Parameters
        ----------
        func : tf.Tensor
            Any tensor of this model's graph (e.g. ``self.result``).
        inpDict : dict
            Feed dict mapping ``self.d`` / ``self.dOut`` to numpy arrays.
        restorePt : str or None
            Checkpoint directory to restore from; ``None`` runs with
            freshly initialized variables.

        Returns
        -------
        The evaluated value, or ``None`` if restoring failed.
        """
        saver = tf.train.Saver()
        # BUG FIX: the original leaked the session on the early-return
        # path; the context manager guarantees it is closed.
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            if restorePt is not None:
                try:
                    saver.restore(sess, tf.train.latest_checkpoint(restorePt))
                    print('Session restored')
                except Exception:
                    print('Unable to restore the session ...')
                    return None
            else:
                print('Warning, no restore point selected ...')
            return sess.run(func, feed_dict=inpDict)

    def optTF(self, inpDict, printSteps=50, modelFile=None):
        """Run ``self.nSteps`` Adam steps; optionally save a checkpoint.

        Parameters
        ----------
        inpDict : dict
            Feed dict mapping ``self.d`` / ``self.dOut`` to numpy arrays.
        printSteps : int
            Print the step index every ``printSteps`` iterations.
        modelFile : str or None
            Checkpoint file name inside ``self.saveFolder``; ``None``
            skips saving.

        Returns
        -------
        (cost, result) : the per-step cost history and the final
        network output for the fed input.
        """
        cost = []
        saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            print('x' * 100)
            for i in range(self.nSteps):
                # First run the optimizer ...
                sess.run(self.optimizer, feed_dict=inpDict)
                # ... then record the cost for this step.
                c = sess.run(self.cost, feed_dict=inpDict)
                cost.append(c)
                if (i % printSteps) == 0:
                    print('{:5d}'.format(i))
            # BUG FIX: the original called self.run(...), a method that
            # does not exist — the session, not the model, runs tensors.
            result = sess.run(self.result, feed_dict=inpDict)
            if modelFile is not None:
                path = saver.save(sess, os.path.join(
                    self.saveFolder, modelFile))
                print('Model saved in: {}'.format(path))
            else:
                print('Warning! model not saved')
        return cost, result

当我使用此模型时,我发现似乎有一个问题:

# --- Demo: build and evaluate an NNmodel on random data -----------------
N        = 500
features = 2
nSteps   = 1000
# NOTE: the original first built X with np.random.randint scaling and then
# immediately overwrote it — the dead assignment has been removed.
X = np.array([np.random.random(N), np.random.random(N)])
# Inputs are the (N, features) matrix; the target is the first feature.
data = [X.T, X[0].reshape(-1, 1)]
layers = [
    {'name': '6', 'size': 10, 'activation': 'tanh'},
    {'name': '7', 'size': 1, 'activation': 'linear'},
]
m1 = NNmodel(layers, inpShape=np.shape(data[0]), outShape=np.shape(data[1]),
             features=features,
             learning_rate=0.1, nSteps=100,
             saveFolder='models1')
# BUG FIX: feed the placeholders that live INSIDE the model. The original
# created brand-new tf.placeholder nodes here; those are disconnected from
# the graph that computes m1.result, so feeding them does nothing.
m1.findVal(m1.result, {m1.d: data[0], m1.dOut: data[1]})

现在看来,问题在于我在外部新建并提供的占位符 d 和 dOut,与模型内部已经存在的 self.d 和 self.dOut 并不是同一组张量,二者之间存在不匹配。我该如何解决这个问题?

为什么不只是在模型中使用占位符?

m1.findVal(m1.result, {m1.d: data[0], m1.dOut:data[1]})

相关内容

最新更新