"TypeError:"float"对象在编写简单 GAN 时无法解释为 Python 3.7.1 中的整数"错误



I am completely new to Python and programming. I am trying to write a simple GAN using a Keras dataset (see the hyperlink to the tutorial below). I get two warnings, followed by an error:

TypeError: 'float' object cannot be interpreted as an integer.

Any help would be greatly appreciated.

Details

Python 3.7.1, macOS High Sierra 10.13.6. I am writing the Python code in IDLE and running the program from Terminal.

Error

WARNING:tensorflow:From /users/darren/miniconda3/lib/python3.7/site-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.

Instructions for updating: Colocations handled automatically by placer.

WARNING:tensorflow:From /users/darren/miniconda3/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.

Instructions for updating: Please use rate instead of keep_prob. Rate should be set to rate = 1 - keep_prob.

------------------------------------------------------

Traceback (most recent call last):

  File "gan.py", line 91, in <module>
    train(400, 128)
  File "gan.py", line 75, in train
    for _ in tqdm(range(batch_count)):
TypeError: 'float' object cannot be interpreted as an integer
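For reference, the error is easy to reproduce on its own: in Python 3, dividing two integers with / always produces a float, and range() only accepts integers. A minimal, illustrative snippet (60000 and 128 are simply the MNIST training-set size and the batch size used in the code below):

batch_size = 128
num_samples = 60000                        # MNIST training-set size

batch_count = num_samples / batch_size     # / is float division in Python 3 -> 468.75
print(type(batch_count))                   # <class 'float'>

# range() only accepts integers, so the next line raises the same error:
# for _ in range(batch_count):
#     TypeError: 'float' object cannot be interpreted as an integer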

Code:

import os
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from keras.layers import Input
from keras.models import Model, Sequential
from keras.layers.core import Dense, Dropout
from keras.layers.advanced_activations import LeakyReLU
from keras.datasets import mnist
from keras.optimizers import Adam
from keras import initializers
os.environ["KERAS_BACKEND"] = "tensorflow"
np.random.seed(10)
random_dim = 100
def load_minst_data():
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        x_train = (x_train.astype(np.float32) - 127.5)/127.5
        x_train = x_train.reshape(60000, 784)
        return (x_train, y_train, x_test, y_test)
def get_optimizer():
        return Adam(lr=0.0002, beta_1=0.5)
def get_generator(optimizer):
        generator = Sequential()
        generator.add(Dense(256, input_dim=random_dim, kernel_initializer=initializers.RandomNormal(stddev=0.02)))
        generator.add(LeakyReLU(0.2))
        generator.add(Dense(512))
        generator.add(LeakyReLU(0.2))
        generator.add(Dense(1024))
        generator.add(LeakyReLU(0.2))
        generator.add(Dense(784, activation='tanh'))
        generator.compile(loss='binary_crossentropy', optimizer=optimizer)
        return generator
def get_discriminator(optimizer):
        discriminator = Sequential()
        discriminator.add(Dense(1024, input_dim=784, kernel_initializer=initializers.RandomNormal(stddev=0.02)))
        discriminator.add(LeakyReLU(0.2))
        discriminator.add(Dropout(0.3))
        discriminator.add(Dense(512))
        discriminator.add(LeakyReLU(0.2))
        discriminator.add(Dropout(0.3))
        discriminator.add(Dense(256))
        discriminator.add(LeakyReLU(0.2))
        discriminator.add(Dropout(0.3))
        discriminator.add(Dense(1, activation='sigmoid'))
        discriminator.compile(loss='binary_crossentropy', optimizer=optimizer)
        return discriminator
def get_gan_network(discriminator, random_dim, generator, optimizer):
        discriminator.trainable = False
        gan_input = Input(shape=(random_dim,))
        x = generator(gan_input)
        gan_output = discriminator(x)
        gan = Model(inputs=gan_input, outputs=gan_output)
        gan.compile(loss='binary_crossentropy', optimizer=optimizer)
        return gan
def plot_generated_images(epoch, generator, examples=100, dim=(10, 10), figsize=(10, 10)):
        noise = np.random.normal(0, 1, size=[examples, random_dim])
        generated_images = generator.predict(noise)
        generated_images = generated_images.reshape(examples, 28, 28)
        plt.figure(figsize=figsize)
        for i in range(generated_images.shape[0]):
                plt.subplot(dim[0], dim[1], i+1) 
                plt.imshow(generated_images[i], interpolation='nearest', cmap='gray_r')
                plt.axis('off')
        plt.tight_layout()
        plt.savefig('gan_generated_image_epoch_%d.png' % epoch)
def train(epochs=1, batch_size=128):
        x_train, y_train, x_test, y_test = load_minst_data()
        batch_count = x_train.shape[0] / batch_size
        adam = get_optimizer()
        generator = get_generator(adam)
        discriminator = get_discriminator(adam)
        gan = get_gan_network(discriminator, random_dim, generator, adam)
        for e in range(1, epochs+1):
                print ('-'*15, 'Epoch %d' % e, '-'*15)
                for _ in tqdm(range(batch_count)):
                        noise = np.random.normal(0, 1, size=[batch_size, random_dim])
                        image_batch = x_train[np.random.randint(0, x_train.shape[0], size=batch_size)]
                        generated_images = generator.predict(noise)
                        X = np.concatenate([image_batch, generated_images])
                        y_dis = np.zeros(2*batch_size)
                        y_dis[:batch_size] = 0.9
                        discriminator.trainable = True
                        discriminator.train_on_batch(X, y_dis)
                        noise = np.random.normal(0, 1, size=[batch_size, random_dim])
                        y_gen = np.ones(batch_size)
                        discriminator.trainable = False
                        gan.train_on_batch(noise, y_gen)
                if e == 1 or e % 20 == 0:
                        plot_generated_images(e, generator)
if __name__ == '__main__':
        train(400, 128)

Link to tutorial

The problem comes from the line x_train.shape[0] / batch_size. In Python 3, the / operator performs float division rather than integer division, so batch_count ends up as a float, and range() cannot take a float argument.

If you want the result to be an integer, use the // operator instead, which performs integer (floor) division:

x_train.shape[0] // batch_size
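As a quick illustration (plain Python, using the 60000-image MNIST training-set size and the batch_size of 128 from the question):

x_samples = 60000                # x_train.shape[0] for MNIST
batch_size = 128

print(x_samples / batch_size)    # 468.75 -> float, range() rejects it
print(x_samples // batch_size)   # 468    -> int, range() accepts it

batch_count = x_samples // batch_size
for _ in range(batch_count):     # no TypeError now
        pass

int(x_train.shape[0] / batch_size) would also work, but // is the usual idiom for integer division.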
