"Not all points are within the bounds of the space" error in Scikit-Optimize



I am trying to run a hyperparameter optimization task on an LSTM model (pure TensorFlow) with the scikit-optimize package, using Bayesian optimization with Gaussian processes (gp_minimize). The demo code provided for that function can be found through this link. When I try to run my code, I keep getting the following error:

ValueError: Not all points are within the bounds of the space.

My full code is shown below:

import skopt
from skopt import gp_minimize, forest_minimize
from skopt.space import Real, Categorical, Integer
from skopt.plots import plot_convergence
from skopt.plots import plot_objective, plot_evaluations
from skopt.utils import use_named_args
import csv
import tensorflow as tf
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from math import sqrt
import atexit
from time import time, strftime, localtime
from datetime import timedelta

input_size = 1
num_layers = 1
hidden1_activation = tf.nn.relu
hidden2_activation = tf.nn.relu
lstm_activation = tf.nn.relu
columns = ['Sales', 'DayOfWeek', 'SchoolHoliday', 'Promo']
features = len(columns)
fileName = None
column_min_max = None

# fileNames = ['store2_1.csv', 'store85_1.csv', 'store259_1.csv', 'store519_1.csv', 'store725_1.csv', 'store749_1.csv', 'store934_1.csv', 'store1019_1.csv']
# column_min_max_all = [[[0, 11000], [1, 7]], [[0, 17000], [1, 7]], [[0, 23000], [1, 7]], [[0, 14000], [1, 7]], [[0, 14000], [1, 7]], [[0, 15000], [1, 7]], [[0, 17000], [1, 7]], [[0, 25000], [1, 7]]]
fileNames = ['store2_1.csv']
column_min_max_all = [[[0, 11000], [1, 7]]]

num_steps = None
lstm_size = None
batch_size = None
init_learning_rate = 0.01
learning_rate_decay = None
init_epoch = None  # 5
max_epoch = None  # 100 or 50
hidden1_nodes = None
hidden2_nodes = None
dropout_rate= None
best_accuracy = 0.0
start = None

lstm_num_steps = Categorical(categories=[2,3,4,5,6,7,8,9,10,11,12,13,14], name ='lstm_num_steps')
size = Categorical(categories=[8,16,32,64,128], name ='size')
lstm_hidden1_nodes = Categorical(categories=[4,8,16,32,64], name= 'lstm_hidden1_nodes')
lstm_hidden2_nodes = Categorical(categories=[2,4,8,16,32],name= 'lstm_hidden2_nodes')
lstm_learning_rate_decay = Categorical(categories=[0.99,0.8,0.7], name='lstm_learning_rate_decay')
lstm_max_epoch = Categorical(categories=[60,50,100,120,150,200], name='lstm_max_epoch')
lstm_init_epoch = Categorical(categories=[5, 10, 15, 20],name='lstm_init_epoch')
lstm_batch_size = Categorical(categories=[5, 8, 16, 30, 31, 64] , name = 'lstm_batch_size')
lstm_dropout_rate = Categorical(categories=[0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9] , name = 'lstm_dropout_rate')

dimensions = [lstm_num_steps, size,lstm_hidden1_nodes, lstm_hidden2_nodes,lstm_init_epoch,lstm_max_epoch,lstm_learning_rate_decay,lstm_batch_size, lstm_dropout_rate]
default_parameters = [5,35,30,15,5,60,0.99,8,0.1]

# def log_dir_name(lstm_num_steps, size,lstm_hidden1_nodes, lstm_hidden2_nodes,lstm_learning_rate,lstm_init_epoch,lstm_max_epoch,lstm_learning_rate_decay,lstm_batch_size):
#
#     # The dir-name for the TensorBoard log-dir.
#     s = "./19_logs/{1}_{2}_{3}_{4}_{5}_{6}_{7}_{8}_{9}/"
#
#     # Insert all the hyper-parameters in the dir-name.
#     log_dir = s.format(lstm_num_steps, size,lstm_hidden1_nodes, lstm_hidden2_nodes,lstm_learning_rate,lstm_init_epoch,lstm_max_epoch,lstm_learning_rate_decay,lstm_batch_size)
#
#     return log_dir
def secondsToStr(elapsed=None):
    if elapsed is None:
        return strftime("%Y-%m-%d %H:%M:%S", localtime())
    else:
        return str(timedelta(seconds=elapsed))

def log(s, elapsed=None):
    line = "=" * 40
    print(line)
    print(secondsToStr(), '-', s)
    if elapsed:
        print("Elapsed time:", elapsed)
    print(line)
    print()

def endlog():
    end = time()
    elapsed = end - start
    log("End Program", secondsToStr(elapsed))

def generate_batches(train_X, train_y, batch_size):
    num_batches = int(len(train_X)) // batch_size
    if batch_size * num_batches < len(train_X):
        num_batches += 1
    batch_indices = range(num_batches)
    for j in batch_indices:
        batch_X = train_X[j * batch_size: (j + 1) * batch_size]
        batch_y = train_y[j * batch_size: (j + 1) * batch_size]
        # assert set(map(len, batch_X)) == {num_steps}
        yield batch_X, batch_y

def segmentation(data):
    seq = [price for tup in data[columns].values for price in tup]
    seq = np.array(seq)

    # split into items of features
    seq = [np.array(seq[i * features: (i + 1) * features])
           for i in range(len(seq) // features)]

    # split into groups of num_steps
    X = np.array([seq[i: i + num_steps] for i in range(len(seq) - num_steps)])
    y = np.array([seq[i + num_steps] for i in range(len(seq) - num_steps)])

    # keep only the sales value as the target
    y = [[y[i][0]] for i in range(len(y))]
    y = np.asarray(y)

    return X, y

def scale(data):
    for i in range(len(column_min_max)):
        data[columns[i]] = (data[columns[i]] - column_min_max[i][0]) / ((column_min_max[i][1]) - (column_min_max[i][0]))
    return data

def rescle(test_pred):
    prediction = [(pred * (column_min_max[0][1] - column_min_max[0][0])) + column_min_max[0][0] for pred in test_pred]
    return prediction

def pre_process():
    store_data = pd.read_csv(fileName)
    # sftp://wso2@192.168.32.11/home/wso2/suleka/salesPred/store2_1.csv

    store_data = store_data.drop(store_data[(store_data.Open == 0) & (store_data.Sales == 0)].index)
    # store_data = store_data.drop(store_data[(store_data.Open != 0) & (store_data.Sales == 0)].index)

    # ---for segmenting original data --------------------------------
    original_data = store_data.copy()
    ## train_size = int(len(store_data) * (1.0 - test_ratio))

    validation_len = len(store_data[(store_data.Month == 6) & (store_data.Year == 2015)].index)
    test_len = len(store_data[(store_data.Month == 7) & (store_data.Year == 2015)].index)
    train_size = int(len(store_data) - (validation_len + test_len))

    train_data = store_data[:train_size]
    validation_data = store_data[(train_size - num_steps): validation_len + train_size]
    test_data = store_data[((validation_len + train_size) - num_steps):]
    original_val_data = validation_data.copy()
    original_test_data = test_data.copy()

    # -------------- processing train data ---------------------------------------
    scaled_train_data = scale(train_data)
    train_X, train_y = segmentation(scaled_train_data)

    # -------------- processing validation data ----------------------------------
    scaled_validation_data = scale(validation_data)
    val_X, val_y = segmentation(scaled_validation_data)

    # -------------- processing test data ----------------------------------------
    scaled_test_data = scale(test_data)
    test_X, test_y = segmentation(scaled_test_data)

    # ---- segmenting original validation data -----------------------------------
    nonescaled_val_X, nonescaled_val_y = segmentation(original_val_data)

    # ---- segmenting original test data ------------------------------------------
    nonescaled_test_X, nonescaled_test_y = segmentation(original_test_data)

    return train_X, train_y, test_X, test_y, val_X, val_y, nonescaled_test_y, nonescaled_val_y

def setupRNN(inputs):
    cell = tf.contrib.rnn.LSTMCell(lstm_size, state_is_tuple=True, activation=lstm_activation)
    val1, _ = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    val = tf.transpose(val1, [1, 0, 2])
    last = tf.gather(val, int(val.get_shape()[0]) - 1, name="last_lstm_output")

    # hidden layers
    hidden1 = tf.layers.dense(last, units=hidden1_nodes, activation=hidden2_activation)
    hidden2 = tf.layers.dense(hidden1, units=hidden2_nodes, activation=hidden1_activation)
    dropout = tf.layers.dropout(hidden2, rate=dropout_rate, training=True)

    # output layer
    weight = tf.Variable(tf.truncated_normal([hidden2_nodes, input_size]))
    bias = tf.Variable(tf.constant(0.1, shape=[input_size]))
    prediction = tf.matmul(dropout, weight) + bias

    return prediction

# saver = tf.train.Saver()
# saver.save(sess, "checkpoints_sales/sales_pred.ckpt")


@use_named_args(dimensions=dimensions)
def fitness(lstm_num_steps, size, lstm_hidden1_nodes, lstm_hidden2_nodes, lstm_init_epoch, lstm_max_epoch,
            lstm_learning_rate_decay, lstm_batch_size, lstm_dropout_rate):

    global num_steps, lstm_size, hidden2_nodes, hidden2_activation, hidden1_activation, hidden1_nodes, lstm_activation, init_epoch, max_epoch, learning_rate_decay, dropout_rate

    num_steps = lstm_num_steps
    lstm_size = size
    batch_size = lstm_batch_size
    learning_rate_decay = lstm_learning_rate_decay
    init_epoch = lstm_init_epoch
    max_epoch = lstm_max_epoch
    hidden1_nodes = lstm_hidden1_nodes
    hidden2_nodes = lstm_hidden2_nodes
    dropout_rate = lstm_dropout_rate

    # log_dir = log_dir_name(lstm_num_steps, size, lstm_hidden1_nodes, lstm_hidden2_nodes, lstm_learning_rate, lstm_init_epoch, lstm_max_epoch,
    #                        lstm_learning_rate_decay, lstm_batch_size)

    train_X, train_y, test_X, test_y, val_X, val_y, nonescaled_test_y, nonescaled_val_y = pre_process()

    inputs = tf.placeholder(tf.float32, [None, num_steps, features], name="inputs")
    targets = tf.placeholder(tf.float32, [None, input_size], name="targets")
    learning_rate = tf.placeholder(tf.float32, None, name="learning_rate")

    prediction = setupRNN(inputs)

    with tf.name_scope('loss'):
        model_loss = tf.losses.mean_squared_error(targets, prediction)

    with tf.name_scope('adam_optimizer'):
        train_step = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)

    # with tf.name_scope('accuracy'):
    #     correct_prediction = tf.sqrt(tf.losses.mean_squared_error(prediction, targets))
    #     accuracy = correct_prediction

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    learning_rates_to_use = [
        init_learning_rate * (
            learning_rate_decay ** max(float(i + 1 - init_epoch), 0.0)
        ) for i in range(max_epoch)]

    for epoch_step in range(max_epoch):
        current_lr = learning_rates_to_use[epoch_step]

        for batch_X, batch_y in generate_batches(train_X, train_y, batch_size):
            train_data_feed = {
                inputs: batch_X,
                targets: batch_y,
                learning_rate: current_lr,
            }
            sess.run(train_step, train_data_feed)

    val_data_feed = {
        inputs: val_X,
        targets: val_y,
        learning_rate: 0.0,
    }

    pred = sess.run(prediction, val_data_feed)

    pred_vals = rescle(pred)
    pred_vals = np.array(pred_vals)
    pred_vals = pred_vals.flatten()
    pred_vals = pred_vals.tolist()

    nonescaled_y = nonescaled_val_y.flatten()
    nonescaled_y = nonescaled_y.tolist()

    val_accuracy = sqrt(mean_squared_error(nonescaled_y, pred_vals))

    global best_accuracy

    if val_accuracy < best_accuracy:
        # Save the new model to hard disk.
        saver = tf.train.Saver()
        saver.save(sess, "checkpoints_sales/sales_pred.ckpt")

        with open("best_configs.csv", "a") as f:
            writer = csv.writer(f)
            writer.writerows(zip([fileName], [num_steps], [lstm_size], [hidden2_nodes], [hidden2_activation], [hidden1_activation], [hidden1_nodes], [lstm_size], [lstm_activation], [init_epoch], [max_epoch], [learning_rate_decay], [dropout_rate], [val_accuracy]))

        # Update the best validation RMSE seen so far.
        best_accuracy = val_accuracy

    # Reset the graph, otherwise new nodes keep being added to the same
    # TensorFlow graph each time a model is built with a different set
    # of hyper-parameters.
    sess.close()
    tf.reset_default_graph()

    # NOTE: scikit-optimize performs minimization, so it looks for the set of
    # hyper-parameters with the LOWEST fitness value. val_accuracy here is the
    # validation RMSE (lower is better), so it can be returned directly.
    return val_accuracy

if __name__ == '__main__':

    start = time()

    for i in range(len(fileNames)):

        fileName = '{}{}'.format('home/suleka/Documents/sales_prediction/', fileNames[i])
        # /home/suleka/Documents/sales_prediction/

        column_min_max = column_min_max_all[i]

        # Bayesian optimization using Gaussian Processes.
        # acq_func -> https://arxiv.org/pdf/1807.02811.pdf
        search_result = gp_minimize(func=fitness,
                                    dimensions=dimensions,
                                    acq_func='EI',  # Expected Improvement.
                                    n_calls=40,
                                    x0=default_parameters)

    atexit.register(endlog)
    log("Start Program")

The full stack trace is shown below:

/home/wso2/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
  from ._conv import register_converters as _register_converters
auto_LSTM_skopt.py:138: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead

See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
  data[columns[i]] = (data[columns[i]] - column_min_max[i][0]) / ((column_min_max[i][1]) - (column_min_max[i][0]))
/home/wso2/anaconda3/lib/python3.6/site-packages/tensorflow/python/ops/gradients_impl.py:100: UserWarning: Converting sparse IndexedSlices to a dense Tensor of unknown shape. This may consume a large amount of memory.
  "Converting sparse IndexedSlices to a dense Tensor of unknown shape. "
Traceback (most recent call last):
  File "auto_LSTM_skopt.py", line 365, in <module>
    x0=default_parameters)
  File "/home/wso2/anaconda3/lib/python3.6/site-packages/skopt/optimizer/gp.py", line 228, in gp_minimize
    callback=callback, n_jobs=n_jobs)
  File "/home/wso2/anaconda3/lib/python3.6/site-packages/skopt/optimizer/base.py", line 240, in base_minimize
    result = optimizer.tell(x0, y0)
  File "/home/wso2/anaconda3/lib/python3.6/site-packages/skopt/optimizer/optimizer.py", line 432, in tell
    check_x_in_space(x, self.space)
  File "/home/wso2/anaconda3/lib/python3.6/site-packages/skopt/utils.py", line 186, in check_x_in_space
    raise ValueError("Not all points are within the bounds of"
ValueError: Not all points are within the bounds of the space.

The problem is with the size dimension. Every value in default_parameters has to lie inside the corresponding dimension of the search space, otherwise skopt throws the "Not all points are within the bounds of the space" error.

You currently have:

size = Categorical(categories=[8,16,32,64,128], name ='size')

And in your default parameters:

default_parameters = [5,35,30,15,5,60,0.99,8,0.1]

The second item (which stands for size) has the value 35, which is not one of the categories the size dimension is allowed to search over.
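
A stripped-down sketch reproduces the same rejection; the toy objective and the single dimension below are purely illustrative, and it assumes the same skopt behaviour shown in your traceback:

from skopt import gp_minimize
from skopt.space import Categorical

toy_dims = [Categorical(categories=[8, 16, 32, 64, 128], name='size')]

def toy_objective(x):
    # dummy objective; its value is irrelevant for this demonstration
    return 0.0

# 35 is not one of the allowed categories, so skopt rejects the starting point:
# ValueError: Not all points are within the bounds of the space.
gp_minimize(func=toy_objective, dimensions=toy_dims, n_calls=12, x0=[35])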

Fix 1. Include 35 in the size space:

size = Categorical(categories=[8,16,32,35,64,128], name ='size')

Fix 2. Change the 35 in default_parameters to 32:

default_parameters = [5,32,30,15,5,60,0.99,8,0.1]

With either of the fixes above, your code will run like a charm :)
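
If you want to catch this kind of mismatch before a long training run starts, you can also validate the starting point against every Categorical dimension up front; check_defaults below is only an illustrative helper, not part of skopt:

def check_defaults(defaults, dims):
    # report every default value that is not among its dimension's categories
    for value, dim in zip(defaults, dims):
        if value not in dim.categories:
            print('{}={} is not one of {}'.format(dim.name, value, list(dim.categories)))

check_defaults(default_parameters, dimensions)
# with the original default_parameters above, this reports the size=35 mismatch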
