TensorFlow in Android: You must feed a value for placeholder tensor 'Placeholder' with dtype float

I'm having trouble importing a TensorFlow model into an Android Studio app. I built a model, froze it, and optimized the frozen model in Python, and now I'm trying to use it in an Android app, but it keeps returning the same error: that I must feed a value for a placeholder. It happens in the Android app when the inferenceInterface.run(OUTPUT_NODES); line executes.

I don't know whether the error lies in the model itself, but I assumed Python would have raised an error and refused to build the model if it did, and it built it without complaint.
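For reference, this error is what TensorFlow raises whenever Session.run reaches a placeholder that was given no value in feed_dict. A minimal standalone sketch that triggers the same message (the variable names here are illustrative, not taken from my model):

import tensorflow as tf

inp = tf.placeholder(tf.float32, [None, 2], name='input')
keep_prob = tf.placeholder(tf.float32)  # unnamed, so TF calls it 'Placeholder'
out = tf.nn.dropout(inp, keep_prob)

with tf.Session() as sess:
    # Feeding only 'inp' raises:
    #   InvalidArgumentError: You must feed a value for placeholder tensor
    #   'Placeholder' with dtype float
    sess.run(out, feed_dict={inp: [[1.0, 2.0]]})
    # Feeding both placeholders succeeds:
    # sess.run(out, feed_dict={inp: [[1.0, 2.0]], keep_prob: 1.0})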

Here is an example of the data rows I send to TensorFlow (from the CSV file):

1,26,2091,5,2,0,0,0,0,0,85,105,6,4,0,1
1,26,47,9,4,0,0,0,0,0,85,0,7,4,1,0

Here is the model creation in Python:

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from math import floor, ceil
from pylab import rcParams

columns = ["Gender", "Age", "StepsNum", "Still", "ContinuousStill",
           "Running", "Driving", "Cycling", "Weather", "TargetWeight",
           "Calories", "DayOfTheWeek", "PartOfTheDay",
           "NotificationType", "UserInput"]
userinput = ['0', '1']

# raw string so the backslashes in the Windows path are not read as escapes
activites_df = pd.read_csv(r"E:\MASTER\PythonPrograms\useractivityInt.csv", header=None, names=columns)

# encode all strings
def encode(series):
    # print(pd.get_dummies(series.astype(str)))
    return pd.get_dummies(series.astype(str))

train_x = pd.DataFrame(activites_df, columns=columns, dtype=float)
# train_x = activites_df
train_y = encode(activites_df.UserInput)
print(train_y)
# train_y = activites_df.iloc[:,-1]
# print(train_y)
# train_y = pd.DataFrame(userinput, dtype=float)

train_size = 0.9
train_cnt = floor(train_x.shape[0] * train_size)
# iloc[0] - first row, iloc[:,0] - first column of the data frame, iloc[0:n] - first n rows
x_train = train_x.iloc[0:train_cnt].values
y_train = train_y.iloc[0:train_cnt].values
x_test = train_x.iloc[train_cnt:].values
y_test = train_y.iloc[train_cnt:].values

def multilayer_perceptron(x, weights, biases, keep_prob):
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    layer_1 = tf.nn.dropout(layer_1, keep_prob)
    out_layer = tf.matmul(layer_1, weights['out']) + biases['out']
    return out_layer

# shape[0] gives the number of rows of the matrix, shape[1] the number of columns
n_hidden_1 = 38
n_input = train_x.shape[1]
n_classes = train_y.shape[1]
# n_classes = 2
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1]), tf.float32),
    'out': tf.Variable(tf.random_normal([n_hidden_1, n_classes]), tf.float32)
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1]), tf.float32),
    'out': tf.Variable(tf.random_normal([n_classes]), tf.float32)
}
# keep_prob: a scalar Tensor with the same type as x; the probability that each element is kept
# keep_prob = tf.placeholder("float")
keep_prob = tf.placeholder(tf.float32)
training_epochs = 5000
display_step = 1000
batch_size = 32
x = tf.placeholder(tf.float32, [None, n_input], name='input')
y = tf.placeholder(tf.float32, [None, n_classes])
predictions = multilayer_perceptron(x, weights, biases, keep_prob)
# y_ is the name of the output node
# If we take an input of [1, 2, 3, 4, 1, 2, 3], the softmax of that is
# [0.024, 0.064, 0.175, 0.475, 0.024, 0.064, 0.175]. The output has most of its
# weight where the '4' was in the original input. This is what the function is
# normally used for: to highlight the largest values and suppress values which
# are significantly below the maximum value.
pred_softmax = tf.nn.softmax(predictions, name="y_")
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=predictions, labels=y))
LEARNING_RATE = 0.0025
optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE).minimize(cost)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(training_epochs):
        avg_cost = 0.0
        total_batch = int(len(x_train) / batch_size)
        x_batches = np.array_split(x_train, total_batch)
        y_batches = np.array_split(y_train, total_batch)
        for i in range(total_batch):
            batch_x, batch_y = x_batches[i], y_batches[i]
            _, c = sess.run([optimizer, cost],
                            feed_dict={
                                x: batch_x,
                                y: batch_y,
                                keep_prob: 0.8
                            })
            avg_cost += c / total_batch
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "loss=",
                  "{:.9f}".format(avg_cost))
    print("Optimization Finished!")
    correct_prediction = tf.equal(tf.argmax(predictions, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print("Accuracy:", accuracy.eval({x: x_test, y: y_test, keep_prob: 1.0}))
    saver = tf.train.Saver()
    tf.train.write_graph(sess.graph_def, '.', r'E:\MASTER\PythonPrograms\har.pbtxt')
    saver.save(sess, save_path=r"E:\MASTER\PythonPrograms\har.ckpt")
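Freezing and optimizing a graph like this is typically done with the TF 1.x tooling. A sketch using the paths and node names from the code above; the exact invocation is an assumption, since the original freezing script is not shown:

from tensorflow.python.tools import freeze_graph
from tensorflow.python.tools import optimize_for_inference_lib
import tensorflow as tf

# Merge the graph definition with the checkpointed weights into one .pb file.
freeze_graph.freeze_graph(
    input_graph=r"E:\MASTER\PythonPrograms\har.pbtxt",
    input_saver="",
    input_binary=False,
    input_checkpoint=r"E:\MASTER\PythonPrograms\har.ckpt",
    output_node_names="y_",
    restore_op_name="save/restore_all",
    filename_tensor_name="save/Const:0",
    output_graph=r"E:\MASTER\PythonPrograms\frozen_har.pb",
    clear_devices=True,
    initializer_nodes="")

# Strip training-only ops so the graph is smaller and faster on device.
# Note: the dropout keep_prob placeholder survives freezing, so it still
# has to be fed at inference time.
graph_def = tf.GraphDef()
with tf.gfile.GFile(r"E:\MASTER\PythonPrograms\frozen_har.pb", "rb") as f:
    graph_def.ParseFromString(f.read())
optimized = optimize_for_inference_lib.optimize_for_inference(
    graph_def, ["input"], ["y_"], tf.float32.as_datatype_enum)
with tf.gfile.GFile(r"E:\MASTER\PythonPrograms\optimized_frozen_har.pb", "wb") as f:
    f.write(optimized.SerializeToString())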

The main parts of the code in Android Studio are:

The variable definitions:

private static final String MODEL_FILE = "file:///android_asset/optimized_frozen_har.pb";
String INPUT_NODE = "input";
String[] OUTPUT_NODES = {"y_"};
String OUTPUT_NODE = "y_";
// I don't know what the input size is
long[] INPUT_SIZE = {1, 15};
int OUTPUT_SIZE = 2;
private TensorFlowInferenceInterface inferenceInterface;

Initialization and the call in onCreate:

inferenceInterface = new TensorFlowInferenceInterface(appContext.getAssets(), MODEL_FILE);

float[] data = {1.0f, 26.0f, 1000.0f, 3.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 85.0f, 48.0f, 7.0f, 4.0f, 2.0f};
float[] out = predictProbabilitiesFloat(data);

The function used to get the result back from TensorFlow:

public float[] predictProbabilitiesFloat(float[] data) {
    float[] result = new float[OUTPUT_SIZE];
    inferenceInterface.feed(INPUT_NODE, data, INPUT_SIZE);
    inferenceInterface.run(OUTPUT_NODES);
    inferenceInterface.fetch(OUTPUT_NODE, result);
    // for us it should be 0 or 1
    return result;
}

If anyone knows how to solve this, please help me; this is part of my master's thesis and I have only a few days left to finish it.

Thanks in advance!

I managed to solve my own problem with a friend's help... so if anyone else runs into the same issue, here is the solution:

The new model looks like this:

# from __future__ import absolute_import
# from __future__ import division
# from __future__ import print_function
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from math import floor, ceil
from pylab import rcParams

columns = ["Gender", "Age", "StepsNum", "Still", "ContinuousStill",
           "Running", "Driving", "Cycling", "Sleeping", "Weather", "TargetWeight",
           "Calories", "DayOfTheWeek", "PartOfTheDay",
           "NotificationType", "UserInput"]
columns_train = ["Gender", "Age", "StepsNum", "Still", "ContinuousStill",
                 "Running", "Driving", "Cycling", "Sleeping", "Weather", "TargetWeight",
                 "Calories", "DayOfTheWeek", "PartOfTheDay",
                 "NotificationType"]
userinput = [0, 1]

# raw string so the backslashes in the Windows path are not read as escapes
activites_df = pd.read_csv(r"E:\MASTER\PythonPrograms\useractivityInt.csv", header=None, names=columns, dtype=float)

# encode all strings
def encode(series):
    # print(pd.get_dummies(series.astype(str)))
    return pd.get_dummies(series.astype(str))

def to_float(series):
    return pd.to_numeric(series, downcast='float')

mydict_gender = {"Male": 1, "Female": 2}
mydict_weather = {"Cloudy": 1, "Clear": 2, "Foggy": 3, "Rainy": 4, "Snowy": 5, "Icy": 6}
mydict_partOfTheDay = {"Morning": 1, "Noon": 2, "Afternoon": 3, "Evening": 4, "Night": 5}
mydict_notificationType = {"atHome": 1, "doExercises": 2, "goToBed": 3, "getUp": 4, "goToCycling": 5, "goToRunning": 6, "goToWork": 7, "StepsGoal": 8, "takeAWalk": 9}
# it has to be a matrix!
train_x = pd.DataFrame(activites_df, columns=columns_train, dtype=float)
# train_x = activites_df
train_y = encode(activites_df.UserInput)
# train_y = activites_df.iloc[:,-1]
# print(train_y)
# train_y = pd.DataFrame(userinput, dtype=float)

train_size = 0.9
# floor rounds down; train_cnt is the number of records in the csv
train_cnt = floor(train_x.shape[0] * train_size)
# iloc[0] - first row, iloc[:,0] - first column of the data frame, iloc[0:n] - first n rows
x_train = train_x.iloc[0:train_cnt].values
y_train = train_y.iloc[0:train_cnt].values
# take the rows at the end and their values for the test set
x_test = train_x.iloc[train_cnt:].values
y_test = train_y.iloc[train_cnt:].values

def multilayer_perceptron(x, weights, biases, keep_prob):
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    layer_1 = tf.nn.dropout(layer_1, keep_prob)
    out_layer = tf.matmul(layer_1, weights['out']) + biases['out']
    return out_layer

# shape[0] gives the number of rows of the matrix, shape[1] the number of columns
n_hidden_1 = 38
n_input = train_x.shape[1]
n_classes = train_y.shape[1]
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1]), tf.float32),
    'out': tf.Variable(tf.random_normal([n_hidden_1, n_classes]), tf.float32)
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1]), tf.float32),
    'out': tf.Variable(tf.random_normal([n_classes]), tf.float32)
}
# keep_prob: a scalar Tensor with the same type as x; the probability that each element is kept
# keep_prob = tf.placeholder("float")
keep_prob = tf.placeholder(tf.float32, name="keep_prob")
training_epochs = 5000
display_step = 1000
batch_size = 32
x = tf.placeholder(tf.float32, [None, n_input], name='input')
y = tf.placeholder(tf.float32, [None, n_classes])
# input_size doesn't exist
# print("input size: " + n_input.train_size)
predictions = multilayer_perceptron(x, weights, biases, keep_prob)
# y_ is the name of the output node
# If we take an input of [1, 2, 3, 4, 1, 2, 3], the softmax of that is
# [0.024, 0.064, 0.175, 0.475, 0.024, 0.064, 0.175]. The output has most of its
# weight where the '4' was in the original input. This is what the function is
# normally used for: to highlight the largest values and suppress values which
# are significantly below the maximum value.
pred_softmax = tf.nn.softmax(predictions, name="y_")
# cost is the loss function
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=predictions, labels=y))
LEARNING_RATE = 0.0025
optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE).minimize(cost)
total_batch = int(len(x_train) / batch_size)
x_batches = []
y_batches = []
print(x_train.shape)
print(y_train.shape)
for i in range(total_batch):
    x_batches.append([])
    y_batches.append([])
    for j in range(batch_size):
        x_batches[i].append(x_train[batch_size*i + j])
        y_batches[i].append(y_train[batch_size*i + j])
# x_batches = np.array_split(x_train, total_batch)
# y_batches = np.array_split(y_train, total_batch)
# print("{}, {}, {}".format(len(x_batches), len(x_batches[0]), len(x_batches[0][0])))
# print("{}, {}, {}".format(len(y_batches), len(y_batches[0]), len(y_batches[0][0])))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(training_epochs):
        avg_cost = 0.0
        for i in range(total_batch):
            batch_x, batch_y = x_batches[i], y_batches[i]
            _, c = sess.run([optimizer, cost],
                            feed_dict={
                                x: batch_x,
                                y: batch_y,
                                keep_prob: 0.8
                            })
            avg_cost += c / total_batch
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "loss=",
                  "{:.9f}".format(avg_cost))
    print("Optimization Finished!")
    correct_prediction = tf.equal(tf.argmax(predictions, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print("Accuracy:", accuracy.eval({x: x_test, y: y_test, keep_prob: 1.0}))
    saver = tf.train.Saver()
    tf.train.write_graph(sess.graph_def, '.', r'E:\MASTER\PythonPrograms\har.pbtxt')
    saver.save(sess, save_path=r"E:\MASTER\PythonPrograms\har.ckpt")
    # sess.close()

The Android method that calls into TensorFlow now looks like this:

public float[] predictProbabilitiesFloat(float[] data) {
    float[] result = new float[OUTPUT_SIZE];
    inferenceInterface.feed(INPUT_NODE, data, INPUT_SIZE);
    float[] kprob = {1.0f};
    inferenceInterface.feed("keep_prob", kprob, 1);
    inferenceInterface.run(OUTPUT_NODES);
    inferenceInterface.fetch(OUTPUT_NODE, result);
    // for us it should be 0 or 1
    return result;
}

I also needed to feed the keep_prob value from the Android app...
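Before bundling the new frozen graph into the app, it can be sanity-checked on the desktop by loading the .pb file and feeding the same three tensors the Android code feeds. A small sketch, assuming the graph was frozen and optimized to optimized_frozen_har.pb as above:

import numpy as np
import tensorflow as tf

graph_def = tf.GraphDef()
with tf.gfile.GFile(r"E:\MASTER\PythonPrograms\optimized_frozen_har.pb", "rb") as f:
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name="")

# The same sample row the Android code feeds (15 features).
sample = np.array([[1, 26, 1000, 3, 1, 0, 0, 0, 0, 0, 85, 48, 7, 4, 2]],
                  dtype=np.float32)

with tf.Session(graph=graph) as sess:
    probs = sess.run("y_:0", feed_dict={"input:0": sample, "keep_prob:0": 1.0})
    print(probs)  # two softmax probabilities, matching OUTPUT_SIZE = 2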

I hope this helps someone...
