Displaying PR curves in TensorBoard



I am trying to display a mAP or precision-recall curve using TensorFlow and TensorBoard. Currently TensorBoard shows my accuracy, precision, f-score and recall for training and testing per epoch, but I am having trouble getting pr_curve to work. I either get the error

"类型错误:输入/Placeholder_1:0 必须类型为 dtype:"布尔">

or, if I change the float to a bool, I get the error

"tensorflow.python.framework.errors_impl。无效参数错误: 您 必须为占位符张量"input/Placeholder_1"输入值 d型浮子和形状[?,2]">

I have tried different things, such as moving the pr_curve-related commands around or passing a copy of the variable as the argument, with no luck. If I comment out the pr_curve command I get no error, but I also get no curve.

Here is my code; the pr_curve command is on line 109 or 110, depending on the error.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import pdb

def make_bread():
    pdb.set_trace()
    return "I don't have time"

import numpy as np
import math, time
import tensorflow as tf
from tensorboard import summary as summ_lib

import flags as f
import input, lenet5, evaluate

tf.logging.set_verbosity(tf.logging.INFO)

# Set random seeds (to get similar results every run)
tf.set_random_seed(1)
np.random.seed(10)

summaries = {'train': [], 'validate': [], 'test': []}

def main(unused_argv):
    train_dataset, validate_dataset, test_dataset = input.input(shuffle_files=False)

    info = tf.constant(
        ["Batch size = %s" % f.FLAGS.batch_size,
         "Epochs = %s" % f.FLAGS.num_epochs,
         "Learning rate = %s" % f.FLAGS.learning_rate,
         "Batch normalization = No",
         "Window size = %s" % f.FLAGS.window_size,
         "Shuffle Files = No",
         "CNN model = %s" % f.FLAGS.cnn_model,
         "Shuffle Samples = YES"]
    )

    with tf.name_scope('input'):
        x = tf.placeholder(tf.float32, [None, input.SAMPLE_DEPTH,
                                        input.SAMPLE_HEIGHT, input.SAMPLE_WIDTH])
        y_ = tf.placeholder(tf.float32, [None, 2])
        dropout_rate = tf.placeholder(tf.float32)
        is_training = tf.placeholder(tf.bool)

    with tf.name_scope('logits'):
        if f.FLAGS.cnn_model == "lenet5":
            logits = lenet5.model_fn(sample_input=x,
                                     is_training=is_training, summaries=summaries)

    with tf.name_scope('loss'):
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=logits)
        mean_cross_entropy_loss = tf.reduce_mean(cross_entropy)
        loss_summ = tf.summary.scalar('Mean_cross_entropy_loss', mean_cross_entropy_loss)
        summaries['train'].append(loss_summ)

    with tf.name_scope('adam_optimizer'):
        optimizer = tf.train.AdamOptimizer(f.FLAGS.learning_rate).minimize(mean_cross_entropy_loss)

    with tf.name_scope('accuracy'):
        preds = tf.argmax(logits, 1)
        correct_preds = tf.argmax(y_, 1)
        equal = tf.equal(preds, correct_preds)
        training_accuracy_op = tf.reduce_mean(tf.cast(equal, tf.float32))
        summaries['train'].append(tf.summary.scalar('Training_Accuracy', training_accuracy_op))

    with tf.name_scope('Evaluation_Metrics'):
        tp_op = evaluate.tp(logits=logits, labels=y_)
        fp_op = evaluate.fp(logits=logits, labels=y_)
        tn_op = evaluate.tn(logits=logits, labels=y_)
        fn_op = evaluate.fn(logits=logits, labels=y_)
        tp_sum = tf.placeholder(tf.float32)
        tn_sum = tf.placeholder(tf.float32)
        fp_sum = tf.placeholder(tf.float32)
        fn_sum = tf.placeholder(tf.float32)
        precision_op = evaluate.precision(tp=tp_sum, fp=fp_sum, tn=tn_sum, fn=fn_sum)
        accuracy_op = evaluate.accuracy(tp=tp_sum, fp=fp_sum, tn=tn_sum, fn=fn_sum)
        recall_op = evaluate.recall(tp=tp_sum, fp=fp_sum, tn=tn_sum, fn=fn_sum)
        fscore_op = evaluate.fscore(tp=tp_sum, fp=fp_sum, tn=tn_sum, fn=fn_sum)
        precision_summ = tf.summary.scalar('Precision', precision_op)
        accuracy_summ = tf.summary.scalar('Accuracy', accuracy_op)
        recall_summ = tf.summary.scalar('Recall', recall_op)
        fscore_summ = tf.summary.scalar('Fscore', fscore_op)
        summaries['validate'].append(accuracy_summ)
        summaries['validate'].append(precision_summ)
        summaries['validate'].append(recall_summ)
        summaries['validate'].append(fscore_summ)
        summaries['test'].append(accuracy_summ)
        summaries['test'].append(precision_summ)
        summaries['test'].append(recall_summ)
        summaries['test'].append(fscore_summ)

    print("Saving graph to %s" % f.FLAGS.log_dir)

    mAP_summ = summ_lib.pr_curve(name='mAP', predictions=logits, labels=y_, num_thresholds=11)
    # OR THIS ONE: mAP_summ = summ_lib.pr_curve(name='mAP', predictions=logits, labels=tf.cast(y_, tf.bool), num_thresholds=11)
    train_writer = tf.summary.FileWriter(f.FLAGS.log_dir + "/train")
    validate_writer = tf.summary.FileWriter(f.FLAGS.log_dir + "/validate")
    test_writer = tf.summary.FileWriter(f.FLAGS.log_dir + "/test")
    train_writer.add_graph(tf.get_default_graph())

    train_summaries = tf.summary.merge(summaries['train'])
    validate_summaries = tf.summary.merge(summaries['validate'])
    test_summaries = tf.summary.merge(summaries['test'])

    with tf.Session() as sess:
        train_writer.add_summary(sess.run(tf.summary.text("Information", info)))
        train_iter = train_dataset.make_initializable_iterator()
        train_next_elem = train_iter.get_next()
        sess.run(tf.global_variables_initializer())

        global_step = 0
        display_freq = 10
        validate_freq = 50
        test_freq = 50

        for epoch in range(1, f.FLAGS.num_epochs + 1):
            sess.run(train_iter.initializer)
            step_time = 0.0
            fetch_time = 0.0
            while True:
                try:
                    a = time.time()
                    global_step += 1
                    #print(make_bread())
                    sample, label = sess.run(train_next_elem)
                    fetch_time += time.time() - a
                    a = time.time()
                    _, summ = sess.run([optimizer, train_summaries],
                                       feed_dict={x: sample, y_: label, dropout_rate: 0.5, is_training: True})
                    train_writer.add_summary(summ, global_step)
                    step_time += time.time() - a
                except tf.errors.OutOfRangeError:
                    break
                if global_step % display_freq == 0:
                    batch_loss, batch_accuracy = sess.run([mean_cross_entropy_loss, training_accuracy_op],
                                                          feed_dict={x: sample, y_: label, dropout_rate: 1.0, is_training: False})
                    print("Epoch {:3}\t Step {:5}:\t Loss={:.3f}, \tTraining Accuracy={:.5f} \tStep Time {:4.2f}m, Fetch Time {:4.2f}m".
                          format(epoch, global_step, batch_loss, batch_accuracy, step_time/60, fetch_time/60))
                    step_time = 0.0
                    fetch_time = 0.0

            # Validate and test after each epoch
            val_it = validate_dataset.make_one_shot_iterator()
            val_next_elem = val_it.get_next()
            tot_tp, tot_tn, tot_fp, tot_fn = 0, 0, 0, 0
            while True:
                try:
                    sample, label = sess.run(val_next_elem)
                    tp, fp, tn, fn = sess.run([tp_op, fp_op, tn_op, fn_op],
                                              feed_dict={x: sample, y_: label, dropout_rate: 1.0, is_training: False})
                except tf.errors.OutOfRangeError:
                    break
                tot_tp += tp
                tot_fp += fp
                tot_fn += fn
                tot_tn += tn
            precision, recall, accuracy, fscore, summ = sess.run(
                [precision_op, recall_op, accuracy_op, fscore_op, validate_summaries],
                feed_dict={tp_sum: tot_tp, tn_sum: tot_tn, fp_sum: tot_fp, fn_sum: tot_fn})
            validate_writer.add_summary(summ, global_step)
            print("Epoch %d, Step %d" % (epoch, global_step))
            print("="*10, "Validating Results", "="*10)
            print("TP: %g\nTN: %g\nFP: %g\nFN: %g" % (tot_tp, tot_tn, tot_fp, tot_fn))
            print("\tPrecision: %g\n\tRecall: %g\n\tF1_score: %g\n\tAccuracy: %g" % (precision, recall, fscore, accuracy))

            test_it = test_dataset.make_one_shot_iterator()
            test_next_elem = test_it.get_next()
            tot_tp, tot_tn, tot_fp, tot_fn = 0, 0, 0, 0
            while True:
                try:
                    sample, label = sess.run(test_next_elem)
                    tp, fp, tn, fn, pred, lab = sess.run([tp_op, fp_op, tn_op, fn_op, logits, y_],
                                                         feed_dict={x: sample, y_: label, dropout_rate: 1.0, is_training: False})
                except tf.errors.OutOfRangeError:
                    break
                tot_tp += tp
                tot_fp += fp
                tot_fn += fn
                tot_tn += tn
            precision, recall, accuracy, fscore, summ = sess.run(
                [precision_op, recall_op, accuracy_op, fscore_op, test_summaries],
                feed_dict={tp_sum: tot_tp, tn_sum: tot_tn, fp_sum: tot_fp, fn_sum: tot_fn})
            test_writer.add_summary(summ, global_step)
            print("="*10, "Testing Results", "="*10)
            print("TP: %g\nTN: %g\nFP: %g\nFN: %g" % (tot_tp, tot_tn, tot_fp, tot_fn))
            print("\tPrecision: %g\n\tRecall: %g\n\tF1_score: %g\n\tAccuracy: %g" % (precision, recall, fscore, accuracy))
            print("="*10, "===============", "="*10)

        map_writer = tf.summary.FileWriter(f.FLAGS.log_dir + "/mAP")
        map_writer.add_summary(sess.run(mAP_summ), global_step=0)
        map_writer.close()


if __name__ == "__main__":
    tf.app.run()

Here are my results, depending on which pr_curve command is used:

10:06:00:26.03.2019:/media/randy/Data1/Python/Machine_Learning/3d_cnn ./run2.sh
Number of training files:  68
Number of validation files:  23
Number of testing files:  23
W0326 22:06:28.115116 139978789848896 deprecation.py:506] From /home/randy/.local/lib/python3.6/site-packages/tensorflow/python/training/slot_creator.py:187: calling Zeros.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.
Instructions for updating:
Call initializer instance with the dtype argument instead of passing it to the constructor
Saving graph to /media/randy/Data1/log/proc_order/ABS-Correlated
Traceback (most recent call last):
File "cnn_model.py", line 208, in <module>
tf.app.run()
File "/home/randy/.local/lib/python3.6/site-packages/tensorflow/python/platform/app.py", line 40, in run
_run(main=main, argv=argv, flags_parser=_parse_flags_tolerate_undef)
File "/home/randy/.local/lib/python3.6/site-packages/absl/app.py", line 300, in run
_run_main(main, args)
File "/home/randy/.local/lib/python3.6/site-packages/absl/app.py", line 251, in _run_main
sys.exit(main(argv))
File "cnn_model.py", line 108, in main
mAP_summ = summ_lib.pr_curve(name='mAP',predictions=logits,labels=y_,num_thresholds=11)
File "/home/randy/.local/lib/python3.6/site-packages/tensorboard/plugins/pr_curve/summary.py", line 94, in op
tf.compat.v1.assert_type(labels, tf.bool)
File "/home/randy/.local/lib/python3.6/site-packages/tensorflow/python/ops/check_ops.py", line 1502, in assert_type
tf_type))
TypeError:   input/Placeholder_1:0 must be of type <dtype: 'bool'>
real    0m1.092s
user    0m1.254s
sys     0m0.751s


10:06:28:26.03.2019:/media/randy/Data1/Python/Machine_Learning/3d_cnn ./run2.sh
Number of training files:  68
Number of validation files:  23
Number of testing files:  23
W0326 22:07:04.819849 140201659422528 deprecation.py:506] From /home/randy/.local/lib/python3.6/site-packages/tensorflow/python/training/slot_creator.py:187: calling Zeros.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.
Instructions for updating:
Call initializer instance with the dtype argument instead of passing it to the constructor
Saving graph to /media/randy/Data1/log/proc_order/ABS-Correlated
2019-03-26 22:07:04.963555: I tensorflow/core/platform/cpu_feature_guard.cc:142] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
2019-03-26 22:07:04.966910: I tensorflow/stream_executor/platform/default/dso_loader.cc:42] Successfully opened CUDA library libcuda.so.1
2019-03-26 22:07:05.054544: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:1010] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2019-03-26 22:07:05.054971: I tensorflow/compiler/xla/service/service.cc:162] XLA service 0x4dd1b80 executing computations on platform CUDA. Devices:
2019-03-26 22:07:05.054985: I tensorflow/compiler/xla/service/service.cc:169]   StreamExecutor device (0): GeForce GTX 1070 Ti, Compute Capability 6.1
2019-03-26 22:07:05.072833: I tensorflow/core/platform/profile_utils/cpu_utils.cc:94] CPU Frequency: 3192000000 Hz
2019-03-26 22:07:05.073556: I tensorflow/compiler/xla/service/service.cc:162] XLA service 0x4d5bc70 executing computations on platform Host. Devices:
2019-03-26 22:07:05.073585: I tensorflow/compiler/xla/service/service.cc:169]   StreamExecutor device (0): <undefined>, <undefined>
2019-03-26 22:07:05.073788: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1464] Found device 0 with properties:
name: GeForce GTX 1070 Ti major: 6 minor: 1 memoryClockRate(GHz): 1.683
pciBusID: 0000:01:00.0
totalMemory: 7.93GiB freeMemory: 7.53GiB
2019-03-26 22:07:05.073797: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1543] Adding visible gpu devices: 0
2019-03-26 22:07:05.073837: I tensorflow/stream_executor/platform/default/dso_loader.cc:42] Successfully opened CUDA library libcudart.so.10.0
2019-03-26 22:07:05.074339: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1015] Device interconnect StreamExecutor with strength 1 edge matrix:
2019-03-26 22:07:05.074346: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1021]      0
2019-03-26 22:07:05.074349: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1034] 0:   N
2019-03-26 22:07:05.074521: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1146] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 7324 MB memory) -> physical GPU (device: 0, name: GeForce GTX 1070 Ti, pci bus id: 0000:01:00.0, compute capability: 6.1)
W0326 22:07:05.090326 140201659422528 deprecation.py:323] From cnn_model.py:122: DatasetV1.make_initializable_iterator (from tensorflow.python.data.ops.dataset_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `for ... in dataset:` to iterate over a dataset. If using `tf.estimator`, return the `Dataset` object directly from your input function. As a last resort, you can use `tf.compat.v1.data.make_initializable_iterator(dataset)`.
2019-03-26 22:07:06.920871: I tensorflow/stream_executor/platform/default/dso_loader.cc:42] Successfully opened CUDA library libcublas.so.10.0
2019-03-26 22:07:07.021886: I tensorflow/stream_executor/platform/default/dso_loader.cc:42] Successfully opened CUDA library libcudnn.so.7
Epoch   1        Step    10:     Loss=0.723,    Training Accuracy=0.75977       Step Time 0.57m, Fetch Time 0.01m
Epoch   1        Step    20:     Loss=0.528,    Training Accuracy=0.80859       Step Time 0.55m, Fetch Time 0.00m
Epoch   1        Step    30:     Loss=0.399,    Training Accuracy=0.86523       Step Time 0.55m, Fetch Time 0.00m
W0326 22:09:01.058223 140201659422528 deprecation.py:323] From cnn_model.py:157: DatasetV1.make_one_shot_iterator (from tensorflow.python.data.ops.dataset_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `for ... in dataset:` to iterate over a dataset. If using `tf.estimator`, return the `Dataset` object directly from your input function. As a last resort, you can use `tf.compat.v1.data.make_one_shot_iterator(dataset)`.
Epoch 1, Step 35
========== Validating Results ==========
TP: 980
TN: 3093
FP: 302
FN: 1257
Precision: 0.764431
Recall: 0.438087
F1_score: 0.556976
Accuracy: 0.723189
========== Testing Results ==========
TP: 1124
TN: 3500
FP: 272
FN: 2505
Precision: 0.805158
Recall: 0.309727
F1_score: 0.447363
Accuracy: 0.62478
========== =============== ==========
Traceback (most recent call last):
File "/home/randy/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1335, in _do_call
return fn(*args)
File "/home/randy/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1320, in _run_fn
options, feed_dict, fetch_list, target_list, run_metadata)
File "/home/randy/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1408, in _call_tf_sessionrun
run_metadata)
tensorflow.python.framework.errors_impl.InvalidArgumentError: You must feed a value for placeholder tensor 'input/Placeholder_1' with dtype float and shape [?,2]
[[{{node input/Placeholder_1}}]]
[[mAP/stack/_199]]
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "cnn_model.py", line 208, in <module>
tf.app.run()
File "/home/randy/.local/lib/python3.6/site-packages/tensorflow/python/platform/app.py", line 40, in run
_run(main=main, argv=argv, flags_parser=_parse_flags_tolerate_undef)
File "/home/randy/.local/lib/python3.6/site-packages/absl/app.py", line 300, in run
_run_main(main, args)
File "/home/randy/.local/lib/python3.6/site-packages/absl/app.py", line 251, in _run_main
sys.exit(main(argv))
File "cnn_model.py", line 204, in main
map_writer.add_summary(sess.run(mAP_summ),global_step=0)
File "/home/randy/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 930, in run
run_metadata_ptr)
File "/home/randy/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1153, in _run
feed_dict_tensor, options, run_metadata)
File "/home/randy/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1329, in _do_run
run_metadata)
File "/home/randy/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1349, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: You must feed a value for placeholder tensor 'input/Placeholder_1' with dtype float and shape [?,2]
[[node input/Placeholder_1 (defined at cnn_model.py:50) ]]
[[mAP/stack/_199]]
Original stack trace for 'input/Placeholder_1':
File "cnn_model.py", line 208, in <module>
tf.app.run()
File "/home/randy/.local/lib/python3.6/site-packages/tensorflow/python/platform/app.py", line 40, in run
_run(main=main, argv=argv, flags_parser=_parse_flags_tolerate_undef)
File "/home/randy/.local/lib/python3.6/site-packages/absl/app.py", line 300, in run
_run_main(main, args)
File "/home/randy/.local/lib/python3.6/site-packages/absl/app.py", line 251, in _run_main
sys.exit(main(argv))
File "cnn_model.py", line 50, in main
y_ = tf.placeholder(tf.float32, [None, 2])
File "/home/randy/.local/lib/python3.6/site-packages/tensorflow/python/ops/array_ops.py", line 2084, in placeholder
return gen_array_ops.placeholder(dtype=dtype, shape=shape, name=name)
File "/home/randy/.local/lib/python3.6/site-packages/tensorflow/python/ops/gen_array_ops.py", line 6098, in placeholder
"Placeholder", dtype=dtype, shape=shape, name=name)
File "/home/randy/.local/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py", line 800, in _apply_op_helper
op_def=op_def)
File "/home/randy/.local/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py", line 507, in new_func
return func(*args, **kwargs)
File "/home/randy/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 3473, in create_op
op_def=op_def)
File "/home/randy/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 1961, in __init__
self._traceback = tf_stack.extract_stack()

real    1m59.745s
user    6m46.591s
sys     0m6.057s

Any ideas on how I can fix this?

Also, if I add print(y_) and print(tf.cast(y_, tf.bool)) at line 108, I get the following:

Tensor("input/Placeholder_1:0", shape=(?, 2), dtype=float32)
Tensor("Cast:0", shape=(?, 2), dtype=bool)

which suggests the tensors are the correct size.

TF version is 1.13.0-dev20190218.

I figured out what the problem was. These are two different errors. The "bool" error was because I needed to define the mAP summary on line 109 with boolean labels (as I did on line 110). The second error was because I was passing the tensors themselves as labels instead of evaluating them before passing them at the end of the code. Once I get it all working, I will post the updated code.
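In the meantime, here is a minimal standalone sketch of the two fixes, not the final code: the labels handed to pr_curve are cast to bool, and concrete values are fed when the summary op is evaluated. The toy labels/logits and the /tmp/pr_demo log directory are made up for illustration, and using tf.nn.softmax plus slicing out the positive-class column is an assumption about how to get predictions into the [0, 1] range that pr_curve expects (raw logits are not in that range).

import numpy as np
import tensorflow as tf
from tensorboard import summary as summ_lib

# Toy stand-ins for the real graph: 2-class one-hot labels and logits.
y_ = tf.placeholder(tf.float32, [None, 2])
logits = tf.placeholder(tf.float32, [None, 2])

probs = tf.nn.softmax(logits)[:, 1]          # pr_curve wants scores in [0, 1]
bool_labels = tf.cast(y_[:, 1], tf.bool)     # pr_curve wants boolean labels
pr_summ = summ_lib.pr_curve(name='mAP', predictions=probs,
                            labels=bool_labels, num_thresholds=11)

with tf.Session() as sess:
    writer = tf.summary.FileWriter("/tmp/pr_demo")
    labels = np.array([[1, 0], [0, 1], [0, 1], [1, 0]], np.float32)
    scores = np.array([[2.0, -1.0], [-0.5, 1.5], [0.2, 0.1], [1.0, 3.0]], np.float32)
    # The summary op depends on the placeholders, so they must be fed here --
    # this is what the "must feed a value for placeholder" error was about.
    writer.add_summary(sess.run(pr_summ, feed_dict={y_: labels, logits: scores}),
                       global_step=0)
    writer.close()

Running the op on a single batch only gives a curve for that batch; a curve over the whole validation or test set means collecting the evaluated predictions and labels first and feeding the aggregated arrays, which seems to be what the note above about evaluating the tensors before passing them is getting at.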
