Could someone please help me fix this error? I've pasted a large chunk of the code for better context; it is taken from GitHub, and I'm running into a lot of errors. I believe the error only comes from the two blocks I've marked between /// and /// below, so you can ignore the rest of the code. GitHub link: https://github.com/aqibsaeed/Human-Activity-Recognition-using-CNN/blob/master/Activity%20Detection.ipynb
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import tensorflow.compat.v1 as tf
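# NOTE: the helper functions used below (read_data, feature_normalize, plot_activity,
# segment_signal, apply_depthwise_conv, apply_max_pool, weight_variable, bias_variable)
# are defined earlier in the linked notebook and are omitted here.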
dataset = read_data(r"C:UsersNEWDesktopInternshipHuman Action
RecognitionDatasetWISDM_ar_v1.1Action.txt")
dataset.dropna(axis=0, how='any', inplace= True)
dataset['x-axis'] = feature_normalize(dataset['x-axis'])
dataset['y-axis'] = feature_normalize(dataset['y-axis'])
dataset['z-axis'] = feature_normalize(dataset['z-axis'])
for activity in np.unique(dataset["activity"]):
subset = dataset[dataset["activity"] == activity][:180]
plot_activity(activity,subset)
segments, labels = segment_signal(dataset)
labels = np.asarray(pd.get_dummies(labels), dtype = np.int8)
reshaped_segments = segments.reshape(len(segments), 1,90, 3)
train_test_split = np.random.rand(len(reshaped_segments)) < 0.70
train_x = reshaped_segments[train_test_split]
train_y = labels[train_test_split]
test_x = reshaped_segments[~train_test_split]
test_y = labels[~train_test_split]
input_height = 1
input_width = 90
num_labels = 6
num_channels = 3
batch_size = 10
kernel_size = 60
depth = 60
num_hidden = 1000
learning_rate = 0.0001
training_epochs = 8
total_batches = train_x.shape[0] // batch_size
tf.disable_v2_behavior()
///X = tf.placeholder(shape=[None,input_height,input_width,num_channels],dtype = tf.float32)
Y = tf.placeholder(shape=[None,num_labels],dtype=tf.float32)
c = apply_depthwise_conv(X,kernel_size,num_channels,depth)
p = apply_max_pool(c,20,2)
c = apply_depthwise_conv(p,6,depth*num_channels,depth//10)///
shape = c.get_shape().as_list()
c_flat = tf.reshape(c, [-1, shape[1] * shape[2] * shape[3]])
f_weights_l1 = weight_variable([shape[1] * shape[2] * depth * num_channels * (depth//10), num_hidden])
f_biases_l1 = bias_variable([num_hidden])
f = tf.nn.tanh(tf.add(tf.matmul(c_flat, f_weights_l1),f_biases_l1))
out_weights = weight_variable([num_hidden, num_labels])
out_biases = bias_variable([num_labels])
y_ = tf.nn.softmax(tf.matmul(f, out_weights) + out_biases)
loss = -tf.reduce_sum(Y * tf.log(y_))
optimizer = tf.train.GradientDescentOptimizer(learning_rate = learning_rate).minimize(loss)
correct_prediction = tf.equal(tf.argmax(y_,1), tf.argmax(Y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
cost_history = np.empty(shape=[1],dtype=float)
///with tf.Session() as session:
    tf.global_variables_initializer().run()
    for epoch in range(training_epochs):
        for b in range(total_batches):
            offset = (b * batch_size) % (train_y.shape[0] - batch_size)
            batch_x = train_x[offset:(offset + batch_size), :, :, :]
            batch_y = train_y[offset:(offset + batch_size), :]
            c = session.run([optimizer, loss],feed_dict={X: batch_x, Y : batch_y})
            cost_history = np.append(cost_history,c)
        print("Epoch: ",epoch," Training Loss: ",c," Training Accuracy: ",session.run(accuracy, feed_dict={X: train_x, Y: train_y}))
    print("Testing Accuracy:", session.run(accuracy, feed_dict={X: test_x, Y: test_y}))///
Error:

ValueError                              Traceback (most recent call last)
in <module>
10 c = session.run([optimizer, loss],feed_dict={X: batch_x, Y : batch_y})
11 cost_history = np.append(cost_history,c)
---> 12 print("Epoch: ",epoch," Training Loss: ",c," Training Accuracy: ",session.run(accuracy, feed_dict={X: train_x, Y: train_y}))
13 print("Testing Accuracy:", session.run(accuracy, feed_dict={X: test_x, Y: test_y}))
~\anaconda3\envs\tensorflow-sessions\lib\site-packages\tensorflow_core\python\client\session.py in run(self, fetches, feed_dict, options, run_metadata)
958 try:
959 result = self._run(None, fetches, feed_dict, options_ptr,
--> 960 run_metadata_ptr)
961 if run_metadata:
962 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
~\anaconda3\envs\tensorflow-sessions\lib\site-packages\tensorflow_core\python\client\session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
1157 'Cannot feed value of shape %r for Tensor %r, '
1158 'which has shape %r' %
-> 1159 (np_val.shape, subfeed_t.name, str(subfeed_t.get_shape())))
1160 if not self.graph.is_feedable(subfeed_t):
1161 raise ValueError('Tensor %s may not be fed.' % subfeed_t)
ValueError: Cannot feed value of shape (0, 0) for Tensor 'Placeholder_3:0', which has shape '(?, 6)'

A shape of (0, 0) means there is no data inside the network; no matter how many operations you run, such a tensor does not exist.
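A minimal shape check (just a sketch that uses only the variables already defined above; the expected shapes follow from segments.reshape(len(segments), 1, 90, 3) and num_labels = 6) should confirm whether the label arrays really end up empty before they are fed:

# Sanity-check the arrays that get fed into the placeholders.
# A (0, 0) feed for Y means train_y/test_y contain no rows and no label
# columns, e.g. if read_data() returned nothing or dropna() removed every
# row before pd.get_dummies() was applied.
print("reshaped_segments:", reshaped_segments.shape)  # expected (N, 1, 90, 3)
print("labels:", labels.shape)                        # expected (N, 6)
print("train_x:", train_x.shape, "train_y:", train_y.shape)
print("test_x:", test_x.shape, "test_y:", test_y.shape)
assert train_y.shape[0] > 0 and train_y.shape[1] == num_labels, \
    "train_y is empty or does not have num_labels columns"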