I'm building an RNN loosely based on the TensorFlow tutorial.
The relevant parts of my model are as follows:
input_sequence = tf.placeholder(tf.float32, [BATCH_SIZE, TIME_STEPS, PIXEL_COUNT + AUX_INPUTS])
output_actual = tf.placeholder(tf.float32, [BATCH_SIZE, OUTPUT_SIZE])
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(CELL_SIZE, state_is_tuple=False)
stacked_lstm = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * CELL_LAYERS, state_is_tuple=False)
initial_state = state = stacked_lstm.zero_state(BATCH_SIZE, tf.float32)
outputs = []
with tf.variable_scope("LSTM"):
    for step in xrange(TIME_STEPS):
        if step > 0:
            tf.get_variable_scope().reuse_variables()
        cell_output, state = stacked_lstm(input_sequence[:, step, :], state)
        outputs.append(cell_output)

final_state = state
And the training and feeding code:
cross_entropy = tf.reduce_mean(-tf.reduce_sum(output_actual * tf.log(prediction), reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(output_actual, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    numpy_state = initial_state.eval()

    for i in xrange(1, ITERATIONS):
        batch = DI.next_batch()
        print i, type(batch[0]), np.array(batch[1]).shape, numpy_state.shape

        if i % LOG_STEP == 0:
            train_accuracy = accuracy.eval(feed_dict={
                initial_state: numpy_state,
                input_sequence: batch[0],
                output_actual: batch[1]
            })
            print "Iteration " + str(i) + " Training Accuracy " + str(train_accuracy)

        numpy_state, train_step = sess.run([final_state, train_step], feed_dict={
            initial_state: numpy_state,
            input_sequence: batch[0],
            output_actual: batch[1]
        })
When I run this, I get the following error:
Traceback (most recent call last):
  File "/home/agupta/Documents/Projects/Image-Recognition-with-LSTM/RNN/feature_tracking/model.py", line 109, in <module>
    output_actual: batch[1]
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 698, in run
    run_metadata_ptr)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 838, in _run
    fetch_handler = _FetchHandler(self._graph, fetches)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 355, in __init__
    self._fetch_mapper = _FetchMapper.for_fetch(fetches)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 181, in for_fetch
    return _ListFetchMapper(fetch)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 288, in __init__
    self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches]
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 178, in for_fetch
    (fetch, type(fetch)))
TypeError: Fetch argument None has invalid type <type 'NoneType'>
Perhaps the weirdest part is that this error gets thrown on the second iteration; the first works completely fine. I'm ripping my hair out trying to fix this, so any help would be greatly appreciated.
You are re-assigning the train_step variable to the second element of the result of sess.run(). That element happens to be None: minimize() returns a tf.Operation rather than a tf.Tensor, and fetching an Operation produces no value. Hence, on the second iteration, train_step is None, which leads to the error.
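To see why, here is a minimal, self-contained sketch (TF 1.x; the toy variable w, loss, and train_op are illustrative names of my own, not from your model):

import tensorflow as tf

# Toy variable and loss, just for demonstration.
w = tf.Variable(2.0)
loss = tf.square(w)

# minimize() returns a tf.Operation, not a Tensor.
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    fetched = sess.run([loss, train_op])
    print fetched  # [4.0, None]: an Operation has no output, so its fetch is None
    # If you now rebound the name, e.g. `loss, train_op = fetched`, the next
    # sess.run(...) would receive None as a fetch and raise exactly this TypeError.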
The fix is fortunately simple:
for i in xrange(1, ITERATIONS):
    # ...
    # Discard the second element of the result.
    numpy_state, _ = sess.run([final_state, train_step], feed_dict={
        initial_state: numpy_state,
        input_sequence: batch[0],
        output_actual: batch[1]
    })
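Assigning the second element to _ (or any other throwaway name) discards the None while keeping the name train_step bound to the original training op, so every subsequent iteration can keep fetching it.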