diff --git a/tutorials/rnn/translate/seq2seq_model.py b/tutorials/rnn/translate/seq2seq_model.py
index 89b27f5bbea..205d3cc2382 100644
--- a/tutorials/rnn/translate/seq2seq_model.py
+++ b/tutorials/rnn/translate/seq2seq_model.py
@@ -100,13 +100,13 @@ def __init__(self,
       b = tf.get_variable("proj_b", [self.target_vocab_size], dtype=dtype)
       output_projection = (w, b)
 
-      def sampled_loss(labels, inputs):
+      def sampled_loss(labels, logits):
         labels = tf.reshape(labels, [-1, 1])
         # We need to compute the sampled_softmax_loss using 32bit floats to
         # avoid numerical instabilities.
         local_w_t = tf.cast(w_t, tf.float32)
         local_b = tf.cast(b, tf.float32)
-        local_inputs = tf.cast(inputs, tf.float32)
+        local_inputs = tf.cast(logits, tf.float32)
         return tf.cast(
             tf.nn.sampled_softmax_loss(
                 weights=local_w_t,
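
The parameter rename presumably matters because TensorFlow 1.x's legacy seq2seq helpers (e.g. tf.contrib.legacy_seq2seq.sequence_loss_by_example) invoke the softmax_loss_function hook with keyword arguments labels= and logits=, so the callback's parameter names must match even though the tensor passed in is really the pre-projection RNN output. Below is a minimal sketch of the renamed hook in isolation, assuming TensorFlow 1.x; vocab_size, hidden_size, num_samples and the placeholder tensors are hypothetical values chosen for illustration, not taken from this patch.

import tensorflow as tf  # assumes TensorFlow 1.x

vocab_size, hidden_size, num_samples = 40000, 512, 512  # hypothetical sizes

# Output-projection variables, mirroring proj_w / proj_b in the patch.
w_t = tf.get_variable("proj_w", [vocab_size, hidden_size])
b = tf.get_variable("proj_b", [vocab_size])

def sampled_loss(labels, logits):
  # `logits` is only a parameter name here; the tensor is the RNN output
  # before the output projection, which sampled_softmax_loss expects as `inputs`.
  labels = tf.reshape(labels, [-1, 1])
  return tf.nn.sampled_softmax_loss(
      weights=w_t, biases=b, labels=labels, inputs=logits,
      num_sampled=num_samples, num_classes=vocab_size)

# The legacy seq2seq code calls the hook with keyword arguments, e.g.
#   crossent = softmax_loss_function(labels=target, logits=logit)
# which is why the second parameter had to be renamed from `inputs`.
targets = tf.placeholder(tf.int32, [None])                     # target ids for one step
rnn_outputs = tf.placeholder(tf.float32, [None, hidden_size])  # pre-projection outputs
loss = sampled_loss(labels=targets, logits=rnn_outputs)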