
Commit

Rename sampled_loss argument inputs to logits in preparation for named arguments requirement
nealwu committed Mar 22, 2017
1 parent e4cbe9e commit f7cea8d
Showing 1 changed file with 2 additions and 2 deletions.
4 changes: 2 additions & 2 deletions tutorials/rnn/translate/seq2seq_model.py
@@ -100,13 +100,13 @@ def __init__(self,
       b = tf.get_variable("proj_b", [self.target_vocab_size], dtype=dtype)
       output_projection = (w, b)

-      def sampled_loss(labels, inputs):
+      def sampled_loss(labels, logits):
         labels = tf.reshape(labels, [-1, 1])
         # We need to compute the sampled_softmax_loss using 32bit floats to
         # avoid numerical instabilities.
         local_w_t = tf.cast(w_t, tf.float32)
         local_b = tf.cast(b, tf.float32)
-        local_inputs = tf.cast(inputs, tf.float32)
+        local_inputs = tf.cast(logits, tf.float32)
         return tf.cast(
             tf.nn.sampled_softmax_loss(
                 weights=local_w_t,

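The hunk above is truncated, so here is a minimal sketch of the full callback as it reads after this commit. The lines below the cut (the remaining sampled_softmax_loss arguments and the final cast) are reconstructed from the surrounding context, not copied from the file, and assume that w_t, b, num_samples, self.target_vocab_size and dtype are defined in the enclosing __init__ of seq2seq_model.py.

# Sketch only: everything after local_inputs is an assumption based on context.
def sampled_loss(labels, logits):
  labels = tf.reshape(labels, [-1, 1])
  # Compute the sampled softmax loss in 32-bit floats to avoid
  # numerical instabilities, then cast back to the model dtype.
  local_w_t = tf.cast(w_t, tf.float32)
  local_b = tf.cast(b, tf.float32)
  local_inputs = tf.cast(logits, tf.float32)
  return tf.cast(
      tf.nn.sampled_softmax_loss(
          weights=local_w_t,
          biases=local_b,
          labels=labels,
          inputs=local_inputs,
          num_sampled=num_samples,
          num_classes=self.target_vocab_size),
      dtype)

With the parameters named labels and logits, a caller can invoke the loss function with keyword arguments, e.g. softmax_loss_function(labels=target, logits=output), which appears to be the named-arguments requirement the commit message refers to.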