Divide d_loss by batch size

Matthew Honnibal 2017-05-27 18:32:46 -05:00
parent b082f76494
commit 9e711c3476
2 changed files with 3 additions and 1 deletion
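
Why this change: with a summed squared-error loss, the gradient grows linearly with the number of examples in the batch, so larger batches would take proportionally larger optimization steps. Dividing d_scores by the batch size keeps the update magnitude roughly independent of batch size. A minimal NumPy sketch of the effect (not spaCy code; tagger_gradient is a hypothetical stand-in for the get_loss logic in the diffs below):

import numpy as np

def tagger_gradient(scores, truth_one_hot, normalize=True):
    # Squared-error gradient w.r.t. the scores, as in get_loss below.
    d_scores = scores - truth_one_hot
    if normalize:
        d_scores /= d_scores.shape[0]  # the change in this commit
    return d_scores

rng = np.random.default_rng(0)
for batch_size in (8, 64):
    scores = rng.random((batch_size, 5))
    truth = np.eye(5)[rng.integers(0, 5, size=batch_size)]
    d = tagger_gradient(scores, truth)
    # With the division, total gradient mass stays roughly constant across
    # batch sizes; without it, it grows linearly with batch_size.
    print(batch_size, np.abs(d).sum())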


@@ -228,6 +228,7 @@ class NeuralTagger(object):
                 idx += 1
         correct = self.model.ops.xp.array(correct, dtype='i')
         d_scores = scores - to_categorical(correct, nb_classes=scores.shape[1])
+        d_scores /= d_scores.shape[0]
         loss = (d_scores**2).sum()
         d_scores = self.model.ops.unflatten(d_scores, [len(d) for d in docs])
         return float(loss), d_scores
@@ -292,6 +293,7 @@ class NeuralLabeller(NeuralTagger):
                 idx += 1
         correct = self.model.ops.xp.array(correct, dtype='i')
         d_scores = scores - to_categorical(correct, nb_classes=scores.shape[1])
+        d_scores /= d_scores.shape[0]
         loss = (d_scores**2).sum()
         d_scores = self.model.ops.unflatten(d_scores, [len(d) for d in docs])
         return float(loss), d_scores
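
For readers outside the spaCy codebase: to_categorical (which appears to come from Thinc) one-hot encodes the correct tag ids, and ops.unflatten splits the flat per-token gradient back into one array per Doc. A rough, self-contained sketch of the pattern above, with hypothetical stand-ins for both helpers:

import numpy as np

def to_categorical(ids, nb_classes):
    # Stand-in: integer class ids -> one-hot float rows.
    return np.eye(nb_classes, dtype='f')[ids]

def unflatten(flat, lengths):
    # Stand-in for ops.unflatten: split a flat array into per-doc chunks.
    out, start = [], 0
    for n in lengths:
        out.append(flat[start:start + n])
        start += n
    return out

scores = np.random.rand(5, 3).astype('f')      # 5 tokens total, 3 tag classes
correct = np.array([0, 2, 1, 1, 0], dtype='i')
d_scores = scores - to_categorical(correct, nb_classes=scores.shape[1])
d_scores /= d_scores.shape[0]                  # the new normalization
loss = (d_scores**2).sum()                     # computed from the scaled gradient
d_scores = unflatten(d_scores, [2, 3])         # doc lengths: 2 and 3 tokens

Note that because the division happens before the loss is computed, the reported loss is scaled down as well as the gradient.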


@@ -450,7 +450,7 @@ cdef class Parser:
             scores, bp_scores = vec2scores.begin_update(vector, drop=drop)
             d_scores = self.get_batch_loss(states, golds, scores)
-            d_vector = bp_scores(d_scores, sgd=sgd)
+            d_vector = bp_scores(d_scores / d_scores.shape[0], sgd=sgd)
             if drop != 0:
                 d_vector *= mask
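
Here the normalization happens at the backward call rather than inside get_batch_loss. Since backpropagation is linear in the upstream gradient, scaling d_scores by 1/batch_size scales every downstream gradient (the returned d_vector and the weight gradients accumulated via sgd) by the same factor. A toy check of that linearity, where bp_scores is a hypothetical linear-layer backward pass, not thinc's API:

import numpy as np

W = np.random.rand(4, 3)            # toy weights: scores = vector @ W
d_scores = np.random.rand(2, 3)     # gradient w.r.t. scores, batch of 2

def bp_scores(d):
    # Toy backward pass of the linear layer above.
    return d @ W.T

lhs = bp_scores(d_scores / d_scores.shape[0])
rhs = bp_scores(d_scores) / d_scores.shape[0]
assert np.allclose(lhs, rhs)        # scaling before == scaling after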