Fix model loading when using multitask objectives

Matthew Honnibal 2018-02-17 18:11:36 +01:00
parent 262d0a3148
commit d1246c95fb
2 changed files with 2 additions and 3 deletions

@@ -892,12 +892,10 @@ cdef class DependencyParser(Parser):
         self._multitasks.append(labeller)

     def init_multitask_objectives(self, gold_tuples, pipeline, sgd=None, **cfg):
-        self.add_multitask_objective('tag')
         for labeller in self._multitasks:
             tok2vec = self.model[0]
             labeller.begin_training(gold_tuples, pipeline=pipeline,
                                     tok2vec=tok2vec, sgd=sgd)
-            pipeline.append((labeller.name, labeller))

     def __reduce__(self):
         return (DependencyParser, (self.vocab, self.moves, self.model),
@@ -919,7 +917,6 @@ cdef class EntityRecognizer(Parser):
             tok2vec = self.model[0]
             labeller.begin_training(gold_tuples, pipeline=pipeline,
                                     tok2vec=tok2vec)
-            pipeline.append((labeller.name, labeller))

     def __reduce__(self):
         return (EntityRecognizer, (self.vocab, self.moves, self.model),
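The two hunks above drop the implicit 'tag' objective and stop
init_multitask_objectives() from appending the auxiliary labellers to
pipeline; leaving them in the pipeline appears to be what left saved models
referencing components that could not be recreated on load. A minimal usage
sketch, assuming the spaCy 2.0-era training API; the dependency labels, the
temporary directory and the skipped training loop are placeholders, not part
of this commit:

# Sketch only: assumes the spaCy 2.0-era API; the labels below are
# placeholders and no gold tuples are supplied to the auxiliary objective.
import tempfile

import spacy

nlp = spacy.blank('en')
parser = nlp.create_pipe('parser')
nlp.add_pipe(parser)
for dep in (u'nsubj', u'ROOT', u'dobj'):
    parser.add_label(dep)

# Opt in to an auxiliary POS-tagging objective. It shares the parser's
# tok2vec layer and, after this commit, is no longer appended to
# nlp.pipeline, so it never appears in the saved model's metadata.
parser.add_multitask_objective('tag')

optimizer = nlp.begin_training()
# ... the usual nlp.update() training loop would run here ...

out_dir = tempfile.mkdtemp()
nlp.to_disk(out_dir)
reloaded = spacy.load(out_dir)  # the load step that previously broke
print(reloaded.pipe_names)      # ['parser'] -- no stray labeller component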

@@ -605,6 +605,8 @@ cdef class Parser:
                 break
         self._make_updates(d_tokvecs,
                            bp_tokvecs, backprops, sgd, cuda_stream)
+        for multitask in self._multitasks:
+            multitask.update(docs, golds, drop=drop, sgd=sgd)

     def update_beam(self, docs, golds, width=None, density=None,
                     drop=0., sgd=None, losses=None):
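
The nn_parser.pyx hunk is the other half of the change: because the labellers
no longer live in the pipeline, Parser.update() now drives their updates
itself, right after applying its own gradients. A toy sketch of that ownership
pattern in plain Python (ToyParser and ToyTagObjective are made-up stand-ins,
not spaCy classes):

class ToyTagObjective(object):
    """Stand-in for an auxiliary objective sharing the parser's encoder."""
    def update(self, docs, golds, drop=0.0, sgd=None):
        print('auxiliary objective saw %d docs' % len(docs))


class ToyParser(object):
    def __init__(self):
        self._multitasks = []

    def add_multitask_objective(self, objective):
        self._multitasks.append(objective)

    def update(self, docs, golds, drop=0.0, sgd=None):
        # ... the parser's own forward/backward pass and weight update ...
        # Each auxiliary objective then trains on the same batch, without
        # being registered as a separate pipeline component.
        for multitask in self._multitasks:
            multitask.update(docs, golds, drop=drop, sgd=sgd)


parser = ToyParser()
parser.add_multitask_objective(ToyTagObjective())
parser.update(['doc one', 'doc two'], [None, None])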