Mirror of https://github.com/explosion/spaCy.git (synced 2024-11-11 04:08:09 +03:00)

commit 465adfee94 (parent 13336a6197)

    Remove unused resume_training method, and pass optimizer through
@@ -407,27 +407,7 @@ class Language(object):
         for doc, gold in docs_golds:
             yield doc, gold

-    def resume_training(self, **cfg):
-        if cfg.get('device', -1) >= 0:
-            device = util.use_gpu(cfg['device'])
-            if self.vocab.vectors.data.shape[1] >= 1:
-                self.vocab.vectors.data = Model.ops.asarray(
-                    self.vocab.vectors.data)
-        else:
-            device = None
-        learn_rate = util.env_opt('learn_rate', 0.001)
-        beta1 = util.env_opt('optimizer_B1', 0.9)
-        beta2 = util.env_opt('optimizer_B2', 0.999)
-        eps = util.env_opt('optimizer_eps', 1e-08)
-        L2 = util.env_opt('L2_penalty', 1e-6)
-        max_grad_norm = util.env_opt('grad_norm_clip', 1.)
-        self._optimizer = Adam(Model.ops, learn_rate, L2=L2, beta1=beta1,
-                               beta2=beta2, eps=eps)
-        self._optimizer.max_grad_norm = max_grad_norm
-        self._optimizer.device = device
-        return self._optimizer
-
-    def begin_training(self, get_gold_tuples=None, **cfg):
+    def begin_training(self, get_gold_tuples=None, sgd=None, **cfg):
         """Allocate models, pre-process training data and acquire a trainer and
         optimizer. Used as a contextmanager.

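This hunk folds resume_training into begin_training by giving it an optional sgd parameter. A minimal usage sketch of the resulting call pattern, assuming the spaCy 2.x / thinc 6.x API of this era (the import paths and spacy.blank are assumptions, not shown in this diff):

# Minimal usage sketch (assumed spaCy 2.x / thinc 6.x API of this era).
import spacy
from thinc.neural import Model              # assumed import path
from thinc.neural.optimizers import Adam    # assumed import path

nlp = spacy.blank('en')

# With no sgd argument, begin_training builds the default optimizer.
optimizer = nlp.begin_training()

# The new sgd keyword lets the caller supply an optimizer instead.
custom_sgd = Adam(Model.ops, 0.001)
optimizer = nlp.begin_training(sgd=custom_sgd)

# Either way, the returned optimizer is reused for later updates, e.g.:
# nlp.update(docs, golds, sgd=optimizer)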
@@ -452,21 +432,14 @@ class Language(object):
         else:
             device = None
         link_vectors_to_models(self.vocab)
+        if sgd is None:
+            sgd = create_default_optimizer(Model.ops)
+        self._optimizer = sgd
         for name, proc in self.pipeline:
             if hasattr(proc, 'begin_training'):
-                context = proc.begin_training(get_gold_tuples(),
-                                              pipeline=self.pipeline)
-                contexts.append(context)
-        learn_rate = util.env_opt('learn_rate', 0.001)
-        beta1 = util.env_opt('optimizer_B1', 0.9)
-        beta2 = util.env_opt('optimizer_B2', 0.999)
-        eps = util.env_opt('optimizer_eps', 1e-08)
-        L2 = util.env_opt('L2_penalty', 1e-6)
-        max_grad_norm = util.env_opt('grad_norm_clip', 1.)
-        self._optimizer = Adam(Model.ops, learn_rate, L2=L2, beta1=beta1,
-                               beta2=beta2, eps=eps)
-        self._optimizer.max_grad_norm = max_grad_norm
-        self._optimizer.device = device
+                proc.begin_training(get_gold_tuples(),
+                                    pipeline=self.pipeline,
+                                    sgd=self._optimizer)
         return self._optimizer

     def evaluate(self, docs_golds, verbose=False):
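The optimizer block deleted in both hunks is what the new create_default_optimizer helper replaces, so its behaviour can be sketched from the removed lines. An approximate reconstruction (the helper's real definition lives outside this diff, so this is an inference, not the actual code):

# Approximate reconstruction of create_default_optimizer, inferred from the
# inline Adam construction deleted above; not the literal helper from spaCy.
from spacy import util                       # env_opt lives here
from thinc.neural.optimizers import Adam     # assumed import path

def create_default_optimizer(ops, **cfg):
    learn_rate = util.env_opt('learn_rate', 0.001)
    beta1 = util.env_opt('optimizer_B1', 0.9)
    beta2 = util.env_opt('optimizer_B2', 0.999)
    eps = util.env_opt('optimizer_eps', 1e-08)
    L2 = util.env_opt('L2_penalty', 1e-6)
    max_grad_norm = util.env_opt('grad_norm_clip', 1.)
    optimizer = Adam(ops, learn_rate, L2=L2, beta1=beta1, beta2=beta2, eps=eps)
    optimizer.max_grad_norm = max_grad_norm
    return optimizer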