Fix train script

Matthew Honnibal 2018-11-30 22:17:08 +00:00
parent 4aa1002546
commit 3139b020b5


@@ -144,24 +144,6 @@ def train(
     if learn_tokens:
         nlp.add_pipe(nlp.create_pipe("merge_subtokens"))
-    # Take dropout and batch size as generators of values -- dropout
-    # starts high and decays sharply, to force the optimizer to explore.
-    # Batch size starts at 1 and grows, so that we make updates quickly
-    # at the beginning of training.
-    dropout_rates = util.decaying(
-        util.env_opt("dropout_from", 0.1),
-        util.env_opt("dropout_to", 0.1),
-        util.env_opt("dropout_decay", 0.0),
-    )
-    batch_sizes = util.compounding(
-        util.env_opt("batch_from", 750),
-        util.env_opt("batch_to", 750),
-        util.env_opt("batch_compound", 1.001),
-    )
-    lang_class = util.get_lang_class(lang)
-    nlp = lang_class()
-    meta["pipeline"] = pipeline
-    nlp.meta.update(meta)
     if vectors:
         msg.text(Messages.M058.format(model=vectors))
         _load_vectors(nlp, vectors)
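
The block removed in this hunk set up dropout and batch size as infinite generators via spaCy's util.decaying and util.compounding helpers, with util.env_opt allowing the defaults to be overridden through environment variables. As a rough illustration of how those generators behave, here is a minimal sketch assuming spaCy v2.x (where both helpers live in spacy.util); the start, stop, and rate values below are illustrative and are not the ones from this commit.

from spacy import util

# Illustrative values only, not the train script's defaults.
# compounding(start, stop, compound): multiply by `compound` each step,
# clipped at `stop`, so the batch size grows from 1 toward 32.
batch_sizes = util.compounding(1.0, 32.0, 1.001)
# decaying(start, stop, decay): subtract `decay` each step, floored at
# `stop`, so dropout starts high and decays toward 0.2.
dropout_rates = util.decaying(0.6, 0.2, 1e-4)

# Both generators are infinite; zip consumes them lazily, one value per
# training step, which is how the train loop pairs them with minibatches.
for step, (size, dropout) in enumerate(zip(batch_sizes, dropout_rates)):
    if step >= 5:
        break
    print(step, round(size, 3), round(dropout, 4))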