From de13fe030548acf86e759e2c16c85712ab8e30bb Mon Sep 17 00:00:00 2001
From: Matthew Honnibal
Date: Sat, 27 May 2017 08:20:32 -0500
Subject: [PATCH] Remove length cap on sentences

---
 spacy/cli/train.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/spacy/cli/train.py b/spacy/cli/train.py
index b25cdcbd5..ed146cb24 100644
--- a/spacy/cli/train.py
+++ b/spacy/cli/train.py
@@ -70,12 +70,12 @@ def train(_, lang, output_dir, train_data, dev_data, n_iter=20, n_sents=0,
 
     optimizer = nlp.begin_training(lambda: corpus.train_tuples,
                                    use_gpu=use_gpu)
-    print("Itn.\tDep. Loss\tUAS\tNER P.\tNER R.\tNER F.\tTag %\tToken %")
+    print("Itn.\tLoss\tUAS\tNER P.\tNER R.\tNER F.\tTag %\tToken %")
     try:
         for i in range(n_iter):
             with tqdm.tqdm(total=corpus.count_train(), leave=False) as pbar:
                 train_docs = corpus.train_docs(nlp, projectivize=True,
-                                               gold_preproc=False, max_length=1000)
+                                               gold_preproc=False, max_length=0)
                 losses = {}
                 for batch in minibatch(train_docs, size=batch_sizes):
                     docs, golds = zip(*batch)