From 1442d2f213416f903d948199d6f1723c7a419dda Mon Sep 17 00:00:00 2001
From: Adriane Boyd
Date: Mon, 30 Nov 2020 02:39:45 +0100
Subject: [PATCH] Improve simple training example in v3 migration (#6438)

* Create the examples once

* Use the examples in the initialization

* Provide the batch size

* Fix `begin_training` migration example
---
 website/docs/usage/v3.md | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/website/docs/usage/v3.md b/website/docs/usage/v3.md
index b25b28a6d..47ddcf53a 100644
--- a/website/docs/usage/v3.md
+++ b/website/docs/usage/v3.md
@@ -969,18 +969,18 @@ The [`Language.update`](/api/language#update),
raw text and a dictionary of annotations.