2020-07-22 14:42:59 +03:00
|
|
|
from typing import Optional, Dict, Any, Tuple, Union, Callable, List
|
2020-09-08 16:24:47 +03:00
|
|
|
from timeit import default_timer as timer
|
2020-06-12 03:02:07 +03:00
|
|
|
import srsly
|
2020-02-27 20:42:27 +03:00
|
|
|
import tqdm
|
2020-01-29 19:06:46 +03:00
|
|
|
from pathlib import Path
|
2020-02-27 20:42:27 +03:00
|
|
|
from wasabi import msg
|
2020-01-29 19:06:46 +03:00
|
|
|
import thinc
|
|
|
|
import thinc.schedules
|
2020-09-19 02:17:02 +03:00
|
|
|
from thinc.api import Config, Optimizer, require_gpu, fix_random_seed, set_gpu_allocator
|
2020-05-18 23:23:33 +03:00
|
|
|
import random
|
2020-07-10 18:57:40 +03:00
|
|
|
import typer
|
2020-08-14 16:00:52 +03:00
|
|
|
import logging
|
2020-01-29 19:06:46 +03:00
|
|
|
|
2020-07-11 00:34:17 +03:00
|
|
|
from ._util import app, Arg, Opt, parse_config_overrides, show_validation_error
|
2020-08-05 00:39:19 +03:00
|
|
|
from ._util import import_code, get_sourced_components
|
2020-07-22 14:42:59 +03:00
|
|
|
from ..language import Language
|
2020-02-27 20:42:27 +03:00
|
|
|
from .. import util
|
2020-09-09 11:31:03 +03:00
|
|
|
from ..training.example import Example
|
2020-05-20 12:41:12 +03:00
|
|
|
from ..errors import Errors
|
2020-09-17 12:38:59 +03:00
|
|
|
from ..util import dot_to_object
|
2020-07-10 14:31:27 +03:00
|
|
|
|
2020-06-21 14:44:00 +03:00
|
|
|
|
2020-07-10 18:57:40 +03:00
|
|
|
@app.command(
    "train", context_settings={"allow_extra_args": True, "ignore_unknown_options": True}
)
def train_cli(
    # fmt: off
    ctx: typer.Context, # This is only used to read additional arguments
    config_path: Path = Arg(..., help="Path to config file", exists=True),
    output_path: Optional[Path] = Opt(None, "--output", "--output-path", "-o", help="Output directory to store trained pipeline in"),
    code_path: Optional[Path] = Opt(None, "--code", "-c", help="Path to Python file with additional code (registered functions) to be imported"),
    verbose: bool = Opt(False, "--verbose", "-V", "-VV", help="Display more information for debugging purposes"),
    use_gpu: int = Opt(-1, "--gpu-id", "-g", help="GPU ID or -1 for CPU"),
    resume: bool = Opt(False, "--resume", "-R", help="Resume training"),
    # fmt: on
):
    """
    Train or update a spaCy pipeline. Requires data in spaCy's binary format. To
    convert data from other formats, use the `spacy convert` command. The
    config file includes all settings and hyperparameters used during training.
    To override settings in the config, e.g. settings that point to local
    paths or that you want to experiment with, you can override them as
    command line options. For instance, --training.batch_size 128 overrides
    the value of "batch_size" in the block "[training]". The --code argument
    lets you pass in a Python file that's imported before training. It can be
    used to register custom functions and architectures that can then be
    referenced in the config.

    DOCS: https://nightly.spacy.io/api/cli#train
    """
    # Keep output quiet unless --verbose was passed.
    util.logger.setLevel(logging.DEBUG if verbose else logging.ERROR)
    verify_cli_args(config_path, output_path)
    # Extra command-line args (permitted by context_settings above) become
    # dotted config overrides such as --training.batch_size.
    overrides = parse_config_overrides(ctx.args)
    # Import user code first so any functions it registers are available
    # when the config is resolved inside train().
    import_code(code_path)
    train(
        config_path,
        output_path=output_path,
        config_overrides=overrides,
        use_gpu=use_gpu,
        resume_training=resume,
    )
|
2020-01-29 19:06:46 +03:00
|
|
|
|
|
|
|
|
2020-06-03 11:00:21 +03:00
|
|
|
def train(
    config_path: Path,
    output_path: Optional[Path] = None,
    config_overrides: Dict[str, Any] = {},
    use_gpu: int = -1,
    resume_training: bool = False,
) -> None:
    """Load the config, set up the pipeline and run the training loop,
    saving checkpoints to output_path if one is given.

    config_path (Path): Path to the training config file.
    output_path (Optional[Path]): Directory for "model-best" / "model-final".
        If None, nothing is written to disk.
    config_overrides (Dict[str, Any]): Dotted-key overrides applied on top of
        the config file. NOTE(review): mutable default argument -- presumably
        load_config does not mutate it; verify.
    use_gpu (int): GPU ID, or -1 to train on CPU.
    resume_training (bool): NOTE(review): appears unused in this body --
        resuming is driven by the sourced components in the config.
    """
    if use_gpu >= 0:
        msg.info(f"Using GPU: {use_gpu}")
        require_gpu(use_gpu)
    else:
        msg.info("Using CPU")
    msg.info(f"Loading config and nlp from: {config_path}")
    # Interpolate so variable references are resolved before we read
    # individual settings below.
    with show_validation_error(config_path):
        config = util.load_config(
            config_path, overrides=config_overrides, interpolate=True
        )
    if config["training"]["seed"] is not None:
        fix_random_seed(config["training"]["seed"])
    allocator = config["training"]["gpu_allocator"]
    if use_gpu >= 0 and allocator:
        set_gpu_allocator(allocator)
    # Use original config here before it's resolved to functions
    sourced_components = get_sourced_components(config)
    with show_validation_error(config_path):
        nlp, config = util.load_model_from_config(config)
    util.load_vocab_data_into_model(nlp, lookups=config["training"]["lookups"])
    if config["training"]["vectors"] is not None:
        util.load_vectors_into_model(nlp, config["training"]["vectors"])
    raw_text, tag_map, morph_rules, weights_data = load_from_paths(config)
    T_cfg = config["training"]
    optimizer = T_cfg["optimizer"]
    train_corpus = dot_to_object(config, T_cfg["train_corpus"])
    dev_corpus = dot_to_object(config, T_cfg["dev_corpus"])
    batcher = T_cfg["batcher"]
    train_logger = T_cfg["logger"]
    before_to_disk = create_before_to_disk_callback(T_cfg["before_to_disk"])
    # Components that shouldn't be updated during training
    frozen_components = T_cfg["frozen_components"]
    # Sourced components that require resume_training
    resume_components = [p for p in sourced_components if p not in frozen_components]
    msg.info(f"Pipeline: {nlp.pipe_names}")
    if resume_components:
        with nlp.select_pipes(enable=resume_components):
            msg.info(f"Resuming training for: {resume_components}")
            nlp.resume_training(sgd=optimizer)
    # Everything that is neither frozen nor resumed is initialized fresh.
    with nlp.select_pipes(disable=[*frozen_components, *resume_components]):
        nlp.begin_training(lambda: train_corpus(nlp), sgd=optimizer)
    # Verify the config after calling 'begin_training' to ensure labels are properly initialized
    verify_config(nlp)

    if tag_map:
        # Replace tag map with provided mapping
        nlp.vocab.morphology.load_tag_map(tag_map)
    if morph_rules:
        # Load morph rules
        nlp.vocab.morphology.load_morph_exceptions(morph_rules)

    # Load pretrained tok2vec weights - cf. CLI command 'pretrain'
    if weights_data is not None:
        tok2vec_component = config["pretraining"]["component"]
        if tok2vec_component is None:
            msg.fail(
                f"To use pretrained tok2vec weights, [pretraining.component] "
                f"needs to specify the component that should load them.",
                exits=1,
            )
        layer = nlp.get_pipe(tok2vec_component).model
        tok2vec_layer = config["pretraining"]["layer"]
        if tok2vec_layer:
            layer = layer.get_ref(tok2vec_layer)
        layer.from_bytes(weights_data)
        msg.info(f"Loaded pretrained weights into component '{tok2vec_component}'")

    # Create iterator, which yields out info after each optimization step.
    msg.info("Start training")
    score_weights = T_cfg["score_weights"]
    training_step_iterator = train_while_improving(
        nlp,
        optimizer,
        create_train_batches(train_corpus(nlp), batcher, T_cfg["max_epochs"]),
        create_evaluation_callback(nlp, dev_corpus, score_weights),
        dropout=T_cfg["dropout"],
        accumulate_gradient=T_cfg["accumulate_gradient"],
        patience=T_cfg["patience"],
        max_steps=T_cfg["max_steps"],
        eval_frequency=T_cfg["eval_frequency"],
        raw_text=None,
        exclude=frozen_components,
    )
    msg.info(f"Training. Initial learn rate: {optimizer.learn_rate}")
    with nlp.select_pipes(disable=frozen_components):
        print_row, finalize_logger = train_logger(nlp)

    try:
        progress = tqdm.tqdm(total=T_cfg["eval_frequency"], leave=False)
        progress.set_description(f"Epoch 1")
        for batch, info, is_best_checkpoint in training_step_iterator:
            progress.update(1)
            # is_best_checkpoint is None between evaluations; a bool means
            # an evaluation just ran on this step.
            if is_best_checkpoint is not None:
                progress.close()
                print_row(info)
                if is_best_checkpoint and output_path is not None:
                    with nlp.select_pipes(disable=frozen_components):
                        update_meta(T_cfg, nlp, info)
                    # Serialize under the optimizer's parameter averages.
                    with nlp.use_params(optimizer.averages):
                        nlp = before_to_disk(nlp)
                        nlp.to_disk(output_path / "model-best")
                progress = tqdm.tqdm(total=T_cfg["eval_frequency"], leave=False)
                progress.set_description(f"Epoch {info['epoch']}")
    except Exception as e:
        finalize_logger()
        if output_path is not None:
            # We don't want to swallow the traceback if we don't have a
            # specific error.
            msg.warn(
                f"Aborting and saving the final best model. "
                f"Encountered exception: {str(e)}"
            )
            nlp = before_to_disk(nlp)
            nlp.to_disk(output_path / "model-final")
        raise e
    finally:
        finalize_logger()
    if output_path is not None:
        final_model_path = output_path / "model-final"
        if optimizer.averages:
            with nlp.use_params(optimizer.averages):
                nlp.to_disk(final_model_path)
        else:
            nlp.to_disk(final_model_path)
        msg.good(f"Saved pipeline to output directory {final_model_path}")
|
2020-01-29 19:06:46 +03:00
|
|
|
|
|
|
|
|
2020-08-04 16:09:37 +03:00
|
|
|
def create_train_batches(iterator, batcher, max_epochs: int):
    """Yield (epoch, batch) pairs, reshuffling the examples each epoch.

    Runs indefinitely when max_epochs is less than 1; otherwise stops after
    exactly max_epochs passes over the data. Raises a ValueError if the
    iterator yields no examples at all.
    """
    examples = list(iterator)
    if not examples:
        # No training data at all.
        raise ValueError(Errors.E986)
    epoch = 0
    while True:
        if max_epochs >= 1 and epoch == max_epochs:
            break
        random.shuffle(examples)
        for batch in batcher(examples):
            yield (epoch, batch)
        epoch += 1
|
2020-01-29 19:06:46 +03:00
|
|
|
|
|
|
|
|
2020-07-22 14:42:59 +03:00
|
|
|
def create_evaluation_callback(
    nlp: Language, dev_corpus: Callable, weights: Dict[str, float]
) -> Callable[[], Tuple[float, Dict[str, float]]]:
    """Build a zero-argument callback that evaluates the pipeline on the
    dev corpus and returns (weighted_score, scores).

    The weighted score is the sum over the score weights of score * weight;
    weight entries whose value is None are dropped up front.
    """
    weights = {name: weight for name, weight in weights.items() if weight is not None}

    def evaluate() -> Tuple[float, Dict[str, float]]:
        examples = list(dev_corpus(nlp))
        scores = nlp.evaluate(examples)
        # Only plain numbers may feed into the main score. Nested dicts
        # (e.g. per-type entity scores) are rejected with an explicit error.
        for name, value in scores.items():
            if name in weights and not isinstance(value, (int, float)):
                raise ValueError(Errors.E915.format(name=name, score_type=type(value)))
        try:
            contributions = [scores.get(s, 0.0) * weights.get(s, 0.0) for s in weights]
        except KeyError as e:
            err = Errors.E983.format(
                dict="score_weights", key=str(e), keys=list(scores.keys())
            )
            raise KeyError(err) from None
        return sum(contributions), scores

    return evaluate
|
|
|
|
|
|
|
|
|
2020-09-24 13:40:25 +03:00
|
|
|
def create_before_to_disk_callback(
    callback: Optional[Callable[[Language], Language]]
) -> Callable[[Language], Language]:
    """Wrap the optional user callback that may adjust the pipeline right
    before serialization.

    The wrapper behaves as the identity function when no callback is
    configured, and raises a ValueError if the callback returns anything
    other than a Language object.
    """

    def before_to_disk(nlp: Language) -> Language:
        if callback:
            result = callback(nlp)
            if isinstance(result, Language):
                return result
            raise ValueError(
                Errors.E914.format(name="before_to_disk", value=type(result))
            )
        return nlp

    return before_to_disk
|
|
|
|
|
|
|
|
|
2020-01-29 19:06:46 +03:00
|
|
|
def train_while_improving(
    nlp: Language,
    optimizer: Optimizer,
    train_data,
    evaluate,
    *,
    dropout: float,
    eval_frequency: int,
    accumulate_gradient: int,
    patience: int,
    max_steps: int,
    raw_text: List[Dict[str, str]],
    exclude: List[str],
):
    """Train until an evaluation stops improving. Works as a generator,
    with each iteration yielding a tuple `(batch, info, is_best_checkpoint)`,
    where info is a dict, and is_best_checkpoint is in [True, False, None] --
    None indicating that the iteration was not evaluated as a checkpoint.
    The evaluation is conducted by calling the evaluate callback.

    Positional arguments:
    nlp: The spaCy pipeline to evaluate.
    optimizer: The optimizer callable.
    train_data (Iterable[Batch]): A generator of batches, with the training
        data. Each batch should be a Sized[Tuple[Input, Annot]]. The training
        data iterable needs to take care of iterating over the epochs and
        shuffling.
    evaluate (Callable[[], Tuple[float, Any]]): A callback to perform evaluation.
        The callback should take no arguments and return a tuple
        `(main_score, other_scores)`. The main_score should be a float where
        higher is better. other_scores can be any object.

    Every iteration, the function yields out a tuple with:

    * batch: A list of Example objects.
    * info: A dict with various information about the last update (see below).
    * is_best_checkpoint: A value in None, False, True, indicating whether this
        was the best evaluation so far. You should use this to save the model
        checkpoints during training. If None, evaluation was not conducted on
        that iteration. False means evaluation was conducted, but a previous
        evaluation was better.

    The info dict provides the following information:

    epoch (int): How many passes over the data have been completed.
    step (int): How many steps have been completed.
    score (float): The main score from the last evaluation.
    other_scores: The other scores from the last evaluation.
    losses: The accumulated losses throughout training.
    checkpoints: A list of previous results, where each result is a
        (score, step, epoch) tuple.
    """
    # A constant dropout value is wrapped into a schedule so both cases can
    # be consumed with next() below.
    if isinstance(dropout, float):
        dropouts = thinc.schedules.constant(dropout)
    else:
        dropouts = dropout
    results = []
    losses = {}
    if raw_text:
        random.shuffle(raw_text)
        raw_examples = [
            Example.from_dict(nlp.make_doc(rt["text"]), {}) for rt in raw_text
        ]
        raw_batches = util.minibatch(raw_examples, size=8)

    words_seen = 0
    start_time = timer()
    for step, (epoch, batch) in enumerate(train_data):
        dropout = next(dropouts)
        # Gradients are accumulated over the sub-batches (sgd=False) and
        # only applied once per full batch further down.
        for subbatch in subdivide_batch(batch, accumulate_gradient):

            nlp.update(
                subbatch, drop=dropout, losses=losses, sgd=False, exclude=exclude
            )
            if raw_text:
                # If raw text is available, perform 'rehearsal' updates,
                # which use unlabelled data to reduce overfitting.
                raw_batch = list(next(raw_batches))
                nlp.rehearse(raw_batch, sgd=optimizer, losses=losses, exclude=exclude)
        # TODO: refactor this so we don't have to run it separately in here
        for name, proc in nlp.pipeline:
            if (
                name not in exclude
                and hasattr(proc, "model")
                and proc.model not in (True, False, None)
            ):
                proc.model.finish_update(optimizer)
        optimizer.step_schedules()
        # Evaluate every eval_frequency steps (including step 0).
        if not (step % eval_frequency):
            if optimizer.averages:
                with nlp.use_params(optimizer.averages):
                    score, other_scores = evaluate()
            else:
                score, other_scores = evaluate()
            results.append((score, step))
            is_best_checkpoint = score == max(results)[0]
        else:
            score, other_scores = (None, None)
            is_best_checkpoint = None
        words_seen += sum(len(eg) for eg in batch)
        info = {
            "epoch": epoch,
            "step": step,
            "score": score,
            "other_scores": other_scores,
            "losses": losses,
            "checkpoints": results,
            "seconds": int(timer() - start_time),
            "words": words_seen,
        }
        yield batch, info, is_best_checkpoint
        # Reset the accumulated losses after each evaluation checkpoint.
        if is_best_checkpoint is not None:
            losses = {}
        # Stop if no improvement in `patience` updates (if specified)
        best_score, best_step = max(results)
        if patience and (step - best_step) >= patience:
            break
        # Stop if we've exhausted our max steps (if specified)
        if max_steps and step >= max_steps:
            break
|
|
|
|
|
|
|
|
|
2020-05-18 23:23:33 +03:00
|
|
|
def subdivide_batch(batch, accumulate_gradient):
    """Split a batch into accumulate_gradient roughly equal sub-batches.

    Examples are sorted by the length of their predicted doc first, so each
    sub-batch holds documents of similar size. Any remainder left over after
    the equal splits is yielded as one final sub-batch; empty slices are
    skipped entirely.
    """
    examples = sorted(batch, key=lambda eg: len(eg.predicted))
    chunk_size = len(examples) // accumulate_gradient
    cursor = 0
    for _ in range(accumulate_gradient):
        piece = examples[cursor : cursor + chunk_size]
        if piece:
            yield piece
        cursor += len(piece)
    tail = examples[cursor:]
    if tail:
        yield tail
|
2020-01-29 19:06:46 +03:00
|
|
|
|
|
|
|
|
2020-07-22 14:42:59 +03:00
|
|
|
def update_meta(
    training: Union[Dict[str, Any], Config], nlp: Language, info: Dict[str, Any]
) -> None:
    """Write the latest evaluation metrics and per-component losses into
    nlp.meta["performance"], replacing whatever was stored before.

    training: The resolved [training] config section (reads "score_weights").
    nlp: The pipeline whose meta is updated in place.
    info: The info dict yielded by train_while_improving.
    """
    performance = {}
    # Assign first so entries are filled into the live meta dict, matching
    # the previous in-place behavior.
    nlp.meta["performance"] = performance
    for metric in training["score_weights"]:
        # Skip placeholder None entries among the weighted metrics.
        if metric is not None:
            performance[metric] = info["other_scores"].get(metric, 0.0)
    for pipe_name in nlp.pipe_names:
        performance[f"{pipe_name}_loss"] = info["losses"][pipe_name]
|
2020-06-26 20:34:12 +03:00
|
|
|
|
|
|
|
|
2020-07-22 14:42:59 +03:00
|
|
|
def load_from_paths(
    config: Config,
) -> Tuple[Optional[List[Dict[str, str]]], Dict[str, dict], Dict[str, dict], Optional[bytes]]:
    """Load optional on-disk resources referenced in the [training] section.

    Returns a 4-tuple (raw_text, tag_map, morph_rules, weights_data):
    raw_text is the parsed JSONL records or None if unset; tag_map and
    morph_rules are currently always empty dicts (no config keys load them
    here); weights_data is the raw bytes of the pretrained tok2vec weights
    or None. Missing files abort via msg.fail with exit code 1.
    """
    # TODO: separate checks from loading
    raw_text = util.ensure_path(config["training"]["raw_text"])
    if raw_text is not None:
        if not raw_text.exists():
            msg.fail("Can't find raw text", raw_text, exits=1)
        raw_text = list(srsly.read_jsonl(config["training"]["raw_text"]))
    tag_map = {}
    morph_rules = {}
    weights_data = None
    init_tok2vec = util.ensure_path(config["training"]["init_tok2vec"])
    if init_tok2vec is not None:
        if not init_tok2vec.exists():
            msg.fail("Can't find pretrained tok2vec", init_tok2vec, exits=1)
        with init_tok2vec.open("rb") as file_:
            weights_data = file_.read()
    return raw_text, tag_map, morph_rules, weights_data
|
2020-07-10 14:31:27 +03:00
|
|
|
|
|
|
|
|
2020-08-26 16:24:33 +03:00
|
|
|
def verify_cli_args(config_path: Path, output_path: Optional[Path] = None) -> None:
    """Validate the CLI paths before training starts.

    Exits with an error message if the config file is missing; creates the
    output directory (and reports it) if one was given but doesn't exist.
    """
    config_missing = not config_path or not config_path.exists()
    if config_missing:
        msg.fail("Config file not found", config_path, exits=1)
    if output_path is not None and not output_path.exists():
        output_path.mkdir()
        msg.good(f"Created output directory: {output_path}")
|
|
|
|
|
|
|
|
|
2020-07-22 14:42:59 +03:00
|
|
|
def verify_config(nlp: Language) -> None:
    """Run extra, component-specific sanity checks on the loaded pipeline.

    Currently only textcat components receive additional validation.
    """
    # TODO: maybe we should validate based on the actual components, the list
    # in config["nlp"]["pipeline"] instead?
    components = nlp.config["components"]
    for pipe_config in components.values():
        # A component's name isn't necessarily its factory name, so check
        # the factory setting itself.
        if pipe_config["factory"] == "textcat":
            verify_textcat_config(nlp, pipe_config)
|
|
|
|
|
|
|
|
|
|
|
|
def verify_textcat_config(nlp: Language, pipe_config: Dict[str, Any]) -> None:
    """Validate a textcat component's 'positive_label' setting, if present.

    The positive label must be one of the component's labels, and the label
    set must be binary (exactly two labels); otherwise a ValueError is
    raised with the matching error code.
    """
    pos_label = pipe_config.get("positive_label")
    if not pos_label:
        # Nothing to check when no positive label is configured.
        return
    labels = nlp.get_pipe("textcat").labels
    if pos_label not in labels:
        raise ValueError(Errors.E920.format(pos_label=pos_label, labels=labels))
    if len(list(labels)) != 2:
        raise ValueError(Errors.E919.format(pos_label=pos_label, labels=labels))
|