diff --git a/MANIFEST.in b/MANIFEST.in
index 6502ff607..ef42138f1 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,8 +1,9 @@
 recursive-include include *.h
-recursive-include spacy *.pyx *.pxd *.txt *.cfg
+recursive-include spacy *.pyx *.pxd *.txt *.cfg *.jinja
 include LICENSE
 include README.md
 include pyproject.toml
 recursive-exclude spacy/lang *.json
 recursive-include spacy/lang *.json.gz
+recursive-include spacy/cli *.json
 recursive-include licenses *
diff --git a/pyproject.toml b/pyproject.toml
index d4aa25943..1b4972bd5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -6,7 +6,7 @@ requires = [
     "cymem>=2.0.2,<2.1.0",
     "preshed>=3.0.2,<3.1.0",
     "murmurhash>=0.28.0,<1.1.0",
-    "thinc>=8.0.0a23,<8.0.0a30",
+    "thinc>=8.0.0a27,<8.0.0a30",
     "blis>=0.4.0,<0.5.0",
     "pytokenizations",
     "smart_open>=2.0.0,<3.0.0"
diff --git a/requirements.txt b/requirements.txt
index 4bb62742d..b4901a692 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,7 +1,7 @@
 # Our libraries
 cymem>=2.0.2,<2.1.0
 preshed>=3.0.2,<3.1.0
-thinc>=8.0.0a23,<8.0.0a30
+thinc>=8.0.0a27,<8.0.0a30
 blis>=0.4.0,<0.5.0
 ml_datasets>=0.1.1
 murmurhash>=0.28.0,<1.1.0
@@ -26,3 +26,4 @@ pytest>=4.6.5
 pytest-timeout>=1.3.0,<2.0.0
 mock>=2.0.0,<3.0.0
 flake8>=3.5.0,<3.6.0
+jinja2
diff --git a/setup.cfg b/setup.cfg
index f9da1adb9..a34c34e23 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -34,13 +34,13 @@ setup_requires =
     cymem>=2.0.2,<2.1.0
     preshed>=3.0.2,<3.1.0
     murmurhash>=0.28.0,<1.1.0
-    thinc>=8.0.0a23,<8.0.0a30
+    thinc>=8.0.0a27,<8.0.0a30
 install_requires =
     # Our libraries
     murmurhash>=0.28.0,<1.1.0
     cymem>=2.0.2,<2.1.0
     preshed>=3.0.2,<3.1.0
-    thinc>=8.0.0a23,<8.0.0a30
+    thinc>=8.0.0a27,<8.0.0a30
     blis>=0.4.0,<0.5.0
     wasabi>=0.7.1,<1.1.0
     srsly>=2.1.0,<3.0.0
diff --git a/spacy/__init__.py b/spacy/__init__.py
index 73e828936..d07ee5674 100644
--- a/spacy/__init__.py
+++ b/spacy/__init__.py
@@ -14,7 +14,7 @@ from . import pipeline  # noqa: F401
 from .cli.info import info  # noqa: F401
 from .glossary import explain  # noqa: F401
 from .about import __version__  # noqa: F401
-from .util import registry  # noqa: F401
+from .util import registry, logger  # noqa: F401

 from .errors import Errors
 from .language import Language
diff --git a/spacy/about.py b/spacy/about.py
index eb4d2128c..5ed46bbe4 100644
--- a/spacy/about.py
+++ b/spacy/about.py
@@ -1,6 +1,6 @@
 # fmt: off
 __title__ = "spacy-nightly"
-__version__ = "3.0.0a6"
+__version__ = "3.0.0a7"
 __release__ = True
 __download_url__ = "https://github.com/explosion/spacy-models/releases/download"
 __compatibility__ = "https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json"
diff --git a/spacy/cli/__init__.py b/spacy/cli/__init__.py
index bc47ffdef..2b21e2f2b 100644
--- a/spacy/cli/__init__.py
+++ b/spacy/cli/__init__.py
@@ -15,7 +15,7 @@ from .debug_model import debug_model  # noqa: F401
 from .evaluate import evaluate  # noqa: F401
 from .convert import convert  # noqa: F401
 from .init_model import init_model  # noqa: F401
-from .init_config import init_config  # noqa: F401
+from .init_config import init_config, fill_config  # noqa: F401
 from .validate import validate  # noqa: F401
 from .project.clone import project_clone  # noqa: F401
 from .project.assets import project_assets  # noqa: F401
diff --git a/spacy/cli/_util.py b/spacy/cli/_util.py
index 93ec9f31e..5613fa317 100644
--- a/spacy/cli/_util.py
+++ b/spacy/cli/_util.py
@@ -179,13 +179,13 @@ def show_validation_error(
     file_path: Optional[Union[str, Path]] = None,
     *,
     title: str = "Config validation error",
-    hint_init: bool = True,
+    hint_fill: bool = True,
 ):
     """Helper to show custom config validation errors on the CLI.

     file_path (str / Path): Optional file path of config file, used in hints.
     title (str): Title of the custom formatted error.
-    hint_init (bool): Show hint about filling config.
+    hint_fill (bool): Show hint about filling config.
     """
     try:
         yield
@@ -195,14 +195,14 @@ def show_validation_error(
         # helper for this in Thinc
         err_text = str(e).replace("Config validation error", "").strip()
         print(err_text)
-        if hint_init and "field required" in err_text:
+        if hint_fill and "field required" in err_text:
             config_path = file_path if file_path is not None else "config.cfg"
             msg.text(
                 "If your config contains missing values, you can run the 'init "
-                "config' command to fill in all the defaults, if possible:",
+                "fill-config' command to fill in all the defaults, if possible:",
                 spaced=True,
             )
-            print(f"{COMMAND} init config {config_path} --base {config_path}\n")
+            print(f"{COMMAND} init fill-config {config_path} --base {config_path}\n")
         sys.exit(1)
diff --git a/spacy/cli/debug_data.py b/spacy/cli/debug_data.py
index 6c8c85e30..27cf033c4 100644
--- a/spacy/cli/debug_data.py
+++ b/spacy/cli/debug_data.py
@@ -5,7 +5,6 @@ import sys
 import srsly
 from wasabi import Printer, MESSAGES, msg, diff_strings
 import typer
-from thinc.api import Config

 from ._util import app, Arg, Opt, show_validation_error, parse_config_overrides
 from ._util import import_code, debug_cli, get_sourced_components
@@ -49,11 +48,8 @@ def debug_config_cli(
     overrides = parse_config_overrides(ctx.args)
     import_code(code_path)
     with show_validation_error(config_path):
-        config = Config().from_disk(config_path, overrides=overrides)
-        try:
-            nlp, _ = util.load_model_from_config(config, auto_fill=auto_fill)
-        except ValueError as e:
-            msg.fail(str(e), exits=1)
+        config = util.load_config(config_path, overrides=overrides)
+        nlp, _ = util.load_model_from_config(config, auto_fill=auto_fill)
     if auto_fill:
         orig_config = config.to_str()
         filled_config = nlp.config.to_str()
@@ -134,7 +130,7 @@ def debug_data(
     if not config_path.exists():
         msg.fail("Config file not found", config_path, exits=1)
     with show_validation_error(config_path):
-        cfg = Config().from_disk(config_path, overrides=config_overrides)
+        cfg = util.load_config(config_path, overrides=config_overrides)
         nlp, config = util.load_model_from_config(cfg)
         # Use original config here, not resolved version
         sourced_components = get_sourced_components(cfg)
diff --git a/spacy/cli/debug_model.py b/spacy/cli/debug_model.py
index cc6cb98ea..604a5676a 100644
--- a/spacy/cli/debug_model.py
+++ b/spacy/cli/debug_model.py
@@ -1,7 +1,7 @@
 from typing import Dict, Any, Optional
 from pathlib import Path
 from wasabi import msg
-from thinc.api import require_gpu, fix_random_seed, set_dropout_rate, Adam, Config
+from thinc.api import require_gpu, fix_random_seed, set_dropout_rate, Adam
 from thinc.api import Model, data_validation
 import typer

@@ -49,16 +49,12 @@ def debug_model_cli(
     }
     config_overrides = parse_config_overrides(ctx.args)
     with show_validation_error(config_path):
-        cfg = Config().from_disk(config_path, overrides=config_overrides)
-        try:
-            nlp, config = util.load_model_from_config(cfg)
-        except ValueError as e:
-            msg.fail(str(e), exits=1)
+        config = util.load_config(config_path, overrides=config_overrides)
+        nlp, config = util.load_model_from_config(config)
     seed = config["pretraining"]["seed"]
     if seed is not None:
         msg.info(f"Fixing random seed: {seed}")
         fix_random_seed(seed)
-
     pipe = nlp.get_pipe(component)
     if hasattr(pipe, "model"):
         model = pipe.model
diff --git a/spacy/cli/evaluate.py b/spacy/cli/evaluate.py
index cf77fecfd..cf8f513fc 100644
--- a/spacy/cli/evaluate.py
+++ b/spacy/cli/evaluate.py
@@ -60,7 +60,6 @@ def evaluate(
     fix_random_seed()
     if use_gpu >= 0:
         require_gpu(use_gpu)
-    util.set_env_log(False)
     data_path =
util.ensure_path(data_path) output_path = util.ensure_path(output) displacy_path = util.ensure_path(displacy_path) diff --git a/spacy/cli/init_config.py b/spacy/cli/init_config.py index 01664ee40..7d80eb289 100644 --- a/spacy/cli/init_config.py +++ b/spacy/cli/init_config.py @@ -1,81 +1,178 @@ -from typing import Optional, List +from typing import Optional, List, Tuple +from enum import Enum from pathlib import Path +from wasabi import Printer, diff_strings from thinc.api import Config -from wasabi import msg +from pydantic import BaseModel +import srsly +import re -from ..util import load_model_from_config, get_lang_class, load_model -from ._util import init_cli, Arg, Opt, show_validation_error +from .. import util +from ._util import init_cli, Arg, Opt, show_validation_error, COMMAND + + +TEMPLATE_ROOT = Path(__file__).parent / "templates" +TEMPLATE_PATH = TEMPLATE_ROOT / "quickstart_training.jinja" +RECOMMENDATIONS_PATH = TEMPLATE_ROOT / "quickstart_training_recommendations.json" + + +class Optimizations(str, Enum): + efficiency = "efficiency" + accuracy = "accuracy" + + +class RecommendationsTrfItem(BaseModel): + name: str + size_factor: int + + +class RecommendationsTrf(BaseModel): + efficiency: RecommendationsTrfItem + accuracy: RecommendationsTrfItem + + +class RecommendationSchema(BaseModel): + word_vectors: Optional[str] = None + transformer: Optional[RecommendationsTrf] = None @init_cli.command("config") def init_config_cli( # fmt: off - output_path: Path = Arg("-", help="Output path or - for stdout", allow_dash=True), - base_path: Optional[Path] = Opt(None, "--base", "-b", help="Optional base config to fill", exists=True, dir_okay=False), - model: Optional[str] = Opt(None, "--model", "-m", help="Optional model to copy config from"), - lang: Optional[str] = Opt(None, "--lang", "-l", help="Optional language code for blank config"), - pipeline: Optional[str] = Opt(None, "--pipeline", "-p", help="Optional pipeline components to use") + output_file: Path = Arg("-", help="File to save config.cfg to (or - for stdout)", allow_dash=True), + lang: Optional[str] = Opt("en", "--lang", "-l", help="Two-letter code of the language to use"), + pipeline: Optional[str] = Opt("tagger,parser,ner", "--pipeline", "-p", help="Comma-separated names of trainable pipeline components to include in the model (without 'tok2vec' or 'transformer')"), + optimize: Optimizations = Opt(Optimizations.efficiency.value, "--optimize", "-o", help="Whether to optimize for efficiency (faster inference, smaller model, lower memory consumption) or higher accuracy (potentially larger and slower model). This will impact the choice of architecture, pretrained weights and related hyperparameters."), + cpu: bool = Opt(False, "--cpu", "-C", help="Whether the model needs to run on CPU. This will impact the choice of architecture, pretrained weights and related hyperparameters."), # fmt: on ): - """Generate a starter config.cfg for training.""" - validate_cli_args(base_path, model, lang) - is_stdout = str(output_path) == "-" - pipeline = [p.strip() for p in pipeline.split(",")] if pipeline else [] - cfg = init_config(output_path, base_path, model, lang, pipeline, silent=is_stdout) - if is_stdout: - print(cfg.to_str()) + """ + Generate a starter config.cfg for training. Based on your requirements + specified via the CLI arguments, this command generates a config with the + optimal settings for you use case. This includes the choice of architecture, + pretrained weights and related hyperparameters. 
+ """ + if isinstance(optimize, Optimizations): # instance of enum from the CLI + optimize = optimize.value + pipeline = [p.strip() for p in pipeline.split(",")] + init_config(output_file, lang=lang, pipeline=pipeline, optimize=optimize, cpu=cpu) + + +@init_cli.command("fill-config") +def init_fill_config_cli( + # fmt: off + base_path: Path = Arg(..., help="Base config to fill", exists=True, dir_okay=False), + output_file: Path = Arg("-", help="File to save config.cfg to (or - for stdout)", allow_dash=True), + diff: bool = Opt(False, "--diff", "-D", help="Print a visual diff highlighting the changes") + # fmt: on +): + """ + Fill partial config.cfg with default values. Will add all missing settings + from the default config and will create all objects, check the registered + functions for their default values and update the base config. This command + can be used with a config generated via the training quickstart widget: + https://nightly.spacy.io/usage/training#quickstart + """ + fill_config(output_file, base_path, diff=diff) + + +def fill_config( + output_file: Path, base_path: Path, *, diff: bool = False +) -> Tuple[Config, Config]: + is_stdout = str(output_file) == "-" + msg = Printer(no_print=is_stdout) + with show_validation_error(hint_fill=False): + config = util.load_config(base_path) + nlp, _ = util.load_model_from_config(config, auto_fill=True) + before = config.to_str() + after = nlp.config.to_str() + if before == after: + msg.warn("Nothing to auto-fill: base config is already complete") else: - cfg.to_disk(output_path) - msg.good("Saved config", output_path) + msg.good("Auto-filled config with all values") + if diff and not is_stdout: + if before == after: + msg.warn("No diff to show: nothing was auto-filled") + else: + msg.divider("START CONFIG DIFF") + print("") + print(diff_strings(before, after)) + msg.divider("END CONFIG DIFF") + print("") + save_config(nlp.config, output_file, is_stdout=is_stdout) def init_config( - output_path: Path, - config_path: Optional[Path], - model: Optional[str], - lang: Optional[str], - pipeline: Optional[List[str]], - silent: bool = False, -) -> Config: - if config_path is not None: - msg.info("Generating config from base config", show=not silent) - with show_validation_error(config_path, hint_init=False): - config = Config().from_disk(config_path) - try: - nlp, _ = load_model_from_config(config, auto_fill=True) - except ValueError as e: - msg.fail(str(e), exits=1) - return nlp.config - if model is not None: - ext = f" with pipeline {pipeline}" if pipeline else "" - msg.info(f"Generating config from model {model}{ext}", show=not silent) - nlp = load_model(model) - for existing_pipe_name in nlp.pipe_names: - if existing_pipe_name not in pipeline: - nlp.remove_pipe(existing_pipe_name) - for pipe_name in pipeline: - if pipe_name not in nlp.pipe_names: - nlp.add_pipe(pipe_name) - return nlp.config - if lang is not None: - ext = f" with pipeline {pipeline}" if pipeline else "" - msg.info(f"Generating config for language '{lang}'{ext}", show=not silent) - nlp = get_lang_class(lang)() - for pipe_name in pipeline: - nlp.add_pipe(pipe_name) - return nlp.config - - -def validate_cli_args( - config_path: Optional[Path], model: Optional[str], lang: Optional[str] + output_file: Path, *, lang: str, pipeline: List[str], optimize: str, cpu: bool ) -> None: - args = {"--base": config_path, "--model": model, "--lang": lang} - if sum(arg is not None for arg in args.values()) != 1: - existing = " ".join(f"{a} {v}" for a, v in args.items() if v is not None) + 
is_stdout = str(output_file) == "-" + msg = Printer(no_print=is_stdout) + try: + from jinja2 import Template + except ImportError: + msg.fail("This command requires jinja2", "pip install jinja2", exits=1) + recommendations = srsly.read_json(RECOMMENDATIONS_PATH) + lang_defaults = util.get_lang_class(lang).Defaults + has_letters = lang_defaults.writing_system.get("has_letters", True) + # Filter out duplicates since tok2vec and transformer are added by template + pipeline = [pipe for pipe in pipeline if pipe not in ("tok2vec", "transformer")] + reco = RecommendationSchema(**recommendations.get(lang, {})).dict() + with TEMPLATE_PATH.open("r") as f: + template = Template(f.read()) + variables = { + "lang": lang, + "components": pipeline, + "optimize": optimize, + "hardware": "cpu" if cpu else "gpu", + "transformer_data": reco["transformer"], + "word_vectors": reco["word_vectors"], + "has_letters": has_letters, + } + base_template = template.render(variables).strip() + # Giving up on getting the newlines right in jinja for now + base_template = re.sub(r"\n\n\n+", "\n\n", base_template) + # Access variables declared in templates + template_vars = template.make_module(variables) + use_case = { + "Language": lang, + "Pipeline": ", ".join(pipeline), + "Optimize for": optimize, + "Hardware": variables["hardware"].upper(), + "Transformer": template_vars.transformer.get("name", False), + } + msg.info("Generated template specific for your use case") + for label, value in use_case.items(): + msg.text(f"- {label}: {value}") + use_transformer = bool(template_vars.use_transformer) + if use_transformer: + require_spacy_transformers(msg) + with show_validation_error(hint_fill=False): + config = util.load_config_from_str(base_template) + nlp, _ = util.load_model_from_config(config, auto_fill=True) + if use_transformer: + nlp.config.pop("pretraining", {}) # TODO: solve this better + msg.good("Auto-filled config with all values") + save_config(nlp.config, output_file, is_stdout=is_stdout) + + +def save_config(config: Config, output_file: Path, is_stdout: bool = False) -> None: + msg = Printer(no_print=is_stdout) + if is_stdout: + print(config.to_str()) + else: + config.to_disk(output_file, interpolate=False) + msg.good("Saved config", output_file) + msg.text("You can now add your data and train your model:") + variables = ["--paths.train ./train.spacy", "--paths.dev ./dev.spacy"] + print(f"{COMMAND} train {output_file.parts[-1]} {' '.join(variables)}") + + +def require_spacy_transformers(msg: Printer) -> None: + try: + import spacy_transformers # noqa: F401 + except ImportError: msg.fail( - "The init config command expects only one of the following arguments: " - "--base (base config to fill and update), --lang (language code to " - "use for blank config) or --model (base model to copy config from).", - f"Got: {existing if existing else 'no arguments'}", + "Using a transformer-based pipeline requires spacy-transformers " + "to be installed.", exits=1, ) diff --git a/spacy/cli/pretrain.py b/spacy/cli/pretrain.py index ce0eb27a0..82950f402 100644 --- a/spacy/cli/pretrain.py +++ b/spacy/cli/pretrain.py @@ -5,7 +5,7 @@ import time import re from collections import Counter from pathlib import Path -from thinc.api import use_pytorch_for_gpu_memory, require_gpu, Config +from thinc.api import use_pytorch_for_gpu_memory, require_gpu from thinc.api import set_dropout_rate, to_categorical, fix_random_seed from thinc.api import CosineDistance, L2Distance from wasabi import msg @@ -88,7 +88,7 @@ def pretrain( 
msg.info("Using CPU") msg.info(f"Loading config from: {config_path}") with show_validation_error(config_path): - config = Config().from_disk(config_path, overrides=config_overrides) + config = util.load_config(config_path, overrides=config_overrides) nlp, config = util.load_model_from_config(config) # TODO: validate that [pretraining] block exists if not output_dir.exists(): diff --git a/spacy/cli/templates/quickstart_training.jinja b/spacy/cli/templates/quickstart_training.jinja new file mode 100644 index 000000000..4f5a2226e --- /dev/null +++ b/spacy/cli/templates/quickstart_training.jinja @@ -0,0 +1,237 @@ +{# This is a template for training configs used for the quickstart widget in +the docs and the init config command. It encodes various best practices and +can help generate the best possible configuration, given a user's requirements. #} +{%- set use_transformer = (transformer_data and hardware != "cpu") -%} +{%- set transformer = transformer_data[optimize] if use_transformer else {} -%} +[paths] +train = "" +dev = "" + +[system] +use_pytorch_for_gpu_memory = {{ "true" if use_transformer else "false" }} + +[nlp] +lang = "{{ lang }}" +{%- set full_pipeline = ["transformer" if use_transformer else "tok2vec"] + components %} +pipeline = {{ full_pipeline|pprint()|replace("'", '"')|safe }} +tokenizer = {"@tokenizers": "spacy.Tokenizer.v1"} + +[components] + +{# TRANSFORMER PIPELINE #} +{%- if use_transformer -%} +[components.transformer] +factory = "transformer" + +[components.transformer.model] +@architectures = "spacy-transformers.TransformerModel.v1" +name = "{{ transformer["name"] }}" +tokenizer_config = {"use_fast": true} + +[components.transformer.model.get_spans] +@span_getters = "strided_spans.v1" +window = 128 +stride = 96 + +{% if "tagger" in components %} +[components.tagger] +factory = "tagger" + +[components.tagger.model] +@architectures = "spacy.Tagger.v1" +nO = null + +[components.tagger.model.tok2vec] +@architectures = "spacy-transformers.Tok2VecListener.v1" +grad_factor = 1.0 + +[components.tagger.model.tok2vec.pooling] +@layers = "reduce_mean.v1" +{%- endif %} + +{% if "parser" in components -%} +[components.parser] +factory = "parser" + +[components.parser.model] +@architectures = "spacy.TransitionBasedParser.v1" +nr_feature_tokens = 8 +hidden_width = 128 +maxout_pieces = 3 +use_upper = false +nO = null + +[components.parser.model.tok2vec] +@architectures = "spacy-transformers.Tok2VecListener.v1" +grad_factor = 1.0 + +[components.parser.model.tok2vec.pooling] +@layers = "reduce_mean.v1" +{%- endif %} + +{% if "ner" in components -%} +[components.ner] +factory = "ner" + +[components.ner.model] +@architectures = "spacy.TransitionBasedParser.v1" +nr_feature_tokens = 3 +hidden_width = 64 +maxout_pieces = 2 +use_upper = false +nO = null + +[components.ner.model.tok2vec] +@architectures = "spacy-transformers.Tok2VecListener.v1" +grad_factor = 1.0 + +[components.ner.model.tok2vec.pooling] +@layers = "reduce_mean.v1" +{% endif -%} + +{# NON-TRANSFORMER PIPELINE #} +{% else -%} + +{%- if hardware == "gpu" -%} +# There are no recommended transformer weights available for language '{{ lang }}' +# yet, so the pipeline described here is not transformer-based. 
+{%- endif %} + +[components.tok2vec] +factory = "tok2vec" + +[components.tok2vec.model] +@architectures = "spacy.Tok2Vec.v1" + +[components.tok2vec.model.embed] +@architectures = "spacy.MultiHashEmbed.v1" +width = ${components.tok2vec.model.encode:width} +rows = {{ 2000 if optimize == "efficiency" else 7000 }} +also_embed_subwords = {{ true if has_letters else false }} +also_use_static_vectors = {{ true if optimize == "accuracy" else false }} + +[components.tok2vec.model.encode] +@architectures = "spacy.MaxoutWindowEncoder.v1" +width = {{ 96 if optimize == "efficiency" else 256 }} +depth = {{ 4 if optimize == "efficiency" else 8 }} +window_size = 1 +maxout_pieces = 3 + +{% if "tagger" in components %} +[components.tagger] +factory = "tagger" + +[components.tagger.model] +@architectures = "spacy.Tagger.v1" +nO = null + +[components.tagger.model.tok2vec] +@architectures = "spacy.Tok2VecListener.v1" +width = ${components.tok2vec.model.encode:width} +{%- endif %} + +{% if "parser" in components -%} +[components.parser] +factory = "parser" + +[components.parser.model] +@architectures = "spacy.TransitionBasedParser.v1" +nr_feature_tokens = 8 +hidden_width = 128 +maxout_pieces = 3 +use_upper = true +nO = null + +[components.parser.model.tok2vec] +@architectures = "spacy.Tok2VecListener.v1" +width = ${components.tok2vec.model.encode:width} +{%- endif %} + +{% if "ner" in components %} +[components.ner] +factory = "ner" + +[components.ner.model] +@architectures = "spacy.TransitionBasedParser.v1" +nr_feature_tokens = 6 +hidden_width = 64 +maxout_pieces = 2 +use_upper = true +nO = null + +[components.ner.model.tok2vec] +@architectures = "spacy.Tok2VecListener.v1" +width = ${components.tok2vec.model.encode:width} +{% endif %} +{% endif %} + +{% for pipe in components %} +{% if pipe not in ["tagger", "parser", "ner"] %} +{# Other components defined by the user: we just assume they're factories #} +[components.{{ pipe }}] +factory = "{{ pipe }}" +{% endif %} +{% endfor %} + +[training] +{% if use_transformer or optimize == "efficiency" or not word_vectors -%} +vectors = null +{% else -%} +vectors = "{{ word_vectors }}" +{% endif -%} +{% if use_transformer -%} +accumulate_gradient = {{ transformer["size_factor"] }} +{% endif %} + +[training.optimizer] +@optimizers = "Adam.v1" + +[training.optimizer.learn_rate] +@schedules = "warmup_linear.v1" +warmup_steps = 250 +total_steps = 20000 +initial_rate = 5e-5 + +[training.train_corpus] +@readers = "spacy.Corpus.v1" +path = ${paths:train} +max_length = {{ 500 if hardware == "gpu" else 0 }} + +[training.dev_corpus] +@readers = "spacy.Corpus.v1" +path = ${paths:dev} +max_length = 0 + +{% if use_transformer %} +[training.batcher] +@batchers = "batch_by_padded.v1" +discard_oversize = true +size = 2000 +buffer = 256 +{%- else %} +[training.batcher] +@batchers = "batch_by_words.v1" +discard_oversize = false +tolerance = 0.2 + +[training.batcher.size] +@schedules = "compounding.v1" +start = 100 +stop = 1000 +compound = 1.001 +{% endif %} + +[training.score_weights] +{%- if "tagger" in components %} +tag_acc = {{ (1.0 / components|length)|round(2) }} +{%- endif -%} +{%- if "parser" in components %} +dep_uas = 0.0 +dep_las = {{ (1.0 / components|length)|round(2) }} +sents_f = 0.0 +{%- endif %} +{%- if "ner" in components %} +ents_f = {{ (1.0 / components|length)|round(2) }} +ents_p = 0.0 +ents_r = 0.0 +{%- endif -%} diff --git a/spacy/cli/templates/quickstart_training_recommendations.json b/spacy/cli/templates/quickstart_training_recommendations.json new file mode 
100644 index 000000000..8a3acc438 --- /dev/null +++ b/spacy/cli/templates/quickstart_training_recommendations.json @@ -0,0 +1,13 @@ +{ + "en": { + "word_vectors": "en_vectors_web_lg", + "transformer": { + "efficiency": { "name": "roberta-base", "size_factor": 3 }, + "accuracy": { "name": "roberta-base", "size_factor": 3 } + } + }, + "de": { + "word_vectors": null, + "transformer": null + } +} diff --git a/spacy/cli/train.py b/spacy/cli/train.py index 32d22d1bc..375e64ffd 100644 --- a/spacy/cli/train.py +++ b/spacy/cli/train.py @@ -9,6 +9,7 @@ from thinc.api import use_pytorch_for_gpu_memory, require_gpu, fix_random_seed from thinc.api import Config, Optimizer import random import typer +import logging from ._util import app, Arg, Opt, parse_config_overrides, show_validation_error from ._util import import_code, get_sourced_components @@ -17,7 +18,6 @@ from .. import util from ..gold.example import Example from ..errors import Errors - # Don't remove - required to load the built-in architectures from ..ml import models # noqa: F401 @@ -48,7 +48,7 @@ def train_cli( used to register custom functions and architectures that can then be referenced in the config. """ - util.set_env_log(verbose) + util.logger.setLevel(logging.DEBUG if verbose else logging.ERROR) verify_cli_args(config_path, output_path) overrides = parse_config_overrides(ctx.args) import_code(code_path) @@ -75,7 +75,7 @@ def train( msg.info("Using CPU") msg.info(f"Loading config and nlp from: {config_path}") with show_validation_error(config_path): - config = Config().from_disk(config_path, overrides=config_overrides) + config = util.load_config(config_path, overrides=config_overrides) if config.get("training", {}).get("seed") is not None: fix_random_seed(config["training"]["seed"]) # Use original config here before it's resolved to functions @@ -102,9 +102,9 @@ def train( if resume_components: with nlp.select_pipes(enable=resume_components): msg.info(f"Resuming training for: {resume_components}") - nlp.resume_training() + nlp.resume_training(sgd=optimizer) with nlp.select_pipes(disable=[*frozen_components, *resume_components]): - nlp.begin_training(lambda: train_corpus(nlp)) + nlp.begin_training(lambda: train_corpus(nlp), sgd=optimizer) if tag_map: # Replace tag map with provided mapping @@ -295,7 +295,11 @@ def train_while_improving( nlp.rehearse(raw_batch, sgd=optimizer, losses=losses, exclude=exclude) # TODO: refactor this so we don't have to run it separately in here for name, proc in nlp.pipeline: - if name not in exclude and hasattr(proc, "model"): + if ( + name not in exclude + and hasattr(proc, "model") + and proc.model not in (True, False, None) + ): proc.model.finish_update(optimizer) optimizer.step_schedules() if not (step % eval_frequency): diff --git a/spacy/errors.py b/spacy/errors.py index 8e9a8d4b4..26c0dba29 100644 --- a/spacy/errors.py +++ b/spacy/errors.py @@ -55,12 +55,6 @@ class Warnings: "loaded. (Shape: {shape})") W021 = ("Unexpected hash collision in PhraseMatcher. Matches may be " "incorrect. Modify PhraseMatcher._terminal_hash to fix.") - W022 = ("Training a new part-of-speech tagger using a model with no " - "lemmatization rules or data. This means that the trained model " - "may not be able to lemmatize correctly. If this is intentional " - "or the language you're using doesn't have lemmatization data, " - "you can ignore this warning. 
If this is surprising, make sure you " - "have the spacy-lookups-data package installed.") W024 = ("Entity '{entity}' - Alias '{alias}' combination already exists in " "the Knowledge Base.") W026 = ("Unable to set all sentence boundaries from dependency parses.") @@ -482,6 +476,15 @@ class Errors: E199 = ("Unable to merge 0-length span at doc[{start}:{end}].") # TODO: fix numbering after merging develop into master + E930 = ("Received invalid get_examples callback in {name}.begin_training. " + "Expected function that returns an iterable of Example objects but " + "got: {obj}") + E931 = ("Encountered Pipe subclass without Pipe.{method} method in component " + "'{name}'. If the component is trainable and you want to use this " + "method, make sure it's overwritten on the subclass. If your " + "component isn't trainable, add a method that does nothing or " + "don't use the Pipe base class.") + E940 = ("Found NaN values in scores.") E941 = ("Can't find model '{name}'. It looks like you're trying to load a " "model from a shortcut, which is deprecated as of spaCy v3.0. To " "load the model, use its full name instead:\n\n" @@ -578,8 +581,7 @@ class Errors: "but received None.") E977 = ("Can not compare a MorphAnalysis with a string object. " "This is likely a bug in spaCy, so feel free to open an issue.") - E978 = ("The '{method}' method of {name} takes a list of Example objects, " - "but found {types} instead.") + E978 = ("The {name} method takes a list of Example objects, but got: {types}") E979 = ("Cannot convert {type} to an Example object.") E980 = ("Each link annotation should refer to a dictionary with at most one " "identifier mapping to 1.0, and all others to 0.0.") diff --git a/spacy/gold/__init__.py b/spacy/gold/__init__.py index 142c6b3a7..4d71eae09 100644 --- a/spacy/gold/__init__.py +++ b/spacy/gold/__init__.py @@ -1,5 +1,5 @@ from .corpus import Corpus # noqa: F401 -from .example import Example # noqa: F401 +from .example import Example, validate_examples # noqa: F401 from .align import Alignment # noqa: F401 from .iob_utils import iob_to_biluo, biluo_to_iob # noqa: F401 from .iob_utils import biluo_tags_from_offsets, offsets_from_biluo_tags # noqa: F401 diff --git a/spacy/gold/corpus.py b/spacy/gold/corpus.py index 745d52e0e..774c3b840 100644 --- a/spacy/gold/corpus.py +++ b/spacy/gold/corpus.py @@ -62,7 +62,7 @@ class Corpus: if str(path) in seen: continue seen.add(str(path)) - if path.parts[-1].startswith("."): + if path.parts and path.parts[-1].startswith("."): continue elif path.is_dir(): paths.extend(path.iterdir()) diff --git a/spacy/gold/example.pyx b/spacy/gold/example.pyx index 6093d2346..3344704bf 100644 --- a/spacy/gold/example.pyx +++ b/spacy/gold/example.pyx @@ -1,5 +1,5 @@ +from collections import Iterable as IterableInstance import warnings - import numpy from ..tokens.doc cimport Doc @@ -26,6 +26,22 @@ cpdef Doc annotations2doc(vocab, tok_annot, doc_annot): return output +def validate_examples(examples, method): + """Check that a batch of examples received during processing is valid. + This function lives here to prevent circular imports. + + examples (Iterable[Examples]): A batch of examples. + method (str): The method name to show in error messages. 
+ """ + if not isinstance(examples, IterableInstance): + err = Errors.E978.format(name=method, types=type(examples)) + raise TypeError(err) + wrong = set([type(eg) for eg in examples if not isinstance(eg, Example)]) + if wrong: + err = Errors.E978.format(name=method, types=wrong) + raise TypeError(err) + + cdef class Example: def __init__(self, Doc predicted, Doc reference, *, alignment=None): if predicted is None: @@ -263,12 +279,10 @@ def _annot2array(vocab, tok_annot, doc_annot): values.append([vocab.morphology.add(v) for v in value]) else: attrs.append(key) - try: - values.append([vocab.strings.add(v) for v in value]) - except TypeError: - types= set([type(v) for v in value]) + if not all(isinstance(v, str) for v in value): + types = set([type(v) for v in value]) raise TypeError(Errors.E969.format(field=key, types=types)) from None - + values.append([vocab.strings.add(v) for v in value]) array = numpy.asarray(values, dtype="uint64") return attrs, array.T diff --git a/spacy/language.py b/spacy/language.py index 85aac15ef..b67c55e3b 100644 --- a/spacy/language.py +++ b/spacy/language.py @@ -5,7 +5,6 @@ import random import itertools import weakref import functools -from collections import Iterable as IterableInstance from contextlib import contextmanager from copy import copy, deepcopy from pathlib import Path @@ -19,10 +18,10 @@ from timeit import default_timer as timer from .tokens.underscore import Underscore from .vocab import Vocab, create_vocab from .pipe_analysis import validate_attrs, analyze_pipes, print_pipe_analysis -from .gold import Example +from .gold import Example, validate_examples from .scorer import Scorer from .util import create_default_optimizer, registry -from .util import SimpleFrozenDict, combine_score_weights +from .util import SimpleFrozenDict, combine_score_weights, CONFIG_SECTION_ORDER from .lang.tokenizer_exceptions import URL_MATCH, BASE_EXCEPTIONS from .lang.punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES from .lang.punctuation import TOKENIZER_INFIXES @@ -37,7 +36,7 @@ from . import about # This is the base config will all settings (training etc.) DEFAULT_CONFIG_PATH = Path(__file__).parent / "default_config.cfg" -DEFAULT_CONFIG = Config().from_disk(DEFAULT_CONFIG_PATH) +DEFAULT_CONFIG = util.load_config(DEFAULT_CONFIG_PATH) class BaseDefaults: @@ -46,7 +45,7 @@ class BaseDefaults: Language.Defaults. """ - config: Config = Config() + config: Config = Config(section_order=CONFIG_SECTION_ORDER) tokenizer_exceptions: Dict[str, List[dict]] = BASE_EXCEPTIONS prefixes: Optional[List[Union[str, Pattern]]] = TOKENIZER_PREFIXES suffixes: Optional[List[Union[str, Pattern]]] = TOKENIZER_SUFFIXES @@ -135,7 +134,7 @@ class Language: # of the rest. util.registry._entry_point_factories.get_all() - self._config = util.deep_merge_configs(self.default_config, DEFAULT_CONFIG) + self._config = DEFAULT_CONFIG.merge(self.default_config) self._meta = dict(meta) self._path = None self._optimizer = None @@ -168,9 +167,7 @@ class Language: def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) - cls.default_config = util.deep_merge_configs( - cls.Defaults.config, DEFAULT_CONFIG - ) + cls.default_config = DEFAULT_CONFIG.merge(cls.Defaults.config) cls.default_config["nlp"]["lang"] = cls.lang @property @@ -533,6 +530,7 @@ class Language: name: Optional[str] = None, *, config: Optional[Dict[str, Any]] = SimpleFrozenDict(), + raw_config: Optional[Config] = None, validate: bool = True, ) -> Callable[[Doc], Doc]: """Create a pipeline component. 
Mostly used internally. To create and @@ -543,6 +541,7 @@ class Language: Defaults to factory name if not set. config (Optional[Dict[str, Any]]): Config parameters to use for this component. Will be merged with default config, if available. + raw_config (Optional[Config]): Internals: the non-interpolated config. validate (bool): Whether to validate the component config against the arguments and types expected by the factory. RETURNS (Callable[[Doc], Doc]): The pipeline component. @@ -569,7 +568,7 @@ class Language: # This is unideal, but the alternative would mean you always need to # specify the full config settings, which is not really viable. if pipe_meta.default_config: - config = util.deep_merge_configs(config, pipe_meta.default_config) + config = Config(pipe_meta.default_config).merge(config) # We need to create a top-level key because Thinc doesn't allow resolving # top-level references to registered functions. Also gives nicer errors. # The name allows components to know their pipe name and use it in the @@ -583,12 +582,14 @@ class Language: cfg = {factory_name: config} # We're calling the internal _fill here to avoid constructing the # registered functions twice - # TODO: customize validation to make it more readable / relate it to - # pipeline component and why it failed, explain default config resolved, filled = registry.resolve(cfg, validate=validate) - filled = filled[factory_name] + filled = Config(filled[factory_name]) filled["factory"] = factory_name filled.pop("@factories", None) + # Merge the final filled config with the raw config (including non- + # interpolated variables) + if raw_config: + filled = filled.merge(raw_config) self._pipe_configs[name] = filled return resolved[factory_name] @@ -614,7 +615,10 @@ class Language: ) ) pipe = source.get_pipe(source_name) - pipe_config = util.copy_config(source.config["components"][source_name]) + # Make sure the source config is interpolated so we don't end up with + # orphaned variables in our final config + source_config = source.config.interpolate() + pipe_config = util.copy_config(source_config["components"][source_name]) self._pipe_configs[name] = pipe_config return pipe, pipe_config["factory"] @@ -629,6 +633,7 @@ class Language: last: Optional[bool] = None, source: Optional["Language"] = None, config: Optional[Dict[str, Any]] = SimpleFrozenDict(), + raw_config: Optional[Config] = None, validate: bool = True, ) -> Callable[[Doc], Doc]: """Add a component to the processing pipeline. Valid components are @@ -650,6 +655,7 @@ class Language: component from. config (Optional[Dict[str, Any]]): Config parameters to use for this component. Will be merged with default config, if available. + raw_config (Optional[Config]): Internals: the non-interpolated config. validate (bool): Whether to validate the component config against the arguments and types expected by the factory. RETURNS (Callable[[Doc], Doc]): The pipeline component. 
@@ -679,7 +685,11 @@ class Language: lang_code=self.lang, ) pipe_component = self.create_pipe( - factory_name, name=name, config=config, validate=validate, + factory_name, + name=name, + config=config, + raw_config=raw_config, + validate=validate, ) pipe_index = self._get_pipe_index(before, after, first, last) self._pipe_meta[name] = self.get_factory_meta(factory_name) @@ -935,17 +945,7 @@ class Language: losses = {} if len(examples) == 0: return losses - if not isinstance(examples, IterableInstance): - raise TypeError( - Errors.E978.format( - name="language", method="update", types=type(examples) - ) - ) - wrong_types = set([type(eg) for eg in examples if not isinstance(eg, Example)]) - if wrong_types: - raise TypeError( - Errors.E978.format(name="language", method="update", types=wrong_types) - ) + validate_examples(examples, "Language.update") if sgd is None: if self._optimizer is None: self._optimizer = create_default_optimizer() @@ -962,7 +962,11 @@ class Language: proc.update(examples, sgd=None, losses=losses, **component_cfg[name]) if sgd not in (None, False): for name, proc in self.pipeline: - if name not in exclude and hasattr(proc, "model"): + if ( + name not in exclude + and hasattr(proc, "model") + and proc.model not in (True, False, None) + ): proc.model.finish_update(sgd) return losses @@ -999,19 +1003,7 @@ class Language: """ if len(examples) == 0: return - if not isinstance(examples, IterableInstance): - raise TypeError( - Errors.E978.format( - name="language", method="rehearse", types=type(examples) - ) - ) - wrong_types = set([type(eg) for eg in examples if not isinstance(eg, Example)]) - if wrong_types: - raise TypeError( - Errors.E978.format( - name="language", method="rehearse", types=wrong_types - ) - ) + validate_examples(examples, "Language.rehearse") if sgd is None: if self._optimizer is None: self._optimizer = create_default_optimizer() @@ -1060,7 +1052,15 @@ class Language: if get_examples is None: get_examples = lambda: [] else: # Populate vocab + if not hasattr(get_examples, "__call__"): + err = Errors.E930.format(name="Language", obj=type(get_examples)) + raise ValueError(err) for example in get_examples(): + if not isinstance(example, Example): + err = Errors.E978.format( + name="Language.begin_training", types=type(example) + ) + raise ValueError(err) for word in [t.text for t in example.reference]: _ = self.vocab[word] # noqa: F841 if device >= 0: # TODO: do we need this here? 
@@ -1133,17 +1133,7 @@ class Language: DOCS: https://spacy.io/api/language#evaluate """ - if not isinstance(examples, IterableInstance): - err = Errors.E978.format( - name="language", method="evaluate", types=type(examples) - ) - raise TypeError(err) - wrong_types = set([type(eg) for eg in examples if not isinstance(eg, Example)]) - if wrong_types: - err = Errors.E978.format( - name="language", method="evaluate", types=wrong_types - ) - raise TypeError(err) + validate_examples(examples, "Language.evaluate") if component_cfg is None: component_cfg = {} if scorer_cfg is None: @@ -1400,7 +1390,9 @@ class Language: DOCS: https://spacy.io/api/language#from_config """ if auto_fill: - config = util.deep_merge_configs(config, cls.default_config) + config = Config( + cls.default_config, section_order=CONFIG_SECTION_ORDER + ).merge(config) if "nlp" not in config: raise ValueError(Errors.E985.format(config=config)) config_lang = config["nlp"]["lang"] @@ -1438,16 +1430,20 @@ class Language: or lang_cls is not cls ): raise ValueError(Errors.E943.format(value=type(lang_cls))) + # Note that we don't load vectors here, instead they get loaded explicitly + # inside stuff like the spacy train function. If we loaded them here, + # then we would load them twice at runtime: once when we make from config, + # and then again when we load from disk. nlp = lang_cls(vocab=vocab, create_tokenizer=create_tokenizer) if after_creation is not None: nlp = after_creation(nlp) if not isinstance(nlp, cls): raise ValueError(Errors.E942.format(name="creation", value=type(nlp))) - # Note that we don't load vectors here, instead they get loaded explicitly - # inside stuff like the spacy train function. If we loaded them here, - # then we would load them twice at runtime: once when we make from config, - # and then again when we load from disk. - pipeline = config.get("components", {}) + # To create the components we need to use the final interpolated config + # so all values are available (if component configs use variables). + # Later we replace the component config with the raw config again. 
+ interpolated = filled.interpolate() if not filled.is_interpolated else filled + pipeline = interpolated.get("components", {}) # If components are loaded from a source (existing models), we cache # them here so they're only loaded once source_nlps = {} @@ -1456,6 +1452,7 @@ class Language: opts = ", ".join(pipeline.keys()) raise ValueError(Errors.E956.format(name=pipe_name, opts=opts)) pipe_cfg = util.copy_config(pipeline[pipe_name]) + raw_config = Config(filled["components"][pipe_name]) if pipe_name not in disable: if "factory" not in pipe_cfg and "source" not in pipe_cfg: err = Errors.E984.format(name=pipe_name, config=pipe_cfg) @@ -1465,7 +1462,11 @@ class Language: # The pipe name (key in the config) here is the unique name # of the component, not necessarily the factory nlp.add_pipe( - factory, name=pipe_name, config=pipe_cfg, validate=validate, + factory, + name=pipe_name, + config=pipe_cfg, + validate=validate, + raw_config=raw_config, ) else: model = pipe_cfg["source"] @@ -1663,7 +1664,7 @@ def _fix_pretrained_vectors_name(nlp: Language) -> None: else: raise ValueError(Errors.E092) for name, proc in nlp.pipeline: - if not hasattr(proc, "cfg"): + if not hasattr(proc, "cfg") or not isinstance(proc.cfg, dict): continue proc.cfg.setdefault("deprecation_fixes", {}) proc.cfg["deprecation_fixes"]["vectors_name"] = nlp.vocab.vectors.name diff --git a/spacy/pipeline/dep_parser.pyx b/spacy/pipeline/dep_parser.pyx index 801229af5..76f58df58 100644 --- a/spacy/pipeline/dep_parser.pyx +++ b/spacy/pipeline/dep_parser.pyx @@ -9,6 +9,7 @@ from .functions import merge_subtokens from ..language import Language from ._parser_internals import nonproj from ..scorer import Scorer +from ..gold import validate_examples default_model_config = """ @@ -147,6 +148,7 @@ cdef class DependencyParser(Parser): DOCS: https://spacy.io/api/dependencyparser#score """ + validate_examples(examples, "DependencyParser.score") def dep_getter(token, attr): dep = getattr(token, attr) dep = token.vocab.strings.as_string(dep).lower() diff --git a/spacy/pipeline/entity_linker.py b/spacy/pipeline/entity_linker.py index 080273f57..35bf2906e 100644 --- a/spacy/pipeline/entity_linker.py +++ b/spacy/pipeline/entity_linker.py @@ -11,7 +11,7 @@ from ..tokens import Doc from .pipe import Pipe, deserialize_config from ..language import Language from ..vocab import Vocab -from ..gold import Example +from ..gold import Example, validate_examples from ..errors import Errors, Warnings from .. 
import util @@ -142,7 +142,7 @@ class EntityLinker(Pipe): def begin_training( self, - get_examples: Callable[[], Iterable[Example]] = lambda: [], + get_examples: Callable[[], Iterable[Example]], *, pipeline: Optional[List[Tuple[str, Callable[[Doc], Doc]]]] = None, sgd: Optional[Optimizer] = None, @@ -197,14 +197,9 @@ class EntityLinker(Pipe): losses.setdefault(self.name, 0.0) if not examples: return losses + validate_examples(examples, "EntityLinker.update") sentence_docs = [] - try: - docs = [eg.predicted for eg in examples] - except AttributeError: - types = set([type(eg) for eg in examples]) - raise TypeError( - Errors.E978.format(name="EntityLinker", method="update", types=types) - ) from None + docs = [eg.predicted for eg in examples] if set_annotations: # This seems simpler than other ways to get that exact output -- but # it does run the model twice :( @@ -250,6 +245,7 @@ class EntityLinker(Pipe): return losses def get_loss(self, examples: Iterable[Example], sentence_encodings): + validate_examples(examples, "EntityLinker.get_loss") entity_encodings = [] for eg in examples: kb_ids = eg.get_aligned("ENT_KB_ID", as_string=True) diff --git a/spacy/pipeline/entityruler.py b/spacy/pipeline/entityruler.py index bef97ec46..4f4e0fdd5 100644 --- a/spacy/pipeline/entityruler.py +++ b/spacy/pipeline/entityruler.py @@ -9,6 +9,7 @@ from ..util import ensure_path, to_disk, from_disk from ..tokens import Doc, Span from ..matcher import Matcher, PhraseMatcher from ..scorer import Scorer +from ..gold import validate_examples DEFAULT_ENT_ID_SEP = "||" @@ -312,6 +313,7 @@ class EntityRuler: return label def score(self, examples, **kwargs): + validate_examples(examples, "EntityRuler.score") return Scorer.score_spans(examples, "ents", **kwargs) def from_bytes( diff --git a/spacy/pipeline/lemmatizer.py b/spacy/pipeline/lemmatizer.py index f2028772f..6cea65fec 100644 --- a/spacy/pipeline/lemmatizer.py +++ b/spacy/pipeline/lemmatizer.py @@ -1,5 +1,4 @@ from typing import Optional, List, Dict, Any - from thinc.api import Model from .pipe import Pipe @@ -9,6 +8,7 @@ from ..lookups import Lookups, load_lookups from ..scorer import Scorer from ..tokens import Doc, Token from ..vocab import Vocab +from ..gold import validate_examples from .. 
import util @@ -127,6 +127,7 @@ class Lemmatizer(Pipe): """ self.vocab = vocab self.model = model + self.name = name self._mode = mode self.lookups = lookups if lookups is not None else Lookups() self.overwrite = overwrite @@ -135,10 +136,10 @@ class Lemmatizer(Pipe): elif self.mode == "rule": self.lemmatize = self.rule_lemmatize else: - try: - self.lemmatize = getattr(self, f"{self.mode}_lemmatize") - except AttributeError: + mode_attr = f"{self.mode}_lemmatize" + if not hasattr(self, mode_attr): raise ValueError(Errors.E1003.format(mode=mode)) + self.lemmatize = getattr(self, mode_attr) self.cache = {} @property @@ -271,6 +272,7 @@ class Lemmatizer(Pipe): DOCS: https://spacy.io/api/lemmatizer#score """ + validate_examples(examples, "Lemmatizer.score") return Scorer.score_token_attr(examples, "lemma", **kwargs) def to_disk(self, path, *, exclude=tuple()): diff --git a/spacy/pipeline/morphologizer.pyx b/spacy/pipeline/morphologizer.pyx index efc494181..329a05f90 100644 --- a/spacy/pipeline/morphologizer.pyx +++ b/spacy/pipeline/morphologizer.pyx @@ -6,15 +6,16 @@ from thinc.api import SequenceCategoricalCrossentropy, Model, Config from ..tokens.doc cimport Doc from ..vocab cimport Vocab from ..morphology cimport Morphology + from ..parts_of_speech import IDS as POS_IDS from ..symbols import POS - from ..language import Language from ..errors import Errors from .pipe import deserialize_config from .tagger import Tagger from .. import util from ..scorer import Scorer +from ..gold import validate_examples default_model_config = """ @@ -126,7 +127,7 @@ class Morphologizer(Tagger): self.cfg["labels_pos"][norm_label] = POS_IDS[pos] return 1 - def begin_training(self, get_examples=lambda: [], *, pipeline=None, sgd=None): + def begin_training(self, get_examples, *, pipeline=None, sgd=None): """Initialize the pipe for training, using data examples if available. 
get_examples (Callable[[], Iterable[Example]]): Optional function that @@ -140,6 +141,9 @@ class Morphologizer(Tagger): DOCS: https://spacy.io/api/morphologizer#begin_training """ + if not hasattr(get_examples, "__call__"): + err = Errors.E930.format(name="Morphologizer", obj=type(get_examples)) + raise ValueError(err) for example in get_examples(): for i, token in enumerate(example.reference): pos = token.pos_ @@ -192,6 +196,7 @@ class Morphologizer(Tagger): DOCS: https://spacy.io/api/morphologizer#get_loss """ + validate_examples(examples, "Morphologizer.get_loss") loss_func = SequenceCategoricalCrossentropy(names=self.labels, normalize=False) truths = [] for eg in examples: @@ -228,6 +233,7 @@ class Morphologizer(Tagger): DOCS: https://spacy.io/api/morphologizer#score """ + validate_examples(examples, "Morphologizer.score") results = {} results.update(Scorer.score_token_attr(examples, "pos", **kwargs)) results.update(Scorer.score_token_attr(examples, "morph", **kwargs)) diff --git a/spacy/pipeline/multitask.pyx b/spacy/pipeline/multitask.pyx index 84ed19b0d..3ef85c821 100644 --- a/spacy/pipeline/multitask.pyx +++ b/spacy/pipeline/multitask.pyx @@ -8,6 +8,7 @@ from ..tokens.doc cimport Doc from .pipe import Pipe from .tagger import Tagger +from ..gold import validate_examples from ..language import Language from ._parser_internals import nonproj from ..attrs import POS, ID @@ -80,10 +81,11 @@ class MultitaskObjective(Tagger): def set_annotations(self, docs, dep_ids): pass - def begin_training(self, get_examples=lambda: [], pipeline=None, sgd=None): - gold_examples = nonproj.preprocess_training_data(get_examples()) - # for raw_text, doc_annot in gold_tuples: - for example in gold_examples: + def begin_training(self, get_examples, pipeline=None, sgd=None): + if not hasattr(get_examples, "__call__"): + err = Errors.E930.format(name="MultitaskObjective", obj=type(get_examples)) + raise ValueError(err) + for example in get_examples(): for token in example.y: label = self.make_label(token) if label is not None and label not in self.labels: @@ -175,7 +177,7 @@ class ClozeMultitask(Pipe): def set_annotations(self, docs, dep_ids): pass - def begin_training(self, get_examples=lambda: [], pipeline=None, sgd=None): + def begin_training(self, get_examples, pipeline=None, sgd=None): self.model.initialize() X = self.model.ops.alloc((5, self.model.get_ref("tok2vec").get_dim("nO"))) self.model.output_layer.begin_training(X) @@ -189,6 +191,7 @@ class ClozeMultitask(Pipe): return tokvecs, vectors def get_loss(self, examples, vectors, prediction): + validate_examples(examples, "ClozeMultitask.get_loss") # The simplest way to implement this would be to vstack the # token.vector values, but that's a bit inefficient, especially on GPU. # Instead we fetch the index into the vectors table for each of our tokens, @@ -206,18 +209,16 @@ class ClozeMultitask(Pipe): if losses is not None and self.name not in losses: losses[self.name] = 0. 
set_dropout_rate(self.model, drop) - try: - predictions, bp_predictions = self.model.begin_update([eg.predicted for eg in examples]) - except AttributeError: - types = set([type(eg) for eg in examples]) - raise TypeError(Errors.E978.format(name="ClozeMultitask", method="rehearse", types=types)) from None + validate_examples(examples, "ClozeMultitask.rehearse") + docs = [eg.predicted for eg in examples] + predictions, bp_predictions = self.model.begin_update() loss, d_predictions = self.get_loss(examples, self.vocab.vectors.data, predictions) bp_predictions(d_predictions) if sgd is not None: self.model.finish_update(sgd) - if losses is not None: losses[self.name] += loss + return losses def add_label(self, label): raise NotImplementedError diff --git a/spacy/pipeline/ner.pyx b/spacy/pipeline/ner.pyx index a3bc3d920..631b5ae72 100644 --- a/spacy/pipeline/ner.pyx +++ b/spacy/pipeline/ner.pyx @@ -7,6 +7,7 @@ from ._parser_internals.ner cimport BiluoPushDown from ..language import Language from ..scorer import Scorer +from ..gold import validate_examples default_model_config = """ @@ -50,7 +51,7 @@ def make_ner( ): """Create a transition-based EntityRecognizer component. The entity recognizer identifies non-overlapping labelled spans of tokens. - + The transition-based algorithm used encodes certain assumptions that are effective for "traditional" named entity recognition tasks, but may not be a good fit for every span identification problem. Specifically, the loss @@ -120,4 +121,5 @@ cdef class EntityRecognizer(Parser): DOCS: https://spacy.io/api/entityrecognizer#score """ + validate_examples(examples, "EntityRecognizer.score") return Scorer.score_spans(examples, "ents", **kwargs) diff --git a/spacy/pipeline/pipe.pxd b/spacy/pipeline/pipe.pxd index bb97f79d0..bca94d528 100644 --- a/spacy/pipeline/pipe.pxd +++ b/spacy/pipeline/pipe.pxd @@ -1,2 +1,5 @@ cdef class Pipe: + cdef public object vocab + cdef public object model cdef public str name + cdef public object cfg diff --git a/spacy/pipeline/pipe.pyx b/spacy/pipeline/pipe.pyx index 96a8b5944..51251dacc 100644 --- a/spacy/pipeline/pipe.pyx +++ b/spacy/pipeline/pipe.pyx @@ -1,9 +1,10 @@ # cython: infer_types=True, profile=True import srsly +from thinc.api import set_dropout_rate, Model from ..tokens.doc cimport Doc -from ..util import create_default_optimizer +from ..gold import validate_examples from ..errors import Errors from .. import util @@ -16,7 +17,6 @@ cdef class Pipe: DOCS: https://spacy.io/api/pipe """ - def __init__(self, vocab, model, name, **cfg): """Initialize a pipeline component. @@ -27,7 +27,10 @@ cdef class Pipe: DOCS: https://spacy.io/api/pipe#init """ - raise NotImplementedError + self.vocab = vocab + self.model = model + self.name = name + self.cfg = dict(cfg) def __call__(self, Doc doc): """Apply the pipe to one document. The document is modified in place, @@ -68,7 +71,7 @@ cdef class Pipe: DOCS: https://spacy.io/api/pipe#predict """ - raise NotImplementedError + raise NotImplementedError(Errors.E931.format(method="predict", name=self.name)) def set_annotations(self, docs, scores): """Modify a batch of documents, using pre-computed scores. 
@@ -78,7 +81,43 @@ cdef class Pipe: DOCS: https://spacy.io/api/pipe#set_annotations """ - raise NotImplementedError + raise NotImplementedError(Errors.E931.format(method="set_annotations", name=self.name)) + + def update(self, examples, *, drop=0.0, set_annotations=False, sgd=None, losses=None): + """Learn from a batch of documents and gold-standard information, + updating the pipe's model. Delegates to predict and get_loss. + + examples (Iterable[Example]): A batch of Example objects. + drop (float): The dropout rate. + set_annotations (bool): Whether or not to update the Example objects + with the predictions. + sgd (thinc.api.Optimizer): The optimizer. + losses (Dict[str, float]): Optional record of the loss during training. + Updated using the component name as the key. + RETURNS (Dict[str, float]): The updated losses dictionary. + + DOCS: https://spacy.io/api/pipe#update + """ + if losses is None: + losses = {} + if not hasattr(self, "model") or self.model in (None, True, False): + return losses + losses.setdefault(self.name, 0.0) + validate_examples(examples, "Pipe.update") + if not any(len(eg.predicted) if eg.predicted else 0 for eg in examples): + # Handle cases where there are no tokens in any docs. + return + set_dropout_rate(self.model, drop) + scores, bp_scores = self.model.begin_update([eg.predicted for eg in examples]) + loss, d_scores = self.get_loss(examples, scores) + bp_scores(d_scores) + if sgd not in (None, False): + self.model.finish_update(sgd) + losses[self.name] += loss + if set_annotations: + docs = [eg.predicted for eg in examples] + self.set_annotations(docs, scores=scores) + return losses def rehearse(self, examples, *, sgd=None, losses=None, **config): """Perform a "rehearsal" update from a batch of data. Rehearsal updates @@ -107,7 +146,7 @@ cdef class Pipe: DOCS: https://spacy.io/api/pipe#get_loss """ - raise NotImplementedError + raise NotImplementedError(Errors.E931.format(method="get_loss", name=self.name)) def add_label(self, label): """Add an output label, to be predicted by the model. It's possible to @@ -119,7 +158,7 @@ cdef class Pipe: DOCS: https://spacy.io/api/pipe#add_label """ - raise NotImplementedError + raise NotImplementedError(Errors.E931.format(method="add_label", name=self.name)) def create_optimizer(self): """Create an optimizer for the pipeline component. @@ -128,9 +167,9 @@ cdef class Pipe: DOCS: https://spacy.io/api/pipe#create_optimizer """ - return create_default_optimizer() + return util.create_default_optimizer() - def begin_training(self, get_examples=lambda: [], *, pipeline=None, sgd=None): + def begin_training(self, get_examples, *, pipeline=None, sgd=None): """Initialize the pipe for training, using data examples if available. get_examples (Callable[[], Iterable[Example]]): Optional function that diff --git a/spacy/pipeline/sentencizer.pyx b/spacy/pipeline/sentencizer.pyx index be4351212..46d599497 100644 --- a/spacy/pipeline/sentencizer.pyx +++ b/spacy/pipeline/sentencizer.pyx @@ -7,6 +7,7 @@ from ..tokens.doc cimport Doc from .pipe import Pipe from ..language import Language from ..scorer import Scorer +from ..gold import validate_examples from .. 
import util @@ -58,7 +59,7 @@ class Sentencizer(Pipe): else: self.punct_chars = set(self.default_punct_chars) - def begin_training(self, get_examples=lambda: [], pipeline=None, sgd=None): + def begin_training(self, get_examples, pipeline=None, sgd=None): pass def __call__(self, doc): @@ -158,6 +159,7 @@ class Sentencizer(Pipe): DOCS: https://spacy.io/api/sentencizer#score """ + validate_examples(examples, "Sentencizer.score") results = Scorer.score_spans(examples, "sents", **kwargs) del results["sents_per_type"] return results diff --git a/spacy/pipeline/senter.pyx b/spacy/pipeline/senter.pyx index cf7479c29..e82225d27 100644 --- a/spacy/pipeline/senter.pyx +++ b/spacy/pipeline/senter.pyx @@ -9,6 +9,7 @@ from .tagger import Tagger from ..language import Language from ..errors import Errors from ..scorer import Scorer +from ..gold import validate_examples from .. import util @@ -102,6 +103,7 @@ class SentenceRecognizer(Tagger): DOCS: https://spacy.io/api/sentencerecognizer#get_loss """ + validate_examples(examples, "SentenceRecognizer.get_loss") labels = self.labels loss_func = SequenceCategoricalCrossentropy(names=labels, normalize=False) truths = [] @@ -121,7 +123,7 @@ class SentenceRecognizer(Tagger): raise ValueError("nan value when computing loss") return float(loss), d_scores - def begin_training(self, get_examples=lambda: [], *, pipeline=None, sgd=None): + def begin_training(self, get_examples, *, pipeline=None, sgd=None): """Initialize the pipe for training, using data examples if available. get_examples (Callable[[], Iterable[Example]]): Optional function that @@ -151,6 +153,7 @@ class SentenceRecognizer(Tagger): RETURNS (Dict[str, Any]): The scores, produced by Scorer.score_spans. DOCS: https://spacy.io/api/sentencerecognizer#score """ + validate_examples(examples, "SentenceRecognizer.score") results = Scorer.score_spans(examples, "sents", **kwargs) del results["sents_per_type"] return results diff --git a/spacy/pipeline/simple_ner.py b/spacy/pipeline/simple_ner.py index 4965b2e13..5f3addbd7 100644 --- a/spacy/pipeline/simple_ner.py +++ b/spacy/pipeline/simple_ner.py @@ -1,4 +1,4 @@ -from typing import List, Iterable, Optional, Dict, Tuple, Callable +from typing import List, Iterable, Optional, Dict, Tuple, Callable, Set from thinc.types import Floats2d from thinc.api import SequenceCategoricalCrossentropy, set_dropout_rate, Model from thinc.api import Optimizer, Config @@ -6,6 +6,7 @@ from thinc.util import to_numpy from ..errors import Errors from ..gold import Example, spans_from_biluo_tags, iob_to_biluo, biluo_to_iob +from ..gold import validate_examples from ..tokens import Doc from ..language import Language from ..vocab import Vocab @@ -127,6 +128,7 @@ class SimpleNER(Pipe): if losses is None: losses = {} losses.setdefault("ner", 0.0) + validate_examples(examples, "SimpleNER.update") if not any(_has_ner(eg) for eg in examples): return losses docs = [eg.predicted for eg in examples] @@ -142,6 +144,7 @@ class SimpleNER(Pipe): return losses def get_loss(self, examples: List[Example], scores) -> Tuple[List[Floats2d], float]: + validate_examples(examples, "SimpleNER.get_loss") truths = [] for eg in examples: tags = eg.get_aligned_ner() @@ -161,14 +164,17 @@ class SimpleNER(Pipe): def begin_training( self, - get_examples: Callable, + get_examples: Callable[[], Iterable[Example]], pipeline: Optional[List[Tuple[str, Callable[[Doc], Doc]]]] = None, sgd: Optional[Optimizer] = None, ): + all_labels = set() if not hasattr(get_examples, "__call__"): - gold_tuples = get_examples - 
get_examples = lambda: gold_tuples - for label in _get_labels(get_examples()): + err = Errors.E930.format(name="SimpleNER", obj=type(get_examples)) + raise ValueError(err) + for example in get_examples(): + all_labels.update(_get_labels(example)) + for label in sorted(all_labels): self.add_label(label) labels = self.labels n_actions = self.model.attrs["get_num_actions"](len(labels)) @@ -185,6 +191,7 @@ class SimpleNER(Pipe): pass def score(self, examples, **kwargs): + validate_examples(examples, "SimpleNER.score") return Scorer.score_spans(examples, "ents", **kwargs) @@ -196,10 +203,9 @@ def _has_ner(example: Example) -> bool: return False -def _get_labels(examples: List[Example]) -> List[str]: +def _get_labels(example: Example) -> Set[str]: labels = set() - for eg in examples: - for ner_tag in eg.get_aligned("ENT_TYPE", as_string=True): - if ner_tag != "O" and ner_tag != "-": - labels.add(ner_tag) - return list(sorted(labels)) + for ner_tag in example.get_aligned("ENT_TYPE", as_string=True): + if ner_tag != "O" and ner_tag != "-": + labels.add(ner_tag) + return labels diff --git a/spacy/pipeline/tagger.pyx b/spacy/pipeline/tagger.pyx index 937290d5f..9070329e8 100644 --- a/spacy/pipeline/tagger.pyx +++ b/spacy/pipeline/tagger.pyx @@ -16,6 +16,7 @@ from ..attrs import POS, ID from ..parts_of_speech import X from ..errors import Errors, TempErrors, Warnings from ..scorer import Scorer +from ..gold import validate_examples from .. import util @@ -187,19 +188,15 @@ class Tagger(Pipe): if losses is None: losses = {} losses.setdefault(self.name, 0.0) - try: - if not any(len(eg.predicted) if eg.predicted else 0 for eg in examples): - # Handle cases where there are no tokens in any docs. - return - except AttributeError: - types = set([type(eg) for eg in examples]) - raise TypeError(Errors.E978.format(name="Tagger", method="update", types=types)) from None + validate_examples(examples, "Tagger.update") + if not any(len(eg.predicted) if eg.predicted else 0 for eg in examples): + # Handle cases where there are no tokens in any docs. 
+ return set_dropout_rate(self.model, drop) - tag_scores, bp_tag_scores = self.model.begin_update( - [eg.predicted for eg in examples]) + tag_scores, bp_tag_scores = self.model.begin_update([eg.predicted for eg in examples]) for sc in tag_scores: if self.model.ops.xp.isnan(sc.sum()): - raise ValueError("nan value in scores") + raise ValueError(Errors.E940) loss, d_tag_scores = self.get_loss(examples, tag_scores) bp_tag_scores(d_tag_scores) if sgd not in (None, False): @@ -226,11 +223,8 @@ class Tagger(Pipe): DOCS: https://spacy.io/api/tagger#rehearse """ - try: - docs = [eg.predicted for eg in examples] - except AttributeError: - types = set([type(eg) for eg in examples]) - raise TypeError(Errors.E978.format(name="Tagger", method="rehearse", types=types)) from None + validate_examples(examples, "Tagger.rehearse") + docs = [eg.predicted for eg in examples] if self._rehearsal_model is None: return if not any(len(doc) for doc in docs): @@ -256,6 +250,7 @@ class Tagger(Pipe): DOCS: https://spacy.io/api/tagger#get_loss """ + validate_examples(examples, "Tagger.get_loss") loss_func = SequenceCategoricalCrossentropy(names=self.labels, normalize=False) truths = [eg.get_aligned("TAG", as_string=True) for eg in examples] d_scores, loss = loss_func(scores, truths) @@ -263,7 +258,7 @@ class Tagger(Pipe): raise ValueError("nan value when computing loss") return float(loss), d_scores - def begin_training(self, get_examples=lambda: [], *, pipeline=None, sgd=None): + def begin_training(self, get_examples, *, pipeline=None, sgd=None): """Initialize the pipe for training, using data examples if available. get_examples (Callable[[], Iterable[Example]]): Optional function that @@ -277,13 +272,12 @@ class Tagger(Pipe): DOCS: https://spacy.io/api/tagger#begin_training """ + if not hasattr(get_examples, "__call__"): + err = Errors.E930.format(name="Tagger", obj=type(get_examples)) + raise ValueError(err) tags = set() for example in get_examples(): - try: - y = example.y - except AttributeError: - raise TypeError(Errors.E978.format(name="Tagger", method="begin_training", types=type(example))) from None - for token in y: + for token in example.y: tags.add(token.tag_) for tag in sorted(tags): self.add_label(tag) @@ -318,6 +312,7 @@ class Tagger(Pipe): DOCS: https://spacy.io/api/tagger#score """ + validate_examples(examples, "Tagger.score") return Scorer.score_token_attr(examples, "tag", **kwargs) def to_bytes(self, *, exclude=tuple()): diff --git a/spacy/pipeline/textcat.py b/spacy/pipeline/textcat.py index 7b9cc1e24..ce4f286e5 100644 --- a/spacy/pipeline/textcat.py +++ b/spacy/pipeline/textcat.py @@ -5,7 +5,7 @@ import numpy from .pipe import Pipe from ..language import Language -from ..gold import Example +from ..gold import Example, validate_examples from ..errors import Errors from ..scorer import Scorer from .. import util @@ -209,15 +209,10 @@ class TextCategorizer(Pipe): if losses is None: losses = {} losses.setdefault(self.name, 0.0) - try: - if not any(len(eg.predicted) if eg.predicted else 0 for eg in examples): - # Handle cases where there are no tokens in any docs. - return losses - except AttributeError: - types = set([type(eg) for eg in examples]) - raise TypeError( - Errors.E978.format(name="TextCategorizer", method="update", types=types) - ) from None + validate_examples(examples, "TextCategorizer.update") + if not any(len(eg.predicted) if eg.predicted else 0 for eg in examples): + # Handle cases where there are no tokens in any docs. 
+ return losses set_dropout_rate(self.model, drop) scores, bp_scores = self.model.begin_update([eg.predicted for eg in examples]) loss, d_scores = self.get_loss(examples, scores) @@ -252,19 +247,12 @@ class TextCategorizer(Pipe): DOCS: https://spacy.io/api/textcategorizer#rehearse """ - if losses is not None: losses.setdefault(self.name, 0.0) if self._rehearsal_model is None: return losses - try: - docs = [eg.predicted for eg in examples] - except AttributeError: - types = set([type(eg) for eg in examples]) - err = Errors.E978.format( - name="TextCategorizer", method="rehearse", types=types - ) - raise TypeError(err) from None + validate_examples(examples, "TextCategorizer.rehearse") + docs = [eg.predicted for eg in examples] if not any(len(doc) for doc in docs): # Handle cases where there are no tokens in any docs. return losses @@ -303,6 +291,7 @@ class TextCategorizer(Pipe): DOCS: https://spacy.io/api/textcategorizer#get_loss """ + validate_examples(examples, "TextCategorizer.get_loss") truths, not_missing = self._examples_to_truth(examples) not_missing = self.model.ops.asarray(not_missing) d_scores = (scores - truths) / scores.shape[0] @@ -338,7 +327,7 @@ class TextCategorizer(Pipe): def begin_training( self, - get_examples: Callable[[], Iterable[Example]] = lambda: [], + get_examples: Callable[[], Iterable[Example]], *, pipeline: Optional[List[Tuple[str, Callable[[Doc], Doc]]]] = None, sgd: Optional[Optimizer] = None, @@ -356,21 +345,20 @@ class TextCategorizer(Pipe): DOCS: https://spacy.io/api/textcategorizer#begin_training """ - # TODO: begin_training is not guaranteed to see all data / labels ? - examples = list(get_examples()) - for example in examples: - try: - y = example.y - except AttributeError: - err = Errors.E978.format( - name="TextCategorizer", method="update", types=type(example) - ) - raise TypeError(err) from None - for cat in y.cats: + if not hasattr(get_examples, "__call__"): + err = Errors.E930.format(name="TextCategorizer", obj=type(get_examples)) + raise ValueError(err) + subbatch = [] # Select a subbatch of examples to initialize the model + for example in get_examples(): + if len(subbatch) < 2: + subbatch.append(example) + for cat in example.y.cats: self.add_label(cat) self.require_labels() - docs = [Doc(self.vocab, words=["hello"])] - truths, _ = self._examples_to_truth(examples) + docs = [eg.reference for eg in subbatch] + if not docs: # need at least one doc + docs = [Doc(self.vocab, words=["hello"])] + truths, _ = self._examples_to_truth(subbatch) self.set_output(len(self.labels)) self.model.initialize(X=docs, Y=truths) if sgd is None: @@ -392,6 +380,7 @@ class TextCategorizer(Pipe): DOCS: https://spacy.io/api/textcategorizer#score """ + validate_examples(examples, "TextCategorizer.score") return Scorer.score_cats( examples, "cats", diff --git a/spacy/pipeline/tok2vec.py b/spacy/pipeline/tok2vec.py index c9f0a99e9..f2d138cf7 100644 --- a/spacy/pipeline/tok2vec.py +++ b/spacy/pipeline/tok2vec.py @@ -2,7 +2,7 @@ from typing import Iterator, Sequence, Iterable, Optional, Dict, Callable, List, from thinc.api import Model, set_dropout_rate, Optimizer, Config from .pipe import Pipe -from ..gold import Example +from ..gold import Example, validate_examples from ..tokens import Doc from ..vocab import Vocab from ..language import Language @@ -166,9 +166,8 @@ class Tok2Vec(Pipe): """ if losses is None: losses = {} + validate_examples(examples, "Tok2Vec.update") docs = [eg.predicted for eg in examples] - if isinstance(docs, Doc): - docs = [docs] 
set_dropout_rate(self.model, drop) tokvecs, bp_tokvecs = self.model.begin_update(docs) d_tokvecs = [self.model.ops.alloc2f(*t2v.shape) for t2v in tokvecs] @@ -194,7 +193,8 @@ class Tok2Vec(Pipe): batch_id = Tok2VecListener.get_batch_id(docs) for listener in self.listeners[:-1]: listener.receive(batch_id, tokvecs, accumulate_gradient) - self.listeners[-1].receive(batch_id, tokvecs, backprop) + if self.listeners: + self.listeners[-1].receive(batch_id, tokvecs, backprop) if set_annotations: self.set_annotations(docs, tokvecs) return losses @@ -204,7 +204,7 @@ class Tok2Vec(Pipe): def begin_training( self, - get_examples: Callable[[], Iterable[Example]] = lambda: [], + get_examples: Callable[[], Iterable[Example]], *, pipeline: Optional[List[Tuple[str, Callable[[Doc], Doc]]]] = None, sgd: Optional[Optimizer] = None, diff --git a/spacy/pipeline/transition_parser.pxd b/spacy/pipeline/transition_parser.pxd index e594a3098..67bc01f97 100644 --- a/spacy/pipeline/transition_parser.pxd +++ b/spacy/pipeline/transition_parser.pxd @@ -8,11 +8,8 @@ from ..ml.parser_model cimport WeightsC, ActivationsC, SizesC cdef class Parser(Pipe): - cdef readonly Vocab vocab - cdef public object model cdef public object _rehearsal_model cdef readonly TransitionSystem moves - cdef readonly object cfg cdef public object _multitasks cdef void _parseC(self, StateC** states, diff --git a/spacy/pipeline/transition_parser.pyx b/spacy/pipeline/transition_parser.pyx index 9829e764d..2eadfa6aa 100644 --- a/spacy/pipeline/transition_parser.pyx +++ b/spacy/pipeline/transition_parser.pyx @@ -8,22 +8,21 @@ from libc.string cimport memset from libc.stdlib cimport calloc, free import srsly +from thinc.api import set_dropout_rate +import numpy.random +import numpy +import warnings from ._parser_internals.stateclass cimport StateClass from ..ml.parser_model cimport alloc_activations, free_activations from ..ml.parser_model cimport predict_states, arg_max_if_valid from ..ml.parser_model cimport WeightsC, ActivationsC, SizesC, cpu_log_loss from ..ml.parser_model cimport get_c_weights, get_c_sizes - from ..tokens.doc cimport Doc + +from ..gold import validate_examples from ..errors import Errors, Warnings from .. import util -from ..util import create_default_optimizer - -from thinc.api import set_dropout_rate -import numpy.random -import numpy -import warnings cdef class Parser(Pipe): @@ -266,6 +265,7 @@ cdef class Parser(Pipe): if losses is None: losses = {} losses.setdefault(self.name, 0.) + validate_examples(examples, "Parser.update") for multitask in self._multitasks: multitask.update(examples, drop=drop, sgd=sgd) n_examples = len([eg for eg in examples if self.moves.has_gold(eg)]) @@ -329,7 +329,7 @@ cdef class Parser(Pipe): if self._rehearsal_model is None: return None losses.setdefault(self.name, 0.) 
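The `validate_examples` calls added throughout these hunks (including `Parser.rehearse` just below) replace the per-component `try`/`except AttributeError` blocks. A hedged illustration of the failure mode it guards against — not code from this diff:

```python
# Passing plain Doc objects where Example objects are expected should now
# fail fast with a TypeError (E978) instead of a late AttributeError.
from spacy.gold import validate_examples
from spacy.lang.en import English

nlp = English()
docs = [nlp.make_doc("This is a Doc, not an Example")]
try:
    validate_examples(docs, "Tagger.update")
except TypeError as err:
    print(err)
```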
- + validate_examples(examples, "Parser.rehearse") docs = [eg.predicted for eg in examples] states = self.moves.init_batch(docs) # This is pretty dirty, but the NER can resize itself in init_batch, @@ -398,21 +398,18 @@ cdef class Parser(Pipe): losses[self.name] += (d_scores**2).sum() return d_scores - def create_optimizer(self): - return create_default_optimizer() - def set_output(self, nO): self.model.attrs["resize_output"](self.model, nO) def begin_training(self, get_examples, pipeline=None, sgd=None, **kwargs): + if not hasattr(get_examples, "__call__"): + err = Errors.E930.format(name="DependencyParser/EntityRecognizer", obj=type(get_examples)) + raise ValueError(err) self.cfg.update(kwargs) lexeme_norms = self.vocab.lookups.get_table("lexeme_norm", {}) if len(lexeme_norms) == 0 and self.vocab.lang in util.LEXEME_NORM_LANGS: langs = ", ".join(util.LEXEME_NORM_LANGS) - warnings.warn(Warnings.W033.format(model="parser or NER", langs=langs)) - if not hasattr(get_examples, '__call__'): - gold_tuples = get_examples - get_examples = lambda: gold_tuples + util.logger.debug(Warnings.W033.format(model="parser or NER", langs=langs)) actions = self.moves.get_actions( examples=get_examples(), min_freq=self.cfg['min_action_freq'], diff --git a/spacy/tests/doc/test_add_entities.py b/spacy/tests/doc/test_add_entities.py index 5fb5f0914..d6e345336 100644 --- a/spacy/tests/doc/test_add_entities.py +++ b/spacy/tests/doc/test_add_entities.py @@ -18,7 +18,7 @@ def test_doc_add_entities_set_ents_iob(en_vocab): cfg = {"model": DEFAULT_NER_MODEL} model = registry.make_from_config(cfg, validate=True)["model"] ner = EntityRecognizer(en_vocab, model, **config) - ner.begin_training([]) + ner.begin_training(lambda: []) ner(doc) assert len(list(doc.ents)) == 0 assert [w.ent_iob_ for w in doc] == (["O"] * len(doc)) @@ -41,7 +41,7 @@ def test_ents_reset(en_vocab): cfg = {"model": DEFAULT_NER_MODEL} model = registry.make_from_config(cfg, validate=True)["model"] ner = EntityRecognizer(en_vocab, model, **config) - ner.begin_training([]) + ner.begin_training(lambda: []) ner(doc) assert [t.ent_iob_ for t in doc] == (["O"] * len(doc)) doc.ents = list(doc.ents) diff --git a/spacy/tests/parser/test_add_label.py b/spacy/tests/parser/test_add_label.py index 88dfabdc8..fce5f679f 100644 --- a/spacy/tests/parser/test_add_label.py +++ b/spacy/tests/parser/test_add_label.py @@ -35,7 +35,7 @@ def test_init_parser(parser): def _train_parser(parser): fix_random_seed(1) parser.add_label("left") - parser.begin_training([], **parser.cfg) + parser.begin_training(lambda: [], **parser.cfg) sgd = Adam(0.001) for i in range(5): @@ -75,7 +75,7 @@ def test_add_label_deserializes_correctly(): ner1.add_label("C") ner1.add_label("B") ner1.add_label("A") - ner1.begin_training([]) + ner1.begin_training(lambda: []) ner2 = EntityRecognizer(Vocab(), model, **config) # the second model needs to be resized before we can call from_bytes diff --git a/spacy/tests/parser/test_ner.py b/spacy/tests/parser/test_ner.py index 0ffe74273..c7a1ed0d2 100644 --- a/spacy/tests/parser/test_ner.py +++ b/spacy/tests/parser/test_ner.py @@ -1,17 +1,17 @@ import pytest - from spacy import util from spacy.lang.en import English - from spacy.language import Language from spacy.lookups import Lookups from spacy.pipeline._parser_internals.ner import BiluoPushDown from spacy.gold import Example from spacy.tokens import Doc from spacy.vocab import Vocab +import logging from ..util import make_tempdir + TRAIN_DATA = [ ("Who is Shaka Khan?", {"entities": [(7, 17, "PERSON")]}), ("I 
like London and Berlin.", {"entities": [(7, 13, "LOC"), (18, 24, "LOC")]}), @@ -56,6 +56,7 @@ def test_get_oracle_moves(tsys, doc, entity_annots): assert names == ["U-PERSON", "O", "O", "B-GPE", "L-GPE", "O"] +@pytest.mark.filterwarnings("ignore::UserWarning") def test_get_oracle_moves_negative_entities(tsys, doc, entity_annots): entity_annots = [(s, e, "!" + label) for s, e, label in entity_annots] example = Example.from_dict(doc, {"entities": entity_annots}) @@ -332,19 +333,21 @@ def test_overfitting_IO(): assert ents2[0].label_ == "LOC" -def test_ner_warns_no_lookups(): +def test_ner_warns_no_lookups(caplog): nlp = English() assert nlp.lang in util.LEXEME_NORM_LANGS nlp.vocab.lookups = Lookups() assert not len(nlp.vocab.lookups) nlp.add_pipe("ner") - with pytest.warns(UserWarning): + with caplog.at_level(logging.DEBUG): nlp.begin_training() + assert "W033" in caplog.text + caplog.clear() nlp.vocab.lookups.add_table("lexeme_norm") nlp.vocab.lookups.get_table("lexeme_norm")["a"] = "A" - with pytest.warns(None) as record: + with caplog.at_level(logging.DEBUG): nlp.begin_training() - assert not record.list + assert "W033" not in caplog.text @Language.factory("blocker") diff --git a/spacy/tests/parser/test_preset_sbd.py b/spacy/tests/parser/test_preset_sbd.py index 939181419..594498b0b 100644 --- a/spacy/tests/parser/test_preset_sbd.py +++ b/spacy/tests/parser/test_preset_sbd.py @@ -28,7 +28,7 @@ def parser(vocab): parser.cfg["hidden_width"] = 32 # parser.add_label('right') parser.add_label("left") - parser.begin_training([], **parser.cfg) + parser.begin_training(lambda: [], **parser.cfg) sgd = Adam(0.001) for i in range(10): diff --git a/spacy/tests/pipeline/test_entity_linker.py b/spacy/tests/pipeline/test_entity_linker.py index bb93cf118..b3fb6d0fc 100644 --- a/spacy/tests/pipeline/test_entity_linker.py +++ b/spacy/tests/pipeline/test_entity_linker.py @@ -136,7 +136,7 @@ def test_kb_undefined(nlp): """Test that the EL can't train without defining a KB""" entity_linker = nlp.add_pipe("entity_linker", config={}) with pytest.raises(ValueError): - entity_linker.begin_training() + entity_linker.begin_training(lambda: []) def test_kb_empty(nlp): @@ -145,7 +145,7 @@ def test_kb_empty(nlp): entity_linker = nlp.add_pipe("entity_linker", config=config) assert len(entity_linker.kb) == 0 with pytest.raises(ValueError): - entity_linker.begin_training() + entity_linker.begin_training(lambda: []) def test_candidate_generation(nlp): @@ -249,7 +249,7 @@ def test_preserving_links_asdoc(nlp): ruler.add_patterns(patterns) el_config = {"kb": {"@assets": "myLocationsKB.v1"}, "incl_prior": False} el_pipe = nlp.add_pipe("entity_linker", config=el_config, last=True) - el_pipe.begin_training() + el_pipe.begin_training(lambda: []) el_pipe.incl_context = False el_pipe.incl_prior = True diff --git a/spacy/tests/pipeline/test_textcat.py b/spacy/tests/pipeline/test_textcat.py index 17add7391..66c27b233 100644 --- a/spacy/tests/pipeline/test_textcat.py +++ b/spacy/tests/pipeline/test_textcat.py @@ -54,7 +54,7 @@ def test_textcat_learns_multilabel(): textcat = TextCategorizer(nlp.vocab, width=8) for letter in letters: textcat.add_label(letter) - optimizer = textcat.begin_training() + optimizer = textcat.begin_training(lambda: []) for i in range(30): losses = {} examples = [Example.from_dict(doc, {"cats": cats}) for doc, cat in docs] @@ -104,7 +104,7 @@ def test_overfitting_IO(): doc = nlp(test_text) cats = doc.cats # note that by default, exclusive_classes = false so we need a bigger error margin - assert 
cats["POSITIVE"] > 0.9 + assert cats["POSITIVE"] > 0.8 assert cats["POSITIVE"] + cats["NEGATIVE"] == pytest.approx(1.0, 0.1) # Also test the results are still the same after IO @@ -113,7 +113,7 @@ def test_overfitting_IO(): nlp2 = util.load_model_from_path(tmp_dir) doc2 = nlp2(test_text) cats2 = doc2.cats - assert cats2["POSITIVE"] > 0.9 + assert cats2["POSITIVE"] > 0.8 assert cats2["POSITIVE"] + cats2["NEGATIVE"] == pytest.approx(1.0, 0.1) # Test scoring diff --git a/spacy/tests/regression/test_issue2001-2500.py b/spacy/tests/regression/test_issue2001-2500.py index a09c6f4fb..259ca9b0c 100644 --- a/spacy/tests/regression/test_issue2001-2500.py +++ b/spacy/tests/regression/test_issue2001-2500.py @@ -25,7 +25,6 @@ def test_issue2070(): assert len(doc) == 11 -@pytest.mark.filterwarnings("ignore::UserWarning") def test_issue2179(): """Test that spurious 'extra_labels' aren't created when initializing NER.""" nlp = Italian() @@ -135,7 +134,6 @@ def test_issue2464(en_vocab): assert len(matches) == 3 -@pytest.mark.filterwarnings("ignore::UserWarning") def test_issue2482(): """Test we can serialize and deserialize a blank NER or parser model.""" nlp = Italian() diff --git a/spacy/tests/regression/test_issue2501-3000.py b/spacy/tests/regression/test_issue2501-3000.py index cf43e1a17..3882df0a6 100644 --- a/spacy/tests/regression/test_issue2501-3000.py +++ b/spacy/tests/regression/test_issue2501-3000.py @@ -20,7 +20,7 @@ def test_issue2564(): nlp = Language() tagger = nlp.add_pipe("tagger") tagger.add_label("A") - tagger.begin_training() + tagger.begin_training(lambda: []) doc = nlp("hello world") assert doc.is_tagged docs = nlp.pipe(["hello", "world"]) @@ -136,7 +136,6 @@ def test_issue2782(text, lang_cls): assert doc[0].like_num -@pytest.mark.filterwarnings("ignore::UserWarning") def test_issue2800(): """Test issue that arises when too many labels are added to NER model. Used to cause segfault. 
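The test changes above and below switch to `begin_training(lambda: [])` because `get_examples` must now be a zero-argument callable returning `Example` objects; passing a plain list raises a `ValueError` (E930). A minimal sketch of the new contract, reusing the PERSON annotation from `TRAIN_DATA` above:

```python
# Sketch: begin_training now takes a callable of Examples, not a list.
import spacy
from spacy.gold import Example

nlp = spacy.blank("en")
ner = nlp.add_pipe("ner")
ner.add_label("PERSON")

def get_examples():
    doc = nlp.make_doc("Who is Shaka Khan?")
    return [Example.from_dict(doc, {"entities": [(7, 17, "PERSON")]})]

ner.begin_training(get_examples)
# ner.begin_training([]) would now raise ValueError (E930)
```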
diff --git a/spacy/tests/regression/test_issue3001-3500.py b/spacy/tests/regression/test_issue3001-3500.py index 98a6b9aa0..3059eb5ab 100644 --- a/spacy/tests/regression/test_issue3001-3500.py +++ b/spacy/tests/regression/test_issue3001-3500.py @@ -90,7 +90,6 @@ def test_issue3199(): assert list(doc[0:3].noun_chunks) == [] -@pytest.mark.filterwarnings("ignore::UserWarning") def test_issue3209(): """Test issue that occurred in spaCy nightly where NER labels were being mapped to classes incorrectly after loading the model, when the labels diff --git a/spacy/tests/regression/test_issue3501-4000.py b/spacy/tests/regression/test_issue3501-4000.py index de554a5ec..fc2a3ed7c 100644 --- a/spacy/tests/regression/test_issue3501-4000.py +++ b/spacy/tests/regression/test_issue3501-4000.py @@ -91,7 +91,6 @@ def test_issue_3526_3(en_vocab): assert new_ruler.overwrite is not ruler.overwrite -@pytest.mark.filterwarnings("ignore::UserWarning") def test_issue_3526_4(en_vocab): nlp = Language(vocab=en_vocab) patterns = [{"label": "ORG", "pattern": "Apple"}] @@ -252,7 +251,6 @@ def test_issue3803(): assert [t.like_num for t in doc] == [True, True, True, True, True, True] -@pytest.mark.filterwarnings("ignore::UserWarning") def test_issue3830_no_subtok(): """Test that the parser doesn't have subtok label if not learn_tokens""" config = { @@ -270,7 +268,6 @@ def test_issue3830_no_subtok(): assert "subtok" not in parser.labels -@pytest.mark.filterwarnings("ignore::UserWarning") def test_issue3830_with_subtok(): """Test that the parser does have subtok label if learn_tokens=True.""" config = { @@ -333,7 +330,6 @@ def test_issue3879(en_vocab): assert len(matcher(doc)) == 2 # fails because of a FP match 'is a test' -@pytest.mark.filterwarnings("ignore::UserWarning") def test_issue3880(): """Test that `nlp.pipe()` works when an empty string ends the batch. diff --git a/spacy/tests/regression/test_issue4001-4500.py b/spacy/tests/regression/test_issue4001-4500.py index 423015106..1789973e9 100644 --- a/spacy/tests/regression/test_issue4001-4500.py +++ b/spacy/tests/regression/test_issue4001-4500.py @@ -81,7 +81,6 @@ def test_issue4030(): assert doc.cats["inoffensive"] == 0.0 -@pytest.mark.filterwarnings("ignore::UserWarning") def test_issue4042(): """Test that serialization of an EntityRuler before NER works fine.""" nlp = English() @@ -110,7 +109,6 @@ def test_issue4042(): assert doc2.ents[0].label_ == "MY_ORG" -@pytest.mark.filterwarnings("ignore::UserWarning") def test_issue4042_bug2(): """ Test that serialization of an NER works fine when new labels were added. 
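The `filterwarnings` markers removed in these regression tests were only needed for the lexeme-norm message, which `transition_parser.pyx` now routes through spaCy's logger (W033 via `util.logger.debug`) rather than `warnings.warn`. A sketch for surfacing such messages, assuming the `"spacy"` logger name set up in `util.py` later in this diff:

```python
# Enable debug-level output from spaCy's logger to see messages like W033.
import logging

logging.getLogger("spacy").setLevel(logging.DEBUG)
```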
@@ -242,7 +240,6 @@ def test_issue4190(): assert result_1b == result_2 -@pytest.mark.filterwarnings("ignore::UserWarning") def test_issue4267(): """ Test that running an entity_ruler after ner gives consistent results""" nlp = English() @@ -303,7 +300,7 @@ def test_issue4313(): config = {} ner = nlp.create_pipe("ner", config=config) ner.add_label("SOME_LABEL") - ner.begin_training([]) + ner.begin_training(lambda: []) # add a new label to the doc doc = nlp("What do you think about Apple ?") assert len(ner.labels) == 1 @@ -324,7 +321,6 @@ def test_issue4313(): entity_scores[(start, end, label)] += score -@pytest.mark.filterwarnings("ignore::UserWarning") def test_issue4348(): """Test that training the tagger with empty data, doesn't throw errors""" nlp = English() diff --git a/spacy/tests/regression/test_issue4501-5000.py b/spacy/tests/regression/test_issue4501-5000.py index 96d4e1ca4..1e655851f 100644 --- a/spacy/tests/regression/test_issue4501-5000.py +++ b/spacy/tests/regression/test_issue4501-5000.py @@ -179,7 +179,6 @@ def test_issue4707(): assert "entity_ruler" in new_nlp.pipe_names -@pytest.mark.filterwarnings("ignore::UserWarning") def test_issue4725_1(): """ Ensure the pickling of the NER goes well""" vocab = Vocab(vectors_name="test_vocab_add_vector") @@ -198,7 +197,6 @@ def test_issue4725_1(): assert ner2.cfg["update_with_oracle_cut_size"] == 111 -@pytest.mark.filterwarnings("ignore::UserWarning") def test_issue4725_2(): # ensures that this runs correctly and doesn't hang or crash because of the global vectors # if it does crash, it's usually because of calling 'spawn' for multiprocessing (e.g. on Windows), diff --git a/spacy/tests/regression/test_issue5152.py b/spacy/tests/regression/test_issue5152.py index 3c1cee5c3..c7a70a99c 100644 --- a/spacy/tests/regression/test_issue5152.py +++ b/spacy/tests/regression/test_issue5152.py @@ -1,8 +1,7 @@ -import pytest from spacy.lang.en import English +import pytest -@pytest.mark.filterwarnings("ignore::UserWarning") def test_issue5152(): # Test that the comparison between a Span and a Token, goes well # There was a bug when the number of tokens in the span equaled the number of characters in the token (!) @@ -14,6 +13,8 @@ def test_issue5152(): span_2 = text[0:3] # Talk about being span_3 = text_var[0:3] # Talk of being token = y[0] # Let - assert span.similarity(token) == 0.0 + with pytest.warns(UserWarning): + assert span.similarity(token) == 0.0 assert span.similarity(span_2) == 1.0 - assert span_2.similarity(span_3) < 1.0 + with pytest.warns(UserWarning): + assert span_2.similarity(span_3) < 1.0 diff --git a/spacy/tests/regression/test_issue5230.py b/spacy/tests/regression/test_issue5230.py index 4c6504f6b..93069d9a3 100644 --- a/spacy/tests/regression/test_issue5230.py +++ b/spacy/tests/regression/test_issue5230.py @@ -62,7 +62,7 @@ def tagger(): # need to add model for two reasons: # 1. no model leads to error in serialization, # 2. the affected line is the one for model serialization - tagger.begin_training(pipeline=nlp.pipeline) + tagger.begin_training(lambda: [], pipeline=nlp.pipeline) return tagger @@ -81,7 +81,7 @@ def entity_linker(): # need to add model for two reasons: # 1. no model leads to error in serialization, # 2. 
the affected line is the one for model serialization - entity_linker.begin_training(pipeline=nlp.pipeline) + entity_linker.begin_training(lambda: [], pipeline=nlp.pipeline) return entity_linker diff --git a/spacy/tests/serialize/test_serialize_config.py b/spacy/tests/serialize/test_serialize_config.py index 8e3c95823..1de137e81 100644 --- a/spacy/tests/serialize/test_serialize_config.py +++ b/spacy/tests/serialize/test_serialize_config.py @@ -4,7 +4,7 @@ import spacy from spacy.lang.en import English from spacy.lang.de import German from spacy.language import Language -from spacy.util import registry, deep_merge_configs, load_model_from_config +from spacy.util import registry, load_model_from_config from spacy.ml.models import build_Tok2Vec_model, build_tb_parser_model from spacy.ml.models import MultiHashEmbed, MaxoutWindowEncoder @@ -194,37 +194,6 @@ def test_serialize_parser(): assert upper.get_dim("nI") == 66 -def test_deep_merge_configs(): - config = {"a": "hello", "b": {"c": "d"}} - defaults = {"a": "world", "b": {"c": "e", "f": "g"}} - merged = deep_merge_configs(config, defaults) - assert len(merged) == 2 - assert merged["a"] == "hello" - assert merged["b"] == {"c": "d", "f": "g"} - config = {"a": "hello", "b": {"@test": "x", "foo": 1}} - defaults = {"a": "world", "b": {"@test": "x", "foo": 100, "bar": 2}, "c": 100} - merged = deep_merge_configs(config, defaults) - assert len(merged) == 3 - assert merged["a"] == "hello" - assert merged["b"] == {"@test": "x", "foo": 1, "bar": 2} - assert merged["c"] == 100 - config = {"a": "hello", "b": {"@test": "x", "foo": 1}, "c": 100} - defaults = {"a": "world", "b": {"@test": "y", "foo": 100, "bar": 2}} - merged = deep_merge_configs(config, defaults) - assert len(merged) == 3 - assert merged["a"] == "hello" - assert merged["b"] == {"@test": "x", "foo": 1} - assert merged["c"] == 100 - # Test that leaving out the factory just adds to existing - config = {"a": "hello", "b": {"foo": 1}, "c": 100} - defaults = {"a": "world", "b": {"@test": "y", "foo": 100, "bar": 2}} - merged = deep_merge_configs(config, defaults) - assert len(merged) == 3 - assert merged["a"] == "hello" - assert merged["b"] == {"@test": "y", "foo": 1, "bar": 2} - assert merged["c"] == 100 - - def test_config_nlp_roundtrip(): """Test that a config prduced by the nlp object passes training config validation.""" @@ -311,3 +280,22 @@ def test_config_overrides(): nlp = spacy.load(d) assert isinstance(nlp, English) assert nlp.pipe_names == ["tok2vec", "tagger"] + + +def test_config_interpolation(): + config = Config().from_str(nlp_config_string, interpolate=False) + assert config["training"]["train_corpus"]["path"] == "${paths:train}" + interpolated = config.interpolate() + assert interpolated["training"]["train_corpus"]["path"] == "" + nlp = English.from_config(config) + assert nlp.config["training"]["train_corpus"]["path"] == "${paths:train}" + # Ensure that variables are preserved in nlp config + width = "${components.tok2vec.model:width}" + assert config["components"]["tagger"]["model"]["tok2vec"]["width"] == width + assert nlp.config["components"]["tagger"]["model"]["tok2vec"]["width"] == width + interpolated2 = nlp.config.interpolate() + assert interpolated2["training"]["train_corpus"]["path"] == "" + assert interpolated2["components"]["tagger"]["model"]["tok2vec"]["width"] == 342 + nlp2 = English.from_config(interpolated) + assert nlp2.config["training"]["train_corpus"]["path"] == "" + assert nlp2.config["components"]["tagger"]["model"]["tok2vec"]["width"] == 342 diff --git 
a/spacy/tests/test_cli.py b/spacy/tests/test_cli.py index b5cc6fff8..1da257fd5 100644 --- a/spacy/tests/test_cli.py +++ b/spacy/tests/test_cli.py @@ -1,11 +1,14 @@ import pytest - from spacy.gold import docs_to_json, biluo_tags_from_offsets from spacy.gold.converters import iob2docs, conll_ner2docs, conllu2docs from spacy.lang.en import English from spacy.schemas import ProjectConfigSchema, validate from spacy.cli.pretrain import make_docs +from spacy.cli.init_config import init_config, RECOMMENDATIONS_PATH +from spacy.cli.init_config import RecommendationSchema from spacy.cli._util import validate_project_commands, parse_config_overrides +from spacy.util import get_lang_class +import srsly def test_cli_converters_conllu2json(): @@ -319,3 +322,20 @@ def test_parse_config_overrides(args, expected): def test_parse_config_overrides_invalid(args): with pytest.raises(SystemExit): parse_config_overrides(args) + + +@pytest.mark.parametrize("lang", ["en", "nl"]) +@pytest.mark.parametrize( + "pipeline", [["tagger", "parser", "ner"], [], ["ner", "textcat", "sentencizer"]] +) +@pytest.mark.parametrize("optimize", ["efficiency", "accuracy"]) +def test_init_config(lang, pipeline, optimize): + # TODO: add more tests and also check for GPU with transformers + init_config("-", lang=lang, pipeline=pipeline, optimize=optimize, cpu=True) + + +def test_model_recommendations(): + recommendations = srsly.read_json(RECOMMENDATIONS_PATH) + for lang, data in recommendations.items(): + assert get_lang_class(lang) + assert RecommendationSchema(**data) diff --git a/spacy/tests/test_gold.py b/spacy/tests/test_gold.py index 708c57837..334d9fc24 100644 --- a/spacy/tests/test_gold.py +++ b/spacy/tests/test_gold.py @@ -154,6 +154,7 @@ def test_example_from_dict_some_ner(en_vocab): assert ner_tags == ["U-LOC", None, None, None] +@pytest.mark.filterwarnings("ignore::UserWarning") def test_json2docs_no_ner(en_vocab): data = [ { @@ -506,6 +507,7 @@ def test_roundtrip_docs_to_docbin(doc): assert cats["BAKING"] == reloaded_example.reference.cats["BAKING"] +@pytest.mark.filterwarnings("ignore::UserWarning") def test_make_orth_variants(doc): nlp = English() with make_tempdir() as tmpdir: @@ -586,7 +588,7 @@ def test_tuple_format_implicit(): ("Uber blew through $1 million a week", {"entities": [(0, 4, "ORG")]}), ( "Spotify steps up Asia expansion", - {"entities": [(0, 8, "ORG"), (17, 21, "LOC")]}, + {"entities": [(0, 7, "ORG"), (17, 21, "LOC")]}, ), ("Google rebrands its business apps", {"entities": [(0, 6, "ORG")]}), ] @@ -601,7 +603,7 @@ def test_tuple_format_implicit_invalid(): ("Uber blew through $1 million a week", {"frumble": [(0, 4, "ORG")]}), ( "Spotify steps up Asia expansion", - {"entities": [(0, 8, "ORG"), (17, 21, "LOC")]}, + {"entities": [(0, 7, "ORG"), (17, 21, "LOC")]}, ), ("Google rebrands its business apps", {"entities": [(0, 6, "ORG")]}), ] diff --git a/spacy/tests/test_new_example.py b/spacy/tests/test_new_example.py index df6489aa8..321eaae95 100644 --- a/spacy/tests/test_new_example.py +++ b/spacy/tests/test_new_example.py @@ -46,6 +46,7 @@ def test_Example_from_dict_with_tags(pred_words, annots): assert aligned_tags == ["NN" for _ in predicted] +@pytest.mark.filterwarnings("ignore::UserWarning") def test_aligned_tags(): pred_words = ["Apply", "some", "sunscreen", "unless", "you", "can", "not"] gold_words = ["Apply", "some", "sun", "screen", "unless", "you", "cannot"] @@ -198,8 +199,8 @@ def test_Example_from_dict_with_entities(annots): def test_Example_from_dict_with_entities_invalid(annots): vocab = Vocab() 
predicted = Doc(vocab, words=annots["words"]) - example = Example.from_dict(predicted, annots) - # TODO: shouldn't this throw some sort of warning ? + with pytest.warns(UserWarning): + example = Example.from_dict(predicted, annots) assert len(list(example.reference.ents)) == 0 diff --git a/spacy/tokenizer.pyx b/spacy/tokenizer.pyx index ce7b5cb1c..a13299fff 100644 --- a/spacy/tokenizer.pyx +++ b/spacy/tokenizer.pyx @@ -24,6 +24,7 @@ from .util import registry from .attrs import intify_attrs from .symbols import ORTH from .scorer import Scorer +from .gold import validate_examples cdef class Tokenizer: @@ -712,6 +713,7 @@ cdef class Tokenizer: return tokens def score(self, examples, **kwargs): + validate_examples(examples, "Tokenizer.score") return Scorer.score_tokenization(examples) def to_disk(self, path, **kwargs): diff --git a/spacy/util.py b/spacy/util.py index d10f83789..3cf165a4f 100644 --- a/spacy/util.py +++ b/spacy/util.py @@ -24,6 +24,7 @@ import tempfile import shutil import shlex import inspect +import logging try: import cupy.random @@ -54,10 +55,19 @@ if TYPE_CHECKING: from .vocab import Vocab # noqa: F401 -_PRINT_ENV = False OOV_RANK = numpy.iinfo(numpy.uint64).max LEXEME_NORM_LANGS = ["da", "de", "el", "en", "id", "lb", "pt", "ru", "sr", "ta", "th"] +# Default order of sections in the config.cfg. Not all sections need to exist, +# and additional sections are added at the end, in alphabetical order. +# fmt: off +CONFIG_SECTION_ORDER = ["paths", "variables", "system", "nlp", "components", "training", "pretraining"] +# fmt: on + + +logging.basicConfig() +logger = logging.getLogger("spacy") + class registry(thinc.registry): languages = catalogue.create("spacy", "languages", entry_points=True) @@ -109,11 +119,6 @@ class SimpleFrozenDict(dict): raise NotImplementedError(self.error) -def set_env_log(value: bool) -> None: - global _PRINT_ENV - _PRINT_ENV = value - - def lang_class_is_loaded(lang: str) -> bool: """Check whether a Language class is already loaded. Language classes are loaded lazily, to avoid expensive setup code associated with the language @@ -264,9 +269,7 @@ def load_model_from_path( if not meta: meta = get_model_meta(model_path) config_path = model_path / "config.cfg" - if not config_path.exists() or not config_path.is_file(): - raise IOError(Errors.E053.format(path=config_path, name="config.cfg")) - config = Config().from_disk(config_path, overrides=dict_to_dot(config)) + config = load_config(config_path, overrides=dict_to_dot(config)) nlp, _ = load_model_from_config(config, vocab=vocab, disable=disable) return nlp.from_disk(model_path, exclude=disable) @@ -317,6 +320,29 @@ def load_model_from_init_py( ) +def load_config( + path: Union[str, Path], + overrides: Dict[str, Any] = SimpleFrozenDict(), + interpolate: bool = False, +) -> Config: + """Load a config file.
Takes care of path validation and section order.""" + config_path = ensure_path(path) + if not config_path.exists() or not config_path.is_file(): + raise IOError(Errors.E053.format(path=config_path, name="config.cfg")) + return Config(section_order=CONFIG_SECTION_ORDER).from_disk( + config_path, overrides=overrides, interpolate=interpolate + ) + + +def load_config_from_str( + text: str, overrides: Dict[str, Any] = SimpleFrozenDict(), interpolate: bool = False +): + """Load a full config from a string.""" + return Config(section_order=CONFIG_SECTION_ORDER).from_str( + text, overrides=overrides, interpolate=interpolate, + ) + + def get_installed_models() -> List[str]: """List all model packages currently installed in the environment. @@ -602,27 +628,6 @@ def get_async(stream, numpy_array): return array -def env_opt(name: str, default: Optional[Any] = None) -> Optional[Any]: - if type(default) is float: - type_convert = float - else: - type_convert = int - if "SPACY_" + name.upper() in os.environ: - value = type_convert(os.environ["SPACY_" + name.upper()]) - if _PRINT_ENV: - print(name, "=", repr(value), "via", "$SPACY_" + name.upper()) - return value - elif name in os.environ: - value = type_convert(os.environ[name]) - if _PRINT_ENV: - print(name, "=", repr(value), "via", "$" + name) - return value - else: - if _PRINT_ENV: - print(name, "=", repr(default), "by default") - return default - - def read_regex(path: Union[str, Path]) -> Pattern: path = ensure_path(path) with path.open(encoding="utf8") as file_: @@ -923,45 +928,6 @@ def copy_config(config: Union[Dict[str, Any], Config]) -> Config: raise ValueError(Errors.E961.format(config=config)) from None -def deep_merge_configs( - config: Union[Dict[str, Any], Config], defaults: Union[Dict[str, Any], Config] -) -> Config: - """Deep merge two configs, a base config and its defaults. Ignores - references to registered functions to avoid filling in - - config (Dict[str, Any]): The config. - destination (Dict[str, Any]): The config defaults. - RETURNS (Dict[str, Any]): The merged config. - """ - config = copy_config(config) - merged = _deep_merge_configs(config, defaults) - return Config(merged) - - -def _deep_merge_configs( - config: Union[Dict[str, Any], Config], defaults: Union[Dict[str, Any], Config] -) -> Union[Dict[str, Any], Config]: - for key, value in defaults.items(): - if isinstance(value, dict): - node = config.setdefault(key, {}) - if not isinstance(node, dict): - continue - promises = [key for key in value if key.startswith("@")] - promise = promises[0] if promises else None - # We only update the block from defaults if it refers to the same - # registered function - if ( - promise - and any(k.startswith("@") for k in node) - and (promise in node and node[promise] != value[promise]) - ): - continue - defaults = _deep_merge_configs(node, value) - elif key not in config: - config[key] = value - return config - - def dot_to_dict(values: Dict[str, Any]) -> Dict[str, dict]: """Convert dot notation to a dict. For example: {"token.pos": True, "token._.xyz": True} becomes {"token": {"pos": True, "_": {"xyz": True }}}. @@ -1067,24 +1033,7 @@ class DummyTokenizer: def create_default_optimizer() -> Optimizer: - # TODO: Do we still want to allow env_opt? 
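The `load_config` and `load_config_from_str` helpers added above centralize path validation and the `CONFIG_SECTION_ORDER`. A usage sketch — the file name and override key are placeholders:

```python
from spacy.util import load_config, load_config_from_str

# From disk, with a dot-notation override (values stay uninterpolated by default)
config = load_config("config.cfg", overrides={"paths.train": "./train.spacy"})

# From a string, resolving any variables in place
partial = load_config_from_str('[nlp]\nlang = "en"\npipeline = []\n', interpolate=True)
```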
- learn_rate = env_opt("learn_rate", 0.001) - beta1 = env_opt("optimizer_B1", 0.9) - beta2 = env_opt("optimizer_B2", 0.999) - eps = env_opt("optimizer_eps", 1e-8) - L2 = env_opt("L2_penalty", 1e-6) - grad_clip = env_opt("grad_norm_clip", 10.0) - L2_is_weight_decay = env_opt("L2_is_weight_decay", False) - optimizer = Adam( - learn_rate, - L2=L2, - beta1=beta1, - beta2=beta2, - eps=eps, - grad_clip=grad_clip, - L2_is_weight_decay=L2_is_weight_decay, - ) - return optimizer + return Adam() def minibatch(items, size): diff --git a/website/docs/api/architectures.md b/website/docs/api/architectures.md index 73631c64a..cc6f44fcc 100644 --- a/website/docs/api/architectures.md +++ b/website/docs/api/architectures.md @@ -274,7 +274,7 @@ architectures into your training config. | `get_spans` | `Callable` | Function that takes a batch of [`Doc`](/api/doc) object and returns lists of [`Span`](/api) objects to process by the transformer. [See here](/api/transformer#span_getters) for built-in options and examples. | | `tokenizer_config` | `Dict[str, Any]` | Tokenizer settings passed to [`transformers.AutoTokenizer`](https://huggingface.co/transformers/model_doc/auto.html#transformers.AutoTokenizer). | -### spacy-transformers.Tok2VecListener.v1 {#Tok2VecListener} +### spacy-transformers.Tok2VecListener.v1 {#transformers-Tok2VecListener} > #### Example Config > diff --git a/website/docs/api/cli.md b/website/docs/api/cli.md index 5c971effa..be7a2b499 100644 --- a/website/docs/api/cli.md +++ b/website/docs/api/cli.md @@ -16,9 +16,11 @@ menu: - ['Project', 'project'] --- -For a list of available commands, type `spacy --help`. - - +spaCy's CLI provides a range of helpful commands for downloading and training +models, converting data and debugging your config, data and installation. For a +list of available commands, you can type `python -m spacy --help`. You can also +add the `--help` flag to any command or subcommand to see the description, +available arguments and usage. ## Download {#download} @@ -41,13 +43,13 @@ the model name to be specified with its version (e.g. `en_core_web_sm-2.2.0`). $ python -m spacy download [model] [--direct] [pip args] ``` -| Argument | Type | Description | -| ------------------------------------- | ---------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | positional | Model name, e.g. `en_core_web_sm`.. | -| `--direct`, `-d` | flag | Force direct download of exact model version. | -| pip args 2.1 | - | Additional installation options to be passed to `pip install` when installing the model package. For example, `--user` to install to the user home directory or `--no-deps` to not install model dependencies. | -| `--help`, `-h` | flag | Show help message and available arguments. | -| **CREATES** | directory | The installed model package in your `site-packages` directory. | +| Argument | Type | Description | +| ------------------------------------- | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | positional | Model name, e.g. [`en_core_web_sm`](/models/en#en_core_web_sm). | +| `--direct`, `-d` | flag | Force direct download of exact model version. | +| `--help`, `-h` | flag | Show help message and available arguments. 
| +| pip args 2.1 | option / flag | Additional installation options to be passed to `pip install` when installing the model package. For example, `--user` to install to the user home directory or `--no-deps` to not install model dependencies. | +| **CREATES** | directory | The installed model package in your `site-packages` directory. | ## Info {#info} @@ -101,39 +103,62 @@ files and model directories. ### init config {#init-config new="3"} -Initialize and export a [`config.cfg` file](/usage/training#config) for training -and update it with all default values, if possible. Config files used for -training should always be complete and not contain any hidden defaults or -missing values, so this command helps you create your final config. It takes -**one** of the following options: - -- `--base`: Base **config** to auto-fill, e.g. created using the - [training quickstart](/usage/training#quickstart) widget. -- `--lang`: Base **language** code to use for blank config. -- `--model`: Base **model** to copy config from. +Initialize and save a [`config.cfg` file](/usage/training#config) using the +**recommended settings** for your use case. It works just like the +[quickstart widget](/usage/training#quickstart), only that it also auto-fills +all default values and exports a [training](/usage/training#config)-ready +config. The settings you specify will impact the suggested model architectures +and pipeline setup, as well as the hyperparameters. You can also adjust and +customize those settings in your config file later. > ```bash -> ### with base config {wrap="true"} -> $ python -m spacy init config config.cfg --base base.cfg -> ``` -> -> ```bash -> ### blank language {wrap="true"} -> $ python -m spacy init config config.cfg --lang en --pipeline tagger,parser +> ### Example {wrap="true"} +> $ python -m spacy init config config.cfg --lang en --pipeline ner,textcat --optimize accuracy > ``` ```bash -$ python -m spacy init config [output] [--base] [--lang] [--model] [--pipeline] +$ python -m spacy init config [output_file] [--lang] [--pipeline] +[--optimize] [--cpu] ``` -| Argument | Type | Description | -| ------------------ | ---------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `output` | positional | Path to output `.cfg` file. If not set, the config is written to stdout so you can pipe it forward to a file. | -| `--base`, `-b` | option | Optional base config file to auto-fill with defaults. | -| `--lang`, `-l` | option | Optional language code to use for blank config. If a `--pipeline` is specified, the components will be added in order. | -| `--model`, `-m` | option | Optional base model to copy config from. If a `--pipeline` is specified, only those components will be kept, and all other components not in the model will be added. | -| `--pipeline`, `-p` | option | Optional comma-separated pipeline of components to add to blank language or model. | -| **CREATES** | config | Complete and auto-filled config file for training. | +| Argument | Type | Description | +| ------------------ | ---------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `output_file` | positional | Path to output `.cfg` file. 
If not set, the config is written to stdout so you can pipe it forward to a file. | +| `--lang`, `-l` | option | Optional code of the [language](/usage/models#languages) to use. Defaults to `"en"`. | +| `--pipeline`, `-p` | option | Comma-separated list of trainable [pipeline components](/usage/processing-pipelines#built-in) to include in the model. Defaults to `"tagger,parser,ner"`. | +| `--optimize`, `-o` | option | `"efficiency"` or `"accuracy"`. Whether to optimize for efficiency (faster inference, smaller model, lower memory consumption) or higher accuracy (potentially larger and slower model). This will impact the choice of architecture, pretrained weights and related hyperparameters. Defaults to `"efficiency"`. | +| `--cpu`, `-C` | flag | Whether the model needs to run on CPU. This will impact the choice of architecture, pretrained weights and related hyperparameters. | +| `--help`, `-h` | flag | Show help message and available arguments. | +| **CREATES** | file | The config file for training. | + +### init fill-config {#init-fill-config new="3"} + +Auto-fill a partial [`config.cfg` file](/usage/training#config) file with **all +default values**, e.g. a config generated with the +[quickstart widget](/usage/training#quickstart). Config files used for training +should always be complete and not contain any hidden defaults or missing values, +so this command helps you create your final training config. In order to find +the available settings and defaults, all functions referenced in the config will +be created, and their signatures are used to find the defaults. If your config +contains a problem that can't be resolved automatically, spaCy will show you a +validation error with more details. + +> ```bash +> ### Example {wrap="true"} +> $ python -m spacy init fill-config base.cfg config.cfg +> ``` + +```bash +$ python -m spacy init fill-config [base_path] [output_file] [--diff] +``` + +| Argument | Type | Description | +| -------------- | ---------- | ------------------------------------------------------------------------------------------------------------- | +| `base_path` | positional | Path to base config to fill, e.g. generated by the [quickstart widget](/usage/training#quickstart). | +| `output_file` | positional | Path to output `.cfg` file. If not set, the config is written to stdout so you can pipe it forward to a file. | +| `--diff`, `-D` | flag | Print a visual diff highlighting the changes. | +| `--help`, `-h` | flag | Show help message and available arguments. | +| **CREATES** | file | Complete and auto-filled config file for training. | ### init model {#init-model new="2"} @@ -166,6 +191,7 @@ $ python -m spacy init model [lang] [output_dir] [--jsonl-loc] [--vectors-loc] | `--truncate-vectors`, `-t` 2.3 | option | Number of vectors to truncate to when reading in vectors file. Defaults to `0` for no truncation. | | `--prune-vectors`, `-V` | option | Number of vectors to prune the vocabulary to. Defaults to `-1` for no pruning. | | `--vectors-name`, `-vn` | option | Name to assign to the word vectors in the `meta.json`, e.g. `en_core_web_md.vectors`. | +| `--help`, `-h` | flag | Show help message and available arguments. | | **CREATES** | model | A spaCy model containing the vocab and vectors. 
| ## Convert {#convert} @@ -234,34 +260,33 @@ $ python -m spacy debug config [config_path] [--code_path] [--output] [--auto_fi - - ``` ✘ Config validation error -training -> use_gpu field required -training -> omit_extra_lookups field required -training -> batch_by field required -training -> raw_text field required -training -> tag_map field required -training -> evaluation_batch_size extra fields not permitted -training -> vectors extra fields not permitted -training -> width extra fields not permitted +training -> dropout field required +training -> optimizer field required +training -> optimize extra fields not permitted -{'gold_preproc': False, 'max_length': 3000, 'limit': 0, 'orth_variant_level': 0.0, 'dropout': 0.1, 'patience': 6000, 'max_epochs': 0, 'max_steps': 100000, 'eval_frequency': 400, 'seed': 0, 'accumulate_gradient': 4, 'width': 768, 'use_pytorch_for_gpu_memory': True, 'scores': ['speed', 'tags_acc', 'uas', 'las', 'ents_f'], 'score_weights': {'las': 0.4, 'ents_f': 0.4, 'tags_acc': 0.2}, 'init_tok2vec': None, 'vectors': None, 'discard_oversize': True, 'evaluation_batch_size': 16, 'batch_size': {'@schedules': 'compounding.v1', 'start': 800, 'stop': 800, 'compound': 1.001}, 'optimizer': {'@optimizers': 'Adam.v1', 'beta1': 0.9, 'beta2': 0.999, 'L2_is_weight_decay': True, 'L2': 0.01, 'grad_clip': 1.0, 'use_averages': False, 'eps': 1e-08, 'learn_rate': {'@schedules': 'warmup_linear.v1', 'warmup_steps': 250, 'total_steps': 20000, 'initial_rate': 5e-05}}} +{'vectors': 'en_vectors_web_lg', 'seed': 0, 'accumulate_gradient': 1, 'init_tok2vec': None, 'raw_text': None, 'patience': 1600, 'max_epochs': 0, 'max_steps': 20000, 'eval_frequency': 200, 'frozen_components': [], 'optimize': None, 'batcher': {'@batchers': 'batch_by_words.v1', 'discard_oversize': False, 'tolerance': 0.2, 'get_length': None, 'size': {'@schedules': 'compounding.v1', 'start': 100, 'stop': 1000, 'compound': 1.001, 't': 0.0}}, 'dev_corpus': {'@readers': 'spacy.Corpus.v1', 'path': '', 'max_length': 0, 'gold_preproc': False, 'limit': 0}, 'score_weights': {'tag_acc': 0.5, 'dep_uas': 0.25, 'dep_las': 0.25, 'sents_f': 0.0}, 'train_corpus': {'@readers': 'spacy.Corpus.v1', 'path': '', 'max_length': 0, 'gold_preproc': False, 'limit': 0}} + +If your config contains missing values, you can run the 'init fill-config' +command to fill in all the defaults, if possible: + +python -m spacy init fill-config tmp/starter-config_invalid.cfg --base tmp/starter-config_invalid.cfg ``` -| Argument | Type | Default | Description | -| --------------------- | ---------- | ------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `config_path` | positional | - | Path to [training config](/api/data-formats#config) file containing all settings and hyperparameters. | -| `--code_path`, `-c` | option | `None` | Path to Python file with additional code to be imported. Allows [registering custom functions](/usage/training#custom-models) for new architectures. | -| `--auto_fill`, `-F` | option | `False` | Whether or not to auto-fill the config with built-in defaults if possible. If `False`, the provided config needs to be complete. | -| `--output_path`, `-o` | option | `None` | Output path where the filled config can be stored. Use '-' for standard output. | -| `--diff`, `-D` | option | `False` | Show a visual diff if config was auto-filled. | -| `--help`, `-h` | flag | `False` | Show help message and available arguments. 
| -| overrides | | `None` | Config parameters to override. Should be options starting with `--` that correspond to the config section and value to override, e.g. `--paths.train ./train.spacy`. | +| Argument | Type | Description | +| --------------------- | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `config_path` | positional | Path to [training config](/api/data-formats#config) file containing all settings and hyperparameters. | +| `--code_path`, `-c` | option | Path to Python file with additional code to be imported. Allows [registering custom functions](/usage/training#custom-models) for new architectures. | +| `--auto_fill`, `-F` | option | Whether or not to auto-fill the config with built-in defaults if possible. If `False`, the provided config needs to be complete. | +| `--output_path`, `-o` | option | Output path where the filled config can be stored. Use '-' for standard output. | +| `--diff`, `-D` | option | Show a visual diff if config was auto-filled. | +| `--help`, `-h` | flag | Show help message and available arguments. | +| overrides | option / flag | Config parameters to override. Should be options starting with `--` that correspond to the config section and value to override, e.g. `--paths.train ./train.spacy`. | +| **PRINTS** | stdout | Config validation errors, if available. | ### debug data {#debug-data} @@ -428,15 +453,16 @@ will not be available. - -| Argument | Type | Description | -| -------------------------- | ---------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `config_path` | positional | Path to [training config](/api/data-formats#config) file containing all settings and hyperparameters. | -| `--code`, `-c` | option | Path to Python file with additional code to be imported. Allows [registering custom functions](/usage/training#custom-models) for new architectures. | -| `--ignore-warnings`, `-IW` | flag | Ignore warnings, only show stats and errors. | -| `--verbose`, `-V` | flag | Print additional information and explanations. | -| `--no-format`, `-NF` | flag | Don't pretty-print the results. Use this if you want to write to a file. | -| `--help`, `-h` | flag | Show help message and available arguments. | -| overrides | | Config parameters to override. Should be options starting with `--` that correspond to the config section and value to override, e.g. `--paths.train ./train.spacy`. | +| Argument | Type | Description | +| -------------------------- | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `config_path` | positional | Path to [training config](/api/data-formats#config) file containing all settings and hyperparameters. | +| `--code`, `-c` | option | Path to Python file with additional code to be imported. Allows [registering custom functions](/usage/training#custom-models) for new architectures. | +| `--ignore-warnings`, `-IW` | flag | Ignore warnings, only show stats and errors. | +| `--verbose`, `-V` | flag | Print additional information and explanations. | +| `--no-format`, `-NF` | flag | Don't pretty-print the results. Use this if you want to write to a file.
| +| `--help`, `-h` | flag | Show help message and available arguments. | +| overrides | option / flag | Config parameters to override. Should be options starting with `--` that correspond to the config section and value to override, e.g. `--paths.train ./train.spacy`. | +| **PRINTS** | stdout | Debugging information. | ### debug profile {#debug-profile} @@ -456,11 +482,13 @@ The `profile` command is now available as a subcommand of `spacy debug`. $ python -m spacy debug profile [model] [inputs] [--n-texts] ``` -| Argument | Type | Description | -| ----------------- | ----------------------------------------------------------------- | ------------------------------------------------------- | -| `model` | positional | A loadable spaCy model. | -| `inputs` | positional | Optional path to input file, or `-` for standard input. | -| `--n-texts`, `-n` | Maximum number of texts to use if available. Defaults to `10000`. | +| Argument | Type | Description | +| ----------------- | ---------- | ----------------------------------------------------------------- | +| `model` | positional | A loadable spaCy model. | +| `inputs` | positional | Optional path to input file, or `-` for standard input. | +| `--n-texts`, `-n` | option | Maximum number of texts to use if available. Defaults to `10000`. | +| `--help`, `-h` | flag | Show help message and available arguments. | +| **PRINTS** | stdout | Profiling information for the model. | ### debug model {#debug-model} @@ -568,20 +596,21 @@ $ python -m spacy debug model ./config.cfg tagger -l "5,15" -DIM -PAR -P0 -P1 -P -| Argument | Type | Description | Default | -| ----------------------- | ---------- | ----------------------------------------------------------------------------------------------------- | ------- | -| `config_path` | positional | Path to [training config](/api/data-formats#config) file containing all settings and hyperparameters. | | -| `component` | positional | Name of the pipeline component of which the model should be analyzed. |   | -| `--layers`, `-l` | option | Comma-separated names of layer IDs to print. | | -| `--dimensions`, `-DIM` | option | Show dimensions of each layer. | `False` | -| `--parameters`, `-PAR` | option | Show parameters of each layer. | `False` | -| `--gradients`, `-GRAD` | option | Show gradients of each layer. | `False` | -| `--attributes`, `-ATTR` | option | Show attributes of each layer. | `False` | -| `--print-step0`, `-P0` | option | Print model before training. | `False` | -| `--print-step1`, `-P1` | option | Print model after initialization. | `False` | -| `--print-step2`, `-P2` | option | Print model after training. | `False` | -| `--print-step3`, `-P3` | option | Print final predictions. | `False` | -| `--help`, `-h` | flag | Show help message and available arguments. | | +| Argument | Type | Description | +| ----------------------- | ---------- | ----------------------------------------------------------------------------------------------------- | +| `config_path` | positional | Path to [training config](/api/data-formats#config) file containing all settings and hyperparameters. | +| `component` | positional | Name of the pipeline component of which the model should be analyzed. | +| `--layers`, `-l` | option | Comma-separated names of layer IDs to print. | +| `--dimensions`, `-DIM` | option | Show dimensions of each layer. | +| `--parameters`, `-PAR` | option | Show parameters of each layer. | +| `--gradients`, `-GRAD` | option | Show gradients of each layer.
| +| `--attributes`, `-ATTR` | option | Show attributes of each layer. | +| `--print-step0`, `-P0` | option | Print model before training. | +| `--print-step1`, `-P1` | option | Print model after initialization. | +| `--print-step2`, `-P2` | option | Print model after training. | +| `--print-step3`, `-P3` | option | Print final predictions. | +| `--help`, `-h` | flag | Show help message and available arguments. | +| **PRINTS** | stdout | Debugging information. | ## Train {#train} @@ -611,15 +640,15 @@ in the section `[paths]`. $ python -m spacy train [config_path] [--output] [--code] [--verbose] [overrides] ``` -| Argument | Type | Description | -| ----------------- | ---------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `config_path` | positional | Path to [training config](/api/data-formats#config) file containing all settings and hyperparameters. | -| `--output`, `-o` | positional | Directory to store model in. Will be created if it doesn't exist. | -| `--code`, `-c` | option | Path to Python file with additional code to be imported. Allows [registering custom functions](/usage/training#custom-models) for new architectures. | -| `--verbose`, `-V` | flag | Show more detailed messages during training. | -| `--help`, `-h` | flag | Show help message and available arguments. | -| overrides | | Config parameters to override. Should be options starting with `--` that correspond to the config section and value to override, e.g. `--paths.train ./train.spacy`. | -| **CREATES** | model | The final model and the best model. | +| Argument | Type | Description | +| ----------------- | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `config_path` | positional | Path to [training config](/api/data-formats#config) file containing all settings and hyperparameters. | +| `--output`, `-o` | positional | Directory to store model in. Will be created if it doesn't exist. | +| `--code`, `-c` | option | Path to Python file with additional code to be imported. Allows [registering custom functions](/usage/training#custom-models) for new architectures. | +| `--verbose`, `-V` | flag | Show more detailed messages during training. | +| `--help`, `-h` | flag | Show help message and available arguments. | +| overrides | option / flag | Config parameters to override. Should be options starting with `--` that correspond to the config section and value to override, e.g. `--paths.train ./train.spacy`. | +| **CREATES** | model | The final model and the best model. | ## Pretrain {#pretrain new="2.1" tag="experimental"} @@ -649,17 +678,17 @@ $ python -m spacy pretrain [texts_loc] [output_dir] [config_path] [--code] [--resume-path] [--epoch-resume] [overrides] ``` -| Argument | Type | Description | -| ----------------------- | ---------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `texts_loc` | positional | Path to JSONL file with raw texts to learn from, with text provided as the key `"text"` or tokens as the key `"tokens"`. [See here](/api/data-formats#pretrain) for details. | -| `output_dir` | positional | Directory to write models to on each epoch. 
| -| `config_path` | positional | Path to [training config](/api/data-formats#config) file containing all settings and hyperparameters. | -| `--code`, `-c` | option | Path to Python file with additional code to be imported. Allows [registering custom functions](/usage/training#custom-models) for new architectures. | -| `--resume-path`, `-r` | option | Path to pretrained weights from which to resume pretraining. | -| `--epoch-resume`, `-er` | option | The epoch to resume counting from when using `--resume-path`. Prevents unintended overwriting of existing weight files. | -| `--help`, `-h` | flag | Show help message and available arguments. | -| overrides | | Config parameters to override. Should be options starting with `--` that correspond to the config section and value to override, e.g. `--training.use_gpu 1`. | -| **CREATES** | weights | The pretrained weights that can be used to initialize `spacy train`. | +| Argument | Type | Description | +| ----------------------- | ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `texts_loc` | positional | Path to JSONL file with raw texts to learn from, with text provided as the key `"text"` or tokens as the key `"tokens"`. [See here](/api/data-formats#pretrain) for details. | +| `output_dir` | positional | Directory to write models to on each epoch. | +| `config_path` | positional | Path to [training config](/api/data-formats#config) file containing all settings and hyperparameters. | +| `--code`, `-c` | option | Path to Python file with additional code to be imported. Allows [registering custom functions](/usage/training#custom-models) for new architectures. | +| `--resume-path`, `-r` | option | Path to pretrained weights from which to resume pretraining. | +| `--epoch-resume`, `-er` | option | The epoch to resume counting from when using `--resume-path`. Prevents unintended overwriting of existing weight files. | +| `--help`, `-h` | flag | Show help message and available arguments. | +| overrides | option / flag | Config parameters to override. Should be options starting with `--` that correspond to the config section and value to override, e.g. `--training.use_gpu 1`. | +| **CREATES** | weights | The pretrained weights that can be used to initialize `spacy train`. | ## Evaluate {#evaluate new="2"} @@ -687,6 +716,7 @@ $ python -m spacy evaluate [model] [data_path] [--output] [--gold-preproc] | `--gpu-id`, `-g` | option | GPU to use, if any. Defaults to `-1` for CPU. | | `--displacy-path`, `-dp` | option | Directory to output rendered parses as HTML. If not set, no visualizations will be generated. | | `--displacy-limit`, `-dl` | option | Number of parses to generate per file. Defaults to `25`. Keep in mind that a significantly higher number might cause the `.html` files to render slowly. | +| `--help`, `-h` | flag | Show help message and available arguments. | | **CREATES** | `stdout`, JSON, HTML | Training results and optional metrics and visualizations. | ## Package {#package} @@ -826,6 +856,7 @@ $ python -m spacy project run [subcommand] [project_dir] [--force] [--dry] | `--force`, `-F` | flag | Force re-running steps, even if nothing changed. | | `--dry`, `-D` | flag |  Perform a dry run and don't execute scripts. | | `--help`, `-h` | flag | Show help message and available arguments. | +| **EXECUTES** | script | The command defined in the `project.yml`. 
| ### project dvc {#project-dvc} @@ -859,10 +890,11 @@ $ python -m spacy project dvc [project_dir] [workflow] [--force] [--verbose] > python -m spacy project dvc all > ``` -| Argument | Type | Description | -| ----------------- | ---------- | --------------------------------------------------------------------------------- | -| `project_dir` | positional | Path to project directory. Defaults to current working directory. | -| `workflow` | positional | Name of workflow defined in `project.yml`. Defaults to first workflow if not set. | -| `--force`, `-F` | flag | Force-updating config file. | -| `--verbose`, `-V` | flag |  Print more output generated by DVC. | -| `--help`, `-h` | flag | Show help message and available arguments. | +| Argument | Type | Description | +| ----------------- | ---------- | --------------------------------------------------------------------------------------------- | +| `project_dir` | positional | Path to project directory. Defaults to current working directory. | +| `workflow` | positional | Name of workflow defined in `project.yml`. Defaults to first workflow if not set. | +| `--force`, `-F` | flag | Force-updating config file. | +| `--verbose`, `-V` | flag |  Print more output generated by DVC. | +| `--help`, `-h` | flag | Show help message and available arguments. | +| **CREATES** | file | A `dvc.yaml` file in the project directory, based on the steps defined in the given workflow. | diff --git a/website/docs/api/data-formats.md b/website/docs/api/data-formats.md index af7cb26de..6245c219f 100644 --- a/website/docs/api/data-formats.md +++ b/website/docs/api/data-formats.md @@ -20,8 +20,9 @@ Config files define the training process and model pipeline and can be passed to [`spacy train`](/api/cli#train). They use [Thinc's configuration system](https://thinc.ai/docs/usage-config) under the hood. For details on how to use training configs, see the -[usage documentation](/usage/training#config). To get started with a blank -config or fill a partial config with all defaults, you can use the +[usage documentation](/usage/training#config). To get started with the +recommended settings for your use case, check out the +[quickstart widget](/usage/training#quickstart) or run the [`init config`](/api/cli#init-config) command. > #### What does the @ mean? @@ -182,10 +183,10 @@ run [`spacy pretrain`](/api/cli#pretrain). > ``` The main data format used in spaCy v3.0 is a **binary format** created by -serializing a [`DocBin`](/api/docbin) object, which represents a collection of -`Doc` objects. This means that you can train spaCy models using the same format -it outputs: annotated `Doc` objects. The binary format is extremely **efficient -in storage**, especially when packing multiple documents together. +serializing a [`DocBin`](/api/docbin), which represents a collection of `Doc` +objects. This means that you can train spaCy models using the same format it +outputs: annotated `Doc` objects. The binary format is extremely **efficient in +storage**, especially when packing multiple documents together. Typically, the extension for these binary files is `.spacy`, and they are used as input format for specifying a [training corpus](/api/corpus) and for spaCy's diff --git a/website/docs/api/dependencyparser.md b/website/docs/api/dependencyparser.md index 187abfdbb..c7af8ffae 100644 --- a/website/docs/api/dependencyparser.md +++ b/website/docs/api/dependencyparser.md @@ -142,14 +142,20 @@ applied to the `Doc` in order. 
Both [`__call__`](/api/dependencyparser#call) and ## DependencyParser.begin_training {#begin_training tag="method"} -Initialize the pipe for training, using data examples if available. Returns an -[`Optimizer`](https://thinc.ai/docs/api-optimizers) object. +Initialize the component for training and return an +[`Optimizer`](https://thinc.ai/docs/api-optimizers). `get_examples` should be a +function that returns an iterable of [`Example`](/api/example) objects. The data +examples are used to **initialize the model** of the component and can either be +the full training data or a representative sample. Initialization includes +validating the network, +[inferring missing shapes](https://thinc.ai/docs/usage-models#validation) and +setting up the label scheme based on the data. > #### Example > > ```python > parser = nlp.add_pipe("parser") -> optimizer = parser.begin_training(pipeline=nlp.pipeline) +> optimizer = parser.begin_training(lambda: [], pipeline=nlp.pipeline) > ``` | Name | Type | Description | diff --git a/website/docs/api/entitylinker.md b/website/docs/api/entitylinker.md index 930188e26..fa8918dba 100644 --- a/website/docs/api/entitylinker.md +++ b/website/docs/api/entitylinker.md @@ -142,14 +142,20 @@ applied to the `Doc` in order. Both [`__call__`](/api/entitylinker#call) and ## EntityLinker.begin_training {#begin_training tag="method"} -Initialize the pipe for training, using data examples if available. Returns an -[`Optimizer`](https://thinc.ai/docs/api-optimizers) object. +Initialize the component for training and return an +[`Optimizer`](https://thinc.ai/docs/api-optimizers). `get_examples` should be a +function that returns an iterable of [`Example`](/api/example) objects. The data +examples are used to **initialize the model** of the component and can either be +the full training data or a representative sample. Initialization includes +validating the network, +[inferring missing shapes](https://thinc.ai/docs/usage-models#validation) and +setting up the label scheme based on the data. > #### Example > > ```python > entity_linker = nlp.add_pipe("entity_linker", last=True) -> optimizer = entity_linker.begin_training(pipeline=nlp.pipeline) +> optimizer = entity_linker.begin_training(lambda: [], pipeline=nlp.pipeline) > ``` | Name | Type | Description | diff --git a/website/docs/api/entityrecognizer.md b/website/docs/api/entityrecognizer.md index 2d66710d7..8d30463ff 100644 --- a/website/docs/api/entityrecognizer.md +++ b/website/docs/api/entityrecognizer.md @@ -131,14 +131,20 @@ applied to the `Doc` in order. Both [`__call__`](/api/entityrecognizer#call) and ## EntityRecognizer.begin_training {#begin_training tag="method"} -Initialize the pipe for training, using data examples if available. Returns an -[`Optimizer`](https://thinc.ai/docs/api-optimizers) object. +Initialize the component for training and return an +[`Optimizer`](https://thinc.ai/docs/api-optimizers). `get_examples` should be a +function that returns an iterable of [`Example`](/api/example) objects. The data +examples are used to **initialize the model** of the component and can either be +the full training data or a representative sample. Initialization includes +validating the network, +[inferring missing shapes](https://thinc.ai/docs/usage-models#validation) and +setting up the label scheme based on the data. 
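+For example, a minimal sketch of building such a `get_examples` callback by
+hand and passing it to `begin_training` (illustrative only; it assumes the
+current nightly's `spacy.gold` import path for [`Example`](/api/example) and
+made-up training data):
+
+```python
+import spacy
+from spacy.gold import Example  # assumed import path in the current nightlies
+
+nlp = spacy.blank("en")
+ner = nlp.add_pipe("ner")
+
+# Tiny, made-up sample with one BILUO entity tag per token
+train_data = [("I like London", ["O", "O", "U-GPE"])]
+train_examples = [
+    Example.from_dict(nlp.make_doc(text), {"entities": entities})
+    for text, entities in train_data
+]
+
+# get_examples is called with no arguments and returns the Example objects
+optimizer = ner.begin_training(lambda: train_examples, pipeline=nlp.pipeline)
+```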
> #### Example > > ```python > ner = nlp.add_pipe("ner") -> optimizer = ner.begin_training(pipeline=nlp.pipeline) +> optimizer = ner.begin_training(lambda: [], pipeline=nlp.pipeline) > ``` | Name | Type | Description | diff --git a/website/docs/api/language.md b/website/docs/api/language.md index 79782fd72..41d660421 100644 --- a/website/docs/api/language.md +++ b/website/docs/api/language.md @@ -200,12 +200,28 @@ more efficient than processing texts one-by-one. ## Language.begin_training {#begin_training tag="method"} -Initialize the pipe for training, using data examples if available. Returns an -[`Optimizer`](https://thinc.ai/docs/api-optimizers) object. +Initialize the pipeline for training and return an +[`Optimizer`](https://thinc.ai/docs/api-optimizers). `get_examples` should be a +function that returns an iterable of [`Example`](/api/example) objects. The data +examples can either be the full training data or a representative sample. They +are used to **initialize the models** of trainable pipeline components and are +passed to each component's [`begin_training`](/api/pipe#begin_training) method, if +available. Initialization includes validating the network, +[inferring missing shapes](https://thinc.ai/docs/usage-models#validation) and +setting up the label scheme based on the data. + + + +The `Language.begin_training` method now takes a **function** that is called with no +arguments and returns a sequence of [`Example`](/api/example) objects instead of +tuples of `Doc` and `GoldParse` objects. + + > #### Example > > ```python +> get_examples = lambda: examples > optimizer = nlp.begin_training(get_examples) > ``` @@ -276,7 +292,7 @@ and custom registered functions if needed. See the | `component_cfg` | `Dict[str, dict]` | Optional dictionary of keyword arguments for components, keyed by component names. Defaults to `None`. | | **RETURNS** | `Dict[str, float]` | The updated `losses` dictionary. | -## Language.rehearse {#rehearse tag="method,experimental"} +## Language.rehearse {#rehearse tag="method,experimental" new="3"} Perform a "rehearsal" update from a batch of data. Rehearsal updates teach the current model to make predictions similar to an initial model, to try to address @@ -302,6 +318,13 @@ the "catastrophic forgetting" problem. This feature is experimental. Evaluate a model's pipeline components. + + +The `Language.evaluate` method now takes a batch of [`Example`](/api/example) +objects instead of tuples of `Doc` and `GoldParse` objects. + + + > #### Example > > ```python diff --git a/website/docs/api/morphologizer.md b/website/docs/api/morphologizer.md index 04d189939..12d3050f6 100644 --- a/website/docs/api/morphologizer.md +++ b/website/docs/api/morphologizer.md @@ -121,15 +121,21 @@ applied to the `Doc` in order. Both [`__call__`](/api/morphologizer#call) and ## Morphologizer.begin_training {#begin_training tag="method"} -Initialize the pipe for training, using data examples if available. Returns an -[`Optimizer`](https://thinc.ai/docs/api-optimizers) object. +Initialize the component for training and return an +[`Optimizer`](https://thinc.ai/docs/api-optimizers). `get_examples` should be a +function that returns an iterable of [`Example`](/api/example) objects. The data +examples are used to **initialize the model** of the component and can either be +the full training data or a representative sample. Initialization includes +validating the network, +[inferring missing shapes](https://thinc.ai/docs/usage-models#validation) and +setting up the label scheme based on the data.
> #### Example > > ```python > morphologizer = nlp.add_pipe("morphologizer") > nlp.pipeline.append(morphologizer) -> optimizer = morphologizer.begin_training(pipeline=nlp.pipeline) +> optimizer = morphologizer.begin_training(lambda: [], pipeline=nlp.pipeline) > ``` | Name | Type | Description | diff --git a/website/docs/api/pipe.md b/website/docs/api/pipe.md index b41ec210e..8302c2e8b 100644 --- a/website/docs/api/pipe.md +++ b/website/docs/api/pipe.md @@ -9,8 +9,8 @@ components like the [`EntityRecognizer`](/api/entityrecognizer) or [`TextCategorizer`](/api/textcategorizer) inherit from it and it defines the interface that components should follow to function as trainable components in a spaCy pipeline. See the docs on -[writing trainable components](/usage/processing-pipelines#trainable) for how to -use the `Pipe` base class to implement custom components. +[writing trainable components](/usage/processing-pipelines#trainable-components) +for how to use the `Pipe` base class to implement custom components. > #### Why is Pipe implemented in Cython? > @@ -45,18 +45,12 @@ Create a new pipeline instance. In your application, you would normally use a shortcut for this and instantiate the component using its string name and [`nlp.add_pipe`](/api/language#create_pipe). - - -This method needs to be overwritten with your own custom `__init__` method. - - - -| Name | Type | Description | -| ------- | ------------------------------------------ | ------------------------------------------------------------------------------------------- | -| `vocab` | `Vocab` | The shared vocabulary. | -| `model` | [`Model`](https://thinc.ai/docs/api-model) | The Thinc [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. | -| `name` | str | String name of the component instance. Used to add entries to the `losses` during training. | -| `**cfg` | | Additional config parameters and settings. | +| Name | Type | Description | +| ------- | ------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------- | +| `vocab` | `Vocab` | The shared vocabulary. | +| `model` | [`Model`](https://thinc.ai/docs/api-model) | The Thinc [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. | +| `name` | str | String name of the component instance. Used to add entries to the `losses` during training. | +| `**cfg` | | Additional config parameters and settings. Will be available as the dictionary `Pipe.cfg` and is serialized with the component. | ## Pipe.\_\_call\_\_ {#call tag="method"} @@ -106,14 +100,20 @@ applied to the `Doc` in order. Both [`__call__`](/api/pipe#call) and ## Pipe.begin_training {#begin_training tag="method"} -Initialize the pipe for training, using data examples if available. Returns an -[`Optimizer`](https://thinc.ai/docs/api-optimizers) object. +Initialize the component for training and return an +[`Optimizer`](https://thinc.ai/docs/api-optimizers). `get_examples` should be a +function that returns an iterable of [`Example`](/api/example) objects. The data +examples are used to **initialize the model** of the component and can either be +the full training data or a representative sample. Initialization includes +validating the network, +[inferring missing shapes](https://thinc.ai/docs/usage-models#validation) and +setting up the label scheme based on the data. 
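+In a custom component, `begin_training` is typically overridden to initialize
+the model from such a sample. A minimal sketch (hypothetical component; the
+shape inference details depend on your model):
+
+```python
+from spacy.pipeline import Pipe
+
+class CustomComponent(Pipe):
+    def begin_training(self, get_examples, *, pipeline=None, sgd=None):
+        # Use a small sample of predicted docs to validate the network and
+        # let Thinc infer any missing shapes
+        sample = [eg.predicted for eg in get_examples()]
+        self.model.initialize(X=sample)
+        if sgd is None:
+            sgd = self.create_optimizer()
+        return sgd
+```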
> #### Example > > ```python > pipe = nlp.add_pipe("your_custom_pipe") -> optimizer = pipe.begin_training(pipeline=nlp.pipeline) +> optimizer = pipe.begin_training(lambda: [], pipeline=nlp.pipeline) > ``` | Name | Type | Description | @@ -176,12 +176,6 @@ method. Learn from a batch of [`Example`](/api/example) objects containing the predictions and gold-standard annotations, and update the component's model. - - -This method needs to be overwritten with your own custom `update` method. - - - > #### Example > > ```python @@ -200,7 +194,7 @@ This method needs to be overwritten with your own custom `update` method. | `losses` | `Dict[str, float]` | Optional record of the loss during training. Updated using the component name as the key. | | **RETURNS** | `Dict[str, float]` | The updated `losses` dictionary. | -## Pipe.rehearse {#rehearse tag="method,experimental"} +## Pipe.rehearse {#rehearse tag="method,experimental" new="3"} Perform a "rehearsal" update from a batch of data. Rehearsal updates teach the current model to make predictions similar to an initial model, to try to address @@ -378,6 +372,15 @@ Load the pipe from a bytestring. Modifies the object in place and returns it. | `exclude` | `Iterable[str]` | String names of [serialization fields](#serialization-fields) to exclude. | | **RETURNS** | `Pipe` | The pipe. | +## Attributes {#attributes} + +| Name | Type | Description | +| ------- | ------------------------------------------ | ----------------------------------------------------------------------------------------------------- | +| `vocab` | [`Vocab`](/api/vocab) | The shared vocabulary that's passed in on initialization. | +| `model` | [`Model`](https://thinc.ai/docs/api-model) | The model powering the component. | +| `name` | str | The name of the component instance in the pipeline. Can be used in the losses. | +| `cfg` | dict | Keyword arguments passed to [`Pipe.__init__`](/api/pipe#init). Will be serialized with the component. | + ## Serialization fields {#serialization-fields} During serialization, spaCy will export several data fields used to restore diff --git a/website/docs/api/sentencerecognizer.md b/website/docs/api/sentencerecognizer.md index 59ada7fcb..cefdbea88 100644 --- a/website/docs/api/sentencerecognizer.md +++ b/website/docs/api/sentencerecognizer.md @@ -116,14 +116,20 @@ and [`pipe`](/api/sentencerecognizer#pipe) delegate to the ## SentenceRecognizer.begin_training {#begin_training tag="method"} -Initialize the pipe for training, using data examples if available. Returns an -[`Optimizer`](https://thinc.ai/docs/api-optimizers) object. +Initialize the component for training and return an +[`Optimizer`](https://thinc.ai/docs/api-optimizers). `get_examples` should be a +function that returns an iterable of [`Example`](/api/example) objects. The data +examples are used to **initialize the model** of the component and can either be +the full training data or a representative sample. Initialization includes +validating the network, +[inferring missing shapes](https://thinc.ai/docs/usage-models#validation) and +setting up the label scheme based on the data. > #### Example > > ```python > senter = nlp.add_pipe("senter") -> optimizer = senter.begin_training(pipeline=nlp.pipeline) +> optimizer = senter.begin_training(lambda: [], pipeline=nlp.pipeline) > ``` | Name | Type | Description | @@ -193,7 +199,7 @@ Delegates to [`predict`](/api/sentencerecognizer#predict) and | `losses` | `Dict[str, float]` | Optional record of the loss during training. 
The value keyed by the model's name is updated. | | **RETURNS** | `Dict[str, float]` | The updated `losses` dictionary. | -## SentenceRecognizer.rehearse {#rehearse tag="method,experimental"} +## SentenceRecognizer.rehearse {#rehearse tag="method,experimental" new="3"} Perform a "rehearsal" update from a batch of data. Rehearsal updates teach the current model to make predictions similar to an initial model, to try to address diff --git a/website/docs/api/tagger.md b/website/docs/api/tagger.md index 7ea29e53c..9761dea15 100644 --- a/website/docs/api/tagger.md +++ b/website/docs/api/tagger.md @@ -114,14 +114,20 @@ applied to the `Doc` in order. Both [`__call__`](/api/tagger#call) and ## Tagger.begin_training {#begin_training tag="method"} -Initialize the pipe for training, using data examples if available. Returns an -[`Optimizer`](https://thinc.ai/docs/api-optimizers) object. +Initialize the component for training and return an +[`Optimizer`](https://thinc.ai/docs/api-optimizers). `get_examples` should be a +function that returns an iterable of [`Example`](/api/example) objects. The data +examples are used to **initialize the model** of the component and can either be +the full training data or a representative sample. Initialization includes +validating the network, +[inferring missing shapes](https://thinc.ai/docs/usage-models#validation) and +setting up the label scheme based on the data. > #### Example > > ```python > tagger = nlp.add_pipe("tagger") -> optimizer = tagger.begin_training(pipeline=nlp.pipeline) +> optimizer = tagger.begin_training(lambda: [], pipeline=nlp.pipeline) > ``` | Name | Type | Description | @@ -191,7 +197,7 @@ Delegates to [`predict`](/api/tagger#predict) and | `losses` | `Dict[str, float]` | Optional record of the loss during training. The value keyed by the model's name is updated. | | **RETURNS** | `Dict[str, float]` | The updated `losses` dictionary. | -## Tagger.rehearse {#rehearse tag="method,experimental"} +## Tagger.rehearse {#rehearse tag="method,experimental" new="3"} Perform a "rehearsal" update from a batch of data. Rehearsal updates teach the current model to make predictions similar to an initial model, to try to address diff --git a/website/docs/api/textcategorizer.md b/website/docs/api/textcategorizer.md index 494bc569f..73b50b865 100644 --- a/website/docs/api/textcategorizer.md +++ b/website/docs/api/textcategorizer.md @@ -122,14 +122,20 @@ applied to the `Doc` in order. Both [`__call__`](/api/textcategorizer#call) and ## TextCategorizer.begin_training {#begin_training tag="method"} -Initialize the pipe for training, using data examples if available. Returns an -[`Optimizer`](https://thinc.ai/docs/api-optimizers) object. +Initialize the component for training and return an +[`Optimizer`](https://thinc.ai/docs/api-optimizers). `get_examples` should be a +function that returns an iterable of [`Example`](/api/example) objects. The data +examples are used to **initialize the model** of the component and can either be +the full training data or a representative sample. Initialization includes +validating the network, +[inferring missing shapes](https://thinc.ai/docs/usage-models#validation) and +setting up the label scheme based on the data. 
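+For instance, a rough sketch of initializing the component from a few examples
+annotated with the `cats` dictionary format (illustrative only; it assumes the
+current nightly's `spacy.gold` import path for [`Example`](/api/example)):
+
+```python
+import spacy
+from spacy.gold import Example  # assumed import path in the current nightlies
+
+nlp = spacy.blank("en")
+textcat = nlp.add_pipe("textcat")
+
+# Made-up examples: category scores are provided via the "cats" dict
+train_examples = [
+    Example.from_dict(nlp.make_doc("This is great"), {"cats": {"POSITIVE": 1.0, "NEGATIVE": 0.0}}),
+    Example.from_dict(nlp.make_doc("This is terrible"), {"cats": {"POSITIVE": 0.0, "NEGATIVE": 1.0}}),
+]
+optimizer = textcat.begin_training(lambda: train_examples, pipeline=nlp.pipeline)
+```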
> #### Example > > ```python > textcat = nlp.add_pipe("textcat") -> optimizer = textcat.begin_training(pipeline=nlp.pipeline) +> optimizer = textcat.begin_training(lambda: [], pipeline=nlp.pipeline) > ``` | Name | Type | Description | @@ -199,7 +205,7 @@ Delegates to [`predict`](/api/textcategorizer#predict) and | `losses` | `Dict[str, float]` | Optional record of the loss during training. Updated using the component name as the key. | | **RETURNS** | `Dict[str, float]` | The updated `losses` dictionary. | -## TextCategorizer.rehearse {#rehearse tag="method,experimental"} +## TextCategorizer.rehearse {#rehearse tag="method,experimental" new="3"} Perform a "rehearsal" update from a batch of data. Rehearsal updates teach the current model to make predictions similar to an initial model, to try to address diff --git a/website/docs/api/tok2vec.md b/website/docs/api/tok2vec.md index 8e5f78bf7..4c820c07c 100644 --- a/website/docs/api/tok2vec.md +++ b/website/docs/api/tok2vec.md @@ -125,14 +125,20 @@ and [`set_annotations`](/api/tok2vec#set_annotations) methods. ## Tok2Vec.begin_training {#begin_training tag="method"} -Initialize the pipe for training, using data examples if available. Returns an -[`Optimizer`](https://thinc.ai/docs/api-optimizers) object. +Initialize the component for training and return an +[`Optimizer`](https://thinc.ai/docs/api-optimizers). `get_examples` should be a +function that returns an iterable of [`Example`](/api/example) objects. The data +examples are used to **initialize the model** of the component and can either be +the full training data or a representative sample. Initialization includes +validating the network, +[inferring missing shapes](https://thinc.ai/docs/usage-models#validation) and +setting up the label scheme based on the data. > #### Example > > ```python > tok2vec = nlp.add_pipe("tok2vec") -> optimizer = tok2vec.begin_training(pipeline=nlp.pipeline) +> optimizer = tok2vec.begin_training(lambda: [], pipeline=nlp.pipeline) > ``` | Name | Type | Description | diff --git a/website/docs/api/transformer.md b/website/docs/api/transformer.md index a8b328688..0e4b066ed 100644 --- a/website/docs/api/transformer.md +++ b/website/docs/api/transformer.md @@ -159,14 +159,20 @@ applied to the `Doc` in order. Both [`__call__`](/api/transformer#call) and ## Transformer.begin_training {#begin_training tag="method"} -Initialize the pipe for training, using data examples if available. Returns an -[`Optimizer`](https://thinc.ai/docs/api-optimizers) object. +Initialize the component for training and return an +[`Optimizer`](https://thinc.ai/docs/api-optimizers). `get_examples` should be a +function that returns an iterable of [`Example`](/api/example) objects. The data +examples are used to **initialize the model** of the component and can either be +the full training data or a representative sample. Initialization includes +validating the network, +[inferring missing shapes](https://thinc.ai/docs/usage-models#validation) and +setting up the label scheme based on the data. > #### Example > > ```python > trf = nlp.add_pipe("transformer") -> optimizer = trf.begin_training(pipeline=nlp.pipeline) +> optimizer = trf.begin_training(lambda: [], pipeline=nlp.pipeline) > ``` | Name | Type | Description | @@ -371,54 +377,64 @@ serialization by passing in the string names via the `exclude` argument. ## TransformerData {#transformerdata tag="dataclass"} -Transformer tokens and outputs for one `Doc` object. +Transformer tokens and outputs for one `Doc` object. 
The transformer models +return tensors that refer to a whole padded batch of documents. These tensors +are wrapped into the +[FullTransformerBatch](/api/transformer#fulltransformerbatch) object. The +`FullTransformerBatch` then splits out the per-document data, which is handled +by this class. Instances of this class +are typically assigned to the [`Doc._.trf_data`](/api/transformer#custom-attributes) +extension attribute. - - -| Name | Type | Description | -| --------- | -------------------------------------------------- | ----------- | -| `tokens` | `Dict` | | -| `tensors` | `List[FloatsXd]` | | -| `align` | [`Ragged`](https://thinc.ai/docs/api-types#ragged) | | -| `width` | int | | +| Name | Type | Description | +| --------- | -------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `tokens` | `Dict` | A slice of the tokens data produced by the tokenizer. This may have several fields, including the token IDs, the texts, and the attention mask. See the [`transformers.BatchEncoding`](https://huggingface.co/transformers/main_classes/tokenizer.html#transformers.BatchEncoding) object for details. | +| `tensors` | `List[FloatsXd]` | The activations for the Doc from the transformer. Usually the last tensor that is 3-dimensional will be the most important, as that will provide the final hidden state. Generally activations that are 2-dimensional will be attention weights. Details of this variable will differ depending on the underlying transformer model. | +| `align` | [`Ragged`](https://thinc.ai/docs/api-types#ragged) | Alignment from the `Doc`'s tokenization to the wordpieces. This is a ragged array, where `align.lengths[i]` indicates the number of wordpiece tokens that token `i` aligns against. The actual indices are provided at `align[i].dataXd`. | +| `width` | int | The width of the last hidden layer. | ### TransformerData.empty {#transformerdata-emoty tag="classmethod"} - +Create an empty `TransformerData` container. -| Name | Type | Description | -| ----------- | ----------------- | ----------- | -| **RETURNS** | `TransformerData` | | +| Name | Type | Description | +| ----------- | ----------------- | -------------- | +| **RETURNS** | `TransformerData` | The container. | ## FullTransformerBatch {#fulltransformerbatch tag="dataclass"} - +Holds a batch of input and output objects for a transformer model. The data can +then be split into a list of [`TransformerData`](/api/transformer#transformerdata) +objects to associate the outputs to each [`Doc`](/api/doc) in the batch.
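+In a pipeline, this batching and splitting is handled for you, and the
+per-document data ends up on the extension attribute. A rough sketch of
+inspecting it (assuming a loaded pipeline that includes a transformer
+component; the package name is hypothetical):
+
+```python
+import spacy
+
+nlp = spacy.load("en_core_bert_sm")  # any pipeline with a transformer component
+doc = nlp("Apple shares rose on the news.")
+
+trf_data = doc._.trf_data          # TransformerData for this Doc
+print(trf_data.width)              # width of the last hidden layer
+print(trf_data.tensors[-1].shape)  # usually the final hidden states
+print(trf_data.align[0].dataXd)    # wordpiece indices aligned to the first token
+```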
-| Name | Type | Description | -| ---------- | -------------------------------------------------------------------------------------------------------------------------- | ----------- | -| `spans` | `List[List[Span]]` | | -| `tokens` | [`transformers.BatchEncoding`](https://huggingface.co/transformers/main_classes/tokenizer.html#transformers.BatchEncoding) | | -| `tensors` | `List[torch.Tensor]` | | -| `align` | [`Ragged`](https://thinc.ai/docs/api-types#ragged) | | -| `doc_data` | `List[TransformerData]` | | +| Name | Type | Description | +| ---------- | -------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `spans` | `List[List[Span]]` | The batch of input spans. The outer list refers to the Doc objects in the batch, and the inner list are the spans for that `Doc`. Note that spans are allowed to overlap or exclude tokens, but each Span can only refer to one `Doc` (by definition). This means that within a `Doc`, the regions of the output tensors that correspond to each Span may overlap or have gaps, but for each `Doc`, there is a non-overlapping contiguous slice of the outputs. | +| `tokens` | [`transformers.BatchEncoding`](https://huggingface.co/transformers/main_classes/tokenizer.html#transformers.BatchEncoding) | The output of the tokenizer. | +| `tensors` | `List[torch.Tensor]` | The output of the transformer model. | +| `align` | [`Ragged`](https://thinc.ai/docs/api-types#ragged) | Alignment from the spaCy tokenization to the wordpieces. This is a ragged array, where `align.lengths[i]` indicates the number of wordpiece tokens that token `i` aligns against. The actual indices are provided at `align[i].dataXd`. | +| `doc_data` | `List[TransformerData]` | The outputs, split per `Doc` object. | ### FullTransformerBatch.unsplit_by_doc {#fulltransformerbatch-unsplit_by_doc tag="method"} - +Return a new `FullTransformerBatch` from a split batch of activations, using the +current object's spans, tokens and alignment. This is used during the backward +pass, in order to construct the gradients to pass back into the transformer +model. -| Name | Type | Description | -| ----------- | ---------------------- | ----------- | -| `arrays` | `List[List[Floats3d]]` | | -| **RETURNS** | `FullTransformerBatch` | | +| Name | Type | Description | +| ----------- | ---------------------- | ------------------------------- | +| `arrays` | `List[List[Floats3d]]` | The split batch of activations. | +| **RETURNS** | `FullTransformerBatch` | The transformer batch. | ### FullTransformerBatch.split_by_doc {#fulltransformerbatch-split_by_doc tag="method"} Split a `TransformerData` object that represents a batch into a list with one `TransformerData` per `Doc`. -| Name | Type | Description | -| ----------- | ----------------------- | ----------- | -| **RETURNS** | `List[TransformerData]` | | +| Name | Type | Description | +| ----------- | ----------------------- | ---------------- | +| **RETURNS** | `List[TransformerData]` | The split batch. 
| ## Span getters {#span_getters source="github.com/explosion/spacy-transformers/blob/master/spacy_transformers/span_getters.py"} diff --git a/website/docs/models/index.md b/website/docs/models/index.md index b25e46f1e..d5f87d3b5 100644 --- a/website/docs/models/index.md +++ b/website/docs/models/index.md @@ -45,9 +45,9 @@ three components: 2. **Genre:** Type of text the model is trained on, e.g. `web` or `news`. 3. **Size:** Model size indicator, `sm`, `md` or `lg`. -For example, `en_core_web_sm` is a small English model trained on written web -text (blogs, news, comments), that includes vocabulary, vectors, syntax and -entities. +For example, [`en_core_web_sm`](/models/en#en_core_web_sm) is a small English +model trained on written web text (blogs, news, comments), that includes +vocabulary, vectors, syntax and entities. ### Model versioning {#model-versioning} diff --git a/website/docs/usage/processing-pipelines.md b/website/docs/usage/processing-pipelines.md index ab5806764..00348065c 100644 --- a/website/docs/usage/processing-pipelines.md +++ b/website/docs/usage/processing-pipelines.md @@ -5,7 +5,6 @@ menu: - ['Processing Text', 'processing'] - ['How Pipelines Work', 'pipelines'] - ['Custom Components', 'custom-components'] - # - ['Trainable Components', 'trainable-components'] - ['Extension Attributes', 'custom-components-attributes'] - ['Plugins & Wrappers', 'plugins'] --- @@ -885,15 +884,117 @@ available, falls back to looking up the regular factory name. - +1. **Model:** A Thinc [`Model`](https://thinc.ai/docs/api-model) instance. This + can be a model using [layers](https://thinc.ai/docs/api-layers) implemented + in Thinc, or a [wrapped model](https://thinc.ai/docs/usage-frameworks) + implemented in PyTorch, TensorFlow, MXNet or a fully custom solution. The + model must take a list of [`Doc`](/api/doc) objects as input and can have any + type of output. +2. **Pipe subclass:** A subclass of [`Pipe`](/api/pipe) that implements at least + two methods: [`Pipe.predict`](/api/pipe#predict) and + [`Pipe.set_annotations`](/api/pipe#set_annotations). +3. **Component factory:** A component factory registered with + [`@Language.factory`](/api/language#factory) that takes the `nlp` object and + component `name` and optional settings provided by the config and returns an + instance of your trainable component. + +> #### Example +> +> ```python +> from spacy.pipeline import Pipe +> from spacy.language import Language +> +> class TrainableComponent(Pipe): +> def predict(self, docs): +> ... +> +> def set_annotations(self, docs, scores): +> ... +> +> @Language.factory("my_trainable_component") +> def make_component(nlp, name, model): +> return TrainableComponent(nlp.vocab, model, name=name) +> ``` + +| Name | Description | +| ---------------------------------------------- | ------------------------------------------------------------------------------------------------------------------- | +| [`predict`](/api/pipe#predict) | Apply the component's model to a batch of [`Doc`](/api/doc) objects (without modifying them) and return the scores. | +| [`set_annotations`](/api/pipe#set_annotations) | Modify a batch of [`Doc`](/api/doc) objects, using pre-computed scores generated by `predict`. | + +By default, [`Pipe.__init__`](/api/pipe#init) takes the shared vocab, the +[`Model`](https://thinc.ai/docs/api-model) and the name of the component +instance in the pipeline, which you can use as a key in the losses. 
All other +keyword arguments will become available as [`Pipe.cfg`](/api/pipe#cfg) and will +also be serialized with the component. + + + +spaCy's [config system](/usage/training#config) resolves the config describing +the pipeline components and models **bottom-up**. This means that it will +_first_ create a `Model` from a [registered architecture](/api/architectures), +validate its arguments and _then_ pass the object forward to the component. This +means that the config can express very complex, nested trees of objects – but +the objects don't have to pass the model settings all the way down to the +components. It also makes the components more **modular** and lets you swap +different architectures in your config, and re-use model definitions. + +```ini +### config.cfg (excerpt) +[components] + +[components.textcat] +factory = "textcat" +labels = [] + +# This function is created and then passed to the "textcat" component as +# the argument "model" +[components.textcat.model] +@architectures = "spacy.TextCatEnsemble.v1" +exclusive_classes = false +pretrained_vectors = null +width = 64 +conv_depth = 2 +embed_size = 2000 +window_size = 1 +ngram_size = 1 +dropout = null + +[components.other_textcat] +factory = "textcat" +# This references the [components.textcat.model] block above +model = ${components.textcat.model} +labels = [] +``` + +Your trainable pipeline component factories should therefore always take a +`model` argument instead of instantiating the +[`Model`](https://thinc.ai/docs/api-model) inside the component. To register +custom architectures, you can use the +[`@spacy.registry.architectures`](/api/top-level#registry) decorator. Also see +the [training guide](/usage/training#config) for details. + + + +For some use cases, it makes sense to also overwrite additional methods to +customize how the model is updated from examples, how it's initialized, how the +loss is calculated and to add evaluation scores to the training output. + +| Name | Description | +| -------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| [`update`](/api/pipe#update) | Learn from a batch of [`Example`](/api/example) objects containing the predictions and gold-standard annotations, and update the component's model. | +| [`begin_training`](/api/pipe#begin_training) | Initialize the model. Typically calls into [`Model.initialize`](https://thinc.ai/docs/api-model#initialize) and [`Pipe.create_optimizer`](/api/pipe#create_optimizer) if no optimizer is provided. | +| [`get_loss`](/api/pipe#get_loss) | Return a tuple of the loss and the gradient for a batch of [`Example`](/api/example) objects. | +| [`score`](/api/pipe#score) | Score a batch of [`Example`](/api/example) objects and return a dictionary of scores. The [`@Language.factory`](/api/language#factory) decorator can define the `default_score_weights` of the component to decide which keys of the scores to display during training and how they count towards the final score.
| + + ## Extension attributes {#custom-components-attributes new="2"} diff --git a/website/docs/usage/training.md b/website/docs/usage/training.md index e6d328d02..fc1624ec1 100644 --- a/website/docs/usage/training.md +++ b/website/docs/usage/training.md @@ -37,27 +37,37 @@ The recommended way to train your spaCy models is via the single [`config.cfg`](#config) **configuration file** that includes all settings and hyperparameters. You can optionally [overwritten](#config-overrides) settings on the command line, and load in a Python file to register -[custom functions](#custom-code) and architectures. +[custom functions](#custom-code) and architectures. This quickstart widget helps +you generate a starter config with the **recommended settings** for your +specific use case. It's also available in spaCy as the +[`init config`](/api/cli#init-config) command. -> #### Instructions +> #### Instructions: widget > > 1. Select your requirements and settings. > 2. Use the buttons at the bottom to save the result to your clipboard or a > file `base_config.cfg`. -> 3. Run [`init config`](/api/cli#init-config) to create a full training config. +> 3. Run [`init fill-config`](/api/cli#init-fill-config) to create a full +> config. > 4. Run [`train`](/api/cli#train) with your config and data. +> +> #### Instructions: CLI +> +> 1. Run the [`init config`](/api/cli#init-config) command and specify your +> requirements and settings as CLI arguments. +> 2. Run [`train`](/api/cli#train) with the exported config and data. import QuickstartTraining from 'widgets/quickstart-training.js' After you've saved the starter config to a file `base_config.cfg`, you can use -the [`init config`](/api/cli#init-config) command to fill in the remaining -defaults. Training configs should always be **complete and without hidden -defaults**, to keep your experiments reproducible. +the [`init fill-config`](/api/cli#init-fill-config) command to fill in the +remaining defaults. Training configs should always be **complete and without +hidden defaults**, to keep your experiments reproducible. ```bash -$ python -m spacy init config config.cfg --base base_config.cfg +$ python -m spacy init fill-config base_config.cfg config.cfg ``` > #### Tip: Debug your data @@ -70,10 +80,13 @@ $ python -m spacy init config config.cfg --base base_config.cfg > $ python -m spacy debug data config.cfg --verbose > ``` -You can now add your data and run [`train`](/api/cli#train) with your config. -See the [`convert`](/api/cli#convert) command for details on how to convert your -data to spaCy's binary `.spacy` format. You can either include the data paths in -the `[paths]` section of your config, or pass them in via the command line. +Instead of exporting your starter config from the quickstart widget and +auto-filling it, you can also use the [`init config`](/api/cli#init-config) +command and specify your requirement and settings and CLI arguments. You can now +add your data and run [`train`](/api/cli#train) with your config. See the +[`convert`](/api/cli#convert) command for details on how to convert your data to +spaCy's binary `.spacy` format. You can either include the data paths in the +`[paths]` section of your config, or pass them in via the command line. ```bash $ python -m spacy train config.cfg --output ./output --paths.train ./train.spacy --paths.dev ./dev.spacy @@ -601,7 +614,7 @@ settings in the block will be passed to the function as keyword arguments. 
Keep in mind that the config shouldn't have any hidden defaults and all arguments on the functions need to be represented in the config. If your function defines **default argument values**, spaCy is able to auto-fill your config when you run -[`init config`](/api/cli#init-config). +[`init fill-config`](/api/cli#init-fill-config). ```ini ### config.cfg (excerpt) @@ -687,13 +700,13 @@ give you everything you need to train fully custom models with - - The [`Example`](/api/example) object contains annotated training data, also called the **gold standard**. It's initialized with a [`Doc`](/api/doc) object that will hold the predictions, and another `Doc` object that holds the -gold-standard annotations. Here's an example of a simple `Example` for -part-of-speech tags: +gold-standard annotations. It also includes the **alignment** between those two +documents if they differ in tokenization. The `Example` class ensures that spaCy +can rely on one **standardized format** that's passed through the pipeline. +Here's an example of a simple `Example` for part-of-speech tags: ```python words = ["I", "like", "stuff"] @@ -744,7 +757,8 @@ example = Example.from_dict(doc, {"entities": ["U-ORG", "O", "U-TECHNOLOGY", "O" As of v3.0, the [`Example`](/api/example) object replaces the `GoldParse` class. It can be constructed in a very similar way, from a `Doc` and a dictionary of -annotations: +annotations. For more details, see the +[migration guide](/usage/v3#migrating-training). ```diff - gold = GoldParse(doc, entities=entities) diff --git a/website/docs/usage/transformers.md b/website/docs/usage/transformers.md index e52417d13..c3130f57b 100644 --- a/website/docs/usage/transformers.md +++ b/website/docs/usage/transformers.md @@ -163,8 +163,9 @@ resolved, the function is created and passed into the model as an argument. Remember that the `config.cfg` used for training should contain **no missing values** and requires all settings to be defined. You don't want any hidden defaults creeping in and changing your results! spaCy will tell you if settings -are missing, and you can run [`spacy init config`](/api/cli#init-config) with to -automatically fill in all defaults. +are missing, and you can run +[`spacy init fill-config`](/api/cli#init-fill-config) to automatically fill in +all defaults. 
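+To double-check the result, you can also load the filled config in Python and
+inspect it. A small sketch using Thinc's `Config` (the file name is assumed):
+
+```python
+from thinc.api import Config
+
+# Load the config written out by "spacy init fill-config"
+config = Config().from_disk("./config.cfg")
+
+# Every setting is explicit now, so nothing is left to hidden defaults
+print(list(config["components"]))
+print(config["training"])
+```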
diff --git a/website/docs/usage/v3.md b/website/docs/usage/v3.md index 02f6882e4..a32f9cd86 100644 --- a/website/docs/usage/v3.md +++ b/website/docs/usage/v3.md @@ -14,12 +14,49 @@ menu: ### New training workflow and config system {#features-training} + + +- **Usage:** [Training models](/usage/training) +- **Thinc:** [Thinc's config system](https://thinc.ai/docs/usage-config), + [`Config`](https://thinc.ai/docs/api-config#config) +- **CLI:** [`train`](/api/cli#train), [`pretrain`](/api/cli#pretrain), + [`evaluate`](/api/cli#evaluate) +- **API:** [Config format](/api/data-formats#config), + [`registry`](/api/top-level#registry) + + + ### Transformer-based pipelines {#features-transformers} + + +- **Usage:** [Transformers](/usage/transformers), + [Training models](/usage/training) +- **API:** [`Transformer`](/api/transformer), + [`TransformerData`](/api/transformer#transformerdata), + [`FullTransformerBatch`](/api/transformer#fulltransformerbatch) +- **Architectures: ** [TransformerModel](/api/architectures#TransformerModel), + [Tok2VecListener](/api/architectures#transformers-Tok2VecListener), + [Tok2VecTransformer](/api/architectures#Tok2VecTransformer) +- **Models:** [`en_core_bert_sm`](/models/en) +- **Implementation:** + [`spacy-transformers`](https://github.com/explosion/spacy-transformers) + + + ### Custom models using any framework {#feautres-custom-models} ### Manage end-to-end workflows with projects {#features-projects} + + +- **Usage:** [spaCy projects](/usage/projects), + [Training models](/usage/training) +- **CLI:** [`project`](/api/cli#project), [`train`](/api/cli#train) +- **Templates:** [`projects`](https://github.com/explosion/projects) + + + ### New built-in pipeline components {#features-pipeline-components} | Name | Description | @@ -30,14 +67,48 @@ menu: | [`AttributeRuler`](/api/attributeruler) | Component for setting token attributes using match patterns. | | [`Transformer`](/api/transformer) | Component for using [transformer models](/usage/transformers) in your pipeline, accessing outputs and aligning tokens. Provided via [`spacy-transformers`](https://github.com/explosion/spacy-transformers). | + + +- **Usage:** [Processing pipelines](/usage/processing-pipelines) +- **API:** [Built-in pipeline components](/api#architecture-pipeline) +- **Implementation:** + [`spacy/pipeline`](https://github.com/explosion/spaCy/tree/develop/spacy/pipeline) + + + ### New and improved pipeline component APIs {#features-components} - `Language.factory`, `Language.component` - `Language.analyze_pipes` - Adding components from other models + + +- **Usage:** [Custom components](/usage/processing-pipelines#custom_components), + [Defining components during training](/usage/training#config-components) +- **API:** [`Language`](/api/language) +- **Implementation:** + [`spacy/language.py`](https://github.com/explosion/spaCy/tree/develop/spacy/language.py) + + + ### Type hints and type-based data validation {#features-types} +> #### Example +> +> ```python +> from spacy.language import Language +> from pydantic import StrictBool +> +> @Language.factory("my_component") +> def create_my_component( +> nlp: Language, +> name: str, +> custom: StrictBool +> ): +> ... +> ``` + spaCy v3.0 officially drops support for Python 2 and now requires **Python 3.6+**. This also means that the code base can take full advantage of [type hints](https://docs.python.org/3/library/typing.html). 
spaCy's user-facing @@ -54,13 +125,37 @@ validation of Thinc's [config system](https://thinc.ai/docs/usage-config), which lets you register **custom functions with typed arguments**, reference them in your config and see validation errors if the argument values don't match. -### CLI + -| Name | Description | -| --------------------------------------- | ---------------------------------------------------------------------------------------------------------- | -| [`init config`](/api/cli#init-config) | Initialize a [training config](/usage/training) file for a blank language or auto-fill a partial config. | -| [`debug config`](/api/cli#debug-config) | Debug a [training config](/usage/training) file and show validation errors. | -| [`project`](/api/cli#project) | Subcommand for cloning and running [spaCy projects](/usage/projects). | +- **Usage:** + [Component type hints and validation](/usage/processing-pipelines#type-hints), + [Training with custom code](/usage/training#custom-code) +- **Thinc:** + [Type checking in Thinc](https://thinc.ai/docs/usage-type-checking), + [Thinc's config system](https://thinc.ai/docs/usage-config) + + + +### New methods, attributes and commands + +The following methods, attributes and commands are new in spaCy v3.0. + +| Name | Description | +| -------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------ | +| [`Token.lex`](/api/token#attributes) | Access a token's [`Lexeme`](/api/lexeme). | +| [`Language.select_pipes`](/api/language#select_pipes) | Contextmanager for enabling or disabling specific pipeline components for a block. | +| [`Language.analyze_pipes`](/api/language#analyze_pipes) | [Analyze](/usage/processing-pipelines#analysis) components and their interdependencies. | +| [`Language.resume_training`](/api/language#resume_training) | Experimental: continue training a pretrained model and initialize "rehearsal" for components that implement a `rehearse` method to prevent catastrophic forgetting. | +| [`@Language.factory`](/api/language#factory) [`@Language.component`](/api/language#component) | Decorators for [registering](/usage/processing-pipelines#custom-components) pipeline component factories and simple stateless component functions. | +| [`Language.has_factory`](/api/language#has_factory) | Check whether a component factory is registered on a language class. | +| [`Language.get_factory_meta`](/api/language#get_factory_meta) [`Language.get_pipe_meta`](/api/language#get_factory_meta) | Get the [`FactoryMeta`](/api/language#factorymeta) with component metadata for a factory or instance name. | +| [`Language.config`](/api/language#config) | The [config](/usage/training#config) used to create the current `nlp` object. An instance of [`Config`](https://thinc.ai/docs/api-config#config) that can be saved to disk and used for training. | +| [`Pipe.score`](/api/pipe#score) | Method on trainable pipeline components that returns a dictionary of evaluation scores. | +| [`registry`](/api/top-level#registry) | Function registry to map functions to string names that can be referenced in [configs](/usage/training#config). | +| [`init config`](/api/cli#init-config) | CLI command for initializing a [training config](/usage/training) file with the recommended settings.
| +| [`init fill-config`](/api/cli#init-fill-config) | CLI command for auto-filling a partial config with all defaults and missing values. | +| [`debug config`](/api/cli#debug-config) | CLI command for debugging a [training config](/usage/training) file and showing validation errors. | +| [`project`](/api/cli#project) | Suite of CLI commands for cloning, running and managing [spaCy projects](/usage/projects). | ## Backwards Incompatibilities {#incompat} @@ -70,12 +165,21 @@ usability. The following section lists the relevant changes to the user-facing API. For specific examples of how to rewrite your code, check out the [migration guide](#migrating). -### Compatibility {#incompat-compat} + -- spaCy now requires **Python 3.6+**. +Note that spaCy v3.0 now requires **Python 3.6+**. + + ### API changes {#incompat-api} +- Model symlinks, the `link` command and shortcut names are now deprecated. + There can be many [different models](/models) and not just one "English + model", so you should always use the full model name like + [`en_core_web_sm`](/models/en) explicitly. +- The [`train`](/api/cli#train) and [`pretrain`](/api/cli#pretrain) commands now + only take a `config.cfg` file containing the full + [training config](/usage/training#config). - [`Language.add_pipe`](/api/language#add_pipe) now takes the **string name** of the component factory instead of the component function. - **Custom pipeline components** now need to be decorated with the @@ -87,6 +191,20 @@ API. For specific examples of how to rewrite your code, check out the - The `Language.disable_pipes` contextmanager has been replaced by [`Language.select_pipes`](/api/language#select_pipes), which can explicitly disable or enable components. +- The [`Language.update`](/api/language#update), + [`Language.evaluate`](/api/language#evaluate) and + [`Pipe.update`](/api/pipe#update) methods now all take batches of + [`Example`](/api/example) objects instead of `Doc` and `GoldParse` objects, or + raw text and a dictionary of annotations. + [`Language.begin_training`](/api/language#begin_training) and + [`Pipe.begin_training`](/api/pipe#begin_training) now take a function that + returns a sequence of `Example` objects to initialize the model instead of a + list of tuples. +- [`Matcher.add`](/api/matcher#add), + [`PhraseMatcher.add`](/api/phrasematcher#add) and + [`DependencyMatcher.add`](/api/dependencymatcher#add) now only accept a list + of patterns as the second argument (instead of a variable number of + arguments). The `on_match` callback becomes an optional keyword argument. ### Removed or renamed API {#incompat-removed} @@ -96,6 +214,7 @@ API. For specific examples of how to rewrite your code, check out the | `GoldParse` | [`Example`](/api/example) | | `GoldCorpus` | [`Corpus`](/api/corpus) | | `spacy debug-data` | [`spacy debug data`](/api/cli#debug-data) | +| `spacy profile` | [`spacy debug profile`](/api/cli#debug-profile) | | `spacy link`, `util.set_data_path`, `util.get_data_path` | not needed, model symlinks are deprecated | The following deprecated methods, attributes and arguments were removed in v3.0. @@ -121,7 +240,7 @@ on them. Model symlinks and shortcuts like `en` are now officially deprecated. There are [many different models](/models) with different capabilities and not just one "English model". In order to download and load a model, you should always use -its full name – for instance, `en_core_web_sm`. +its full name – for instance, [`en_core_web_sm`](/models/en#en_core_web_sm).
```diff - python -m spacy download en @@ -224,6 +343,51 @@ and you typically shouldn't have to use it in your code. + parser = nlp.add_pipe("parser") ``` +If you need to add a component from an existing pretrained model, you can now +use the `source` argument on [`nlp.add_pipe`](/api/language#add_pipe). This will +check that the component is compatible, and take care of porting over all +config. During training, you can also reference existing pretrained components +in your [config](/usage/training#config-components) and decide whether or not +they should be updated with more data. + +> #### config.cfg (excerpt) +> +> ```ini +> [components.ner] +> source = "en_core_web_sm" +> component = "ner" +> ``` + +```diff +source_nlp = spacy.load("en_core_web_sm") +nlp = spacy.blank("en") +- ner = source_nlp.get_pipe("ner") +- nlp.add_pipe(ner) ++ nlp.add_pipe("ner", source=source_nlp) +``` + +### Adding match patterns {#migrating-matcher} + +The [`Matcher.add`](/api/matcher#add), +[`PhraseMatcher.add`](/api/phrasematcher#add) and +[`DependencyMatcher.add`](/api/dependencymatcher#add) methods now only accept a +**list of patterns** as the second argument (instead of a variable number of +arguments). The `on_match` callback becomes an optional keyword argument. + +```diff +matcher = Matcher(nlp.vocab) +patterns = [[{"TEXT": "Google"}, {"TEXT": "Now"}], [{"TEXT": "GoogleNow"}]] +- matcher.add("GoogleNow", on_match, *patterns) ++ matcher.add("GoogleNow", patterns, on_match=on_match) +``` + +```diff +matcher = PhraseMatcher(nlp.vocab) +patterns = [nlp("health care reform"), nlp("healthcare reform")] +- matcher.add("HEALTH", on_match, *patterns) ++ matcher.add("HEALTH", patterns, on_match=on_match) +``` + ### Training models {#migrating-training} To train your models, you should now pretty much always use the @@ -233,15 +397,20 @@ use a [flexible config file](/usage/training#config) that describes all training settings and hyperparameters, as well as your pipeline, model components and architectures to use. The `--code` argument lets you pass in code containing [custom registered functions](/usage/training#custom-code) that you can -reference in your config. +reference in your config. To get started, check out the +[quickstart widget](/usage/training#quickstart). #### Binary .spacy training data format {#migrating-training-format} -spaCy now uses a new -[binary training data format](/api/data-formats#binary-training), which is much -smaller and consists of `Doc` objects, serialized via the -[`DocBin`](/api/docbin). You can convert your existing JSON-formatted data using -the [`spacy convert`](/api/cli#convert) command, which outputs `.spacy` files: +spaCy v3.0 uses a new +[binary training data format](/api/data-formats#binary-training) created by +serializing a [`DocBin`](/api/docbin), which represents a collection of `Doc` +objects. This means that you can train spaCy models using the same format it +outputs: annotated `Doc` objects. The binary format is extremely **efficient in +storage**, especially when packing multiple documents together. + +You can convert your existing JSON-formatted data using the +[`spacy convert`](/api/cli#convert) command, which outputs `.spacy` files: ```bash $ python -m spacy convert ./training.json ./output @@ -273,13 +442,72 @@ workflows, from data preprocessing to training and packaging your model. 
-#### Migrating training scripts to CLI command and config {#migrating-training-scripts} - - - #### Training via the Python API {#migrating-training-python} - +For most use cases, you **shouldn't** have to write your own training scripts +anymore. Instead, you can use [`spacy train`](/api/cli#train) with a +[config file](/usage/training#config) and custom +[registered functions](/usage/training#custom-code) if needed. You can even +register callbacks that can modify the `nlp` object at different stages of its +lifecycle to fully customize it before training. + +If you do decide to use the [internal training API](/usage/training#api) from +Python, you should only need a few small modifications to convert your scripts +from spaCy v2.x to v3.x. The [`Example.from_dict`](/api/example#from_dict) +classmethod takes a reference `Doc` and a +[dictionary of annotations](/api/data-formats#dict-input), similar to the +"simple training style" in spaCy v2.x: + +```diff +### Migrating Doc and GoldParse +doc = nlp.make_doc("Mark Zuckerberg is the CEO of Facebook") +entities = [(0, 15, "PERSON"), (30, 38, "ORG")] +- gold = GoldParse(doc, entities=entities) ++ example = Example.from_dict(doc, {"entities": entities}) +``` + +```diff +### Migrating simple training style +text = "Mark Zuckerberg is the CEO of Facebook" +annotations = {"entities": [(0, 15, "PERSON"), (30, 38, "ORG")]} ++ doc = nlp.make_doc(text) ++ example = Example.from_dict(doc, annotations) +``` + +The [`Language.update`](/api/language#update), +[`Language.evaluate`](/api/language#evaluate) and +[`Pipe.update`](/api/pipe#update) methods now all take batches of +[`Example`](/api/example) objects instead of `Doc` and `GoldParse` objects, or +raw text and a dictionary of annotations. + +```python +### Training loop {highlight="11"} +TRAIN_DATA = [ + ("Who is Shaka Khan?", {"entities": [(7, 17, "PERSON")]}), + ("I like London.", {"entities": [(7, 13, "LOC")]}), +] +nlp.begin_training() +for i in range(20): + random.shuffle(TRAIN_DATA) + for batch in minibatch(TRAIN_DATA): + examples = [] + for text, annots in batch: + examples.append(Example.from_dict(nlp.make_doc(text), annots)) + nlp.update(examples) +``` + +[`Language.begin_training`](/api/language#begin_training) and +[`Pipe.begin_training`](/api/pipe#begin_training) now take a function that +returns a sequence of `Example` objects to initialize the model instead of a +list of tuples. The data examples are used to **initialize the models** of +trainable pipeline components, which includes validating the network, +[inferring missing shapes](https://thinc.ai/docs/usage-models#validation) and +setting up the label scheme. 
+ +```diff +- nlp.begin_training(examples) ++ nlp.begin_training(lambda: examples) +``` #### Packaging models {#migrating-training-packaging} diff --git a/website/package-lock.json b/website/package-lock.json index cb8ce725a..9b449aca9 100644 --- a/website/package-lock.json +++ b/website/package-lock.json @@ -17455,6 +17455,11 @@ } } }, + "jinja-to-js": { + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/jinja-to-js/-/jinja-to-js-3.2.3.tgz", + "integrity": "sha512-ktEBxQG17fYaFcHThB719+EbePBx+AkkORQMyuP0UuLPS2zx8uJXP5CsItXjUUwMHFPj3hCRkyqEYzLbeklYgQ==" + }, "jpeg-js": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/jpeg-js/-/jpeg-js-0.2.0.tgz", diff --git a/website/package.json b/website/package.json index 12702692d..e61661c11 100644 --- a/website/package.json +++ b/website/package.json @@ -41,6 +41,7 @@ "gatsby-transformer-sharp": "^2.1.13", "html-to-react": "^1.3.4", "intersection-observer": "^0.5.1", + "jinja-to-js": "^3.2.3", "node-sass": "^4.11.0", "parse-numeric-range": "0.0.2", "prismjs": "^1.15.0", @@ -52,20 +53,22 @@ "remark-react": "^5.0.1" }, "scripts": { - "build": "gatsby build", - "dev": "gatsby develop", + "build": "npm run python:setup && gatsby build", + "dev": "npm run python:setup && gatsby develop", "dev:nightly": "BRANCH=nightly.spacy.io npm run dev", "lint": "eslint **", "clear": "rm -rf .cache", - "test": "echo \"Write tests! -> https://gatsby.app/unit-testing\"" + "test": "echo \"Write tests! -> https://gatsby.app/unit-testing\"", + "python:install": "pip install -r setup/requirements.txt", + "python:setup": "cd setup && ./setup.sh" }, "devDependencies": { + "@sindresorhus/slugify": "^0.8.0", "browser-monads": "^1.0.0", "md-attr-parser": "^1.2.1", "prettier": "^1.16.4", "raw-loader": "^1.0.0", - "unist-util-visit": "^1.4.0", - "@sindresorhus/slugify": "^0.8.0" + "unist-util-visit": "^1.4.0" }, "repository": { "type": "git", diff --git a/website/setup/jinja_to_js.py b/website/setup/jinja_to_js.py new file mode 100644 index 000000000..a2c896151 --- /dev/null +++ b/website/setup/jinja_to_js.py @@ -0,0 +1,1261 @@ +# Forked from: https://github.com/jonbretman/jinja-to-js +# With additional functionality: in/not in, replace, pprint, round, + for lists, +# rendering empty dicts +# This script is mostly used to generate the JavaScript function for the +# training quickstart widget.
+import contextlib +import json +import re +import os +from os import path +from io import StringIO +from jinja2 import Environment, FileSystemLoader, nodes +from pathlib import Path +import typer + + +OPERANDS = { + "eq": "===", + "ne": "!==", + "lt": " < ", + "gt": " > ", + "lteq": " <= ", + "gteq": " >= ", +} + +DICT_ITER_METHODS = ("iteritems", "items", "values", "keys") + +STATE_DEFAULT = 0 +STATE_EXECUTING = 1 +STATE_INTERPOLATING = 2 + +LOOP_HELPER_INDEX = "index" +LOOP_HELPER_INDEX_0 = "index0" +LOOP_HELPER_FIRST = "first" +LOOP_HELPER_LAST = "last" +LOOP_HELPER_LENGTH = "length" +LOOP_HELPERS = ( + LOOP_HELPER_INDEX, + LOOP_HELPER_INDEX_0, + LOOP_HELPER_FIRST, + LOOP_HELPER_LAST, + LOOP_HELPER_LENGTH, +) + + +def amd_format(dependencies, template_function): + result = "define([" + result += ",".join('"{0}"'.format(x[0]) for x in dependencies) + result += "], function (" + result += ",".join(x[1] for x in dependencies) + result += ") { return " + result += template_function + result += "; });" + return result + + +def commonjs_format(dependencies, template_function): + result = "".join('var {0} = require("{1}");'.format(y, x) for x, y in dependencies) + result += "module.exports = {0};".format(template_function) + return result + + +def es6_format(dependencies, template_function): + result = "".join('import {0} from "{1}";'.format(y, x) for x, y in dependencies) + result += "export default {0}".format(template_function) + return result + + +JS_MODULE_FORMATS = { + None: lambda dependencies, template_function: template_function, + "amd": amd_format, + "commonjs": commonjs_format, + "es6": es6_format, +} + + +# This string has to double all the '{' and '}' due to Python's string formatting. +# See - https://docs.python.org/2/library/string.html#formatstrings +TEMPLATE_WRAPPER = """ +function {function_name}(ctx) {{ + var __result = ""; + var __tmp; + var __runtime = jinjaToJS.runtime; + var __filters = jinjaToJS.filters; + var __globals = jinjaToJS.globals; + var context = jinjaToJS.createContext(ctx); + {template_code} + return __result; +}} +""" + + +class ExtendsException(Exception): + """ + Raised when an {% extends %} is encountered. At this point the parent template is + loaded and all blocks defined in the current template passed to it. + """ + + pass + + +@contextlib.contextmanager +def option(current_kwargs, **kwargs): + """ + Context manager for temporarily setting a keyword argument and + then restoring it to whatever it was before. + """ + + tmp_kwargs = dict((key, current_kwargs.get(key)) for key, value in kwargs.items()) + current_kwargs.update(kwargs) + yield + current_kwargs.update(tmp_kwargs) + + +def is_method_call(node, method_name): + """ + Returns True if `node` is a method call for `method_name`. `method_name` + can be either a string or an iterable of strings. + """ + + if not isinstance(node, nodes.Call): + return False + + if isinstance(node.node, nodes.Getattr): + # e.g. foo.bar() + method = node.node.attr + + elif isinstance(node.node, nodes.Name): + # e.g. bar() + method = node.node.name + + elif isinstance(node.node, nodes.Getitem): + # e.g. foo["bar"]() + method = node.node.arg.value + + else: + return False + + if isinstance(method_name, (list, tuple)): + return method in method_name + + return method == method_name + + +def is_loop_helper(node): + """ + Returns True is node is a loop helper e.g. 
{{ loop.index }} or {{ loop.first }} + """ + return ( + hasattr(node, "node") + and isinstance(node.node, nodes.Name) + and node.node.name == "loop" + ) + + +def temp_var_names_generator(): + x = 0 + while True: + yield "__$%s" % x + x += 1 + + +class JinjaToJS(object): + def __init__( + self, + template_root, + template_name, + js_module_format=None, + runtime_path="jinja-to-js", + include_prefix="", + include_ext="", + child_blocks=None, + dependencies=None, + custom_filters=None, + ): + """ + Args: + template_root (str): The path to where templates should be loaded from. + template_name (str): The name of the template to compile (relative to `template_root`). + js_module_format (str, optional): The JavaScript module format to use. + One of ('amd', 'commonjs', 'es6') + runtime_path (str, optional): If `js_module_format` is specified then the JavaScript + runtime will be imported using the appropriate method. + It defaults to assuming it will be imported from + `node_modules` but you can change it using this option. + include_prefix (str, optional): If using the `amd` module format you can use this option + to add a prefix to every include path as AMD imports are + generally relative to the main file, not the module + importing. + include_ext (str, optional): By default any includes will be references without an + extension, as neither AMD, commonJS or ES6 require the + '.js' extension. If you want to use an extension, say + '.template' then set this option to a string including + the leading '.' + child_blocks (dict, optional): Used internally when handling templates that extend + other templates. + dependencies (list of tuple, optional): Used internally when handling templates that + extend other templates. + custom_filters (list of str, optional): List of custom filters which should be allowed. + These may be filters supported by Jinja but not + supported by jinja-to-js. These filters MUST be + registered with the jinja-to-js JS runtime. + """ + + self.environment = Environment( + loader=FileSystemLoader(template_root), + autoescape=True, + extensions=["jinja2.ext.with_", "jinja2.ext.autoescape"], + ) + self.output = StringIO() + self.stored_names = set() + self.temp_var_names = temp_var_names_generator() + self.state = STATE_DEFAULT + self.child_blocks = child_blocks or {} + self.dependencies = dependencies or [] + self._runtime_function_cache = [] + self.js_module_format = js_module_format + self.runtime_path = runtime_path + self.include_prefix = include_prefix + self.include_ext = include_ext + self.template_root = template_root + self.template_name = template_name + self.custom_filters = custom_filters or [] + + # The name of the JavaScript function that will output this template. By using a named + # function the template can call itself which is required to support recursive includes. + self.js_function_name = "template" + "".join( + x.title() + for x in re.split(r"[^\w]|_", path.splitext(self.template_name)[0]) + ) + + self.context_name = "context" + + self._add_dependency(self.runtime_path, "jinjaToJS") + + # Jinja2 doesn't accept Windows filepaths + if os.name == "nt": + self.template_name = self.template_name.replace(os.pathsep, "/") + + template_string, template_path, _ = self.environment.loader.get_source( + self.environment, self.template_name + ) + + # It is assumed that this will be the absolute path to the template. It is used to work out + # related paths for inclues. 
+ self.template_path = template_path + + if self.js_module_format not in JS_MODULE_FORMATS.keys(): + raise ValueError( + "The js_module_format option must be one of: %s" + % JS_MODULE_FORMATS.keys() + ) + + self.ast = self.environment.parse(template_string) + + try: + for node in self.ast.body: + self._process_node(node) + except ExtendsException: + pass + + def get_output(self): + """ + Returns the generated JavaScript code. + + Returns: + str + """ + # generate the JS function string + template_function = TEMPLATE_WRAPPER.format( + function_name=self.js_function_name, template_code=self.output.getvalue() + ).strip() + + # get the correct module format template + module_format = JS_MODULE_FORMATS[self.js_module_format] + + # generate the module code + return module_format(self.dependencies, template_function) + + def _get_depencency_var_name(self, dependency): + """ + Returns the variable name assigned to the given dependency or None if the dependency has + not yet been registered. + + Args: + dependency (str): Thet dependency that needs to be imported. + + Returns: + str or None + """ + for dep_path, var_name in self.dependencies: + if dep_path == dependency: + return var_name + + def _add_dependency(self, dependency, var_name=None): + """ + Adds the given dependency and returns the variable name to use to access it. If `var_name` + is not given then a random one will be created. + + Args: + dependency (str): + var_name (str, optional): + + Returns: + str + """ + if var_name is None: + var_name = next(self.temp_var_names) + # Don't add duplicate dependencies + if (dependency, var_name) not in self.dependencies: + self.dependencies.append((dependency, var_name)) + return var_name + + def _process_node(self, node, **kwargs): + node_name = node.__class__.__name__.lower() + handler = getattr(self, "_process_" + node_name, None) + if callable(handler): + handler(node, **kwargs) + else: + raise Exception(f"Unknown node {node} ({node_name})") + + def _process_extends(self, node, **kwargs): + """ + Processes an extends block e.g. `{% extends "some/template.jinja" %}` + """ + + # find all the blocks in this template + for b in self.ast.find_all(nodes.Block): + + # if not already in `child_blocks` then this is the first time a + # block with this name has been encountered. + if b.name not in self.child_blocks: + self.child_blocks[b.name] = b + else: + + # otherwise we have seen this block before, so we need to find the last + # super_block and add the block from this template to the end. + block = self.child_blocks.get(b.name) + while hasattr(block, "super_block"): + block = block.super_block + block.super_block = b + + # load the parent template + parent_template = JinjaToJS( + template_root=self.template_root, + template_name=node.template.value, + js_module_format=self.js_module_format, + runtime_path=self.runtime_path, + include_prefix=self.include_prefix, + include_ext=self.include_ext, + child_blocks=self.child_blocks, + dependencies=self.dependencies, + ) + + # add the parent templates output to the current output + self.output.write(parent_template.output.getvalue()) + + # Raise an exception so we stop parsing this template + raise ExtendsException + + def _process_block(self, node, **kwargs): + """ + Processes a block e.g. 
`{% block my_block %}{% endblock %}` + """ + + # check if this node already has a 'super_block' attribute + if not hasattr(node, "super_block"): + + # since it doesn't it must be the last block in the inheritance chain + node.super_block = None + + # see if there has been a child block defined - if there is this + # will be the first block in the inheritance chain + child_block = self.child_blocks.get(node.name) + + if child_block: + + # we have child nodes so we need to set `node` as the + # super of the last one in the chain + last_block = child_block + while hasattr(last_block, "super_block"): + last_block = child_block.super_block + + # once we have found it, set this node as it's super block + last_block.super_block = node + + # this is the node we want to process as it's the first in the inheritance chain + node = child_block + + # process the block passing the it's super along, if this block + # calls super() it will be handled by `_process_call` + for n in node.body: + self._process_node(n, super_block=node.super_block, **kwargs) + + def _process_output(self, node, **kwargs): + """ + Processes an output node, which will contain things like `Name` and `TemplateData` nodes. + """ + for n in node.nodes: + self._process_node(n, **kwargs) + + def _process_templatedata(self, node, **_): + """ + Processes a `TemplateData` node, this is just a bit of as-is text + to be written to the output. + """ + + # escape double quotes + value = re.sub('"', r'\\"', node.data) + + # escape new lines + value = re.sub("\n", r"\\n", value) + + # append value to the result + self.output.write('__result += "' + value + '";') + + def _process_name(self, node, **kwargs): + """ + Processes a `Name` node. Some examples of `Name` nodes: + {{ foo }} -> 'foo' is a Name + {% if foo }} -> 'foo' is a Name + """ + + with self._interpolation(): + with self._python_bool_wrapper(**kwargs): + + if node.name not in self.stored_names and node.ctx != "store": + self.output.write(self.context_name) + self.output.write(".") + + if node.ctx == "store": + self.stored_names.add(node.name) + + self.output.write(node.name) + + def _process_dict(self, node, **kwargs): + with self._interpolation(): + with self._python_bool_wrapper(**kwargs): + if node.items: + raise ValueError(f"Can't process non-empty dict in epxression: {node}") + self.output.write("{}") + + def _process_getattr(self, node, **kwargs): + """ + Processes a `GetAttr` node. e.g. {{ foo.bar }} + """ + + with self._interpolation(): + with self._python_bool_wrapper(**kwargs) as new_kwargs: + if is_loop_helper(node): + self._process_loop_helper(node, **new_kwargs) + else: + self._process_node(node.node, **new_kwargs) + self.output.write(".") + self.output.write(node.attr) + + def _process_getitem(self, node, **kwargs): + """ + Processes a `GetItem` node e.g. {{ foo["bar"] }} + """ + + with self._interpolation(): + with self._python_bool_wrapper(**kwargs) as new_kwargs: + self._process_node(node.node, **new_kwargs) + + if isinstance(node.arg, nodes.Slice): + self.output.write(".slice(") + + if node.arg.step is not None: + raise Exception( + "The step argument is not supported when slicing." 
+ ) + + if node.arg.start is None: + self.output.write("0") + else: + self._process_node(node.arg.start, **new_kwargs) + + if node.arg.stop is None: + self.output.write(")") + else: + self.output.write(",") + self._process_node(node.arg.stop, **new_kwargs) + self.output.write(")") + else: + self.output.write("[") + self._process_node(node.arg, **new_kwargs) + self.output.write("]") + + def _process_for(self, node, **kwargs): + """ + Processes a for loop. e.g. + {% for number in numbers %} + {{ number }} + {% endfor %} + {% for key, value in somemap.items() %} + {{ key }} -> {{ value }} + {% %} + """ + + # since a for loop can introduce new names into the context + # we need to remember the ones that existed outside the loop + previous_stored_names = self.stored_names.copy() + + with self._execution(): + self.output.write("__runtime.each(") + + if is_method_call(node.iter, dict.keys.__name__): + self.output.write("Object.keys(") + + self._process_node(node.iter, **kwargs) + + if is_method_call(node.iter, dict.keys.__name__): + self.output.write(")") + + self.output.write(",") + self.output.write("function") + self.output.write("(") + + # javascript iterations put the value first, then the key + if isinstance(node.target, nodes.Tuple): + if len(node.target.items) > 2: + raise Exception( + "De-structuring more than 2 items is not supported." + ) + + for i, item in enumerate(reversed(node.target.items)): + self._process_node(item, **kwargs) + if i < len(node.target.items) - 1: + self.output.write(",") + else: + self._process_node(node.target, **kwargs) + + self.output.write(")") + self.output.write("{") + + if node.test: + self.output.write("if (!(") + self._process_node(node.test, **kwargs) + self.output.write(")) { return; }") + + assigns = ( + node.target.items if isinstance(node.target, nodes.Tuple) else [node.target] + ) + + with self._scoped_variables(assigns, **kwargs): + for n in node.body: + self._process_node(n, **kwargs) + + with self._execution(): + self.output.write("}") + self.output.write(")") + self.output.write(";") + + # restore the stored names + self.stored_names = previous_stored_names + + def _process_if(self, node, execute_end=None, **kwargs): + """ + Processes an if block e.g. `{% if foo %} do something {% endif %}` + """ + + with self._execution(): + self.output.write("if") + self.output.write("(") + + with option(kwargs, use_python_bool_wrapper=True): + self._process_node(node.test, **kwargs) + + self.output.write(")") + self.output.write("{") + + # We accept an `execute_end` function as a keyword argument as this function is + # recursive in the case of something like if-elif-elif-else. In these cases this + # invocation of this function may have to close execution opened by a previous + # invocation of this function. 
+ if execute_end: + execute_end() + + # body + for n in node.body: + self._process_node(n, **kwargs) + + if not node.else_ and not node.elif_: + # no else - just close the if + with self._execution(): + self.output.write("}") + + else: + # either an else or an elif + with self._execution() as execute_end: + self.output.write("}") + self.output.write(" else ") + + # check for elif + for n in node.elif_: + self._process_node(n, execute_end=execute_end, **kwargs) + + if node.elif_ and node.else_: + self.output.write(" else ") + + # open up the body + self.output.write("{") + + # process the body of the else + for n in node.else_: + self._process_node(n, **kwargs) + + # close the body + with self._execution(): + self.output.write("}") + + def _process_condexpr(self, node, **kwargs): + with self._interpolation(): + self.output.write("(") + + with self._python_bool_wrapper(**kwargs) as new_kwargs: + self._process_node(node.test, **new_kwargs) + + self.output.write(" ? ") + self._process_node(node.expr1, **kwargs) + self.output.write(" : ") + self._process_node(node.expr2, **kwargs) + self.output.write(")") + + def _process_not(self, node, **kwargs): + self.output.write("!") + + with self._python_bool_wrapper(**kwargs) as new_kwargs: + self._process_node(node.node, **new_kwargs) + + def _process_or(self, node, **kwargs): + self._process_node(node.left, **kwargs) + self.output.write(" || ") + self._process_node(node.right, **kwargs) + + def _process_and(self, node, **kwargs): + self._process_node(node.left, **kwargs) + self.output.write(" && ") + self._process_node(node.right, **kwargs) + + def _process_tuple(self, node, **kwargs): + self.output.write("[") + for i, item in enumerate(node.items): + self._process_node(item, **kwargs) + if i < len(node.items) - 1: + self.output.write(",") + self.output.write("]") + + def _process_call(self, node, super_block=None, **kwargs): + if is_method_call(node, DICT_ITER_METHODS): + # special case for dict methods + self._process_node(node.node.node, **kwargs) + + elif is_method_call(node, "super"): + # special case for the super() method which is available inside blocks + if not super_block: + raise Exception("super() called outside of a block with a parent.") + self._process_node(super_block, **kwargs) + + else: + # just a normal function call on a context variable + with self._interpolation(): + with self._python_bool_wrapper(**kwargs) as new_kwargs: + self._process_node(node.node, **new_kwargs) + self.output.write("(") + self._process_args(node, **new_kwargs) + self.output.write(")") + + # only output the semi-colon if we are not interpolating + if self.state != STATE_INTERPOLATING: + self.output.write("") + + def _process_filter(self, node, **kwargs): + method_name = getattr(self, "_process_filter_%s" % node.name, None) + if callable(method_name): + method_name(node, **kwargs) + elif node.name in self.custom_filters: + with self._interpolation(safe=True): + with self._python_bool_wrapper(**kwargs) as new_kwargs: + self.output.write("__filters.%s(" % node.name) + self._process_node(node.node, **new_kwargs) + if getattr(node, "args", None): + self.output.write(",") + self._process_args(node, **new_kwargs) + self.output.write(")") + else: + raise Exception("Unsupported filter: %s" % node.name) + + def _process_filter_safe(self, node, **kwargs): + with self._interpolation(safe=True): + with self._python_bool_wrapper(**kwargs) as new_kwargs: + self._process_node(node.node, **new_kwargs) + + def _process_filter_capitalize(self, node, **kwargs): + with 
self._interpolation(): + with self._python_bool_wrapper(**kwargs) as new_kwargs: + self.output.write("__filters.capitalize(") + self._process_node(node.node, **new_kwargs) + self.output.write(")") + + def _process_filter_abs(self, node, **kwargs): + with self._interpolation(): + with self._python_bool_wrapper(**kwargs) as new_kwargs: + self.output.write("Math.abs(") + self._process_node(node.node, **new_kwargs) + self.output.write(")") + + def _process_filter_replace(self, node, **kwargs): + # We're getting a quoted string from Python/Jinja as the pattern to + # replace, but to replace all occurrences in JS, we typically need a + # regex, which would be annoying to convert. So we're using split/join + # instead here. + with self._interpolation(): + with self._python_bool_wrapper(**kwargs) as new_kwargs: + self._process_node(node.node, **new_kwargs) + self.output.write(".split(") + self._process_node(node.args[0], **new_kwargs) + self.output.write(").join(") + self._process_node(node.args[1], **new_kwargs) + self.output.write(")") + + def _process_filter_pprint(self, node, **kwargs): + with self._interpolation(): + with self._python_bool_wrapper(**kwargs) as new_kwargs: + self.output.write("JSON.stringify(") + self._process_node(node.node, **new_kwargs) + self.output.write(")") + + def _process_filter_attr(self, node, **kwargs): + with self._interpolation(): + with self._python_bool_wrapper(**kwargs) as new_kwargs: + self._process_node(node.node, **new_kwargs) + self.output.write("[") + self._process_node(node.args[0], **new_kwargs) + self.output.write("]") + + def _process_filter_batch(self, node, **kwargs): + with self._interpolation(): + with self._python_bool_wrapper(**kwargs) as new_kwargs: + self.output.write("__filters.batch(") + self._process_node(node.node, **new_kwargs) + self.output.write(",") + self._process_args(node, **new_kwargs) + self.output.write(")") + + def _process_filter_default(self, node, **kwargs): + with self._interpolation(): + with self._python_bool_wrapper(**kwargs) as new_kwargs: + self.output.write("__filters.default(") + self._process_node(node.node, **new_kwargs) + if node.args: + self.output.write(",") + self._process_args(node, **new_kwargs) + self.output.write(")") + + def _process_filter_first(self, node, **kwargs): + with self._interpolation(): + with self._python_bool_wrapper(**kwargs) as new_kwargs: + self.output.write("__filters.first(") + self._process_node(node.node, **new_kwargs) + self.output.write(")") + + def _process_filter_int(self, node, **kwargs): + with self._interpolation(): + with self._python_bool_wrapper(**kwargs) as new_kwargs: + self.output.write("__filters.int(") + self._process_node(node.node, **new_kwargs) + if node.args: + self.output.write(",") + self._process_args(node, **new_kwargs) + self.output.write(")") + + def _process_filter_round(self, node, **kwargs): + with self._interpolation(): + with self._python_bool_wrapper(**kwargs) as new_kwargs: + self.output.write("Math.round((") + self._process_node(node.node, **new_kwargs) + self.output.write("+ Number.EPSILON) * 10**") + self._process_node(node.args[0], **new_kwargs) + self.output.write(") / 10**") + self._process_node(node.args[0], **new_kwargs) + + def _process_filter_last(self, node, **kwargs): + with self._interpolation(): + with self._python_bool_wrapper(**kwargs) as new_kwargs: + self.output.write("__filters.last(") + self._process_node(node.node, **new_kwargs) + self.output.write(")") + + def _process_filter_length(self, node, **kwargs): + with 
self._interpolation(): + with self._python_bool_wrapper(**kwargs) as new_kwargs: + self.output.write("__filters.size(") + self._process_node(node.node, **new_kwargs) + self.output.write(")") + + def _process_filter_lower(self, node, **kwargs): + with self._interpolation(): + with self._python_bool_wrapper(**kwargs) as new_kwargs: + self.output.write("(") + self._process_node(node.node, **new_kwargs) + self.output.write(' + "").toLowerCase()') + + def _process_filter_slice(self, node, **kwargs): + with self._interpolation(): + with self._python_bool_wrapper(**kwargs) as new_kwargs: + self.output.write("__filters.slice(") + self._process_node(node.node, **new_kwargs) + self.output.write(",") + self._process_args(node, **new_kwargs) + self.output.write(")") + + def _process_filter_title(self, node, **kwargs): + with self._interpolation(): + with self._python_bool_wrapper(**kwargs) as new_kwargs: + self.output.write("__filters.title(") + self._process_node(node.node, **new_kwargs) + self.output.write(")") + + def _process_filter_trim(self, node, **kwargs): + with self._interpolation(): + with self._python_bool_wrapper(**kwargs) as new_kwargs: + self.output.write("(") + self._process_node(node.node, **new_kwargs) + self.output.write(' + "").trim()') + + def _process_filter_upper(self, node, **kwargs): + with self._interpolation(): + with self._python_bool_wrapper(**kwargs) as new_kwargs: + self.output.write("(") + self._process_node(node.node, **new_kwargs) + self.output.write(' + "").toUpperCase()') + + def _process_filter_truncate(self, node, **kwargs): + with self._interpolation(): + with self._python_bool_wrapper(**kwargs) as new_kwargs: + self.output.write("__filters.truncate(") + self._process_node(node.node, **new_kwargs) + self.output.write(",") + self._process_args(node, **new_kwargs) + self.output.write(")") + + def _process_assign(self, node, **kwargs): + with self._execution(): + self.output.write("var ") + self._process_node(node.target, **kwargs) + self.output.write(" = ") + self._process_node(node.node, **kwargs) + self.output.write(";") + + def _process_with(self, node, **kwargs): + + # keep a copy of the stored names before the scope + previous_stored_names = self.stored_names.copy() + + # assigns in the with tag + # e.g. {% with var = "something %} + assigns_in_tag = [nodes.Assign(t, v) for t, v in zip(node.targets, node.values)] + + # assigns in the with body + # e.g. 
{% set name = 'John' %} + assigns_in_body = [x for x in node.body if isinstance(x, nodes.Assign)] + + # remove assigns from the body + node.body = [x for x in node.body if not isinstance(x, nodes.Assign)] + + # get a list of all the assigns in this with block + # both on the tag, and within the body of the block + all_assigns = assigns_in_tag + assigns_in_body + + with self._execution(): + self.output.write("(function () {") + + with self._scoped_variables(all_assigns, **kwargs): + for node in node.body: + self._process_node(node, **kwargs) + + with self._execution(): + self.output.write("})();") + + # restore previous stored names + self.stored_names = previous_stored_names + + def _process_compare(self, node, **kwargs): + + if len(node.ops) > 1: + raise Exception("Multiple operands are not supported.") + + operand = node.ops[0] + is_equality = operand.op in ("eq", "ne") + left_hand_is_const = isinstance(node.expr, nodes.Const) + right_hand_is_const = isinstance(operand.expr, nodes.Const) + + # If the operand is equality and neither the left or right hand side are constants then we + # will need to use the JavaScript deep equals function. Ideally we want to avoid using this + # as it is quite a big function. + use_is_equal_function = is_equality and not ( + left_hand_is_const or right_hand_is_const + ) + + with option(kwargs, use_python_bool_wrapper=False): + if operand.op == "in" or operand.op == "notin": + # Special case for "in" operator + if operand.op == "notin": + self.output.write("!") + self._process_node(operand.expr, **kwargs) + self.output.write(".includes(") + self._process_node(node.expr, **kwargs) + self.output.write(")") + else: + if use_is_equal_function: + if operand.op == "ne": + self.output.write("!") + self.output.write("__runtime.isEqual(") + + self._process_node(node.expr, **kwargs) + + if use_is_equal_function: + self.output.write(",") + else: + self.output.write(OPERANDS.get(operand.op)) + + self._process_node(operand.expr, **kwargs) + + if use_is_equal_function: + self.output.write(")") + + def _process_operand(self, node, **kwargs): + self.output.write(OPERANDS.get(node.op)) + self._process_node(node.expr, **kwargs) + + def _process_const(self, node, **_): + with self._interpolation(): + self.output.write(json.dumps(node.value)) + + def _process_nonetype(self, node, **_): + with self._interpolation(): + self.output.write("null") + + def _process_neg(self, node, **kwargs): + with self._interpolation(): + self.output.write("-") + self._process_node(node.node, **kwargs) + + def _process_list(self, node, **kwargs): + self.output.write("[") + for i, item in enumerate(node.items): + self._process_node(item, **kwargs) + if i < len(node.items) - 1: + self.output.write(",") + self.output.write("]") + + def _process_test(self, node, **kwargs): + with option(kwargs, use_python_bool_wrapper=False): + method_name = getattr(self, "_process_test_%s" % node.name, None) + if callable(method_name): + method_name(node, **kwargs) + else: + raise Exception("Unsupported test: %s" % node.name) + + def _process_test_defined(self, node, **kwargs): + self.output.write("(typeof ") + self._process_node(node.node, **kwargs) + self.output.write(' !== "undefined")') + + def _process_test_undefined(self, node, **kwargs): + self._process_node(node.node, **kwargs) + self.output.write(" === undefined") + + def _process_test_callable(self, node, **kwargs): + self.output.write("__runtime.type(") + self._process_node(node.node, **kwargs) + self.output.write(') === "Function"') + + def 
_process_test_divisibleby(self, node, **kwargs): + self._process_node(node.node, **kwargs) + self.output.write(" % ") + self._process_node(node.args[0], **kwargs) + self.output.write(" === 0") + + def _process_test_even(self, node, **kwargs): + self._process_node(node.node, **kwargs) + self.output.write(" % 2 === 0") + + def _process_test_odd(self, node, **kwargs): + self._process_node(node.node, **kwargs) + self.output.write(" % 2 === 1") + + def _process_test_none(self, node, **kwargs): + self._process_node(node.node, **kwargs) + self.output.write(" === null") + + def _process_test_upper(self, node, **kwargs): + self._process_node(node.node, **kwargs) + self.output.write(".toUpperCase() === ") + self._process_node(node.node, **kwargs) + + def _process_test_lower(self, node, **kwargs): + self._process_node(node.node, **kwargs) + self.output.write(".toLowerCase() === ") + self._process_node(node.node, **kwargs) + + def _process_test_string(self, node, **kwargs): + self.output.write("__runtime.type(") + self._process_node(node.node, **kwargs) + self.output.write(') === "String"') + + def _process_test_mapping(self, node, **kwargs): + self.output.write("__runtime.type(") + self._process_node(node.node, **kwargs) + self.output.write(') === "Object"') + + def _process_test_number(self, node, **kwargs): + self.output.write("(__runtime.type(") + self._process_node(node.node, **kwargs) + self.output.write(') === "Number" && !isNaN(') + self._process_node(node.node, **kwargs) + self.output.write("))") + + def _process_include(self, node, **kwargs): + with self._interpolation(safe=True): + include_path = node.template.value + + if include_path == self.template_name: + # template is including itself + include_var_name = self.js_function_name + else: + if self.include_prefix: + include_path = self.include_prefix + node.template.value + elif ( + self.js_module_format in ("es6", "commonjs",) and self.template_name + ): + _, absolute_include_path, _ = self.environment.loader.get_source( + self.environment, node.template.value + ) + include_path = os.path.relpath( + absolute_include_path, os.path.dirname(self.template_path) + ) + if not include_path.startswith("."): + include_path = "./" + include_path + + # Jinja2 doesn't accept Windows filepaths (but does output them!) + if os.name == "nt": + include_path = include_path.replace(os.pathsep, "/") + + include_path = path.splitext(include_path)[0] + self.include_ext + include_var_name = self._get_depencency_var_name(include_path) + + if not include_var_name: + include_var_name = self._add_dependency(include_path) + + if self.js_module_format is None: + self.output.write('jinjaToJS.include("') + self.output.write(include_path) + self.output.write('");') + else: + self.output.write(include_var_name) + + self.output.write("(") + self.output.write(self.context_name) + self.output.write(")") + + def _process_add(self, node, **kwargs): + # Handle + operator for lists, which behaves differently in JS. Currently + # only works if we have an explicit list node on either side (in which + # case we assume both are lists). 
+ if isinstance(node.left, nodes.List) or isinstance(node.right, nodes.List): + with self._interpolation(): + with self._python_bool_wrapper(**kwargs) as new_kwargs: + self._process_node(node.left, **new_kwargs) + self.output.write(".concat(") + self._process_node(node.right, **new_kwargs) + self.output.write(")") + else: + self._process_math(node, math_operator=" + ", **kwargs) + + def _process_sub(self, node, **kwargs): + self._process_math(node, math_operator=" - ", **kwargs) + + def _process_div(self, node, **kwargs): + self._process_math(node, math_operator=" / ", **kwargs) + + def _process_floordiv(self, node, **kwargs): + self._process_math(node, math_operator=" / ", function="Math.floor", **kwargs) + + def _process_mul(self, node, **kwargs): + self._process_math(node, math_operator=" * ", **kwargs) + + def _process_mod(self, node, **kwargs): + self._process_math(node, math_operator=" % ", **kwargs) + + def _process_math(self, node, math_operator=None, function=None, **kwargs): + """ + Processes a math node e.g. `Div`, `Sub`, `Add`, `Mul` etc... + If `function` is provided the expression is wrapped in a call to that function. + """ + + with self._interpolation(): + if function: + self.output.write(function) + self.output.write("(") + + self._process_node(node.left, **kwargs) + self.output.write(math_operator) + self._process_node(node.right, **kwargs) + + if function: + self.output.write(")") + + def _process_loop_helper(self, node, **kwargs): + """ + Processes a loop helper e.g. {{ loop.first }} or {{ loop.index }} + """ + + if node.attr == LOOP_HELPER_INDEX: + self.output.write("(arguments[1] + 1)") + elif node.attr == LOOP_HELPER_INDEX_0: + self.output.write("arguments[1]") + elif node.attr == LOOP_HELPER_FIRST: + self.output.write("(arguments[1] == 0)") + elif node.attr == LOOP_HELPER_LAST: + self.output.write("(arguments[1] == arguments[2].length - 1)") + elif node.attr == LOOP_HELPER_LENGTH: + self.output.write("arguments[2].length") + + def _process_args(self, node, **kwargs): + args = getattr(node, "args", None) + if not args: + return + for i, item in enumerate(args): + self._process_node(item, **kwargs) + if i < len(node.args) - 1: + self.output.write(",") + + @contextlib.contextmanager + def _execution(self): + """ + Context manager for executing some JavaScript inside a template. + """ + + did_start_executing = False + + if self.state == STATE_DEFAULT: + did_start_executing = True + self.state = STATE_EXECUTING + + def close(): + if did_start_executing and self.state == STATE_EXECUTING: + self.state = STATE_DEFAULT + + yield close + close() + + @contextlib.contextmanager + def _interpolation(self, safe=False): + + did_start_interpolating = False + + if self.state == STATE_DEFAULT: + did_start_interpolating = True + self.output.write('__result += "" + ') + if safe is not True: + self.output.write("__runtime.escape") + self.output.write("((__tmp = (") + self.state = STATE_INTERPOLATING + + def close(): + if did_start_interpolating and self.state == STATE_INTERPOLATING: + self.output.write(')) == null ? "" : __tmp);') + self.state = STATE_DEFAULT + + yield close + close() + + @contextlib.contextmanager + def _scoped_variables(self, nodes_list, **kwargs): + """ + Context manager for creating scoped variables defined by the nodes in `nodes_list`. + These variables will be added to the context, and when the context manager exits the + context object will be restored to it's previous state. 
+ """ + + tmp_vars = [] + for node in nodes_list: + + is_assign_node = isinstance(node, nodes.Assign) + name = node.target.name if is_assign_node else node.name + + # create a temp variable name + tmp_var = next(self.temp_var_names) + + # save previous context value + with self._execution(): + + # save the current value of this name + self.output.write( + "var %s = %s.%s;" % (tmp_var, self.context_name, name) + ) + + # add new value to context + self.output.write("%s.%s = " % (self.context_name, name)) + + if is_assign_node: + self._process_node(node.node, **kwargs) + else: + self.output.write(node.name) + + self.output.write(";") + + tmp_vars.append((tmp_var, name)) + + yield + + # restore context + for tmp_var, name in tmp_vars: + with self._execution(): + self.output.write("%s.%s = %s;" % (self.context_name, name, tmp_var)) + + @contextlib.contextmanager + def _python_bool_wrapper(self, **kwargs): + + use_python_bool_wrapper = kwargs.get("use_python_bool_wrapper") + + if use_python_bool_wrapper: + self.output.write("__runtime.boolean(") + + with option(kwargs, use_python_bool_wrapper=False): + yield kwargs + + if use_python_bool_wrapper: + self.output.write(")") + + +def main( + # fmt: off + template_path: Path = typer.Argument(..., exists=True, dir_okay=False, help="Path to .jinja file"), + output: Path = typer.Argument(None, help="Path to output module (stdout if unset)"), + data_path: Path = typer.Option(None, "--data", help="Optional JSON file with additional data to be included as DATA") + # fmt: on +): + """Convert a jinja2 template to a JavaScript module.""" + data = "{}" + if data_path is not None: + with data_path.open("r", encoding="utf8") as f: + data = json.dumps(json.loads(f.read())) # dump and load for compactness + tpl_file = template_path.parts[-1] + compiler = JinjaToJS(template_path.parent, tpl_file, js_module_format="es6") + header = f"// This file was auto-generated by {__file__} based on {tpl_file}" + data_str = f"export const DATA = {data}" + result = compiler.get_output() + if output is not None: + with output.open("w") as f: + f.write(f"{header}\n{result}\n{data_str}") + print(f"Updated {output.parts[-1]}") + else: + print(result) + + +if __name__ == "__main__": + typer.run(main) diff --git a/website/setup/requirements.txt b/website/setup/requirements.txt new file mode 100644 index 000000000..7ffb6df0b --- /dev/null +++ b/website/setup/requirements.txt @@ -0,0 +1,3 @@ +# These are used to compile the training quickstart config +jinja2 +typer diff --git a/website/setup/setup.sh b/website/setup/setup.sh new file mode 100755 index 000000000..a6bbd3294 --- /dev/null +++ b/website/setup/setup.sh @@ -0,0 +1 @@ +python jinja_to_js.py ../../spacy/cli/templates/quickstart_training.jinja ../src/widgets/quickstart-training-generator.js --data ../../spacy/cli/templates/quickstart_training_recommendations.json diff --git a/website/src/components/icon.js b/website/src/components/icon.js index a5ccf1bde..322337955 100644 --- a/website/src/components/icon.js +++ b/website/src/components/icon.js @@ -23,6 +23,7 @@ import { ReactComponent as MoonIcon } from '../images/icons/moon.svg' import { ReactComponent as ClipboardIcon } from '../images/icons/clipboard.svg' import { ReactComponent as NetworkIcon } from '../images/icons/network.svg' import { ReactComponent as DownloadIcon } from '../images/icons/download.svg' +import { ReactComponent as PackageIcon } from '../images/icons/package.svg' import classes from '../styles/icon.module.sass' @@ -49,6 +50,7 @@ const icons = { clipboard: 
ClipboardIcon, network: NetworkIcon, download: DownloadIcon, + package: PackageIcon, } export default function Icon({ name, width = 20, height, inline = false, variant, className }) { diff --git a/website/src/components/infobox.js b/website/src/components/infobox.js index 046384986..363638bf2 100644 --- a/website/src/components/infobox.js +++ b/website/src/components/infobox.js @@ -5,8 +5,17 @@ import classNames from 'classnames' import Icon from './icon' import classes from '../styles/infobox.module.sass' -export default function Infobox({ title, emoji, id, variant = 'default', className, children }) { +export default function Infobox({ + title, + emoji, + id, + variant = 'default', + list = false, + className, + children, +}) { const infoboxClassNames = classNames(classes.root, className, { + [classes.list]: !!list, [classes.warning]: variant === 'warning', [classes.danger]: variant === 'danger', }) diff --git a/website/src/components/link.js b/website/src/components/link.js index 34df20554..3644479c5 100644 --- a/website/src/components/link.js +++ b/website/src/components/link.js @@ -8,13 +8,21 @@ import Icon from './icon' import classes from '../styles/link.module.sass' import { isString } from './util' -const internalRegex = /(http(s?)):\/\/(prodi.gy|spacy.io|irl.spacy.io)/gi +const internalRegex = /(http(s?)):\/\/(prodi.gy|spacy.io|irl.spacy.io|explosion.ai|course.spacy.io)/gi const Whitespace = ({ children }) => ( // Ensure that links are always wrapped in spaces <> {children} ) +function getIcon(dest) { + if (/(github.com)/.test(dest)) return 'code' + if (/^\/?api\/architectures#/.test(dest)) return 'network' + if (/^\/?api/.test(dest)) return 'docs' + if (/^\/?models\/(.+)/.test(dest)) return 'package' + return null +} + export default function Link({ children, to, @@ -30,22 +38,19 @@ export default function Link({ }) { const dest = to || href const external = forceExternal || /(http(s?)):\/\//gi.test(dest) - const isApi = !external && !hidden && !hideIcon && /^\/?api/.test(dest) - const isArch = !external && !hidden && !hideIcon && /^\/?api\/architectures#/.test(dest) - const isSource = external && !hidden && !hideIcon && /(github.com)/.test(dest) - const withIcon = isApi || isArch || isSource + const icon = getIcon(dest) + const withIcon = !hidden && !hideIcon && !!icon const sourceWithText = withIcon && isString(children) const linkClassNames = classNames(classes.root, className, { [classes.hidden]: hidden, - [classes.nowrap]: (withIcon && !sourceWithText) || isArch, + [classes.nowrap]: (withIcon && !sourceWithText) || icon === 'network', [classes.withIcon]: withIcon, }) const Wrapper = ws ? Whitespace : Fragment - const icon = isArch ? 'network' : isApi ? 'docs' : isSource ? 'code' : null const content = ( <> {sourceWithText ? 
{children} : children} - {icon && } + {withIcon && } ) diff --git a/website/src/components/quickstart.js b/website/src/components/quickstart.js index dcda3d475..f7ab11fa4 100644 --- a/website/src/components/quickstart.js +++ b/website/src/components/quickstart.js @@ -15,24 +15,18 @@ function getNewChecked(optionId, checkedForId, multiple) { return [...checkedForId, optionId] } -function getRawContent(ref) { - if (ref.current && ref.current.childNodes) { - // Select all currently visible nodes (spans and text nodes) - const result = [...ref.current.childNodes].filter(el => el.offsetParent !== null) - return result.map(el => el.textContent).join('\n') - } - return '' -} - const Quickstart = ({ data = [], title, description, copy = true, download, + rawContent = null, id = 'quickstart', setters = {}, hidePrompts, + small, + codeLang, children, }) => { const contentRef = useRef() @@ -46,6 +40,16 @@ const Quickstart = ({ const [copySuccess, setCopySuccess] = useState(false) const [otherState, setOtherState] = useState({}) const setOther = (id, value) => setOtherState({ ...otherState, [id]: value }) + const getRawContent = ref => { + if (rawContent !== null) return rawContent + if (ref.current && ref.current.childNodes) { + // Select all currently visible nodes (spans and text nodes) + const result = [...ref.current.childNodes].filter(el => el.offsetParent !== null) + return result.map(el => el.textContent).join('\n') + } + return '' + } + const onClickCopy = () => { copyAreaRef.current.value = getRawContent(contentRef) copyToClipboard(copyAreaRef, setCopySuccess) @@ -210,7 +214,14 @@ const Quickstart = ({ } )}
-                    
+                    
                         {children}
                     
 
diff --git a/website/src/components/search.js b/website/src/components/search.js
index 4581516c2..eeab9ef40 100644
--- a/website/src/components/search.js
+++ b/website/src/components/search.js
@@ -41,6 +41,6 @@ Search.propTypes = {
         apiKey: PropTypes.string.isRequired,
         indexName: PropTypes.string.isRequired,
     }).isRequired,
-    id: PropTypes.string.isRequired,
-    placeholder: PropTypes.string.isRequired,
+    id: PropTypes.string,
+    placeholder: PropTypes.string,
 }
diff --git a/website/src/images/icons/package.svg b/website/src/images/icons/package.svg
new file mode 100644
index 000000000..4edaf4e6f
--- /dev/null
+++ b/website/src/images/icons/package.svg
@@ -0,0 +1,5 @@
+
+    
+    
+    
+
diff --git a/website/src/styles/infobox.module.sass b/website/src/styles/infobox.module.sass
index baf9919c3..8d6071f18 100644
--- a/website/src/styles/infobox.module.sass
+++ b/website/src/styles/infobox.module.sass
@@ -14,6 +14,21 @@
         font-size: inherit
         line-height: inherit
 
+    ul li
+        padding-left: 0.75em
+
+.list ul li
+    font-size: var(--font-size-sm)
+    list-style: none
+    padding: 0
+    margin: 0 0 0.35rem 0
+
+    &:before
+        all: initial
+
+    a, a span
+        border-bottom: 0 !important
+
 .title
     font-weight: bold
     color: var(--color-theme)
diff --git a/website/src/styles/quickstart.module.sass b/website/src/styles/quickstart.module.sass
index a10bacca1..91dd19f85 100644
--- a/website/src/styles/quickstart.module.sass
+++ b/website/src/styles/quickstart.module.sass
@@ -124,6 +124,16 @@
     & > span
         display: block
 
+.small
+    font-size: var(--font-size-code)
+    line-height: 1.65
+    white-space: pre-wrap
+    max-height: 400px
+    overflow-y: auto
+
+    & > span
+        display: inline
+
 .hide-prompts .prompt:before
     content: initial !important
 
diff --git a/website/src/widgets/quickstart-training-generator.js b/website/src/widgets/quickstart-training-generator.js
new file mode 100644
index 000000000..c7f856073
--- /dev/null
+++ b/website/src/widgets/quickstart-training-generator.js
@@ -0,0 +1,12 @@
+// This file was auto-generated by jinja_to_js.py based on quickstart_training.jinja
+import jinjaToJS from "jinja-to-js";export default function templateQuickstartTraining(ctx) {
+    var __result = "";
+    var __tmp;
+    var __runtime = jinjaToJS.runtime;
+    var __filters = jinjaToJS.filters;
+    var __globals = jinjaToJS.globals;
+    var context = jinjaToJS.createContext(ctx);
+    var use_transformer = context.transformer_data && context.hardware!=="cpu";var transformer = (use_transformer ? context.transformer_data[context.optimize] : {});__result += "[paths]\ntrain = \"\"\ndev = \"\"\n\n[system]\nuse_pytorch_for_gpu_memory = ";__result += "" + __runtime.escape((__tmp = ((use_transformer ? "true" : "false"))) == null ? "" : __tmp);__result += "\n\n[nlp]\nlang = \"";__result += "" + __runtime.escape((__tmp = (context.lang)) == null ? "" : __tmp);__result += "\"";var full_pipeline = [(use_transformer ? "transformer" : "tok2vec")].concat(context.components);__result += "\npipeline = ";__result += "" + ((__tmp = (JSON.stringify(full_pipeline).split("'").join("\""))) == null ? "" : __tmp);__result += "\ntokenizer = {\"@tokenizers\": \"spacy.Tokenizer.v1\"}\n\n[components]\n\n";if(__runtime.boolean(use_transformer)){__result += "[components.transformer]\nfactory = \"transformer\"\n\n[components.transformer.model]\n@architectures = \"spacy-transformers.TransformerModel.v1\"\nname = \"";__result += "" + __runtime.escape((__tmp = (transformer["name"])) == null ? "" : __tmp);__result += "\"\ntokenizer_config = {\"use_fast\": true}\n\n[components.transformer.model.get_spans]\n@span_getters = \"strided_spans.v1\"\nwindow = 128\nstride = 96\n\n";if(context.components.includes("tagger")){__result += "\n[components.tagger]\nfactory = \"tagger\"\n\n[components.tagger.model]\n@architectures = \"spacy.Tagger.v1\"\nnO = null\n\n[components.tagger.model.tok2vec]\n@architectures = \"spacy-transformers.Tok2VecListener.v1\"\ngrad_factor = 1.0\n\n[components.tagger.model.tok2vec.pooling]\n@layers = \"reduce_mean.v1\"";}__result += "\n\n";if(context.components.includes("parser")){__result += "[components.parser]\nfactory = \"parser\"\n\n[components.parser.model]\n@architectures = \"spacy.TransitionBasedParser.v1\"\nnr_feature_tokens = 8\nhidden_width = 128\nmaxout_pieces = 3\nuse_upper = false\nnO = null\n\n[components.parser.model.tok2vec]\n@architectures = \"spacy-transformers.Tok2VecListener.v1\"\ngrad_factor = 1.0\n\n[components.parser.model.tok2vec.pooling]\n@layers = \"reduce_mean.v1\"";}__result += "\n\n";if(context.components.includes("ner")){__result += "[components.ner]\nfactory = \"ner\"\n\n[components.ner.model]\n@architectures = \"spacy.TransitionBasedParser.v1\"\nnr_feature_tokens = 3\nhidden_width = 64\nmaxout_pieces = 2\nuse_upper = false\nnO = null\n\n[components.ner.model.tok2vec]\n@architectures = \"spacy-transformers.Tok2VecListener.v1\"\ngrad_factor = 1.0\n\n[components.ner.model.tok2vec.pooling]\n@layers = \"reduce_mean.v1\"\n";}__result += "\n";} else {if(context.hardware==="gpu"){__result += "# There are no recommended transformer weights available for language '";__result += "" + __runtime.escape((__tmp = (context.lang)) == null ? "" : __tmp);__result += "'\n# yet, so the pipeline described here is not transformer-based.";}__result += "\n\n[components.tok2vec]\nfactory = \"tok2vec\"\n\n[components.tok2vec.model]\n@architectures = \"spacy.Tok2Vec.v1\"\n\n[components.tok2vec.model.embed]\n@architectures = \"spacy.MultiHashEmbed.v1\"\nwidth = ${components.tok2vec.model.encode:width}\nrows = ";__result += "" + __runtime.escape((__tmp = ((context.optimize==="efficiency" ? 2000 : 7000))) == null ? "" : __tmp);__result += "\nalso_embed_subwords = ";__result += "" + __runtime.escape((__tmp = ((context.has_letters ? true : false))) == null ? 
"" : __tmp);__result += "\nalso_use_static_vectors = ";__result += "" + __runtime.escape((__tmp = ((context.optimize==="accuracy" ? true : false))) == null ? "" : __tmp);__result += "\n\n[components.tok2vec.model.encode]\n@architectures = \"spacy.MaxoutWindowEncoder.v1\"\nwidth = ";__result += "" + __runtime.escape((__tmp = ((context.optimize==="efficiency" ? 96 : 256))) == null ? "" : __tmp);__result += "\ndepth = ";__result += "" + __runtime.escape((__tmp = ((context.optimize==="efficiency" ? 4 : 8))) == null ? "" : __tmp);__result += "\nwindow_size = 1\nmaxout_pieces = 3\n\n";if(context.components.includes("tagger")){__result += "\n[components.tagger]\nfactory = \"tagger\"\n\n[components.tagger.model]\n@architectures = \"spacy.Tagger.v1\"\nnO = null\n\n[components.tagger.model.tok2vec]\n@architectures = \"spacy.Tok2VecListener.v1\"\nwidth = ${components.tok2vec.model.encode:width}";}__result += "\n\n";if(context.components.includes("parser")){__result += "[components.parser]\nfactory = \"parser\"\n\n[components.parser.model]\n@architectures = \"spacy.TransitionBasedParser.v1\"\nnr_feature_tokens = 8\nhidden_width = 128\nmaxout_pieces = 3\nuse_upper = true\nnO = null\n\n[components.parser.model.tok2vec]\n@architectures = \"spacy.Tok2VecListener.v1\"\nwidth = ${components.tok2vec.model.encode:width}";}__result += "\n\n";if(context.components.includes("ner")){__result += "\n[components.ner]\nfactory = \"ner\"\n\n[components.ner.model]\n@architectures = \"spacy.TransitionBasedParser.v1\"\nnr_feature_tokens = 6\nhidden_width = 64\nmaxout_pieces = 2\nuse_upper = true\nnO = null\n\n[components.ner.model.tok2vec]\n@architectures = \"spacy.Tok2VecListener.v1\"\nwidth = ${components.tok2vec.model.encode:width}\n";}__result += "\n";}__result += "\n\n";__runtime.each(context.components,function(pipe){var __$0 = context.pipe;context.pipe = pipe;__result += "\n";if(!["tagger","parser","ner"].includes(pipe)){__result += "\n";__result += "\n[components.";__result += "" + __runtime.escape((__tmp = (pipe)) == null ? "" : __tmp);__result += "]\nfactory = \"";__result += "" + __runtime.escape((__tmp = (pipe)) == null ? "" : __tmp);__result += "\"\n";}__result += "\n";context.pipe = __$0;});__result += "\n\n[training]\n";if(__runtime.boolean(use_transformer) || context.optimize==="efficiency" || !__runtime.boolean(context.word_vectors)){__result += "vectors = null\n";} else {__result += "vectors = \"";__result += "" + __runtime.escape((__tmp = (context.word_vectors)) == null ? "" : __tmp);__result += "\"\n";}if(__runtime.boolean(use_transformer)){__result += "accumulate_gradient = ";__result += "" + __runtime.escape((__tmp = (transformer["size_factor"])) == null ? "" : __tmp);__result += "\n";}__result += "\n\n[training.optimizer]\n@optimizers = \"Adam.v1\"\n\n[training.optimizer.learn_rate]\n@schedules = \"warmup_linear.v1\"\nwarmup_steps = 250\ntotal_steps = 20000\ninitial_rate = 5e-5\n\n[training.train_corpus]\n@readers = \"spacy.Corpus.v1\"\npath = ${paths:train}\nmax_length = ";__result += "" + __runtime.escape((__tmp = ((context.hardware==="gpu" ? 500 : 0))) == null ? 
"" : __tmp);__result += "\n\n[training.dev_corpus]\n@readers = \"spacy.Corpus.v1\"\npath = ${paths:dev}\nmax_length = 0\n\n";if(__runtime.boolean(use_transformer)){__result += "\n[training.batcher]\n@batchers = \"batch_by_padded.v1\"\ndiscard_oversize = true\nsize = 2000\nbuffer = 256";} else {__result += "\n[training.batcher]\n@batchers = \"batch_by_words.v1\"\ndiscard_oversize = false\ntolerance = 0.2\n\n[training.batcher.size]\n@schedules = \"compounding.v1\"\nstart = 100\nstop = 1000\ncompound = 1.001\n";}__result += "\n\n[training.score_weights]";if(context.components.includes("tagger")){__result += "\ntag_acc = ";__result += "" + __runtime.escape((__tmp = (Math.round((1.0 / __filters.size(context.components)+ Number.EPSILON) * 10**2) / 10**2)) == null ? "" : __tmp);}if(context.components.includes("parser")){__result += "\ndep_uas = 0.0\ndep_las = ";__result += "" + __runtime.escape((__tmp = (Math.round((1.0 / __filters.size(context.components)+ Number.EPSILON) * 10**2) / 10**2)) == null ? "" : __tmp);__result += "\nsents_f = 0.0";}if(context.components.includes("ner")){__result += "\nents_f = ";__result += "" + __runtime.escape((__tmp = (Math.round((1.0 / __filters.size(context.components)+ Number.EPSILON) * 10**2) / 10**2)) == null ? "" : __tmp);__result += "\nents_p = 0.0\nents_r = 0.0";}
+    return __result;
+}
+export const DATA = {"en": {"word_vectors": "en_vectors_web_lg", "transformer": {"efficiency": {"name": "roberta-base", "size_factor": 3}, "accuracy": {"name": "roberta-base", "size_factor": 3}}}, "de": {"word_vectors": null, "transformer": null}}
\ No newline at end of file
diff --git a/website/src/widgets/quickstart-training.js b/website/src/widgets/quickstart-training.js
index b7920dd02..4e379e5ec 100644
--- a/website/src/widgets/quickstart-training.js
+++ b/website/src/widgets/quickstart-training.js
@@ -1,13 +1,19 @@
 import React, { useState } from 'react'
 import { StaticQuery, graphql } from 'gatsby'
+import highlightCode from 'gatsby-remark-prismjs/highlight-code.js'
 
-import { Quickstart, QS } from '../components/quickstart'
+import { Quickstart } from '../components/quickstart'
+import generator, { DATA as GENERATOR_DATA } from './quickstart-training-generator'
+import { isString, htmlToReact } from '../components/util'
 
 const DEFAULT_LANG = 'en'
+const DEFAULT_HARDWARE = 'gpu'
+const DEFAULT_OPT = 'efficiency'
 const COMPONENTS = ['tagger', 'parser', 'ner', 'textcat']
-const COMMENT = `# This is an auto-generated partial config for training a model.
-# To use it for training, auto-fill it with all default values.
-# python -m spacy init config config.cfg --base base_config.cfg`
+const COMMENT = `# This is an auto-generated partial config. To use it with 'spacy train'
+# you can run spacy init fill-config to auto-fill all default settings:
+# python -m spacy init fill-config ./base_config.cfg ./config.cfg`
+
 const DATA = [
     {
         id: 'lang',
@@ -25,9 +31,8 @@ const DATA = [
         id: 'hardware',
         title: 'Hardware',
         options: [
-            { id: 'cpu-only', title: 'CPU only' },
-            { id: 'cpu', title: 'CPU preferred' },
-            { id: 'gpu', title: 'GPU', checked: true },
+            { id: 'cpu', title: 'CPU preferred', checked: DEFAULT_HARDWARE === 'cpu' },
+            { id: 'gpu', title: 'GPU', checked: DEFAULT_HARDWARE === 'gpu' },
         ],
     },
     {
@@ -35,28 +40,45 @@ const DATA = [
         title: 'Optimize for',
         help: '...',
         options: [
-            { id: 'efficiency', title: 'efficiency', checked: true },
-            { id: 'accuracy', title: 'accuracy' },
+            { id: 'efficiency', title: 'efficiency', checked: DEFAULT_OPT === 'efficiency' },
+            { id: 'accuracy', title: 'accuracy', checked: DEFAULT_OPT === 'accuracy' },
         ],
     },
-    {
-        id: 'config',
-        title: 'Configuration',
-        options: [
-            {
-                id: 'independent',
-                title: 'independent components',
-                help: "Make components independent and don't share weights",
-            },
-        ],
-        multiple: true,
-    },
 ]
 
+function stringify(value) {
+    if (isString(value) && value.startsWith('${')) return value
+    const string = JSON.stringify(value)
+    if (Array.isArray(value)) return string.replace(/,/g, ', ')
+    return string
+}
+
 export default function QuickstartTraining({ id, title, download = 'config.cfg' }) {
     const [lang, setLang] = useState(DEFAULT_LANG)
-    const [pipeline, setPipeline] = useState([])
-    const setters = { lang: setLang, components: setPipeline }
+    const [components, setComponents] = useState([])
+    const [[hardware], setHardware] = useState([DEFAULT_HARDWARE])
+    const [[optimize], setOptimize] = useState([DEFAULT_OPT])
+    const setters = {
+        lang: setLang,
+        components: setComponents,
+        hardware: setHardware,
+        optimize: setOptimize,
+    }
+    const reco = GENERATOR_DATA[lang] || {}
+    const content = generator({
+        lang,
+        components,
+        optimize,
+        hardware,
+        transformer_data: reco.transformer,
+        word_vectors: reco.word_vectors,
+    })
+    const rawStr = content.trim().replace(/\n\n\n+/g, '\n\n')
+    const rawContent = `${COMMENT}\n${rawStr}`
+    const displayContent = highlightCode('ini', rawContent)
+        .split('\n')
+        .map(line => (line.startsWith('#') ? `${line}` : line))
+        .join('\n')
     return (
          ({ [code]: { sm: 'TODO', lg: 'TODO' } }))
-                )
                 return (
                     
-                        {COMMENT}
-                        [paths]
-                        train = ""
-                        dev = ""
-                        
-                        [nlp]
-                        lang = "{lang}"
-                        pipeline = {JSON.stringify(pipeline).replace(/,/g, ', ')}
-
-                        [components]
-
-                        [components.transformer]
-                        name = "{recommendedTrf[lang].sm}"
-                        name = "{recommendedTrf[lang].lg}"
-                        {!!pipeline.length && }
-                        {pipeline.map((pipe, i) => (
-                            <>
-                                {i !== 0 && }
-                                [components.{pipe}]
-                                factory = "{pipe}"
-
-
-                                [components.parser.model.tok2vec]
-
-                                @architectures = "spacy.Tok2Vec.v1"
-
-
-                        ))}
+                        {htmlToReact(displayContent)}
                 )
             }}