diff --git a/.github/azure-steps.yml b/.github/azure-steps.yml index d0db75f9a..7c3c3e0a6 100644 --- a/.github/azure-steps.yml +++ b/.github/azure-steps.yml @@ -62,6 +62,11 @@ steps: # - script: | # python -W error -c "import ca_core_news_sm; nlp = ca_core_news_sm.load(); doc=nlp('test')" # displayName: 'Test no warnings on load (#11713)' +# condition: eq(variables['python_version'], '3.8') +# +# - script: | +# python -m spacy download ca_core_news_sm 2>&1 | grep -q skipping +# displayName: 'Test skip re-download (#12188)' # condition: eq(variables['python_version'], '3.8') - script: | diff --git a/.gitignore b/.gitignore index ac333f958..af75a4d47 100644 --- a/.gitignore +++ b/.gitignore @@ -10,16 +10,6 @@ spacy/tests/package/setup.cfg spacy/tests/package/pyproject.toml spacy/tests/package/requirements.txt -# Website -website/.cache/ -website/public/ -website/node_modules -website/.npm -website/logs -*.log -npm-debug.log* -quickstart-training-generator.js - # Cython / C extensions cythonize.json spacy/*.html diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e2c5e98fd..8efe733f9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -3,7 +3,7 @@ repos: rev: 22.3.0 hooks: - id: black - language_version: python3.7 + language_version: python3.8 additional_dependencies: ['click==8.0.4'] - repo: https://github.com/pycqa/flake8 rev: 5.0.4 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1f396bd71..1a7c0c9a4 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -271,7 +271,7 @@ except: # noqa: E722 ### Python conventions -All Python code must be written **compatible with Python 3.6+**. More detailed +All Python code must be written **compatible with Python 3.8+**. More detailed code conventions can be found in the [developer docs](https://github.com/explosion/spaCy/blob/master/extra/DEVELOPER_DOCS/Code%20Conventions.md). #### I/O and handling paths diff --git a/Makefile b/Makefile index 4de628663..24a9bcee4 100644 --- a/Makefile +++ b/Makefile @@ -5,7 +5,7 @@ override SPACY_EXTRAS = spacy-lookups-data==1.0.2 jieba spacy-pkuseg==0.0.28 sud endif ifndef PYVER -override PYVER = 3.6 +override PYVER = 3.8 endif VENV := ./env$(PYVER) diff --git a/README.md b/README.md index 195424551..bf8083e0e 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ production-ready [**training system**](https://spacy.io/usage/training) and easy model packaging, deployment and workflow management. spaCy is commercial open-source software, released under the [MIT license](https://github.com/explosion/spaCy/blob/master/LICENSE). 
-💫 **Version 3.4 out now!** +💫 **Version 3.5 out now!** [Check out the release notes here.](https://github.com/explosion/spaCy/releases) [![Azure Pipelines](https://img.shields.io/azure-devops/build/explosion-ai/public/8/master.svg?logo=azure-pipelines&style=flat-square&label=build)](https://dev.azure.com/explosion-ai/public/_build?definitionId=8) @@ -105,7 +105,7 @@ For detailed installation instructions, see the - **Operating system**: macOS / OS X · Linux · Windows (Cygwin, MinGW, Visual Studio) -- **Python version**: Python 3.6+ (only 64 bit) +- **Python version**: Python 3.8+ (only 64 bit) - **Package managers**: [pip] · [conda] (via `conda-forge`) [pip]: https://pypi.org/project/spacy/ diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 0f7ea91f9..a6a575315 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -11,25 +11,39 @@ trigger: exclude: - "website/*" - "*.md" + - "*.mdx" - ".github/workflows/*" pr: paths: exclude: - "*.md" + - "*.mdx" - "website/docs/*" - "website/src/*" + - "website/meta/*.tsx" + - "website/meta/*.mjs" + - "website/meta/languages.json" + - "website/meta/site.json" + - "website/meta/sidebars.json" + - "website/meta/type-annotations.json" + - "website/pages/*" - ".github/workflows/*" jobs: - # Perform basic checks for most important errors (syntax etc.) Uses the config - # defined in .flake8 and overwrites the selected codes. + # Check formatting and linting. Perform basic checks for most important errors + # (syntax etc.) Uses the config defined in setup.cfg and overwrites the + # selected codes. - job: "Validate" pool: vmImage: "ubuntu-latest" steps: - task: UsePythonVersion@0 inputs: - versionSpec: "3.7" + versionSpec: "3.8" + - script: | + pip install black==22.3.0 + python -m black spacy --check + displayName: "black" - script: | pip install flake8==5.0.4 python -m flake8 spacy --count --select=E901,E999,F821,F822,F823,W605 --show-source --statistics @@ -40,24 +54,6 @@ jobs: strategy: matrix: # We're only running one platform per Python version to speed up builds - Python36Linux: - imageName: "ubuntu-20.04" - python.version: "3.6" - # Python36Windows: - # imageName: "windows-latest" - # python.version: "3.6" - # Python36Mac: - # imageName: "macos-latest" - # python.version: "3.6" - # Python37Linux: - # imageName: "ubuntu-20.04" - # python.version: "3.7" - Python37Windows: - imageName: "windows-latest" - python.version: "3.7" - # Python37Mac: - # imageName: "macos-latest" - # python.version: "3.7" # Python38Linux: # imageName: "ubuntu-latest" # python.version: "3.8" diff --git a/pyproject.toml b/pyproject.toml index 72f04dee3..837cf1fd8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ requires = [ "cymem>=2.0.2,<2.1.0", "preshed>=3.0.2,<3.1.0", "murmurhash>=0.28.0,<1.1.0", - "thinc>=9.0.0.dev1,<9.1.0", + "thinc>=9.0.0.dev2,<9.1.0", "numpy>=1.15.0", ] build-backend = "setuptools.build_meta" diff --git a/requirements.txt b/requirements.txt index 5c49f8d29..78cccfbf1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,9 +1,9 @@ # Our libraries -spacy-legacy>=3.0.11,<3.1.0 +spacy-legacy>=4.0.0.dev0,<4.1.0 spacy-loggers>=1.0.0,<2.0.0 cymem>=2.0.2,<2.1.0 preshed>=3.0.2,<3.1.0 -thinc>=9.0.0.dev1,<9.1.0 +thinc>=9.0.0.dev2,<9.1.0 ml_datasets>=0.2.0,<0.3.0 murmurhash>=0.28.0,<1.1.0 wasabi>=0.9.1,<1.2.0 @@ -22,7 +22,6 @@ langcodes>=3.2.0,<4.0.0 # Official Python utilities setuptools packaging>=20.0 -typing_extensions>=3.7.4.1,<4.2.0; python_version < "3.8" # Development dependencies pre-commit>=2.13.0 cython>=0.25,<3.0 @@ -31,8 
+30,7 @@ pytest-timeout>=1.3.0,<2.0.0 mock>=2.0.0,<3.0.0 flake8>=3.8.0,<6.0.0 hypothesis>=3.27.0,<7.0.0 -mypy>=0.990,<0.1000; platform_machine != "aarch64" and python_version >= "3.7" -types-dataclasses>=0.1.3; python_version < "3.7" +mypy>=0.990,<0.1000; platform_machine != "aarch64" types-mock>=0.1.1 types-setuptools>=57.0.0 types-requests diff --git a/setup.cfg b/setup.cfg index deeec99a0..975ec03ce 100644 --- a/setup.cfg +++ b/setup.cfg @@ -17,8 +17,6 @@ classifiers = Operating System :: Microsoft :: Windows Programming Language :: Cython Programming Language :: Python :: 3 - Programming Language :: Python :: 3.6 - Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 @@ -31,15 +29,15 @@ project_urls = [options] zip_safe = false include_package_data = true -python_requires = >=3.6 +python_requires = >=3.8 install_requires = # Our libraries - spacy-legacy>=3.0.11,<3.1.0 + spacy-legacy>=4.0.0.dev0,<4.1.0 spacy-loggers>=1.0.0,<2.0.0 murmurhash>=0.28.0,<1.1.0 cymem>=2.0.2,<2.1.0 preshed>=3.0.2,<3.1.0 - thinc>=9.0.0.dev1,<9.1.0 + thinc>=9.0.0.dev2,<9.1.0 wasabi>=0.9.1,<1.2.0 srsly>=2.4.3,<3.0.0 catalogue>=2.0.6,<2.1.0 @@ -55,7 +53,6 @@ install_requires = # Official Python utilities setuptools packaging>=20.0 - typing_extensions>=3.7.4,<4.2.0; python_version < "3.8" langcodes>=3.2.0,<4.0.0 [options.entry_points] diff --git a/setup.py b/setup.py index 77a4cf283..d5b82ec68 100755 --- a/setup.py +++ b/setup.py @@ -33,12 +33,10 @@ MOD_NAMES = [ "spacy.kb.candidate", "spacy.kb.kb", "spacy.kb.kb_in_memory", - "spacy.ml.parser_model", + "spacy.ml.tb_framework", "spacy.morphology", - "spacy.pipeline.dep_parser", "spacy.pipeline._edit_tree_internals.edit_trees", "spacy.pipeline.morphologizer", - "spacy.pipeline.ner", "spacy.pipeline.pipe", "spacy.pipeline.trainable_pipe", "spacy.pipeline.sentencizer", @@ -46,6 +44,7 @@ MOD_NAMES = [ "spacy.pipeline.tagger", "spacy.pipeline.transition_parser", "spacy.pipeline._parser_internals.arc_eager", + "spacy.pipeline._parser_internals.batch", "spacy.pipeline._parser_internals.ner", "spacy.pipeline._parser_internals.nonproj", "spacy.pipeline._parser_internals.search", @@ -53,6 +52,7 @@ MOD_NAMES = [ "spacy.pipeline._parser_internals.stateclass", "spacy.pipeline._parser_internals.transition_system", "spacy.pipeline._parser_internals._beam_utils", + "spacy.pipeline._parser_internals._parser_utils", "spacy.tokenizer", "spacy.training.align", "spacy.training.gold_io", diff --git a/spacy/about.py b/spacy/about.py index 640e9e93b..eddbeea09 100644 --- a/spacy/about.py +++ b/spacy/about.py @@ -1,6 +1,6 @@ # fmt: off __title__ = "spacy" -__version__ = "3.5.0" +__version__ = "4.0.0.dev0" __download_url__ = "https://github.com/explosion/spacy-models/releases/download" __compatibility__ = "https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json" __projects__ = "https://github.com/explosion/projects" diff --git a/spacy/cli/__init__.py b/spacy/cli/__init__.py index aabd1cfef..868526b42 100644 --- a/spacy/cli/__init__.py +++ b/spacy/cli/__init__.py @@ -4,6 +4,7 @@ from ._util import app, setup_cli # noqa: F401 # These are the actual functions, NOT the wrapped CLI commands. The CLI commands # are registered automatically and won't have to be imported here. 
+from .benchmark_speed import benchmark_speed_cli # noqa: F401 from .download import download # noqa: F401 from .info import info # noqa: F401 from .package import package # noqa: F401 diff --git a/spacy/cli/_util.py b/spacy/cli/_util.py index 6a47ce9da..536b263a0 100644 --- a/spacy/cli/_util.py +++ b/spacy/cli/_util.py @@ -1,4 +1,4 @@ -from typing import Dict, Any, Union, List, Optional, Tuple, Iterable +from typing import Dict, Any, Union, List, Optional, Tuple, Iterable, Literal from typing import TYPE_CHECKING, overload import sys import shutil @@ -16,10 +16,10 @@ from thinc.util import gpu_is_available from configparser import InterpolationError import os -from ..compat import Literal from ..schemas import ProjectConfigSchema, validate from ..util import import_file, run_command, make_tempdir, registry, logger from ..util import is_compatible_version, SimpleFrozenDict, ENV_VARS +from ..errors import RENAMED_LANGUAGE_CODES from .. import about if TYPE_CHECKING: @@ -46,6 +46,7 @@ DEBUG_HELP = """Suite of helpful commands for debugging and profiling. Includes commands to check and validate your config files, training and evaluation data, and custom model implementations. """ +BENCHMARK_HELP = """Commands for benchmarking pipelines.""" INIT_HELP = """Commands for initializing configs and pipeline packages.""" # Wrappers for Typer's annotations. Initially created to set defaults and to @@ -54,12 +55,14 @@ Arg = typer.Argument Opt = typer.Option app = typer.Typer(name=NAME, help=HELP) +benchmark_cli = typer.Typer(name="benchmark", help=BENCHMARK_HELP, no_args_is_help=True) project_cli = typer.Typer(name="project", help=PROJECT_HELP, no_args_is_help=True) debug_cli = typer.Typer(name="debug", help=DEBUG_HELP, no_args_is_help=True) init_cli = typer.Typer(name="init", help=INIT_HELP, no_args_is_help=True) app.add_typer(project_cli) app.add_typer(debug_cli) +app.add_typer(benchmark_cli) app.add_typer(init_cli) @@ -132,6 +135,16 @@ def _parse_override(value: Any) -> Any: return str(value) +def _handle_renamed_language_codes(lang: Optional[str]) -> None: + # Throw error for renamed language codes in v4 + if lang in RENAMED_LANGUAGE_CODES: + msg.fail( + title="Renamed language code", + text=f"Language code '{lang}' was replaced with '{RENAMED_LANGUAGE_CODES[lang]}' in spaCy v4. Update the language code from '{lang}' to '{RENAMED_LANGUAGE_CODES[lang]}'.", + exits=1, + ) + + def load_project_config( path: Path, interpolate: bool = True, overrides: Dict[str, Any] = SimpleFrozenDict() ) -> Dict[str, Any]: diff --git a/spacy/cli/benchmark_speed.py b/spacy/cli/benchmark_speed.py new file mode 100644 index 000000000..4eb20a5fa --- /dev/null +++ b/spacy/cli/benchmark_speed.py @@ -0,0 +1,174 @@ +from typing import Iterable, List, Optional +import random +from itertools import islice +import numpy +from pathlib import Path +import time +from tqdm import tqdm +import typer +from wasabi import msg + +from .. 
import util +from ..language import Language +from ..tokens import Doc +from ..training import Corpus +from ._util import Arg, Opt, benchmark_cli, setup_gpu + + +@benchmark_cli.command( + "speed", + context_settings={"allow_extra_args": True, "ignore_unknown_options": True}, +) +def benchmark_speed_cli( + # fmt: off + ctx: typer.Context, + model: str = Arg(..., help="Model name or path"), + data_path: Path = Arg(..., help="Location of binary evaluation data in .spacy format", exists=True), + batch_size: Optional[int] = Opt(None, "--batch-size", "-b", min=1, help="Override the pipeline batch size"), + no_shuffle: bool = Opt(False, "--no-shuffle", help="Do not shuffle benchmark data"), + use_gpu: int = Opt(-1, "--gpu-id", "-g", help="GPU ID or -1 for CPU"), + n_batches: int = Opt(50, "--batches", help="Minimum number of batches to benchmark", min=30,), + warmup_epochs: int = Opt(3, "--warmup", "-w", min=0, help="Number of iterations over the data for warmup"), + # fmt: on +): + """ + Benchmark a pipeline. Expects a loadable spaCy pipeline and benchmark + data in the binary .spacy format. + """ + setup_gpu(use_gpu=use_gpu, silent=False) + + nlp = util.load_model(model) + batch_size = batch_size if batch_size is not None else nlp.batch_size + corpus = Corpus(data_path) + docs = [eg.predicted for eg in corpus(nlp)] + + if len(docs) == 0: + msg.fail("Cannot benchmark speed using an empty corpus.", exits=1) + + print(f"Warming up for {warmup_epochs} epochs...") + warmup(nlp, docs, warmup_epochs, batch_size) + + print() + print(f"Benchmarking {n_batches} batches...") + wps = benchmark(nlp, docs, n_batches, batch_size, not no_shuffle) + + print() + print_outliers(wps) + print_mean_with_ci(wps) + + +# Lowercased, behaves as a context manager function. +class time_context: + """Register the running time of a context.""" + + def __enter__(self): + self.start = time.perf_counter() + return self + + def __exit__(self, type, value, traceback): + self.elapsed = time.perf_counter() - self.start + + +class Quartiles: + """Calculate the q1, q2, q3 quartiles and the inter-quartile range (iqr) + of a sample.""" + + q1: float + q2: float + q3: float + iqr: float + + def __init__(self, sample: numpy.ndarray) -> None: + self.q1 = numpy.quantile(sample, 0.25) + self.q2 = numpy.quantile(sample, 0.5) + self.q3 = numpy.quantile(sample, 0.75) + self.iqr = self.q3 - self.q1 + + +def annotate( + nlp: Language, docs: List[Doc], batch_size: Optional[int] +) -> numpy.ndarray: + docs = nlp.pipe(tqdm(docs, unit="doc"), batch_size=batch_size) + wps = [] + while True: + with time_context() as elapsed: + batch_docs = list( + islice(docs, batch_size if batch_size else nlp.batch_size) + ) + if len(batch_docs) == 0: + break + n_tokens = count_tokens(batch_docs) + wps.append(n_tokens / elapsed.elapsed) + + return numpy.array(wps) + + +def benchmark( + nlp: Language, + docs: List[Doc], + n_batches: int, + batch_size: int, + shuffle: bool, +) -> numpy.ndarray: + if shuffle: + bench_docs = [ + nlp.make_doc(random.choice(docs).text) + for _ in range(n_batches * batch_size) + ] + else: + bench_docs = [ + nlp.make_doc(docs[i % len(docs)].text) + for i in range(n_batches * batch_size) + ] + + return annotate(nlp, bench_docs, batch_size) + + +def bootstrap(x, statistic=numpy.mean, iterations=10000) -> numpy.ndarray: + """Apply a statistic to repeated random samples of an array.""" + return numpy.fromiter( + ( + statistic(numpy.random.choice(x, len(x), replace=True)) + for _ in range(iterations) + ), + numpy.float64, + ) + + +def 
count_tokens(docs: Iterable[Doc]) -> int: + return sum(len(doc) for doc in docs) + + +def print_mean_with_ci(sample: numpy.ndarray): + mean = numpy.mean(sample) + bootstrap_means = bootstrap(sample) + bootstrap_means.sort() + + # 95% confidence interval + low = bootstrap_means[int(len(bootstrap_means) * 0.025)] + high = bootstrap_means[int(len(bootstrap_means) * 0.975)] + + print(f"Mean: {mean:.1f} words/s (95% CI: {low-mean:.1f} +{high-mean:.1f})") + + +def print_outliers(sample: numpy.ndarray): + quartiles = Quartiles(sample) + + n_outliers = numpy.sum( + (sample < (quartiles.q1 - 1.5 * quartiles.iqr)) + | (sample > (quartiles.q3 + 1.5 * quartiles.iqr)) + ) + n_extreme_outliers = numpy.sum( + (sample < (quartiles.q1 - 3.0 * quartiles.iqr)) + | (sample > (quartiles.q3 + 3.0 * quartiles.iqr)) + ) + print( + f"Outliers: {(100 * n_outliers) / len(sample):.1f}%, extreme outliers: {(100 * n_extreme_outliers) / len(sample)}%" + ) + + +def warmup( + nlp: Language, docs: List[Doc], warmup_epochs: int, batch_size: Optional[int] +) -> numpy.ndarray: + docs = warmup_epochs * docs + return annotate(nlp, docs, batch_size) diff --git a/spacy/cli/convert.py b/spacy/cli/convert.py index 68d454b3e..66f9461a9 100644 --- a/spacy/cli/convert.py +++ b/spacy/cli/convert.py @@ -7,7 +7,7 @@ import re import sys import itertools -from ._util import app, Arg, Opt, walk_directory +from ._util import app, Arg, Opt, _handle_renamed_language_codes, walk_directory from ..training import docs_to_json from ..tokens import Doc, DocBin from ..training.converters import iob_to_docs, conll_ner_to_docs, json_to_docs @@ -112,6 +112,10 @@ def convert( input_path = Path(input_path) if not msg: msg = Printer(no_print=silent) + + # Throw error for renamed language codes in v4 + _handle_renamed_language_codes(lang) + ner_map = srsly.read_json(ner_map) if ner_map is not None else None doc_files = [] for input_loc in walk_directory(input_path, converter): diff --git a/spacy/cli/debug_data.py b/spacy/cli/debug_data.py index 1866f847b..77d529aa8 100644 --- a/spacy/cli/debug_data.py +++ b/spacy/cli/debug_data.py @@ -1,5 +1,5 @@ from typing import Any, Dict, Iterable, List, Optional, Sequence, Set, Tuple, Union -from typing import cast, overload +from typing import Literal, cast, overload from pathlib import Path from collections import Counter import sys @@ -17,10 +17,10 @@ from ..pipeline import TrainablePipe from ..pipeline._parser_internals import nonproj from ..pipeline._parser_internals.nonproj import DELIMITER from ..pipeline import Morphologizer, SpanCategorizer +from ..pipeline._edit_tree_internals.edit_trees import EditTrees from ..morphology import Morphology from ..language import Language from ..util import registry, resolve_dot_names -from ..compat import Literal from ..vectors import Mode as VectorsMode from .. import util @@ -671,6 +671,59 @@ def debug_data( f"Found {gold_train_data['n_cycles']} projectivized train sentence(s) with cycles" ) + if "trainable_lemmatizer" in factory_names: + msg.divider("Trainable Lemmatizer") + trees_train: Set[str] = gold_train_data["lemmatizer_trees"] + trees_dev: Set[str] = gold_dev_data["lemmatizer_trees"] + # This is necessary context when someone is attempting to interpret whether the + # number of trees exclusively in the dev set is meaningful. 
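Aside (not part of the diff): the new `spacy benchmark speed` command added in `spacy/cli/benchmark_speed.py` above reports mean words per second with a 95% confidence interval estimated by bootstrap resampling. A minimal standalone sketch of that calculation, mirroring the `bootstrap` and `print_mean_with_ci` helpers from the diff; the `wps` numbers are made up for illustration.

```python
import numpy


def bootstrap(x, statistic=numpy.mean, iterations=10000):
    # Apply the statistic to repeated resamples (with replacement) of x.
    return numpy.fromiter(
        (
            statistic(numpy.random.choice(x, len(x), replace=True))
            for _ in range(iterations)
        ),
        numpy.float64,
    )


# Illustrative words-per-second measurements for a few benchmarked batches.
wps = numpy.array([10400.0, 9800.0, 11250.0, 10100.0, 9950.0, 10700.0])
means = numpy.sort(bootstrap(wps))
mean = numpy.mean(wps)
low = means[int(len(means) * 0.025)]   # 2.5th percentile
high = means[int(len(means) * 0.975)]  # 97.5th percentile
print(f"Mean: {mean:.1f} words/s (95% CI: {low - mean:.1f} +{high - mean:.1f})")
```

Per the CLI options above, the command benchmarks at least 30 batches (`--batches`, `min=30`), which keeps the bootstrap estimate reasonably stable.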
+ msg.info(f"{len(trees_train)} lemmatizer trees generated from training data") + msg.info(f"{len(trees_dev)} lemmatizer trees generated from dev data") + dev_not_train = trees_dev - trees_train + + if len(dev_not_train) != 0: + pct = len(dev_not_train) / len(trees_dev) + msg.info( + f"{len(dev_not_train)} lemmatizer trees ({pct*100:.1f}% of dev trees)" + " were found exclusively in the dev data." + ) + else: + # Would we ever expect this case? It seems like it would be pretty rare, + # and we might actually want a warning? + msg.info("All trees in dev data present in training data.") + + if gold_train_data["n_low_cardinality_lemmas"] > 0: + n = gold_train_data["n_low_cardinality_lemmas"] + msg.warn(f"{n} training docs with 0 or 1 unique lemmas.") + + if gold_dev_data["n_low_cardinality_lemmas"] > 0: + n = gold_dev_data["n_low_cardinality_lemmas"] + msg.warn(f"{n} dev docs with 0 or 1 unique lemmas.") + + if gold_train_data["no_lemma_annotations"] > 0: + n = gold_train_data["no_lemma_annotations"] + msg.warn(f"{n} training docs with no lemma annotations.") + else: + msg.good("All training docs have lemma annotations.") + + if gold_dev_data["no_lemma_annotations"] > 0: + n = gold_dev_data["no_lemma_annotations"] + msg.warn(f"{n} dev docs with no lemma annotations.") + else: + msg.good("All dev docs have lemma annotations.") + + if gold_train_data["partial_lemma_annotations"] > 0: + n = gold_train_data["partial_lemma_annotations"] + msg.info(f"{n} training docs with partial lemma annotations.") + else: + msg.good("All training docs have complete lemma annotations.") + + if gold_dev_data["partial_lemma_annotations"] > 0: + n = gold_dev_data["partial_lemma_annotations"] + msg.info(f"{n} dev docs with partial lemma annotations.") + else: + msg.good("All dev docs have complete lemma annotations.") + msg.divider("Summary") good_counts = msg.counts[MESSAGES.GOOD] warn_counts = msg.counts[MESSAGES.WARN] @@ -732,7 +785,13 @@ def _compile_gold( "n_cats_multilabel": 0, "n_cats_bad_values": 0, "texts": set(), + "lemmatizer_trees": set(), + "no_lemma_annotations": 0, + "partial_lemma_annotations": 0, + "n_low_cardinality_lemmas": 0, } + if "trainable_lemmatizer" in factory_names: + trees = EditTrees(nlp.vocab.strings) for eg in examples: gold = eg.reference doc = eg.predicted @@ -862,6 +921,25 @@ def _compile_gold( data["n_nonproj"] += 1 if nonproj.contains_cycle(aligned_heads): data["n_cycles"] += 1 + if "trainable_lemmatizer" in factory_names: + # from EditTreeLemmatizer._labels_from_data + if all(token.lemma == 0 for token in gold): + data["no_lemma_annotations"] += 1 + continue + if any(token.lemma == 0 for token in gold): + data["partial_lemma_annotations"] += 1 + lemma_set = set() + for token in gold: + if token.lemma != 0: + lemma_set.add(token.lemma) + tree_id = trees.add(token.text, token.lemma_) + tree_str = trees.tree_to_str(tree_id) + data["lemmatizer_trees"].add(tree_str) + # We want to identify cases where lemmas aren't assigned + # or are all assigned the same value, as this would indicate + # an issue since we're expecting a large set of lemmas + if len(lemma_set) < 2 and len(gold) > 1: + data["n_low_cardinality_lemmas"] += 1 return data diff --git a/spacy/cli/download.py b/spacy/cli/download.py index 4c998a6e0..90471c55e 100644 --- a/spacy/cli/download.py +++ b/spacy/cli/download.py @@ -7,7 +7,8 @@ import typer from ._util import app, Arg, Opt, WHEEL_SUFFIX, SDIST_SUFFIX from .. 
import about from ..util import is_package, get_minor_version, run_command -from ..util import is_prerelease_version +from ..util import is_prerelease_version, get_installed_models +from ..util import get_package_version @app.command( @@ -63,6 +64,14 @@ def download( compatibility = get_compatibility() version = get_version(model_name, compatibility) + # If we already have this version installed, skip downloading + installed = get_installed_models() + if model_name in installed: + installed_version = get_package_version(model_name) + if installed_version == version: + msg.warn(f"{model_name} v{version} already installed, skipping") + return + filename = get_model_filename(model_name, version, sdist) download_model(filename, pip_args) diff --git a/spacy/cli/evaluate.py b/spacy/cli/evaluate.py index 45f4e4029..9eb51e1f8 100644 --- a/spacy/cli/evaluate.py +++ b/spacy/cli/evaluate.py @@ -7,12 +7,15 @@ from thinc.api import fix_random_seed from ..training import Corpus from ..tokens import Doc -from ._util import app, Arg, Opt, setup_gpu, import_code_paths +from ._util import app, Arg, Opt, setup_gpu, import_code_paths, benchmark_cli from ..scorer import Scorer from .. import util from .. import displacy +@benchmark_cli.command( + "accuracy", +) @app.command("evaluate") def evaluate_cli( # fmt: off @@ -36,7 +39,7 @@ def evaluate_cli( dependency parses in a HTML file, set as output directory as the displacy_path argument. - DOCS: https://spacy.io/api/cli#evaluate + DOCS: https://spacy.io/api/cli#benchmark-accuracy """ import_code_paths(code_paths) evaluate( diff --git a/spacy/cli/init_config.py b/spacy/cli/init_config.py index b634caa4c..40e598e5f 100644 --- a/spacy/cli/init_config.py +++ b/spacy/cli/init_config.py @@ -8,11 +8,11 @@ import re from jinja2 import Template from .. import util -from ..language import DEFAULT_CONFIG_PRETRAIN_PATH +from ..language import DEFAULT_CONFIG_DISTILL_PATH, DEFAULT_CONFIG_PRETRAIN_PATH from ..schemas import RecommendationSchema from ..util import SimpleFrozenList from ._util import init_cli, Arg, Opt, show_validation_error, COMMAND -from ._util import string_to_list, import_code +from ._util import string_to_list, import_code, _handle_renamed_language_codes ROOT = Path(__file__).parent / "templates" @@ -43,7 +43,7 @@ class InitValues: def init_config_cli( # fmt: off output_file: Path = Arg(..., help="File to save the config to or - for stdout (will only output config and no additional logging info)", allow_dash=True), - lang: str = Opt(InitValues.lang, "--lang", "-l", help="Two-letter code of the language to use"), + lang: str = Opt(InitValues.lang, "--lang", "-l", help="Code of the language to use"), pipeline: str = Opt(",".join(InitValues.pipeline), "--pipeline", "-p", help="Comma-separated names of trainable pipeline components to include (without 'tok2vec' or 'transformer')"), optimize: Optimizations = Opt(InitValues.optimize, "--optimize", "-o", help="Whether to optimize for efficiency (faster inference, smaller model, lower memory consumption) or higher accuracy (potentially larger and slower model). This will impact the choice of architecture, pretrained weights and related hyperparameters."), gpu: bool = Opt(InitValues.gpu, "--gpu", "-G", help="Whether the model can run on GPU. 
This will impact the choice of architecture, pretrained weights and related hyperparameters."), @@ -83,6 +83,7 @@ def init_fill_config_cli( # fmt: off base_path: Path = Arg(..., help="Path to base config to fill", exists=True, dir_okay=False), output_file: Path = Arg("-", help="Path to output .cfg file (or - for stdout)", allow_dash=True), + distillation: bool = Opt(False, "--distillation", "-dt", help="Include config for distillation (with 'spacy distill')"), pretraining: bool = Opt(False, "--pretraining", "-pt", help="Include config for pretraining (with 'spacy pretrain')"), diff: bool = Opt(False, "--diff", "-D", help="Print a visual diff highlighting the changes"), code_path: Optional[Path] = Opt(None, "--code-path", "--code", "-c", help="Path to Python file with additional code (registered functions) to be imported"), @@ -98,13 +99,20 @@ def init_fill_config_cli( DOCS: https://spacy.io/api/cli#init-fill-config """ import_code(code_path) - fill_config(output_file, base_path, pretraining=pretraining, diff=diff) + fill_config( + output_file, + base_path, + distillation=distillation, + pretraining=pretraining, + diff=diff, + ) def fill_config( output_file: Path, base_path: Path, *, + distillation: bool = False, pretraining: bool = False, diff: bool = False, silent: bool = False, @@ -123,6 +131,9 @@ def fill_config( # replaced with their actual config after loading, so we have to re-add them sourced = util.get_sourced_components(config) filled["components"].update(sourced) + if distillation: + distillation_config = util.load_config(DEFAULT_CONFIG_DISTILL_PATH) + filled = distillation_config.merge(filled) if pretraining: validate_config_for_pretrain(filled, msg) pretrain_config = util.load_config(DEFAULT_CONFIG_PRETRAIN_PATH) @@ -158,6 +169,10 @@ def init_config( msg = Printer(no_print=silent) with TEMPLATE_PATH.open("r") as f: template = Template(f.read()) + + # Throw error for renamed language codes in v4 + _handle_renamed_language_codes(lang) + # Filter out duplicates since tok2vec and transformer are added by template pipeline = [pipe for pipe in pipeline if pipe not in ("tok2vec", "transformer")] defaults = RECOMMENDATIONS["__default__"] diff --git a/spacy/cli/init_pipeline.py b/spacy/cli/init_pipeline.py index d53a61b8e..f279cf793 100644 --- a/spacy/cli/init_pipeline.py +++ b/spacy/cli/init_pipeline.py @@ -9,7 +9,7 @@ from .. import util from ..training.initialize import init_nlp, convert_vectors from ..language import Language from ._util import init_cli, Arg, Opt, parse_config_overrides, show_validation_error -from ._util import import_code, setup_gpu +from ._util import import_code, setup_gpu, _handle_renamed_language_codes @init_cli.command("vectors") @@ -31,6 +31,10 @@ def init_vectors_cli( a model with vectors. 
""" util.logger.setLevel(logging.DEBUG if verbose else logging.INFO) + + # Throw error for renamed language codes in v4 + _handle_renamed_language_codes(lang) + msg.info(f"Creating blank nlp object for language '{lang}'") nlp = util.get_lang_class(lang)() if jsonl_loc is not None: diff --git a/spacy/cli/templates/quickstart_training.jinja b/spacy/cli/templates/quickstart_training.jinja index b961ac892..eb48d1de5 100644 --- a/spacy/cli/templates/quickstart_training.jinja +++ b/spacy/cli/templates/quickstart_training.jinja @@ -87,12 +87,11 @@ grad_factor = 1.0 factory = "parser" [components.parser.model] -@architectures = "spacy.TransitionBasedParser.v2" +@architectures = "spacy.TransitionBasedParser.v3" state_type = "parser" extra_state_tokens = false hidden_width = 128 maxout_pieces = 3 -use_upper = false nO = null [components.parser.model.tok2vec] @@ -108,12 +107,11 @@ grad_factor = 1.0 factory = "ner" [components.ner.model] -@architectures = "spacy.TransitionBasedParser.v2" +@architectures = "spacy.TransitionBasedParser.v3" state_type = "ner" extra_state_tokens = false hidden_width = 64 maxout_pieces = 2 -use_upper = false nO = null [components.ner.model.tok2vec] @@ -314,12 +312,11 @@ width = ${components.tok2vec.model.encode.width} factory = "parser" [components.parser.model] -@architectures = "spacy.TransitionBasedParser.v2" +@architectures = "spacy.TransitionBasedParser.v3" state_type = "parser" extra_state_tokens = false hidden_width = 128 maxout_pieces = 3 -use_upper = true nO = null [components.parser.model.tok2vec] @@ -332,12 +329,11 @@ width = ${components.tok2vec.model.encode.width} factory = "ner" [components.ner.model] -@architectures = "spacy.TransitionBasedParser.v2" +@architectures = "spacy.TransitionBasedParser.v3" state_type = "ner" extra_state_tokens = false hidden_width = 64 maxout_pieces = 2 -use_upper = true nO = null [components.ner.model.tok2vec] diff --git a/spacy/compat.py b/spacy/compat.py index 89132735d..5344b7cd4 100644 --- a/spacy/compat.py +++ b/spacy/compat.py @@ -22,19 +22,6 @@ try: except ImportError: cupy = None -if sys.version_info[:2] >= (3, 8): # Python 3.8+ - from typing import Literal, Protocol, runtime_checkable -else: - from typing_extensions import Literal, Protocol, runtime_checkable # noqa: F401 - -# Important note: The importlib_metadata "backport" includes functionality -# that's not part of the built-in importlib.metadata. We should treat this -# import like the built-in and only use what's available there. 
-try: # Python 3.8+ - import importlib.metadata as importlib_metadata -except ImportError: - from catalogue import _importlib_metadata as importlib_metadata # type: ignore[no-redef] # noqa: F401 - from thinc.api import Optimizer # noqa: F401 pickle = pickle diff --git a/spacy/default_config_distillation.cfg b/spacy/default_config_distillation.cfg new file mode 100644 index 000000000..1926fafa9 --- /dev/null +++ b/spacy/default_config_distillation.cfg @@ -0,0 +1,34 @@ +[paths] +raw_text = null + +[distillation] +corpus = "corpora.distillation" +dropout = 0.1 +max_epochs = 1 +max_steps = 0 +student_to_teacher = {} + +[distillation.batcher] +@batchers = "spacy.batch_by_words.v1" +size = 3000 +discard_oversize = false +tolerance = 0.2 + +[distillation.optimizer] +@optimizers = "Adam.v1" +beta1 = 0.9 +beta2 = 0.999 +L2_is_weight_decay = true +L2 = 0.01 +grad_clip = 1.0 +use_averages = true +eps = 1e-8 +learn_rate = 1e-4 + +[corpora] + +[corpora.distillation] +@readers = "spacy.PlainTextCorpus.v1" +path = ${paths.raw_text} +min_length = 0 +max_length = 0 diff --git a/spacy/displacy/__init__.py b/spacy/displacy/__init__.py index a3cfd96dd..ea6bba2c9 100644 --- a/spacy/displacy/__init__.py +++ b/spacy/displacy/__init__.py @@ -106,9 +106,7 @@ def serve( if is_in_jupyter(): warnings.warn(Warnings.W011) - render( - docs, style=style, page=page, minify=minify, options=options, manual=manual - ) + render(docs, style=style, page=page, minify=minify, options=options, manual=manual) httpd = simple_server.make_server(host, port, app) print(f"\nUsing the '{style}' visualizer") print(f"Serving on http://{host}:{port} ...\n") diff --git a/spacy/errors.py b/spacy/errors.py index ad90dcf8b..eadbf63d6 100644 --- a/spacy/errors.py +++ b/spacy/errors.py @@ -1,5 +1,5 @@ +from typing import Literal import warnings -from .compat import Literal class ErrorsWithCodes(type): @@ -209,6 +209,8 @@ class Warnings(metaclass=ErrorsWithCodes): "`enabled` ({enabled}). Be aware that this might affect other components in your pipeline.") W124 = ("{host}:{port} is already in use, using the nearest available port {serve_port} as an alternative.") + W400 = ("`use_upper=False` is ignored, the upper layer is always enabled") + class Errors(metaclass=ErrorsWithCodes): E001 = ("No component '{name}' found in pipeline. Available names: {opts}") @@ -947,15 +949,20 @@ class Errors(metaclass=ErrorsWithCodes): E1047 = ("`find_threshold()` only supports components with a `scorer` attribute.") E1048 = ("Got '{unexpected}' as console progress bar type, but expected one of the following: {expected}") E1049 = ("No available port found for displaCy on host {host}. Please specify an available port " - "with `displacy.serve(doc, port)`") - E1050 = ("Port {port} is already in use. Please specify an available port with `displacy.serve(doc, port)` " + "with `displacy.serve(doc, port=port)`") + E1050 = ("Port {port} is already in use. 
Please specify an available port with `displacy.serve(doc, port=port)` " "or use `auto_switch_port=True` to pick an available port automatically.") # v4 error strings E4000 = ("Expected a Doc as input, but got: '{type}'") E4001 = ("Expected input to be one of the following types: ({expected_types}), " "but got '{received_type}'") + E4002 = ("Pipe '{name}' requires a teacher pipe for distillation.") + E4003 = ("Training examples for distillation must have the exact same tokens in the " + "reference and predicted docs.") + E4004 = ("Backprop is not supported when is_train is not set.") +RENAMED_LANGUAGE_CODES = {"xx": "mul", "is": "isl"} # fmt: on diff --git a/spacy/kb/kb_in_memory.pyx b/spacy/kb/kb_in_memory.pyx index 485e52c2f..edba523cf 100644 --- a/spacy/kb/kb_in_memory.pyx +++ b/spacy/kb/kb_in_memory.pyx @@ -25,7 +25,7 @@ cdef class InMemoryLookupKB(KnowledgeBase): """An `InMemoryLookupKB` instance stores unique identifiers for entities and their textual aliases, to support entity linking of named entities to real-world concepts. - DOCS: https://spacy.io/api/kb_in_memory + DOCS: https://spacy.io/api/inmemorylookupkb """ def __init__(self, Vocab vocab, entity_vector_length): diff --git a/spacy/lang/is/__init__.py b/spacy/lang/isl/__init__.py similarity index 93% rename from spacy/lang/is/__init__.py rename to spacy/lang/isl/__init__.py index 318363beb..16d1f7957 100644 --- a/spacy/lang/is/__init__.py +++ b/spacy/lang/isl/__init__.py @@ -7,7 +7,7 @@ class IcelandicDefaults(BaseDefaults): class Icelandic(Language): - lang = "is" + lang = "isl" Defaults = IcelandicDefaults diff --git a/spacy/lang/is/stop_words.py b/spacy/lang/isl/stop_words.py similarity index 100% rename from spacy/lang/is/stop_words.py rename to spacy/lang/isl/stop_words.py diff --git a/spacy/lang/xx/__init__.py b/spacy/lang/mul/__init__.py similarity index 67% rename from spacy/lang/xx/__init__.py rename to spacy/lang/mul/__init__.py index aff8403ff..5170f1e86 100644 --- a/spacy/lang/xx/__init__.py +++ b/spacy/lang/mul/__init__.py @@ -3,10 +3,10 @@ from ...language import Language class MultiLanguage(Language): """Language class to be used for models that support multiple languages. - This module allows models to specify their language ID as 'xx'. + This module allows models to specify their language ID as 'mul'. """ - lang = "xx" + lang = "mul" __all__ = ["MultiLanguage"] diff --git a/spacy/lang/xx/examples.py b/spacy/lang/mul/examples.py similarity index 100% rename from spacy/lang/xx/examples.py rename to spacy/lang/mul/examples.py diff --git a/spacy/language.py b/spacy/language.py index dcb62aef0..d2b89029d 100644 --- a/spacy/language.py +++ b/spacy/language.py @@ -1,4 +1,4 @@ -from typing import Iterator, Optional, Any, Dict, Callable, Iterable +from typing import Iterator, Optional, Any, Dict, Callable, Iterable, Literal from typing import Union, Tuple, List, Set, Pattern, Sequence from typing import NoReturn, TYPE_CHECKING, TypeVar, cast, overload @@ -22,7 +22,7 @@ from . import ty from .tokens.underscore import Underscore from .vocab import Vocab, create_vocab from .pipe_analysis import validate_attrs, analyze_pipes, print_pipe_analysis -from .training import Example, validate_examples +from .training import Example, validate_examples, validate_distillation_examples from .training.initialize import init_vocab, init_tok2vec from .scorer import Scorer from .util import registry, SimpleFrozenList, _pipe, raise_error, _DEFAULT_EMPTY_PIPES @@ -40,7 +40,6 @@ from .git_info import GIT_VERSION from . import util from . 
import about from .lookups import load_lookups -from .compat import Literal PipeCallable = Callable[[Doc], Doc] @@ -49,6 +48,9 @@ PipeCallable = Callable[[Doc], Doc] # This is the base config will all settings (training etc.) DEFAULT_CONFIG_PATH = Path(__file__).parent / "default_config.cfg" DEFAULT_CONFIG = util.load_config(DEFAULT_CONFIG_PATH) +# This is the base config for the [distillation] block and currently not included +# in the main config and only added via the 'init fill-config' command +DEFAULT_CONFIG_DISTILL_PATH = Path(__file__).parent / "default_config_distillation.cfg" # This is the base config for the [pretraining] block and currently not included # in the main config and only added via the 'init fill-config' command DEFAULT_CONFIG_PRETRAIN_PATH = Path(__file__).parent / "default_config_pretraining.cfg" @@ -1018,6 +1020,102 @@ class Language: raise ValueError(Errors.E005.format(name=name, returned_type=type(doc))) return doc + def distill( + self, + teacher: "Language", + examples: Iterable[Example], + *, + drop: float = 0.0, + sgd: Optional[Optimizer] = None, + losses: Optional[Dict[str, float]] = None, + component_cfg: Optional[Dict[str, Dict[str, Any]]] = None, + exclude: Iterable[str] = SimpleFrozenList(), + annotates: Iterable[str] = SimpleFrozenList(), + student_to_teacher: Optional[Dict[str, str]] = None, + ): + """Distill the models in a student pipeline from a teacher pipeline. + teacher (Language): Teacher to distill from. + examples (Iterable[Example]): Distillation examples. The reference + (teacher) and predicted (student) docs must have the same number of + tokens and the same orthography. + drop (float): The dropout rate. + sgd (Optional[Optimizer]): An optimizer. + losses (Optional(Dict[str, float])): Dictionary to update with the loss, + keyed by component. + component_cfg (Optional[Dict[str, Dict[str, Any]]]): Config parameters + for specific pipeline components, keyed by component name. + exclude (Iterable[str]): Names of components that shouldn't be updated. + annotates (Iterable[str]): Names of components that should set + annotations on the predicted examples after updating. + student_to_teacher (Optional[Dict[str, str]]): Map student pipe name to + teacher pipe name, only needed for pipes where the student pipe + name does not match the teacher pipe name. 
+ RETURNS (Dict[str, float]): The updated losses dictionary + + DOCS: https://spacy.io/api/language#distill + """ + if student_to_teacher is None: + student_to_teacher = {} + if losses is None: + losses = {} + if isinstance(examples, list) and len(examples) == 0: + return losses + + validate_distillation_examples(examples, "Language.distill") + examples = _copy_examples(examples, copy_x=True, copy_y=True) + + if sgd is None: + if self._optimizer is None: + self._optimizer = self.create_optimizer() + sgd = self._optimizer + + if component_cfg is None: + component_cfg = {} + pipe_kwargs = {} + for student_name, student_proc in self.pipeline: + component_cfg.setdefault(student_name, {}) + pipe_kwargs[student_name] = deepcopy(component_cfg[student_name]) + component_cfg[student_name].setdefault("drop", drop) + pipe_kwargs[student_name].setdefault("batch_size", self.batch_size) + + teacher_pipes = dict(teacher.pipeline) + for student_name, student_proc in self.pipeline: + if student_name in annotates: + for doc, eg in zip( + _pipe( + (eg.predicted for eg in examples), + proc=student_proc, + name=student_name, + default_error_handler=self.default_error_handler, + kwargs=pipe_kwargs[student_name], + ), + examples, + ): + eg.predicted = doc + + if ( + student_name not in exclude + and isinstance(student_proc, ty.DistillableComponent) + and student_proc.is_distillable + ): + # A missing teacher pipe is not an error, some student pipes + # do not need a teacher, such as tok2vec layer losses. + teacher_name = ( + student_to_teacher[student_name] + if student_name in student_to_teacher + else student_name + ) + teacher_pipe = teacher_pipes.get(teacher_name, None) + student_proc.distill( + teacher_pipe, + examples, + sgd=sgd, + losses=losses, + **component_cfg[student_name], + ) + + return losses + def disable_pipes(self, *names) -> "DisabledPipes": """Disable one or more pipeline components. If used as a context manager, the pipeline will be restored to the initial state at the end @@ -1243,12 +1341,16 @@ class Language: self, get_examples: Optional[Callable[[], Iterable[Example]]] = None, *, + labels: Optional[Dict[str, Any]] = None, sgd: Optional[Optimizer] = None, ) -> Optimizer: """Initialize the pipe for training, using data examples if available. get_examples (Callable[[], Iterable[Example]]): Optional function that returns gold-standard Example objects. + labels (Optional[Dict[str, Any]]): Labels to pass to pipe initialization, + using the names of the pipes as keys. Overrides labels that are in + the model configuration. sgd (Optional[Optimizer]): An optimizer to use for updates. If not provided, will be created using the .create_optimizer() method. RETURNS (thinc.api.Optimizer): The optimizer. @@ -1293,6 +1395,8 @@ class Language: for name, proc in self.pipeline: if isinstance(proc, ty.InitializableComponent): p_settings = I["components"].get(name, {}) + if labels is not None and name in labels: + p_settings["labels"] = labels[name] p_settings = validate_init_settings( proc.initialize, p_settings, section="components", name=name ) @@ -1726,6 +1830,7 @@ class Language: # using the nlp.config with all defaults. 
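Aside (not part of the diff): a minimal sketch of the new `labels` argument on `Language.initialize` added earlier in this `language.py` diff. The pipe name, example text, and tag values below are hypothetical, chosen only to show the call shape.

```python
import spacy
from spacy.training import Example

nlp = spacy.blank("en")
nlp.add_pipe("tagger")

# One tiny training example; the tag values here are purely illustrative.
doc = nlp.make_doc("A sentence")
train_examples = [Example.from_dict(doc, {"tags": ["DET", "NOUN"]})]

# The new `labels` argument injects a per-pipe label set into that pipe's
# initialize() settings, overriding labels from the config or the data.
nlp.initialize(lambda: train_examples, labels={"tagger": ["DET", "NOUN", "VERB"]})
```

When a pipe name is absent from `labels`, its settings fall back to the `[initialize.components]` block as before.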
config = util.copy_config(config) orig_pipeline = config.pop("components", {}) + orig_distill = config.pop("distill", None) orig_pretraining = config.pop("pretraining", None) config["components"] = {} if auto_fill: @@ -1734,6 +1839,9 @@ class Language: filled = config filled["components"] = orig_pipeline config["components"] = orig_pipeline + if orig_distill is not None: + filled["distill"] = orig_distill + config["distill"] = orig_distill if orig_pretraining is not None: filled["pretraining"] = orig_pretraining config["pretraining"] = orig_pretraining @@ -2223,13 +2331,18 @@ class DisabledPipes(list): self[:] = [] -def _copy_examples(examples: Iterable[Example]) -> List[Example]: +def _copy_examples( + examples: Iterable[Example], *, copy_x: bool = True, copy_y: bool = False +) -> List[Example]: """Make a copy of a batch of examples, copying the predicted Doc as well. This is used in contexts where we need to take ownership of the examples so that they can be mutated, for instance during Language.evaluate and Language.update. """ - return [Example(eg.x.copy(), eg.y) for eg in examples] + return [ + Example(eg.x.copy() if copy_x else eg.x, eg.y.copy() if copy_y else eg.y) + for eg in examples + ] def _apply_pipes( diff --git a/spacy/lexeme.pyx b/spacy/lexeme.pyx index 73bf28dc2..e57098f17 100644 --- a/spacy/lexeme.pyx +++ b/spacy/lexeme.pyx @@ -41,7 +41,7 @@ cdef class Lexeme: """ self.vocab = vocab self.orth = orth - self.c = vocab.get_by_orth(vocab.mem, orth) + self.c = vocab.get_by_orth(orth) if self.c.orth != orth: raise ValueError(Errors.E071.format(orth=orth, vocab_orth=self.c.orth)) diff --git a/spacy/matcher/levenshtein.pyx b/spacy/matcher/levenshtein.pyx index 0e8cd26da..e823ce99d 100644 --- a/spacy/matcher/levenshtein.pyx +++ b/spacy/matcher/levenshtein.pyx @@ -22,7 +22,7 @@ cpdef bint levenshtein_compare(input_text: str, pattern_text: str, fuzzy: int = max_edits = fuzzy else: # allow at least two edits (to allow at least one transposition) and up - # to 20% of the pattern string length + # to 30% of the pattern string length max_edits = max(2, round(0.3 * len(pattern_text))) return levenshtein(input_text, pattern_text, max_edits) <= max_edits diff --git a/spacy/matcher/matcher.pyi b/spacy/matcher/matcher.pyi index 77ea7b7a6..9797463aa 100644 --- a/spacy/matcher/matcher.pyi +++ b/spacy/matcher/matcher.pyi @@ -1,12 +1,15 @@ -from typing import Any, List, Dict, Tuple, Optional, Callable, Union +from typing import Any, List, Dict, Tuple, Optional, Callable, Union, Literal from typing import Iterator, Iterable, overload -from ..compat import Literal from ..vocab import Vocab from ..tokens import Doc, Span class Matcher: - def __init__(self, vocab: Vocab, validate: bool = ..., - fuzzy_compare: Callable[[str, str, int], bool] = ...) -> None: ... + def __init__( + self, + vocab: Vocab, + validate: bool = ..., + fuzzy_compare: Callable[[str, str, int], bool] = ..., + ) -> None: ... def __reduce__(self) -> Any: ... def __len__(self) -> int: ... def __contains__(self, key: str) -> bool: ... 
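Aside (not part of the diff): a hedged usage sketch of the new `Language.distill` API described in the `language.py` changes above. The teacher model name (`en_core_web_lg`), the student setup, and the example text are assumptions for illustration, not a definitive recipe.

```python
import spacy
from spacy.training import Example

# Assumed setup: distill a small student tagger from a trained teacher
# pipeline. The teacher model name is only an example.
teacher = spacy.load("en_core_web_lg")
student = spacy.blank("en")
student.add_pipe("tagger")

# Distillation examples: the reference (teacher) and predicted (student) docs
# must contain exactly the same tokens; the teacher supplies the targets.
texts = ["The quick brown fox jumps over the lazy dog."]
examples = [Example(student.make_doc(t), teacher.make_doc(t)) for t in texts]

# Give the student the teacher's label inventory so the output layers line up
# (using the `labels` override on Language.initialize added in this diff).
student.initialize(
    lambda: examples,
    labels={"tagger": list(teacher.get_pipe("tagger").label_data)},
)

losses = student.distill(teacher, examples, drop=0.1)
print(losses)
```

If the student pipe names differ from the teacher's, the `student_to_teacher` mapping documented in the `distill` docstring above maps them; here the names match, so it can be omitted.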
diff --git a/spacy/matcher/phrasematcher.pyi b/spacy/matcher/phrasematcher.pyi index 670c87409..af3a2d23b 100644 --- a/spacy/matcher/phrasematcher.pyi +++ b/spacy/matcher/phrasematcher.pyi @@ -1,5 +1,5 @@ -from typing import List, Tuple, Union, Optional, Callable, Any, Dict, overload -from ..compat import Literal +from typing import List, Tuple, Union, Optional, Callable, Any, Dict, Literal +from typing import overload from .matcher import Matcher from ..vocab import Vocab from ..tokens import Doc, Span diff --git a/spacy/ml/_precomputable_affine.py b/spacy/ml/_precomputable_affine.py deleted file mode 100644 index 1c20c622b..000000000 --- a/spacy/ml/_precomputable_affine.py +++ /dev/null @@ -1,164 +0,0 @@ -from thinc.api import Model, normal_init - -from ..util import registry - - -@registry.layers("spacy.PrecomputableAffine.v1") -def PrecomputableAffine(nO, nI, nF, nP, dropout=0.1): - model = Model( - "precomputable_affine", - forward, - init=init, - dims={"nO": nO, "nI": nI, "nF": nF, "nP": nP}, - params={"W": None, "b": None, "pad": None}, - attrs={"dropout_rate": dropout}, - ) - return model - - -def forward(model, X, is_train): - nF = model.get_dim("nF") - nO = model.get_dim("nO") - nP = model.get_dim("nP") - nI = model.get_dim("nI") - W = model.get_param("W") - # Preallocate array for layer output, including padding. - Yf = model.ops.alloc2f(X.shape[0] + 1, nF * nO * nP, zeros=False) - model.ops.gemm(X, W.reshape((nF * nO * nP, nI)), trans2=True, out=Yf[1:]) - Yf = Yf.reshape((Yf.shape[0], nF, nO, nP)) - - # Set padding. Padding has shape (1, nF, nO, nP). Unfortunately, we cannot - # change its shape to (nF, nO, nP) without breaking existing models. So - # we'll squeeze the first dimension here. - Yf[0] = model.ops.xp.squeeze(model.get_param("pad"), 0) - - def backward(dY_ids): - # This backprop is particularly tricky, because we get back a different - # thing from what we put out. We put out an array of shape: - # (nB, nF, nO, nP), and get back: - # (nB, nO, nP) and ids (nB, nF) - # The ids tell us the values of nF, so we would have: - # - # dYf = zeros((nB, nF, nO, nP)) - # for b in range(nB): - # for f in range(nF): - # dYf[b, ids[b, f]] += dY[b] - # - # However, we avoid building that array for efficiency -- and just pass - # in the indices. - dY, ids = dY_ids - assert dY.ndim == 3 - assert dY.shape[1] == nO, dY.shape - assert dY.shape[2] == nP, dY.shape - # nB = dY.shape[0] - model.inc_grad("pad", _backprop_precomputable_affine_padding(model, dY, ids)) - Xf = X[ids] - Xf = Xf.reshape((Xf.shape[0], nF * nI)) - - model.inc_grad("b", dY.sum(axis=0)) - dY = dY.reshape((dY.shape[0], nO * nP)) - - Wopfi = W.transpose((1, 2, 0, 3)) - Wopfi = Wopfi.reshape((nO * nP, nF * nI)) - dXf = model.ops.gemm(dY.reshape((dY.shape[0], nO * nP)), Wopfi) - - dWopfi = model.ops.gemm(dY, Xf, trans1=True) - dWopfi = dWopfi.reshape((nO, nP, nF, nI)) - # (o, p, f, i) --> (f, o, p, i) - dWopfi = dWopfi.transpose((2, 0, 1, 3)) - model.inc_grad("W", dWopfi) - return dXf.reshape((dXf.shape[0], nF, nI)) - - return Yf, backward - - -def _backprop_precomputable_affine_padding(model, dY, ids): - nB = dY.shape[0] - nF = model.get_dim("nF") - nP = model.get_dim("nP") - nO = model.get_dim("nO") - # Backprop the "padding", used as a filler for missing values. - # Values that are missing are set to -1, and each state vector could - # have multiple missing values. The padding has different values for - # different missing features. 
The gradient of the padding vector is: - # - # for b in range(nB): - # for f in range(nF): - # if ids[b, f] < 0: - # d_pad[f] += dY[b] - # - # Which can be rewritten as: - # - # (ids < 0).T @ dY - mask = model.ops.asarray(ids < 0, dtype="f") - d_pad = model.ops.gemm(mask, dY.reshape(nB, nO * nP), trans1=True) - return d_pad.reshape((1, nF, nO, nP)) - - -def init(model, X=None, Y=None): - """This is like the 'layer sequential unit variance', but instead - of taking the actual inputs, we randomly generate whitened data. - - Why's this all so complicated? We have a huge number of inputs, - and the maxout unit makes guessing the dynamics tricky. Instead - we set the maxout weights to values that empirically result in - whitened outputs given whitened inputs. - """ - if model.has_param("W") and model.get_param("W").any(): - return - - nF = model.get_dim("nF") - nO = model.get_dim("nO") - nP = model.get_dim("nP") - nI = model.get_dim("nI") - W = model.ops.alloc4f(nF, nO, nP, nI) - b = model.ops.alloc2f(nO, nP) - pad = model.ops.alloc4f(1, nF, nO, nP) - - ops = model.ops - W = normal_init(ops, W.shape, mean=float(ops.xp.sqrt(1.0 / nF * nI))) - pad = normal_init(ops, pad.shape, mean=1.0) - model.set_param("W", W) - model.set_param("b", b) - model.set_param("pad", pad) - - ids = ops.alloc((5000, nF), dtype="f") - ids += ops.xp.random.uniform(0, 1000, ids.shape) - ids = ops.asarray(ids, dtype="i") - tokvecs = ops.alloc((5000, nI), dtype="f") - tokvecs += ops.xp.random.normal(loc=0.0, scale=1.0, size=tokvecs.size).reshape( - tokvecs.shape - ) - - def predict(ids, tokvecs): - # nS ids. nW tokvecs. Exclude the padding array. - hiddens = model.predict(tokvecs[:-1]) # (nW, f, o, p) - vectors = model.ops.alloc((ids.shape[0], nO * nP), dtype="f") - # need nS vectors - hiddens = hiddens.reshape((hiddens.shape[0] * nF, nO * nP)) - model.ops.scatter_add(vectors, ids.flatten(), hiddens) - vectors = vectors.reshape((vectors.shape[0], nO, nP)) - vectors += b - vectors = model.ops.asarray(vectors) - if nP >= 2: - return model.ops.maxout(vectors)[0] - else: - return vectors * (vectors >= 0) - - tol_var = 0.01 - tol_mean = 0.01 - t_max = 10 - W = model.get_param("W").copy() - b = model.get_param("b").copy() - for t_i in range(t_max): - acts1 = predict(ids, tokvecs) - var = model.ops.xp.var(acts1) - mean = model.ops.xp.mean(acts1) - if abs(var - 1.0) >= tol_var: - W /= model.ops.xp.sqrt(var) - model.set_param("W", W) - elif abs(mean) >= tol_mean: - b -= mean - model.set_param("b", b) - else: - break diff --git a/spacy/ml/callbacks.py b/spacy/ml/callbacks.py index 3b60ec2ab..393f208a6 100644 --- a/spacy/ml/callbacks.py +++ b/spacy/ml/callbacks.py @@ -23,6 +23,7 @@ DEFAULT_NVTX_ANNOTATABLE_PIPE_METHODS = [ "update", "rehearse", "get_loss", + "get_teacher_student_loss", "initialize", "begin_update", "finish_update", diff --git a/spacy/ml/models/parser.py b/spacy/ml/models/parser.py index a70d84dea..01312983d 100644 --- a/spacy/ml/models/parser.py +++ b/spacy/ml/models/parser.py @@ -1,17 +1,19 @@ -from typing import Optional, List, cast -from thinc.api import Model, chain, list2array, Linear, zero_init, use_ops +from typing import Optional, List, Tuple, Any, Literal from thinc.types import Floats2d +from thinc.api import Model +import warnings -from ...errors import Errors -from ...compat import Literal +from ...errors import Errors, Warnings from ...util import registry -from .._precomputable_affine import PrecomputableAffine from ..tb_framework import TransitionModel -from ...tokens import Doc +from ...tokens.doc 
import Doc + +TransitionSystem = Any # TODO +State = Any # TODO -@registry.architectures("spacy.TransitionBasedParser.v2") -def build_tb_parser_model( +@registry.architectures.register("spacy.TransitionBasedParser.v2") +def transition_parser_v2( tok2vec: Model[List[Doc], List[Floats2d]], state_type: Literal["parser", "ner"], extra_state_tokens: bool, @@ -19,6 +21,46 @@ def build_tb_parser_model( maxout_pieces: int, use_upper: bool, nO: Optional[int] = None, +) -> Model: + if not use_upper: + warnings.warn(Warnings.W400) + + return build_tb_parser_model( + tok2vec, + state_type, + extra_state_tokens, + hidden_width, + maxout_pieces, + nO=nO, + ) + + +@registry.architectures.register("spacy.TransitionBasedParser.v3") +def transition_parser_v3( + tok2vec: Model[List[Doc], List[Floats2d]], + state_type: Literal["parser", "ner"], + extra_state_tokens: bool, + hidden_width: int, + maxout_pieces: int, + nO: Optional[int] = None, +) -> Model: + return build_tb_parser_model( + tok2vec, + state_type, + extra_state_tokens, + hidden_width, + maxout_pieces, + nO=nO, + ) + + +def build_tb_parser_model( + tok2vec: Model[List[Doc], List[Floats2d]], + state_type: Literal["parser", "ner"], + extra_state_tokens: bool, + hidden_width: int, + maxout_pieces: int, + nO: Optional[int] = None, ) -> Model: """ Build a transition-based parser model. Can apply to NER or dependency-parsing. @@ -51,14 +93,7 @@ def build_tb_parser_model( feature sets (for the NER) or 13 (for the parser). hidden_width (int): The width of the hidden layer. maxout_pieces (int): How many pieces to use in the state prediction layer. - Recommended values are 1, 2 or 3. If 1, the maxout non-linearity - is replaced with a ReLu non-linearity if use_upper=True, and no - non-linearity if use_upper=False. - use_upper (bool): Whether to use an additional hidden layer after the state - vector in order to predict the action scores. It is recommended to set - this to False for large pretrained models such as transformers, and True - for smaller networks. The upper layer is computed on CPU, which becomes - a bottleneck on larger GPU-based models, where it's also less necessary. + Recommended values are 1, 2 or 3. nO (int or None): The number of actions the model will predict between. Usually inferred from data at the beginning of training, or loaded from disk. @@ -69,106 +104,11 @@ def build_tb_parser_model( nr_feature_tokens = 6 if extra_state_tokens else 3 else: raise ValueError(Errors.E917.format(value=state_type)) - t2v_width = tok2vec.get_dim("nO") if tok2vec.has_dim("nO") else None - tok2vec = chain( - tok2vec, - list2array(), - Linear(hidden_width, t2v_width), + return TransitionModel( + tok2vec=tok2vec, + state_tokens=nr_feature_tokens, + hidden_width=hidden_width, + maxout_pieces=maxout_pieces, + nO=nO, + unseen_classes=set(), ) - tok2vec.set_dim("nO", hidden_width) - lower = _define_lower( - nO=hidden_width if use_upper else nO, - nF=nr_feature_tokens, - nI=tok2vec.get_dim("nO"), - nP=maxout_pieces, - ) - upper = None - if use_upper: - with use_ops("cpu"): - # Initialize weights at zero, as it's a classification layer. 
- upper = _define_upper(nO=nO, nI=None) - return TransitionModel(tok2vec, lower, upper, resize_output) - - -def _define_upper(nO, nI): - return Linear(nO=nO, nI=nI, init_W=zero_init) - - -def _define_lower(nO, nF, nI, nP): - return PrecomputableAffine(nO=nO, nF=nF, nI=nI, nP=nP) - - -def resize_output(model, new_nO): - if model.attrs["has_upper"]: - return _resize_upper(model, new_nO) - return _resize_lower(model, new_nO) - - -def _resize_upper(model, new_nO): - upper = model.get_ref("upper") - if upper.has_dim("nO") is None: - upper.set_dim("nO", new_nO) - return model - elif new_nO == upper.get_dim("nO"): - return model - - smaller = upper - nI = smaller.maybe_get_dim("nI") - with use_ops("cpu"): - larger = _define_upper(nO=new_nO, nI=nI) - # it could be that the model is not initialized yet, then skip this bit - if smaller.has_param("W"): - larger_W = larger.ops.alloc2f(new_nO, nI) - larger_b = larger.ops.alloc1f(new_nO) - smaller_W = smaller.get_param("W") - smaller_b = smaller.get_param("b") - # Weights are stored in (nr_out, nr_in) format, so we're basically - # just adding rows here. - if smaller.has_dim("nO"): - old_nO = smaller.get_dim("nO") - larger_W[:old_nO] = smaller_W - larger_b[:old_nO] = smaller_b - for i in range(old_nO, new_nO): - model.attrs["unseen_classes"].add(i) - - larger.set_param("W", larger_W) - larger.set_param("b", larger_b) - model._layers[-1] = larger - model.set_ref("upper", larger) - return model - - -def _resize_lower(model, new_nO): - lower = model.get_ref("lower") - if lower.has_dim("nO") is None: - lower.set_dim("nO", new_nO) - return model - - smaller = lower - nI = smaller.maybe_get_dim("nI") - nF = smaller.maybe_get_dim("nF") - nP = smaller.maybe_get_dim("nP") - larger = _define_lower(nO=new_nO, nI=nI, nF=nF, nP=nP) - # it could be that the model is not initialized yet, then skip this bit - if smaller.has_param("W"): - larger_W = larger.ops.alloc4f(nF, new_nO, nP, nI) - larger_b = larger.ops.alloc2f(new_nO, nP) - larger_pad = larger.ops.alloc4f(1, nF, new_nO, nP) - smaller_W = smaller.get_param("W") - smaller_b = smaller.get_param("b") - smaller_pad = smaller.get_param("pad") - # Copy the old weights and padding into the new layer - if smaller.has_dim("nO"): - old_nO = smaller.get_dim("nO") - larger_W[:, 0:old_nO, :, :] = smaller_W - larger_pad[:, :, 0:old_nO, :] = smaller_pad - larger_b[0:old_nO, :] = smaller_b - for i in range(old_nO, new_nO): - model.attrs["unseen_classes"].add(i) - - larger.set_param("W", larger_W) - larger.set_param("b", larger_b) - larger.set_param("pad", larger_pad) - model._layers[1] = larger - model.set_ref("lower", larger) - return model diff --git a/spacy/ml/parser_model.pxd b/spacy/ml/parser_model.pxd deleted file mode 100644 index 8def6cea5..000000000 --- a/spacy/ml/parser_model.pxd +++ /dev/null @@ -1,49 +0,0 @@ -from libc.string cimport memset, memcpy -from thinc.backends.cblas cimport CBlas -from ..typedefs cimport weight_t, hash_t -from ..pipeline._parser_internals._state cimport StateC - - -cdef struct SizesC: - int states - int classes - int hiddens - int pieces - int feats - int embed_width - - -cdef struct WeightsC: - const float* feat_weights - const float* feat_bias - const float* hidden_bias - const float* hidden_weights - const float* seen_classes - - -cdef struct ActivationsC: - int* token_ids - float* unmaxed - float* scores - float* hiddens - int* is_valid - int _curr_size - int _max_size - - -cdef WeightsC get_c_weights(model) except * - -cdef SizesC get_c_sizes(model, int batch_size) except * - -cdef 
ActivationsC alloc_activations(SizesC n) nogil - -cdef void free_activations(const ActivationsC* A) nogil - -cdef void predict_states(CBlas cblas, ActivationsC* A, StateC** states, - const WeightsC* W, SizesC n) nogil - -cdef int arg_max_if_valid(const weight_t* scores, const int* is_valid, int n) nogil - -cdef void cpu_log_loss(float* d_scores, - const float* costs, const int* is_valid, const float* scores, int O) nogil - diff --git a/spacy/ml/parser_model.pyx b/spacy/ml/parser_model.pyx deleted file mode 100644 index 91558683b..000000000 --- a/spacy/ml/parser_model.pyx +++ /dev/null @@ -1,500 +0,0 @@ -# cython: infer_types=True, cdivision=True, boundscheck=False -cimport numpy as np -from libc.math cimport exp -from libc.string cimport memset, memcpy -from libc.stdlib cimport calloc, free, realloc -from thinc.backends.cblas cimport saxpy, sgemm - -import numpy -import numpy.random -from thinc.api import Model, CupyOps, NumpyOps, get_ops - -from .. import util -from ..errors import Errors -from ..typedefs cimport weight_t, class_t, hash_t -from ..pipeline._parser_internals.stateclass cimport StateClass - - -cdef WeightsC get_c_weights(model) except *: - cdef WeightsC output - cdef precompute_hiddens state2vec = model.state2vec - output.feat_weights = state2vec.get_feat_weights() - output.feat_bias = state2vec.bias.data - cdef np.ndarray vec2scores_W - cdef np.ndarray vec2scores_b - if model.vec2scores is None: - output.hidden_weights = NULL - output.hidden_bias = NULL - else: - vec2scores_W = model.vec2scores.get_param("W") - vec2scores_b = model.vec2scores.get_param("b") - output.hidden_weights = vec2scores_W.data - output.hidden_bias = vec2scores_b.data - cdef np.ndarray class_mask = model._class_mask - output.seen_classes = class_mask.data - return output - - -cdef SizesC get_c_sizes(model, int batch_size) except *: - cdef SizesC output - output.states = batch_size - if model.vec2scores is None: - output.classes = model.state2vec.get_dim("nO") - else: - output.classes = model.vec2scores.get_dim("nO") - output.hiddens = model.state2vec.get_dim("nO") - output.pieces = model.state2vec.get_dim("nP") - output.feats = model.state2vec.get_dim("nF") - output.embed_width = model.tokvecs.shape[1] - return output - - -cdef ActivationsC alloc_activations(SizesC n) nogil: - cdef ActivationsC A - memset(&A, 0, sizeof(A)) - resize_activations(&A, n) - return A - - -cdef void free_activations(const ActivationsC* A) nogil: - free(A.token_ids) - free(A.scores) - free(A.unmaxed) - free(A.hiddens) - free(A.is_valid) - - -cdef void resize_activations(ActivationsC* A, SizesC n) nogil: - if n.states <= A._max_size: - A._curr_size = n.states - return - if A._max_size == 0: - A.token_ids = calloc(n.states * n.feats, sizeof(A.token_ids[0])) - A.scores = calloc(n.states * n.classes, sizeof(A.scores[0])) - A.unmaxed = calloc(n.states * n.hiddens * n.pieces, sizeof(A.unmaxed[0])) - A.hiddens = calloc(n.states * n.hiddens, sizeof(A.hiddens[0])) - A.is_valid = calloc(n.states * n.classes, sizeof(A.is_valid[0])) - A._max_size = n.states - else: - A.token_ids = realloc(A.token_ids, - n.states * n.feats * sizeof(A.token_ids[0])) - A.scores = realloc(A.scores, - n.states * n.classes * sizeof(A.scores[0])) - A.unmaxed = realloc(A.unmaxed, - n.states * n.hiddens * n.pieces * sizeof(A.unmaxed[0])) - A.hiddens = realloc(A.hiddens, - n.states * n.hiddens * sizeof(A.hiddens[0])) - A.is_valid = realloc(A.is_valid, - n.states * n.classes * sizeof(A.is_valid[0])) - A._max_size = n.states - A._curr_size = n.states - - -cdef void 
predict_states(CBlas cblas, ActivationsC* A, StateC** states, - const WeightsC* W, SizesC n) nogil: - cdef double one = 1.0 - resize_activations(A, n) - for i in range(n.states): - states[i].set_context_tokens(&A.token_ids[i*n.feats], n.feats) - memset(A.unmaxed, 0, n.states * n.hiddens * n.pieces * sizeof(float)) - memset(A.hiddens, 0, n.states * n.hiddens * sizeof(float)) - sum_state_features(cblas, A.unmaxed, - W.feat_weights, A.token_ids, n.states, n.feats, n.hiddens * n.pieces) - for i in range(n.states): - saxpy(cblas)(n.hiddens * n.pieces, 1., W.feat_bias, 1, &A.unmaxed[i*n.hiddens*n.pieces], 1) - for j in range(n.hiddens): - index = i * n.hiddens * n.pieces + j * n.pieces - which = _arg_max(&A.unmaxed[index], n.pieces) - A.hiddens[i*n.hiddens + j] = A.unmaxed[index + which] - memset(A.scores, 0, n.states * n.classes * sizeof(float)) - if W.hidden_weights == NULL: - memcpy(A.scores, A.hiddens, n.states * n.classes * sizeof(float)) - else: - # Compute hidden-to-output - sgemm(cblas)(False, True, n.states, n.classes, n.hiddens, - 1.0, A.hiddens, n.hiddens, - W.hidden_weights, n.hiddens, - 0.0, A.scores, n.classes) - # Add bias - for i in range(n.states): - saxpy(cblas)(n.classes, 1., W.hidden_bias, 1, &A.scores[i*n.classes], 1) - # Set unseen classes to minimum value - i = 0 - min_ = A.scores[0] - for i in range(1, n.states * n.classes): - if A.scores[i] < min_: - min_ = A.scores[i] - for i in range(n.states): - for j in range(n.classes): - if not W.seen_classes[j]: - A.scores[i*n.classes+j] = min_ - - -cdef void sum_state_features(CBlas cblas, float* output, - const float* cached, const int* token_ids, int B, int F, int O) nogil: - cdef int idx, b, f, i - cdef const float* feature - padding = cached - cached += F * O - cdef int id_stride = F*O - cdef float one = 1. - for b in range(B): - for f in range(F): - if token_ids[f] < 0: - feature = &padding[f*O] - else: - idx = token_ids[f] * id_stride + f*O - feature = &cached[idx] - saxpy(cblas)(O, one, feature, 1, &output[b*O], 1) - token_ids += F - - -cdef void cpu_log_loss(float* d_scores, - const float* costs, const int* is_valid, const float* scores, - int O) nogil: - """Do multi-label log loss""" - cdef double max_, gmax, Z, gZ - best = arg_max_if_gold(scores, costs, is_valid, O) - guess = _arg_max(scores, O) - - if best == -1 or guess == -1: - # These shouldn't happen, but if they do, we want to make sure we don't - # cause an OOB access. 
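A rough NumPy restatement (editorial sketch, ignoring the validity mask and the 1e-10 smoothing) of the removed `cpu_log_loss` body that follows: the gradient is the softmax over all scores minus a softmax restricted to the classes that share the minimum cost.

import numpy as np

def log_loss_grad(scores, costs):
    # Softmax over all classes.
    p = np.exp(scores - scores.max())
    p /= p.sum()
    # Softmax restricted to the minimum-cost ("gold") classes.
    gold = costs <= costs.min()
    g = np.where(gold, np.exp(scores - scores[gold].max()), 0.0)
    g /= g.sum()
    return p - g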
- return - Z = 1e-10 - gZ = 1e-10 - max_ = scores[guess] - gmax = scores[best] - for i in range(O): - Z += exp(scores[i] - max_) - if costs[i] <= costs[best]: - gZ += exp(scores[i] - gmax) - for i in range(O): - if costs[i] <= costs[best]: - d_scores[i] = (exp(scores[i]-max_) / Z) - (exp(scores[i]-gmax)/gZ) - else: - d_scores[i] = exp(scores[i]-max_) / Z - - -cdef int arg_max_if_gold(const weight_t* scores, const weight_t* costs, - const int* is_valid, int n) nogil: - # Find minimum cost - cdef float cost = 1 - for i in range(n): - if is_valid[i] and costs[i] < cost: - cost = costs[i] - # Now find best-scoring with that cost - cdef int best = -1 - for i in range(n): - if costs[i] <= cost and is_valid[i]: - if best == -1 or scores[i] > scores[best]: - best = i - return best - - -cdef int arg_max_if_valid(const weight_t* scores, const int* is_valid, int n) nogil: - cdef int best = -1 - for i in range(n): - if is_valid[i] >= 1: - if best == -1 or scores[i] > scores[best]: - best = i - return best - - - -class ParserStepModel(Model): - def __init__(self, docs, layers, *, has_upper, unseen_classes=None, train=True, - dropout=0.1): - Model.__init__(self, name="parser_step_model", forward=step_forward) - self.attrs["has_upper"] = has_upper - self.attrs["dropout_rate"] = dropout - self.tokvecs, self.bp_tokvecs = layers[0](docs, is_train=train) - if layers[1].get_dim("nP") >= 2: - activation = "maxout" - elif has_upper: - activation = None - else: - activation = "relu" - self.state2vec = precompute_hiddens(len(docs), self.tokvecs, layers[1], - activation=activation, train=train) - if has_upper: - self.vec2scores = layers[-1] - else: - self.vec2scores = None - self.cuda_stream = util.get_cuda_stream(non_blocking=True) - self.backprops = [] - self._class_mask = numpy.zeros((self.nO,), dtype='f') - self._class_mask.fill(1) - if unseen_classes is not None: - for class_ in unseen_classes: - self._class_mask[class_] = 0. - - def clear_memory(self): - del self.tokvecs - del self.bp_tokvecs - del self.state2vec - del self.backprops - del self._class_mask - - @property - def nO(self): - if self.attrs["has_upper"]: - return self.vec2scores.get_dim("nO") - else: - return self.state2vec.get_dim("nO") - - def class_is_unseen(self, class_): - return self._class_mask[class_] - - def mark_class_unseen(self, class_): - self._class_mask[class_] = 0 - - def mark_class_seen(self, class_): - self._class_mask[class_] = 1 - - def get_token_ids(self, states): - cdef StateClass state - states = [state for state in states if not state.is_final()] - cdef np.ndarray ids = numpy.zeros((len(states), self.state2vec.nF), - dtype='i', order='C') - ids.fill(-1) - c_ids = ids.data - for state in states: - state.c.set_context_tokens(c_ids, ids.shape[1]) - c_ids += ids.shape[1] - return ids - - def backprop_step(self, token_ids, d_vector, get_d_tokvecs): - if isinstance(self.state2vec.ops, CupyOps) \ - and not isinstance(token_ids, self.state2vec.ops.xp.ndarray): - # Move token_ids and d_vector to GPU, asynchronously - self.backprops.append(( - util.get_async(self.cuda_stream, token_ids), - util.get_async(self.cuda_stream, d_vector), - get_d_tokvecs - )) - else: - self.backprops.append((token_ids, d_vector, get_d_tokvecs)) - - - def finish_steps(self, golds): - # Add a padding vector to the d_tokvecs gradient, so that missing - # values don't affect the real gradient. - d_tokvecs = self.ops.alloc((self.tokvecs.shape[0]+1, self.tokvecs.shape[1])) - # Tells CUDA to block, so our async copies complete. 
- if self.cuda_stream is not None: - self.cuda_stream.synchronize() - for ids, d_vector, bp_vector in self.backprops: - d_state_features = bp_vector((d_vector, ids)) - ids = ids.flatten() - d_state_features = d_state_features.reshape( - (ids.size, d_state_features.shape[2])) - self.ops.scatter_add(d_tokvecs, ids, - d_state_features) - # Padded -- see update() - self.bp_tokvecs(d_tokvecs[:-1]) - return d_tokvecs - -NUMPY_OPS = NumpyOps() - -def step_forward(model: ParserStepModel, states, is_train): - token_ids = model.get_token_ids(states) - vector, get_d_tokvecs = model.state2vec(token_ids, is_train) - mask = None - if model.attrs["has_upper"]: - dropout_rate = model.attrs["dropout_rate"] - if is_train and dropout_rate > 0: - mask = NUMPY_OPS.get_dropout_mask(vector.shape, 0.1) - vector *= mask - scores, get_d_vector = model.vec2scores(vector, is_train) - else: - scores = NumpyOps().asarray(vector) - get_d_vector = lambda d_scores: d_scores - # If the class is unseen, make sure its score is minimum - scores[:, model._class_mask == 0] = numpy.nanmin(scores) - - def backprop_parser_step(d_scores): - # Zero vectors for unseen classes - d_scores *= model._class_mask - d_vector = get_d_vector(d_scores) - if mask is not None: - d_vector *= mask - model.backprop_step(token_ids, d_vector, get_d_tokvecs) - return None - return scores, backprop_parser_step - - -cdef class precompute_hiddens: - """Allow a model to be "primed" by pre-computing input features in bulk. - - This is used for the parser, where we want to take a batch of documents, - and compute vectors for each (token, position) pair. These vectors can then - be reused, especially for beam-search. - - Let's say we're using 12 features for each state, e.g. word at start of - buffer, three words on stack, their children, etc. In the normal arc-eager - system, a document of length N is processed in 2*N states. This means we'll - create 2*N*12 feature vectors --- but if we pre-compute, we only need - N*12 vector computations. The saving for beam-search is much better: - if we have a beam of k, we'll normally make 2*N*12*K computations -- - so we can save the factor k. This also gives a nice CPU/GPU division: - we can do all our hard maths up front, packed into large multiplications, - and do the hard-to-program parsing on the CPU. - """ - cdef readonly int nF, nO, nP - cdef bint _is_synchronized - cdef public object ops - cdef public object numpy_ops - cdef public object _cpu_ops - cdef np.ndarray _features - cdef np.ndarray _cached - cdef np.ndarray bias - cdef object _cuda_stream - cdef object _bp_hiddens - cdef object activation - - def __init__(self, batch_size, tokvecs, lower_model, cuda_stream=None, - activation="maxout", train=False): - gpu_cached, bp_features = lower_model(tokvecs, train) - cdef np.ndarray cached - if not isinstance(gpu_cached, numpy.ndarray): - # Note the passing of cuda_stream here: it lets - # cupy make the copy asynchronously. - # We then have to block before first use. 
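A small NumPy illustration (editorial sketch) of the pre-computation described in the `precompute_hiddens` docstring above: the lower layer runs once per token to build a cache of per-feature-slot vectors, and each parser state is then assembled by gathering `nF` rows from that cache and summing them, instead of re-running the layer for every state.

import numpy as np

n_tokens, nF, nH = 10, 3, 8
cache = np.random.rand(n_tokens, nF, nH)   # lower layer applied once per token
token_ids = np.array([2, 5, 7])            # the nF context tokens of one parser state

# Build the state vector by gathering one cached row per feature slot and summing.
state_vector = cache[token_ids, np.arange(nF)].sum(axis=0)
assert state_vector.shape == (nH,)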
- cached = gpu_cached.get(stream=cuda_stream) - else: - cached = gpu_cached - if not isinstance(lower_model.get_param("b"), numpy.ndarray): - self.bias = lower_model.get_param("b").get(stream=cuda_stream) - else: - self.bias = lower_model.get_param("b") - self.nF = cached.shape[1] - if lower_model.has_dim("nP"): - self.nP = lower_model.get_dim("nP") - else: - self.nP = 1 - self.nO = cached.shape[2] - self.ops = lower_model.ops - self.numpy_ops = NumpyOps() - self._cpu_ops = get_ops("cpu") if isinstance(self.ops, CupyOps) else self.ops - assert activation in (None, "relu", "maxout") - self.activation = activation - self._is_synchronized = False - self._cuda_stream = cuda_stream - self._cached = cached - self._bp_hiddens = bp_features - - cdef const float* get_feat_weights(self) except NULL: - if not self._is_synchronized and self._cuda_stream is not None: - self._cuda_stream.synchronize() - self._is_synchronized = True - return self._cached.data - - def has_dim(self, name): - if name == "nF": - return self.nF if self.nF is not None else True - elif name == "nP": - return self.nP if self.nP is not None else True - elif name == "nO": - return self.nO if self.nO is not None else True - else: - return False - - def get_dim(self, name): - if name == "nF": - return self.nF - elif name == "nP": - return self.nP - elif name == "nO": - return self.nO - else: - raise ValueError(Errors.E1033.format(name=name)) - - def set_dim(self, name, value): - if name == "nF": - self.nF = value - elif name == "nP": - self.nP = value - elif name == "nO": - self.nO = value - else: - raise ValueError(Errors.E1033.format(name=name)) - - def __call__(self, X, bint is_train): - if is_train: - return self.begin_update(X) - else: - return self.predict(X), lambda X: X - - def predict(self, X): - return self.begin_update(X)[0] - - def begin_update(self, token_ids): - cdef np.ndarray state_vector = numpy.zeros( - (token_ids.shape[0], self.nO, self.nP), dtype='f') - # This is tricky, but (assuming GPU available); - # - Input to forward on CPU - # - Output from forward on CPU - # - Input to backward on GPU! - # - Output from backward on GPU - bp_hiddens = self._bp_hiddens - - cdef CBlas cblas = self._cpu_ops.cblas() - - feat_weights = self.get_feat_weights() - cdef int[:, ::1] ids = token_ids - sum_state_features(cblas, state_vector.data, - feat_weights, &ids[0,0], - token_ids.shape[0], self.nF, self.nO*self.nP) - state_vector += self.bias - state_vector, bp_nonlinearity = self._nonlinearity(state_vector) - - def backward(d_state_vector_ids): - d_state_vector, token_ids = d_state_vector_ids - d_state_vector = bp_nonlinearity(d_state_vector) - d_tokens = bp_hiddens((d_state_vector, token_ids)) - return d_tokens - return state_vector, backward - - def _nonlinearity(self, state_vector): - if self.activation == "maxout": - return self._maxout_nonlinearity(state_vector) - else: - return self._relu_nonlinearity(state_vector) - - def _maxout_nonlinearity(self, state_vector): - state_vector, mask = self.numpy_ops.maxout(state_vector) - # We're outputting to CPU, but we need this variable on GPU for the - # backward pass. - mask = self.ops.asarray(mask) - - def backprop_maxout(d_best): - return self.ops.backprop_maxout(d_best, mask, self.nP) - - return state_vector, backprop_maxout - - def _relu_nonlinearity(self, state_vector): - state_vector = state_vector.reshape((state_vector.shape[0], -1)) - mask = state_vector >= 0. - state_vector *= mask - # We're outputting to CPU, but we need this variable on GPU for the - # backward pass. 
- mask = self.ops.asarray(mask) - - def backprop_relu(d_best): - d_best *= mask - return d_best.reshape((d_best.shape + (1,))) - - return state_vector, backprop_relu - -cdef inline int _arg_max(const float* scores, const int n_classes) nogil: - if n_classes == 2: - return 0 if scores[0] > scores[1] else 1 - cdef int i - cdef int best = 0 - cdef float mode = scores[0] - for i in range(1, n_classes): - if scores[i] > mode: - mode = scores[i] - best = i - return best diff --git a/spacy/ml/tb_framework.pxd b/spacy/ml/tb_framework.pxd new file mode 100644 index 000000000..965508519 --- /dev/null +++ b/spacy/ml/tb_framework.pxd @@ -0,0 +1,28 @@ +from libc.stdint cimport int8_t + + +cdef struct SizesC: + int states + int classes + int hiddens + int pieces + int feats + int embed_width + int tokens + + +cdef struct WeightsC: + const float* feat_weights + const float* feat_bias + const float* hidden_bias + const float* hidden_weights + const int8_t* seen_mask + + +cdef struct ActivationsC: + int* token_ids + float* unmaxed + float* hiddens + int* is_valid + int _curr_size + int _max_size diff --git a/spacy/ml/tb_framework.py b/spacy/ml/tb_framework.py deleted file mode 100644 index ab4a969e2..000000000 --- a/spacy/ml/tb_framework.py +++ /dev/null @@ -1,50 +0,0 @@ -from thinc.api import Model, noop -from .parser_model import ParserStepModel -from ..util import registry - - -@registry.layers("spacy.TransitionModel.v1") -def TransitionModel( - tok2vec, lower, upper, resize_output, dropout=0.2, unseen_classes=set() -): - """Set up a stepwise transition-based model""" - if upper is None: - has_upper = False - upper = noop() - else: - has_upper = True - # don't define nO for this object, because we can't dynamically change it - return Model( - name="parser_model", - forward=forward, - dims={"nI": tok2vec.maybe_get_dim("nI")}, - layers=[tok2vec, lower, upper], - refs={"tok2vec": tok2vec, "lower": lower, "upper": upper}, - init=init, - attrs={ - "has_upper": has_upper, - "unseen_classes": set(unseen_classes), - "resize_output": resize_output, - }, - ) - - -def forward(model, X, is_train): - step_model = ParserStepModel( - X, - model.layers, - unseen_classes=model.attrs["unseen_classes"], - train=is_train, - has_upper=model.attrs["has_upper"], - ) - - return step_model, step_model.finish_steps - - -def init(model, X=None, Y=None): - model.get_ref("tok2vec").initialize(X=X) - lower = model.get_ref("lower") - lower.initialize() - if model.attrs["has_upper"]: - statevecs = model.ops.alloc2f(2, lower.get_dim("nO")) - model.get_ref("upper").initialize(X=statevecs) diff --git a/spacy/ml/tb_framework.pyx b/spacy/ml/tb_framework.pyx new file mode 100644 index 000000000..79be13b00 --- /dev/null +++ b/spacy/ml/tb_framework.pyx @@ -0,0 +1,621 @@ +# cython: infer_types=True, cdivision=True, boundscheck=False +from typing import List, Tuple, Any, Optional, TypeVar, cast +from libc.string cimport memset, memcpy +from libc.stdlib cimport calloc, free, realloc +from libcpp.vector cimport vector +import numpy +cimport numpy as np +from thinc.api import Model, normal_init, chain, list2array, Linear +from thinc.api import uniform_init, glorot_uniform_init, zero_init +from thinc.api import NumpyOps +from thinc.backends.cblas cimport CBlas, saxpy, sgemm +from thinc.types import Floats1d, Floats2d, Floats3d, Floats4d +from thinc.types import Ints1d, Ints2d + +from ..errors import Errors +from ..pipeline._parser_internals import _beam_utils +from ..pipeline._parser_internals.batch import GreedyBatch +from 
..pipeline._parser_internals._parser_utils cimport arg_max +from ..pipeline._parser_internals.transition_system cimport c_transition_batch, c_apply_actions +from ..pipeline._parser_internals.transition_system cimport TransitionSystem +from ..pipeline._parser_internals.stateclass cimport StateC, StateClass +from ..tokens.doc import Doc +from ..util import registry + + +State = Any # TODO + + +@registry.layers("spacy.TransitionModel.v2") +def TransitionModel( + *, + tok2vec: Model[List[Doc], List[Floats2d]], + beam_width: int = 1, + beam_density: float = 0.0, + state_tokens: int, + hidden_width: int, + maxout_pieces: int, + nO: Optional[int] = None, + unseen_classes=set(), +) -> Model[Tuple[List[Doc], TransitionSystem], List[Tuple[State, List[Floats2d]]]]: + """Set up a transition-based parsing model, using a maxout hidden + layer and a linear output layer. + """ + t2v_width = tok2vec.get_dim("nO") if tok2vec.has_dim("nO") else None + tok2vec_projected = chain(tok2vec, list2array(), Linear(hidden_width, t2v_width)) # type: ignore + tok2vec_projected.set_dim("nO", hidden_width) + + # FIXME: we use `output` as a container for the output layer's + # weights and biases. Thinc optimizers cannot handle resizing + # of parameters. So, when the parser model is resized, we + # construct a new `output` layer, which has a different key in + # the optimizer. Once the optimizer supports parameter resizing, + # we can replace the `output` layer by `output_W` and `output_b` + # parameters in this model. + output = Linear(nO=None, nI=hidden_width, init_W=zero_init) + + return Model( + name="parser_model", + forward=forward, + init=init, + layers=[tok2vec_projected, output], + refs={ + "tok2vec": tok2vec_projected, + "output": output, + }, + params={ + "hidden_W": None, # Floats2d W for the hidden layer + "hidden_b": None, # Floats1d bias for the hidden layer + "hidden_pad": None, # Floats1d padding for the hidden layer + }, + dims={ + "nO": None, # Output size + "nP": maxout_pieces, + "nH": hidden_width, + "nI": tok2vec_projected.maybe_get_dim("nO"), + "nF": state_tokens, + }, + attrs={ + "beam_width": beam_width, + "beam_density": beam_density, + "unseen_classes": set(unseen_classes), + "resize_output": resize_output, + }, + ) + + +def resize_output(model: Model, new_nO: int) -> Model: + old_nO = model.maybe_get_dim("nO") + output = model.get_ref("output") + if old_nO is None: + model.set_dim("nO", new_nO) + output.set_dim("nO", new_nO) + output.initialize() + return model + elif new_nO <= old_nO: + return model + elif output.has_param("W"): + nH = model.get_dim("nH") + new_output = Linear(nO=new_nO, nI=nH, init_W=zero_init) + new_output.initialize() + new_W = new_output.get_param("W") + new_b = new_output.get_param("b") + old_W = output.get_param("W") + old_b = output.get_param("b") + new_W[:old_nO] = old_W # type: ignore + new_b[:old_nO] = old_b # type: ignore + for i in range(old_nO, new_nO): + model.attrs["unseen_classes"].add(i) + model.layers[-1] = new_output + model.set_ref("output", new_output) + # TODO: Avoid this private intrusion + model._dims["nO"] = new_nO + return model + + +def init( + model, + X: Optional[Tuple[List[Doc], TransitionSystem]] = None, + Y: Optional[Tuple[List[State], List[Floats2d]]] = None, +): + if X is not None: + docs, moves = X + model.get_ref("tok2vec").initialize(X=docs) + else: + model.get_ref("tok2vec").initialize() + inferred_nO = _infer_nO(Y) + if inferred_nO is not None: + current_nO = model.maybe_get_dim("nO") + if current_nO is None or current_nO != inferred_nO: 
+ model.attrs["resize_output"](model, inferred_nO) + nO = model.get_dim("nO") + nP = model.get_dim("nP") + nH = model.get_dim("nH") + nI = model.get_dim("nI") + nF = model.get_dim("nF") + ops = model.ops + + Wl = ops.alloc2f(nH * nP, nF * nI) + bl = ops.alloc1f(nH * nP) + padl = ops.alloc1f(nI) + # Wl = zero_init(ops, Wl.shape) + Wl = glorot_uniform_init(ops, Wl.shape) + padl = uniform_init(ops, padl.shape) # type: ignore + # TODO: Experiment with whether better to initialize output_W + model.set_param("hidden_W", Wl) + model.set_param("hidden_b", bl) + model.set_param("hidden_pad", padl) + # model = _lsuv_init(model) + return model + + +class TransitionModelInputs: + """ + Input to transition model. + """ + + # dataclass annotation is not yet supported in Cython 0.29.x, + # so, we'll do something close to it. + + actions: Optional[List[Ints1d]] + docs: List[Doc] + max_moves: int + moves: TransitionSystem + states: Optional[List[State]] + + __slots__ = [ + "actions", + "docs", + "max_moves", + "moves", + "states", + ] + + def __init__( + self, + docs: List[Doc], + moves: TransitionSystem, + actions: Optional[List[Ints1d]]=None, + max_moves: int=0, + states: Optional[List[State]]=None): + """ + actions (Optional[List[Ints1d]]): actions to apply for each Doc. + docs (List[Doc]): Docs to predict transition sequences for. + max_moves: (int): the maximum number of moves to apply, values less + than 1 will apply moves to states until they are final states. + moves (TransitionSystem): the transition system to use when predicting + the transition sequences. + states (Optional[List[States]]): the initial states to predict the + transition sequences for. When absent, the initial states are + initialized from the provided Docs. + """ + self.actions = actions + self.docs = docs + self.moves = moves + self.max_moves = max_moves + self.states = states + + +def forward(model, inputs: TransitionModelInputs, is_train: bool): + docs = inputs.docs + moves = inputs.moves + actions = inputs.actions + + beam_width = model.attrs["beam_width"] + hidden_pad = model.get_param("hidden_pad") + tok2vec = model.get_ref("tok2vec") + + states = moves.init_batch(docs) if inputs.states is None else inputs.states + tokvecs, backprop_tok2vec = tok2vec(docs, is_train) + tokvecs = model.ops.xp.vstack((tokvecs, hidden_pad)) + feats, backprop_feats = _forward_precomputable_affine(model, tokvecs, is_train) + seen_mask = _get_seen_mask(model) + + if not is_train and beam_width == 1 and isinstance(model.ops, NumpyOps): + # Note: max_moves is only used during training, so we don't need to + # pass it to the greedy inference path. + return _forward_greedy_cpu(model, moves, states, feats, seen_mask, actions=actions) + else: + return _forward_fallback(model, moves, states, tokvecs, backprop_tok2vec, + feats, backprop_feats, seen_mask, is_train, actions=actions, + max_moves=inputs.max_moves) + + +def _forward_greedy_cpu(model: Model, TransitionSystem moves, states: List[StateClass], np.ndarray feats, + np.ndarray[np.npy_bool, ndim=1] seen_mask, actions: Optional[List[Ints1d]]=None): + cdef vector[StateC*] c_states + cdef StateClass state + for state in states: + if not state.is_final(): + c_states.push_back(state.c) + weights = _get_c_weights(model, feats.data, seen_mask) + # Precomputed features have rows for each token, plus one for padding. 
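Editorial sketch (NumPy, not part of the diff) of the padding convention referenced in the comment above: the precomputed feature array carries one trailing row for padding, so a context-token id of -1 (an empty slot) indexes that row instead of reading a real token's features.

import numpy as np

n_tokens, width = 5, 4
feats = np.vstack([np.random.rand(n_tokens, width), np.zeros((1, width))])  # + padding row
ids = np.array([0, 3, -1])        # -1 marks an empty context slot
rows = feats[ids]                 # the -1 id picks up the trailing padding row
assert (rows[-1] == feats[n_tokens]).all()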
+ cdef int n_tokens = feats.shape[0] - 1 + sizes = _get_c_sizes(model, c_states.size(), n_tokens) + cdef CBlas cblas = model.ops.cblas() + scores = _parse_batch(cblas, moves, &c_states[0], weights, sizes, actions=actions) + + def backprop(dY): + raise ValueError(Errors.E4004) + + return (states, scores), backprop + +cdef list _parse_batch(CBlas cblas, TransitionSystem moves, StateC** states, + WeightsC weights, SizesC sizes, actions: Optional[List[Ints1d]]=None): + cdef int i, j + cdef vector[StateC *] unfinished + cdef ActivationsC activations = _alloc_activations(sizes) + cdef np.ndarray step_scores + cdef np.ndarray step_actions + + scores = [] + while sizes.states >= 1: + step_scores = numpy.empty((sizes.states, sizes.classes), dtype="f") + step_actions = actions[0] if actions is not None else None + with nogil: + _predict_states(cblas, &activations, step_scores.data, states, &weights, sizes) + if actions is None: + # Validate actions, argmax, take action. + c_transition_batch(moves, states, step_scores.data, sizes.classes, + sizes.states) + else: + c_apply_actions(moves, states, step_actions.data, sizes.states) + for i in range(sizes.states): + if not states[i].is_final(): + unfinished.push_back(states[i]) + for i in range(unfinished.size()): + states[i] = unfinished[i] + sizes.states = unfinished.size() + scores.append(step_scores) + unfinished.clear() + actions = actions[1:] if actions is not None else None + _free_activations(&activations) + + return scores + + +def _forward_fallback( + model: Model, + moves: TransitionSystem, + states: List[StateClass], + tokvecs, backprop_tok2vec, + feats, + backprop_feats, + seen_mask, + is_train: bool, + actions: Optional[List[Ints1d]]=None, + max_moves: int=0): + nF = model.get_dim("nF") + output = model.get_ref("output") + hidden_b = model.get_param("hidden_b") + nH = model.get_dim("nH") + nP = model.get_dim("nP") + + beam_width = model.attrs["beam_width"] + beam_density = model.attrs["beam_density"] + + ops = model.ops + + all_ids = [] + all_which = [] + all_statevecs = [] + all_scores = [] + if beam_width == 1: + batch = GreedyBatch(moves, states, None) + else: + batch = _beam_utils.BeamBatch( + moves, states, None, width=beam_width, density=beam_density + ) + arange = ops.xp.arange(nF) + n_moves = 0 + while not batch.is_done: + ids = numpy.zeros((len(batch.get_unfinished_states()), nF), dtype="i") + for i, state in enumerate(batch.get_unfinished_states()): + state.set_context_tokens(ids, i, nF) + # Sum the state features, add the bias and apply the activation (maxout) + # to create the state vectors. + preacts2f = feats[ids, arange].sum(axis=1) # type: ignore + preacts2f += hidden_b + preacts = ops.reshape3f(preacts2f, preacts2f.shape[0], nH, nP) + assert preacts.shape[0] == len(batch.get_unfinished_states()), preacts.shape + statevecs, which = ops.maxout(preacts) + # We don't use output's backprop, since we want to backprop for + # all states at once, rather than a single state. + scores = output.predict(statevecs) + scores[:, seen_mask] = ops.xp.nanmin(scores) + # Transition the states, filtering out any that are finished. + cpu_scores = ops.to_numpy(scores) + if actions is None: + batch.advance(cpu_scores) + else: + batch.advance_with_actions(actions[0]) + actions = actions[1:] + all_scores.append(scores) + if is_train: + # Remember intermediate results for the backprop. 
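A minimal NumPy sketch (editorial, mirroring the maxout step in the loop above): the hidden layer emits `nP` pieces per hidden unit, maxout keeps the strongest piece, and the winning indices (`which`) are kept so the backward pass can route gradients to the pieces that actually fired.

import numpy as np

nS, nH, nP = 2, 4, 3                       # states, hidden units, maxout pieces
preacts = np.random.rand(nS, nH * nP).reshape(nS, nH, nP)
which = preacts.argmax(axis=-1)            # winning piece per (state, hidden unit)
statevecs = np.take_along_axis(preacts, which[..., None], axis=-1)[..., 0]
assert statevecs.shape == (nS, nH)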
+ all_ids.append(ids) + all_statevecs.append(statevecs) + all_which.append(which) + if n_moves >= max_moves >= 1: + break + n_moves += 1 + + def backprop_parser(d_states_d_scores): + ids = ops.xp.vstack(all_ids) + which = ops.xp.vstack(all_which) + statevecs = ops.xp.vstack(all_statevecs) + _, d_scores = d_states_d_scores + if model.attrs.get("unseen_classes"): + # If we have a negative gradient (i.e. the probability should + # increase) on any classes we filtered out as unseen, mark + # them as seen. + for clas in set(model.attrs["unseen_classes"]): + if (d_scores[:, clas] < 0).any(): + model.attrs["unseen_classes"].remove(clas) + d_scores *= seen_mask == False + # Calculate the gradients for the parameters of the output layer. + # The weight gemm is (nS, nO) @ (nS, nH).T + output.inc_grad("b", d_scores.sum(axis=0)) + output.inc_grad("W", ops.gemm(d_scores, statevecs, trans1=True)) + # Now calculate d_statevecs, by backproping through the output linear layer. + # This gemm is (nS, nO) @ (nO, nH) + output_W = output.get_param("W") + d_statevecs = ops.gemm(d_scores, output_W) + # Backprop through the maxout activation + d_preacts = ops.backprop_maxout(d_statevecs, which, nP) + d_preacts2f = ops.reshape2f(d_preacts, d_preacts.shape[0], nH * nP) + model.inc_grad("hidden_b", d_preacts2f.sum(axis=0)) + # We don't need to backprop the summation, because we pass back the IDs instead + d_state_features = backprop_feats((d_preacts2f, ids)) + d_tokvecs = ops.alloc2f(tokvecs.shape[0], tokvecs.shape[1]) + ops.scatter_add(d_tokvecs, ids, d_state_features) + model.inc_grad("hidden_pad", d_tokvecs[-1]) + return (backprop_tok2vec(d_tokvecs[:-1]), None) + + return (list(batch), all_scores), backprop_parser + + +def _get_seen_mask(model: Model) -> numpy.array[bool, 1]: + mask = model.ops.xp.zeros(model.get_dim("nO"), dtype="bool") + for class_ in model.attrs.get("unseen_classes", set()): + mask[class_] = True + return mask + + +def _forward_precomputable_affine(model, X: Floats2d, is_train: bool): + W: Floats2d = model.get_param("hidden_W") + nF = model.get_dim("nF") + nH = model.get_dim("nH") + nP = model.get_dim("nP") + nI = model.get_dim("nI") + # The weights start out (nH * nP, nF * nI). Transpose and reshape to (nF * nH *nP, nI) + W3f = model.ops.reshape3f(W, nH * nP, nF, nI) + W3f = W3f.transpose((1, 0, 2)) + W2f = model.ops.reshape2f(W3f, nF * nH * nP, nI) + assert X.shape == (X.shape[0], nI), X.shape + Yf_ = model.ops.gemm(X, W2f, trans2=True) + Yf = model.ops.reshape3f(Yf_, Yf_.shape[0], nF, nH * nP) + + def backward(dY_ids: Tuple[Floats3d, Ints2d]): + # This backprop is particularly tricky, because we get back a different + # thing from what we put out. We put out an array of shape: + # (nB, nF, nH, nP), and get back: + # (nB, nH, nP) and ids (nB, nF) + # The ids tell us the values of nF, so we would have: + # + # dYf = zeros((nB, nF, nH, nP)) + # for b in range(nB): + # for f in range(nF): + # dYf[b, ids[b, f]] += dY[b] + # + # However, we avoid building that array for efficiency -- and just pass + # in the indices. 
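The comment block above explains why the backward pass hands back gradients keyed by token ids instead of materialising the full per-feature array. A small NumPy sketch (editorial) of the underlying duality: the forward pass gathers rows by id, so the backward pass scatter-adds gradients back onto those rows, with repeated ids accumulating.

import numpy as np

n_tokens, nF, width = 6, 3, 4
d_rows = np.random.rand(5, nF, width)      # gradients for 5 states, nF feature slots each
ids = np.random.randint(0, n_tokens, (5, nF))

d_tokens = np.zeros((n_tokens, width))
np.add.at(d_tokens, ids.ravel(), d_rows.reshape(-1, width))   # scatter-add by token id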
+ dY, ids = dY_ids + dXf = model.ops.gemm(dY, W) + Xf = X[ids].reshape((ids.shape[0], -1)) + dW = model.ops.gemm(dY, Xf, trans1=True) + model.inc_grad("hidden_W", dW) + return model.ops.reshape3f(dXf, dXf.shape[0], nF, nI) + + return Yf, backward + + +def _infer_nO(Y: Optional[Tuple[List[State], List[Floats2d]]]) -> Optional[int]: + if Y is None: + return None + _, scores = Y + if len(scores) == 0: + return None + assert scores[0].shape[0] >= 1 + assert len(scores[0].shape) == 2 + return scores[0].shape[1] + + +def _lsuv_init(model: Model): + """This is like the 'layer sequential unit variance', but instead + of taking the actual inputs, we randomly generate whitened data. + + Why's this all so complicated? We have a huge number of inputs, + and the maxout unit makes guessing the dynamics tricky. Instead + we set the maxout weights to values that empirically result in + whitened outputs given whitened inputs. + """ + W = model.maybe_get_param("hidden_W") + if W is not None and W.any(): + return + + nF = model.get_dim("nF") + nH = model.get_dim("nH") + nP = model.get_dim("nP") + nI = model.get_dim("nI") + W = model.ops.alloc4f(nF, nH, nP, nI) + b = model.ops.alloc2f(nH, nP) + pad = model.ops.alloc4f(1, nF, nH, nP) + + ops = model.ops + W = normal_init(ops, W.shape, mean=float(ops.xp.sqrt(1.0 / nF * nI))) + pad = normal_init(ops, pad.shape, mean=1.0) + model.set_param("W", W) + model.set_param("b", b) + model.set_param("pad", pad) + + ids = ops.alloc_f((5000, nF), dtype="f") + ids += ops.xp.random.uniform(0, 1000, ids.shape) + ids = ops.asarray(ids, dtype="i") + tokvecs = ops.alloc_f((5000, nI), dtype="f") + tokvecs += ops.xp.random.normal(loc=0.0, scale=1.0, size=tokvecs.size).reshape( + tokvecs.shape + ) + + def predict(ids, tokvecs): + # nS ids. nW tokvecs. Exclude the padding array. 
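Editorial sketch of the LSUV-style loop implemented just below, under the stated assumption that a plain dense layer stands in for the maxout hidden layer: rescale `W` until the output variance is close to 1, then shift `b` until the mean is close to 0, using randomly generated whitened inputs rather than real data.

import numpy as np

rng = np.random.default_rng(0)
W = rng.normal(size=(8, 16))
b = np.zeros(8)
X = rng.normal(size=(1000, 16))            # synthetic whitened inputs

for _ in range(10):                        # t_max iterations
    acts = X @ W.T + b
    var, mean = acts.var(), acts.mean()
    if abs(var - 1.0) >= 0.01:             # tol_var: push variance toward 1
        W /= np.sqrt(var)
    elif abs(mean) >= 0.01:                # tol_mean: push mean toward 0
        b -= mean
    else:
        break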
+ hiddens, _ = _forward_precomputable_affine(model, tokvecs[:-1], False) + vectors = model.ops.alloc2f(ids.shape[0], nH * nP) + # need nS vectors + hiddens = hiddens.reshape((hiddens.shape[0] * nF, nH * nP)) + model.ops.scatter_add(vectors, ids.flatten(), hiddens) + vectors3f = model.ops.reshape3f(vectors, vectors.shape[0], nH, nP) + vectors3f += b + return model.ops.maxout(vectors3f)[0] + + tol_var = 0.01 + tol_mean = 0.01 + t_max = 10 + W = cast(Floats4d, model.get_param("hidden_W").copy()) + b = cast(Floats2d, model.get_param("hidden_b").copy()) + for t_i in range(t_max): + acts1 = predict(ids, tokvecs) + var = model.ops.xp.var(acts1) + mean = model.ops.xp.mean(acts1) + if abs(var - 1.0) >= tol_var: + W /= model.ops.xp.sqrt(var) + model.set_param("hidden_W", W) + elif abs(mean) >= tol_mean: + b -= mean + model.set_param("hidden_b", b) + else: + break + return model + + +cdef WeightsC _get_c_weights(model, const float* feats, np.ndarray[np.npy_bool, ndim=1] seen_mask) except *: + output = model.get_ref("output") + cdef np.ndarray hidden_b = model.get_param("hidden_b") + cdef np.ndarray output_W = output.get_param("W") + cdef np.ndarray output_b = output.get_param("b") + + cdef WeightsC weights + weights.feat_weights = feats + weights.feat_bias = hidden_b.data + weights.hidden_weights = output_W.data + weights.hidden_bias = output_b.data + weights.seen_mask = seen_mask.data + + return weights + + +cdef SizesC _get_c_sizes(model, int batch_size, int tokens) except *: + cdef SizesC sizes + sizes.states = batch_size + sizes.classes = model.get_dim("nO") + sizes.hiddens = model.get_dim("nH") + sizes.pieces = model.get_dim("nP") + sizes.feats = model.get_dim("nF") + sizes.embed_width = model.get_dim("nI") + sizes.tokens = tokens + return sizes + + +cdef ActivationsC _alloc_activations(SizesC n) nogil: + cdef ActivationsC A + memset(&A, 0, sizeof(A)) + _resize_activations(&A, n) + return A + + +cdef void _free_activations(const ActivationsC* A) nogil: + free(A.token_ids) + free(A.unmaxed) + free(A.hiddens) + free(A.is_valid) + + +cdef void _resize_activations(ActivationsC* A, SizesC n) nogil: + if n.states <= A._max_size: + A._curr_size = n.states + return + if A._max_size == 0: + A.token_ids = calloc(n.states * n.feats, sizeof(A.token_ids[0])) + A.unmaxed = calloc(n.states * n.hiddens * n.pieces, sizeof(A.unmaxed[0])) + A.hiddens = calloc(n.states * n.hiddens, sizeof(A.hiddens[0])) + A.is_valid = calloc(n.states * n.classes, sizeof(A.is_valid[0])) + A._max_size = n.states + else: + A.token_ids = realloc(A.token_ids, + n.states * n.feats * sizeof(A.token_ids[0])) + A.unmaxed = realloc(A.unmaxed, + n.states * n.hiddens * n.pieces * sizeof(A.unmaxed[0])) + A.hiddens = realloc(A.hiddens, + n.states * n.hiddens * sizeof(A.hiddens[0])) + A.is_valid = realloc(A.is_valid, + n.states * n.classes * sizeof(A.is_valid[0])) + A._max_size = n.states + A._curr_size = n.states + + +cdef void _predict_states(CBlas cblas, ActivationsC* A, float* scores, StateC** states, const WeightsC* W, SizesC n) nogil: + _resize_activations(A, n) + for i in range(n.states): + states[i].set_context_tokens(&A.token_ids[i*n.feats], n.feats) + memset(A.unmaxed, 0, n.states * n.hiddens * n.pieces * sizeof(float)) + _sum_state_features(cblas, A.unmaxed, W.feat_weights, A.token_ids, n) + for i in range(n.states): + saxpy(cblas)(n.hiddens * n.pieces, 1., W.feat_bias, 1, &A.unmaxed[i*n.hiddens*n.pieces], 1) + for j in range(n.hiddens): + index = i * n.hiddens * n.pieces + j * n.pieces + which = arg_max(&A.unmaxed[index], n.pieces) + 
A.hiddens[i*n.hiddens + j] = A.unmaxed[index + which] + if W.hidden_weights == NULL: + memcpy(scores, A.hiddens, n.states * n.classes * sizeof(float)) + else: + # Compute hidden-to-output + sgemm(cblas)(False, True, n.states, n.classes, n.hiddens, + 1.0, A.hiddens, n.hiddens, + W.hidden_weights, n.hiddens, + 0.0, scores, n.classes) + # Add bias + for i in range(n.states): + saxpy(cblas)(n.classes, 1., W.hidden_bias, 1, &scores[i*n.classes], 1) + # Set unseen classes to minimum value + i = 0 + min_ = scores[0] + for i in range(1, n.states * n.classes): + if scores[i] < min_: + min_ = scores[i] + for i in range(n.states): + for j in range(n.classes): + if W.seen_mask[j]: + scores[i*n.classes+j] = min_ + + +cdef void _sum_state_features(CBlas cblas, float* output, + const float* cached, const int* token_ids, SizesC n) nogil: + cdef int idx, b, f, i + cdef const float* feature + cdef int B = n.states + cdef int O = n.hiddens * n.pieces + cdef int F = n.feats + cdef int T = n.tokens + padding = cached + (T * F * O) + cdef int id_stride = F*O + cdef float one = 1. + for b in range(B): + for f in range(F): + if token_ids[f] < 0: + feature = &padding[f*O] + else: + idx = token_ids[f] * id_stride + f*O + feature = &cached[idx] + saxpy(cblas)(O, one, feature, 1, &output[b*O], 1) + token_ids += F + diff --git a/spacy/pipeline/_parser_internals/_beam_utils.pyx b/spacy/pipeline/_parser_internals/_beam_utils.pyx index 610c8ddee..d07c13aeb 100644 --- a/spacy/pipeline/_parser_internals/_beam_utils.pyx +++ b/spacy/pipeline/_parser_internals/_beam_utils.pyx @@ -7,6 +7,7 @@ from cpython.ref cimport PyObject, Py_XDECREF from ...typedefs cimport hash_t, class_t from .transition_system cimport TransitionSystem, Transition from ...errors import Errors +from .batch cimport Batch from .search cimport Beam, MaxViolation from .search import MaxViolation from .stateclass cimport StateC, StateClass @@ -26,7 +27,7 @@ cdef int check_final_state(void* _state, void* extra_args) except -1: return state.is_final() -cdef class BeamBatch(object): +cdef class BeamBatch(Batch): cdef public TransitionSystem moves cdef public object states cdef public object docs diff --git a/spacy/pipeline/_parser_internals/_parser_utils.pxd b/spacy/pipeline/_parser_internals/_parser_utils.pxd new file mode 100644 index 000000000..7fee05bad --- /dev/null +++ b/spacy/pipeline/_parser_internals/_parser_utils.pxd @@ -0,0 +1,2 @@ +cdef int arg_max(const float* scores, const int n_classes) nogil +cdef int arg_max_if_valid(const float* scores, const int* is_valid, int n) nogil diff --git a/spacy/pipeline/_parser_internals/_parser_utils.pyx b/spacy/pipeline/_parser_internals/_parser_utils.pyx new file mode 100644 index 000000000..582756bf5 --- /dev/null +++ b/spacy/pipeline/_parser_internals/_parser_utils.pyx @@ -0,0 +1,22 @@ +# cython: infer_types=True + +cdef inline int arg_max(const float* scores, const int n_classes) nogil: + if n_classes == 2: + return 0 if scores[0] > scores[1] else 1 + cdef int i + cdef int best = 0 + cdef float mode = scores[0] + for i in range(1, n_classes): + if scores[i] > mode: + mode = scores[i] + best = i + return best + + +cdef inline int arg_max_if_valid(const float* scores, const int* is_valid, int n) nogil: + cdef int best = -1 + for i in range(n): + if is_valid[i] >= 1: + if best == -1 or scores[i] > scores[best]: + best = i + return best diff --git a/spacy/pipeline/_parser_internals/_state.pxd b/spacy/pipeline/_parser_internals/_state.pxd index a1262bb61..bd5d5208c 100644 --- 
a/spacy/pipeline/_parser_internals/_state.pxd +++ b/spacy/pipeline/_parser_internals/_state.pxd @@ -6,7 +6,6 @@ cimport libcpp from libcpp.unordered_map cimport unordered_map from libcpp.vector cimport vector from libcpp.set cimport set -from cpython.exc cimport PyErr_CheckSignals, PyErr_SetFromErrno from murmurhash.mrmr cimport hash64 from ...vocab cimport EMPTY_LEXEME @@ -26,7 +25,7 @@ cdef struct ArcC: cdef cppclass StateC: - int* _heads + vector[int] _heads const TokenC* _sent vector[int] _stack vector[int] _rebuffer @@ -34,31 +33,34 @@ cdef cppclass StateC: unordered_map[int, vector[ArcC]] _left_arcs unordered_map[int, vector[ArcC]] _right_arcs vector[libcpp.bool] _unshiftable + vector[int] history set[int] _sent_starts TokenC _empty_token int length int offset int _b_i - __init__(const TokenC* sent, int length) nogil: + __init__(const TokenC* sent, int length) nogil except +: + this._heads.resize(length, -1) + this._unshiftable.resize(length, False) + + # Reserve memory ahead of time to minimize allocations during parsing. + # The initial capacity set here ideally reflects the expected average-case/majority usage. + cdef int init_capacity = 32 + this._stack.reserve(init_capacity) + this._rebuffer.reserve(init_capacity) + this._ents.reserve(init_capacity) + this._left_arcs.reserve(init_capacity) + this._right_arcs.reserve(init_capacity) + this.history.reserve(init_capacity) + this._sent = sent - this._heads = calloc(length, sizeof(int)) - if not (this._sent and this._heads): - with gil: - PyErr_SetFromErrno(MemoryError) - PyErr_CheckSignals() this.offset = 0 this.length = length this._b_i = 0 - for i in range(length): - this._heads[i] = -1 - this._unshiftable.push_back(0) memset(&this._empty_token, 0, sizeof(TokenC)) this._empty_token.lex = &EMPTY_LEXEME - __dealloc__(): - free(this._heads) - void set_context_tokens(int* ids, int n) nogil: cdef int i, j if n == 1: @@ -131,19 +133,20 @@ cdef cppclass StateC: ids[i] = -1 int S(int i) nogil const: - if i >= this._stack.size(): + cdef int stack_size = this._stack.size() + if i >= stack_size or i < 0: return -1 - elif i < 0: - return -1 - return this._stack.at(this._stack.size() - (i+1)) + else: + return this._stack[stack_size - (i+1)] int B(int i) nogil const: + cdef int buf_size = this._rebuffer.size() if i < 0: return -1 - elif i < this._rebuffer.size(): - return this._rebuffer.at(this._rebuffer.size() - (i+1)) + elif i < buf_size: + return this._rebuffer[buf_size - (i+1)] else: - b_i = this._b_i + (i - this._rebuffer.size()) + b_i = this._b_i + (i - buf_size) if b_i >= this.length: return -1 else: @@ -242,7 +245,7 @@ cdef cppclass StateC: return 0 elif this._sent[word].sent_start == 1: return 1 - elif this._sent_starts.count(word) >= 1: + elif this._sent_starts.const_find(word) != this._sent_starts.const_end(): return 1 else: return 0 @@ -327,7 +330,7 @@ cdef cppclass StateC: if item >= this._unshiftable.size(): return 0 else: - return this._unshiftable.at(item) + return this._unshiftable[item] void set_reshiftable(int item) nogil: if item < this._unshiftable.size(): @@ -347,6 +350,9 @@ cdef cppclass StateC: this._heads[child] = head void map_del_arc(unordered_map[int, vector[ArcC]]* heads_arcs, int h_i, int c_i) nogil: + cdef vector[ArcC]* arcs + cdef ArcC* arc + arcs_it = heads_arcs.find(h_i) if arcs_it == heads_arcs.end(): return @@ -355,12 +361,12 @@ cdef cppclass StateC: if arcs.size() == 0: return - arc = arcs.back() + arc = &arcs.back() if arc.head == h_i and arc.child == c_i: arcs.pop_back() else: for i in range(arcs.size()-1): 
- arc = arcs.at(i) + arc = &deref(arcs)[i] if arc.head == h_i and arc.child == c_i: arc.head = -1 arc.child = -1 @@ -400,10 +406,11 @@ cdef cppclass StateC: this._rebuffer = src._rebuffer this._sent_starts = src._sent_starts this._unshiftable = src._unshiftable - memcpy(this._heads, src._heads, this.length * sizeof(this._heads[0])) + this._heads = src._heads this._ents = src._ents this._left_arcs = src._left_arcs this._right_arcs = src._right_arcs this._b_i = src._b_i this.offset = src.offset this._empty_token = src._empty_token + this.history = src.history diff --git a/spacy/pipeline/_parser_internals/arc_eager.pyx b/spacy/pipeline/_parser_internals/arc_eager.pyx index a79aef64a..9c358475a 100644 --- a/spacy/pipeline/_parser_internals/arc_eager.pyx +++ b/spacy/pipeline/_parser_internals/arc_eager.pyx @@ -773,6 +773,8 @@ cdef class ArcEager(TransitionSystem): return list(arcs) def has_gold(self, Example eg, start=0, end=None): + if end is not None and end < 0: + end = None for word in eg.y[start:end]: if word.dep != 0: return True @@ -858,6 +860,7 @@ cdef class ArcEager(TransitionSystem): state.print_state() ))) action.do(state.c, action.label) + state.c.history.push_back(i) break else: failed = False diff --git a/spacy/pipeline/_parser_internals/batch.pxd b/spacy/pipeline/_parser_internals/batch.pxd new file mode 100644 index 000000000..60734e549 --- /dev/null +++ b/spacy/pipeline/_parser_internals/batch.pxd @@ -0,0 +1,2 @@ +cdef class Batch: + pass diff --git a/spacy/pipeline/_parser_internals/batch.pyx b/spacy/pipeline/_parser_internals/batch.pyx new file mode 100644 index 000000000..91073b52e --- /dev/null +++ b/spacy/pipeline/_parser_internals/batch.pyx @@ -0,0 +1,52 @@ +from typing import Any + +TransitionSystem = Any # TODO + +cdef class Batch: + def advance(self, scores): + raise NotImplementedError + + def get_states(self): + raise NotImplementedError + + @property + def is_done(self): + raise NotImplementedError + + def get_unfinished_states(self): + raise NotImplementedError + + def __getitem__(self, i): + raise NotImplementedError + + def __len__(self): + raise NotImplementedError + + +class GreedyBatch(Batch): + def __init__(self, moves: TransitionSystem, states, golds): + self._moves = moves + self._states = states + self._next_states = [s for s in states if not s.is_final()] + + def advance(self, scores): + self._next_states = self._moves.transition_states(self._next_states, scores) + + def advance_with_actions(self, actions): + self._next_states = self._moves.apply_actions(self._next_states, actions) + + def get_states(self): + return self._states + + @property + def is_done(self): + return all(s.is_final() for s in self._states) + + def get_unfinished_states(self): + return [st for st in self._states if not st.is_final()] + + def __getitem__(self, i): + return self._states[i] + + def __len__(self): + return len(self._states) diff --git a/spacy/pipeline/_parser_internals/ner.pyx b/spacy/pipeline/_parser_internals/ner.pyx index 53ed03523..d4d564dc7 100644 --- a/spacy/pipeline/_parser_internals/ner.pyx +++ b/spacy/pipeline/_parser_internals/ner.pyx @@ -156,7 +156,7 @@ cdef class BiluoPushDown(TransitionSystem): if token.ent_type: labels.add(token.ent_type_) return labels - + def move_name(self, int move, attr_t label): if move == OUT: return 'O' @@ -306,6 +306,8 @@ cdef class BiluoPushDown(TransitionSystem): for span in eg.y.spans.get(neg_key, []): if span.start >= start and span.end <= end: return True + if end is not None and end < 0: + end = None for word in 
eg.y[start:end]: if word.ent_iob != 0: return True @@ -646,7 +648,7 @@ cdef class Unit: cost += 1 break return cost - + cdef class Out: diff --git a/spacy/pipeline/_parser_internals/stateclass.pyx b/spacy/pipeline/_parser_internals/stateclass.pyx index 4eaddd997..dbd22117e 100644 --- a/spacy/pipeline/_parser_internals/stateclass.pyx +++ b/spacy/pipeline/_parser_internals/stateclass.pyx @@ -20,6 +20,10 @@ cdef class StateClass: if self._borrowed != 1: del self.c + @property + def history(self): + return list(self.c.history) + @property def stack(self): return [self.S(i) for i in range(self.c.stack_depth())] @@ -176,3 +180,6 @@ cdef class StateClass: def clone(self, StateClass src): self.c.clone(src.c) + + def set_context_tokens(self, int[:, :] output, int row, int n_feats): + self.c.set_context_tokens(&output[row, 0], n_feats) diff --git a/spacy/pipeline/_parser_internals/transition_system.pxd b/spacy/pipeline/_parser_internals/transition_system.pxd index 52ebd2b8e..c8ebd8b27 100644 --- a/spacy/pipeline/_parser_internals/transition_system.pxd +++ b/spacy/pipeline/_parser_internals/transition_system.pxd @@ -53,3 +53,10 @@ cdef class TransitionSystem: cdef int set_costs(self, int* is_valid, weight_t* costs, const StateC* state, gold) except -1 + + +cdef void c_apply_actions(TransitionSystem moves, StateC** states, const int* actions, + int batch_size) nogil + +cdef void c_transition_batch(TransitionSystem moves, StateC** states, const float* scores, + int nr_class, int batch_size) nogil diff --git a/spacy/pipeline/_parser_internals/transition_system.pyx b/spacy/pipeline/_parser_internals/transition_system.pyx index 18eb745a9..89f9e8ae8 100644 --- a/spacy/pipeline/_parser_internals/transition_system.pyx +++ b/spacy/pipeline/_parser_internals/transition_system.pyx @@ -1,6 +1,8 @@ # cython: infer_types=True from __future__ import print_function from cymem.cymem cimport Pool +from libc.stdlib cimport calloc, free +from libcpp.vector cimport vector from collections import Counter import srsly @@ -10,6 +12,7 @@ from ...typedefs cimport weight_t, attr_t from ...tokens.doc cimport Doc from ...structs cimport TokenC from .stateclass cimport StateClass +from ._parser_utils cimport arg_max_if_valid from ...errors import Errors from ... 
import util @@ -73,7 +76,18 @@ cdef class TransitionSystem: offset += len(doc) return states + def follow_history(self, doc, history): + cdef int clas + cdef StateClass state = StateClass(doc) + for clas in history: + action = self.c[clas] + action.do(state.c, action.label) + state.c.history.push_back(clas) + return state + def get_oracle_sequence(self, Example example, _debug=False): + if not self.has_gold(example): + return [] states, golds, _ = self.init_gold_batch([example]) if not states: return [] @@ -85,6 +99,8 @@ cdef class TransitionSystem: return self.get_oracle_sequence_from_state(state, gold) def get_oracle_sequence_from_state(self, StateClass state, gold, _debug=None): + if state.is_final(): + return [] cdef Pool mem = Pool() # n_moves should not be zero at this point, but make sure to avoid zero-length mem alloc assert self.n_moves > 0 @@ -110,6 +126,7 @@ cdef class TransitionSystem: "S0 head?", str(state.has_head(state.S(0))), ))) action.do(state.c, action.label) + state.c.history.push_back(i) break else: if _debug: @@ -137,6 +154,28 @@ cdef class TransitionSystem: raise ValueError(Errors.E170.format(name=name)) action = self.lookup_transition(name) action.do(state.c, action.label) + state.c.history.push_back(action.clas) + + def apply_actions(self, states, const int[::1] actions): + assert len(states) == actions.shape[0] + cdef StateClass state + cdef vector[StateC*] c_states + c_states.resize(len(states)) + cdef int i + for (i, state) in enumerate(states): + c_states[i] = state.c + c_apply_actions(self, &c_states[0], &actions[0], actions.shape[0]) + return [state for state in states if not state.c.is_final()] + + def transition_states(self, states, float[:, ::1] scores): + assert len(states) == scores.shape[0] + cdef StateClass state + cdef float* c_scores = &scores[0, 0] + cdef vector[StateC*] c_states + for state in states: + c_states.push_back(state.c) + c_transition_batch(self, &c_states[0], c_scores, scores.shape[1], scores.shape[0]) + return [state for state in states if not state.c.is_final()] cdef Transition lookup_transition(self, object name) except *: raise NotImplementedError @@ -250,3 +289,35 @@ cdef class TransitionSystem: self.cfg.update(msg['cfg']) self.initialize_actions(labels) return self + + +cdef void c_apply_actions(TransitionSystem moves, StateC** states, const int* actions, + int batch_size) nogil: + cdef int i + cdef Transition action + cdef StateC* state + for i in range(batch_size): + state = states[i] + action = moves.c[actions[i]] + action.do(state, action.label) + state.history.push_back(action.clas) + + +cdef void c_transition_batch(TransitionSystem moves, StateC** states, const float* scores, + int nr_class, int batch_size) nogil: + is_valid = calloc(moves.n_moves, sizeof(int)) + cdef int i, guess + cdef Transition action + for i in range(batch_size): + moves.set_valid(is_valid, states[i]) + guess = arg_max_if_valid(&scores[i*nr_class], is_valid, nr_class) + if guess == -1: + # This shouldn't happen, but it's hard to raise an error here, + # and we don't want to infinite loop. So, force to end state. 
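A NumPy restatement (editorial sketch) of what `c_transition_batch` does per state: mask out invalid actions, take the argmax of what remains, and signal the caller to force the state final when no action is valid at all.

import numpy as np

def choose_action(scores, is_valid):
    if not is_valid.any():
        return -1                          # caller force-finalizes the state
    masked = np.where(is_valid.astype(bool), scores, -np.inf)
    return int(masked.argmax())

assert choose_action(np.array([0.2, 0.9, 0.4]), np.array([1, 0, 1])) == 2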
+ states[i].force_final() + else: + action = moves.c[guess] + action.do(states[i], action.label) + states[i].history.push_back(guess) + free(is_valid) + diff --git a/spacy/pipeline/dep_parser.pyx b/spacy/pipeline/dep_parser.py similarity index 97% rename from spacy/pipeline/dep_parser.pyx rename to spacy/pipeline/dep_parser.py index e5f686158..f6689e017 100644 --- a/spacy/pipeline/dep_parser.pyx +++ b/spacy/pipeline/dep_parser.py @@ -4,8 +4,8 @@ from typing import Optional, Iterable, Callable from thinc.api import Model, Config from ._parser_internals.transition_system import TransitionSystem -from .transition_parser cimport Parser -from ._parser_internals.arc_eager cimport ArcEager +from .transition_parser import Parser +from ._parser_internals.arc_eager import ArcEager from .functions import merge_subtokens from ..language import Language @@ -18,12 +18,11 @@ from ..util import registry default_model_config = """ [model] -@architectures = "spacy.TransitionBasedParser.v2" +@architectures = "spacy.TransitionBasedParser.v3" state_type = "parser" extra_state_tokens = false hidden_width = 64 maxout_pieces = 2 -use_upper = true [model.tok2vec] @architectures = "spacy.HashEmbedCNN.v2" @@ -123,6 +122,7 @@ def make_parser( scorer=scorer, ) + @Language.factory( "beam_parser", assigns=["token.dep", "token.head", "token.is_sent_start", "doc.sents"], @@ -228,6 +228,7 @@ def parser_score(examples, **kwargs): DOCS: https://spacy.io/api/dependencyparser#score """ + def has_sents(doc): return doc.has_annotation("SENT_START") @@ -235,8 +236,11 @@ def parser_score(examples, **kwargs): dep = getattr(token, attr) dep = token.vocab.strings.as_string(dep).lower() return dep + results = {} - results.update(Scorer.score_spans(examples, "sents", has_annotation=has_sents, **kwargs)) + results.update( + Scorer.score_spans(examples, "sents", has_annotation=has_sents, **kwargs) + ) kwargs.setdefault("getter", dep_getter) kwargs.setdefault("ignore_labels", ("p", "punct")) results.update(Scorer.score_deps(examples, "dep", **kwargs)) @@ -249,11 +253,12 @@ def make_parser_scorer(): return parser_score -cdef class DependencyParser(Parser): +class DependencyParser(Parser): """Pipeline component for dependency parsing. DOCS: https://spacy.io/api/dependencyparser """ + TransitionSystem = ArcEager def __init__( @@ -273,8 +278,7 @@ cdef class DependencyParser(Parser): incorrect_spans_key=None, scorer=parser_score, ): - """Create a DependencyParser. - """ + """Create a DependencyParser.""" super().__init__( vocab, model, diff --git a/spacy/pipeline/edit_tree_lemmatizer.py b/spacy/pipeline/edit_tree_lemmatizer.py index 2a2242aa4..3198b7509 100644 --- a/spacy/pipeline/edit_tree_lemmatizer.py +++ b/spacy/pipeline/edit_tree_lemmatizer.py @@ -5,7 +5,7 @@ from itertools import islice import numpy as np import srsly -from thinc.api import Config, Model +from thinc.api import Config, Model, SequenceCategoricalCrossentropy, NumpyOps from thinc.types import ArrayXd, Floats2d, Ints1d from thinc.legacy import LegacySequenceCategoricalCrossentropy @@ -22,6 +22,8 @@ from .. import util ActivationsT = Dict[str, Union[List[Floats2d], List[Ints1d]]] +# The cutoff value of *top_k* above which an alternative method is used to process guesses. 
+TOP_K_GUARDRAIL = 20 default_model_config = """ @@ -125,6 +127,7 @@ class EditTreeLemmatizer(TrainablePipe): self.cfg: Dict[str, Any] = {"labels": []} self.scorer = scorer self.save_activations = save_activations + self.numpy_ops = NumpyOps() def get_loss( self, examples: Iterable[Example], scores: List[Floats2d] @@ -140,7 +143,7 @@ class EditTreeLemmatizer(TrainablePipe): for (predicted, gold_lemma) in zip( eg.predicted, eg.get_aligned("LEMMA", as_string=True) ): - if gold_lemma is None: + if gold_lemma is None or gold_lemma == "": label = -1 else: tree_id = self.trees.add(predicted.text, gold_lemma) @@ -155,7 +158,38 @@ class EditTreeLemmatizer(TrainablePipe): return float(loss), d_scores + def get_teacher_student_loss( + self, teacher_scores: List[Floats2d], student_scores: List[Floats2d] + ) -> Tuple[float, List[Floats2d]]: + """Calculate the loss and its gradient for a batch of student + scores, relative to teacher scores. + + teacher_scores: Scores representing the teacher model's predictions. + student_scores: Scores representing the student model's predictions. + + RETURNS (Tuple[float, float]): The loss and the gradient. + + DOCS: https://spacy.io/api/edittreelemmatizer#get_teacher_student_loss + """ + loss_func = LegacySequenceCategoricalCrossentropy(normalize=False) + d_scores, loss = loss_func(student_scores, teacher_scores) + if self.model.ops.xp.isnan(loss): + raise ValueError(Errors.E910.format(name=self.name)) + return float(loss), d_scores + def predict(self, docs: Iterable[Doc]) -> ActivationsT: + if self.top_k == 1: + scores2guesses = self._scores2guesses_top_k_equals_1 + elif self.top_k <= TOP_K_GUARDRAIL: + scores2guesses = self._scores2guesses_top_k_greater_1 + else: + scores2guesses = self._scores2guesses_top_k_guardrail + # The behaviour of *_scores2guesses_top_k_greater_1()* is efficient for values + # of *top_k>1* that are likely to be useful when the edit tree lemmatizer is used + # for its principal purpose of lemmatizing tokens. However, the code could also + # be used for other purposes, and with very large values of *top_k* the method + # becomes inefficient. In such cases, *_scores2guesses_top_k_guardrail()* is used + # instead. n_docs = len(list(docs)) if not any(len(doc) for doc in docs): # Handle cases where there are no tokens in any docs. 
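As a rough, self-contained illustration of the dispatch between the _scores2guesses_* methods in predict() above (toy scores, not spaCy's internal arrays): for small top_k the candidates are found by repeatedly taking the argmax and masking it out, while above TOP_K_GUARDRAIL a single argsort over all labels is used instead. Both strategies return the same ranking; they only differ in cost.

import numpy as np

scores = np.asarray([0.05, 0.60, 0.10, 0.25], dtype="float32")  # toy per-label scores
top_k = 2

# Small top_k: repeatedly take the best remaining label and mask it out.
masked = scores.copy()
iterative = []
for _ in range(top_k):
    best = int(masked.argmax())
    iterative.append(best)
    masked[best] = np.finfo("float32").min

# Guardrail path for very large top_k: sort once and slice the top labels.
by_sort = np.argsort(scores)[: -top_k - 1 : -1].tolist()

assert iterative == by_sort == [1, 3]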
@@ -170,20 +204,52 @@ class EditTreeLemmatizer(TrainablePipe): return {"probabilities": scores, "tree_ids": guesses} scores = self.model.predict(docs) assert len(scores) == n_docs - guesses = self._scores2guesses(docs, scores) + guesses = scores2guesses(docs, scores) assert len(guesses) == n_docs return {"probabilities": scores, "tree_ids": guesses} - def _scores2guesses(self, docs, scores): + def _scores2guesses_top_k_equals_1(self, docs, scores): guesses = [] for doc, doc_scores in zip(docs, scores): - if self.top_k == 1: - doc_guesses = doc_scores.argmax(axis=1).reshape(-1, 1) - else: - doc_guesses = np.argsort(doc_scores)[..., : -self.top_k - 1 : -1] + doc_guesses = doc_scores.argmax(axis=1) + doc_guesses = self.numpy_ops.asarray(doc_guesses) - if not isinstance(doc_guesses, np.ndarray): - doc_guesses = doc_guesses.get() + doc_compat_guesses = [] + for i, token in enumerate(doc): + tree_id = self.cfg["labels"][doc_guesses[i]] + if self.trees.apply(tree_id, token.text) is not None: + doc_compat_guesses.append(tree_id) + else: + doc_compat_guesses.append(-1) + guesses.append(np.array(doc_compat_guesses)) + + return guesses + + def _scores2guesses_top_k_greater_1(self, docs, scores): + guesses = [] + top_k = min(self.top_k, len(self.labels)) + for doc, doc_scores in zip(docs, scores): + doc_scores = self.numpy_ops.asarray(doc_scores) + doc_compat_guesses = [] + for i, token in enumerate(doc): + for _ in range(top_k): + candidate = int(doc_scores[i].argmax()) + candidate_tree_id = self.cfg["labels"][candidate] + if self.trees.apply(candidate_tree_id, token.text) is not None: + doc_compat_guesses.append(candidate_tree_id) + break + doc_scores[i, candidate] = np.finfo(np.float32).min + else: + doc_compat_guesses.append(-1) + guesses.append(np.array(doc_compat_guesses)) + + return guesses + + def _scores2guesses_top_k_guardrail(self, docs, scores): + guesses = [] + for doc, doc_scores in zip(docs, scores): + doc_guesses = np.argsort(doc_scores)[..., : -self.top_k - 1 : -1] + doc_guesses = self.numpy_ops.asarray(doc_guesses) doc_compat_guesses = [] for token, candidates in zip(doc, doc_guesses): diff --git a/spacy/pipeline/entity_linker.py b/spacy/pipeline/entity_linker.py index 19c355238..6fe322b62 100644 --- a/spacy/pipeline/entity_linker.py +++ b/spacy/pipeline/entity_linker.py @@ -13,7 +13,6 @@ from ..kb import KnowledgeBase, Candidate from ..ml import empty_kb from ..tokens import Doc, Span from .pipe import deserialize_config -from .legacy.entity_linker import EntityLinker_v1 from .trainable_pipe import TrainablePipe from ..language import Language from ..vocab import Vocab @@ -120,6 +119,12 @@ def make_entity_linker( """ if not model.attrs.get("include_span_maker", False): + try: + from spacy_legacy.components.entity_linker import EntityLinker_v1 + except: + raise ImportError( + "In order to use v1 of the EntityLinker, you must use spacy-legacy>=3.0.12." + ) # The only difference in arguments here is that use_gold_ents and threshold aren't available. 
return EntityLinker_v1( nlp.vocab, @@ -453,7 +458,11 @@ class EntityLinker(TrainablePipe): docs_ents: List[Ragged] = [] docs_scores: List[Ragged] = [] if not docs: - return {KNOWLEDGE_BASE_IDS: final_kb_ids, "ents": docs_ents, "scores": docs_scores} + return { + KNOWLEDGE_BASE_IDS: final_kb_ids, + "ents": docs_ents, + "scores": docs_scores, + } if isinstance(docs, Doc): docs = [docs] for doc in docs: @@ -585,7 +594,11 @@ class EntityLinker(TrainablePipe): method="predict", msg="result variables not of equal length" ) raise RuntimeError(err) - return {KNOWLEDGE_BASE_IDS: final_kb_ids, "ents": docs_ents, "scores": docs_scores} + return { + KNOWLEDGE_BASE_IDS: final_kb_ids, + "ents": docs_ents, + "scores": docs_scores, + } def set_annotations(self, docs: Iterable[Doc], activations: ActivationsT) -> None: """Modify a batch of documents, using pre-computed scores. diff --git a/spacy/pipeline/legacy/__init__.py b/spacy/pipeline/legacy/__init__.py deleted file mode 100644 index f216840dc..000000000 --- a/spacy/pipeline/legacy/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .entity_linker import EntityLinker_v1 - -__all__ = ["EntityLinker_v1"] diff --git a/spacy/pipeline/legacy/entity_linker.py b/spacy/pipeline/legacy/entity_linker.py deleted file mode 100644 index c14dfa1db..000000000 --- a/spacy/pipeline/legacy/entity_linker.py +++ /dev/null @@ -1,422 +0,0 @@ -# This file is present to provide a prior version of the EntityLinker component -# for backwards compatability. For details see #9669. - -from typing import Optional, Iterable, Callable, Dict, Union, List, Any -from thinc.types import Floats2d -from pathlib import Path -from itertools import islice -import srsly -import random -from thinc.api import CosineDistance, Model, Optimizer -from thinc.api import set_dropout_rate -import warnings - -from ...kb import KnowledgeBase, Candidate -from ...ml import empty_kb -from ...tokens import Doc, Span -from ..pipe import deserialize_config -from ..trainable_pipe import TrainablePipe -from ...language import Language -from ...vocab import Vocab -from ...training import Example, validate_examples, validate_get_examples -from ...errors import Errors, Warnings -from ...util import SimpleFrozenList -from ... import util -from ...scorer import Scorer - -# See #9050 -BACKWARD_OVERWRITE = True - - -def entity_linker_score(examples, **kwargs): - return Scorer.score_links(examples, negative_labels=[EntityLinker_v1.NIL], **kwargs) - - -class EntityLinker_v1(TrainablePipe): - """Pipeline component for named entity linking. - - DOCS: https://spacy.io/api/entitylinker - """ - - NIL = "NIL" # string used to refer to a non-existing link - - def __init__( - self, - vocab: Vocab, - model: Model, - name: str = "entity_linker", - *, - labels_discard: Iterable[str], - n_sents: int, - incl_prior: bool, - incl_context: bool, - entity_vector_length: int, - get_candidates: Callable[[KnowledgeBase, Span], Iterable[Candidate]], - overwrite: bool = BACKWARD_OVERWRITE, - scorer: Optional[Callable] = entity_linker_score, - ) -> None: - """Initialize an entity linker. - - vocab (Vocab): The shared vocabulary. - model (thinc.api.Model): The Thinc Model powering the pipeline component. - name (str): The component instance name, used to add entries to the - losses during training. - labels_discard (Iterable[str]): NER labels that will automatically get a "NIL" prediction. - n_sents (int): The number of neighbouring sentences to take into account. 
- incl_prior (bool): Whether or not to include prior probabilities from the KB in the model. - incl_context (bool): Whether or not to include the local context in the model. - entity_vector_length (int): Size of encoding vectors in the KB. - get_candidates (Callable[[KnowledgeBase, Span], Iterable[Candidate]]): Function that - produces a list of candidates, given a certain knowledge base and a textual mention. - scorer (Optional[Callable]): The scoring method. Defaults to Scorer.score_links. - DOCS: https://spacy.io/api/entitylinker#init - """ - self.vocab = vocab - self.model = model - self.name = name - self.labels_discard = list(labels_discard) - self.n_sents = n_sents - self.incl_prior = incl_prior - self.incl_context = incl_context - self.get_candidates = get_candidates - self.cfg: Dict[str, Any] = {"overwrite": overwrite} - self.distance = CosineDistance(normalize=False) - # how many neighbour sentences to take into account - # create an empty KB by default. If you want to load a predefined one, specify it in 'initialize'. - self.kb = empty_kb(entity_vector_length)(self.vocab) - self.scorer = scorer - - def set_kb(self, kb_loader: Callable[[Vocab], KnowledgeBase]): - """Define the KB of this pipe by providing a function that will - create it using this object's vocab.""" - if not callable(kb_loader): - raise ValueError(Errors.E885.format(arg_type=type(kb_loader))) - - self.kb = kb_loader(self.vocab) - - def validate_kb(self) -> None: - # Raise an error if the knowledge base is not initialized. - if self.kb is None: - raise ValueError(Errors.E1018.format(name=self.name)) - if len(self.kb) == 0: - raise ValueError(Errors.E139.format(name=self.name)) - - def initialize( - self, - get_examples: Callable[[], Iterable[Example]], - *, - nlp: Optional[Language] = None, - kb_loader: Optional[Callable[[Vocab], KnowledgeBase]] = None, - ): - """Initialize the pipe for training, using a representative set - of data examples. - - get_examples (Callable[[], Iterable[Example]]): Function that - returns a representative sample of gold-standard Example objects. - nlp (Language): The current nlp object the component is part of. - kb_loader (Callable[[Vocab], KnowledgeBase]): A function that creates an InMemoryLookupKB from a Vocab instance. - Note that providing this argument, will overwrite all data accumulated in the current KB. - Use this only when loading a KB as-such from file. - - DOCS: https://spacy.io/api/entitylinker#initialize - """ - validate_get_examples(get_examples, "EntityLinker_v1.initialize") - if kb_loader is not None: - self.set_kb(kb_loader) - self.validate_kb() - nO = self.kb.entity_vector_length - doc_sample = [] - vector_sample = [] - for example in islice(get_examples(), 10): - doc_sample.append(example.x) - vector_sample.append(self.model.ops.alloc1f(nO)) - assert len(doc_sample) > 0, Errors.E923.format(name=self.name) - assert len(vector_sample) > 0, Errors.E923.format(name=self.name) - self.model.initialize( - X=doc_sample, Y=self.model.ops.asarray(vector_sample, dtype="float32") - ) - - def update( - self, - examples: Iterable[Example], - *, - drop: float = 0.0, - sgd: Optional[Optimizer] = None, - losses: Optional[Dict[str, float]] = None, - ) -> Dict[str, float]: - """Learn from a batch of documents and gold-standard information, - updating the pipe's model. Delegates to predict and get_loss. - - examples (Iterable[Example]): A batch of Example objects. - drop (float): The dropout rate. - sgd (thinc.api.Optimizer): The optimizer. 
- losses (Dict[str, float]): Optional record of the loss during training. - Updated using the component name as the key. - RETURNS (Dict[str, float]): The updated losses dictionary. - - DOCS: https://spacy.io/api/entitylinker#update - """ - self.validate_kb() - if losses is None: - losses = {} - losses.setdefault(self.name, 0.0) - if not examples: - return losses - validate_examples(examples, "EntityLinker_v1.update") - sentence_docs = [] - for eg in examples: - sentences = [s for s in eg.reference.sents] - kb_ids = eg.get_aligned("ENT_KB_ID", as_string=True) - for ent in eg.reference.ents: - # KB ID of the first token is the same as the whole span - kb_id = kb_ids[ent.start] - if kb_id: - try: - # find the sentence in the list of sentences. - sent_index = sentences.index(ent.sent) - except AttributeError: - # Catch the exception when ent.sent is None and provide a user-friendly warning - raise RuntimeError(Errors.E030) from None - # get n previous sentences, if there are any - start_sentence = max(0, sent_index - self.n_sents) - # get n posterior sentences, or as many < n as there are - end_sentence = min(len(sentences) - 1, sent_index + self.n_sents) - # get token positions - start_token = sentences[start_sentence].start - end_token = sentences[end_sentence].end - # append that span as a doc to training - sent_doc = eg.predicted[start_token:end_token].as_doc() - sentence_docs.append(sent_doc) - set_dropout_rate(self.model, drop) - if not sentence_docs: - warnings.warn(Warnings.W093.format(name="Entity Linker")) - return losses - sentence_encodings, bp_context = self.model.begin_update(sentence_docs) - loss, d_scores = self.get_loss( - sentence_encodings=sentence_encodings, examples=examples - ) - bp_context(d_scores) - if sgd is not None: - self.finish_update(sgd) - losses[self.name] += loss - return losses - - def get_loss(self, examples: Iterable[Example], sentence_encodings: Floats2d): - validate_examples(examples, "EntityLinker_v1.get_loss") - entity_encodings = [] - for eg in examples: - kb_ids = eg.get_aligned("ENT_KB_ID", as_string=True) - for ent in eg.reference.ents: - kb_id = kb_ids[ent.start] - if kb_id: - entity_encoding = self.kb.get_vector(kb_id) - entity_encodings.append(entity_encoding) - entity_encodings = self.model.ops.asarray2f(entity_encodings) - if sentence_encodings.shape != entity_encodings.shape: - err = Errors.E147.format( - method="get_loss", msg="gold entities do not match up" - ) - raise RuntimeError(err) - gradients = self.distance.get_grad(sentence_encodings, entity_encodings) - loss = self.distance.get_loss(sentence_encodings, entity_encodings) - loss = loss / len(entity_encodings) - return float(loss), gradients - - def predict(self, docs: Iterable[Doc]) -> List[str]: - """Apply the pipeline's model to a batch of docs, without modifying them. - Returns the KB IDs for each entity in each doc, including NIL if there is - no prediction. - - docs (Iterable[Doc]): The documents to predict. - RETURNS (List[str]): The models prediction for each document. 
- - DOCS: https://spacy.io/api/entitylinker#predict - """ - self.validate_kb() - entity_count = 0 - final_kb_ids: List[str] = [] - if not docs: - return final_kb_ids - if isinstance(docs, Doc): - docs = [docs] - for i, doc in enumerate(docs): - sentences = [s for s in doc.sents] - if len(doc) > 0: - # Looping through each entity (TODO: rewrite) - for ent in doc.ents: - sent = ent.sent - sent_index = sentences.index(sent) - assert sent_index >= 0 - # get n_neighbour sentences, clipped to the length of the document - start_sentence = max(0, sent_index - self.n_sents) - end_sentence = min(len(sentences) - 1, sent_index + self.n_sents) - start_token = sentences[start_sentence].start - end_token = sentences[end_sentence].end - sent_doc = doc[start_token:end_token].as_doc() - # currently, the context is the same for each entity in a sentence (should be refined) - xp = self.model.ops.xp - if self.incl_context: - sentence_encoding = self.model.predict([sent_doc])[0] - sentence_encoding_t = sentence_encoding.T - sentence_norm = xp.linalg.norm(sentence_encoding_t) - entity_count += 1 - if ent.label_ in self.labels_discard: - # ignoring this entity - setting to NIL - final_kb_ids.append(self.NIL) - else: - candidates = list(self.get_candidates(self.kb, ent)) - if not candidates: - # no prediction possible for this entity - setting to NIL - final_kb_ids.append(self.NIL) - elif len(candidates) == 1: - # shortcut for efficiency reasons: take the 1 candidate - final_kb_ids.append(candidates[0].entity_) - else: - random.shuffle(candidates) - # set all prior probabilities to 0 if incl_prior=False - prior_probs = xp.asarray([c.prior_prob for c in candidates]) - if not self.incl_prior: - prior_probs = xp.asarray([0.0 for _ in candidates]) - scores = prior_probs - # add in similarity from the context - if self.incl_context: - entity_encodings = xp.asarray( - [c.entity_vector for c in candidates] - ) - entity_norm = xp.linalg.norm(entity_encodings, axis=1) - if len(entity_encodings) != len(prior_probs): - raise RuntimeError( - Errors.E147.format( - method="predict", - msg="vectors not of equal length", - ) - ) - # cosine similarity - sims = xp.dot(entity_encodings, sentence_encoding_t) / ( - sentence_norm * entity_norm - ) - if sims.shape != prior_probs.shape: - raise ValueError(Errors.E161) - scores = prior_probs + sims - (prior_probs * sims) - best_index = scores.argmax().item() - best_candidate = candidates[best_index] - final_kb_ids.append(best_candidate.entity_) - if not (len(final_kb_ids) == entity_count): - err = Errors.E147.format( - method="predict", msg="result variables not of equal length" - ) - raise RuntimeError(err) - return final_kb_ids - - def set_annotations(self, docs: Iterable[Doc], kb_ids: List[str]) -> None: - """Modify a batch of documents, using pre-computed scores. - - docs (Iterable[Doc]): The documents to modify. - kb_ids (List[str]): The IDs to set, produced by EntityLinker.predict. - - DOCS: https://spacy.io/api/entitylinker#set_annotations - """ - count_ents = len([ent for doc in docs for ent in doc.ents]) - if count_ents != len(kb_ids): - raise ValueError(Errors.E148.format(ents=count_ents, ids=len(kb_ids))) - i = 0 - overwrite = self.cfg["overwrite"] - for doc in docs: - for ent in doc.ents: - kb_id = kb_ids[i] - i += 1 - for token in ent: - if token.ent_kb_id == 0 or overwrite: - token.ent_kb_id_ = kb_id - - def to_bytes(self, *, exclude=tuple()): - """Serialize the pipe to a bytestring. - - exclude (Iterable[str]): String names of serialization fields to exclude. 
- RETURNS (bytes): The serialized object. - - DOCS: https://spacy.io/api/entitylinker#to_bytes - """ - self._validate_serialization_attrs() - serialize = {} - if hasattr(self, "cfg") and self.cfg is not None: - serialize["cfg"] = lambda: srsly.json_dumps(self.cfg) - serialize["vocab"] = lambda: self.vocab.to_bytes(exclude=exclude) - serialize["kb"] = self.kb.to_bytes - serialize["model"] = self.model.to_bytes - return util.to_bytes(serialize, exclude) - - def from_bytes(self, bytes_data, *, exclude=tuple()): - """Load the pipe from a bytestring. - - exclude (Iterable[str]): String names of serialization fields to exclude. - RETURNS (TrainablePipe): The loaded object. - - DOCS: https://spacy.io/api/entitylinker#from_bytes - """ - self._validate_serialization_attrs() - - def load_model(b): - try: - self.model.from_bytes(b) - except AttributeError: - raise ValueError(Errors.E149) from None - - deserialize = {} - if hasattr(self, "cfg") and self.cfg is not None: - deserialize["cfg"] = lambda b: self.cfg.update(srsly.json_loads(b)) - deserialize["vocab"] = lambda b: self.vocab.from_bytes(b, exclude=exclude) - deserialize["kb"] = lambda b: self.kb.from_bytes(b) - deserialize["model"] = load_model - util.from_bytes(bytes_data, deserialize, exclude) - return self - - def to_disk( - self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList() - ) -> None: - """Serialize the pipe to disk. - - path (str / Path): Path to a directory. - exclude (Iterable[str]): String names of serialization fields to exclude. - - DOCS: https://spacy.io/api/entitylinker#to_disk - """ - serialize = {} - serialize["vocab"] = lambda p: self.vocab.to_disk(p, exclude=exclude) - serialize["cfg"] = lambda p: srsly.write_json(p, self.cfg) - serialize["kb"] = lambda p: self.kb.to_disk(p) - serialize["model"] = lambda p: self.model.to_disk(p) - util.to_disk(path, serialize, exclude) - - def from_disk( - self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList() - ) -> "EntityLinker_v1": - """Load the pipe from disk. Modifies the object in place and returns it. - - path (str / Path): Path to a directory. - exclude (Iterable[str]): String names of serialization fields to exclude. - RETURNS (EntityLinker): The modified EntityLinker object. 
- - DOCS: https://spacy.io/api/entitylinker#from_disk - """ - - def load_model(p): - try: - with p.open("rb") as infile: - self.model.from_bytes(infile.read()) - except AttributeError: - raise ValueError(Errors.E149) from None - - deserialize: Dict[str, Callable[[Any], Any]] = {} - deserialize["cfg"] = lambda p: self.cfg.update(deserialize_config(p)) - deserialize["vocab"] = lambda p: self.vocab.from_disk(p, exclude=exclude) - deserialize["kb"] = lambda p: self.kb.from_disk(p) - deserialize["model"] = load_model - util.from_disk(path, deserialize, exclude) - return self - - def rehearse(self, examples, *, sgd=None, losses=None, **config): - raise NotImplementedError - - def add_label(self, label): - raise NotImplementedError diff --git a/spacy/pipeline/ner.pyx b/spacy/pipeline/ner.py similarity index 92% rename from spacy/pipeline/ner.pyx rename to spacy/pipeline/ner.py index 25f48c9f8..7e44b2835 100644 --- a/spacy/pipeline/ner.pyx +++ b/spacy/pipeline/ner.py @@ -4,22 +4,22 @@ from typing import Optional, Iterable, Callable from thinc.api import Model, Config from ._parser_internals.transition_system import TransitionSystem -from .transition_parser cimport Parser -from ._parser_internals.ner cimport BiluoPushDown +from .transition_parser import Parser +from ._parser_internals.ner import BiluoPushDown from ..language import Language from ..scorer import get_ner_prf, PRFScore +from ..training import validate_examples from ..util import registry from ..training import remove_bilu_prefix default_model_config = """ [model] -@architectures = "spacy.TransitionBasedParser.v2" +@architectures = "spacy.TransitionBasedParser.v3" state_type = "ner" extra_state_tokens = false hidden_width = 64 maxout_pieces = 2 -use_upper = true [model.tok2vec] @architectures = "spacy.HashEmbedCNN.v2" @@ -44,8 +44,12 @@ DEFAULT_NER_MODEL = Config().from_str(default_model_config)["model"] "incorrect_spans_key": None, "scorer": {"@scorers": "spacy.ner_scorer.v1"}, }, - default_score_weights={"ents_f": 1.0, "ents_p": 0.0, "ents_r": 0.0, "ents_per_type": None}, - + default_score_weights={ + "ents_f": 1.0, + "ents_p": 0.0, + "ents_r": 0.0, + "ents_per_type": None, + }, ) def make_ner( nlp: Language, @@ -98,6 +102,7 @@ def make_ner( scorer=scorer, ) + @Language.factory( "beam_ner", assigns=["doc.ents", "token.ent_iob", "token.ent_type"], @@ -111,7 +116,12 @@ def make_ner( "incorrect_spans_key": None, "scorer": None, }, - default_score_weights={"ents_f": 1.0, "ents_p": 0.0, "ents_r": 0.0, "ents_per_type": None}, + default_score_weights={ + "ents_f": 1.0, + "ents_p": 0.0, + "ents_r": 0.0, + "ents_per_type": None, + }, ) def make_beam_ner( nlp: Language, @@ -185,11 +195,12 @@ def make_ner_scorer(): return ner_score -cdef class EntityRecognizer(Parser): +class EntityRecognizer(Parser): """Pipeline component for named entity recognition. DOCS: https://spacy.io/api/entityrecognizer """ + TransitionSystem = BiluoPushDown def __init__( @@ -207,15 +218,14 @@ cdef class EntityRecognizer(Parser): incorrect_spans_key=None, scorer=ner_score, ): - """Create an EntityRecognizer. 
- """ + """Create an EntityRecognizer.""" super().__init__( vocab, model, name, moves, update_with_oracle_cut_size=update_with_oracle_cut_size, - min_action_freq=1, # not relevant for NER + min_action_freq=1, # not relevant for NER learn_tokens=False, # not relevant for NER beam_width=beam_width, beam_density=beam_density, @@ -242,8 +252,11 @@ cdef class EntityRecognizer(Parser): def labels(self): # Get the labels from the model by looking at the available moves, e.g. # B-PERSON, I-PERSON, L-PERSON, U-PERSON - labels = set(remove_bilu_prefix(move) for move in self.move_names - if move[0] in ("B", "I", "L", "U")) + labels = set( + remove_bilu_prefix(move) + for move in self.move_names + if move[0] in ("B", "I", "L", "U") + ) return tuple(sorted(labels)) def scored_ents(self, beams): diff --git a/spacy/pipeline/pipe.pyx b/spacy/pipeline/pipe.pyx index c5650382b..8b8fdc361 100644 --- a/spacy/pipeline/pipe.pyx +++ b/spacy/pipeline/pipe.pyx @@ -87,6 +87,10 @@ cdef class Pipe: return self.scorer(examples, **scorer_kwargs) return {} + @property + def is_distillable(self) -> bool: + return False + @property def is_trainable(self) -> bool: return False diff --git a/spacy/pipeline/spancat.py b/spacy/pipeline/spancat.py index 7a875dda9..33e1c87dc 100644 --- a/spacy/pipeline/spancat.py +++ b/spacy/pipeline/spancat.py @@ -1,12 +1,11 @@ from typing import List, Dict, Callable, Tuple, Optional, Iterable, Any, cast -from typing import Union +from typing import Union, Protocol, runtime_checkable from thinc.api import Config, Model, get_current_ops, set_dropout_rate, Ops from thinc.api import Optimizer from thinc.types import Ragged, Ints2d, Floats2d import numpy -from ..compat import Protocol, runtime_checkable from ..scorer import Scorer from ..language import Language from .trainable_pipe import TrainablePipe diff --git a/spacy/pipeline/tagger.pyx b/spacy/pipeline/tagger.pyx index e12f116af..a6be51c3c 100644 --- a/spacy/pipeline/tagger.pyx +++ b/spacy/pipeline/tagger.pyx @@ -1,5 +1,6 @@ # cython: infer_types=True, profile=True, binding=True from typing import Callable, Dict, Iterable, List, Optional, Union +from typing import Tuple import numpy import srsly from thinc.api import Model, set_dropout_rate, Config @@ -245,7 +246,6 @@ class Tagger(TrainablePipe): DOCS: https://spacy.io/api/tagger#rehearse """ - loss_func = LegacySequenceCategoricalCrossentropy() if losses is None: losses = {} losses.setdefault(self.name, 0.0) @@ -259,12 +259,32 @@ class Tagger(TrainablePipe): set_dropout_rate(self.model, drop) tag_scores, bp_tag_scores = self.model.begin_update(docs) tutor_tag_scores, _ = self._rehearsal_model.begin_update(docs) - grads, loss = loss_func(tag_scores, tutor_tag_scores) + loss, grads = self.get_teacher_student_loss(tutor_tag_scores, tag_scores) bp_tag_scores(grads) - self.finish_update(sgd) + if sgd is not None: + self.finish_update(sgd) losses[self.name] += loss return losses + def get_teacher_student_loss( + self, teacher_scores: List[Floats2d], student_scores: List[Floats2d] + ) -> Tuple[float, List[Floats2d]]: + """Calculate the loss and its gradient for a batch of student + scores, relative to teacher scores. + + teacher_scores: Scores representing the teacher model's predictions. + student_scores: Scores representing the student model's predictions. + + RETURNS (Tuple[float, float]): The loss and the gradient. 
+ + DOCS: https://spacy.io/api/tagger#get_teacher_student_loss + """ + loss_func = LegacySequenceCategoricalCrossentropy(normalize=False) + d_scores, loss = loss_func(student_scores, teacher_scores) + if self.model.ops.xp.isnan(loss): + raise ValueError(Errors.E910.format(name=self.name)) + return float(loss), d_scores + def get_loss(self, examples, scores): """Find the loss and gradient of loss for the batch of documents and their predicted scores. diff --git a/spacy/pipeline/trainable_pipe.pyx b/spacy/pipeline/trainable_pipe.pyx index 5bba34e4a..fcffd11ee 100644 --- a/spacy/pipeline/trainable_pipe.pyx +++ b/spacy/pipeline/trainable_pipe.pyx @@ -6,7 +6,7 @@ import warnings from ..tokens.doc cimport Doc -from ..training import validate_examples +from ..training import validate_examples, validate_distillation_examples from ..errors import Errors, Warnings from .pipe import Pipe, deserialize_config from .. import util @@ -56,6 +56,53 @@ cdef class TrainablePipe(Pipe): except Exception as e: error_handler(self.name, self, [doc], e) + + def distill(self, + teacher_pipe: Optional["TrainablePipe"], + examples: Iterable["Example"], + *, + drop: float=0.0, + sgd: Optional[Optimizer]=None, + losses: Optional[Dict[str, float]]=None) -> Dict[str, float]: + """Train a pipe (the student) on the predictions of another pipe + (the teacher). The student is typically trained on the probability + distribution of the teacher, but details may differ per pipe. + + teacher_pipe (Optional[TrainablePipe]): The teacher pipe to learn + from. + examples (Iterable[Example]): Distillation examples. The reference + (teacher) and predicted (student) docs must have the same number of + tokens and the same orthography. + drop (float): dropout rate. + sgd (Optional[Optimizer]): An optimizer. Will be created via + create_optimizer if not set. + losses (Optional[Dict[str, float]]): Optional record of loss during + distillation. + RETURNS: The updated losses dictionary. + + DOCS: https://spacy.io/api/pipe#distill + """ + # By default we require a teacher pipe, but there are downstream + # implementations that don't require a pipe. + if teacher_pipe is None: + raise ValueError(Errors.E4002.format(name=self.name)) + if losses is None: + losses = {} + losses.setdefault(self.name, 0.0) + validate_distillation_examples(examples, "TrainablePipe.distill") + set_dropout_rate(self.model, drop) + for node in teacher_pipe.model.walk(): + if node.name == "softmax": + node.attrs["softmax_normalize"] = True + teacher_scores = teacher_pipe.model.predict([eg.reference for eg in examples]) + student_scores, bp_student_scores = self.model.begin_update([eg.predicted for eg in examples]) + loss, d_scores = self.get_teacher_student_loss(teacher_scores, student_scores) + bp_student_scores(d_scores) + if sgd is not None: + self.finish_update(sgd) + losses[self.name] += loss + return losses + def pipe(self, stream: Iterable[Doc], *, batch_size: int=128) -> Iterator[Doc]: """Apply the pipe to a stream of documents. This usually happens under the hood when the nlp object is called on a text and all components are @@ -169,6 +216,19 @@ cdef class TrainablePipe(Pipe): """ raise NotImplementedError(Errors.E931.format(parent="TrainablePipe", method="get_loss", name=self.name)) + def get_teacher_student_loss(self, teacher_scores, student_scores): + """Calculate the loss and its gradient for a batch of student + scores, relative to teacher scores. + + teacher_scores: Scores representing the teacher model's predictions. 
+ student_scores: Scores representing the student model's predictions. + + RETURNS (Tuple[float, float]): The loss and the gradient. + + DOCS: https://spacy.io/api/pipe#get_teacher_student_loss + """ + raise NotImplementedError(Errors.E931.format(parent="TrainablePipe", method="get_teacher_student_loss", name=self.name)) + def create_optimizer(self) -> Optimizer: """Create an optimizer for the pipeline component. @@ -205,6 +265,14 @@ cdef class TrainablePipe(Pipe): """ raise NotImplementedError(Errors.E931.format(parent="Pipe", method="add_label", name=self.name)) + @property + def is_distillable(self) -> bool: + # Normally a pipe overrides `get_teacher_student_loss` to implement + # distillation. In more exceptional cases, a pipe can provide its + # own `distill` implementation. If neither of these methods is + # overridden, the pipe does not implement distillation. + return not (self.__class__.distill is TrainablePipe.distill and self.__class__.get_teacher_student_loss is TrainablePipe.get_teacher_student_loss) + @property def is_trainable(self) -> bool: return True diff --git a/spacy/pipeline/transition_parser.pxd b/spacy/pipeline/transition_parser.pxd deleted file mode 100644 index f20e69a6e..000000000 --- a/spacy/pipeline/transition_parser.pxd +++ /dev/null @@ -1,21 +0,0 @@ -from cymem.cymem cimport Pool -from thinc.backends.cblas cimport CBlas - -from ..vocab cimport Vocab -from .trainable_pipe cimport TrainablePipe -from ._parser_internals.transition_system cimport Transition, TransitionSystem -from ._parser_internals._state cimport StateC -from ..ml.parser_model cimport WeightsC, ActivationsC, SizesC - - -cdef class Parser(TrainablePipe): - cdef public object _rehearsal_model - cdef readonly TransitionSystem moves - cdef public object _multitasks - cdef object _cpu_ops - - cdef void _parseC(self, CBlas cblas, StateC** states, - WeightsC weights, SizesC sizes) nogil - - cdef void c_transition_batch(self, StateC** states, const float* scores, - int nr_class, int batch_size) nogil diff --git a/spacy/pipeline/transition_parser.pyx b/spacy/pipeline/transition_parser.pyx index 9d7b258c6..9e50dd7b2 100644 --- a/spacy/pipeline/transition_parser.pyx +++ b/spacy/pipeline/transition_parser.pyx @@ -1,5 +1,6 @@ # cython: infer_types=True, cdivision=True, boundscheck=False, binding=True from __future__ import print_function +from typing import Dict, Iterable, List, Optional, Tuple from cymem.cymem cimport Pool cimport numpy as np from itertools import islice @@ -7,25 +8,30 @@ from libcpp.vector cimport vector from libc.string cimport memset, memcpy from libc.stdlib cimport calloc, free import random +import contextlib import srsly -from thinc.api import get_ops, set_dropout_rate, CupyOps, NumpyOps +from thinc.api import get_ops, set_dropout_rate, CupyOps, NumpyOps, Optimizer +from thinc.api import chain, softmax_activation, use_ops, get_array_module +from thinc.legacy import LegacySequenceCategoricalCrossentropy +from thinc.types import Floats2d, Ints1d import numpy.random import numpy import warnings -from ._parser_internals.stateclass cimport StateClass +from ..ml.tb_framework import TransitionModelInputs +from ._parser_internals.stateclass cimport StateC, StateClass from ._parser_internals.search cimport Beam -from ..ml.parser_model cimport alloc_activations, free_activations -from ..ml.parser_model cimport predict_states, arg_max_if_valid -from ..ml.parser_model cimport WeightsC, ActivationsC, SizesC, cpu_log_loss -from ..ml.parser_model cimport get_c_weights, get_c_sizes from ..tokens.doc 
cimport Doc -from .trainable_pipe import TrainablePipe +from .trainable_pipe cimport TrainablePipe from ._parser_internals cimport _beam_utils from ._parser_internals import _beam_utils +from ..vocab cimport Vocab +from ._parser_internals.transition_system cimport Transition, TransitionSystem +from ..typedefs cimport weight_t from ..training import validate_examples, validate_get_examples +from ..training import validate_distillation_examples from ..errors import Errors, Warnings from .. import util @@ -33,7 +39,7 @@ from .. import util NUMPY_OPS = NumpyOps() -cdef class Parser(TrainablePipe): +class Parser(TrainablePipe): """ Base class of the DependencyParser and EntityRecognizer. """ @@ -133,8 +139,9 @@ cdef class Parser(TrainablePipe): @property def move_names(self): names = [] + cdef TransitionSystem moves = self.moves for i in range(self.moves.n_moves): - name = self.moves.move_name(self.moves.c[i].move, self.moves.c[i].label) + name = self.moves.move_name(moves.c[i].move, moves.c[i].label) # Explicitly removing the internal "U-" token used for blocking entities if name != "U-": names.append(name) @@ -203,6 +210,118 @@ cdef class Parser(TrainablePipe): # Defined in subclasses, to avoid circular import raise NotImplementedError + def distill(self, + teacher_pipe: Optional[TrainablePipe], + examples: Iterable["Example"], + *, + drop: float=0.0, + sgd: Optional[Optimizer]=None, + losses: Optional[Dict[str, float]]=None): + """Train a pipe (the student) on the predictions of another pipe + (the teacher). The student is trained on the transition probabilities + of the teacher. + + teacher_pipe (Optional[TrainablePipe]): The teacher pipe to learn + from. + examples (Iterable[Example]): Distillation examples. The reference + (teacher) and predicted (student) docs must have the same number of + tokens and the same orthography. + drop (float): dropout rate. + sgd (Optional[Optimizer]): An optimizer. Will be created via + create_optimizer if not set. + losses (Optional[Dict[str, float]]): Optional record of loss during + distillation. + RETURNS: The updated losses dictionary. + + DOCS: https://spacy.io/api/dependencyparser#distill + """ + if teacher_pipe is None: + raise ValueError(Errors.E4002.format(name=self.name)) + if losses is None: + losses = {} + losses.setdefault(self.name, 0.0) + + validate_distillation_examples(examples, "TransitionParser.distill") + + set_dropout_rate(self.model, drop) + + student_docs = [eg.predicted for eg in examples] + + max_moves = self.cfg["update_with_oracle_cut_size"] + if max_moves >= 1: + # Chop sequences into lengths of this many words, to make the + # batch uniform length. Since we do not have a gold standard + # sequence, we use the teacher's predictions as the gold + # standard. + max_moves = int(random.uniform(max_moves // 2, max_moves * 2)) + states = self._init_batch(teacher_pipe, student_docs, max_moves) + else: + states = self.moves.init_batch(student_docs) + + # We distill as follows: 1. we first let the student predict transition + # sequences (and the corresponding transition probabilities); (2) we + # let the teacher follow the student's predicted transition sequences + # to obtain the teacher's transition probabilities; (3) we compute the + # gradients of the student's transition distributions relative to the + # teacher's distributions. 
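# For intuition, a toy version of step (3) with made-up numbers (this mirrors
# get_teacher_student_loss below; the real inputs are the per-state score
# arrays produced by the transition model):
#
#     import numpy as np
#
#     def softmax(x):
#         e = np.exp(x - x.max(axis=-1, keepdims=True))
#         return e / e.sum(axis=-1, keepdims=True)
#
#     student = softmax(np.array([[2.0, 0.5, -1.0]]))  # student transition scores
#     teacher = softmax(np.array([[1.5, 1.0, -0.5]]))  # teacher scores, same actions
#     d_scores = student - teacher                     # gradient w.r.t. student scores
#     loss = (d_scores ** 2).sum() / d_scores.size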
+ + student_inputs = TransitionModelInputs(docs=student_docs, moves=self.moves, + max_moves=max_moves) + (student_states, student_scores), backprop_scores = self.model.begin_update(student_inputs) + actions = states2actions(student_states) + teacher_inputs = TransitionModelInputs(docs=[eg.reference for eg in examples], + moves=self.moves, actions=actions) + (_, teacher_scores) = teacher_pipe.model.predict(teacher_inputs) + + loss, d_scores = self.get_teacher_student_loss(teacher_scores, student_scores) + backprop_scores((student_states, d_scores)) + + if sgd is not None: + self.finish_update(sgd) + + losses[self.name] += loss + + return losses + + + def get_teacher_student_loss( + self, teacher_scores: List[Floats2d], student_scores: List[Floats2d], + normalize: bool=False, + ) -> Tuple[float, List[Floats2d]]: + """Calculate the loss and its gradient for a batch of student + scores, relative to teacher scores. + + teacher_scores: Scores representing the teacher model's predictions. + student_scores: Scores representing the student model's predictions. + + RETURNS (Tuple[float, float]): The loss and the gradient. + + DOCS: https://spacy.io/api/dependencyparser#get_teacher_student_loss + """ + + # We can't easily hook up a softmax layer in the parsing model, since + # the get_loss does additional masking. So, we could apply softmax + # manually here and use Thinc's cross-entropy loss. But it's a bit + # suboptimal, since we can have a lot of states that would result in + # many kernel launches. Futhermore the parsing model's backprop expects + # a XP array, so we'd have to concat the softmaxes anyway. So, like + # the get_loss implementation, we'll compute the loss and gradients + # ourselves. + + teacher_scores = self.model.ops.softmax(self.model.ops.xp.vstack(teacher_scores), + axis=-1, inplace=True) + student_scores = self.model.ops.softmax(self.model.ops.xp.vstack(student_scores), + axis=-1, inplace=True) + + assert teacher_scores.shape == student_scores.shape + + d_scores = student_scores - teacher_scores + if normalize: + d_scores /= d_scores.shape[0] + loss = (d_scores**2).sum() / d_scores.size + + return float(loss), d_scores + def init_multitask_objectives(self, get_examples, pipeline, **cfg): """Setup models for secondary objectives, to benefit from multi-task learning. This method is intended to be overridden by subclasses. @@ -223,9 +342,6 @@ cdef class Parser(TrainablePipe): stream: The sequence of documents to process. batch_size (int): Number of documents to accumulate into a working set. - error_handler (Callable[[str, List[Doc], Exception], Any]): Function that - deals with a failing batch of documents. The default function just reraises - the exception. YIELDS (Doc): Documents, in order. 
""" @@ -247,78 +363,29 @@ cdef class Parser(TrainablePipe): def predict(self, docs): if isinstance(docs, Doc): docs = [docs] + self._ensure_labels_are_added(docs) if not any(len(doc) for doc in docs): result = self.moves.init_batch(docs) return result - if self.cfg["beam_width"] == 1: - return self.greedy_parse(docs, drop=0.0) - else: - return self.beam_parse( - docs, - drop=0.0, - beam_width=self.cfg["beam_width"], - beam_density=self.cfg["beam_density"] - ) + with _change_attrs(self.model, beam_width=self.cfg["beam_width"], beam_density=self.cfg["beam_density"]): + inputs = TransitionModelInputs(docs=docs, moves=self.moves) + states_or_beams, _ = self.model.predict(inputs) + return states_or_beams def greedy_parse(self, docs, drop=0.): - cdef vector[StateC*] states - cdef StateClass state - cdef CBlas cblas = self._cpu_ops.cblas() + self._resize() self._ensure_labels_are_added(docs) - set_dropout_rate(self.model, drop) - batch = self.moves.init_batch(docs) - model = self.model.predict(docs) - weights = get_c_weights(model) - for state in batch: - if not state.is_final(): - states.push_back(state.c) - sizes = get_c_sizes(model, states.size()) - with nogil: - self._parseC(cblas, &states[0], weights, sizes) - model.clear_memory() - del model - return batch + with _change_attrs(self.model, beam_width=1): + inputs = TransitionModelInputs(docs=docs, moves=self.moves) + states, _ = self.model.predict(inputs) + return states def beam_parse(self, docs, int beam_width, float drop=0., beam_density=0.): - cdef Beam beam - cdef Doc doc self._ensure_labels_are_added(docs) - batch = _beam_utils.BeamBatch( - self.moves, - self.moves.init_batch(docs), - None, - beam_width, - density=beam_density - ) - model = self.model.predict(docs) - while not batch.is_done: - states = batch.get_unfinished_states() - if not states: - break - scores = model.predict(states) - batch.advance(scores) - model.clear_memory() - del model - return list(batch) - - cdef void _parseC(self, CBlas cblas, StateC** states, - WeightsC weights, SizesC sizes) nogil: - cdef int i, j - cdef vector[StateC*] unfinished - cdef ActivationsC activations = alloc_activations(sizes) - while sizes.states >= 1: - predict_states(cblas, &activations, states, &weights, sizes) - # Validate actions, argmax, take action. 
- self.c_transition_batch(states, - activations.scores, sizes.classes, sizes.states) - for i in range(sizes.states): - if not states[i].is_final(): - unfinished.push_back(states[i]) - for i in range(unfinished.size()): - states[i] = unfinished[i] - sizes.states = unfinished.size() - unfinished.clear() - free_activations(&activations) + with _change_attrs(self.model, beam_width=self.cfg["beam_width"], beam_density=self.cfg["beam_density"]): + inputs = TransitionModelInputs(docs=docs, moves=self.moves) + beams, _ = self.model.predict(inputs) + return beams def set_annotations(self, docs, states_or_beams): cdef StateClass state @@ -330,35 +397,6 @@ cdef class Parser(TrainablePipe): for hook in self.postprocesses: hook(doc) - def transition_states(self, states, float[:, ::1] scores): - cdef StateClass state - cdef float* c_scores = &scores[0, 0] - cdef vector[StateC*] c_states - for state in states: - c_states.push_back(state.c) - self.c_transition_batch(&c_states[0], c_scores, scores.shape[1], scores.shape[0]) - return [state for state in states if not state.c.is_final()] - - cdef void c_transition_batch(self, StateC** states, const float* scores, - int nr_class, int batch_size) nogil: - # n_moves should not be zero at this point, but make sure to avoid zero-length mem alloc - with gil: - assert self.moves.n_moves > 0, Errors.E924.format(name=self.name) - is_valid = calloc(self.moves.n_moves, sizeof(int)) - cdef int i, guess - cdef Transition action - for i in range(batch_size): - self.moves.set_valid(is_valid, states[i]) - guess = arg_max_if_valid(&scores[i*nr_class], is_valid, nr_class) - if guess == -1: - # This shouldn't happen, but it's hard to raise an error here, - # and we don't want to infinite loop. So, force to end state. - states[i].force_final() - else: - action = self.moves.c[guess] - action.do(states[i], action.label) - free(is_valid) - def update(self, examples, *, drop=0., sgd=None, losses=None): cdef StateClass state if losses is None: @@ -370,67 +408,99 @@ cdef class Parser(TrainablePipe): ) for multitask in self._multitasks: multitask.update(examples, drop=drop, sgd=sgd) + # We need to take care to act on the whole batch, because we might be + # getting vectors via a listener. n_examples = len([eg for eg in examples if self.moves.has_gold(eg)]) if n_examples == 0: return losses set_dropout_rate(self.model, drop) - # The probability we use beam update, instead of falling back to - # a greedy update - beam_update_prob = self.cfg["beam_update_prob"] - if self.cfg['beam_width'] >= 2 and numpy.random.random() < beam_update_prob: - return self.update_beam( - examples, - beam_width=self.cfg["beam_width"], - sgd=sgd, - losses=losses, - beam_density=self.cfg["beam_density"] - ) + docs = [eg.x for eg in examples if len(eg.x)] + max_moves = self.cfg["update_with_oracle_cut_size"] if max_moves >= 1: # Chop sequences into lengths of this many words, to make the # batch uniform length. 
- max_moves = int(random.uniform(max_moves // 2, max_moves * 2)) - states, golds, _ = self._init_gold_batch( + max_moves = int(random.uniform(max(max_moves // 2, 1), max_moves * 2)) + init_states, gold_states, _ = self._init_gold_batch( examples, max_length=max_moves ) else: - states, golds, _ = self.moves.init_gold_batch(examples) - if not states: - return losses - model, backprop_tok2vec = self.model.begin_update([eg.x for eg in examples]) - - all_states = list(states) - states_golds = list(zip(states, golds)) - n_moves = 0 - while states_golds: - states, golds = zip(*states_golds) - scores, backprop = model.begin_update(states) - d_scores = self.get_batch_loss(states, golds, scores, losses) - # Note that the gradient isn't normalized by the batch size - # here, because our "samples" are really the states...But we - # can't normalize by the number of states either, as then we'd - # be getting smaller gradients for states in long sequences. - backprop(d_scores) - # Follow the predicted action - self.transition_states(states, scores) - states_golds = [(s, g) for (s, g) in zip(states, golds) if not s.is_final()] - if max_moves >= 1 and n_moves >= max_moves: - break - n_moves += 1 + init_states, gold_states, _ = self.moves.init_gold_batch(examples) - backprop_tok2vec(golds) + inputs = TransitionModelInputs(docs=docs, moves=self.moves, + max_moves=max_moves, states=[state.copy() for state in init_states]) + (pred_states, scores), backprop_scores = self.model.begin_update(inputs) + if sum(s.shape[0] for s in scores) == 0: + return losses + d_scores = self.get_loss((gold_states, init_states, pred_states, scores), + examples, max_moves) + backprop_scores((pred_states, d_scores)) if sgd not in (None, False): self.finish_update(sgd) + losses[self.name] += float((d_scores**2).sum()) # Ugh, this is annoying. If we're working on GPU, we want to free the # memory ASAP. It seems that Python doesn't necessarily get around to # removing these in time if we don't explicitly delete? It's confusing. 
- del backprop - del backprop_tok2vec - model.clear_memory() - del model + del backprop_scores return losses + def get_loss(self, states_scores, examples, max_moves): + gold_states, init_states, pred_states, scores = states_scores + scores = self.model.ops.xp.vstack(scores) + costs = self._get_costs_from_histories( + examples, + gold_states, + init_states, + [list(state.history) for state in pred_states], + max_moves + ) + xp = get_array_module(scores) + best_costs = costs.min(axis=1, keepdims=True) + gscores = scores.copy() + min_score = scores.min() - 1000 + assert costs.shape == scores.shape, (costs.shape, scores.shape) + gscores[costs > best_costs] = min_score + max_ = scores.max(axis=1, keepdims=True) + gmax = gscores.max(axis=1, keepdims=True) + exp_scores = xp.exp(scores - max_) + exp_gscores = xp.exp(gscores - gmax) + Z = exp_scores.sum(axis=1, keepdims=True) + gZ = exp_gscores.sum(axis=1, keepdims=True) + d_scores = exp_scores / Z + d_scores -= (costs <= best_costs) * (exp_gscores / gZ) + return d_scores + + def _get_costs_from_histories(self, examples, gold_states, init_states, histories, max_moves): + cdef TransitionSystem moves = self.moves + cdef StateClass state + cdef int clas + cdef int nF = self.model.get_dim("nF") + cdef int nO = moves.n_moves + cdef int nS = sum([len(history) for history in histories]) + cdef Pool mem = Pool() + cdef np.ndarray costs_i + is_valid = mem.alloc(nO, sizeof(int)) + batch = list(zip(init_states, histories, gold_states)) + n_moves = 0 + output = [] + while batch: + costs = numpy.zeros((len(batch), nO), dtype="f") + for i, (state, history, gold) in enumerate(batch): + costs_i = costs[i] + clas = history.pop(0) + moves.set_costs(is_valid, costs_i.data, state.c, gold) + action = moves.c[clas] + action.do(state.c, action.label) + state.c.history.push_back(clas) + output.append(costs) + batch = [(s, h, g) for s, h, g in batch if len(h) != 0] + if n_moves >= max_moves >= 1: + break + n_moves += 1 + + return self.model.ops.xp.vstack(output) + def rehearse(self, examples, sgd=None, losses=None, **cfg): """Perform a "rehearsal" update, to prevent catastrophic forgetting.""" if losses is None: @@ -440,10 +510,9 @@ cdef class Parser(TrainablePipe): multitask.rehearse(examples, losses=losses, sgd=sgd) if self._rehearsal_model is None: return None - losses.setdefault(self.name, 0.) + losses.setdefault(self.name, 0.0) validate_examples(examples, "Parser.rehearse") docs = [eg.predicted for eg in examples] - states = self.moves.init_batch(docs) # This is pretty dirty, but the NER can resize itself in init_batch, # if labels are missing. We therefore have to check whether we need to # expand our model output. @@ -451,85 +520,33 @@ cdef class Parser(TrainablePipe): # Prepare the stepwise model, and get the callback for finishing the batch set_dropout_rate(self._rehearsal_model, 0.0) set_dropout_rate(self.model, 0.0) - tutor, _ = self._rehearsal_model.begin_update(docs) - model, backprop_tok2vec = self.model.begin_update(docs) - n_scores = 0. - loss = 0. - while states: - targets, _ = tutor.begin_update(states) - guesses, backprop = model.begin_update(states) - d_scores = (guesses - targets) / targets.shape[0] - # If all weights for an output are 0 in the original model, don't - # supervise that output. This allows us to add classes. 
- loss += (d_scores**2).sum() - backprop(d_scores) - # Follow the predicted action - self.transition_states(states, guesses) - states = [state for state in states if not state.is_final()] - n_scores += d_scores.size - # Do the backprop - backprop_tok2vec(docs) + student_inputs = TransitionModelInputs(docs=docs, moves=self.moves) + (student_states, student_scores), backprop_scores = self.model.begin_update(student_inputs) + actions = states2actions(student_states) + teacher_inputs = TransitionModelInputs(docs=docs, moves=self.moves, actions=actions) + _, teacher_scores = self._rehearsal_model.predict(teacher_inputs) + + loss, d_scores = self.get_teacher_student_loss(teacher_scores, student_scores, normalize=True) + + teacher_scores = self.model.ops.xp.vstack(teacher_scores) + student_scores = self.model.ops.xp.vstack(student_scores) + assert teacher_scores.shape == student_scores.shape + + d_scores = (student_scores - teacher_scores) / teacher_scores.shape[0] + # If all weights for an output are 0 in the original model, don't + # supervise that output. This allows us to add classes. + loss = (d_scores**2).sum() / d_scores.size + backprop_scores((student_states, d_scores)) + if sgd is not None: self.finish_update(sgd) - losses[self.name] += loss / n_scores - del backprop - del backprop_tok2vec - model.clear_memory() - tutor.clear_memory() - del model - del tutor + losses[self.name] += loss + return losses def update_beam(self, examples, *, beam_width, drop=0., sgd=None, losses=None, beam_density=0.0): - states, golds, _ = self.moves.init_gold_batch(examples) - if not states: - return losses - # Prepare the stepwise model, and get the callback for finishing the batch - model, backprop_tok2vec = self.model.begin_update( - [eg.predicted for eg in examples]) - loss = _beam_utils.update_beam( - self.moves, - states, - golds, - model, - beam_width, - beam_density=beam_density, - ) - losses[self.name] += loss - backprop_tok2vec(golds) - if sgd is not None: - self.finish_update(sgd) - - def get_batch_loss(self, states, golds, float[:, ::1] scores, losses): - cdef StateClass state - cdef Pool mem = Pool() - cdef int i - - # n_moves should not be zero at this point, but make sure to avoid zero-length mem alloc - assert self.moves.n_moves > 0, Errors.E924.format(name=self.name) - - is_valid = mem.alloc(self.moves.n_moves, sizeof(int)) - costs = mem.alloc(self.moves.n_moves, sizeof(float)) - cdef np.ndarray d_scores = numpy.zeros((len(states), self.moves.n_moves), - dtype='f', order='C') - c_d_scores = d_scores.data - unseen_classes = self.model.attrs["unseen_classes"] - for i, (state, gold) in enumerate(zip(states, golds)): - memset(is_valid, 0, self.moves.n_moves * sizeof(int)) - memset(costs, 0, self.moves.n_moves * sizeof(float)) - self.moves.set_costs(is_valid, costs, state.c, gold) - for j in range(self.moves.n_moves): - if costs[j] <= 0.0 and j in unseen_classes: - unseen_classes.remove(j) - cpu_log_loss(c_d_scores, - costs, is_valid, &scores[i, 0], d_scores.shape[1]) - c_d_scores += d_scores.shape[1] - # Note that we don't normalize this. See comment in update() for why. - if losses is not None: - losses.setdefault(self.name, 0.) 
- losses[self.name] += (d_scores**2).sum() - return d_scores + raise NotImplementedError def set_output(self, nO): self.model.attrs["resize_output"](self.model, nO) @@ -568,7 +585,7 @@ cdef class Parser(TrainablePipe): for example in islice(get_examples(), 10): doc_sample.append(example.predicted) assert len(doc_sample) > 0, Errors.E923.format(name=self.name) - self.model.initialize(doc_sample) + self.model.initialize((doc_sample, self.moves)) if nlp is not None: self.init_multitask_objectives(get_examples, nlp.pipeline) @@ -625,28 +642,63 @@ cdef class Parser(TrainablePipe): raise ValueError(Errors.E149) from None return self - def _init_gold_batch(self, examples, max_length): - """Make a square batch, of length equal to the shortest transition + def _init_batch(self, teacher_step_model, docs, max_length): + """Make a square batch of length equal to the shortest transition sequence or a cap. A long doc will get multiple states. Let's say we have a doc of length 2*N, where N is the shortest doc. We'll make two states, one representing - long_doc[:N], and another representing long_doc[N:].""" + long_doc[:N], and another representing long_doc[N:]. In contrast to + _init_gold_batch, this version uses a teacher model to generate the + cut sequences.""" cdef: StateClass start_state StateClass state Transition action - all_states = self.moves.init_batch([eg.predicted for eg in examples]) + all_states = self.moves.init_batch(docs) + states = [] + to_cut = [] + for state, doc in zip(all_states, docs): + if not state.is_final(): + if len(doc) < max_length: + states.append(state) + else: + to_cut.append(state) + while to_cut: + states.extend(state.copy() for state in to_cut) + # Move states forward max_length actions. + length = 0 + while to_cut and length < max_length: + teacher_scores = teacher_step_model.predict(to_cut) + self.transition_states(to_cut, teacher_scores) + # States that are completed do not need further cutting. + to_cut = [state for state in to_cut if not state.is_final()] + length += 1 + return states + + + def _init_gold_batch(self, examples, max_length): + """Make a square batch, of length equal to the shortest transition + sequence or a cap. A long doc will get multiple states. Let's say we + have a doc of length 2*N, where N is the shortest doc. 
We'll make + two states, one representing long_doc[:N], and another representing + long_doc[N:].""" + cdef: + StateClass start_state + StateClass state + Transition action + TransitionSystem moves = self.moves + all_states = moves.init_batch([eg.predicted for eg in examples]) states = [] golds = [] to_cut = [] for state, eg in zip(all_states, examples): - if self.moves.has_gold(eg) and not state.is_final(): - gold = self.moves.init_gold(state, eg) + if moves.has_gold(eg) and not state.is_final(): + gold = moves.init_gold(state, eg) if len(eg.x) < max_length: states.append(state) golds.append(gold) else: - oracle_actions = self.moves.get_oracle_sequence_from_state( + oracle_actions = moves.get_oracle_sequence_from_state( state.copy(), gold) to_cut.append((eg, state, gold, oracle_actions)) if not to_cut: @@ -656,13 +708,52 @@ cdef class Parser(TrainablePipe): for i in range(0, len(oracle_actions), max_length): start_state = state.copy() for clas in oracle_actions[i:i+max_length]: - action = self.moves.c[clas] + action = moves.c[clas] action.do(state.c, action.label) if state.is_final(): break - if self.moves.has_gold(eg, start_state.B(0), state.B(0)): + if moves.has_gold(eg, start_state.B(0), state.B(0)): states.append(start_state) golds.append(gold) if state.is_final(): break return states, golds, max_length + + +@contextlib.contextmanager +def _change_attrs(model, **kwargs): + """Temporarily modify a thinc model's attributes.""" + unset = object() + old_attrs = {} + for key, value in kwargs.items(): + old_attrs[key] = model.attrs.get(key, unset) + model.attrs[key] = value + yield model + for key, value in old_attrs.items(): + if value is unset: + model.attrs.pop(key) + else: + model.attrs[key] = value + + +def states2actions(states: List[StateClass]) -> List[Ints1d]: + cdef int step + cdef StateClass state + cdef StateC* c_state + actions = [] + while True: + step = len(actions) + + step_actions = [] + for state in states: + c_state = state.c + if step < c_state.history.size(): + step_actions.append(c_state.history[step]) + + # We are done if we have exhausted all histories. 
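# For example (hypothetical histories): two states whose action histories are
# [3, 1, 0] and [3, 2] are transposed into the per-step arrays
# [array([3, 3]), array([1, 2]), array([0])]; once the shorter history is
# exhausted it simply stops contributing, and the check below ends the loop
# when no history has steps left.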
+ if len(step_actions) == 0: + break + + actions.append(numpy.array(step_actions, dtype="i")) + + return actions diff --git a/spacy/schemas.py b/spacy/schemas.py index 15f7a499b..975affc67 100644 --- a/spacy/schemas.py +++ b/spacy/schemas.py @@ -1,6 +1,5 @@ from typing import Dict, List, Union, Optional, Any, Callable, Type, Tuple -from typing import Iterable, TypeVar, TYPE_CHECKING -from .compat import Literal +from typing import Iterable, TypeVar, Literal, TYPE_CHECKING from enum import Enum from pydantic import BaseModel, Field, ValidationError, validator, create_model from pydantic import StrictStr, StrictInt, StrictFloat, StrictBool, ConstrainedStr @@ -163,15 +162,33 @@ class TokenPatternString(BaseModel): IS_SUPERSET: Optional[List[StrictStr]] = Field(None, alias="is_superset") INTERSECTS: Optional[List[StrictStr]] = Field(None, alias="intersects") FUZZY: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy") - FUZZY1: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy1") - FUZZY2: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy2") - FUZZY3: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy3") - FUZZY4: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy4") - FUZZY5: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy5") - FUZZY6: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy6") - FUZZY7: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy7") - FUZZY8: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy8") - FUZZY9: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy9") + FUZZY1: Optional[Union[StrictStr, "TokenPatternString"]] = Field( + None, alias="fuzzy1" + ) + FUZZY2: Optional[Union[StrictStr, "TokenPatternString"]] = Field( + None, alias="fuzzy2" + ) + FUZZY3: Optional[Union[StrictStr, "TokenPatternString"]] = Field( + None, alias="fuzzy3" + ) + FUZZY4: Optional[Union[StrictStr, "TokenPatternString"]] = Field( + None, alias="fuzzy4" + ) + FUZZY5: Optional[Union[StrictStr, "TokenPatternString"]] = Field( + None, alias="fuzzy5" + ) + FUZZY6: Optional[Union[StrictStr, "TokenPatternString"]] = Field( + None, alias="fuzzy6" + ) + FUZZY7: Optional[Union[StrictStr, "TokenPatternString"]] = Field( + None, alias="fuzzy7" + ) + FUZZY8: Optional[Union[StrictStr, "TokenPatternString"]] = Field( + None, alias="fuzzy8" + ) + FUZZY9: Optional[Union[StrictStr, "TokenPatternString"]] = Field( + None, alias="fuzzy9" + ) class Config: extra = "forbid" @@ -405,6 +422,27 @@ class ConfigSchemaInit(BaseModel): arbitrary_types_allowed = True +class ConfigSchemaDistillEmpty(BaseModel): + class Config: + extra = "forbid" + + +class ConfigSchemaDistill(BaseModel): + # fmt: off + batcher: Batcher = Field(..., title="Batcher for the training data") + corpus: StrictStr = Field(..., title="Path in the config to the distillation data") + dropout: StrictFloat = Field(..., title="Dropout rate") + max_epochs: StrictInt = Field(..., title="Maximum number of epochs to distill for") + max_steps: StrictInt = Field(..., title="Maximum number of steps to distill for") + optimizer: Optimizer = Field(..., title="The optimizer to use") + student_to_teacher: Dict[str, str] = Field(..., title="Mapping from student to teacher pipe") + # fmt: on + + class Config: + extra = "forbid" + arbitrary_types_allowed = True + + class ConfigSchema(BaseModel): training: 
ConfigSchemaTraining nlp: ConfigSchemaNlp @@ -412,6 +450,7 @@ class ConfigSchema(BaseModel): components: Dict[str, Dict[str, Any]] corpora: Dict[str, Reader] initialize: ConfigSchemaInit + distillation: Union[ConfigSchemaDistill, ConfigSchemaDistillEmpty] = {} # type: ignore[assignment] class Config: extra = "allow" @@ -423,6 +462,7 @@ CONFIG_SCHEMAS = { "training": ConfigSchemaTraining, "pretraining": ConfigSchemaPretrain, "initialize": ConfigSchemaInit, + "distill": ConfigSchemaDistill, } diff --git a/spacy/scorer.py b/spacy/scorer.py index de4f52be6..095effdcf 100644 --- a/spacy/scorer.py +++ b/spacy/scorer.py @@ -104,7 +104,7 @@ class Scorer: def __init__( self, nlp: Optional["Language"] = None, - default_lang: str = "xx", + default_lang: str = "mul", default_pipeline: Iterable[str] = DEFAULT_PIPELINE, **cfg, ) -> None: diff --git a/spacy/tests/README.md b/spacy/tests/README.md index f3c96a39e..9ac1e6d2e 100644 --- a/spacy/tests/README.md +++ b/spacy/tests/README.md @@ -86,7 +86,7 @@ These are the main fixtures that are currently available: | Fixture | Description | | ----------------------------------- | ---------------------------------------------------------------------------- | -| `tokenizer` | Basic, language-independent tokenizer. Identical to the `xx` language class. | +| `tokenizer` | Basic, language-independent tokenizer. Identical to the `mul` language class. | | `en_tokenizer`, `de_tokenizer`, ... | Creates an English, German etc. tokenizer. | | `en_vocab` | Creates an instance of the English `Vocab`. | diff --git a/spacy/tests/conftest.py b/spacy/tests/conftest.py index b9c4ef715..cc0450cab 100644 --- a/spacy/tests/conftest.py +++ b/spacy/tests/conftest.py @@ -83,7 +83,7 @@ def register_cython_tests(cython_mod_name: str, test_mod_name: str): @pytest.fixture(scope="module") def tokenizer(): - return get_lang_class("xx")().tokenizer + return get_lang_class("mul")().tokenizer @pytest.fixture(scope="session") @@ -243,8 +243,8 @@ def id_tokenizer(): @pytest.fixture(scope="session") -def is_tokenizer(): - return get_lang_class("is")().tokenizer +def isl_tokenizer(): + return get_lang_class("isl")().tokenizer @pytest.fixture(scope="session") @@ -496,8 +496,8 @@ def vi_tokenizer(): @pytest.fixture(scope="session") -def xx_tokenizer(): - return get_lang_class("xx")().tokenizer +def mul_tokenizer(): + return get_lang_class("mul")().tokenizer @pytest.fixture(scope="session") diff --git a/spacy/tests/doc/test_doc_api.py b/spacy/tests/doc/test_doc_api.py index f77d54493..2009a29d6 100644 --- a/spacy/tests/doc/test_doc_api.py +++ b/spacy/tests/doc/test_doc_api.py @@ -9,7 +9,7 @@ from thinc.api import NumpyOps, get_current_ops from spacy.attrs import DEP, ENT_IOB, ENT_TYPE, HEAD, IS_ALPHA, MORPH, POS from spacy.attrs import SENT_START, TAG from spacy.lang.en import English -from spacy.lang.xx import MultiLanguage +from spacy.lang.mul import MultiLanguage from spacy.language import Language from spacy.lexeme import Lexeme from spacy.tokens import Doc, Span, SpanGroup, Token diff --git a/spacy/tests/lang/is/__init__.py b/spacy/tests/lang/isl/__init__.py similarity index 100% rename from spacy/tests/lang/is/__init__.py rename to spacy/tests/lang/isl/__init__.py diff --git a/spacy/tests/lang/is/test_text.py b/spacy/tests/lang/isl/test_text.py similarity index 85% rename from spacy/tests/lang/is/test_text.py rename to spacy/tests/lang/isl/test_text.py index 6e3654a6e..9e177485d 100644 --- a/spacy/tests/lang/is/test_text.py +++ b/spacy/tests/lang/isl/test_text.py @@ -1,7 +1,7 @@ import pytest 
-def test_long_text(is_tokenizer): +def test_long_text(isl_tokenizer): # Excerpt: European Convention on Human Rights text = """ hafa í huga, að yfirlýsing þessi hefur það markmið að tryggja @@ -15,12 +15,12 @@ réttlætis og friðar í heiminum og best er tryggt, annars vegar með virku, lýðræðislegu stjórnarfari og, hins vegar, almennum skilningi og varðveislu þeirra mannréttinda, sem eru grundvöllur frelsisins; """ - tokens = is_tokenizer(text) + tokens = isl_tokenizer(text) assert len(tokens) == 120 @pytest.mark.xfail -def test_ordinal_number(is_tokenizer): +def test_ordinal_number(isl_tokenizer): text = "10. desember 1948" - tokens = is_tokenizer(text) + tokens = isl_tokenizer(text) assert len(tokens) == 3 diff --git a/spacy/tests/lang/is/test_tokenizer.py b/spacy/tests/lang/isl/test_tokenizer.py similarity index 72% rename from spacy/tests/lang/is/test_tokenizer.py rename to spacy/tests/lang/isl/test_tokenizer.py index 0c05a6050..ba534aaf6 100644 --- a/spacy/tests/lang/is/test_tokenizer.py +++ b/spacy/tests/lang/isl/test_tokenizer.py @@ -1,6 +1,6 @@ import pytest -IS_BASIC_TOKENIZATION_TESTS = [ +ISL_BASIC_TOKENIZATION_TESTS = [ ( "Enginn maður skal sæta pyndingum eða ómannlegri eða " "vanvirðandi meðferð eða refsingu. ", @@ -23,8 +23,8 @@ IS_BASIC_TOKENIZATION_TESTS = [ ] -@pytest.mark.parametrize("text,expected_tokens", IS_BASIC_TOKENIZATION_TESTS) -def test_is_tokenizer_basic(is_tokenizer, text, expected_tokens): - tokens = is_tokenizer(text) +@pytest.mark.parametrize("text,expected_tokens", ISL_BASIC_TOKENIZATION_TESTS) +def test_isl_tokenizer_basic(isl_tokenizer, text, expected_tokens): + tokens = isl_tokenizer(text) token_list = [token.text for token in tokens if not token.is_space] assert expected_tokens == token_list diff --git a/spacy/tests/lang/xx/__init__.py b/spacy/tests/lang/mul/__init__.py similarity index 100% rename from spacy/tests/lang/xx/__init__.py rename to spacy/tests/lang/mul/__init__.py diff --git a/spacy/tests/lang/xx/test_text.py b/spacy/tests/lang/mul/test_text.py similarity index 96% rename from spacy/tests/lang/xx/test_text.py rename to spacy/tests/lang/mul/test_text.py index 477f0ebe2..6e4262d66 100644 --- a/spacy/tests/lang/xx/test_text.py +++ b/spacy/tests/lang/mul/test_text.py @@ -1,7 +1,7 @@ import pytest -def test_long_text(xx_tokenizer): +def test_long_text(mul_tokenizer): # Excerpt: Text in Skolt Sami taken from https://www.samediggi.fi text = """ Säʹmmla lie Euroopp unioon oʹdinakai alggmeer. Säʹmmlai alggmeerstatus lij raʹvvjum Lääʹddjânnam vuâđđlääʹjjest. @@ -20,5 +20,5 @@ vuâđđlääʹjj meâldlaž jiõččvaaldâšm. Säʹmmlai jiõččvaldšma kuu Sääʹmteʹǧǧ. """ - tokens = xx_tokenizer(text) + tokens = mul_tokenizer(text) assert len(tokens) == 179 diff --git a/spacy/tests/lang/xx/test_tokenizer.py b/spacy/tests/lang/mul/test_tokenizer.py similarity index 68% rename from spacy/tests/lang/xx/test_tokenizer.py rename to spacy/tests/lang/mul/test_tokenizer.py index 15c760a6b..3d06dc11c 100644 --- a/spacy/tests/lang/xx/test_tokenizer.py +++ b/spacy/tests/lang/mul/test_tokenizer.py @@ -1,6 +1,6 @@ import pytest -XX_BASIC_TOKENIZATION_TESTS = [ +MUL_BASIC_TOKENIZATION_TESTS = [ ( "Lääʹddjânnmest lie nuʹtt 10 000 säʹmmliʹžžed. 
Seeʹst pâʹjjel", [ @@ -18,8 +18,8 @@ XX_BASIC_TOKENIZATION_TESTS = [ ] -@pytest.mark.parametrize("text,expected_tokens", XX_BASIC_TOKENIZATION_TESTS) -def test_xx_tokenizer_basic(xx_tokenizer, text, expected_tokens): - tokens = xx_tokenizer(text) +@pytest.mark.parametrize("text,expected_tokens", MUL_BASIC_TOKENIZATION_TESTS) +def test_mul_tokenizer_basic(mul_tokenizer, text, expected_tokens): + tokens = mul_tokenizer(text) token_list = [token.text for token in tokens if not token.is_space] assert expected_tokens == token_list diff --git a/spacy/tests/lang/test_initialize.py b/spacy/tests/lang/test_initialize.py index 36f4a75e0..98d37f832 100644 --- a/spacy/tests/lang/test_initialize.py +++ b/spacy/tests/lang/test_initialize.py @@ -7,10 +7,10 @@ from spacy.util import get_lang_class # excluded: ja, ko, th, vi, zh LANGUAGES = ["af", "am", "ar", "az", "bg", "bn", "ca", "cs", "da", "de", "el", "en", "es", "et", "eu", "fa", "fi", "fr", "ga", "gu", "he", "hi", - "hr", "hu", "hy", "id", "is", "it", "kn", "ky", "lb", "lt", "lv", - "mk", "ml", "mr", "nb", "ne", "nl", "pl", "pt", "ro", "ru", "sa", + "hr", "hu", "hy", "id", "isl", "it", "kn", "ky", "lb", "lt", "lv", + "mk", "ml", "mr", "mul", "nb", "ne", "nl", "pl", "pt", "ro", "ru", "sa", "si", "sk", "sl", "sq", "sr", "sv", "ta", "te", "ti", "tl", "tn", - "tr", "tt", "uk", "ur", "xx", "yo"] + "tr", "tt", "uk", "ur", "yo"] # fmt: on diff --git a/spacy/tests/parser/test_ner.py b/spacy/tests/parser/test_ner.py index 00889efdc..d6cd11e55 100644 --- a/spacy/tests/parser/test_ner.py +++ b/spacy/tests/parser/test_ner.py @@ -13,6 +13,7 @@ from spacy.pipeline._parser_internals.ner import BiluoPushDown from spacy.training import Example, iob_to_biluo, split_bilu_label from spacy.tokens import Doc, Span from spacy.vocab import Vocab +from thinc.api import fix_random_seed import logging from ..util import make_tempdir @@ -412,7 +413,7 @@ def test_train_empty(): train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1])) ner = nlp.add_pipe("ner", last=True) ner.add_label("PERSON") - nlp.initialize() + nlp.initialize(get_examples=lambda: train_examples) for itn in range(2): losses = {} batches = util.minibatch(train_examples, size=8) @@ -539,11 +540,11 @@ def test_block_ner(): assert [token.ent_type_ for token in doc] == expected_types -@pytest.mark.parametrize("use_upper", [True, False]) -def test_overfitting_IO(use_upper): +def test_overfitting_IO(): + fix_random_seed(1) # Simple test to try and quickly overfit the NER component nlp = English() - ner = nlp.add_pipe("ner", config={"model": {"use_upper": use_upper}}) + ner = nlp.add_pipe("ner", config={"model": {}}) train_examples = [] for text, annotations in TRAIN_DATA: train_examples.append(Example.from_dict(nlp.make_doc(text), annotations)) @@ -575,7 +576,6 @@ def test_overfitting_IO(use_upper): assert ents2[0].label_ == "LOC" # Ensure that the predictions are still the same, even after adding a new label ner2 = nlp2.get_pipe("ner") - assert ner2.model.attrs["has_upper"] == use_upper ner2.add_label("RANDOM_NEW_LABEL") doc3 = nlp2(test_text) ents3 = doc3.ents @@ -617,6 +617,52 @@ def test_overfitting_IO(use_upper): assert ents[1].kb_id == 0 +def test_is_distillable(): + nlp = English() + ner = nlp.add_pipe("ner") + assert ner.is_distillable + + +def test_distill(): + teacher = English() + teacher_ner = teacher.add_pipe("ner") + train_examples = [] + for text, annotations in TRAIN_DATA: + train_examples.append(Example.from_dict(teacher.make_doc(text), annotations)) + for ent in 
annotations.get("entities"): + teacher_ner.add_label(ent[2]) + + optimizer = teacher.initialize(get_examples=lambda: train_examples) + + for i in range(50): + losses = {} + teacher.update(train_examples, sgd=optimizer, losses=losses) + assert losses["ner"] < 0.00001 + + student = English() + student_ner = student.add_pipe("ner") + student_ner.initialize( + get_examples=lambda: train_examples, labels=teacher_ner.label_data + ) + + distill_examples = [ + Example.from_dict(teacher.make_doc(t[0]), {}) for t in TRAIN_DATA + ] + + for i in range(100): + losses = {} + student_ner.distill(teacher_ner, distill_examples, sgd=optimizer, losses=losses) + assert losses["ner"] < 0.0001 + + # test the trained model + test_text = "I like London." + doc = student(test_text) + ents = doc.ents + assert len(ents) == 1 + assert ents[0].text == "London" + assert ents[0].label_ == "LOC" + + def test_beam_ner_scores(): # Test that we can get confidence values out of the beam_ner pipe beam_width = 16 diff --git a/spacy/tests/parser/test_parse.py b/spacy/tests/parser/test_parse.py index aaf31ed56..57b6e188b 100644 --- a/spacy/tests/parser/test_parse.py +++ b/spacy/tests/parser/test_parse.py @@ -1,13 +1,17 @@ +import itertools import pytest +import numpy from numpy.testing import assert_equal from thinc.api import Adam from spacy import registry, util from spacy.attrs import DEP, NORM from spacy.lang.en import English -from spacy.tokens import Doc from spacy.training import Example +from spacy.tokens import Doc from spacy.vocab import Vocab +from spacy import util, registry +from thinc.api import fix_random_seed from ...pipeline import DependencyParser from ...pipeline.dep_parser import DEFAULT_PARSER_MODEL @@ -59,6 +63,8 @@ PARTIAL_DATA = [ ), ] +PARSERS = ["parser"] # TODO: Test beam_parser when ready + eps = 0.1 @@ -171,6 +177,57 @@ def test_parser_parse_one_word_sentence(en_vocab, en_parser, words): assert doc[0].dep != 0 +def test_parser_apply_actions(en_vocab, en_parser): + words = ["I", "ate", "pizza"] + words2 = ["Eat", "more", "pizza", "!"] + doc1 = Doc(en_vocab, words=words) + doc2 = Doc(en_vocab, words=words2) + docs = [doc1, doc2] + + moves = en_parser.moves + moves.add_action(0, "") + moves.add_action(1, "") + moves.add_action(2, "nsubj") + moves.add_action(3, "obj") + moves.add_action(2, "amod") + + actions = [ + numpy.array([0, 0], dtype="i"), + numpy.array([2, 0], dtype="i"), + numpy.array([0, 4], dtype="i"), + numpy.array([3, 3], dtype="i"), + numpy.array([1, 1], dtype="i"), + numpy.array([1, 1], dtype="i"), + numpy.array([0], dtype="i"), + numpy.array([1], dtype="i"), + ] + + states = moves.init_batch(docs) + active_states = states + + for step_actions in actions: + active_states = moves.apply_actions(active_states, step_actions) + + assert len(active_states) == 0 + + for (state, doc) in zip(states, docs): + moves.set_annotations(state, doc) + + assert docs[0][0].head.i == 1 + assert docs[0][0].dep_ == "nsubj" + assert docs[0][1].head.i == 1 + assert docs[0][1].dep_ == "ROOT" + assert docs[0][2].head.i == 1 + assert docs[0][2].dep_ == "obj" + + assert docs[1][0].head.i == 0 + assert docs[1][0].dep_ == "ROOT" + assert docs[1][1].head.i == 2 + assert docs[1][1].dep_ == "amod" + assert docs[1][2].head.i == 0 + assert docs[1][2].dep_ == "obj" + + @pytest.mark.skip( reason="The step_through API was removed (but should be brought back)" ) @@ -319,7 +376,7 @@ def test_parser_constructor(en_vocab): DependencyParser(en_vocab, model) -@pytest.mark.parametrize("pipe_name", ["parser", "beam_parser"]) 
+@pytest.mark.parametrize("pipe_name", PARSERS) def test_incomplete_data(pipe_name): # Test that the parser works with incomplete information nlp = English() @@ -345,11 +402,15 @@ def test_incomplete_data(pipe_name): assert doc[2].head.i == 1 -@pytest.mark.parametrize("pipe_name", ["parser", "beam_parser"]) -def test_overfitting_IO(pipe_name): +@pytest.mark.parametrize( + "pipe_name,max_moves", itertools.product(PARSERS, [0, 1, 5, 100]) +) +def test_overfitting_IO(pipe_name, max_moves): + fix_random_seed(0) # Simple test to try and quickly overfit the dependency parser (normal or beam) nlp = English() parser = nlp.add_pipe(pipe_name) + parser.cfg["update_with_oracle_cut_size"] = max_moves train_examples = [] for text, annotations in TRAIN_DATA: train_examples.append(Example.from_dict(nlp.make_doc(text), annotations)) @@ -396,16 +457,67 @@ def test_overfitting_IO(pipe_name): assert_equal(batch_deps_1, no_batch_deps) +def test_is_distillable(): + nlp = English() + parser = nlp.add_pipe("parser") + assert parser.is_distillable + + +def test_distill(): + teacher = English() + teacher_parser = teacher.add_pipe("parser") + train_examples = [] + for text, annotations in TRAIN_DATA: + train_examples.append(Example.from_dict(teacher.make_doc(text), annotations)) + for dep in annotations.get("deps", []): + teacher_parser.add_label(dep) + + optimizer = teacher.initialize(get_examples=lambda: train_examples) + + for i in range(200): + losses = {} + teacher.update(train_examples, sgd=optimizer, losses=losses) + assert losses["parser"] < 0.0001 + + student = English() + student_parser = student.add_pipe("parser") + student_parser.initialize( + get_examples=lambda: train_examples, labels=teacher_parser.label_data + ) + + distill_examples = [ + Example.from_dict(teacher.make_doc(t[0]), {}) for t in TRAIN_DATA + ] + + for i in range(200): + losses = {} + student_parser.distill( + teacher_parser, distill_examples, sgd=optimizer, losses=losses + ) + assert losses["parser"] < 0.0001 + + test_text = "I like securities." + doc = student(test_text) + assert doc[0].dep_ == "nsubj" + assert doc[2].dep_ == "dobj" + assert doc[3].dep_ == "punct" + assert doc[0].head.i == 1 + assert doc[2].head.i == 1 + assert doc[3].head.i == 1 + + # fmt: off @pytest.mark.slow @pytest.mark.parametrize("pipe_name", ["parser", "beam_parser"]) @pytest.mark.parametrize( "parser_config", [ - # TransitionBasedParser V1 - ({"@architectures": "spacy.TransitionBasedParser.v1", "tok2vec": DEFAULT_TOK2VEC_MODEL, "state_type": "parser", "extra_state_tokens": False, "hidden_width": 64, "maxout_pieces": 2, "use_upper": True}), - # TransitionBasedParser V2 + # TODO: re-enable after we have a spacy-legacy release for v4. 
See + # https://github.com/explosion/spacy-legacy/pull/36 + #({"@architectures": "spacy.TransitionBasedParser.v1", "tok2vec": DEFAULT_TOK2VEC_MODEL, "state_type": "parser", "extra_state_tokens": False, "hidden_width": 64, "maxout_pieces": 2, "use_upper": True}), ({"@architectures": "spacy.TransitionBasedParser.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL, "state_type": "parser", "extra_state_tokens": False, "hidden_width": 64, "maxout_pieces": 2, "use_upper": True}), + ({"@architectures": "spacy.TransitionBasedParser.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL, "state_type": "parser", "extra_state_tokens": False, "hidden_width": 64, "maxout_pieces": 2, "use_upper": False}), + ({"@architectures": "spacy.TransitionBasedParser.v3", "tok2vec": DEFAULT_TOK2VEC_MODEL, "state_type": "parser", "extra_state_tokens": False, "hidden_width": 64, "maxout_pieces": 2}), ], ) # fmt: on diff --git a/spacy/tests/pipeline/test_edit_tree_lemmatizer.py b/spacy/tests/pipeline/test_edit_tree_lemmatizer.py index 5eeb55aa2..c5c50c77f 100644 --- a/spacy/tests/pipeline/test_edit_tree_lemmatizer.py +++ b/spacy/tests/pipeline/test_edit_tree_lemmatizer.py @@ -103,14 +103,15 @@ def test_initialize_from_labels(): } -def test_no_data(): +@pytest.mark.parametrize("top_k", (1, 5, 30)) +def test_no_data(top_k): # Test that the lemmatizer provides a nice error when there's no tagging data / labels TEXTCAT_DATA = [ ("I'm so happy.", {"cats": {"POSITIVE": 1.0, "NEGATIVE": 0.0}}), ("I'm so angry", {"cats": {"POSITIVE": 0.0, "NEGATIVE": 1.0}}), ] nlp = English() - nlp.add_pipe("trainable_lemmatizer") + nlp.add_pipe("trainable_lemmatizer", config={"top_k": top_k}) nlp.add_pipe("textcat") train_examples = [] @@ -121,10 +122,11 @@ def test_no_data(): nlp.initialize(get_examples=lambda: train_examples) -def test_incomplete_data(): +@pytest.mark.parametrize("top_k", (1, 5, 30)) +def test_incomplete_data(top_k): # Test that the lemmatizer works with incomplete information nlp = English() - lemmatizer = nlp.add_pipe("trainable_lemmatizer") + lemmatizer = nlp.add_pipe("trainable_lemmatizer", config={"top_k": top_k}) lemmatizer.min_tree_freq = 1 train_examples = [] for t in PARTIAL_DATA: @@ -141,10 +143,25 @@ def test_incomplete_data(): assert doc[1].lemma_ == "like" assert doc[2].lemma_ == "blue" + # Check that incomplete annotations are ignored. + scores, _ = lemmatizer.model([eg.predicted for eg in train_examples], is_train=True) + _, dX = lemmatizer.get_loss(train_examples, scores) + xp = lemmatizer.model.ops.xp -def test_overfitting_IO(): + # Missing annotations. + assert xp.count_nonzero(dX[0][0]) == 0 + assert xp.count_nonzero(dX[0][3]) == 0 + assert xp.count_nonzero(dX[1][0]) == 0 + assert xp.count_nonzero(dX[1][3]) == 0 + + # Misaligned annotations. 
+ assert xp.count_nonzero(dX[1][1]) == 0 + + +@pytest.mark.parametrize("top_k", (1, 5, 30)) +def test_overfitting_IO(top_k): nlp = English() - lemmatizer = nlp.add_pipe("trainable_lemmatizer") + lemmatizer = nlp.add_pipe("trainable_lemmatizer", config={"top_k": top_k}) lemmatizer.min_tree_freq = 1 train_examples = [] for t in TRAIN_DATA: @@ -177,7 +194,7 @@ def test_overfitting_IO(): # Check model after a {to,from}_bytes roundtrip nlp_bytes = nlp.to_bytes() nlp3 = English() - nlp3.add_pipe("trainable_lemmatizer") + nlp3.add_pipe("trainable_lemmatizer", config={"top_k": top_k}) nlp3.from_bytes(nlp_bytes) doc3 = nlp3(test_text) assert doc3[0].lemma_ == "she" @@ -195,6 +212,53 @@ def test_overfitting_IO(): assert doc4[3].lemma_ == "egg" +def test_is_distillable(): + nlp = English() + lemmatizer = nlp.add_pipe("trainable_lemmatizer") + assert lemmatizer.is_distillable + + +def test_distill(): + teacher = English() + teacher_lemmatizer = teacher.add_pipe("trainable_lemmatizer") + teacher_lemmatizer.min_tree_freq = 1 + train_examples = [] + for t in TRAIN_DATA: + train_examples.append(Example.from_dict(teacher.make_doc(t[0]), t[1])) + + optimizer = teacher.initialize(get_examples=lambda: train_examples) + + for i in range(50): + losses = {} + teacher.update(train_examples, sgd=optimizer, losses=losses) + assert losses["trainable_lemmatizer"] < 0.00001 + + student = English() + student_lemmatizer = student.add_pipe("trainable_lemmatizer") + student_lemmatizer.min_tree_freq = 1 + student_lemmatizer.initialize( + get_examples=lambda: train_examples, labels=teacher_lemmatizer.label_data + ) + + distill_examples = [ + Example.from_dict(teacher.make_doc(t[0]), {}) for t in TRAIN_DATA + ] + + for i in range(50): + losses = {} + student_lemmatizer.distill( + teacher_lemmatizer, distill_examples, sgd=optimizer, losses=losses + ) + assert losses["trainable_lemmatizer"] < 0.00001 + + test_text = "She likes blue eggs" + doc = student(test_text) + assert doc[0].lemma_ == "she" + assert doc[1].lemma_ == "like" + assert doc[2].lemma_ == "blue" + assert doc[3].lemma_ == "egg" + + def test_lemmatizer_requires_labels(): nlp = English() nlp.add_pipe("trainable_lemmatizer") diff --git a/spacy/tests/pipeline/test_entity_linker.py b/spacy/tests/pipeline/test_entity_linker.py index 9a8ce6653..506530591 100644 --- a/spacy/tests/pipeline/test_entity_linker.py +++ b/spacy/tests/pipeline/test_entity_linker.py @@ -12,7 +12,6 @@ from spacy.lang.en import English from spacy.ml import load_kb from spacy.ml.models.entity_linker import build_span_maker from spacy.pipeline import EntityLinker, TrainablePipe -from spacy.pipeline.legacy import EntityLinker_v1 from spacy.pipeline.tok2vec import DEFAULT_TOK2VEC_MODEL from spacy.scorer import Scorer from spacy.tests.util import make_tempdir @@ -997,6 +996,8 @@ def test_scorer_links(): ) # fmt: on def test_legacy_architectures(name, config): + from spacy_legacy.components.entity_linker import EntityLinker_v1 + # Ensure that the legacy architectures still work vector_length = 3 nlp = English() diff --git a/spacy/tests/pipeline/test_morphologizer.py b/spacy/tests/pipeline/test_morphologizer.py index 70fc77304..5b9b17c01 100644 --- a/spacy/tests/pipeline/test_morphologizer.py +++ b/spacy/tests/pipeline/test_morphologizer.py @@ -50,6 +50,12 @@ def test_implicit_label(): nlp.initialize(get_examples=lambda: train_examples) +def test_is_distillable(): + nlp = English() + morphologizer = nlp.add_pipe("morphologizer") + assert morphologizer.is_distillable + + def test_no_resize(): nlp = 
Language() morphologizer = nlp.add_pipe("morphologizer") diff --git a/spacy/tests/pipeline/test_senter.py b/spacy/tests/pipeline/test_senter.py index 3deac9e9a..a771d62fa 100644 --- a/spacy/tests/pipeline/test_senter.py +++ b/spacy/tests/pipeline/test_senter.py @@ -11,6 +11,12 @@ from spacy.pipeline import TrainablePipe from spacy.tests.util import make_tempdir +def test_is_distillable(): + nlp = English() + senter = nlp.add_pipe("senter") + assert senter.is_distillable + + def test_label_types(): nlp = Language() senter = nlp.add_pipe("senter") diff --git a/spacy/tests/pipeline/test_span_ruler.py b/spacy/tests/pipeline/test_span_ruler.py index 794815359..fe3bdd1bf 100644 --- a/spacy/tests/pipeline/test_span_ruler.py +++ b/spacy/tests/pipeline/test_span_ruler.py @@ -47,7 +47,7 @@ def person_org_date_patterns(person_org_patterns): def test_span_ruler_add_empty(patterns): """Test that patterns don't get added excessively.""" - nlp = spacy.blank("xx") + nlp = spacy.blank("mul") ruler = nlp.add_pipe("span_ruler", config={"validate": True}) ruler.add_patterns(patterns) pattern_count = sum(len(mm) for mm in ruler.matcher._patterns.values()) @@ -58,7 +58,7 @@ def test_span_ruler_add_empty(patterns): def test_span_ruler_init(patterns): - nlp = spacy.blank("xx") + nlp = spacy.blank("mul") ruler = nlp.add_pipe("span_ruler") ruler.add_patterns(patterns) assert len(ruler) == len(patterns) @@ -74,7 +74,7 @@ def test_span_ruler_init(patterns): def test_span_ruler_no_patterns_warns(): - nlp = spacy.blank("xx") + nlp = spacy.blank("mul") ruler = nlp.add_pipe("span_ruler") assert len(ruler) == 0 assert len(ruler.labels) == 0 @@ -86,7 +86,7 @@ def test_span_ruler_no_patterns_warns(): def test_span_ruler_init_patterns(patterns): # initialize with patterns - nlp = spacy.blank("xx") + nlp = spacy.blank("mul") ruler = nlp.add_pipe("span_ruler") assert len(ruler.labels) == 0 ruler.initialize(lambda: [], patterns=patterns) @@ -110,7 +110,7 @@ def test_span_ruler_init_patterns(patterns): def test_span_ruler_init_clear(patterns): """Test that initialization clears patterns.""" - nlp = spacy.blank("xx") + nlp = spacy.blank("mul") ruler = nlp.add_pipe("span_ruler") ruler.add_patterns(patterns) assert len(ruler.labels) == 4 @@ -119,7 +119,7 @@ def test_span_ruler_init_clear(patterns): def test_span_ruler_clear(patterns): - nlp = spacy.blank("xx") + nlp = spacy.blank("mul") ruler = nlp.add_pipe("span_ruler") ruler.add_patterns(patterns) assert len(ruler.labels) == 4 @@ -133,7 +133,7 @@ def test_span_ruler_clear(patterns): def test_span_ruler_existing(patterns): - nlp = spacy.blank("xx") + nlp = spacy.blank("mul") ruler = nlp.add_pipe("span_ruler", config={"overwrite": False}) ruler.add_patterns(patterns) doc = nlp.make_doc("OH HELLO WORLD bye bye") @@ -148,7 +148,7 @@ def test_span_ruler_existing(patterns): def test_span_ruler_existing_overwrite(patterns): - nlp = spacy.blank("xx") + nlp = spacy.blank("mul") ruler = nlp.add_pipe("span_ruler", config={"overwrite": True}) ruler.add_patterns(patterns) doc = nlp.make_doc("OH HELLO WORLD bye bye") @@ -161,13 +161,13 @@ def test_span_ruler_existing_overwrite(patterns): def test_span_ruler_serialize_bytes(patterns): - nlp = spacy.blank("xx") + nlp = spacy.blank("mul") ruler = nlp.add_pipe("span_ruler") ruler.add_patterns(patterns) assert len(ruler) == len(patterns) assert len(ruler.labels) == 4 ruler_bytes = ruler.to_bytes() - new_nlp = spacy.blank("xx") + new_nlp = spacy.blank("mul") new_ruler = new_nlp.add_pipe("span_ruler") assert len(new_ruler) == 0 assert 
len(new_ruler.labels) == 0 @@ -181,7 +181,7 @@ def test_span_ruler_serialize_bytes(patterns): def test_span_ruler_validate(): - nlp = spacy.blank("xx") + nlp = spacy.blank("mul") ruler = nlp.add_pipe("span_ruler") validated_ruler = nlp.add_pipe( "span_ruler", name="validated_span_ruler", config={"validate": True} @@ -203,14 +203,14 @@ def test_span_ruler_validate(): def test_span_ruler_properties(patterns): - nlp = spacy.blank("xx") + nlp = spacy.blank("mul") ruler = nlp.add_pipe("span_ruler", config={"overwrite": True}) ruler.add_patterns(patterns) assert sorted(ruler.labels) == sorted(set([p["label"] for p in patterns])) def test_span_ruler_overlapping_spans(overlapping_patterns): - nlp = spacy.blank("xx") + nlp = spacy.blank("mul") ruler = nlp.add_pipe("span_ruler") ruler.add_patterns(overlapping_patterns) doc = ruler(nlp.make_doc("foo bar baz")) @@ -220,7 +220,7 @@ def test_span_ruler_overlapping_spans(overlapping_patterns): def test_span_ruler_scorer(overlapping_patterns): - nlp = spacy.blank("xx") + nlp = spacy.blank("mul") ruler = nlp.add_pipe("span_ruler") ruler.add_patterns(overlapping_patterns) text = "foo bar baz" @@ -243,7 +243,7 @@ def test_span_ruler_multiprocessing(n_process): patterns = [{"label": "FASTFOOD", "pattern": "Pizza Hut"}] - nlp = spacy.blank("xx") + nlp = spacy.blank("mul") ruler = nlp.add_pipe("span_ruler") ruler.add_patterns(patterns) @@ -253,7 +253,7 @@ def test_span_ruler_multiprocessing(n_process): def test_span_ruler_serialize_dir(patterns): - nlp = spacy.blank("xx") + nlp = spacy.blank("mul") ruler = nlp.add_pipe("span_ruler") ruler.add_patterns(patterns) with make_tempdir() as d: @@ -264,7 +264,7 @@ def test_span_ruler_serialize_dir(patterns): def test_span_ruler_remove_basic(person_org_patterns): - nlp = spacy.blank("xx") + nlp = spacy.blank("mul") ruler = nlp.add_pipe("span_ruler") ruler.add_patterns(person_org_patterns) doc = ruler(nlp.make_doc("Dina went to school")) @@ -279,7 +279,7 @@ def test_span_ruler_remove_basic(person_org_patterns): def test_span_ruler_remove_nonexisting_pattern(person_org_patterns): - nlp = spacy.blank("xx") + nlp = spacy.blank("mul") ruler = nlp.add_pipe("span_ruler") ruler.add_patterns(person_org_patterns) assert len(ruler.patterns) == 3 @@ -290,7 +290,7 @@ def test_span_ruler_remove_nonexisting_pattern(person_org_patterns): def test_span_ruler_remove_several_patterns(person_org_patterns): - nlp = spacy.blank("xx") + nlp = spacy.blank("mul") ruler = nlp.add_pipe("span_ruler") ruler.add_patterns(person_org_patterns) doc = ruler(nlp.make_doc("Dina founded the company ACME.")) @@ -314,7 +314,7 @@ def test_span_ruler_remove_several_patterns(person_org_patterns): def test_span_ruler_remove_patterns_in_a_row(person_org_date_patterns): - nlp = spacy.blank("xx") + nlp = spacy.blank("mul") ruler = nlp.add_pipe("span_ruler") ruler.add_patterns(person_org_date_patterns) doc = ruler(nlp.make_doc("Dina founded the company ACME on June 14th")) @@ -332,7 +332,7 @@ def test_span_ruler_remove_patterns_in_a_row(person_org_date_patterns): def test_span_ruler_remove_all_patterns(person_org_date_patterns): - nlp = spacy.blank("xx") + nlp = spacy.blank("mul") ruler = nlp.add_pipe("span_ruler") ruler.add_patterns(person_org_date_patterns) assert len(ruler.patterns) == 4 @@ -348,7 +348,7 @@ def test_span_ruler_remove_all_patterns(person_org_date_patterns): def test_span_ruler_remove_and_add(): - nlp = spacy.blank("xx") + nlp = spacy.blank("mul") ruler = nlp.add_pipe("span_ruler") patterns1 = [{"label": "DATE1", "pattern": "last time"}] 
ruler.add_patterns(patterns1) @@ -404,7 +404,7 @@ def test_span_ruler_remove_and_add(): def test_span_ruler_spans_filter(overlapping_patterns): - nlp = spacy.blank("xx") + nlp = spacy.blank("mul") ruler = nlp.add_pipe( "span_ruler", config={"spans_filter": {"@misc": "spacy.first_longest_spans_filter.v1"}}, @@ -416,7 +416,7 @@ def test_span_ruler_spans_filter(overlapping_patterns): def test_span_ruler_ents_default_filter(overlapping_patterns): - nlp = spacy.blank("xx") + nlp = spacy.blank("mul") ruler = nlp.add_pipe("span_ruler", config={"annotate_ents": True}) ruler.add_patterns(overlapping_patterns) doc = ruler(nlp.make_doc("foo bar baz")) @@ -425,7 +425,7 @@ def test_span_ruler_ents_default_filter(overlapping_patterns): def test_span_ruler_ents_overwrite_filter(overlapping_patterns): - nlp = spacy.blank("xx") + nlp = spacy.blank("mul") ruler = nlp.add_pipe( "span_ruler", config={ @@ -452,7 +452,7 @@ def test_span_ruler_ents_bad_filter(overlapping_patterns): return pass_through_filter - nlp = spacy.blank("xx") + nlp = spacy.blank("mul") ruler = nlp.add_pipe( "span_ruler", config={ diff --git a/spacy/tests/pipeline/test_tagger.py b/spacy/tests/pipeline/test_tagger.py index a0c71198e..505b41f8c 100644 --- a/spacy/tests/pipeline/test_tagger.py +++ b/spacy/tests/pipeline/test_tagger.py @@ -24,7 +24,9 @@ def test_issue4348(): optimizer = nlp.initialize() for i in range(5): losses = {} - batches = util.minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001)) + batches = util.minibatch( + TRAIN_DATA, size=compounding(4.0, 32.0, 1.001).to_generator() + ) for batch in batches: nlp.update(batch, sgd=optimizer, losses=losses) @@ -213,6 +215,52 @@ def test_overfitting_IO(): assert doc3[0].tag_ != "N" +def test_is_distillable(): + nlp = English() + tagger = nlp.add_pipe("tagger") + assert tagger.is_distillable + + +def test_distill(): + teacher = English() + teacher_tagger = teacher.add_pipe("tagger") + train_examples = [] + for t in TRAIN_DATA: + train_examples.append(Example.from_dict(teacher.make_doc(t[0]), t[1])) + + optimizer = teacher.initialize(get_examples=lambda: train_examples) + + for i in range(50): + losses = {} + teacher.update(train_examples, sgd=optimizer, losses=losses) + assert losses["tagger"] < 0.00001 + + student = English() + student_tagger = student.add_pipe("tagger") + student_tagger.min_tree_freq = 1 + student_tagger.initialize( + get_examples=lambda: train_examples, labels=teacher_tagger.label_data + ) + + distill_examples = [ + Example.from_dict(teacher.make_doc(t[0]), {}) for t in TRAIN_DATA + ] + + for i in range(50): + losses = {} + student_tagger.distill( + teacher_tagger, distill_examples, sgd=optimizer, losses=losses + ) + assert losses["tagger"] < 0.00001 + + test_text = "I like blue eggs" + doc = student(test_text) + assert doc[0].tag_ == "N" + assert doc[1].tag_ == "V" + assert doc[2].tag_ == "J" + assert doc[3].tag_ == "N" + + def test_save_activations(): # Test if activations are correctly added to Doc when requested. 
nlp = English() diff --git a/spacy/tests/pipeline/test_textcat.py b/spacy/tests/pipeline/test_textcat.py index 304209933..506897a45 100644 --- a/spacy/tests/pipeline/test_textcat.py +++ b/spacy/tests/pipeline/test_textcat.py @@ -91,7 +91,9 @@ def test_issue3611(): optimizer = nlp.initialize() for i in range(3): losses = {} - batches = util.minibatch(train_data, size=compounding(4.0, 32.0, 1.001)) + batches = util.minibatch( + train_data, size=compounding(4.0, 32.0, 1.001).to_generator() + ) for batch in batches: nlp.update(examples=batch, sgd=optimizer, drop=0.1, losses=losses) @@ -128,7 +130,9 @@ def test_issue4030(): optimizer = nlp.initialize() for i in range(3): losses = {} - batches = util.minibatch(train_data, size=compounding(4.0, 32.0, 1.001)) + batches = util.minibatch( + train_data, size=compounding(4.0, 32.0, 1.001).to_generator() + ) for batch in batches: nlp.update(examples=batch, sgd=optimizer, drop=0.1, losses=losses) @@ -565,6 +569,12 @@ def test_initialize_examples(name, get_examples, train_data): nlp.initialize(get_examples=get_examples()) +def test_is_distillable(): + nlp = English() + textcat = nlp.add_pipe("textcat") + assert not textcat.is_distillable + + def test_overfitting_IO(): # Simple test to try and quickly overfit the single-label textcat component - ensuring the ML models work correctly fix_random_seed(0) diff --git a/spacy/tests/pipeline/test_tok2vec.py b/spacy/tests/pipeline/test_tok2vec.py index e423d9a19..ee62b1ab4 100644 --- a/spacy/tests/pipeline/test_tok2vec.py +++ b/spacy/tests/pipeline/test_tok2vec.py @@ -382,7 +382,7 @@ cfg_string_multi = """ factory = "ner" [components.ner.model] - @architectures = "spacy.TransitionBasedParser.v2" + @architectures = "spacy.TransitionBasedParser.v3" [components.ner.model.tok2vec] @architectures = "spacy.Tok2VecListener.v1" diff --git a/spacy/tests/serialize/test_serialize_config.py b/spacy/tests/serialize/test_serialize_config.py index 85e6f8b2c..6eb95001a 100644 --- a/spacy/tests/serialize/test_serialize_config.py +++ b/spacy/tests/serialize/test_serialize_config.py @@ -6,10 +6,11 @@ import spacy from spacy.lang.de import German from spacy.lang.en import English from spacy.language import DEFAULT_CONFIG, DEFAULT_CONFIG_PRETRAIN_PATH +from spacy.language import DEFAULT_CONFIG_DISTILL_PATH from spacy.language import Language from spacy.ml.models import MaxoutWindowEncoder, MultiHashEmbed from spacy.ml.models import build_tb_parser_model, build_Tok2Vec_model -from spacy.schemas import ConfigSchema, ConfigSchemaPretrain +from spacy.schemas import ConfigSchema, ConfigSchemaDistill, ConfigSchemaPretrain from spacy.util import load_config, load_config_from_str from spacy.util import load_model_from_config, registry @@ -66,6 +67,60 @@ factory = "tagger" width = ${components.tok2vec.model.width} """ +distill_config_string = """ +[paths] +train = null +dev = null + +[corpora] + +[corpora.train] +@readers = "spacy.Corpus.v1" +path = ${paths.train} + +[corpora.dev] +@readers = "spacy.Corpus.v1" +path = ${paths.dev} + +[training] + +[training.batcher] +@batchers = "spacy.batch_by_words.v1" +size = 666 + +[nlp] +lang = "en" +pipeline = ["tok2vec", "tagger"] + +[components] + +[components.tok2vec] +factory = "tok2vec" + +[components.tok2vec.model] +@architectures = "spacy.HashEmbedCNN.v1" +pretrained_vectors = null +width = 342 +depth = 4 +window_size = 1 +embed_size = 2000 +maxout_pieces = 3 +subword_features = true + +[components.tagger] +factory = "tagger" + +[components.tagger.model] +@architectures = "spacy.Tagger.v2" + 
+[components.tagger.model.tok2vec] +@architectures = "spacy.Tok2VecListener.v1" +width = ${components.tok2vec.model.width} + +[distill] +""" + + pretrain_config_string = """ [paths] train = null @@ -122,33 +177,11 @@ width = ${components.tok2vec.model.width} parser_config_string_upper = """ [model] -@architectures = "spacy.TransitionBasedParser.v2" +@architectures = "spacy.TransitionBasedParser.v3" state_type = "parser" extra_state_tokens = false hidden_width = 66 maxout_pieces = 2 -use_upper = true - -[model.tok2vec] -@architectures = "spacy.HashEmbedCNN.v1" -pretrained_vectors = null -width = 333 -depth = 4 -embed_size = 5555 -window_size = 1 -maxout_pieces = 7 -subword_features = false -""" - - -parser_config_string_no_upper = """ -[model] -@architectures = "spacy.TransitionBasedParser.v2" -state_type = "parser" -extra_state_tokens = false -hidden_width = 66 -maxout_pieces = 2 -use_upper = false [model.tok2vec] @architectures = "spacy.HashEmbedCNN.v1" @@ -179,7 +212,6 @@ def my_parser(): extra_state_tokens=True, hidden_width=65, maxout_pieces=5, - use_upper=True, ) return parser @@ -224,6 +256,14 @@ def test_create_nlp_from_config(): load_model_from_config(Config(bad_cfg), auto_fill=True) +def test_nlp_from_distillation_config(): + """Test that the default distillation config validates properly""" + config = Config().from_str(distill_config_string) + distill_config = load_config(DEFAULT_CONFIG_DISTILL_PATH) + filled = config.merge(distill_config) + registry.resolve(filled["distillation"], schema=ConfigSchemaDistill) + + def test_create_nlp_from_pretraining_config(): """Test that the default pretraining config validates properly""" config = Config().from_str(pretrain_config_string) @@ -285,15 +325,16 @@ def test_serialize_custom_nlp(): nlp.to_disk(d) nlp2 = spacy.load(d) model = nlp2.get_pipe("parser").model - model.get_ref("tok2vec") - # check that we have the correct settings, not the default ones - assert model.get_ref("upper").get_dim("nI") == 65 - assert model.get_ref("lower").get_dim("nI") == 65 + assert model.get_ref("tok2vec") is not None + assert model.has_param("hidden_W") + assert model.has_param("hidden_b") + output = model.get_ref("output") + assert output is not None + assert output.has_param("W") + assert output.has_param("b") -@pytest.mark.parametrize( - "parser_config_string", [parser_config_string_upper, parser_config_string_no_upper] -) +@pytest.mark.parametrize("parser_config_string", [parser_config_string_upper]) def test_serialize_parser(parser_config_string): """Create a non-default parser config to check nlp serializes it correctly""" nlp = English() @@ -306,11 +347,13 @@ def test_serialize_parser(parser_config_string): nlp.to_disk(d) nlp2 = spacy.load(d) model = nlp2.get_pipe("parser").model - model.get_ref("tok2vec") - # check that we have the correct settings, not the default ones - if model.attrs["has_upper"]: - assert model.get_ref("upper").get_dim("nI") == 66 - assert model.get_ref("lower").get_dim("nI") == 66 + assert model.get_ref("tok2vec") is not None + assert model.has_param("hidden_W") + assert model.has_param("hidden_b") + output = model.get_ref("output") + assert output is not None + assert output.has_param("b") + assert output.has_param("W") def test_config_nlp_roundtrip(): @@ -457,9 +500,7 @@ def test_config_auto_fill_extra_fields(): load_model_from_config(nlp.config) -@pytest.mark.parametrize( - "parser_config_string", [parser_config_string_upper, parser_config_string_no_upper] -) +@pytest.mark.parametrize("parser_config_string", 
[parser_config_string_upper]) def test_config_validate_literal(parser_config_string): nlp = English() config = Config().from_str(parser_config_string) diff --git a/spacy/tests/test_cli.py b/spacy/tests/test_cli.py index c88e20de2..42ffae22d 100644 --- a/spacy/tests/test_cli.py +++ b/spacy/tests/test_cli.py @@ -618,7 +618,6 @@ def test_string_to_list_intify(value): assert string_to_list(value, intify=True) == [1, 2, 3] -@pytest.mark.skip(reason="Temporarily skip for dev version") def test_download_compatibility(): spec = SpecifierSet("==" + about.__version__) spec.prereleases = False @@ -629,7 +628,6 @@ def test_download_compatibility(): assert get_minor_version(about.__version__) == get_minor_version(version) -@pytest.mark.skip(reason="Temporarily skip for dev version") def test_validate_compatibility_table(): spec = SpecifierSet("==" + about.__version__) spec.prereleases = False @@ -1076,7 +1074,7 @@ def test_cli_find_threshold(capsys): ) with make_tempdir() as nlp_dir: nlp.to_disk(nlp_dir) - res = find_threshold( + best_threshold, best_score, res = find_threshold( model=nlp_dir, data_path=docs_dir / "docs.spacy", pipe_name="tc_multi", @@ -1084,10 +1082,10 @@ def test_cli_find_threshold(capsys): scores_key="cats_macro_f", silent=True, ) - assert res[0] != thresholds[0] - assert thresholds[0] < res[0] < thresholds[9] - assert res[1] == 1.0 - assert res[2][1.0] == 0.0 + assert best_threshold != thresholds[0] + assert thresholds[0] < best_threshold < thresholds[9] + assert best_score == max(res.values()) + assert res[1.0] == 0.0 # Test with spancat. nlp, _ = init_nlp((("spancat", {}),)) @@ -1209,3 +1207,69 @@ def test_walk_directory(): assert (len(walk_directory(d, suffix="iob"))) == 2 assert (len(walk_directory(d, suffix="conll"))) == 3 assert (len(walk_directory(d, suffix="pdf"))) == 0 + + +def test_debug_data_trainable_lemmatizer_basic(): + examples = [ + ("She likes green eggs", {"lemmas": ["she", "like", "green", "egg"]}), + ("Eat blue ham", {"lemmas": ["eat", "blue", "ham"]}), + ] + nlp = Language() + train_examples = [] + for t in examples: + train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1])) + + data = _compile_gold(train_examples, ["trainable_lemmatizer"], nlp, True) + # ref test_edit_tree_lemmatizer::test_initialize_from_labels + # this results in 4 trees + assert len(data["lemmatizer_trees"]) == 4 + + +def test_debug_data_trainable_lemmatizer_partial(): + partial_examples = [ + # partial annotation + ("She likes green eggs", {"lemmas": ["", "like", "green", ""]}), + # misaligned partial annotation + ( + "He hates green eggs", + { + "words": ["He", "hat", "es", "green", "eggs"], + "lemmas": ["", "hat", "e", "green", ""], + }, + ), + ] + nlp = Language() + train_examples = [] + for t in partial_examples: + train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1])) + + data = _compile_gold(train_examples, ["trainable_lemmatizer"], nlp, True) + assert data["partial_lemma_annotations"] == 2 + + +def test_debug_data_trainable_lemmatizer_low_cardinality(): + low_cardinality_examples = [ + ("She likes green eggs", {"lemmas": ["no", "no", "no", "no"]}), + ("Eat blue ham", {"lemmas": ["no", "no", "no"]}), + ] + nlp = Language() + train_examples = [] + for t in low_cardinality_examples: + train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1])) + + data = _compile_gold(train_examples, ["trainable_lemmatizer"], nlp, True) + assert data["n_low_cardinality_lemmas"] == 2 + + +def test_debug_data_trainable_lemmatizer_not_annotated(): + unannotated_examples = [ + 
("She likes green eggs", {}), + ("Eat blue ham", {}), + ] + nlp = Language() + train_examples = [] + for t in unannotated_examples: + train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1])) + + data = _compile_gold(train_examples, ["trainable_lemmatizer"], nlp, True) + assert data["no_lemma_annotations"] == 2 diff --git a/spacy/tests/test_cli_app.py b/spacy/tests/test_cli_app.py index 19b2073a0..9b099ccb5 100644 --- a/spacy/tests/test_cli_app.py +++ b/spacy/tests/test_cli_app.py @@ -3,6 +3,7 @@ from pathlib import Path import pytest import subprocess from typer.testing import CliRunner +from spacy.tokens import DocBin, Doc import spacy from spacy.cli._util import app @@ -240,3 +241,60 @@ def test_multi_code_evaluate(code_paths, data_paths, noop_config): # check that it succeeds with the code arg result = subprocess.run([*cmd, *code_paths]) assert result.returncode == 0 + + +def test_benchmark_accuracy_alias(): + # Verify that the `evaluate` alias works correctly. + result_benchmark = CliRunner().invoke(app, ["benchmark", "accuracy", "--help"]) + result_evaluate = CliRunner().invoke(app, ["evaluate", "--help"]) + assert result_benchmark.stdout == result_evaluate.stdout.replace( + "spacy evaluate", "spacy benchmark accuracy" + ) + + +def test_debug_data_trainable_lemmatizer_cli(en_vocab): + train_docs = [ + Doc(en_vocab, words=["I", "like", "cats"], lemmas=["I", "like", "cat"]), + Doc( + en_vocab, + words=["Dogs", "are", "great", "too"], + lemmas=["dog", "be", "great", "too"], + ), + ] + dev_docs = [ + Doc(en_vocab, words=["Cats", "are", "cute"], lemmas=["cat", "be", "cute"]), + Doc(en_vocab, words=["Pets", "are", "great"], lemmas=["pet", "be", "great"]), + ] + with make_tempdir() as d_in: + train_bin = DocBin(docs=train_docs) + train_bin.to_disk(d_in / "train.spacy") + dev_bin = DocBin(docs=dev_docs) + dev_bin.to_disk(d_in / "dev.spacy") + # `debug data` requires an input pipeline config + CliRunner().invoke( + app, + [ + "init", + "config", + f"{d_in}/config.cfg", + "--lang", + "en", + "--pipeline", + "trainable_lemmatizer", + ], + ) + result_debug_data = CliRunner().invoke( + app, + [ + "debug", + "data", + f"{d_in}/config.cfg", + "--paths.train", + f"{d_in}/train.spacy", + "--paths.dev", + f"{d_in}/dev.spacy", + ], + ) + # Instead of checking specific wording of the output, which may change, + # we'll check that this section of the debug output is present. 
+ assert "= Trainable Lemmatizer =" in result_debug_data.stdout diff --git a/spacy/tests/test_language.py b/spacy/tests/test_language.py index 03790eb86..f2d6d5fc0 100644 --- a/spacy/tests/test_language.py +++ b/spacy/tests/test_language.py @@ -26,6 +26,12 @@ except ImportError: pass +TAGGER_TRAIN_DATA = [ + ("I like green eggs", {"tags": ["N", "V", "J", "N"]}), + ("Eat blue ham", {"tags": ["V", "J", "N"]}), +] + + def evil_component(doc): if "2" in doc.text: raise ValueError("no dice") @@ -658,11 +664,12 @@ def test_spacy_blank(): ("fra", "fr"), ("fre", "fr"), ("iw", "he"), + ("is", "isl"), ("mo", "ro"), - ("mul", "xx"), + ("mul", "mul"), ("no", "nb"), ("pt-BR", "pt"), - ("xx", "xx"), + ("xx", "mul"), ("zh-Hans", "zh"), ("zh-Hant", None), ("zxx", None), @@ -683,11 +690,11 @@ def test_language_matching(lang, target): ("fra", "fr"), ("fre", "fr"), ("iw", "he"), + ("is", "isl"), ("mo", "ro"), - ("mul", "xx"), + ("xx", "mul"), ("no", "nb"), ("pt-BR", "pt"), - ("xx", "xx"), ("zh-Hans", "zh"), ], ) @@ -799,3 +806,66 @@ def test_component_return(): nlp.add_pipe("test_component_bad_pipe") with pytest.raises(ValueError, match="instead of a Doc"): nlp("text") + + +@pytest.mark.slow +@pytest.mark.parametrize("teacher_tagger_name", ["tagger", "teacher_tagger"]) +def test_distill(teacher_tagger_name): + teacher = English() + teacher_tagger = teacher.add_pipe("tagger", name=teacher_tagger_name) + train_examples = [] + for t in TAGGER_TRAIN_DATA: + train_examples.append(Example.from_dict(teacher.make_doc(t[0]), t[1])) + + optimizer = teacher.initialize(get_examples=lambda: train_examples) + + for i in range(50): + losses = {} + teacher.update(train_examples, sgd=optimizer, losses=losses) + assert losses[teacher_tagger_name] < 0.00001 + + student = English() + student_tagger = student.add_pipe("tagger") + student_tagger.min_tree_freq = 1 + student_tagger.initialize( + get_examples=lambda: train_examples, labels=teacher_tagger.label_data + ) + + distill_examples = [ + Example.from_dict(teacher.make_doc(t[0]), {}) for t in TAGGER_TRAIN_DATA + ] + + student_to_teacher = ( + None + if teacher_tagger.name == student_tagger.name + else {student_tagger.name: teacher_tagger.name} + ) + + for i in range(50): + losses = {} + student.distill( + teacher, + distill_examples, + sgd=optimizer, + losses=losses, + student_to_teacher=student_to_teacher, + ) + assert losses["tagger"] < 0.00001 + + test_text = "I like blue eggs" + doc = student(test_text) + assert doc[0].tag_ == "N" + assert doc[1].tag_ == "V" + assert doc[2].tag_ == "J" + assert doc[3].tag_ == "N" + + # Do an extra update to check if annotates works, though we can't really + # validate the resuls, since the annotations are ephemeral. 
+ student.distill( + teacher, + distill_examples, + sgd=optimizer, + losses=losses, + student_to_teacher=student_to_teacher, + annotates=["tagger"], + ) diff --git a/spacy/tests/test_misc.py b/spacy/tests/test_misc.py index 618f17334..e4e0f9d83 100644 --- a/spacy/tests/test_misc.py +++ b/spacy/tests/test_misc.py @@ -5,10 +5,8 @@ from pathlib import Path from spacy.about import __version__ as spacy_version from spacy import util from spacy import prefer_gpu, require_gpu, require_cpu -from spacy.ml._precomputable_affine import PrecomputableAffine -from spacy.ml._precomputable_affine import _backprop_precomputable_affine_padding -from spacy.util import dot_to_object, SimpleFrozenList, import_file -from spacy.util import to_ternary_int, find_available_port +from spacy.util import dot_to_object, SimpleFrozenList, import_file, to_ternary_int +from spacy.util import find_available_port from thinc.api import Config, Optimizer, ConfigValidationError from thinc.api import get_current_ops, set_current_ops, NumpyOps, CupyOps, MPSOps from thinc.compat import has_cupy_gpu, has_torch_mps_gpu @@ -81,34 +79,6 @@ def test_util_get_package_path(package): assert isinstance(path, Path) -def test_PrecomputableAffine(nO=4, nI=5, nF=3, nP=2): - model = PrecomputableAffine(nO=nO, nI=nI, nF=nF, nP=nP).initialize() - assert model.get_param("W").shape == (nF, nO, nP, nI) - tensor = model.ops.alloc((10, nI)) - Y, get_dX = model.begin_update(tensor) - assert Y.shape == (tensor.shape[0] + 1, nF, nO, nP) - dY = model.ops.alloc((15, nO, nP)) - ids = model.ops.alloc((15, nF)) - ids[1, 2] = -1 - dY[1] = 1 - assert not model.has_grad("pad") - d_pad = _backprop_precomputable_affine_padding(model, dY, ids) - assert d_pad[0, 2, 0, 0] == 1.0 - ids.fill(0.0) - dY.fill(0.0) - dY[0] = 0 - ids[1, 2] = 0 - ids[1, 1] = -1 - ids[1, 0] = -1 - dY[1] = 1 - ids[2, 0] = -1 - dY[2] = 5 - d_pad = _backprop_precomputable_affine_padding(model, dY, ids) - assert d_pad[0, 0, 0, 0] == 6 - assert d_pad[0, 1, 0, 0] == 1 - assert d_pad[0, 2, 0, 0] == 0 - - def test_prefer_gpu(): current_ops = get_current_ops() if has_cupy_gpu: diff --git a/spacy/tests/tokenizer/test_explain.py b/spacy/tests/tokenizer/test_explain.py index 5b4eeca16..4268392dd 100644 --- a/spacy/tests/tokenizer/test_explain.py +++ b/spacy/tests/tokenizer/test_explain.py @@ -36,6 +36,7 @@ LANGUAGES = [ "hu", pytest.param("id", marks=pytest.mark.slow()), pytest.param("it", marks=pytest.mark.slow()), + pytest.param("isl", marks=pytest.mark.slow()), pytest.param("kn", marks=pytest.mark.slow()), pytest.param("lb", marks=pytest.mark.slow()), pytest.param("lt", marks=pytest.mark.slow()), diff --git a/spacy/tests/training/test_corpus.py b/spacy/tests/training/test_corpus.py new file mode 100644 index 000000000..b4f9cc13a --- /dev/null +++ b/spacy/tests/training/test_corpus.py @@ -0,0 +1,78 @@ +from typing import IO, Generator, Iterable, List, TextIO, Tuple +from contextlib import contextmanager +from pathlib import Path +import pytest +import tempfile + +from spacy.lang.en import English +from spacy.training import Example, PlainTextCorpus +from spacy.util import make_tempdir + +# Intentional newlines to check that they are skipped. +PLAIN_TEXT_DOC = """ + +This is a doc. It contains two sentences. +This is another doc. + +A third doc. 
+ +""" + +PLAIN_TEXT_DOC_TOKENIZED = [ + [ + "This", + "is", + "a", + "doc", + ".", + "It", + "contains", + "two", + "sentences", + ".", + ], + ["This", "is", "another", "doc", "."], + ["A", "third", "doc", "."], +] + + +@pytest.mark.parametrize("min_length", [0, 5]) +@pytest.mark.parametrize("max_length", [0, 5]) +def test_plain_text_reader(min_length, max_length): + nlp = English() + with _string_to_tmp_file(PLAIN_TEXT_DOC) as file_path: + corpus = PlainTextCorpus( + file_path, min_length=min_length, max_length=max_length + ) + + check = [ + doc + for doc in PLAIN_TEXT_DOC_TOKENIZED + if len(doc) >= min_length and (max_length == 0 or len(doc) <= max_length) + ] + reference, predicted = _examples_to_tokens(corpus(nlp)) + + assert reference == check + assert predicted == check + + +@contextmanager +def _string_to_tmp_file(s: str) -> Generator[Path, None, None]: + with make_tempdir() as d: + file_path = Path(d) / "string.txt" + with open(file_path, "w", encoding="utf-8") as f: + f.write(s) + yield file_path + + +def _examples_to_tokens( + examples: Iterable[Example], +) -> Tuple[List[List[str]], List[List[str]]]: + reference = [] + predicted = [] + + for eg in examples: + reference.append([t.text for t in eg.reference]) + predicted.append([t.text for t in eg.predicted]) + + return reference, predicted diff --git a/spacy/tests/training/test_training.py b/spacy/tests/training/test_training.py index 7933ea31f..0c8962098 100644 --- a/spacy/tests/training/test_training.py +++ b/spacy/tests/training/test_training.py @@ -8,7 +8,7 @@ from spacy.lang.en import English from spacy.tokens import Doc, DocBin from spacy.training import Alignment, Corpus, Example, biluo_tags_to_offsets from spacy.training import biluo_tags_to_spans, docs_to_json, iob_to_biluo -from spacy.training import offsets_to_biluo_tags +from spacy.training import offsets_to_biluo_tags, validate_distillation_examples from spacy.training.alignment_array import AlignmentArray from spacy.training.align import get_alignments from spacy.training.converters import json_to_docs @@ -365,6 +365,19 @@ def test_example_from_dict_some_ner(en_vocab): assert ner_tags == ["U-LOC", None, None, None] +def test_validate_distillation_examples(en_vocab): + words = ["a", "b", "c", "d"] + spaces = [True, True, False, True] + predicted = Doc(en_vocab, words=words, spaces=spaces) + + example = Example.from_dict(predicted, {}) + validate_distillation_examples([example], "test_validate_distillation_examples") + + example = Example.from_dict(predicted, {"words": words + ["e"]}) + with pytest.raises(ValueError, match=r"distillation"): + validate_distillation_examples([example], "test_validate_distillation_examples") + + @pytest.mark.filterwarnings("ignore::UserWarning") def test_json_to_docs_no_ner(en_vocab): data = [ @@ -905,7 +918,9 @@ def _train_tuples(train_data): optimizer = nlp.initialize() for i in range(5): losses = {} - batches = minibatch(train_examples, size=compounding(4.0, 32.0, 1.001)) + batches = minibatch( + train_examples, size=compounding(4.0, 32.0, 1.001).to_generator() + ) for batch in batches: nlp.update(batch, sgd=optimizer, losses=losses) diff --git a/spacy/tokenizer.pxd b/spacy/tokenizer.pxd index 6f9dfc90f..ba268eaeb 100644 --- a/spacy/tokenizer.pxd +++ b/spacy/tokenizer.pxd @@ -37,7 +37,7 @@ cdef class Tokenizer: bint with_special_cases) except -1 cdef int _tokenize(self, Doc tokens, str span, hash_t key, int* has_special, bint with_special_cases) except -1 - cdef str _split_affixes(self, Pool mem, str string, + cdef str 
_split_affixes(self, str string, vector[LexemeC*] *prefixes, vector[LexemeC*] *suffixes, int* has_special, bint with_special_cases) diff --git a/spacy/tokenizer.pyx b/spacy/tokenizer.pyx index ff8d85ac7..0466b041a 100644 --- a/spacy/tokenizer.pyx +++ b/spacy/tokenizer.pyx @@ -389,14 +389,14 @@ cdef class Tokenizer: cdef vector[LexemeC*] suffixes cdef int orig_size orig_size = tokens.length - span = self._split_affixes(tokens.mem, span, &prefixes, &suffixes, + span = self._split_affixes(span, &prefixes, &suffixes, has_special, with_special_cases) self._attach_tokens(tokens, span, &prefixes, &suffixes, has_special, with_special_cases) self._save_cached(&tokens.c[orig_size], orig_key, has_special, tokens.length - orig_size) - cdef str _split_affixes(self, Pool mem, str string, + cdef str _split_affixes(self, str string, vector[const LexemeC*] *prefixes, vector[const LexemeC*] *suffixes, int* has_special, @@ -419,7 +419,7 @@ cdef class Tokenizer: minus_pre = string[pre_len:] if minus_pre and with_special_cases and self._specials.get(hash_string(minus_pre)) != NULL: string = minus_pre - prefixes.push_back(self.vocab.get(mem, prefix)) + prefixes.push_back(self.vocab.get(prefix)) break suf_len = self.find_suffix(string[pre_len:]) if suf_len != 0: @@ -427,18 +427,18 @@ cdef class Tokenizer: minus_suf = string[:-suf_len] if minus_suf and with_special_cases and self._specials.get(hash_string(minus_suf)) != NULL: string = minus_suf - suffixes.push_back(self.vocab.get(mem, suffix)) + suffixes.push_back(self.vocab.get(suffix)) break if pre_len and suf_len and (pre_len + suf_len) <= len(string): string = string[pre_len:-suf_len] - prefixes.push_back(self.vocab.get(mem, prefix)) - suffixes.push_back(self.vocab.get(mem, suffix)) + prefixes.push_back(self.vocab.get(prefix)) + suffixes.push_back(self.vocab.get(suffix)) elif pre_len: string = minus_pre - prefixes.push_back(self.vocab.get(mem, prefix)) + prefixes.push_back(self.vocab.get(prefix)) elif suf_len: string = minus_suf - suffixes.push_back(self.vocab.get(mem, suffix)) + suffixes.push_back(self.vocab.get(suffix)) return string cdef int _attach_tokens(self, Doc tokens, str string, @@ -465,11 +465,11 @@ cdef class Tokenizer: # We're always saying 'no' to spaces here -- the caller will # fix up the outermost one, with reference to the original. 
# See Issue #859 - tokens.push_back(self.vocab.get(tokens.mem, string), False) + tokens.push_back(self.vocab.get(string), False) else: matches = self.find_infix(string) if not matches: - tokens.push_back(self.vocab.get(tokens.mem, string), False) + tokens.push_back(self.vocab.get(string), False) else: # Let's say we have dyn-o-mite-dave - the regex finds the # start and end positions of the hyphens @@ -484,7 +484,7 @@ cdef class Tokenizer: if infix_start != start: span = string[start:infix_start] - tokens.push_back(self.vocab.get(tokens.mem, span), False) + tokens.push_back(self.vocab.get(span), False) if infix_start != infix_end: # If infix_start != infix_end, it means the infix @@ -492,11 +492,11 @@ cdef class Tokenizer: # for tokenization in some languages (see # https://github.com/explosion/spaCy/issues/768) infix_span = string[infix_start:infix_end] - tokens.push_back(self.vocab.get(tokens.mem, infix_span), False) + tokens.push_back(self.vocab.get(infix_span), False) start = infix_end span = string[start:] if span: - tokens.push_back(self.vocab.get(tokens.mem, span), False) + tokens.push_back(self.vocab.get(span), False) cdef vector[const LexemeC*].reverse_iterator it = suffixes.rbegin() while it != suffixes.rend(): lexeme = deref(it) diff --git a/spacy/tokens/doc.pyx b/spacy/tokens/doc.pyx index 25af6ca6a..2b3b83e6a 100644 --- a/spacy/tokens/doc.pyx +++ b/spacy/tokens/doc.pyx @@ -266,12 +266,12 @@ cdef class Doc: cdef const LexemeC* lexeme for word, has_space in zip(words, spaces): if isinstance(word, str): - lexeme = self.vocab.get(self.mem, word) + lexeme = self.vocab.get(word) elif isinstance(word, bytes): raise ValueError(Errors.E028.format(value=word)) else: try: - lexeme = self.vocab.get_by_orth(self.mem, word) + lexeme = self.vocab.get_by_orth(word) except TypeError: raise TypeError(Errors.E1022.format(wtype=type(word))) self.push_back(lexeme, has_space) @@ -1430,7 +1430,7 @@ cdef class Doc: end = start + attrs[i, 0] has_space = attrs[i, 1] orth_ = text[start:end] - lex = self.vocab.get(self.mem, orth_) + lex = self.vocab.get(orth_) self.push_back(lex, has_space) start = end + has_space self.from_array(msg["array_head"][2:], attrs[:, 2:]) @@ -1536,7 +1536,7 @@ cdef class Doc: assert words == reconstructed_words for word, has_space in zip(words, spaces): - lex = self.vocab.get(self.mem, word) + lex = self.vocab.get(word) self.push_back(lex, has_space) # Set remaining token-level attributes via Doc.from_array(). 
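The tokenizer, `Doc` and retokenizer changes above all drop the `Pool mem` argument because the vocab now always owns lexeme memory. Below is a minimal Python-level sketch of what this means in practice, assuming a build that includes this change; the observable behavior is unchanged, only the C-level API is simplified.

```python
# Lexemes created while constructing a Doc are stored in the shared Vocab,
# since callers no longer pass their own memory pool to Vocab.get().
import spacy
from spacy.tokens import Doc

nlp = spacy.blank("en")
n_lexemes = len(nlp.vocab)  # number of lexemes currently stored

# Building a Doc with an unseen word goes through Vocab.get() internally.
Doc(nlp.vocab, words=["zyzzyva"], spaces=[False])
assert len(nlp.vocab) == n_lexemes + 1  # the new lexeme is owned by the vocab
```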
diff --git a/spacy/tokens/retokenizer.pyx b/spacy/tokens/retokenizer.pyx index 29143bed3..8aef1d74f 100644 --- a/spacy/tokens/retokenizer.pyx +++ b/spacy/tokens/retokenizer.pyx @@ -223,7 +223,7 @@ def _merge(Doc doc, merges): if doc.vocab.vectors_length > 0: doc.vocab.set_vector(new_orth, span.vector) token = tokens[token_index] - lex = doc.vocab.get(doc.mem, new_orth) + lex = doc.vocab.get(new_orth) token.lex = lex # We set trailing space here too token.spacy = doc.c[spans[token_index].end-1].spacy @@ -359,7 +359,7 @@ def _split(Doc doc, int token_index, orths, heads, attrs): cdef int idx_offset = 0 for i, orth in enumerate(orths): token = &doc.c[token_index + i] - lex = doc.vocab.get(doc.mem, orth) + lex = doc.vocab.get(orth) token.lex = lex # If lemma is currently set, set default lemma to orth if token.lemma != 0: diff --git a/spacy/training/__init__.py b/spacy/training/__init__.py index 71d1fa775..f8e69b1c8 100644 --- a/spacy/training/__init__.py +++ b/spacy/training/__init__.py @@ -1,5 +1,6 @@ -from .corpus import Corpus, JsonlCorpus # noqa: F401 +from .corpus import Corpus, JsonlCorpus, PlainTextCorpus # noqa: F401 from .example import Example, validate_examples, validate_get_examples # noqa: F401 +from .example import validate_distillation_examples # noqa: F401 from .alignment import Alignment # noqa: F401 from .augment import dont_augment, orth_variants_augmenter # noqa: F401 from .iob_utils import iob_to_biluo, biluo_to_iob # noqa: F401 diff --git a/spacy/training/batchers.py b/spacy/training/batchers.py index 73678c7fc..d9aa04e32 100644 --- a/spacy/training/batchers.py +++ b/spacy/training/batchers.py @@ -2,12 +2,13 @@ from typing import Union, Iterable, Sequence, TypeVar, List, Callable, Iterator from typing import Optional, Any from functools import partial import itertools -from thinc.schedules import Schedule, constant as constant_schedule +from thinc.schedules import Schedule from ..util import registry, minibatch -Sizing = Union[Sequence[int], int, Schedule[int]] +SizingSchedule = Union[Iterable[int], int, Schedule] +Sizing = Union[Iterable[int], int] ItemT = TypeVar("ItemT") BatcherT = Callable[[Iterable[ItemT]], Iterable[List[ItemT]]] @@ -15,7 +16,7 @@ BatcherT = Callable[[Iterable[ItemT]], Iterable[List[ItemT]]] @registry.batchers("spacy.batch_by_padded.v1") def configure_minibatch_by_padded_size( *, - size: Sizing, + size: SizingSchedule, buffer: int, discard_oversize: bool, get_length: Optional[Callable[[ItemT], int]] = None @@ -25,8 +26,8 @@ def configure_minibatch_by_padded_size( The padded size is defined as the maximum length of sequences within the batch multiplied by the number of sequences in the batch. - size (int or Sequence[int]): The largest padded size to batch sequences into. - Can be a single integer, or a sequence, allowing for variable batch sizes. + size (int, Iterable[int] or Schedule): The largest padded size to batch sequences + into. Can be a single integer, or a sequence, allowing for variable batch sizes. buffer (int): The number of sequences to accumulate before sorting by length. 
A larger buffer will result in more even sizing, but if the buffer is very large, the iteration order will be less random, which can result @@ -40,7 +41,7 @@ def configure_minibatch_by_padded_size( optionals = {"get_length": get_length} if get_length is not None else {} return partial( minibatch_by_padded_size, - size=size, + size=_schedule_to_sizing(size), buffer=buffer, discard_oversize=discard_oversize, **optionals @@ -50,14 +51,14 @@ def configure_minibatch_by_padded_size( @registry.batchers("spacy.batch_by_words.v1") def configure_minibatch_by_words( *, - size: Sizing, + size: SizingSchedule, tolerance: float, discard_oversize: bool, get_length: Optional[Callable[[ItemT], int]] = None ) -> BatcherT: """Create a batcher that uses the "minibatch by words" strategy. - size (int or Sequence[int]): The target number of words per batch. + size (int, Iterable[int] or Schedule): The target number of words per batch. Can be a single integer, or a sequence, allowing for variable batch sizes. tolerance (float): What percentage of the size to allow batches to exceed. discard_oversize (bool): Whether to discard sequences that by themselves @@ -68,7 +69,7 @@ def configure_minibatch_by_words( optionals = {"get_length": get_length} if get_length is not None else {} return partial( minibatch_by_words, - size=size, + size=_schedule_to_sizing(size), tolerance=tolerance, discard_oversize=discard_oversize, **optionals @@ -77,15 +78,15 @@ def configure_minibatch_by_words( @registry.batchers("spacy.batch_by_sequence.v1") def configure_minibatch( - size: Sizing, get_length: Optional[Callable[[ItemT], int]] = None + size: SizingSchedule, get_length: Optional[Callable[[ItemT], int]] = None ) -> BatcherT: """Create a batcher that creates batches of the specified size. - size (int or Sequence[int]): The target number of items per batch. + size (int, Iterable[int] or Schedule): The target number of items per batch. Can be a single integer, or a sequence, allowing for variable batch sizes. """ optionals = {"get_length": get_length} if get_length is not None else {} - return partial(minibatch, size=size, **optionals) + return partial(minibatch, size=_schedule_to_sizing(size), **optionals) def minibatch_by_padded_size( @@ -101,7 +102,7 @@ def minibatch_by_padded_size( The padded size is defined as the maximum length of sequences within the batch multiplied by the number of sequences in the batch. - size (int or Sequence[int]): The largest padded size to batch sequences into. + size (int or Iterable[int]): The largest padded size to batch sequences into. buffer (int): The number of sequences to accumulate before sorting by length. A larger buffer will result in more even sizing, but if the buffer is very large, the iteration order will be less random, which can result @@ -112,13 +113,12 @@ def minibatch_by_padded_size( The `len` function is used by default. 
""" if isinstance(size, int): - size_ = constant_schedule(size) + size_: Iterator[int] = itertools.repeat(size) else: - assert isinstance(size, Schedule) - size_ = size - for step, outer_batch in enumerate(minibatch(seqs, size=buffer)): + size_ = iter(size) + for outer_batch in minibatch(seqs, size=buffer): outer_batch = list(outer_batch) - target_size = size_(step) + target_size = next(size_) for indices in _batch_by_length(outer_batch, target_size, get_length): subbatch = [outer_batch[i] for i in indices] padded_size = max(len(seq) for seq in subbatch) * len(subbatch) @@ -140,7 +140,7 @@ def minibatch_by_words( themselves, or be discarded if discard_oversize=True. seqs (Iterable[Sequence]): The sequences to minibatch. - size (int or Sequence[int]): The target number of words per batch. + size (int or Iterable[int]): The target number of words per batch. Can be a single integer, or a sequence, allowing for variable batch sizes. tolerance (float): What percentage of the size to allow batches to exceed. discard_oversize (bool): Whether to discard sequences that by themselves @@ -149,12 +149,10 @@ def minibatch_by_words( item. The `len` function is used by default. """ if isinstance(size, int): - size_ = constant_schedule(size) + size_: Iterator[int] = itertools.repeat(size) else: - assert isinstance(size, Schedule) - size_ = size - step = 0 - target_size = size_(step) + size_ = iter(size) + target_size = next(size_) tol_size = target_size * tolerance batch = [] overflow = [] @@ -179,8 +177,7 @@ def minibatch_by_words( else: if batch: yield batch - step += 1 - target_size = size_(step) + target_size = next(size_) tol_size = target_size * tolerance batch = overflow batch_size = overflow_size @@ -198,8 +195,7 @@ def minibatch_by_words( else: if batch: yield batch - step += 1 - target_size = size_(step) + target_size = next(size_) tol_size = target_size * tolerance batch = [seq] batch_size = n_words @@ -236,3 +232,9 @@ def _batch_by_length( batches = [list(sorted(batch)) for batch in batches] batches.reverse() return batches + + +def _schedule_to_sizing(size: SizingSchedule) -> Sizing: + if isinstance(size, Schedule): + return size.to_generator() + return size diff --git a/spacy/training/converters/conll_ner_to_docs.py b/spacy/training/converters/conll_ner_to_docs.py index 28b21c5f0..259f5fa8c 100644 --- a/spacy/training/converters/conll_ner_to_docs.py +++ b/spacy/training/converters/conll_ner_to_docs.py @@ -86,7 +86,7 @@ def conll_ner_to_docs( if model: nlp = load_model(model) else: - nlp = get_lang_class("xx")() + nlp = get_lang_class("mul")() for conll_doc in input_data.strip().split(doc_delimiter): conll_doc = conll_doc.strip() if not conll_doc: @@ -133,7 +133,7 @@ def segment_sents_and_docs(doc, n_sents, doc_delimiter, model=None, msg=None): "Segmenting sentences with sentencizer. 
(Use `-b model` for " "improved parser-based sentence segmentation.)" ) - nlp = get_lang_class("xx")() + nlp = get_lang_class("mul")() sentencizer = nlp.create_pipe("sentencizer") lines = doc.strip().split("\n") words = [line.strip().split()[0] for line in lines] diff --git a/spacy/training/converters/json_to_docs.py b/spacy/training/converters/json_to_docs.py index 4123839f2..1ff7a64e0 100644 --- a/spacy/training/converters/json_to_docs.py +++ b/spacy/training/converters/json_to_docs.py @@ -3,7 +3,7 @@ from ..gold_io import json_iterate, json_to_annotations from ..example import annotations_to_doc from ..example import _fix_legacy_dict_data, _parse_example_dict_data from ...util import load_model -from ...lang.xx import MultiLanguage +from ...lang.mul import MultiLanguage def json_to_docs(input_data, model=None, **kwargs): diff --git a/spacy/training/corpus.py b/spacy/training/corpus.py index b9f929fcd..d626ad0e0 100644 --- a/spacy/training/corpus.py +++ b/spacy/training/corpus.py @@ -58,6 +58,28 @@ def read_labels(path: Path, *, require: bool = False): return srsly.read_json(path) +@util.registry.readers("spacy.PlainTextCorpus.v1") +def create_plain_text_reader( + path: Optional[Path], + min_length: int = 0, + max_length: int = 0, +) -> Callable[["Language"], Iterable[Doc]]: + """Iterate Example objects from a file or directory of plain text + UTF-8 files with one line per doc. + + path (Path): The directory or filename to read from. + min_length (int): Minimum document length (in tokens). Shorter documents + will be skipped. Defaults to 0, which indicates no limit. + max_length (int): Maximum document length (in tokens). Longer documents will + be skipped. Defaults to 0, which indicates no limit. + + DOCS: https://spacy.io/api/corpus#plaintextcorpus + """ + if path is None: + raise ValueError(Errors.E913) + return PlainTextCorpus(path, min_length=min_length, max_length=max_length) + + def walk_corpus(path: Union[str, Path], file_type) -> List[Path]: path = util.ensure_path(path) if not path.is_dir() and path.parts[-1].endswith(file_type): @@ -257,3 +279,52 @@ class JsonlCorpus: # We don't *need* an example here, but it seems nice to # make it match the Corpus signature. yield Example(doc, Doc(nlp.vocab, words=words, spaces=spaces)) + + +class PlainTextCorpus: + """Iterate Example objects from a file or directory of plain text + UTF-8 files with one line per doc. + + path (Path): The directory or filename to read from. + min_length (int): Minimum document length (in tokens). Shorter documents + will be skipped. Defaults to 0, which indicates no limit. + max_length (int): Maximum document length (in tokens). Longer documents will + be skipped. Defaults to 0, which indicates no limit. + + DOCS: https://spacy.io/api/corpus#plaintextcorpus + """ + + file_type = "txt" + + def __init__( + self, + path: Optional[Union[str, Path]], + *, + min_length: int = 0, + max_length: int = 0, + ) -> None: + self.path = util.ensure_path(path) + self.min_length = min_length + self.max_length = max_length + + def __call__(self, nlp: "Language") -> Iterator[Example]: + """Yield examples from the data. + + nlp (Language): The current nlp object. + YIELDS (Example): The example objects. 
+ + DOCS: https://spacy.io/api/corpus#plaintextcorpus-call + """ + for loc in walk_corpus(self.path, ".txt"): + with open(loc, encoding="utf-8") as f: + for text in f: + text = text.rstrip("\r\n") + if len(text): + doc = nlp.make_doc(text) + if self.min_length >= 1 and len(doc) < self.min_length: + continue + elif self.max_length >= 1 and len(doc) > self.max_length: + continue + # We don't *need* an example here, but it seems nice to + # make it match the Corpus signature. + yield Example(doc, doc.copy()) diff --git a/spacy/training/example.pyx b/spacy/training/example.pyx index 95b0f0de9..a36fa0d73 100644 --- a/spacy/training/example.pyx +++ b/spacy/training/example.pyx @@ -1,5 +1,4 @@ from collections.abc import Iterable as IterableInstance -import warnings import numpy from murmurhash.mrmr cimport hash64 @@ -47,6 +46,13 @@ def validate_examples(examples, method): raise TypeError(err) +def validate_distillation_examples(examples, method): + validate_examples(examples, method) + for eg in examples: + if [token.text for token in eg.reference] != [token.text for token in eg.predicted]: + raise ValueError(Errors.E4003) + + def validate_get_examples(get_examples, method): """Check that a generator of a batch of examples received during processing is valid: the callable produces a non-empty list of Example objects. diff --git a/spacy/ty.py b/spacy/ty.py index 8f2903d78..f6dece840 100644 --- a/spacy/ty.py +++ b/spacy/ty.py @@ -1,6 +1,5 @@ -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Protocol, runtime_checkable from typing import Optional, Any, Iterable, Dict, Callable, Sequence, List -from .compat import Protocol, runtime_checkable from thinc.api import Optimizer, Model @@ -27,6 +26,25 @@ class TrainableComponent(Protocol): ... +@runtime_checkable +class DistillableComponent(Protocol): + is_distillable: bool + + def distill( + self, + teacher_pipe: Optional[TrainableComponent], + examples: Iterable["Example"], + *, + drop: float = 0.0, + sgd: Optional[Optimizer] = None, + losses: Optional[Dict[str, float]] = None + ) -> Dict[str, float]: + ... + + def finish_update(self, sgd: Optimizer) -> None: + ... + + @runtime_checkable class InitializableComponent(Protocol): def initialize( diff --git a/spacy/util.py b/spacy/util.py index 63af4c85d..e2ca0e6a4 100644 --- a/spacy/util.py +++ b/spacy/util.py @@ -4,6 +4,7 @@ from typing import Iterator, Pattern, Generator, TYPE_CHECKING from types import ModuleType import os import importlib +import importlib.metadata import importlib.util import re from pathlib import Path @@ -40,7 +41,7 @@ except ImportError: from .symbols import ORTH -from .compat import cupy, CudaStream, is_windows, importlib_metadata +from .compat import cupy, CudaStream, is_windows from .errors import Errors, Warnings from . import about @@ -282,7 +283,7 @@ def find_matching_language(lang: str) -> Optional[str]: import spacy.lang # noqa: F401 if lang == "xx": - return "xx" + return "mul" # Find out which language modules we have possible_languages = [] @@ -300,11 +301,7 @@ def find_matching_language(lang: str) -> Optional[str]: # is labeled that way is probably trying to be distinct from 'zh' and # shouldn't automatically match. 
match = langcodes.closest_supported_match(lang, possible_languages, max_distance=9) - if match == "mul": - # Convert 'mul' back to spaCy's 'xx' - return "xx" - else: - return match + return match def get_lang_class(lang: str) -> Type["Language"]: @@ -706,8 +703,8 @@ def get_package_version(name: str) -> Optional[str]: RETURNS (str / None): The version or None if package not installed. """ try: - return importlib_metadata.version(name) # type: ignore[attr-defined] - except importlib_metadata.PackageNotFoundError: # type: ignore[attr-defined] + return importlib.metadata.version(name) # type: ignore[attr-defined] + except importlib.metadata.PackageNotFoundError: # type: ignore[attr-defined] return None @@ -895,7 +892,7 @@ def is_package(name: str) -> bool: RETURNS (bool): True if installed package, False if not. """ try: - importlib_metadata.distribution(name) # type: ignore[attr-defined] + importlib.metadata.distribution(name) # type: ignore[attr-defined] return True except: # noqa: E722 return False @@ -1583,12 +1580,12 @@ def minibatch(items, size): so that batch-size can vary on each step. """ if isinstance(size, int): - size_ = constant_schedule(size) + size_ = itertools.repeat(size) else: - size_ = size + size_ = iter(size) items = iter(items) - for step in itertools.count(): - batch_size = size_(step) + while True: + batch_size = next(size_) batch = list(itertools.islice(items, int(batch_size))) if len(batch) == 0: break @@ -1718,7 +1715,7 @@ def packages_distributions() -> Dict[str, List[str]]: it's not available in the builtin importlib.metadata. """ pkg_to_dist = defaultdict(list) - for dist in importlib_metadata.distributions(): + for dist in importlib.metadata.distributions(): for pkg in (dist.read_text("top_level.txt") or "").split(): pkg_to_dist[pkg].append(dist.metadata["Name"]) return dict(pkg_to_dist) diff --git a/spacy/vocab.pxd b/spacy/vocab.pxd index 815de0765..2db709b71 100644 --- a/spacy/vocab.pxd +++ b/spacy/vocab.pxd @@ -35,12 +35,11 @@ cdef class Vocab: cdef public object lex_attr_getters cdef public object cfg - cdef const LexemeC* get(self, Pool mem, str string) except NULL - cdef const LexemeC* get_by_orth(self, Pool mem, attr_t orth) except NULL + cdef const LexemeC* get(self, str string) except NULL + cdef const LexemeC* get_by_orth(self, attr_t orth) except NULL cdef const TokenC* make_fused_token(self, substrings) except NULL - cdef const LexemeC* _new_lexeme(self, Pool mem, str string) except NULL + cdef const LexemeC* _new_lexeme(self, str string) except NULL cdef int _add_lex_to_vocab(self, hash_t key, const LexemeC* lex) except -1 - cdef const LexemeC* _new_lexeme(self, Pool mem, str string) except NULL cdef PreshMap _by_orth diff --git a/spacy/vocab.pyx b/spacy/vocab.pyx index fc496a68b..a87f50ad4 100644 --- a/spacy/vocab.pyx +++ b/spacy/vocab.pyx @@ -139,7 +139,7 @@ cdef class Vocab: self.lex_attr_getters[flag_id] = flag_getter return flag_id - cdef const LexemeC* get(self, Pool mem, str string) except NULL: + cdef const LexemeC* get(self, str string) except NULL: """Get a pointer to a `LexemeC` from the lexicon, creating a new `Lexeme` if necessary using memory acquired from the given pool. If the pool is the lexicon's own memory, the lexeme is saved in the lexicon. 
@@ -157,9 +157,9 @@ cdef class Vocab: orth=key, orth_id=string)) return lex else: - return self._new_lexeme(mem, string) + return self._new_lexeme(string) - cdef const LexemeC* get_by_orth(self, Pool mem, attr_t orth) except NULL: + cdef const LexemeC* get_by_orth(self, attr_t orth) except NULL: """Get a pointer to a `LexemeC` from the lexicon, creating a new `Lexeme` if necessary using memory acquired from the given pool. If the pool is the lexicon's own memory, the lexeme is saved in the lexicon. @@ -171,21 +171,10 @@ cdef class Vocab: if lex != NULL: return lex else: - return self._new_lexeme(mem, self.strings[orth]) + return self._new_lexeme(self.strings[orth]) - cdef const LexemeC* _new_lexeme(self, Pool mem, str string) except NULL: - # I think this heuristic is bad, and the Vocab should always - # own the lexemes. It avoids weird bugs this way, as it's how the thing - # was originally supposed to work. The best solution to the growing - # memory use is to periodically reset the vocab, which is an action - # that should be up to the user to do (so we don't need to keep track - # of the doc ownership). - # TODO: Change the C API so that the mem isn't passed in here. - mem = self.mem - #if len(string) < 3 or self.length < 10000: - # mem = self.mem - cdef bint is_oov = mem is not self.mem - lex = mem.alloc(1, sizeof(LexemeC)) + cdef const LexemeC* _new_lexeme(self, str string) except NULL: + lex = self.mem.alloc(1, sizeof(LexemeC)) lex.orth = self.strings.add(string) lex.length = len(string) if self.vectors is not None: @@ -199,8 +188,7 @@ cdef class Vocab: value = self.strings.add(value) if value is not None: Lexeme.set_struct_attr(lex, attr, value) - if not is_oov: - self._add_lex_to_vocab(lex.orth, lex) + self._add_lex_to_vocab(lex.orth, lex) if lex == NULL: raise ValueError(Errors.E085.format(string=string)) return lex @@ -271,7 +259,7 @@ cdef class Vocab: props = intify_attrs(props, strings_map=self.strings) token = &tokens[i] # Set the special tokens up to have arbitrary attributes - lex = self.get_by_orth(self.mem, props[ORTH]) + lex = self.get_by_orth(props[ORTH]) token.lex = lex for attr_id, value in props.items(): Token.set_struct_attr(token, attr_id, value) diff --git a/website/.dockerignore b/website/.dockerignore new file mode 100644 index 000000000..e4a88552e --- /dev/null +++ b/website/.dockerignore @@ -0,0 +1,9 @@ +.cache/ +.next/ +public/ +node_modules +.npm +logs +*.log +npm-debug.log* +quickstart-training-generator.js diff --git a/website/.gitignore b/website/.gitignore index 70ef99fa5..599c0953a 100644 --- a/website/.gitignore +++ b/website/.gitignore @@ -1,5 +1,7 @@ # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. +quickstart-training-generator.js + # dependencies /node_modules /.pnp @@ -41,4 +43,4 @@ next-env.d.ts public/robots.txt public/sitemap* public/sw.js* -public/workbox* \ No newline at end of file +public/workbox* diff --git a/website/Dockerfile b/website/Dockerfile index f71733e55..9b2f6cac4 100644 --- a/website/Dockerfile +++ b/website/Dockerfile @@ -1,16 +1,14 @@ -FROM node:11.15.0 +FROM node:18 -WORKDIR /spacy-io - -RUN npm install -g gatsby-cli@2.7.4 - -COPY package.json . -COPY package-lock.json . 
- -RUN npm install +USER node # This is so the installed node_modules will be up one directory # from where a user mounts files, so that they don't accidentally mount # their own node_modules from a different build # https://nodejs.org/api/modules.html#modules_loading_from_node_modules_folders -WORKDIR /spacy-io/website/ +WORKDIR /home/node +COPY --chown=node package.json . +COPY --chown=node package-lock.json . +RUN npm install + +WORKDIR /home/node/website/ diff --git a/website/README.md b/website/README.md index e9d7aec26..a434efe9a 100644 --- a/website/README.md +++ b/website/README.md @@ -41,33 +41,27 @@ If you'd like to do this, **be sure you do _not_ include your local `node_modules` folder**, since there are some dependencies that need to be built for the image system. Rename it before using. -```bash -docker run -it \ - -v $(pwd):/spacy-io/website \ - -p 8000:8000 \ - ghcr.io/explosion/spacy-io \ - gatsby develop -H 0.0.0.0 -``` - -This will allow you to access the built website at http://0.0.0.0:8000/ in your -browser, and still edit code in your editor while having the site reflect those -changes. - -**Note**: If you're working on a Mac with an M1 processor, you might see -segfault errors from `qemu` if you use the default image. To fix this use the -`arm64` tagged image in the `docker run` command -(ghcr.io/explosion/spacy-io:arm64). - -### Building the Docker image - -If you'd like to build the image locally, you can do so like this: +First build the Docker image. This only needs to be done on the first run +or when changes are made to `Dockerfile` or the website dependencies: ```bash docker build -t spacy-io . ``` -This will take some time, so if you want to use the prebuilt image you'll save a -bit of time. +You can then build and run the website with: + +```bash +docker run -it \ + --rm \ + -v $(pwd):/home/node/website \ + -p 3000:3000 \ + spacy-io \ + npm run dev -- -H 0.0.0.0 +``` + +This will allow you to access the built website at http://0.0.0.0:3000/ in your +browser, and still edit code in your editor while having the site reflect those +changes. ## Project structure diff --git a/website/docs/api/architectures.mdx b/website/docs/api/architectures.mdx index 2a1bc4380..54b5065e8 100644 --- a/website/docs/api/architectures.mdx +++ b/website/docs/api/architectures.mdx @@ -553,18 +553,17 @@ for a Tok2Vec layer. ## Parser & NER architectures {id="parser"} -### spacy.TransitionBasedParser.v2 {id="TransitionBasedParser",source="spacy/ml/models/parser.py"} +### spacy.TransitionBasedParser.v3 {id="TransitionBasedParser",source="spacy/ml/models/parser.py"} > #### Example Config > > ```ini > [model] -> @architectures = "spacy.TransitionBasedParser.v2" +> @architectures = "spacy.TransitionBasedParser.v3" > state_type = "ner" > extra_state_tokens = false > hidden_width = 64 > maxout_pieces = 2 -> use_upper = true > > [model.tok2vec] > @architectures = "spacy.HashEmbedCNN.v2" @@ -594,23 +593,22 @@ consists of either two or three subnetworks: state representation. If not present, the output from the lower model is used as action scores directly. 
-| Name | Description | -| -------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `tok2vec` | Subnetwork to map tokens into vector representations. ~~Model[List[Doc], List[Floats2d]]~~ | -| `state_type` | Which task to extract features for. Possible values are "ner" and "parser". ~~str~~ | -| `extra_state_tokens` | Whether to use an expanded feature set when extracting the state tokens. Slightly slower, but sometimes improves accuracy slightly. Defaults to `False`. ~~bool~~ | -| `hidden_width` | The width of the hidden layer. ~~int~~ | -| `maxout_pieces` | How many pieces to use in the state prediction layer. Recommended values are `1`, `2` or `3`. If `1`, the maxout non-linearity is replaced with a [`Relu`](https://thinc.ai/docs/api-layers#relu) non-linearity if `use_upper` is `True`, and no non-linearity if `False`. ~~int~~ | -| `use_upper` | Whether to use an additional hidden layer after the state vector in order to predict the action scores. It is recommended to set this to `False` for large pretrained models such as transformers, and `True` for smaller networks. The upper layer is computed on CPU, which becomes a bottleneck on larger GPU-based models, where it's also less necessary. ~~bool~~ | -| `nO` | The number of actions the model will predict between. Usually inferred from data at the beginning of training, or loaded from disk. ~~int~~ | -| **CREATES** | The model using the architecture. ~~Model[List[Docs], List[List[Floats2d]]]~~ | +| Name | Description | +| -------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `tok2vec` | Subnetwork to map tokens into vector representations. ~~Model[List[Doc], List[Floats2d]]~~ | +| `state_type` | Which task to extract features for. Possible values are "ner" and "parser". ~~str~~ | +| `extra_state_tokens` | Whether to use an expanded feature set when extracting the state tokens. Slightly slower, but sometimes improves accuracy slightly. Defaults to `False`. ~~bool~~ | +| `hidden_width` | The width of the hidden layer. ~~int~~ | +| `maxout_pieces` | How many pieces to use in the state prediction layer. Recommended values are `1`, `2` or `3`. ~~int~~ | +| `nO` | The number of actions the model will predict between. Usually inferred from data at the beginning of training, or loaded from disk. ~~int~~ | +| **CREATES** | The model using the architecture. ~~Model[List[Docs], List[List[Floats2d]]]~~ | [TransitionBasedParser.v1](/api/legacy#TransitionBasedParser_v1) had the exact same signature, but the `use_upper` argument was `True` by default. 
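For pipelines trained with earlier configs, a quick way to see which parser architecture version they refer to is to inspect the resolved config. This is a hedged sketch: `en_core_web_sm` is only an assumed example pipeline, and older pipelines will typically report `v1` or `v2` rather than the `v3` architecture documented above.

```python
# Print the registered parser/NER model architectures of a trained pipeline.
import spacy

nlp = spacy.load("en_core_web_sm")
for name in ("parser", "ner"):
    if name in nlp.config["components"]:
        arch = nlp.config["components"][name]["model"]["@architectures"]
        print(name, arch)
```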
- + ## Tagging architectures {id="tagger",source="spacy/ml/models/tagger.py"} diff --git a/website/docs/api/cli.mdx b/website/docs/api/cli.mdx index 2c59d2d69..0bf708183 100644 --- a/website/docs/api/cli.mdx +++ b/website/docs/api/cli.mdx @@ -12,6 +12,7 @@ menu: - ['train', 'train'] - ['pretrain', 'pretrain'] - ['evaluate', 'evaluate'] + - ['benchmark', 'benchmark'] - ['apply', 'apply'] - ['find-threshold', 'find-threshold'] - ['assemble', 'assemble'] @@ -269,10 +270,10 @@ $ python -m spacy convert [input_file] [output_dir] [--converter] [--file-type] | `--file-type`, `-t` | Type of file to create. Either `spacy` (default) for binary [`DocBin`](/api/docbin) data or `json` for v2.x JSON format. ~~str (option)~~ | | `--n-sents`, `-n` | Number of sentences per document. Supported for: `conll`, `conllu`, `iob`, `ner` ~~int (option)~~ | | `--seg-sents`, `-s` | Segment sentences. Supported for: `conll`, `ner` ~~bool (flag)~~ | -| `--base`, `-b`, `--model` | Trained spaCy pipeline for sentence segmentation to use as base (for `--seg-sents`). ~~Optional[str](option)~~ | +| `--base`, `-b`, `--model` | Trained spaCy pipeline for sentence segmentation to use as base (for `--seg-sents`). ~~Optional[str] (option)~~ | | `--morphology`, `-m` | Enable appending morphology to tags. Supported for: `conllu` ~~bool (flag)~~ | | `--merge-subtokens`, `-T` | Merge CoNLL-U subtokens ~~bool (flag)~~ | -| `--ner-map`, `-nm` | NER tag mapping (as JSON-encoded dict of entity types). Supported for: `conllu` ~~Optional[Path](option)~~ | +| `--ner-map`, `-nm` | NER tag mapping (as JSON-encoded dict of entity types). Supported for: `conllu` ~~Optional[Path] (option)~~ | | `--lang`, `-l` | Language code (if tokenizer required). ~~Optional[str] \(option)~~ | | `--concatenate`, `-C` | Concatenate output to a single file ~~bool (flag)~~ | | `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | @@ -361,7 +362,7 @@ Module spacy.language File /path/to/spacy/language.py (line 64) ℹ [components.ner.model] Registry @architectures -Name spacy.TransitionBasedParser.v1 +Name spacy.TransitionBasedParser.v3 Module spacy.ml.models.parser File /path/to/spacy/ml/models/parser.py (line 11) ℹ [components.ner.model.tok2vec] @@ -371,7 +372,7 @@ Module spacy.ml.models.tok2vec File /path/to/spacy/ml/models/tok2vec.py (line 16) ℹ [components.parser.model] Registry @architectures -Name spacy.TransitionBasedParser.v1 +Name spacy.TransitionBasedParser.v3 Module spacy.ml.models.parser File /path/to/spacy/ml/models/parser.py (line 11) ℹ [components.parser.model.tok2vec] @@ -696,7 +697,7 @@ scorer = {"@scorers":"spacy.ner_scorer.v1"} update_with_oracle_cut_size = 100 [components.ner.model] -@architectures = "spacy.TransitionBasedParser.v2" +@architectures = "spacy.TransitionBasedParser.v3" state_type = "ner" extra_state_tokens = false - hidden_width = 64 @@ -719,7 +720,7 @@ scorer = {"@scorers":"spacy.parser_scorer.v1"} update_with_oracle_cut_size = 100 [components.parser.model] -@architectures = "spacy.TransitionBasedParser.v2" +@architectures = "spacy.TransitionBasedParser.v3" state_type = "parser" extra_state_tokens = false hidden_width = 128 @@ -1135,8 +1136,19 @@ $ python -m spacy pretrain [config_path] [output_dir] [--code] [--resume-path] [ ## evaluate {id="evaluate",version="2",tag="command"} -Evaluate a trained pipeline. Expects a loadable spaCy pipeline (package name or -path) and evaluation data in the +The `evaluate` subcommand is superseded by +[`spacy benchmark accuracy`](#benchmark-accuracy). 
`evaluate` is provided as an +alias to `benchmark accuracy` for compatibility. + +## benchmark {id="benchmark", version="3.5"} + +The `spacy benchmark` CLI includes commands for benchmarking the accuracy and +speed of your spaCy pipelines. + +### accuracy {id="benchmark-accuracy", version="3.5", tag="command"} + +Evaluate the accuracy of a trained pipeline. Expects a loadable spaCy pipeline +(package name or path) and evaluation data in the [binary `.spacy` format](/api/data-formats#binary-training). The `--gold-preproc` option sets up the evaluation examples with gold-standard sentences and tokens for the predictions. Gold preprocessing helps the @@ -1147,7 +1159,7 @@ skew. To render a sample of dependency parses in a HTML file using the `--displacy-path` argument. ```bash -$ python -m spacy evaluate [model] [data_path] [--output] [--code] [--gold-preproc] [--gpu-id] [--displacy-path] [--displacy-limit] +$ python -m spacy benchmark accuracy [model] [data_path] [--output] [--code] [--gold-preproc] [--gpu-id] [--displacy-path] [--displacy-limit] ``` | Name | Description | @@ -1163,6 +1175,29 @@ $ python -m spacy evaluate [model] [data_path] [--output] [--code] [--gold-prepr | `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | | **CREATES** | Training results and optional metrics and visualizations. | +### speed {id="benchmark-speed", version="3.5", tag="command"} + +Benchmark the speed of a trained pipeline with a 95% confidence interval. +Expects a loadable spaCy pipeline (package name or path) and benchmark data in +the [binary `.spacy` format](/api/data-formats#binary-training). The pipeline is +warmed up before any measurements are taken. + +```cli +$ python -m spacy benchmark speed [model] [data_path] [--batch_size] [--no-shuffle] [--gpu-id] [--batches] [--warmup] +``` + +| Name | Description | +| -------------------- | -------------------------------------------------------------------------------------------------------- | +| `model` | Pipeline to benchmark the speed of. Can be a package or a path to a data directory. ~~str (positional)~~ | +| `data_path` | Location of benchmark data in spaCy's [binary format](/api/data-formats#training). ~~Path (positional)~~ | +| `--batch-size`, `-b` | Set the batch size. If not set, the pipeline's batch size is used. ~~Optional[int] \(option)~~ | +| `--no-shuffle` | Do not shuffle documents in the benchmark data. ~~bool (flag)~~ | +| `--gpu-id`, `-g` | GPU to use, if any. Defaults to `-1` for CPU. ~~int (option)~~ | +| `--batches` | Number of batches to benchmark on. Defaults to `50`. ~~Optional[int] \(option)~~ | +| `--warmup`, `-w` | Iterations over the benchmark data for warmup. Defaults to `3` ~~Optional[int] \(option)~~ | +| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | +| **PRINTS** | Pipeline speed in words per second with a 95% confidence interval. | + ## apply {id="apply", version="3.5", tag="command"} Applies a trained pipeline to data and stores the resulting annotated documents @@ -1176,7 +1211,7 @@ input formats are: When a directory is provided it is traversed recursively to collect all files. -```cli +```bash $ python -m spacy apply [model] [data-path] [output-file] [--code] [--text-key] [--force-overwrite] [--gpu-id] [--batch-size] [--n-process] ``` diff --git a/website/docs/api/corpus.mdx b/website/docs/api/corpus.mdx index c58723e82..75e8f5c0f 100644 --- a/website/docs/api/corpus.mdx +++ b/website/docs/api/corpus.mdx @@ -175,3 +175,68 @@ Yield examples from the data. 
| ---------- | -------------------------------------- | | `nlp` | The current `nlp` object. ~~Language~~ | | **YIELDS** | The examples. ~~Example~~ | + +## PlainTextCorpus {id="plaintextcorpus",tag="class",version="3.5.1"} + +Iterate over documents from a plain text file. Can be used to read the raw text +corpus for language model +[pretraining](/usage/embeddings-transformers#pretraining). The expected file +format is: + +- UTF-8 encoding +- One document per line +- Blank lines are ignored. + +```text {title="Example"} +Can I ask where you work now and what you do, and if you enjoy it? +They may just pull out of the Seattle market completely, at least until they have autonomous vehicles. +My cynical view on this is that it will never be free to the public. Reason: what would be the draw of joining the military? Right now their selling point is free Healthcare and Education. Ironically both are run horribly and most, that I've talked to, come out wishing they never went in. +``` + +### PlainTextCorpus.\_\_init\_\_ {id="plaintextcorpus-init",tag="method"} + +Initialize the reader. + +> #### Example +> +> ```python +> from spacy.training import PlainTextCorpus +> +> corpus = PlainTextCorpus("./data/docs.txt") +> ``` +> +> ```ini +> ### Example config +> [corpora.pretrain] +> @readers = "spacy.PlainTextCorpus.v1" +> path = "corpus/raw_text.txt" +> min_length = 0 +> max_length = 0 +> ``` + +| Name | Description | +| -------------- | -------------------------------------------------------------------------------------------------------------------------- | +| `path` | The directory or filename to read from. Expects newline-delimited documents in UTF8 format. ~~Union[str, Path]~~ | +| _keyword-only_ | | +| `min_length` | Minimum document length (in tokens). Shorter documents will be skipped. Defaults to `0`, which indicates no limit. ~~int~~ | +| `max_length` | Maximum document length (in tokens). Longer documents will be skipped. Defaults to `0`, which indicates no limit. ~~int~~ | + +### PlainTextCorpus.\_\_call\_\_ {id="plaintextcorpus-call",tag="method"} + +Yield examples from the data. + +> #### Example +> +> ```python +> from spacy.training import PlainTextCorpus +> import spacy +> +> corpus = PlainTextCorpus("./docs.txt") +> nlp = spacy.blank("en") +> data = corpus(nlp) +> ``` + +| Name | Description | +| ---------- | -------------------------------------- | +| `nlp` | The current `nlp` object. ~~Language~~ | +| **YIELDS** | The examples. ~~Example~~ | diff --git a/website/docs/api/cython-classes.mdx b/website/docs/api/cython-classes.mdx index ce7c03940..88bd92c72 100644 --- a/website/docs/api/cython-classes.mdx +++ b/website/docs/api/cython-classes.mdx @@ -163,14 +163,13 @@ vocabulary. > #### Example > > ```python -> lexeme = vocab.get(vocab.mem, "hello") +> lexeme = vocab.get("hello") > ``` -| Name | Description | -| ----------- | ---------------------------------------------------------------------------------------------------------- | -| `mem` | A memory pool. Allocated memory will be freed once the `Vocab` object is garbage collected. ~~cymem.Pool~~ | -| `string` | The string of the word to look up. ~~str~~ | -| **RETURNS** | The lexeme in the vocabulary. ~~const LexemeC\*~~ | +| Name | Description | +| ----------- | ------------------------------------------------- | +| `string` | The string of the word to look up. ~~str~~ | +| **RETURNS** | The lexeme in the vocabulary. 
~~const LexemeC\*~~ | ### Vocab.get_by_orth {id="vocab_get_by_orth",tag="method"} @@ -183,11 +182,10 @@ vocabulary. > lexeme = vocab.get_by_orth(doc[0].lex.norm) > ``` -| Name | Description | -| ----------- | ---------------------------------------------------------------------------------------------------------- | -| `mem` | A memory pool. Allocated memory will be freed once the `Vocab` object is garbage collected. ~~cymem.Pool~~ | -| `orth` | ID of the verbatim text content. ~~attr_t (uint64_t)~~ | -| **RETURNS** | The lexeme in the vocabulary. ~~const LexemeC\*~~ | +| Name | Description | +| ----------- | ------------------------------------------------------ | +| `orth` | ID of the verbatim text content. ~~attr_t (uint64_t)~~ | +| **RETURNS** | The lexeme in the vocabulary. ~~const LexemeC\*~~ | ## StringStore {id="stringstore",tag="cdef class",source="spacy/strings.pxd"} diff --git a/website/docs/api/dependencyparser.mdx b/website/docs/api/dependencyparser.mdx index 771a00aee..296d6d87d 100644 --- a/website/docs/api/dependencyparser.mdx +++ b/website/docs/api/dependencyparser.mdx @@ -131,6 +131,39 @@ and all pipeline components are applied to the `Doc` in order. Both | `doc` | The document to process. ~~Doc~~ | | **RETURNS** | The processed document. ~~Doc~~ | +## DependencyParser.distill {id="distill", tag="method,experimental", version="4"} + +Train a pipe (the student) on the predictions of another pipe (the teacher). The +student is typically trained on the probability distribution of the teacher, but +details may differ per pipe. The goal of distillation is to transfer knowledge +from the teacher to the student. + +The distillation is performed on ~~Example~~ objects. The `Example.reference` +and `Example.predicted` ~~Doc~~s must have the same number of tokens and the +same orthography. Even though the reference does not need have to have gold +annotations, the teacher could adds its own annotations when necessary. + +This feature is experimental. + +> #### Example +> +> ```python +> teacher_pipe = teacher.add_pipe("parser") +> student_pipe = student.add_pipe("parser") +> optimizer = nlp.resume_training() +> losses = student.distill(teacher_pipe, examples, sgd=optimizer) +> ``` + +| Name | Description | +| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ | +| `examples` | A batch of [`Example`](/api/example) distillation examples. The reference (teacher) and predicted (student) docs must have the same number of tokens and orthography. ~~Iterable[Example]~~ | +| _keyword-only_ | | +| `drop` | Dropout rate. ~~float~~ | +| `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ | +| `losses` | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ | +| **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ | + ## DependencyParser.pipe {id="pipe",tag="method"} Apply the pipe to a stream of documents. This usually happens under the hood @@ -268,6 +301,27 @@ predicted scores. | `scores` | Scores representing the model's predictions. ~~StateClass~~ | | **RETURNS** | The loss and the gradient, i.e. `(loss, gradient)`. 
~~Tuple[float, float]~~ | +## DependencyParser.get_teacher_student_loss {id="get_teacher_student_loss", tag="method", version="4"} + +Calculate the loss and its gradient for the batch of student scores relative to +the teacher scores. + +> #### Example +> +> ```python +> teacher_parser = teacher.get_pipe("parser") +> student_parser = student.add_pipe("parser") +> student_scores = student_parser.predict([eg.predicted for eg in examples]) +> teacher_scores = teacher_parser.predict([eg.predicted for eg in examples]) +> loss, d_loss = student_parser.get_teacher_student_loss(teacher_scores, student_scores) +> ``` + +| Name | Description | +| ---------------- | --------------------------------------------------------------------------- | +| `teacher_scores` | Scores representing the teacher model's predictions. | +| `student_scores` | Scores representing the student model's predictions. | +| **RETURNS** | The loss and the gradient, i.e. `(loss, gradient)`. ~~Tuple[float, float]~~ | + ## DependencyParser.create_optimizer {id="create_optimizer",tag="method"} Create an [`Optimizer`](https://thinc.ai/docs/api-optimizers) for the pipeline diff --git a/website/docs/api/edittreelemmatizer.mdx b/website/docs/api/edittreelemmatizer.mdx index 17af19e8c..c8b5c7180 100644 --- a/website/docs/api/edittreelemmatizer.mdx +++ b/website/docs/api/edittreelemmatizer.mdx @@ -115,6 +115,39 @@ and all pipeline components are applied to the `Doc` in order. Both | `doc` | The document to process. ~~Doc~~ | | **RETURNS** | The processed document. ~~Doc~~ | +## EditTreeLemmatizer.distill {id="distill", tag="method,experimental", version="4"} + +Train a pipe (the student) on the predictions of another pipe (the teacher). The +student is typically trained on the probability distribution of the teacher, but +details may differ per pipe. The goal of distillation is to transfer knowledge +from the teacher to the student. + +The distillation is performed on ~~Example~~ objects. The `Example.reference` +and `Example.predicted` ~~Doc~~s must have the same number of tokens and the +same orthography. Even though the reference does not need have to have gold +annotations, the teacher could adds its own annotations when necessary. + +This feature is experimental. + +> #### Example +> +> ```python +> teacher_pipe = teacher.add_pipe("trainable_lemmatizer") +> student_pipe = student.add_pipe("trainable_lemmatizer") +> optimizer = nlp.resume_training() +> losses = student.distill(teacher_pipe, examples, sgd=optimizer) +> ``` + +| Name | Description | +| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ | +| `examples` | A batch of [`Example`](/api/example) distillation examples. The reference (teacher) and predicted (student) docs must have the same number of tokens and orthography. ~~Iterable[Example]~~ | +| _keyword-only_ | | +| `drop` | Dropout rate. ~~float~~ | +| `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ | +| `losses` | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ | +| **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ | + ## EditTreeLemmatizer.pipe {id="pipe",tag="method"} Apply the pipe to a stream of documents. 
This usually happens under the hood @@ -269,6 +302,27 @@ Create an optimizer for the pipeline component. | ----------- | ---------------------------- | | **RETURNS** | The optimizer. ~~Optimizer~~ | +## EditTreeLemmatizer.get_teacher_student_loss {id="get_teacher_student_loss", tag="method", version="4"} + +Calculate the loss and its gradient for the batch of student scores relative to +the teacher scores. + +> #### Example +> +> ```python +> teacher_lemmatizer = teacher.get_pipe("trainable_lemmatizer") +> student_lemmatizer = student.add_pipe("trainable_lemmatizer") +> student_scores = student_lemmatizer.predict([eg.predicted for eg in examples]) +> teacher_scores = teacher_lemmatizer.predict([eg.predicted for eg in examples]) +> loss, d_loss = student_lemmatizer.get_teacher_student_loss(teacher_scores, student_scores) +> ``` + +| Name | Description | +| ---------------- | --------------------------------------------------------------------------- | +| `teacher_scores` | Scores representing the teacher model's predictions. | +| `student_scores` | Scores representing the student model's predictions. | +| **RETURNS** | The loss and the gradient, i.e. `(loss, gradient)`. ~~Tuple[float, float]~~ | + ## EditTreeLemmatizer.use_params {id="use_params",tag="method, contextmanager"} Modify the pipe's model, to use the given parameter values. At the end of the diff --git a/website/docs/api/entitylinker.mdx b/website/docs/api/entitylinker.mdx index b4e331bb5..238b62a2e 100644 --- a/website/docs/api/entitylinker.mdx +++ b/website/docs/api/entitylinker.mdx @@ -15,7 +15,7 @@ world". It requires a `KnowledgeBase`, as well as a function to generate plausible candidates from that `KnowledgeBase` given a certain textual mention, and a machine learning model to pick the right candidate, given the local context of the mention. `EntityLinker` defaults to using the -[`InMemoryLookupKB`](/api/kb_in_memory) implementation. +[`InMemoryLookupKB`](/api/inmemorylookupkb) implementation. ## Assigned Attributes {id="assigned-attributes"} diff --git a/website/docs/api/entityrecognizer.mdx b/website/docs/api/entityrecognizer.mdx index 1f386bbb6..f503cc998 100644 --- a/website/docs/api/entityrecognizer.mdx +++ b/website/docs/api/entityrecognizer.mdx @@ -127,6 +127,39 @@ and all pipeline components are applied to the `Doc` in order. Both | `doc` | The document to process. ~~Doc~~ | | **RETURNS** | The processed document. ~~Doc~~ | +## EntityRecognizer.distill {id="distill", tag="method,experimental", version="4"} + +Train a pipe (the student) on the predictions of another pipe (the teacher). The +student is typically trained on the probability distribution of the teacher, but +details may differ per pipe. The goal of distillation is to transfer knowledge +from the teacher to the student. + +The distillation is performed on ~~Example~~ objects. The `Example.reference` +and `Example.predicted` ~~Doc~~s must have the same number of tokens and the +same orthography. Even though the reference does not need have to have gold +annotations, the teacher could adds its own annotations when necessary. + +This feature is experimental. 
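A short sketch of the token-identity requirement stated above, based on the `test_validate_distillation_examples` test added earlier in this diff; it assumes a build that ships the new distillation API.

```python
# Distillation examples must pair predicted and reference docs with identical
# tokens; validate_distillation_examples raises a ValueError otherwise.
from spacy.lang.en import English
from spacy.training import Example, validate_distillation_examples

nlp = English()
doc = nlp.make_doc("Distill this sentence")

# Same orthography on both sides: passes validation.
good = Example(doc, doc.copy())
validate_distillation_examples([good], "demo")

# Mismatched tokens on the reference side: would raise ValueError.
bad = Example.from_dict(doc, {"words": ["Distill", "this", "sentence", "!"]})
# validate_distillation_examples([bad], "demo")
```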
+ +> #### Example +> +> ```python +> teacher_pipe = teacher.add_pipe("ner") +> student_pipe = student.add_pipe("ner") +> optimizer = nlp.resume_training() +> losses = student.distill(teacher_pipe, examples, sgd=optimizer) +> ``` + +| Name | Description | +| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ | +| `examples` | A batch of [`Example`](/api/example) distillation examples. The reference (teacher) and predicted (student) docs must have the same number of tokens and orthography. ~~Iterable[Example]~~ | +| _keyword-only_ | | +| `drop` | Dropout rate. ~~float~~ | +| `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ | +| `losses` | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ | +| **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ | + ## EntityRecognizer.pipe {id="pipe",tag="method"} Apply the pipe to a stream of documents. This usually happens under the hood @@ -264,6 +297,27 @@ predicted scores. | `scores` | Scores representing the model's predictions. ~~StateClass~~ | | **RETURNS** | The loss and the gradient, i.e. `(loss, gradient)`. ~~Tuple[float, float]~~ | +## EntityRecognizer.get_teacher_student_loss {id="get_teacher_student_loss", tag="method", version="4"} + +Calculate the loss and its gradient for the batch of student scores relative to +the teacher scores. + +> #### Example +> +> ```python +> teacher_ner = teacher.get_pipe("ner") +> student_ner = student.add_pipe("ner") +> student_scores = student_ner.predict([eg.predicted for eg in examples]) +> teacher_scores = teacher_ner.predict([eg.predicted for eg in examples]) +> loss, d_loss = student_ner.get_teacher_student_loss(teacher_scores, student_scores) +> ``` + +| Name | Description | +| ---------------- | --------------------------------------------------------------------------- | +| `teacher_scores` | Scores representing the teacher model's predictions. | +| `student_scores` | Scores representing the student model's predictions. | +| **RETURNS** | The loss and the gradient, i.e. `(loss, gradient)`. ~~Tuple[float, float]~~ | + ## EntityRecognizer.create_optimizer {id="create_optimizer",tag="method"} Create an optimizer for the pipeline component. diff --git a/website/docs/api/kb_in_memory.mdx b/website/docs/api/inmemorylookupkb.mdx similarity index 96% rename from website/docs/api/kb_in_memory.mdx rename to website/docs/api/inmemorylookupkb.mdx index e85b63c45..c24fe78d6 100644 --- a/website/docs/api/kb_in_memory.mdx +++ b/website/docs/api/inmemorylookupkb.mdx @@ -43,7 +43,7 @@ The length of the fixed-size entity vectors in the knowledge base. Add an entity to the knowledge base, specifying its corpus frequency and entity vector, which should be of length -[`entity_vector_length`](/api/kb_in_memory#entity_vector_length). +[`entity_vector_length`](/api/inmemorylookupkb#entity_vector_length). > #### Example > @@ -79,8 +79,9 @@ frequency and entity vector for each entity. Add an alias or mention to the knowledge base, specifying its potential KB identifiers and their prior probabilities. 
The entity identifiers should refer -to entities previously added with [`add_entity`](/api/kb_in_memory#add_entity) -or [`set_entities`](/api/kb_in_memory#set_entities). The sum of the prior +to entities previously added with +[`add_entity`](/api/inmemorylookupkb#add_entity) or +[`set_entities`](/api/inmemorylookupkb#set_entities). The sum of the prior probabilities should not exceed 1. Note that an empty string can not be used as alias. @@ -156,7 +157,7 @@ Get a list of all aliases in the knowledge base. Given a certain textual mention as input, retrieve a list of candidate entities of type [`Candidate`](/api/kb#candidate). Wraps -[`get_alias_candidates()`](/api/kb_in_memory#get_alias_candidates). +[`get_alias_candidates()`](/api/inmemorylookupkb#get_alias_candidates). > #### Example > @@ -174,7 +175,7 @@ of type [`Candidate`](/api/kb#candidate). Wraps ## InMemoryLookupKB.get_candidates_batch {id="get_candidates_batch",tag="method"} -Same as [`get_candidates()`](/api/kb_in_memory#get_candidates), but for an +Same as [`get_candidates()`](/api/inmemorylookupkb#get_candidates), but for an arbitrary number of mentions. The [`EntityLinker`](/api/entitylinker) component will call `get_candidates_batch()` instead of `get_candidates()`, if the config parameter `candidates_batch_size` is greater or equal than 1. @@ -231,7 +232,7 @@ Given a certain entity ID, retrieve its pretrained entity vector. ## InMemoryLookupKB.get_vectors {id="get_vectors",tag="method"} -Same as [`get_vector()`](/api/kb_in_memory#get_vector), but for an arbitrary +Same as [`get_vector()`](/api/inmemorylookupkb#get_vector), but for an arbitrary number of entity IDs. The default implementation of `get_vectors()` executes `get_vector()` in a loop. diff --git a/website/docs/api/kb.mdx b/website/docs/api/kb.mdx index 887b7fe97..2b0d4d9d6 100644 --- a/website/docs/api/kb.mdx +++ b/website/docs/api/kb.mdx @@ -21,8 +21,8 @@ functions called by the [`EntityLinker`](/api/entitylinker) component. This class was not abstract up to spaCy version 3.5. The `KnowledgeBase` -implementation up to that point is available as `InMemoryLookupKB` from 3.5 -onwards. +implementation up to that point is available as +[`InMemoryLookupKB`](/api/inmemorylookupkb) from 3.5 onwards. @@ -110,14 +110,15 @@ to you. From spaCy 3.5 on `KnowledgeBase` is an abstract class (with -[`InMemoryLookupKB`](/api/kb_in_memory) being a drop-in replacement) to allow -more flexibility in customizing knowledge bases. Some of its methods were moved -to [`InMemoryLookupKB`](/api/kb_in_memory) during this refactoring, one of those -being `get_alias_candidates()`. This method is now available as -[`InMemoryLookupKB.get_alias_candidates()`](/api/kb_in_memory#get_alias_candidates). -Note: [`InMemoryLookupKB.get_candidates()`](/api/kb_in_memory#get_candidates) +[`InMemoryLookupKB`](/api/inmemorylookupkb) being a drop-in replacement) to +allow more flexibility in customizing knowledge bases. Some of its methods were +moved to [`InMemoryLookupKB`](/api/inmemorylookupkb) during this refactoring, +one of those being `get_alias_candidates()`. This method is now available as +[`InMemoryLookupKB.get_alias_candidates()`](/api/inmemorylookupkb#get_alias_candidates). +Note: +[`InMemoryLookupKB.get_candidates()`](/api/inmemorylookupkb#get_candidates) defaults to -[`InMemoryLookupKB.get_alias_candidates()`](/api/kb_in_memory#get_alias_candidates). +[`InMemoryLookupKB.get_alias_candidates()`](/api/inmemorylookupkb#get_alias_candidates). 
## KnowledgeBase.get_vector {id="get_vector",tag="method"} diff --git a/website/docs/api/language.mdx b/website/docs/api/language.mdx index a34ea7242..c25bfcee5 100644 --- a/website/docs/api/language.mdx +++ b/website/docs/api/language.mdx @@ -333,6 +333,34 @@ and custom registered functions if needed. See the | `component_cfg` | Optional dictionary of keyword arguments for components, keyed by component names. Defaults to `None`. ~~Optional[Dict[str, Dict[str, Any]]]~~ | | **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ | +## Language.distill {id="distill",tag="method,experimental",version="4"} + +Distill the models in a student pipeline from a teacher pipeline. + +> #### Example +> +> ```python +> +> teacher = spacy.load("en_core_web_lg") +> student = English() +> student.add_pipe("tagger") +> student.distill(teacher, examples, sgd=optimizer) +> ``` + +| Name | Description | | -------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `teacher` | The teacher pipeline to distill from. ~~Language~~ | | `examples` | A batch of [`Example`](/api/example) distillation examples. The reference (teacher) and predicted (student) docs must have the same number of tokens and orthography. ~~Iterable[Example]~~ | | _keyword-only_ | | | `drop` | The dropout rate. ~~float~~ | | `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ | | `losses` | Dictionary to update with the loss, keyed by pipeline component. ~~Optional[Dict[str, float]]~~ | | `component_cfg` | Optional dictionary of keyword arguments for components, keyed by component names. Defaults to `None`. ~~Optional[Dict[str, Dict[str, Any]]]~~ | | `exclude` | Names of components that shouldn't be updated. Defaults to `[]`. ~~Iterable[str]~~ | | `annotates` | Names of components that should set annotations on the predicted examples after updating. Defaults to `[]`. ~~Iterable[str]~~ | | `student_to_teacher` | Map student component names to teacher component names, only necessary when the names differ. Defaults to `None`. ~~Optional[Dict[str, str]]~~ | | **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ | + ## Language.rehearse {id="rehearse",tag="method,experimental",version="3"} Perform a "rehearsal" update from a batch of data. Rehearsal updates teach the diff --git a/website/docs/api/legacy.mdx b/website/docs/api/legacy.mdx index ea6d3a899..70d6223e7 100644 --- a/website/docs/api/legacy.mdx +++ b/website/docs/api/legacy.mdx @@ -225,7 +225,7 @@ the others, but may not be as accurate, especially if texts are short. ### spacy.TransitionBasedParser.v1 {id="TransitionBasedParser_v1"} Identical to -[`spacy.TransitionBasedParser.v2`](/api/architectures#TransitionBasedParser) +[`spacy.TransitionBasedParser.v3`](/api/architectures#TransitionBasedParser) except the `use_upper` was set to `True` by default. ## Layers {id="layers"} diff --git a/website/docs/api/morphologizer.mdx b/website/docs/api/morphologizer.mdx index 1fda807cb..4660ec312 100644 --- a/website/docs/api/morphologizer.mdx +++ b/website/docs/api/morphologizer.mdx @@ -121,6 +121,39 @@ delegate to the [`predict`](/api/morphologizer#predict) and | `doc` | The document to process. ~~Doc~~ | | **RETURNS** | The processed document.
~~Doc~~ | +## Morphologizer.distill {id="distill", tag="method,experimental", version="4"} + +Train a pipe (the student) on the predictions of another pipe (the teacher). The +student is typically trained on the probability distribution of the teacher, but +details may differ per pipe. The goal of distillation is to transfer knowledge +from the teacher to the student. + +The distillation is performed on ~~Example~~ objects. The `Example.reference` +and `Example.predicted` ~~Doc~~s must have the same number of tokens and the +same orthography. Even though the reference does not need to have gold +annotations, the teacher can add its own annotations when necessary. + +This feature is experimental. + +> #### Example +> +> ```python +> teacher_pipe = teacher.add_pipe("morphologizer") +> student_pipe = student.add_pipe("morphologizer") +> optimizer = nlp.resume_training() +> losses = student.distill(teacher_pipe, examples, sgd=optimizer) +> ``` + +| Name | Description | | -------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ | | `examples` | A batch of [`Example`](/api/example) distillation examples. The reference (teacher) and predicted (student) docs must have the same number of tokens and orthography. ~~Iterable[Example]~~ | | _keyword-only_ | | | `drop` | Dropout rate. ~~float~~ | | `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ | | `losses` | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ | | **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ | + ## Morphologizer.pipe {id="pipe",tag="method"} Apply the pipe to a stream of documents. This usually happens under the hood @@ -259,6 +292,27 @@ predicted scores. | `scores` | Scores representing the model's predictions. | | **RETURNS** | The loss and the gradient, i.e. `(loss, gradient)`. ~~Tuple[float, float]~~ | +## Morphologizer.get_teacher_student_loss {id="get_teacher_student_loss", tag="method", version="4"} + +Calculate the loss and its gradient for the batch of student scores relative to +the teacher scores. + +> #### Example +> +> ```python +> teacher_morphologizer = teacher.get_pipe("morphologizer") +> student_morphologizer = student.add_pipe("morphologizer") +> student_scores = student_morphologizer.predict([eg.predicted for eg in examples]) +> teacher_scores = teacher_morphologizer.predict([eg.predicted for eg in examples]) +> loss, d_loss = student_morphologizer.get_teacher_student_loss(teacher_scores, student_scores) +> ``` + +| Name | Description | | ---------------- | --------------------------------------------------------------------------- | | `teacher_scores` | Scores representing the teacher model's predictions. | | `student_scores` | Scores representing the student model's predictions. | | **RETURNS** | The loss and the gradient, i.e. `(loss, gradient)`. ~~Tuple[float, float]~~ | + ## Morphologizer.create_optimizer {id="create_optimizer",tag="method"} Create an optimizer for the pipeline component.
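Since the same `distill` pattern now appears on `Language` and on each trainable pipe, a short end-to-end sketch may help tie the pieces together. This is a minimal illustration rather than part of the diffs above: `en_core_web_sm` stands in for any trained teacher pipeline, the two raw texts are placeholders, and a real distillation run would need a much larger corpus and a student whose label set matches the teacher's.

```python
# Illustrative sketch: distill a freshly created student tagger from a trained
# teacher pipeline by looping Language.distill over minibatches of examples.
import random
import spacy
from spacy.lang.en import English
from spacy.training import Example
from spacy.util import minibatch

teacher = spacy.load("en_core_web_sm")   # placeholder teacher pipeline
student = English()
student.add_pipe("tagger")

# Build distillation examples: the predicted (student) and reference (teacher)
# docs must share the same tokens and orthography.
raw_texts = ["I like green eggs.", "Eat blue ham."]
examples = [Example(student.make_doc(text), teacher(text)) for text in raw_texts]

optimizer = student.initialize(lambda: examples)

for epoch in range(5):
    random.shuffle(examples)
    losses = {}
    for batch in minibatch(examples, size=8):
        # Pass student_to_teacher= if component names differ between pipelines.
        student.distill(teacher, batch, sgd=optimizer, losses=losses)
    print(epoch, losses)
```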
diff --git a/website/docs/api/pipe.mdx b/website/docs/api/pipe.mdx index b387ea586..e1e7f5d70 100644 --- a/website/docs/api/pipe.mdx +++ b/website/docs/api/pipe.mdx @@ -234,6 +234,39 @@ predictions and gold-standard annotations, and update the component's model. | `losses` | Optional record of the loss during training. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ | | **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ | +## TrainablePipe.distill {id="distill", tag="method,experimental", version="4"} + +Train a pipe (the student) on the predictions of another pipe (the teacher). The +student is typically trained on the probability distribution of the teacher, but +details may differ per pipe. The goal of distillation is to transfer knowledge +from the teacher to the student. + +The distillation is performed on ~~Example~~ objects. The `Example.reference` +and `Example.predicted` ~~Doc~~s must have the same number of tokens and the +same orthography. Even though the reference does not need to have gold +annotations, the teacher can add its own annotations when necessary. + +This feature is experimental. + +> #### Example +> +> ```python +> teacher_pipe = teacher.add_pipe("your_custom_pipe") +> student_pipe = student.add_pipe("your_custom_pipe") +> optimizer = nlp.resume_training() +> losses = student.distill(teacher_pipe, examples, sgd=optimizer) +> ``` + +| Name | Description | | -------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ | | `examples` | A batch of [`Example`](/api/example) distillation examples. The reference (teacher) and predicted (student) docs must have the same number of tokens and orthography. ~~Iterable[Example]~~ | | _keyword-only_ | | | `drop` | Dropout rate. ~~float~~ | | `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ | | `losses` | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ | | **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ | + ## TrainablePipe.rehearse {id="rehearse",tag="method,experimental",version="3"} Perform a "rehearsal" update from a batch of data. Rehearsal updates teach the @@ -281,6 +314,34 @@ This method needs to be overwritten with your own custom `get_loss` method. | `scores` | Scores representing the model's predictions. | | **RETURNS** | The loss and the gradient, i.e. `(loss, gradient)`. ~~Tuple[float, float]~~ | +## TrainablePipe.get_teacher_student_loss {id="get_teacher_student_loss", tag="method", version="4"} + +Calculate the loss and its gradient for the batch of student scores relative to +the teacher scores. + + + +This method needs to be overwritten with your own custom +`get_teacher_student_loss` method.
+ + + +> #### Example +> +> ```python +> teacher_pipe = teacher.get_pipe("your_custom_pipe") +> student_pipe = student.add_pipe("your_custom_pipe") +> student_scores = student_pipe.predict([eg.predicted for eg in examples]) +> teacher_scores = teacher_pipe.predict([eg.predicted for eg in examples]) +> loss, d_loss = student_pipe.get_teacher_student_loss(teacher_scores, student_scores) +> ``` + +| Name | Description | | ---------------- | --------------------------------------------------------------------------- | | `teacher_scores` | Scores representing the teacher model's predictions. | | `student_scores` | Scores representing the student model's predictions. | | **RETURNS** | The loss and the gradient, i.e. `(loss, gradient)`. ~~Tuple[float, float]~~ | + ## TrainablePipe.score {id="score",tag="method",version="3"} Score a batch of examples. diff --git a/website/docs/api/scorer.mdx b/website/docs/api/scorer.mdx index 6f0c95f6f..d72018b90 100644 --- a/website/docs/api/scorer.mdx +++ b/website/docs/api/scorer.mdx @@ -30,7 +30,7 @@ Create a new `Scorer`. | Name | Description | | ------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `nlp` | The pipeline to use for scoring, where each pipeline component may provide a scoring method. If none is provided, then a default pipeline is constructed using the `default_lang` and `default_pipeline` settings. ~~Optional[Language]~~ | -| `default_lang` | The language to use for a default pipeline if `nlp` is not provided. Defaults to `xx`. ~~str~~ | +| `default_lang` | The language to use for a default pipeline if `nlp` is not provided. Defaults to `mul`. ~~str~~ | | `default_pipeline` | The pipeline components to use for a default pipeline if `nlp` is not provided. Defaults to `("senter", "tagger", "morphologizer", "parser", "ner", "textcat")`. ~~Iterable[string]~~ | | _keyword-only_ | | | `\*\*kwargs` | Any additional settings to pass on to the individual scoring methods. ~~Any~~ | diff --git a/website/docs/api/sentencerecognizer.mdx b/website/docs/api/sentencerecognizer.mdx index d5d096d76..dfb7ed308 100644 --- a/website/docs/api/sentencerecognizer.mdx +++ b/website/docs/api/sentencerecognizer.mdx @@ -106,6 +106,39 @@ and all pipeline components are applied to the `Doc` in order. Both | `doc` | The document to process. ~~Doc~~ | | **RETURNS** | The processed document. ~~Doc~~ | +## SentenceRecognizer.distill {id="distill", tag="method,experimental", version="4"} + +Train a pipe (the student) on the predictions of another pipe (the teacher). The +student is typically trained on the probability distribution of the teacher, but +details may differ per pipe. The goal of distillation is to transfer knowledge +from the teacher to the student. + +The distillation is performed on ~~Example~~ objects. The `Example.reference` +and `Example.predicted` ~~Doc~~s must have the same number of tokens and the +same orthography. Even though the reference does not need to have gold +annotations, the teacher can add its own annotations when necessary. + +This feature is experimental.
+ +> #### Example +> +> ```python +> teacher_pipe = teacher.add_pipe("senter") +> student_pipe = student.add_pipe("senter") +> optimizer = nlp.resume_training() +> losses = student.distill(teacher_pipe, examples, sgd=optimizer) +> ``` + +| Name | Description | +| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ | +| `examples` | A batch of [`Example`](/api/example) distillation examples. The reference (teacher) and predicted (student) docs must have the same number of tokens and orthography. ~~Iterable[Example]~~ | +| _keyword-only_ | | +| `drop` | Dropout rate. ~~float~~ | +| `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ | +| `losses` | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ | +| **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ | + ## SentenceRecognizer.pipe {id="pipe",tag="method"} Apply the pipe to a stream of documents. This usually happens under the hood @@ -254,6 +287,27 @@ predicted scores. | `scores` | Scores representing the model's predictions. | | **RETURNS** | The loss and the gradient, i.e. `(loss, gradient)`. ~~Tuple[float, float]~~ | +## SentenceRecognizer.get_teacher_student_loss {id="get_teacher_student_loss", tag="method", version="4"} + +Calculate the loss and its gradient for the batch of student scores relative to +the teacher scores. + +> #### Example +> +> ```python +> teacher_senter = teacher.get_pipe("senter") +> student_senter = student.add_pipe("senter") +> student_scores = student_senter.predict([eg.predicted for eg in examples]) +> teacher_scores = teacher_senter.predict([eg.predicted for eg in examples]) +> loss, d_loss = student_senter.get_teacher_student_loss(teacher_scores, student_scores) +> ``` + +| Name | Description | +| ---------------- | --------------------------------------------------------------------------- | +| `teacher_scores` | Scores representing the teacher model's predictions. | +| `student_scores` | Scores representing the student model's predictions. | +| **RETURNS** | The loss and the gradient, i.e. `(loss, gradient)`. ~~Tuple[float, float]~~ | + ## SentenceRecognizer.create_optimizer {id="create_optimizer",tag="method"} Create an optimizer for the pipeline component. diff --git a/website/docs/api/stringstore.mdx b/website/docs/api/stringstore.mdx index 542ee1ab9..7e380f5f8 100644 --- a/website/docs/api/stringstore.mdx +++ b/website/docs/api/stringstore.mdx @@ -90,7 +90,7 @@ Iterate over the stored strings in insertion order. | ----------- | ------------------------------ | | **RETURNS** | A string in the store. ~~str~~ | -## StringStore.items {id="iter", tag="method", version="4"} +## StringStore.items {id="items", tag="method", version="4"} Iterate over the stored string-hash pairs in insertion order. @@ -106,7 +106,7 @@ Iterate over the stored string-hash pairs in insertion order. | ----------- | ------------------------------------------------------ | | **RETURNS** | A list of string-hash pairs. ~~List[Tuple[str, int]]~~ | -## StringStore.keys {id="iter", tag="method", version="4"} +## StringStore.keys {id="keys", tag="method", version="4"} Iterate over the stored strings in insertion order. 
@@ -122,7 +122,7 @@ Iterate over the stored strings in insertion order. | ----------- | -------------------------------- | | **RETURNS** | A list of strings. ~~List[str]~~ | -## StringStore.values {id="iter", tag="method", version="4"} +## StringStore.values {id="values", tag="method", version="4"} Iterate over the stored string hashes in insertion order. diff --git a/website/docs/api/tagger.mdx b/website/docs/api/tagger.mdx index ae14df212..35e7a23b1 100644 --- a/website/docs/api/tagger.mdx +++ b/website/docs/api/tagger.mdx @@ -105,6 +105,39 @@ and all pipeline components are applied to the `Doc` in order. Both | `doc` | The document to process. ~~Doc~~ | | **RETURNS** | The processed document. ~~Doc~~ | +## Tagger.distill {id="distill", tag="method,experimental", version="4"} + +Train a pipe (the student) on the predictions of another pipe (the teacher). The +student is typically trained on the probability distribution of the teacher, but +details may differ per pipe. The goal of distillation is to transfer knowledge +from the teacher to the student. + +The distillation is performed on ~~Example~~ objects. The `Example.reference` +and `Example.predicted` ~~Doc~~s must have the same number of tokens and the +same orthography. Even though the reference does not need to have gold +annotations, the teacher can add its own annotations when necessary. + +This feature is experimental. + +> #### Example +> +> ```python +> teacher_pipe = teacher.add_pipe("tagger") +> student_pipe = student.add_pipe("tagger") +> optimizer = nlp.resume_training() +> losses = student.distill(teacher_pipe, examples, sgd=optimizer) +> ``` + +| Name | Description | | -------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ | | `examples` | A batch of [`Example`](/api/example) distillation examples. The reference (teacher) and predicted (student) docs must have the same number of tokens and orthography. ~~Iterable[Example]~~ | | _keyword-only_ | | | `drop` | Dropout rate. ~~float~~ | | `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ | | `losses` | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ | | **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ | + ## Tagger.pipe {id="pipe",tag="method"} Apply the pipe to a stream of documents. This usually happens under the hood @@ -265,6 +298,27 @@ predicted scores. | `scores` | Scores representing the model's predictions. | | **RETURNS** | The loss and the gradient, i.e. `(loss, gradient)`. ~~Tuple[float, float]~~ | +## Tagger.get_teacher_student_loss {id="get_teacher_student_loss", tag="method", version="4"} + +Calculate the loss and its gradient for the batch of student scores relative to +the teacher scores.
+ +> #### Example +> +> ```python +> teacher_tagger = teacher.get_pipe("tagger") +> student_tagger = student.add_pipe("tagger") +> student_scores = student_tagger.predict([eg.predicted for eg in examples]) +> teacher_scores = teacher_tagger.predict([eg.predicted for eg in examples]) +> loss, d_loss = student_tagger.get_teacher_student_loss(teacher_scores, student_scores) +> ``` + +| Name | Description | +| ---------------- | --------------------------------------------------------------------------- | +| `teacher_scores` | Scores representing the teacher model's predictions. | +| `student_scores` | Scores representing the student model's predictions. | +| **RETURNS** | The loss and the gradient, i.e. `(loss, gradient)`. ~~Tuple[float, float]~~ | + ## Tagger.create_optimizer {id="create_optimizer",tag="method"} Create an optimizer for the pipeline component. diff --git a/website/docs/api/top-level.mdx b/website/docs/api/top-level.mdx index a222cfa8f..b13a6d28b 100644 --- a/website/docs/api/top-level.mdx +++ b/website/docs/api/top-level.mdx @@ -236,17 +236,17 @@ browser. Will run a simple web server. > displacy.serve([doc1, doc2], style="dep") > ``` -| Name | Description | -| ------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `docs` | Document(s) or span(s) to visualize. ~~Union[Iterable[Union[Doc, Span]], Doc, Span]~~ | -| `style` | Visualization style, `"dep"`, `"ent"` or `"span"` 3.3. Defaults to `"dep"`. ~~str~~ | -| `page` | Render markup as full HTML page. Defaults to `True`. ~~bool~~ | -| `minify` | Minify HTML markup. Defaults to `False`. ~~bool~~ | -| `options` | [Visualizer-specific options](#displacy_options), e.g. colors. ~~Dict[str, Any]~~ | -| `manual` | Don't parse `Doc` and instead expect a dict or list of dicts. [See here](/usage/visualizers#manual-usage) for formats and examples. Defaults to `False`. ~~bool~~ | -| `port` | Port to serve visualization. Defaults to `5000`. ~~int~~ | -| `host` | Host to serve visualization. Defaults to `"0.0.0.0"`. ~~str~~ | -| `auto_select_port` | If `True`, automatically switch to a different port if the specified port is already in use. Defaults to `False`. ~~bool~~ | +| Name | Description | +| ----------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `docs` | Document(s) or span(s) to visualize. ~~Union[Iterable[Union[Doc, Span]], Doc, Span]~~ | +| `style` 3.3 | Visualization style, `"dep"`, `"ent"` or `"span"`. Defaults to `"dep"`. ~~str~~ | +| `page` | Render markup as full HTML page. Defaults to `True`. ~~bool~~ | +| `minify` | Minify HTML markup. Defaults to `False`. ~~bool~~ | +| `options` | [Visualizer-specific options](#displacy_options), e.g. colors. ~~Dict[str, Any]~~ | +| `manual` | Don't parse `Doc` and instead expect a dict or list of dicts. [See here](/usage/visualizers#manual-usage) for formats and examples. Defaults to `False`. ~~bool~~ | +| `port` | Port to serve visualization. Defaults to `5000`. ~~int~~ | +| `host` | Host to serve visualization. Defaults to `"0.0.0.0"`. ~~str~~ | +| `auto_select_port` 3.5 | If `True`, automatically switch to a different port if the specified port is already in use. Defaults to `False`. 
~~bool~~ | ### displacy.render {id="displacy.render",tag="method",version="2"} @@ -751,14 +751,14 @@ themselves, or be discarded if `discard_oversize` is set to `True`. The argument > get_length = null > ``` -| Name | Description | -| ------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `seqs` | The sequences to minibatch. ~~Iterable[Any]~~ | -| `size` | The target number of words per batch. Can also be a block referencing a schedule, e.g. [`compounding`](https://thinc.ai/docs/api-schedules/#compounding). ~~Union[int, Sequence[int]]~~ | -| `tolerance` | What percentage of the size to allow batches to exceed. ~~float~~ | -| `discard_oversize` | Whether to discard sequences that by themselves exceed the tolerated size. ~~bool~~ | -| `get_length` | Optional function that receives a sequence item and returns its length. Defaults to the built-in `len()` if not set. ~~Optional[Callable[[Any], int]]~~ | -| **CREATES** | The batcher that takes an iterable of items and returns batches. ~~Callable[[Iterable[Any]], Iterable[List[Any]]]~~ | +| Name | Description | +| ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `seqs` | The sequences to minibatch. ~~Iterable[Any]~~ | +| `size` | The target number of words per batch. Can also be a block referencing a schedule, e.g. [`compounding`](https://thinc.ai/docs/api-schedules/#compounding). ~~Union[int, Iterable[int], Schedule]~~ | +| `tolerance` | What percentage of the size to allow batches to exceed. ~~float~~ | +| `discard_oversize` | Whether to discard sequences that by themselves exceed the tolerated size. ~~bool~~ | +| `get_length` | Optional function that receives a sequence item and returns its length. Defaults to the built-in `len()` if not set. ~~Optional[Callable[[Any], int]]~~ | +| **CREATES** | The batcher that takes an iterable of items and returns batches. ~~Callable[[Iterable[Any]], Iterable[List[Any]]]~~ | ### spacy.batch_by_sequence.v1 {id="batch_by_sequence",tag="registered function"} @@ -773,11 +773,11 @@ themselves, or be discarded if `discard_oversize` is set to `True`. The argument Create a batcher that creates batches of the specified size. -| Name | Description | -| ------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `size` | The target number of items per batch. Can also be a block referencing a schedule, e.g. [`compounding`](https://thinc.ai/docs/api-schedules/#compounding). ~~Union[int, Sequence[int]]~~ | -| `get_length` | Optional function that receives a sequence item and returns its length. Defaults to the built-in `len()` if not set. ~~Optional[Callable[[Any], int]]~~ | -| **CREATES** | The batcher that takes an iterable of items and returns batches. ~~Callable[[Iterable[Any]], Iterable[List[Any]]]~~ | +| Name | Description | +| ------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `size` | The target number of items per batch. Can also be a block referencing a schedule, e.g. 
[`compounding`](https://thinc.ai/docs/api-schedules/#compounding). ~~Union[int, Iterable[int], Schedule]~~ | +| `get_length` | Optional function that receives a sequence item and returns its length. Defaults to the built-in `len()` if not set. ~~Optional[Callable[[Any], int]]~~ | +| **CREATES** | The batcher that takes an iterable of items and returns batches. ~~Callable[[Iterable[Any]], Iterable[List[Any]]]~~ | ### spacy.batch_by_padded.v1 {id="batch_by_padded",tag="registered function"} @@ -799,7 +799,7 @@ sequences in the batch. | Name | Description | | ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `size` | The largest padded size to batch sequences into. Can also be a block referencing a schedule, e.g. [`compounding`](https://thinc.ai/docs/api-schedules/#compounding). ~~Union[int, Sequence[int]]~~ | +| `size` | The largest padded size to batch sequences into. Can also be a block referencing a schedule, e.g. [`compounding`](https://thinc.ai/docs/api-schedules/#compounding). ~~Union[int, Iterable[int], Schedule]~~ | | `buffer` | The number of sequences to accumulate before sorting by length. A larger buffer will result in more even sizing, but if the buffer is very large, the iteration order will be less random, which can result in suboptimal training. ~~int~~ | | `discard_oversize` | Whether to discard sequences that are by themselves longer than the largest padded batch size. ~~bool~~ | | `get_length` | Optional function that receives a sequence item and returns its length. Defaults to the built-in `len()` if not set. ~~Optional[Callable[[Any], int]]~~ | @@ -921,7 +921,8 @@ backprop passes. Recursively wrap both the models and methods of each pipe using [NVTX](https://nvidia.github.io/NVTX/) range markers. By default, the following methods are wrapped: `pipe`, `predict`, `set_annotations`, `update`, `rehearse`, -`get_loss`, `initialize`, `begin_update`, `finish_update`, `update`. +`get_loss`, `get_teacher_student_loss`, `initialize`, `begin_update`, +`finish_update`, `update`. | Name | Description | | --------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------- | @@ -1400,7 +1401,7 @@ vary on each step. | Name | Description | | ---------- | ------------------------------------------------ | | `items` | The items to batch up. ~~Iterable[Any]~~ | -| `size` | The batch size(s). ~~Union[int, Sequence[int]]~~ | +| `size` | The batch size(s). ~~Union[int, Iterable[int]]~~ | | **YIELDS** | The batches. | ### util.filter_spans {id="util.filter_spans",tag="function",version="2.1.4"} diff --git a/website/docs/usage/101/_architecture.mdx b/website/docs/usage/101/_architecture.mdx index 5dd56e486..35c36088a 100644 --- a/website/docs/usage/101/_architecture.mdx +++ b/website/docs/usage/101/_architecture.mdx @@ -81,7 +81,7 @@ operates on a `Doc` and gives you access to the matched tokens **in context**. | ------------------------------------------------ | -------------------------------------------------------------------------------------------------- | | [`Corpus`](/api/corpus) | Class for managing annotated corpora for training and evaluation data. | | [`KnowledgeBase`](/api/kb) | Abstract base class for storage and retrieval of data for entity linking. 
| -| [`InMemoryLookupKB`](/api/kb_in_memory) | Implementation of `KnowledgeBase` storing all data in memory. | +| [`InMemoryLookupKB`](/api/inmemorylookupkb) | Implementation of `KnowledgeBase` storing all data in memory. | | [`Candidate`](/api/kb#candidate) | Object associating a textual mention with a specific entity contained in a `KnowledgeBase`. | | [`Lookups`](/api/lookups) | Container for convenient access to large lookup tables and dictionaries. | | [`MorphAnalysis`](/api/morphology#morphanalysis) | A morphological analysis. | diff --git a/website/docs/usage/101/_vectors-similarity.mdx b/website/docs/usage/101/_vectors-similarity.mdx index c27f777d8..6deab926d 100644 --- a/website/docs/usage/101/_vectors-similarity.mdx +++ b/website/docs/usage/101/_vectors-similarity.mdx @@ -134,6 +134,7 @@ useful for your purpose. Here are some important considerations to keep in mind: sense2vec Screenshot [`sense2vec`](https://github.com/explosion/sense2vec) is a library developed by diff --git a/website/docs/usage/embeddings-transformers.mdx b/website/docs/usage/embeddings-transformers.mdx index cf80822fb..0de173a21 100644 --- a/website/docs/usage/embeddings-transformers.mdx +++ b/website/docs/usage/embeddings-transformers.mdx @@ -140,7 +140,7 @@ factory = "tok2vec" factory = "ner" [components.ner.model] -@architectures = "spacy.TransitionBasedParser.v1" +@architectures = "spacy.TransitionBasedParser.v3" [components.ner.model.tok2vec] @architectures = "spacy.Tok2VecListener.v1" @@ -156,7 +156,7 @@ same. This makes them fully independent and doesn't require an upstream factory = "ner" [components.ner.model] -@architectures = "spacy.TransitionBasedParser.v1" +@architectures = "spacy.TransitionBasedParser.v3" [components.ner.model.tok2vec] @architectures = "spacy.Tok2Vec.v2" @@ -472,7 +472,7 @@ sneakily delegates to the `Transformer` pipeline component. factory = "ner" [nlp.pipeline.ner.model] -@architectures = "spacy.TransitionBasedParser.v1" +@architectures = "spacy.TransitionBasedParser.v3" state_type = "ner" extra_state_tokens = false hidden_width = 128 diff --git a/website/docs/usage/index.mdx b/website/docs/usage/index.mdx index a5b7990d6..07f2bd282 100644 --- a/website/docs/usage/index.mdx +++ b/website/docs/usage/index.mdx @@ -20,7 +20,7 @@ menu: ## Installation instructions {id="installation"} -spaCy is compatible with **64-bit CPython 3.6+** and runs on **Unix/Linux**, +spaCy is compatible with **64-bit CPython 3.8+** and runs on **Unix/Linux**, **macOS/OS X** and **Windows**. The latest spaCy releases are available over [pip](https://pypi.python.org/pypi/spacy) and [conda](https://anaconda.org/conda-forge/spacy). @@ -290,7 +290,7 @@ You can configure the build process with the following environment variables: | Variable | Description | | -------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `SPACY_EXTRAS` | Additional Python packages to install alongside spaCy with optional version specifications. Should be a string that can be passed to `pip install`. See [`Makefile`](%%GITHUB_SPACY/Makefile) for defaults. | -| `PYVER` | The Python version to build against. This version needs to be available on your build and runtime machines. Defaults to `3.6`. | +| `PYVER` | The Python version to build against. This version needs to be available on your build and runtime machines. Defaults to `3.8`. 
| | `WHEELHOUSE` | Directory to store the wheel files during compilation. Defaults to `./wheelhouse`. | ### Run tests {id="run-tests"} diff --git a/website/docs/usage/layers-architectures.mdx b/website/docs/usage/layers-architectures.mdx index 37f11e8e2..8f6bf3a20 100644 --- a/website/docs/usage/layers-architectures.mdx +++ b/website/docs/usage/layers-architectures.mdx @@ -113,6 +113,7 @@ code. Screenshot of Thinc type checking in VSCode with mypy diff --git a/website/docs/usage/models.mdx b/website/docs/usage/models.mdx index 765805dc2..5b783002c 100644 --- a/website/docs/usage/models.mdx +++ b/website/docs/usage/models.mdx @@ -74,23 +74,23 @@ your data. > ```python > # Standard import -> from spacy.lang.xx import MultiLanguage +> from spacy.lang.mul import MultiLanguage > nlp = MultiLanguage() > > # With lazy-loading -> nlp = spacy.blank("xx") +> nlp = spacy.blank("mul") > ``` spaCy also supports pipelines trained on more than one language. This is especially useful for named entity recognition. The language ID used for -multi-language or language-neutral pipelines is `xx`. The language class, a +multi-language or language-neutral pipelines is `mul`. The language class, a generic subclass containing only the base language data, can be found in -[`lang/xx`](%%GITHUB_SPACY/spacy/lang/xx). +[`lang/mul`](%%GITHUB_SPACY/spacy/lang/mul). To train a pipeline using the neutral multi-language class, you can set -`lang = "xx"` in your [training config](/usage/training#config). You can also +`lang = "mul"` in your [training config](/usage/training#config). You can also \import the `MultiLanguage` class directly, or call -[`spacy.blank("xx")`](/api/top-level#spacy.blank) for lazy-loading. +[`spacy.blank("mul")`](/api/top-level#spacy.blank) for lazy-loading. ### Chinese language support {id="chinese",version="2.3"} diff --git a/website/docs/usage/processing-pipelines.mdx b/website/docs/usage/processing-pipelines.mdx index 11e1cb620..08cd64aa7 100644 --- a/website/docs/usage/processing-pipelines.mdx +++ b/website/docs/usage/processing-pipelines.mdx @@ -1354,12 +1354,14 @@ For some use cases, it makes sense to also overwrite additional methods to customize how the model is updated from examples, how it's initialized, how the loss is calculated and to add evaluation scores to the training output. -| Name | Description | -| ------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| [`update`](/api/pipe#update) | Learn from a batch of [`Example`](/api/example) objects containing the predictions and gold-standard annotations, and update the component's model. | -| [`initialize`](/api/pipe#initialize) | Initialize the model. Typically calls into [`Model.initialize`](https://thinc.ai/docs/api-model#initialize) and can be passed custom arguments via the [`[initialize]`](/api/data-formats#config-initialize) config block that are only loaded during training or when you call [`nlp.initialize`](/api/language#initialize), not at runtime. | -| [`get_loss`](/api/pipe#get_loss) | Return a tuple of the loss and the gradient for a batch of [`Example`](/api/example) objects. | -| [`score`](/api/pipe#score) | Score a batch of [`Example`](/api/example) objects and return a dictionary of scores. 
The [`@Language.factory`](/api/language#factory) decorator can define the `default_score_weights` of the component to decide which keys of the scores to display during training and how they count towards the final score. | +| Name | Description | +| ---------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| [`update`](/api/pipe#update) | Learn from a batch of [`Example`](/api/example) objects containing the predictions and gold-standard annotations, and update the component's model. | +| [`distill`](/api/pipe#distill) | Learn from a teacher pipeline using a batch of [`Doc`](/api/doc) objects and update the component's model. | +| [`initialize`](/api/pipe#initialize) | Initialize the model. Typically calls into [`Model.initialize`](https://thinc.ai/docs/api-model#initialize) and can be passed custom arguments via the [`[initialize]`](/api/data-formats#config-initialize) config block that are only loaded during training or when you call [`nlp.initialize`](/api/language#initialize), not at runtime. | +| [`get_loss`](/api/pipe#get_loss) | Return a tuple of the loss and the gradient for a batch of [`Example`](/api/example) objects. | +| [`get_teacher_student_loss`](/api/pipe#get_teacher_student_loss) | Return a tuple of the loss and the gradient for the student scores relative to the teacher scores. | +| [`score`](/api/pipe#score) | Score a batch of [`Example`](/api/example) objects and return a dictionary of scores. The [`@Language.factory`](/api/language#factory) decorator can define the `default_score_weights` of the component to decide which keys of the scores to display during training and how they count towards the final score. | diff --git a/website/docs/usage/projects.mdx b/website/docs/usage/projects.mdx index 8ec035942..f3cca8013 100644 --- a/website/docs/usage/projects.mdx +++ b/website/docs/usage/projects.mdx @@ -943,7 +943,7 @@ full embedded visualizer, as well as individual components. > $ pip install spacy-streamlit --pre > ``` -![](/images/spacy-streamlit.png) +![Screenshot of the spacy-streamlit package in Streamlit](/images/spacy-streamlit.png) Using [`spacy-streamlit`](https://github.com/explosion/spacy-streamlit), your projects can easily define their own scripts that spin up an interactive diff --git a/website/docs/usage/rule-based-matching.mdx b/website/docs/usage/rule-based-matching.mdx index 1c3c6e3b8..0c2bd7a66 100644 --- a/website/docs/usage/rule-based-matching.mdx +++ b/website/docs/usage/rule-based-matching.mdx @@ -384,14 +384,14 @@ the more specific attributes `FUZZY1`..`FUZZY9` you can specify the maximum allowed edit distance directly. 
```python -# Match lowercase with fuzzy matching (allows 2 edits) +# Match lowercase with fuzzy matching (allows 3 edits) pattern = [{"LOWER": {"FUZZY": "definitely"}}] -# Match custom attribute values with fuzzy matching (allows 2 edits) +# Match custom attribute values with fuzzy matching (allows 3 edits) pattern = [{"_": {"country": {"FUZZY": "Kyrgyzstan"}}}] -# Match with exact Levenshtein edit distance limits (allows 3 edits) -pattern = [{"_": {"country": {"FUZZY3": "Kyrgyzstan"}}}] +# Match with exact Levenshtein edit distance limits (allows 4 edits) +pattern = [{"_": {"country": {"FUZZY4": "Kyrgyzstan"}}}] ``` #### Regex and fuzzy matching with lists {id="regex-fuzzy-lists", version="3.5"} diff --git a/website/docs/usage/saving-loading.mdx b/website/docs/usage/saving-loading.mdx index d4f5cda76..cdc587273 100644 --- a/website/docs/usage/saving-loading.mdx +++ b/website/docs/usage/saving-loading.mdx @@ -304,6 +304,28 @@ installed in the same environment – that's it. | `spacy_lookups` | Group of entry points for custom [`Lookups`](/api/lookups), including lemmatizer data. Used by spaCy's [`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data) package. | | [`spacy_displacy_colors`](#entry-points-displacy) | Group of entry points of custom label colors for the [displaCy visualizer](/usage/visualizers#ent). The key name doesn't matter, but it should point to a dict of labels and color values. Useful for custom models that predict different entity types. | +### Loading probability tables into existing models + +You can load a probability table from [spacy-lookups-data](https://github.com/explosion/spacy-lookups-data) into an existing spaCy model like `en_core_web_sm`. + +```python +# Requirements: pip install spacy-lookups-data +import spacy +from spacy.lookups import load_lookups +nlp = spacy.load("en_core_web_sm") +lookups = load_lookups("en", ["lexeme_prob"]) +nlp.vocab.lookups.add_table("lexeme_prob", lookups.get_table("lexeme_prob")) +``` + +When training a model from scratch you can also specify probability tables in the `config.cfg`. + +```ini {title="config.cfg (excerpt)"} +[initialize.lookups] +@misc = "spacy.LookupsDataLoader.v1" +lang = ${nlp.lang} +tables = ["lexeme_prob"] +``` + ### Custom components via entry points {id="entry-points-components"} When you load a pipeline, spaCy will generally use its `config.cfg` to set up @@ -684,10 +706,15 @@ If your pipeline includes [custom components](/usage/processing-pipelines#custom-components), model architectures or other [code](/usage/training#custom-code), those functions need to be registered **before** your pipeline is loaded. Otherwise, spaCy won't know -how to create the objects referenced in the config. The -[`spacy package`](/api/cli#package) command lets you provide one or more paths -to Python files containing custom registered functions using the `--code` -argument. +how to create the objects referenced in the config. If you're loading your own +pipeline in Python, you can make custom components available just by importing +the code that defines them before calling +[`spacy.load`](/api/top-level#spacy.load). This is also how the `--code` +argument to CLI commands works. + +With the [`spacy package`](/api/cli#package) command, you can provide one or +more paths to Python files containing custom registered functions using the +`--code` argument. 
> #### \_\_init\_\_.py (excerpt) > diff --git a/website/docs/usage/spacy-101.mdx b/website/docs/usage/spacy-101.mdx index a02e73508..6d444a1e9 100644 --- a/website/docs/usage/spacy-101.mdx +++ b/website/docs/usage/spacy-101.mdx @@ -567,7 +567,10 @@ If you would like to use the spaCy logo on your site, please get in touch and ask us first. However, if you want to show support and tell others that your project is using spaCy, you can grab one of our **spaCy badges** here: - +Built with spaCy ```markdown [![Built with spaCy](https://img.shields.io/badge/built%20with-spaCy-09a3d5.svg)](https://spacy.io) @@ -575,8 +578,9 @@ project is using spaCy, you can grab one of our **spaCy badges** here: Made with love and spaCy ```markdown -[![Built with spaCy](https://img.shields.io/badge/made%20with%20❤%20and-spaCy-09a3d5.svg)](https://spacy.io) +[![Made with love and spaCy](https://img.shields.io/badge/made%20with%20❤%20and-spaCy-09a3d5.svg)](https://spacy.io) ``` diff --git a/website/docs/usage/v3-5.mdx b/website/docs/usage/v3-5.mdx new file mode 100644 index 000000000..ac61338e3 --- /dev/null +++ b/website/docs/usage/v3-5.mdx @@ -0,0 +1,215 @@ +--- +title: What's New in v3.5 +teaser: New features and how to upgrade +menu: + - ['New Features', 'features'] + - ['Upgrading Notes', 'upgrading'] +--- + +## New features {id="features",hidden="true"} + +spaCy v3.5 introduces three new CLI commands, `apply`, `benchmark` and +`find-threshold`, adds fuzzy matching, provides improvements to our entity +linking functionality, and includes a range of language updates and bug fixes. + +### New CLI commands {id="cli"} + +#### apply CLI + +The [`apply` CLI](/api/cli#apply) can be used to apply a pipeline to one or more +`.txt`, `.jsonl` or `.spacy` input files, saving the annotated docs in a single +`.spacy` file. + +```bash +$ spacy apply en_core_web_sm my_texts/ output.spacy +``` + +#### benchmark CLI + +The [`benchmark` CLI](/api/cli#benchmark) has been added to extend the existing +`evaluate` functionality with a wider range of profiling subcommands. + +The `benchmark accuracy` CLI is introduced as an alias for `evaluate`. The new +`benchmark speed` CLI performs warmup rounds before measuring the speed in words +per second on batches of randomly shuffled documents from the provided data. + +```bash +$ spacy benchmark speed my_pipeline data.spacy +``` + +The output is the mean performance using batches (`nlp.pipe`) with a 95% +confidence interval, e.g., profiling `en_core_web_sm` on CPU: + +```none +Outliers: 2.0%, extreme outliers: 0.0% +Mean: 18904.1 words/s (95% CI: -256.9 +244.1) +``` + +#### find-threshold CLI + +The [`find-threshold` CLI](/api/cli#find-threshold) runs a series of trials +across threshold values from `0.0` to `1.0` and identifies the best threshold +for the provided score metric. + +The following command runs 20 trials for the `spancat` component in +`my_pipeline`, recording the `spans_sc_f` score for each value of the threshold +`[components.spancat.threshold]` from `0.0` to `1.0`: + +```bash +$ spacy find-threshold my_pipeline data.spacy spancat threshold spans_sc_f --n_trials 20 +``` + +The `find-threshold` CLI can be used with `textcat_multilabel`, `spancat` and +custom components with thresholds that are applied while predicting or scoring. + +### Fuzzy matching {id="fuzzy"} + +New `FUZZY` operators support [fuzzy matching](/usage/rule-based-matching#fuzzy) +with the `Matcher`. 
By default, the `FUZZY` operator allows a Levenshtein edit +distance of 2 and up to 30% of the pattern string length. `FUZZY1`..`FUZZY9` can +be used to specify the exact number of allowed edits. + +```python +# Match lowercase with fuzzy matching (allows up to 3 edits) +pattern = [{"LOWER": {"FUZZY": "definitely"}}] + +# Match custom attribute values with fuzzy matching (allows up to 3 edits) +pattern = [{"_": {"country": {"FUZZY": "Kyrgyzstan"}}}] + +# Match with exact Levenshtein edit distance limits (allows up to 4 edits) +pattern = [{"_": {"country": {"FUZZY4": "Kyrgyzstan"}}}] +``` + +Note that `FUZZY` uses Levenshtein edit distance rather than Damerau-Levenshtein +edit distance, so a transposition like `teh` for `the` counts as two edits, one +insertion and one deletion. + +If you'd prefer an alternate fuzzy matching algorithm, you can provide your own +custom method to the `Matcher` or as a config option for an entity ruler and +span ruler. + +### FUZZY and REGEX with lists {id="fuzzy-regex-lists"} + +The `FUZZY` and `REGEX` operators are also now supported for lists with `IN` and +`NOT_IN`: + +```python +pattern = [{"TEXT": {"FUZZY": {"IN": ["awesome", "cool", "wonderful"]}}}] +pattern = [{"TEXT": {"REGEX": {"NOT_IN": ["^awe(some)?$", "^wonder(ful)?"]}}}] +``` + +### Entity linking generalization {id="el"} + +The knowledge base used for entity linking is now easier to customize and has a +new default implementation [`InMemoryLookupKB`](/api/inmemorylookupkb). + +### Additional features and improvements {id="additional-features-and-improvements"} + +- Language updates: + - Extended support for Slovenian + - Fixed lookup fallback for French and Catalan lemmatizers + - Switch Russian and Ukrainian lemmatizers to `pymorphy3` + - Support for editorial punctuation in Ancient Greek + - Update to Russian tokenizer exceptions + - Small fix for Dutch stop words +- Allow up to `typer` v0.7.x, `mypy` 0.990 and `typing_extensions` v4.4.x. +- New `spacy.ConsoleLogger.v3` with expanded progress + [tracking](/api/top-level#ConsoleLogger). +- Improved scoring behavior for `textcat` with `spacy.textcat_scorer.v2` and + `spacy.textcat_multilabel_scorer.v2`. +- Updates so that downstream components can train properly on a frozen `tok2vec` + or `transformer` layer. +- Allow interpolation of variables in directory names in projects. +- Support for local file system [remotes](/usage/projects#remote) for projects. +- Improve UX around `displacy.serve` when the default port is in use. +- Optional `before_update` callback that is invoked at the start of each + [training step](/api/data-formats#config-training). +- Improve performance of `SpanGroup` and fix typing issues for `SpanGroup` and + `Span` objects. +- Patch a + [security vulnerability](https://github.com/advisories/GHSA-gw9q-c7gh-j9vm) in + extracting tar files. +- Add equality definition for `Vectors`. +- Ensure `Vocab.to_disk` respects the exclude setting for `lookups` and + `vectors`. +- Correctly handle missing annotations in the edit tree lemmatizer. + +### Trained pipeline updates {id="pipelines"} + +- The CNN pipelines add `IS_SPACE` as a `tok2vec` feature for `tagger` and + `morphologizer` components to improve tagging of non-whitespace vs. whitespace + tokens. +- The transformer pipelines require `spacy-transformers` v1.2, which uses the + exact alignment from `tokenizers` for fast tokenizers instead of the heuristic + alignment from `spacy-alignments`. 
For all trained pipelines except + `ja_core_news_trf`, the alignments between spaCy tokens and transformer tokens + may be slightly different. More details about the `spacy-transformers` changes + in the + [v1.2.0 release notes](https://github.com/explosion/spacy-transformers/releases/tag/v1.2.0). + +## Notes about upgrading from v3.4 {id="upgrading"} + +### Validation of textcat values {id="textcat-validation"} + +An error is now raised when unsupported values are given as input to train a +`textcat` or `textcat_multilabel` model - ensure that values are `0.0` or `1.0` +as explained in the [docs](/api/textcategorizer#assigned-attributes). + +### Updated scorers for tokenization and textcat {id="scores"} + +We fixed a bug that inflated the `token_acc` scores in v3.0-v3.4. The reported +`token_acc` will drop from v3.4 to v3.5, but if `token_p/r/f` stay the same, +your tokenization performance has not changed from v3.4. + +For new `textcat` or `textcat_multilabel` configs, the new default `v2` scorers: + +- ignore `threshold` for `textcat`, so the reported `cats_p/r/f` may increase + slightly in v3.5 even though the underlying predictions are unchanged +- report the performance of only the **final** `textcat` or `textcat_multilabel` + component in the pipeline by default +- allow custom scorers to be used to score multiple `textcat` and + `textcat_multilabel` components with `Scorer.score_cats` by restricting the + evaluation to the component's provided labels + +### Pipeline package version compatibility {id="version-compat"} + +> #### Using legacy implementations +> +> In spaCy v3, you'll still be able to load and reference legacy implementations +> via [`spacy-legacy`](https://github.com/explosion/spacy-legacy), even if the +> components or architectures change and newer versions are available in the +> core library. + +When you're loading a pipeline package trained with an earlier version of spaCy +v3, you will see a warning telling you that the pipeline may be incompatible. +This doesn't necessarily have to be true, but we recommend running your +pipelines against your test suite or evaluation data to make sure there are no +unexpected results. + +If you're using one of the [trained pipelines](/models) we provide, you should +run [`spacy download`](/api/cli#download) to update to the latest version. To +see an overview of all installed packages and their compatibility, you can run +[`spacy validate`](/api/cli#validate). + +If you've trained your own custom pipeline and you've confirmed that it's still +working as expected, you can update the spaCy version requirements in the +[`meta.json`](/api/data-formats#meta): + +```diff +- "spacy_version": ">=3.4.0,<3.5.0", ++ "spacy_version": ">=3.4.0,<3.6.0", +``` + +### Updating v3.4 configs + +To update a config from spaCy v3.4 with the new v3.5 settings, run +[`init fill-config`](/api/cli#init-fill-config): + +```cli +$ python -m spacy init fill-config config-v3.4.cfg config-v3.5.cfg +``` + +In many cases ([`spacy train`](/api/cli#train), +[`spacy.load`](/api/top-level#spacy.load)), the new defaults will be filled in +automatically, but you'll need to fill in the new settings to run +[`debug config`](/api/cli#debug) and [`debug data`](/api/cli#debug-data). 
diff --git a/website/docs/usage/visualizers.mdx b/website/docs/usage/visualizers.mdx
index f1ff6dd3d..1d3682af4 100644
--- a/website/docs/usage/visualizers.mdx
+++ b/website/docs/usage/visualizers.mdx
@@ -437,6 +437,6 @@ Alternatively, if you're using [Streamlit](https://streamlit.io), check out the
 helps you integrate spaCy visualizations into your apps. It includes a full
 embedded visualizer, as well as individual components.
 
-![](/images/spacy-streamlit.png)
+![Screenshot of the spacy-streamlit package in Streamlit](/images/spacy-streamlit.png)
diff --git a/website/meta/languages.json b/website/meta/languages.json
index 46c0d3adb..eeb3a74b7 100644
--- a/website/meta/languages.json
+++ b/website/meta/languages.json
@@ -165,7 +165,7 @@
             "has_examples": true
         },
         {
-            "code": "is",
+            "code": "isl",
             "name": "Icelandic"
         },
         {
@@ -434,9 +434,9 @@
             ]
         },
         {
-            "code": "xx",
+            "code": "mul",
             "name": "Multi-language",
-            "models": ["xx_ent_wiki_sm", "xx_sent_ud_sm"],
+            "models": ["mul_ent_wiki_sm", "mul_sent_ud_sm"],
             "example": "This is a sentence about Facebook."
         },
         {
diff --git a/website/meta/sidebars.json b/website/meta/sidebars.json
index 339e4085b..b5c555da6 100644
--- a/website/meta/sidebars.json
+++ b/website/meta/sidebars.json
@@ -13,7 +13,8 @@
                     { "text": "New in v3.1", "url": "/usage/v3-1" },
                     { "text": "New in v3.2", "url": "/usage/v3-2" },
                     { "text": "New in v3.3", "url": "/usage/v3-3" },
-                    { "text": "New in v3.4", "url": "/usage/v3-4" }
+                    { "text": "New in v3.4", "url": "/usage/v3-4" },
+                    { "text": "New in v3.5", "url": "/usage/v3-5" }
                 ]
             },
             {
@@ -129,6 +130,7 @@
                 "items": [
                     { "text": "Attributes", "url": "/api/attributes" },
                     { "text": "Corpus", "url": "/api/corpus" },
+                    { "text": "InMemoryLookupKB", "url": "/api/inmemorylookupkb" },
                     { "text": "KnowledgeBase", "url": "/api/kb" },
                     { "text": "Lookups", "url": "/api/lookups" },
                     { "text": "MorphAnalysis", "url": "/api/morphology#morphanalysis" },
diff --git a/website/meta/site.json b/website/meta/site.json
index 5dcb89443..3d4f2d5ee 100644
--- a/website/meta/site.json
+++ b/website/meta/site.json
@@ -27,7 +27,7 @@
         "indexName": "spacy"
     },
     "binderUrl": "explosion/spacy-io-binder",
-    "binderVersion": "3.4",
+    "binderVersion": "3.5",
     "sections": [
         { "id": "usage", "title": "Usage Documentation", "theme": "blue" },
         { "id": "models", "title": "Models Documentation", "theme": "blue" },
diff --git a/website/meta/universe.json b/website/meta/universe.json
index f15d461e8..e35a4f045 100644
--- a/website/meta/universe.json
+++ b/website/meta/universe.json
@@ -2381,7 +2381,7 @@
             "author": "Nikita Kitaev",
             "author_links": {
                 "github": "nikitakit",
-                "website": " http://kitaev.io"
+                "website": "http://kitaev.io"
             },
             "category": ["research", "pipeline"]
         },
diff --git a/website/pages/_app.tsx b/website/pages/_app.tsx
index 8db80a672..a837d9ce8 100644
--- a/website/pages/_app.tsx
+++ b/website/pages/_app.tsx
@@ -17,7 +17,7 @@ export default function App({ Component, pageProps }: AppProps) {
diff --git a/website/pages/index.tsx b/website/pages/index.tsx
index 170bca137..fc0dba378 100644
--- a/website/pages/index.tsx
+++ b/website/pages/index.tsx
@@ -13,7 +13,7 @@ import {
     LandingBanner,
 } from '../src/components/landing'
 import { H2 } from '../src/components/typography'
-import { InlineCode } from '../src/components/code'
+import { InlineCode } from '../src/components/inlineCode'
 import { Ul, Li } from '../src/components/list'
 import Button from '../src/components/button'
 import Link from '../src/components/link'
@@ -89,8 +89,8 @@ const Landing = () => {
-                    In the five
-                    years since its release, spaCy has become an industry standard with
-                    a huge ecosystem. Choose from a variety of plugins, integrate with your machine
+                    Since its release in 2015, spaCy has become an industry standard with a huge
+                    ecosystem. Choose from a variety of plugins, integrate with your machine
                     learning stack and build custom components and workflows.
@@ -162,7 +162,7 @@ const Landing = () => {
                     small
                 >

- + { - +

diff --git a/website/src/components/accordion.js b/website/src/components/accordion.js index 504f415a5..9ff145bd2 100644 --- a/website/src/components/accordion.js +++ b/website/src/components/accordion.js @@ -33,7 +33,7 @@ export default function Accordion({ title, id, expanded = false, spaced = false, event.stopPropagation()} > ¶ diff --git a/website/src/components/card.js b/website/src/components/card.js index 9eb597b7b..ef43eb866 100644 --- a/website/src/components/card.js +++ b/website/src/components/card.js @@ -1,6 +1,7 @@ import React from 'react' import PropTypes from 'prop-types' import classNames from 'classnames' +import ImageNext from 'next/image' import Link from './link' import { H5 } from './typography' @@ -10,7 +11,7 @@ export default function Card({ title, to, image, header, small, onClick, childre return (

{header && ( - + {header} )} @@ -18,18 +19,17 @@ export default function Card({ title, to, image, header, small, onClick, childre
{image && (
- {/* eslint-disable-next-line @next/next/no-img-element */} - +
)} {title && ( - + {title} )}
)} - + {children}
diff --git a/website/src/components/code.js b/website/src/components/code.js index 51067115b..09c2fabfc 100644 --- a/website/src/components/code.js +++ b/website/src/components/code.js @@ -14,96 +14,16 @@ import 'prismjs/components/prism-markdown.min.js' import 'prismjs/components/prism-python.min.js' import 'prismjs/components/prism-yaml.min.js' -import CUSTOM_TYPES from '../../meta/type-annotations.json' -import { isString, htmlToReact } from './util' +import { isString } from './util' import Link, { OptionalLink } from './link' import GitHubCode from './github' -import Juniper from './juniper' import classes from '../styles/code.module.sass' import siteMetadata from '../../meta/site.json' import { binderBranch } from '../../meta/dynamicMeta.mjs' +import dynamic from 'next/dynamic' -const WRAP_THRESHOLD = 30 const CLI_GROUPS = ['init', 'debug', 'project', 'ray', 'huggingface-hub'] -const CodeBlock = (props) => ( -
-        
-    
-) - -export default CodeBlock - -export const Pre = (props) => { - return
{props.children}
-} - -export const InlineCode = ({ wrap = false, className, children, ...props }) => { - const codeClassNames = classNames(classes['inline-code'], className, { - [classes['wrap']]: wrap || (isString(children) && children.length >= WRAP_THRESHOLD), - }) - return ( - - {children} - - ) -} - -InlineCode.propTypes = { - wrap: PropTypes.bool, - className: PropTypes.string, - children: PropTypes.node, -} - -function linkType(el, showLink = true) { - if (!isString(el) || !el.length) return el - const elStr = el.trim() - if (!elStr) return el - const typeUrl = CUSTOM_TYPES[elStr] - const url = typeUrl == true ? DEFAULT_TYPE_URL : typeUrl - const ws = el[0] == ' ' - return url && showLink ? ( - - {ws && ' '} - - {elStr} - - - ) : ( - el - ) -} - -export const TypeAnnotation = ({ lang = 'python', link = true, children }) => { - // Hacky, but we're temporarily replacing a dot to prevent it from being split during highlighting - const TMP_DOT = '۔' - const code = Array.isArray(children) ? children.join('') : children || '' - const [rawText, meta] = code.split(/(?= \(.+\)$)/) - const rawStr = rawText.replace(/\./g, TMP_DOT) - const rawHtml = - lang === 'none' || !code ? code : Prism.highlight(rawStr, Prism.languages[lang], lang) - const html = rawHtml.replace(new RegExp(TMP_DOT, 'g'), '.').replace(/\n/g, ' ') - const result = htmlToReact(html) - const elements = Array.isArray(result) ? result : [result] - const annotClassNames = classNames( - 'type-annotation', - `language-${lang}`, - classes['inline-code'], - classes['type-annotation'], - { - [classes['wrap']]: code.length >= WRAP_THRESHOLD, - } - ) - return ( - - {elements.map((el, i) => ( - {linkType(el, !!link)} - ))} - {meta && {meta}} - - ) -} - const splitLines = (children) => { const listChildrenPerLine = [] @@ -235,7 +155,7 @@ const handlePromot = ({ lineFlat, prompt }) => { {j !== 0 && ' '} - @@ -288,7 +208,7 @@ const addLineHighlight = (children, highlight) => { }) } -export const CodeHighlighted = ({ children, highlight, lang }) => { +const CodeHighlighted = ({ children, highlight, lang }) => { const [html, setHtml] = useState() useEffect( @@ -305,7 +225,7 @@ export const CodeHighlighted = ({ children, highlight, lang }) => { return <>{html} } -export class Code extends React.Component { +export default class Code extends React.Component { static defaultProps = { lang: 'none', executable: null, @@ -354,6 +274,8 @@ export class Code extends React.Component { } } +const JuniperDynamic = dynamic(() => import('./juniper')) + const JuniperWrapper = ({ title, lang, children }) => { const { binderUrl, binderVersion } = siteMetadata const juniperTitle = title || 'Editable Code' @@ -363,13 +285,13 @@ const JuniperWrapper = ({ title, lang, children }) => { {juniperTitle} spaCy v{binderVersion} · Python 3 · via{' '} - + Binder - { }} > {children} - + ) } diff --git a/website/src/components/codeBlock.js b/website/src/components/codeBlock.js new file mode 100644 index 000000000..d990b93dd --- /dev/null +++ b/website/src/components/codeBlock.js @@ -0,0 +1,14 @@ +import React from 'react' +import Code from './codeDynamic' +import classes from '../styles/code.module.sass' + +export const Pre = (props) => { + return
{props.children}
+} + +const CodeBlock = (props) => ( +
+        
+    
+) +export default CodeBlock diff --git a/website/src/components/codeDynamic.js b/website/src/components/codeDynamic.js new file mode 100644 index 000000000..8c9483567 --- /dev/null +++ b/website/src/components/codeDynamic.js @@ -0,0 +1,5 @@ +import dynamic from 'next/dynamic' + +export default dynamic(() => import('./code'), { + loading: () =>
Loading...
, +}) diff --git a/website/src/components/copy.js b/website/src/components/copy.js index 4caabac98..bc7327115 100644 --- a/website/src/components/copy.js +++ b/website/src/components/copy.js @@ -14,7 +14,7 @@ export function copyToClipboard(ref, callback) { } } -export default function CopyInput({ text, prefix }) { +export default function CopyInput({ text, description, prefix }) { const isClient = typeof window !== 'undefined' const [supportsCopy, setSupportsCopy] = useState(false) @@ -41,6 +41,7 @@ export default function CopyInput({ text, prefix }) { defaultValue={text} rows={1} onClick={selectText} + aria-label={description} /> {supportsCopy && (