From 7a2c58864cfb24b78c28643e22ce8c9686e1f1bf Mon Sep 17 00:00:00 2001 From: Paul O'Leary McCann Date: Fri, 26 Aug 2022 17:23:10 +0900 Subject: [PATCH 01/82] Move deps outside explosion to "third-party" (#11381) --- setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index 708300b04..bf4890a68 100644 --- a/setup.cfg +++ b/setup.cfg @@ -50,9 +50,9 @@ install_requires = wasabi>=0.9.1,<1.1.0 srsly>=2.4.3,<3.0.0 catalogue>=2.0.6,<2.1.0 + # Third-party dependencies typer>=0.3.0,<0.5.0 pathy>=0.3.5 - # Third-party dependencies tqdm>=4.38.0,<5.0.0 numpy>=1.15.0 requests>=2.13.0,<3.0.0 From ba3320097948cd5056fc068cfc1a9cc1b2d89cf2 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Fri, 26 Aug 2022 16:07:16 +0200 Subject: [PATCH 02/82] Remove pathy from pyproject.toml (#11383) --- pyproject.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 317c5fdbe..7abd7a96f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,7 +6,6 @@ requires = [ "preshed>=3.0.2,<3.1.0", "murmurhash>=0.28.0,<1.1.0", "thinc>=8.1.0,<8.2.0", - "pathy", "numpy>=1.15.0", ] build-backend = "setuptools.build_meta" From 6723d76f24a55f24ef1632ac8be46567a984d0ef Mon Sep 17 00:00:00 2001 From: Edward <43848523+thomashacker@users.noreply.github.com> Date: Mon, 29 Aug 2022 10:23:05 +0200 Subject: [PATCH 03/82] Add ConsoleLogger.v2 (#11214) * Init * Change logger to ConsoleLogger.v2 * adjust naming * More naming adjustments * Fix output_file reference error * ignore type * Add basic test for logger * Hopefully fix mypy issue * mypy ignore line * Update mypy line Co-authored-by: Adriane Boyd * Update test method name Co-authored-by: Adriane Boyd * Change file saving logic * Fix finalize method * increase spacy-legacy version in requirements * Update docs * small adjustments Co-authored-by: Adriane Boyd --- requirements.txt | 2 +- setup.cfg | 2 +- spacy/tests/training/test_logger.py | 30 ++++++++ spacy/training/loggers.py | 102 +++++++++++++++++++++------- website/docs/api/legacy.md | 53 +++++++++++++++ website/docs/api/top-level.md | 57 +++++++++------- 6 files changed, 198 insertions(+), 48 deletions(-) create mode 100644 spacy/tests/training/test_logger.py diff --git a/requirements.txt b/requirements.txt index 437dd415a..3b8d66e0e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ # Our libraries -spacy-legacy>=3.0.9,<3.1.0 +spacy-legacy>=3.0.10,<3.1.0 spacy-loggers>=1.0.0,<2.0.0 cymem>=2.0.2,<2.1.0 preshed>=3.0.2,<3.1.0 diff --git a/setup.cfg b/setup.cfg index bf4890a68..5fd820a96 100644 --- a/setup.cfg +++ b/setup.cfg @@ -41,7 +41,7 @@ setup_requires = thinc>=8.1.0,<8.2.0 install_requires = # Our libraries - spacy-legacy>=3.0.9,<3.1.0 + spacy-legacy>=3.0.10,<3.1.0 spacy-loggers>=1.0.0,<2.0.0 murmurhash>=0.28.0,<1.1.0 cymem>=2.0.2,<2.1.0 diff --git a/spacy/tests/training/test_logger.py b/spacy/tests/training/test_logger.py new file mode 100644 index 000000000..0dfd0cbf4 --- /dev/null +++ b/spacy/tests/training/test_logger.py @@ -0,0 +1,30 @@ +import pytest +import spacy + +from spacy.training import loggers + + +@pytest.fixture() +def nlp(): + nlp = spacy.blank("en") + nlp.add_pipe("ner") + return nlp + + +@pytest.fixture() +def info(): + return { + "losses": {"ner": 100}, + "other_scores": {"ENTS_F": 0.85, "ENTS_P": 0.90, "ENTS_R": 0.80}, + "epoch": 100, + "step": 125, + "score": 85, + } + + +def test_console_logger(nlp, info): + console_logger = loggers.console_logger( + progress_bar=True, console_output=True, output_file=None + ) 
+ log_step, finalize = console_logger(nlp) + log_step(info) diff --git a/spacy/training/loggers.py b/spacy/training/loggers.py index edd0f1959..408ea7140 100644 --- a/spacy/training/loggers.py +++ b/spacy/training/loggers.py @@ -1,10 +1,13 @@ -from typing import TYPE_CHECKING, Dict, Any, Tuple, Callable, List, Optional, IO +from typing import TYPE_CHECKING, Dict, Any, Tuple, Callable, List, Optional, IO, Union from wasabi import Printer +from pathlib import Path import tqdm import sys +import srsly from ..util import registry from ..errors import Errors +from .. import util if TYPE_CHECKING: from ..language import Language # noqa: F401 @@ -23,13 +26,44 @@ def setup_table( return final_cols, final_widths, ["r" for _ in final_widths] -@registry.loggers("spacy.ConsoleLogger.v1") -def console_logger(progress_bar: bool = False): +@registry.loggers("spacy.ConsoleLogger.v2") +def console_logger( + progress_bar: bool = False, + console_output: bool = True, + output_file: Optional[Union[str, Path]] = None, +): + """The ConsoleLogger.v2 prints out training logs in the console and/or saves them to a jsonl file. + progress_bar (bool): Whether the logger should print the progress bar. + console_output (bool): Whether the logger should print the logs on the console. + output_file (Optional[Union[str, Path]]): The file to save the training logs to. + """ + _log_exist = False + if output_file: + output_file = util.ensure_path(output_file) # type: ignore + if output_file.exists(): # type: ignore + _log_exist = True + if not output_file.parents[0].exists(): # type: ignore + output_file.parents[0].mkdir(parents=True) # type: ignore + def setup_printer( nlp: "Language", stdout: IO = sys.stdout, stderr: IO = sys.stderr ) -> Tuple[Callable[[Optional[Dict[str, Any]]], None], Callable[[], None]]: write = lambda text: print(text, file=stdout, flush=True) msg = Printer(no_print=True) + + nonlocal output_file + output_stream = None + if _log_exist: + write( + msg.warn( + f"Saving logs is disabled because {output_file} already exists." 
+ ) + ) + output_file = None + elif output_file: + write(msg.info(f"Saving results to {output_file}")) + output_stream = open(output_file, "w", encoding="utf-8") + # ensure that only trainable components are logged logged_pipes = [ name @@ -40,13 +74,15 @@ def console_logger(progress_bar: bool = False): score_weights = nlp.config["training"]["score_weights"] score_cols = [col for col, value in score_weights.items() if value is not None] loss_cols = [f"Loss {pipe}" for pipe in logged_pipes] - spacing = 2 - table_header, table_widths, table_aligns = setup_table( - cols=["E", "#"] + loss_cols + score_cols + ["Score"], - widths=[3, 6] + [8 for _ in loss_cols] + [6 for _ in score_cols] + [6], - ) - write(msg.row(table_header, widths=table_widths, spacing=spacing)) - write(msg.row(["-" * width for width in table_widths], spacing=spacing)) + + if console_output: + spacing = 2 + table_header, table_widths, table_aligns = setup_table( + cols=["E", "#"] + loss_cols + score_cols + ["Score"], + widths=[3, 6] + [8 for _ in loss_cols] + [6 for _ in score_cols] + [6], + ) + write(msg.row(table_header, widths=table_widths, spacing=spacing)) + write(msg.row(["-" * width for width in table_widths], spacing=spacing)) progress = None def log_step(info: Optional[Dict[str, Any]]) -> None: @@ -57,12 +93,15 @@ def console_logger(progress_bar: bool = False): if progress is not None: progress.update(1) return - losses = [ - "{0:.2f}".format(float(info["losses"][pipe_name])) - for pipe_name in logged_pipes - ] + + losses = [] + log_losses = {} + for pipe_name in logged_pipes: + losses.append("{0:.2f}".format(float(info["losses"][pipe_name]))) + log_losses[pipe_name] = float(info["losses"][pipe_name]) scores = [] + log_scores = {} for col in score_cols: score = info["other_scores"].get(col, 0.0) try: @@ -73,6 +112,7 @@ def console_logger(progress_bar: bool = False): if col != "speed": score *= 100 scores.append("{0:.2f}".format(score)) + log_scores[str(col)] = score data = ( [info["epoch"], info["step"]] @@ -80,20 +120,36 @@ def console_logger(progress_bar: bool = False): + scores + ["{0:.2f}".format(float(info["score"]))] ) + + if output_stream: + # Write to log file per log_step + log_data = { + "epoch": info["epoch"], + "step": info["step"], + "losses": log_losses, + "scores": log_scores, + "score": float(info["score"]), + } + output_stream.write(srsly.json_dumps(log_data) + "\n") + if progress is not None: progress.close() - write( - msg.row(data, widths=table_widths, aligns=table_aligns, spacing=spacing) - ) - if progress_bar: - # Set disable=None, so that it disables on non-TTY - progress = tqdm.tqdm( - total=eval_frequency, disable=None, leave=False, file=stderr + if console_output: + write( + msg.row( + data, widths=table_widths, aligns=table_aligns, spacing=spacing + ) ) - progress.set_description(f"Epoch {info['epoch']+1}") + if progress_bar: + # Set disable=None, so that it disables on non-TTY + progress = tqdm.tqdm( + total=eval_frequency, disable=None, leave=False, file=stderr + ) + progress.set_description(f"Epoch {info['epoch']+1}") def finalize() -> None: - pass + if output_stream: + output_stream.close() return log_step, finalize diff --git a/website/docs/api/legacy.md b/website/docs/api/legacy.md index 31d178b67..d9167c76f 100644 --- a/website/docs/api/legacy.md +++ b/website/docs/api/legacy.md @@ -248,6 +248,59 @@ added to an existing vectors table. See more details in ## Loggers {#loggers} +These functions are available from `@spacy.registry.loggers`. 
+ +### spacy.ConsoleLogger.v1 {#ConsoleLogger_v1} + +> #### Example config +> +> ```ini +> [training.logger] +> @loggers = "spacy.ConsoleLogger.v1" +> progress_bar = true +> ``` + +Writes the results of a training step to the console in a tabular format. + + + +```cli +$ python -m spacy train config.cfg +``` + +``` +ℹ Using CPU +ℹ Loading config and nlp from: config.cfg +ℹ Pipeline: ['tok2vec', 'tagger'] +ℹ Start training +ℹ Training. Initial learn rate: 0.0 + +E # LOSS TOK2VEC LOSS TAGGER TAG_ACC SCORE +--- ------ ------------ ----------- ------- ------ + 0 0 0.00 86.20 0.22 0.00 + 0 200 3.08 18968.78 34.00 0.34 + 0 400 31.81 22539.06 33.64 0.34 + 0 600 92.13 22794.91 43.80 0.44 + 0 800 183.62 21541.39 56.05 0.56 + 0 1000 352.49 25461.82 65.15 0.65 + 0 1200 422.87 23708.82 71.84 0.72 + 0 1400 601.92 24994.79 76.57 0.77 + 0 1600 662.57 22268.02 80.20 0.80 + 0 1800 1101.50 28413.77 82.56 0.83 + 0 2000 1253.43 28736.36 85.00 0.85 + 0 2200 1411.02 28237.53 87.42 0.87 + 0 2400 1605.35 28439.95 88.70 0.89 +``` + +Note that the cumulative loss keeps increasing within one epoch, but should +start decreasing across epochs. + + + +| Name | Description | +| -------------- | --------------------------------------------------------- | +| `progress_bar` | Whether the logger should print the progress bar ~~bool~~ | + Logging utilities for spaCy are implemented in the [`spacy-loggers`](https://github.com/explosion/spacy-loggers) repo, and the functions are typically available from `@spacy.registry.loggers`. diff --git a/website/docs/api/top-level.md b/website/docs/api/top-level.md index 1e1925442..c3dc42f1a 100644 --- a/website/docs/api/top-level.md +++ b/website/docs/api/top-level.md @@ -275,8 +275,8 @@ Render a dependency parse tree or named entity visualization. ### displacy.parse_deps {#displacy.parse_deps tag="method" new="2"} -Generate dependency parse in `{'words': [], 'arcs': []}` format. -For use with the `manual=True` argument in `displacy.render`. +Generate dependency parse in `{'words': [], 'arcs': []}` format. For use with +the `manual=True` argument in `displacy.render`. > #### Example > @@ -297,8 +297,8 @@ For use with the `manual=True` argument in `displacy.render`. ### displacy.parse_ents {#displacy.parse_ents tag="method" new="2"} -Generate named entities in `[{start: i, end: i, label: 'label'}]` format. -For use with the `manual=True` argument in `displacy.render`. +Generate named entities in `[{start: i, end: i, label: 'label'}]` format. For +use with the `manual=True` argument in `displacy.render`. > #### Example > @@ -319,8 +319,8 @@ For use with the `manual=True` argument in `displacy.render`. ### displacy.parse_spans {#displacy.parse_spans tag="method" new="2"} -Generate spans in `[{start_token: i, end_token: i, label: 'label'}]` format. -For use with the `manual=True` argument in `displacy.render`. +Generate spans in `[{start_token: i, end_token: i, label: 'label'}]` format. For +use with the `manual=True` argument in `displacy.render`. > #### Example > @@ -505,7 +505,7 @@ finished. To log each training step, a and the accuracy scores on the development set. The built-in, default logger is the ConsoleLogger, which prints results to the -console in tabular format. The +console in tabular format and saves them to a `jsonl` file. The [spacy-loggers](https://github.com/explosion/spacy-loggers) package, included as a dependency of spaCy, enables other loggers, such as one that sends results to a [Weights & Biases](https://www.wandb.com/) dashboard. 
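The records that `ConsoleLogger.v2` appends to the output file are plain JSON lines with the fields assembled in `log_data` above (`epoch`, `step`, `losses`, `scores`, `score`). A minimal sketch of reading them back, assuming the output file was configured as `training_log.jsonl` as in the example below:

```python
import srsly

# One record per logged training step; field names follow the log_data
# dict written by the logger.
for record in srsly.read_jsonl("training_log.jsonl"):
    print(record["epoch"], record["step"], record["score"], record["losses"])
```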
@@ -513,16 +513,20 @@ a [Weights & Biases](https://www.wandb.com/) dashboard. Instead of using one of the built-in loggers, you can [implement your own](/usage/training#custom-logging). -#### spacy.ConsoleLogger.v1 {#ConsoleLogger tag="registered function"} +#### spacy.ConsoleLogger.v2 {#ConsoleLogger tag="registered function"} > #### Example config > > ```ini > [training.logger] -> @loggers = "spacy.ConsoleLogger.v1" +> @loggers = "spacy.ConsoleLogger.v2" +> progress_bar = true +> console_output = true +> output_file = "training_log.jsonl" > ``` -Writes the results of a training step to the console in a tabular format. +Writes the results of a training step to the console in a tabular format and +saves them to a `jsonl` file. @@ -536,22 +540,23 @@ $ python -m spacy train config.cfg ℹ Pipeline: ['tok2vec', 'tagger'] ℹ Start training ℹ Training. Initial learn rate: 0.0 +ℹ Saving results to training_log.jsonl E # LOSS TOK2VEC LOSS TAGGER TAG_ACC SCORE --- ------ ------------ ----------- ------- ------ - 1 0 0.00 86.20 0.22 0.00 - 1 200 3.08 18968.78 34.00 0.34 - 1 400 31.81 22539.06 33.64 0.34 - 1 600 92.13 22794.91 43.80 0.44 - 1 800 183.62 21541.39 56.05 0.56 - 1 1000 352.49 25461.82 65.15 0.65 - 1 1200 422.87 23708.82 71.84 0.72 - 1 1400 601.92 24994.79 76.57 0.77 - 1 1600 662.57 22268.02 80.20 0.80 - 1 1800 1101.50 28413.77 82.56 0.83 - 1 2000 1253.43 28736.36 85.00 0.85 - 1 2200 1411.02 28237.53 87.42 0.87 - 1 2400 1605.35 28439.95 88.70 0.89 + 0 0 0.00 86.20 0.22 0.00 + 0 200 3.08 18968.78 34.00 0.34 + 0 400 31.81 22539.06 33.64 0.34 + 0 600 92.13 22794.91 43.80 0.44 + 0 800 183.62 21541.39 56.05 0.56 + 0 1000 352.49 25461.82 65.15 0.65 + 0 1200 422.87 23708.82 71.84 0.72 + 0 1400 601.92 24994.79 76.57 0.77 + 0 1600 662.57 22268.02 80.20 0.80 + 0 1800 1101.50 28413.77 82.56 0.83 + 0 2000 1253.43 28736.36 85.00 0.85 + 0 2200 1411.02 28237.53 87.42 0.87 + 0 2400 1605.35 28439.95 88.70 0.89 ``` Note that the cumulative loss keeps increasing within one epoch, but should @@ -559,6 +564,12 @@ start decreasing across epochs. +| Name | Description | +| ---------------- | --------------------------------------------------------------------- | +| `progress_bar` | Whether the logger should print the progress bar ~~bool~~ | +| `console_output` | Whether the logger should print the logs on the console. ~~bool~~ | +| `output_file` | The file to save the training logs to. ~~Optional[Union[str, Path]]~~ | + ## Readers {#readers} ### File readers {#file-readers source="github.com/explosion/srsly" new="3"} From aafee5e1b7c8d13d9ac14c438063621a18bec743 Mon Sep 17 00:00:00 2001 From: Paul O'Leary McCann Date: Mon, 29 Aug 2022 17:32:38 +0900 Subject: [PATCH 04/82] Fix lookup usage in French/Catalan (fix #11347) (#11382) * Fix lookup usage (fix #11347) Before using the lookups table in the French (and Catalan) lemmatizers, there's a check to see if the current term is in the table. But it's checking a string against hashes, so it's always false. Also the table lookup function is designed so you don't have to do that anyway. 
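A small illustration of both points, using a toy table (the table name and entries here are made up, not the shipped lookup data from spacy-lookups-data):

```python
from spacy.lookups import Lookups

lookups = Lookups()
table = lookups.add_table("lemma_lookup", {"chevaux": ["cheval"]})

string = "chevaux"
# The membership check from the old code compares the raw string against
# hashed keys, so it never matches.
print(string in table.keys())            # False
# The table's own get() accepts the string and falls back cleanly.
print(table.get(string, [string])[0])    # "cheval"
```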
* Use the lookup table directly * Use string, not token --- spacy/lang/ca/lemmatizer.py | 6 +++--- spacy/lang/fr/lemmatizer.py | 13 ++++++++++--- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/spacy/lang/ca/lemmatizer.py b/spacy/lang/ca/lemmatizer.py index 2fd012912..0f15e6e65 100644 --- a/spacy/lang/ca/lemmatizer.py +++ b/spacy/lang/ca/lemmatizer.py @@ -72,10 +72,10 @@ class CatalanLemmatizer(Lemmatizer): oov_forms.append(form) if not forms: forms.extend(oov_forms) - if not forms and string in lookup_table.keys(): - forms.append(self.lookup_lemmatize(token)[0]) + + # use lookups, and fall back to the token itself if not forms: - forms.append(string) + forms.append(lookup_table.get(string, [string])[0]) forms = list(dict.fromkeys(forms)) self.cache[cache_key] = forms return forms diff --git a/spacy/lang/fr/lemmatizer.py b/spacy/lang/fr/lemmatizer.py index c6422cf96..a7cbe0bcf 100644 --- a/spacy/lang/fr/lemmatizer.py +++ b/spacy/lang/fr/lemmatizer.py @@ -53,11 +53,16 @@ class FrenchLemmatizer(Lemmatizer): rules = rules_table.get(univ_pos, []) string = string.lower() forms = [] + # first try lookup in table based on upos if string in index: forms.append(string) self.cache[cache_key] = forms return forms + + # then add anything in the exceptions table forms.extend(exceptions.get(string, [])) + + # if nothing found yet, use the rules oov_forms = [] if not forms: for old, new in rules: @@ -69,12 +74,14 @@ class FrenchLemmatizer(Lemmatizer): forms.append(form) else: oov_forms.append(form) + + # if still nothing, add the oov forms from rules if not forms: forms.extend(oov_forms) - if not forms and string in lookup_table.keys(): - forms.append(self.lookup_lemmatize(token)[0]) + + # use lookups, which fall back to the token itself if not forms: - forms.append(string) + forms.append(lookup_table.get(string, [string])[0]) forms = list(dict.fromkeys(forms)) self.cache[cache_key] = forms return forms From 5ae63b1fbd549fdfc0f7399c0b9656d4a6681544 Mon Sep 17 00:00:00 2001 From: "Patrick J. Burns" Date: Tue, 30 Aug 2022 08:04:54 -0400 Subject: [PATCH 05/82] Add Latin language support (#11349) * Add lang folder for la (Latin) * Add Latin lang classes * Add minimal tokenizer exceptions * Add minimal stopwords * Add minimal lex_attrs * Update stopwords, tokenizer exceptions * Add la tests; register la_tokenizer in conftest.py * Update spacy/lang/la/lex_attrs.py Remove duplicate form in Latin lex_attrs Co-authored-by: Sofie Van Landeghem * Update natto-py version spec (#11222) * Update natto-py version spec * Update setup.cfg Co-authored-by: Adriane Boyd Co-authored-by: Adriane Boyd * Add scorer to textcat API docs config settings (#11263) * Update docs for pipeline initialize() methods (#11221) * Update documentation for dependency parser * Update documentation for trainable_lemmatizer * Update documentation for entity_linker * Update documentation for ner * Update documentation for morphologizer * Update documentation for senter * Update documentation for spancat * Update documentation for tagger * Update documentation for textcat * Update documentation for tok2vec * Run prettier on edited files * Apply similar changes in transformer docs * Remove need to say annotated example explicitly I removed the need to say "Must contain at least one annotated Example" because it's often a given that Examples will contain some gold-standard annotation. 
* Run prettier on transformer docs * chore: add 'concepCy' to spacy universe (#11255) * chore: add 'concepCy' to spacy universe * docs: add 'slogan' to concepCy * Support full prerelease versions in the compat table (#11228) * Support full prerelease versions in the compat table * Fix types * adding spans to doc_annotation in Example.to_dict (#11261) * adding spans to doc_annotation in Example.to_dict * to_dict compatible with from_dict: tuples instead of spans * use strings for label and kb_id * Simplify test * Update data formats docs Co-authored-by: Stefanie Wolf Co-authored-by: Adriane Boyd * Fix regex invalid escape sequences (#11276) * Add W605 to the errors raised by flake8 in the CI (#11283) * Clean up automated label-based issue handling (#11284) * Clean up automated label-based issue handline 1. upgrade tiangolo/issue-manager to latest 2. move needs-more-info to tiangolo 3. change needs-more-info close time to 7 days 4. delete old needs-more-info config * Use old, longer message * Fix label name * Fix Dutch noun chunks to skip overlapping spans (#11275) * Add test for overlapping noun chunks * Skip overlapping noun chunks * Update spacy/tests/lang/nl/test_noun_chunks.py Co-authored-by: Sofie Van Landeghem Co-authored-by: Sofie Van Landeghem * Docs: displaCy documentation - data types, `parse_{deps,ents,spans}`, spans example (#10950) * add in spans example and parse references * rm autoformatter * rm extra ents copy * TypedDict draft * type fixes * restore non-documentation files * docs update * fix spans example * fix hyperlinks * add parse example * example fix + argument fix * fix api arg in docs * fix bad variable replacement * fix spacing in style Co-authored-by: Sofie Van Landeghem * fix spacing on table * fix spacing on table * rm temp files Co-authored-by: Sofie Van Landeghem * include span_ruler for default warning filter (#11333) * Add uk pipelines to website (#11332) * Check for . in factory names (#11336) * Make fixes for PR #11349 * Fix roman numeral coverage in #11349 Co-authored-by: Patrick J. 
Burns Co-authored-by: Sofie Van Landeghem Co-authored-by: Paul O'Leary McCann Co-authored-by: Adriane Boyd Co-authored-by: Lj Miranda <12949683+ljvmiranda921@users.noreply.github.com> Co-authored-by: Jules Belveze <32683010+JulesBelveze@users.noreply.github.com> Co-authored-by: stefawolf Co-authored-by: Stefanie Wolf Co-authored-by: Peter Baumgartner <5107405+pmbaumgartner@users.noreply.github.com> --- spacy/lang/la/__init__.py | 18 +++++++++++++ spacy/lang/la/lex_attrs.py | 32 +++++++++++++++++++++++ spacy/lang/la/stop_words.py | 37 +++++++++++++++++++++++++++ spacy/lang/la/tokenizer_exceptions.py | 30 ++++++++++++++++++++++ spacy/tests/conftest.py | 5 ++++ spacy/tests/lang/la/__init__.py | 0 spacy/tests/lang/la/test_exception.py | 7 +++++ spacy/tests/lang/la/test_text.py | 33 ++++++++++++++++++++++++ website/docs/api/top-level.md | 2 +- 9 files changed, 163 insertions(+), 1 deletion(-) create mode 100644 spacy/lang/la/__init__.py create mode 100644 spacy/lang/la/lex_attrs.py create mode 100644 spacy/lang/la/stop_words.py create mode 100644 spacy/lang/la/tokenizer_exceptions.py create mode 100644 spacy/tests/lang/la/__init__.py create mode 100644 spacy/tests/lang/la/test_exception.py create mode 100644 spacy/tests/lang/la/test_text.py diff --git a/spacy/lang/la/__init__.py b/spacy/lang/la/__init__.py new file mode 100644 index 000000000..5f2cccee3 --- /dev/null +++ b/spacy/lang/la/__init__.py @@ -0,0 +1,18 @@ +from ...language import Language, BaseDefaults +from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS +from .stop_words import STOP_WORDS +from .lex_attrs import LEX_ATTRS + + +class LatinDefaults(BaseDefaults): + tokenizer_exceptions = TOKENIZER_EXCEPTIONS + stop_words = STOP_WORDS + lex_attr_getters = LEX_ATTRS + + +class Latin(Language): + lang = "la" + Defaults = LatinDefaults + + +__all__ = ["Latin"] diff --git a/spacy/lang/la/lex_attrs.py b/spacy/lang/la/lex_attrs.py new file mode 100644 index 000000000..9348a811a --- /dev/null +++ b/spacy/lang/la/lex_attrs.py @@ -0,0 +1,32 @@ +from ...attrs import LIKE_NUM +import re + +# cf. Goyvaerts/Levithan 2009; case-insensitive, allow 4 +roman_numerals_compile = re.compile(r'(?i)^(?=[MDCLXVI])M*(C[MD]|D?C{0,4})(X[CL]|L?X{0,4})(I[XV]|V?I{0,4})$') + +_num_words = set( + """ +unus una unum duo duae tres tria quattuor quinque sex septem octo novem decem +""".split() +) + +_ordinal_words = set( + """ +primus prima primum secundus secunda secundum tertius tertia tertium +""".split() +) + + +def like_num(text): + if text.isdigit(): + return True + if roman_numerals_compile.match(text): + return True + if text.lower() in _num_words: + return True + if text.lower() in _ordinal_words: + return True + return False + + +LEX_ATTRS = {LIKE_NUM: like_num} diff --git a/spacy/lang/la/stop_words.py b/spacy/lang/la/stop_words.py new file mode 100644 index 000000000..8b590bb67 --- /dev/null +++ b/spacy/lang/la/stop_words.py @@ -0,0 +1,37 @@ +# Corrected Perseus list, cf. 
https://wiki.digitalclassicist.org/Stopwords_for_Greek_and_Latin + +STOP_WORDS = set( + """ +ab ac ad adhuc aliqui aliquis an ante apud at atque aut autem + +cum cur + +de deinde dum + +ego enim ergo es est et etiam etsi ex + +fio + +haud hic + +iam idem igitur ille in infra inter interim ipse is ita + +magis modo mox + +nam ne nec necque neque nisi non nos + +o ob + +per possum post pro + +quae quam quare qui quia quicumque quidem quilibet quis quisnam quisquam quisque quisquis quo quoniam + +sed si sic sive sub sui sum super suus + +tam tamen trans tu tum + +ubi uel uero + +vel vero +""".split() +) diff --git a/spacy/lang/la/tokenizer_exceptions.py b/spacy/lang/la/tokenizer_exceptions.py new file mode 100644 index 000000000..905304188 --- /dev/null +++ b/spacy/lang/la/tokenizer_exceptions.py @@ -0,0 +1,30 @@ +from ..tokenizer_exceptions import BASE_EXCEPTIONS +from ...symbols import ORTH +from ...util import update_exc + + +## TODO: Look into systematically handling u/v +_exc = { + "mecum": [{ORTH: "me"}, {ORTH: "cum"}], + "tecum": [{ORTH: "te"}, {ORTH: "cum"}], + "nobiscum": [{ORTH: "nobis"}, {ORTH: "cum"}], + "vobiscum": [{ORTH: "vobis"}, {ORTH: "cum"}], + "uobiscum": [{ORTH: "uobis"}, {ORTH: "cum"}], +} + +for orth in [ + + 'A.', 'Agr.', 'Ap.', 'C.', 'Cn.', 'D.', 'F.', 'K.', 'L.', "M'.", 'M.', 'Mam.', 'N.', 'Oct.', + 'Opet.', 'P.', 'Paul.', 'Post.', 'Pro.', 'Q.', 'S.', 'Ser.', 'Sert.', 'Sex.', 'St.', 'Sta.', + 'T.', 'Ti.', 'V.', 'Vol.', 'Vop.', 'U.', 'Uol.', 'Uop.', + + 'Ian.', 'Febr.', 'Mart.', 'Apr.', 'Mai.', 'Iun.', 'Iul.', 'Aug.', 'Sept.', 'Oct.', 'Nov.', 'Nou.', + 'Dec.', + + 'Non.', 'Id.', 'A.D.', + + 'Coll.', 'Cos.', 'Ord.', 'Pl.', 'S.C.', 'Suff.', 'Trib.', +]: + _exc[orth] = [{ORTH: orth}] + +TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc) diff --git a/spacy/tests/conftest.py b/spacy/tests/conftest.py index 5193bd301..0395ba7ca 100644 --- a/spacy/tests/conftest.py +++ b/spacy/tests/conftest.py @@ -256,6 +256,11 @@ def ko_tokenizer_tokenizer(): return nlp.tokenizer +@pytest.fixture(scope="module") +def la_tokenizer(): + return get_lang_class("la")().tokenizer + + @pytest.fixture(scope="session") def lb_tokenizer(): return get_lang_class("lb")().tokenizer diff --git a/spacy/tests/lang/la/__init__.py b/spacy/tests/lang/la/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/spacy/tests/lang/la/test_exception.py b/spacy/tests/lang/la/test_exception.py new file mode 100644 index 000000000..04bc1d489 --- /dev/null +++ b/spacy/tests/lang/la/test_exception.py @@ -0,0 +1,7 @@ +import pytest + +def test_la_tokenizer_handles_exc_in_text(la_tokenizer): + text = "scio te omnia facturum, ut nobiscum quam primum sis" + tokens = la_tokenizer(text) + assert len(tokens) == 11 + assert tokens[6].text == "nobis" diff --git a/spacy/tests/lang/la/test_text.py b/spacy/tests/lang/la/test_text.py new file mode 100644 index 000000000..11676b92b --- /dev/null +++ b/spacy/tests/lang/la/test_text.py @@ -0,0 +1,33 @@ +import pytest +from spacy.lang.la.lex_attrs import like_num + +@pytest.mark.parametrize( + "text,match", + [ + ("IIII", True), + ("VI", True), + ("vi", True), + ("IV", True), + ("iv", True), + ("IX", True), + ("ix", True), + ("MMXXII", True), + ("0", True), + ("1", True), + ("quattuor", True), + ("decem", True), + ("tertius", True), + ("canis", False), + ("MMXX11", False), + (",", False), + ], +) +def test_lex_attrs_like_number(la_tokenizer, text, match): + tokens = la_tokenizer(text) + assert len(tokens) == 1 + assert tokens[0].like_num == match + 
+@pytest.mark.parametrize("word", ["quinque"]) +def test_la_lex_attrs_capitals(word): + assert like_num(word) + assert like_num(word.upper()) diff --git a/website/docs/api/top-level.md b/website/docs/api/top-level.md index c3dc42f1a..724f2775e 100644 --- a/website/docs/api/top-level.md +++ b/website/docs/api/top-level.md @@ -451,7 +451,7 @@ factories. | Registry name | Description | | ----------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `architectures` | Registry for functions that create [model architectures](/api/architectures). Can be used to register custom model architectures and reference them in the `config.cfg`. | -| `augmenters` | Registry for functions that create [data augmentation](#augmenters) callbacks for corpora and other training data iterators. | +| `augmenters` | Registry for functions that create [data augmentation](#augmenters) callbacks for corpora and other training data iterators. | | `batchers` | Registry for training and evaluation [data batchers](#batchers). | | `callbacks` | Registry for custom callbacks to [modify the `nlp` object](/usage/training#custom-code-nlp-callbacks) before training. | | `displacy_colors` | Registry for custom color scheme for the [`displacy` NER visualizer](/usage/visualizers). Automatically reads from [entry points](/usage/saving-loading#entry-points). | From 3f4b4b7b4fa2df6c5d888cdc97efb71093d3fb6b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dani=C3=ABl=20de=20Kok?= Date: Tue, 30 Aug 2022 14:21:02 +0200 Subject: [PATCH 06/82] Fix `test_{prefer,require}_gpu` (#11390) * Fix `test_{prefer,require}_gpu` These tests assumed that GPUs are only supported with CuPy, but since Thinc 8.1 we also support Metal Performance Shaders. 
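In usage terms, the sketch below shows what the updated tests encode: GPU allocation can now succeed through either backend, and the flags in `thinc.compat` tell you which ops class to expect.

```python
import spacy
from thinc.api import get_current_ops, CupyOps, MPSOps
from thinc.compat import has_cupy_gpu, has_torch_mps_gpu

if spacy.prefer_gpu():                    # True if either GPU backend is usable
    ops = get_current_ops()
    if has_cupy_gpu:
        assert isinstance(ops, CupyOps)   # CUDA via CuPy
    elif has_torch_mps_gpu:
        assert isinstance(ops, MPSOps)    # Apple Metal Performance Shaders
else:
    print("No GPU backend available, staying on CPU ops")
```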
* test_misc: arrange thinc imports to be together --- spacy/tests/test_misc.py | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/spacy/tests/test_misc.py b/spacy/tests/test_misc.py index d8743d322..1c9b045ac 100644 --- a/spacy/tests/test_misc.py +++ b/spacy/tests/test_misc.py @@ -10,7 +10,8 @@ from spacy.ml._precomputable_affine import _backprop_precomputable_affine_paddin from spacy.util import dot_to_object, SimpleFrozenList, import_file from spacy.util import to_ternary_int from thinc.api import Config, Optimizer, ConfigValidationError -from thinc.api import set_current_ops +from thinc.api import get_current_ops, set_current_ops, NumpyOps, CupyOps, MPSOps +from thinc.compat import has_cupy_gpu, has_torch_mps_gpu from spacy.training.batchers import minibatch_by_words from spacy.lang.en import English from spacy.lang.nl import Dutch @@ -18,7 +19,6 @@ from spacy.language import DEFAULT_CONFIG_PATH from spacy.schemas import ConfigSchemaTraining, TokenPattern, TokenPatternSchema from pydantic import ValidationError -from thinc.api import get_current_ops, NumpyOps, CupyOps from .util import get_random_doc, make_tempdir @@ -111,26 +111,25 @@ def test_PrecomputableAffine(nO=4, nI=5, nF=3, nP=2): def test_prefer_gpu(): current_ops = get_current_ops() - try: - import cupy # noqa: F401 - - prefer_gpu() + if has_cupy_gpu: + assert prefer_gpu() assert isinstance(get_current_ops(), CupyOps) - except ImportError: + elif has_torch_mps_gpu: + assert prefer_gpu() + assert isinstance(get_current_ops(), MPSOps) + else: assert not prefer_gpu() set_current_ops(current_ops) def test_require_gpu(): current_ops = get_current_ops() - try: - import cupy # noqa: F401 - + if has_cupy_gpu: require_gpu() assert isinstance(get_current_ops(), CupyOps) - except ImportError: - with pytest.raises(ValueError): - require_gpu() + elif has_torch_mps_gpu: + require_gpu() + assert isinstance(get_current_ops(), MPSOps) set_current_ops(current_ops) From 8fc0efc502da2f02076575e0887cb585d0e0f391 Mon Sep 17 00:00:00 2001 From: Sofie Van Landeghem Date: Wed, 31 Aug 2022 09:02:34 +0200 Subject: [PATCH 07/82] Allow string argument for disable/enable/exclude (#11406) * adding unit test for spacy.load with disable/exclude string arg * allow pure strings in from_config * update docs * upstream type adjustements * docs update * make docstring more consistent * Update spacy/language.py Co-authored-by: Adriane Boyd * two more cleanups * fix type in internal method Co-authored-by: Adriane Boyd --- spacy/__init__.py | 12 ++--- spacy/language.py | 32 +++++++----- spacy/tests/pipeline/test_pipe_methods.py | 11 +++++ spacy/util.py | 60 +++++++++++------------ website/docs/api/language.md | 27 +++++----- website/docs/api/top-level.md | 58 +++++++++++----------- 6 files changed, 112 insertions(+), 88 deletions(-) diff --git a/spacy/__init__.py b/spacy/__init__.py index 069215fda..d60f46b96 100644 --- a/spacy/__init__.py +++ b/spacy/__init__.py @@ -31,21 +31,21 @@ def load( name: Union[str, Path], *, vocab: Union[Vocab, bool] = True, - disable: Iterable[str] = util.SimpleFrozenList(), - enable: Iterable[str] = util.SimpleFrozenList(), - exclude: Iterable[str] = util.SimpleFrozenList(), + disable: Union[str, Iterable[str]] = util.SimpleFrozenList(), + enable: Union[str, Iterable[str]] = util.SimpleFrozenList(), + exclude: Union[str, Iterable[str]] = util.SimpleFrozenList(), config: Union[Dict[str, Any], Config] = util.SimpleFrozenDict(), ) -> Language: """Load a spaCy model from an installed package or a 
local path. name (str): Package name or model path. vocab (Vocab): A Vocab object. If True, a vocab is created. - disable (Iterable[str]): Names of pipeline components to disable. Disabled + disable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to disable. Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling nlp.enable_pipe. - enable (Iterable[str]): Names of pipeline components to enable. All other + enable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to enable. All other pipes will be disabled (but can be enabled later using nlp.enable_pipe). - exclude (Iterable[str]): Names of pipeline components to exclude. Excluded + exclude (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to exclude. Excluded components won't be loaded. config (Dict[str, Any] / Config): Config overrides as nested dict or dict keyed by section values in dot notation. diff --git a/spacy/language.py b/spacy/language.py index e89ae142b..ec330753c 100644 --- a/spacy/language.py +++ b/spacy/language.py @@ -1063,7 +1063,7 @@ class Language: """ if enable is None and disable is None: raise ValueError(Errors.E991) - if disable is not None and isinstance(disable, str): + if isinstance(disable, str): disable = [disable] if enable is not None: if isinstance(enable, str): @@ -1698,9 +1698,9 @@ class Language: config: Union[Dict[str, Any], Config] = {}, *, vocab: Union[Vocab, bool] = True, - disable: Iterable[str] = SimpleFrozenList(), - enable: Iterable[str] = SimpleFrozenList(), - exclude: Iterable[str] = SimpleFrozenList(), + disable: Union[str, Iterable[str]] = SimpleFrozenList(), + enable: Union[str, Iterable[str]] = SimpleFrozenList(), + exclude: Union[str, Iterable[str]] = SimpleFrozenList(), meta: Dict[str, Any] = SimpleFrozenDict(), auto_fill: bool = True, validate: bool = True, @@ -1711,12 +1711,12 @@ class Language: config (Dict[str, Any] / Config): The loaded config. vocab (Vocab): A Vocab object. If True, a vocab is created. - disable (Iterable[str]): Names of pipeline components to disable. + disable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to disable. Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling nlp.enable_pipe. - enable (Iterable[str]): Names of pipeline components to enable. All other + enable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to enable. All other pipes will be disabled (and can be enabled using `nlp.enable_pipe`). - exclude (Iterable[str]): Names of pipeline components to exclude. + exclude (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to exclude. Excluded components won't be loaded. meta (Dict[str, Any]): Meta overrides for nlp.meta. 
auto_fill (bool): Automatically fill in missing values in config based @@ -1727,6 +1727,12 @@ class Language: DOCS: https://spacy.io/api/language#from_config """ + if isinstance(disable, str): + disable = [disable] + if isinstance(enable, str): + enable = [enable] + if isinstance(exclude, str): + exclude = [exclude] if auto_fill: config = Config( cls.default_config, section_order=CONFIG_SECTION_ORDER @@ -2031,25 +2037,29 @@ class Language: @staticmethod def _resolve_component_status( - disable: Iterable[str], enable: Iterable[str], pipe_names: Collection[str] + disable: Union[str, Iterable[str]], + enable: Union[str, Iterable[str]], + pipe_names: Iterable[str], ) -> Tuple[str, ...]: """Derives whether (1) `disable` and `enable` values are consistent and (2) resolves those to a single set of disabled components. Raises an error in case of inconsistency. - disable (Iterable[str]): Names of components or serialization fields to disable. - enable (Iterable[str]): Names of pipeline components to enable. + disable (Union[str, Iterable[str]]): Name(s) of component(s) or serialization fields to disable. + enable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to enable. pipe_names (Iterable[str]): Names of all pipeline components. RETURNS (Tuple[str, ...]): Names of components to exclude from pipeline w.r.t. specified includes and excludes. """ - if disable is not None and isinstance(disable, str): + if isinstance(disable, str): disable = [disable] to_disable = disable if enable: + if isinstance(enable, str): + enable = [enable] to_disable = [ pipe_name for pipe_name in pipe_names if pipe_name not in enable ] diff --git a/spacy/tests/pipeline/test_pipe_methods.py b/spacy/tests/pipeline/test_pipe_methods.py index 6f00a1cd9..b946061f6 100644 --- a/spacy/tests/pipeline/test_pipe_methods.py +++ b/spacy/tests/pipeline/test_pipe_methods.py @@ -618,6 +618,7 @@ def test_load_disable_enable() -> None: base_nlp.to_disk(tmp_dir) to_disable = ["parser", "tagger"] to_enable = ["tagger", "parser"] + single_str = "tagger" # Setting only `disable`. nlp = spacy.load(tmp_dir, disable=to_disable) @@ -632,6 +633,16 @@ def test_load_disable_enable() -> None: ] ) + # Loading with a string representing one component + nlp = spacy.load(tmp_dir, exclude=single_str) + assert single_str not in nlp.component_names + + nlp = spacy.load(tmp_dir, disable=single_str) + assert single_str in nlp.component_names + assert single_str not in nlp.pipe_names + assert nlp._disabled == {single_str} + assert nlp.disabled == [single_str] + # Testing consistent enable/disable combination. nlp = spacy.load( tmp_dir, diff --git a/spacy/util.py b/spacy/util.py index d170fc15b..4e1a62d05 100644 --- a/spacy/util.py +++ b/spacy/util.py @@ -398,9 +398,9 @@ def load_model( name: Union[str, Path], *, vocab: Union["Vocab", bool] = True, - disable: Iterable[str] = SimpleFrozenList(), - enable: Iterable[str] = SimpleFrozenList(), - exclude: Iterable[str] = SimpleFrozenList(), + disable: Union[str, Iterable[str]] = SimpleFrozenList(), + enable: Union[str, Iterable[str]] = SimpleFrozenList(), + exclude: Union[str, Iterable[str]] = SimpleFrozenList(), config: Union[Dict[str, Any], Config] = SimpleFrozenDict(), ) -> "Language": """Load a model from a package or data path. @@ -408,9 +408,9 @@ def load_model( name (str): Package name or model path. vocab (Vocab / True): Optional vocab to pass in on initialization. If True, a new Vocab object will be created. - disable (Iterable[str]): Names of pipeline components to disable. 
- enable (Iterable[str]): Names of pipeline components to enable. All others will be disabled. - exclude (Iterable[str]): Names of pipeline components to exclude. + disable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to disable. + enable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to enable. All others will be disabled. + exclude (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to exclude. config (Dict[str, Any] / Config): Config overrides as nested dict or dict keyed by section values in dot notation. RETURNS (Language): The loaded nlp object. @@ -440,9 +440,9 @@ def load_model_from_package( name: str, *, vocab: Union["Vocab", bool] = True, - disable: Iterable[str] = SimpleFrozenList(), - enable: Iterable[str] = SimpleFrozenList(), - exclude: Iterable[str] = SimpleFrozenList(), + disable: Union[str, Iterable[str]] = SimpleFrozenList(), + enable: Union[str, Iterable[str]] = SimpleFrozenList(), + exclude: Union[str, Iterable[str]] = SimpleFrozenList(), config: Union[Dict[str, Any], Config] = SimpleFrozenDict(), ) -> "Language": """Load a model from an installed package. @@ -450,12 +450,12 @@ def load_model_from_package( name (str): The package name. vocab (Vocab / True): Optional vocab to pass in on initialization. If True, a new Vocab object will be created. - disable (Iterable[str]): Names of pipeline components to disable. Disabled + disable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to disable. Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling nlp.enable_pipe. - enable (Iterable[str]): Names of pipeline components to enable. All other + enable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to enable. All other pipes will be disabled (and can be enabled using `nlp.enable_pipe`). - exclude (Iterable[str]): Names of pipeline components to exclude. Excluded + exclude (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to exclude. Excluded components won't be loaded. config (Dict[str, Any] / Config): Config overrides as nested dict or dict keyed by section values in dot notation. @@ -470,9 +470,9 @@ def load_model_from_path( *, meta: Optional[Dict[str, Any]] = None, vocab: Union["Vocab", bool] = True, - disable: Iterable[str] = SimpleFrozenList(), - enable: Iterable[str] = SimpleFrozenList(), - exclude: Iterable[str] = SimpleFrozenList(), + disable: Union[str, Iterable[str]] = SimpleFrozenList(), + enable: Union[str, Iterable[str]] = SimpleFrozenList(), + exclude: Union[str, Iterable[str]] = SimpleFrozenList(), config: Union[Dict[str, Any], Config] = SimpleFrozenDict(), ) -> "Language": """Load a model from a data directory path. Creates Language class with @@ -482,12 +482,12 @@ def load_model_from_path( meta (Dict[str, Any]): Optional model meta. vocab (Vocab / True): Optional vocab to pass in on initialization. If True, a new Vocab object will be created. - disable (Iterable[str]): Names of pipeline components to disable. Disabled + disable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to disable. Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling nlp.enable_pipe. - enable (Iterable[str]): Names of pipeline components to enable. All other + enable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to enable. All other pipes will be disabled (and can be enabled using `nlp.enable_pipe`). - exclude (Iterable[str]): Names of pipeline components to exclude. 
Excluded + exclude (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to exclude. Excluded components won't be loaded. config (Dict[str, Any] / Config): Config overrides as nested dict or dict keyed by section values in dot notation. @@ -516,9 +516,9 @@ def load_model_from_config( *, meta: Dict[str, Any] = SimpleFrozenDict(), vocab: Union["Vocab", bool] = True, - disable: Iterable[str] = SimpleFrozenList(), - enable: Iterable[str] = SimpleFrozenList(), - exclude: Iterable[str] = SimpleFrozenList(), + disable: Union[str, Iterable[str]] = SimpleFrozenList(), + enable: Union[str, Iterable[str]] = SimpleFrozenList(), + exclude: Union[str, Iterable[str]] = SimpleFrozenList(), auto_fill: bool = False, validate: bool = True, ) -> "Language": @@ -529,12 +529,12 @@ def load_model_from_config( meta (Dict[str, Any]): Optional model meta. vocab (Vocab / True): Optional vocab to pass in on initialization. If True, a new Vocab object will be created. - disable (Iterable[str]): Names of pipeline components to disable. Disabled + disable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to disable. Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling nlp.enable_pipe. - enable (Iterable[str]): Names of pipeline components to enable. All other + enable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to enable. All other pipes will be disabled (and can be enabled using `nlp.enable_pipe`). - exclude (Iterable[str]): Names of pipeline components to exclude. Excluded + exclude (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to exclude. Excluded components won't be loaded. auto_fill (bool): Whether to auto-fill config with missing defaults. validate (bool): Whether to show config validation errors. @@ -616,9 +616,9 @@ def load_model_from_init_py( init_file: Union[Path, str], *, vocab: Union["Vocab", bool] = True, - disable: Iterable[str] = SimpleFrozenList(), - enable: Iterable[str] = SimpleFrozenList(), - exclude: Iterable[str] = SimpleFrozenList(), + disable: Union[str, Iterable[str]] = SimpleFrozenList(), + enable: Union[str, Iterable[str]] = SimpleFrozenList(), + exclude: Union[str, Iterable[str]] = SimpleFrozenList(), config: Union[Dict[str, Any], Config] = SimpleFrozenDict(), ) -> "Language": """Helper function to use in the `load()` method of a model package's @@ -626,12 +626,12 @@ def load_model_from_init_py( vocab (Vocab / True): Optional vocab to pass in on initialization. If True, a new Vocab object will be created. - disable (Iterable[str]): Names of pipeline components to disable. Disabled + disable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to disable. Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling nlp.enable_pipe. - enable (Iterable[str]): Names of pipeline components to enable. All other + enable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to enable. All other pipes will be disabled (and can be enabled using `nlp.enable_pipe`). - exclude (Iterable[str]): Names of pipeline components to exclude. Excluded + exclude (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to exclude. Excluded components won't be loaded. config (Dict[str, Any] / Config): Config overrides as nested dict or dict keyed by section values in dot notation. 
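For reference, the calling convention this patch enables, mirroring the new unit test above (`en_core_web_sm` is only an example pipeline name):

```python
import spacy

# A single component name can now be passed as a plain string.
nlp = spacy.load("en_core_web_sm", disable="ner")   # same as disable=["ner"]
assert "ner" not in nlp.pipe_names        # not run ...
assert "ner" in nlp.component_names       # ... but still loaded
nlp.enable_pipe("ner")                    # and can be switched back on

# exclude="ner" would leave the component out of the pipeline entirely.
```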
diff --git a/website/docs/api/language.md b/website/docs/api/language.md index 9a413efaf..ed763e36a 100644 --- a/website/docs/api/language.md +++ b/website/docs/api/language.md @@ -63,17 +63,18 @@ spaCy loads a model under the hood based on its > nlp = Language.from_config(config) > ``` -| Name | Description | -| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `config` | The loaded config. ~~Union[Dict[str, Any], Config]~~ | -| _keyword-only_ | | -| `vocab` | A `Vocab` object. If `True`, a vocab is created using the default language data settings. ~~Vocab~~ | -| `disable` | Names of pipeline components to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [`nlp.enable_pipe`](/api/language#enable_pipe). ~~List[str]~~ | -| `exclude` | Names of pipeline components to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~List[str]~~ | -| `meta` | [Meta data](/api/data-formats#meta) overrides. ~~Dict[str, Any]~~ | -| `auto_fill` | Whether to automatically fill in missing values in the config, based on defaults and function argument annotations. Defaults to `True`. ~~bool~~ | -| `validate` | Whether to validate the component config and arguments against the types expected by the factory. Defaults to `True`. ~~bool~~ | -| **RETURNS** | The initialized object. ~~Language~~ | +| Name | Description | +| ------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `config` | The loaded config. ~~Union[Dict[str, Any], Config]~~ | +| _keyword-only_ | | +| `vocab` | A `Vocab` object. If `True`, a vocab is created using the default language data settings. ~~Vocab~~ | +| `disable` | Name(s) of pipeline component(s) to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [`nlp.enable_pipe`](/api/language#enable_pipe). ~~Union[str, Iterable[str]]~~ | +| `enable` 3.4 | Name(s) of pipeline component(s) to [enable](/usage/processing-pipelines#disabling). All other pipes will be disabled, but can be enabled again using [`nlp.enable_pipe`](/api/language#enable_pipe). ~~Union[str, Iterable[str]]~~ | +| `exclude` | Name(s) of pipeline component(s) to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~Union[str, Iterable[str]]~~ | +| `meta` | [Meta data](/api/data-formats#meta) overrides. ~~Dict[str, Any]~~ | +| `auto_fill` | Whether to automatically fill in missing values in the config, based on defaults and function argument annotations. Defaults to `True`. ~~bool~~ | +| `validate` | Whether to validate the component config and arguments against the types expected by the factory. Defaults to `True`. ~~bool~~ | +| **RETURNS** | The initialized object. 
~~Language~~ | ## Language.component {#component tag="classmethod" new="3"} @@ -695,8 +696,8 @@ As of spaCy v3.0, the `disable_pipes` method has been renamed to `select_pipes`: | Name | Description | | -------------- | ------------------------------------------------------------------------------------------------------ | | _keyword-only_ | | -| `disable` | Name(s) of pipeline components to disable. ~~Optional[Union[str, Iterable[str]]]~~ | -| `enable` | Name(s) of pipeline components that will not be disabled. ~~Optional[Union[str, Iterable[str]]]~~ | +| `disable` | Name(s) of pipeline component(s) to disable. ~~Optional[Union[str, Iterable[str]]]~~ | +| `enable` | Name(s) of pipeline component(s) that will not be disabled. ~~Optional[Union[str, Iterable[str]]]~~ | | **RETURNS** | The disabled pipes that can be restored by calling the object's `.restore()` method. ~~DisabledPipes~~ | ## Language.get_factory_meta {#get_factory_meta tag="classmethod" new="3"} diff --git a/website/docs/api/top-level.md b/website/docs/api/top-level.md index 724f2775e..220b2d6e9 100644 --- a/website/docs/api/top-level.md +++ b/website/docs/api/top-level.md @@ -45,16 +45,16 @@ specified separately using the new `exclude` keyword argument. > nlp = spacy.load("en_core_web_sm", exclude=["parser", "tagger"]) > ``` -| Name | Description | -| ------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `name` | Pipeline to load, i.e. package name or path. ~~Union[str, Path]~~ | -| _keyword-only_ | | -| `vocab` | Optional shared vocab to pass in on initialization. If `True` (default), a new `Vocab` object will be created. ~~Union[Vocab, bool]~~ | -| `disable` | Names of pipeline components to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [nlp.enable_pipe](/api/language#enable_pipe). ~~List[str]~~ | -| `enable` | Names of pipeline components to [enable](/usage/processing-pipelines#disabling). All other pipes will be disabled. ~~List[str]~~ | -| `exclude` 3 | Names of pipeline components to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~List[str]~~ | -| `config` 3 | Optional config overrides, either as nested dict or dict keyed by section value in dot notation, e.g. `"components.name.value"`. ~~Union[Dict[str, Any], Config]~~ | -| **RETURNS** | A `Language` object with the loaded pipeline. ~~Language~~ | +| Name | Description | +| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `name` | Pipeline to load, i.e. package name or path. ~~Union[str, Path]~~ | +| _keyword-only_ | | +| `vocab` | Optional shared vocab to pass in on initialization. If `True` (default), a new `Vocab` object will be created. ~~Union[Vocab, bool]~~ | +| `disable` | Name(s) of pipeline component(s) to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [nlp.enable_pipe](/api/language#enable_pipe). 
~~Union[str, Iterable[str]]~~ | +| `enable` 3.4 | Name(s) of pipeline component(s) to [enable](/usage/processing-pipelines#disabling). All other pipes will be disabled. ~~Union[str, Iterable[str]]~~ | +| `exclude` 3 | Name(s) of pipeline component(s) to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~Union[str, Iterable[str]]~~ | +| `config` 3 | Optional config overrides, either as nested dict or dict keyed by section value in dot notation, e.g. `"components.name.value"`. ~~Union[Dict[str, Any], Config]~~ | +| **RETURNS** | A `Language` object with the loaded pipeline. ~~Language~~ | Essentially, `spacy.load()` is a convenience wrapper that reads the pipeline's [`config.cfg`](/api/data-formats#config), uses the language and pipeline @@ -1049,15 +1049,16 @@ and create a `Language` object. The model data will then be loaded in via > nlp = util.load_model("/path/to/data") > ``` -| Name | Description | -| ------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `name` | Package name or path. ~~str~~ | -| _keyword-only_ | | -| `vocab` | Optional shared vocab to pass in on initialization. If `True` (default), a new `Vocab` object will be created. ~~Union[Vocab, bool]~~ | -| `disable` | Names of pipeline components to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [`nlp.enable_pipe`](/api/language#enable_pipe). ~~List[str]~~ | -| `exclude` 3 | Names of pipeline components to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~List[str]~~ | -| `config` 3 | Config overrides as nested dict or flat dict keyed by section values in dot notation, e.g. `"nlp.pipeline"`. ~~Union[Dict[str, Any], Config]~~ | -| **RETURNS** | `Language` class with the loaded pipeline. ~~Language~~ | +| Name | Description | +| ------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `name` | Package name or path. ~~str~~ | +| _keyword-only_ | | +| `vocab` | Optional shared vocab to pass in on initialization. If `True` (default), a new `Vocab` object will be created. ~~Union[Vocab, bool]~~ | +| `disable` | Name(s) of pipeline component(s) to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [`nlp.enable_pipe`](/api/language#enable_pipe). ~~Union[str, Iterable[str]]~~ | +| `enable` 3.4 | Name(s) of pipeline component(s) to [enable](/usage/processing-pipelines#disabling). All other pipes will be disabled, but can be enabled again using [`nlp.enable_pipe`](/api/language#enable_pipe). ~~Union[str, Iterable[str]]~~ | +| `exclude` | Name(s) of pipeline component(s) to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~Union[str, Iterable[str]]~~ | +| `config` 3 | Config overrides as nested dict or flat dict keyed by section values in dot notation, e.g. `"nlp.pipeline"`. 
~~Union[Dict[str, Any], Config]~~ | +| **RETURNS** | `Language` class with the loaded pipeline. ~~Language~~ | ### util.load_model_from_init_py {#util.load_model_from_init_py tag="function" new="2"} @@ -1073,15 +1074,16 @@ A helper function to use in the `load()` method of a pipeline package's > return load_model_from_init_py(__file__, **overrides) > ``` -| Name | Description | -| ------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `init_file` | Path to package's `__init__.py`, i.e. `__file__`. ~~Union[str, Path]~~ | -| _keyword-only_ | | -| `vocab` 3 | Optional shared vocab to pass in on initialization. If `True` (default), a new `Vocab` object will be created. ~~Union[Vocab, bool]~~ | -| `disable` | Names of pipeline components to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [nlp.enable_pipe](/api/language#enable_pipe). ~~List[str]~~ | -| `exclude` 3 | Names of pipeline components to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~List[str]~~ | -| `config` 3 | Config overrides as nested dict or flat dict keyed by section values in dot notation, e.g. `"nlp.pipeline"`. ~~Union[Dict[str, Any], Config]~~ | -| **RETURNS** | `Language` class with the loaded pipeline. ~~Language~~ | +| Name | Description | +| ------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `init_file` | Path to package's `__init__.py`, i.e. `__file__`. ~~Union[str, Path]~~ | +| _keyword-only_ | | +| `vocab` 3 | Optional shared vocab to pass in on initialization. If `True` (default), a new `Vocab` object will be created. ~~Union[Vocab, bool]~~ | +| `disable` | Name(s) of pipeline component(s) to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [`nlp.enable_pipe`](/api/language#enable_pipe). ~~Union[str, Iterable[str]]~~ | +| `enable` 3.4 | Name(s) of pipeline component(s) to [enable](/usage/processing-pipelines#disabling). All other pipes will be disabled, but can be enabled again using [`nlp.enable_pipe`](/api/language#enable_pipe). ~~Union[str, Iterable[str]]~~ | +| `exclude` 3 | Name(s) of pipeline component(s) to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~Union[str, Iterable[str]]~~ | +| `config` 3 | Config overrides as nested dict or flat dict keyed by section values in dot notation, e.g. `"nlp.pipeline"`. ~~Union[Dict[str, Any], Config]~~ | +| **RETURNS** | `Language` class with the loaded pipeline. 
~~Language~~ | ### util.load_config {#util.load_config tag="function" new="3"} From 604a7c3c26bcc6737a9676c3ba1b16c9ac705be3 Mon Sep 17 00:00:00 2001 From: Madeesh Kannan Date: Wed, 31 Aug 2022 09:03:20 +0200 Subject: [PATCH 08/82] `SpanGroup(s)`-related optimizations (#11380) * `SpanGroup`: Add support for binding copies to a new reference document * `SpanGroups`: Replace superfluous serialize-deserialize roundtrip in `copy` Instead, directly copy the in-memory representations of the constituent `SpanGroup`s. * Update `SpanGroup.copy()` signature * Rename `new_doc` param to `doc` * Fix kwdarg * Update `.pyi` file and docstrings * `mypy` fix * Update spacy/tokens/span_group.pyx * Update docs Co-authored-by: Adriane Boyd --- spacy/tokens/_dict_proxies.py | 3 ++- spacy/tokens/span_group.pyi | 4 ++-- spacy/tokens/span_group.pyx | 7 +++++-- website/docs/api/spangroup.md | 7 ++++--- 4 files changed, 13 insertions(+), 8 deletions(-) diff --git a/spacy/tokens/_dict_proxies.py b/spacy/tokens/_dict_proxies.py index 9630da261..6edcce13d 100644 --- a/spacy/tokens/_dict_proxies.py +++ b/spacy/tokens/_dict_proxies.py @@ -42,7 +42,8 @@ class SpanGroups(UserDict): def copy(self, doc: Optional["Doc"] = None) -> "SpanGroups": if doc is None: doc = self._ensure_doc() - return SpanGroups(doc).from_bytes(self.to_bytes()) + data_copy = ((k, v.copy(doc=doc)) for k, v in self.items()) + return SpanGroups(doc, items=data_copy) def setdefault(self, key, default=None): if not isinstance(default, SpanGroup): diff --git a/spacy/tokens/span_group.pyi b/spacy/tokens/span_group.pyi index 245eb4dbe..21cd124ab 100644 --- a/spacy/tokens/span_group.pyi +++ b/spacy/tokens/span_group.pyi @@ -1,4 +1,4 @@ -from typing import Any, Dict, Iterable +from typing import Any, Dict, Iterable, Optional from .doc import Doc from .span import Span @@ -24,4 +24,4 @@ class SpanGroup: def __getitem__(self, i: int) -> Span: ... def to_bytes(self) -> bytes: ... def from_bytes(self, bytes_data: bytes) -> SpanGroup: ... - def copy(self) -> SpanGroup: ... + def copy(self, doc: Optional[Doc] = ...) -> SpanGroup: ... diff --git a/spacy/tokens/span_group.pyx b/spacy/tokens/span_group.pyx index bb0fab24f..1aa3c0bc8 100644 --- a/spacy/tokens/span_group.pyx +++ b/spacy/tokens/span_group.pyx @@ -241,15 +241,18 @@ cdef class SpanGroup: cdef void push_back(self, SpanC span) nogil: self.c.push_back(span) - def copy(self) -> SpanGroup: + def copy(self, doc: Optional["Doc"] = None) -> SpanGroup: """Clones the span group. + doc (Doc): New reference document to which the copy is bound. RETURNS (SpanGroup): A copy of the span group. DOCS: https://spacy.io/api/spangroup#copy """ + if doc is None: + doc = self.doc return SpanGroup( - self.doc, + doc, name=self.name, attrs=deepcopy(self.attrs), spans=list(self), diff --git a/website/docs/api/spangroup.md b/website/docs/api/spangroup.md index 8dbdefc01..2d1cf73c4 100644 --- a/website/docs/api/spangroup.md +++ b/website/docs/api/spangroup.md @@ -255,9 +255,10 @@ Return a copy of the span group. > new_group = doc.spans["errors"].copy() > ``` -| Name | Description | -| ----------- | ----------------------------------------------- | -| **RETURNS** | A copy of the `SpanGroup` object. ~~SpanGroup~~ | +| Name | Description | +| ----------- | -------------------------------------------------------------------------------------------------- | +| `doc` | The document to which the copy is bound. Defaults to `None` for the current doc. ~~Optional[Doc]~~ | +| **RETURNS** | A copy of the `SpanGroup` object. 
~~SpanGroup~~ | ## SpanGroup.to_bytes {#to_bytes tag="method"} From 78f5503a29b3ab27b860220499346b79d26e666b Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Thu, 1 Sep 2022 19:37:23 +0200 Subject: [PATCH 09/82] Check for any non-Doc returned value for components (#11424) --- spacy/errors.py | 5 +++-- spacy/language.py | 4 ++-- spacy/tests/test_language.py | 22 ++++++++++++++++++++++ 3 files changed, 27 insertions(+), 4 deletions(-) diff --git a/spacy/errors.py b/spacy/errors.py index 608305a06..5ee1476c2 100644 --- a/spacy/errors.py +++ b/spacy/errors.py @@ -230,8 +230,9 @@ class Errors(metaclass=ErrorsWithCodes): "initialized component.") E004 = ("Can't set up pipeline component: a factory for '{name}' already " "exists. Existing factory: {func}. New factory: {new_func}") - E005 = ("Pipeline component '{name}' returned None. If you're using a " - "custom component, maybe you forgot to return the processed Doc?") + E005 = ("Pipeline component '{name}' returned {returned_type} instead of a " + "Doc. If you're using a custom component, maybe you forgot to " + "return the processed Doc?") E006 = ("Invalid constraints for adding pipeline component. You can only " "set one of the following: before (component name or index), " "after (component name or index), first (True) or last (True). " diff --git a/spacy/language.py b/spacy/language.py index ec330753c..34a06e576 100644 --- a/spacy/language.py +++ b/spacy/language.py @@ -1028,8 +1028,8 @@ class Language: raise ValueError(Errors.E109.format(name=name)) from e except Exception as e: error_handler(name, proc, [doc], e) - if doc is None: - raise ValueError(Errors.E005.format(name=name)) + if not isinstance(doc, Doc): + raise ValueError(Errors.E005.format(name=name, returned_type=type(doc))) return doc def disable_pipes(self, *names) -> "DisabledPipes": diff --git a/spacy/tests/test_language.py b/spacy/tests/test_language.py index 6f3ba8acc..03a98d32f 100644 --- a/spacy/tests/test_language.py +++ b/spacy/tests/test_language.py @@ -670,3 +670,25 @@ def test_dot_in_factory_names(nlp): with pytest.raises(ValueError, match="not permitted"): Language.factory("my.evil.component.v1", func=evil_component) + + +def test_component_return(): + """Test that an error is raised if components return a type other than a + doc.""" + nlp = English() + + @Language.component("test_component_good_pipe") + def good_pipe(doc): + return doc + + nlp.add_pipe("test_component_good_pipe") + nlp("text") + nlp.remove_pipe("test_component_good_pipe") + + @Language.component("test_component_bad_pipe") + def bad_pipe(doc): + return doc.text + + nlp.add_pipe("test_component_bad_pipe") + with pytest.raises(ValueError, match="instead of a Doc"): + nlp("text") From 4a615cacd2af35bbbcf9e735da19ce92480b6cf6 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Fri, 2 Sep 2022 09:08:40 +0200 Subject: [PATCH 10/82] Consolidate and freeze symbols (#11352) * Consolidate and freeze symbols Instead of having symbol values defined in three potentially conflicting places (`spacy.attrs`, `spacy.parts_of_speech`, `spacy.symbols`), define all symbols in `spacy.symbols` and reference those values in `spacy.attrs` and `spacy.parts_of_speech`. Remove deprecated and placeholder symbols from `spacy.attrs.IDS`. Make `spacy.attrs.NAMES` and `spacy.symbols.NAMES` reverse dicts rather than lists in order to support future use of hash values in `attr_id_t`. 
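As a minimal sketch (using the frozen values `ORTH = 65` and `ENT_ID = 454` from `spacy.symbols`), a reverse dict keyed by symbol value keeps name lookup working even when the IDs are sparse, which a list indexed by position cannot support:

    IDS = {"ORTH": 65, "ENT_ID": 454}       # sparse symbol values, not dense list indices
    NAMES = {v: k for k, v in IDS.items()}  # reverse dict: value -> name
    assert NAMES[454] == "ENT_ID"           # a plain list would need 455 slots for this lookup
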
Minor changes: * Use `uint64_t` for attrs in `Doc.to_array` to support future use of hash values * Remove unneeded attrs filter for error message in `Doc.to_array` * Remove unused attr `SENT_END` * Handle dynamic size of attr_id_t in Doc.to_array * Undo added warnings * Refactor to make Doc.to_array more similar to Doc.from_array * Improve refactoring --- spacy/attrs.pxd | 129 +++------- spacy/attrs.pyx | 49 +--- spacy/parts_of_speech.pxd | 38 +-- spacy/schemas.py | 2 +- spacy/strings.pyx | 4 +- spacy/symbols.pxd | 15 +- spacy/symbols.pyx | 6 +- spacy/tests/test_symbols.py | 467 ++++++++++++++++++++++++++++++++++++ spacy/tokens/doc.pyx | 20 +- 9 files changed, 551 insertions(+), 179 deletions(-) create mode 100644 spacy/tests/test_symbols.py diff --git a/spacy/attrs.pxd b/spacy/attrs.pxd index 33d5372de..b8a7a1f08 100644 --- a/spacy/attrs.pxd +++ b/spacy/attrs.pxd @@ -1,98 +1,49 @@ -# Reserve 64 values for flag features from . cimport symbols cdef enum attr_id_t: - NULL_ATTR - IS_ALPHA - IS_ASCII - IS_DIGIT - IS_LOWER - IS_PUNCT - IS_SPACE - IS_TITLE - IS_UPPER - LIKE_URL - LIKE_NUM - LIKE_EMAIL - IS_STOP - IS_OOV_DEPRECATED - IS_BRACKET - IS_QUOTE - IS_LEFT_PUNCT - IS_RIGHT_PUNCT - IS_CURRENCY + NULL_ATTR = 0 + IS_ALPHA = symbols.IS_ALPHA + IS_ASCII = symbols.IS_ASCII + IS_DIGIT = symbols.IS_DIGIT + IS_LOWER = symbols.IS_LOWER + IS_PUNCT = symbols.IS_PUNCT + IS_SPACE = symbols.IS_SPACE + IS_TITLE = symbols.IS_TITLE + IS_UPPER = symbols.IS_UPPER + LIKE_URL = symbols.LIKE_URL + LIKE_NUM = symbols.LIKE_NUM + LIKE_EMAIL = symbols.LIKE_EMAIL + IS_STOP = symbols.IS_STOP + IS_BRACKET = symbols.IS_BRACKET + IS_QUOTE = symbols.IS_QUOTE + IS_LEFT_PUNCT = symbols.IS_LEFT_PUNCT + IS_RIGHT_PUNCT = symbols.IS_RIGHT_PUNCT + IS_CURRENCY = symbols.IS_CURRENCY - FLAG19 = 19 - FLAG20 - FLAG21 - FLAG22 - FLAG23 - FLAG24 - FLAG25 - FLAG26 - FLAG27 - FLAG28 - FLAG29 - FLAG30 - FLAG31 - FLAG32 - FLAG33 - FLAG34 - FLAG35 - FLAG36 - FLAG37 - FLAG38 - FLAG39 - FLAG40 - FLAG41 - FLAG42 - FLAG43 - FLAG44 - FLAG45 - FLAG46 - FLAG47 - FLAG48 - FLAG49 - FLAG50 - FLAG51 - FLAG52 - FLAG53 - FLAG54 - FLAG55 - FLAG56 - FLAG57 - FLAG58 - FLAG59 - FLAG60 - FLAG61 - FLAG62 - FLAG63 + ID = symbols.ID + ORTH = symbols.ORTH + LOWER = symbols.LOWER + NORM = symbols.NORM + SHAPE = symbols.SHAPE + PREFIX = symbols.PREFIX + SUFFIX = symbols.SUFFIX - ID - ORTH - LOWER - NORM - SHAPE - PREFIX - SUFFIX + LENGTH = symbols.LENGTH + CLUSTER = symbols.CLUSTER + LEMMA = symbols.LEMMA + POS = symbols.POS + TAG = symbols.TAG + DEP = symbols.DEP + ENT_IOB = symbols.ENT_IOB + ENT_TYPE = symbols.ENT_TYPE + HEAD = symbols.HEAD + SENT_START = symbols.SENT_START + SPACY = symbols.SPACY + PROB = symbols.PROB - LENGTH - CLUSTER - LEMMA - POS - TAG - DEP - ENT_IOB - ENT_TYPE - HEAD - SENT_START - SPACY - PROB - - LANG + LANG = symbols.LANG ENT_KB_ID = symbols.ENT_KB_ID - MORPH + MORPH = symbols.MORPH ENT_ID = symbols.ENT_ID - IDX - SENT_END \ No newline at end of file + IDX = symbols.IDX diff --git a/spacy/attrs.pyx b/spacy/attrs.pyx index 7b6fd9e94..9b0ae3400 100644 --- a/spacy/attrs.pyx +++ b/spacy/attrs.pyx @@ -16,57 +16,11 @@ IDS = { "LIKE_NUM": LIKE_NUM, "LIKE_EMAIL": LIKE_EMAIL, "IS_STOP": IS_STOP, - "IS_OOV_DEPRECATED": IS_OOV_DEPRECATED, "IS_BRACKET": IS_BRACKET, "IS_QUOTE": IS_QUOTE, "IS_LEFT_PUNCT": IS_LEFT_PUNCT, "IS_RIGHT_PUNCT": IS_RIGHT_PUNCT, "IS_CURRENCY": IS_CURRENCY, - "FLAG19": FLAG19, - "FLAG20": FLAG20, - "FLAG21": FLAG21, - "FLAG22": FLAG22, - "FLAG23": FLAG23, - "FLAG24": FLAG24, - "FLAG25": FLAG25, - "FLAG26": FLAG26, - 
"FLAG27": FLAG27, - "FLAG28": FLAG28, - "FLAG29": FLAG29, - "FLAG30": FLAG30, - "FLAG31": FLAG31, - "FLAG32": FLAG32, - "FLAG33": FLAG33, - "FLAG34": FLAG34, - "FLAG35": FLAG35, - "FLAG36": FLAG36, - "FLAG37": FLAG37, - "FLAG38": FLAG38, - "FLAG39": FLAG39, - "FLAG40": FLAG40, - "FLAG41": FLAG41, - "FLAG42": FLAG42, - "FLAG43": FLAG43, - "FLAG44": FLAG44, - "FLAG45": FLAG45, - "FLAG46": FLAG46, - "FLAG47": FLAG47, - "FLAG48": FLAG48, - "FLAG49": FLAG49, - "FLAG50": FLAG50, - "FLAG51": FLAG51, - "FLAG52": FLAG52, - "FLAG53": FLAG53, - "FLAG54": FLAG54, - "FLAG55": FLAG55, - "FLAG56": FLAG56, - "FLAG57": FLAG57, - "FLAG58": FLAG58, - "FLAG59": FLAG59, - "FLAG60": FLAG60, - "FLAG61": FLAG61, - "FLAG62": FLAG62, - "FLAG63": FLAG63, "ID": ID, "ORTH": ORTH, "LOWER": LOWER, @@ -92,8 +46,7 @@ IDS = { } -# ATTR IDs, in order of the symbol -NAMES = [key for key, value in sorted(IDS.items(), key=lambda item: item[1])] +NAMES = {v: k for k, v in IDS.items()} locals().update(IDS) diff --git a/spacy/parts_of_speech.pxd b/spacy/parts_of_speech.pxd index 0bf5b4789..67390ad63 100644 --- a/spacy/parts_of_speech.pxd +++ b/spacy/parts_of_speech.pxd @@ -3,22 +3,22 @@ from . cimport symbols cpdef enum univ_pos_t: NO_TAG = 0 ADJ = symbols.ADJ - ADP - ADV - AUX - CONJ - CCONJ # U20 - DET - INTJ - NOUN - NUM - PART - PRON - PROPN - PUNCT - SCONJ - SYM - VERB - X - EOL - SPACE + ADP = symbols.ADP + ADV = symbols.ADV + AUX = symbols.AUX + CONJ = symbols.CONJ + CCONJ = symbols.CCONJ # U20 + DET = symbols.DET + INTJ = symbols.INTJ + NOUN = symbols.NOUN + NUM = symbols.NUM + PART = symbols.PART + PRON = symbols.PRON + PROPN = symbols.PROPN + PUNCT = symbols.PUNCT + SCONJ = symbols.SCONJ + SYM = symbols.SYM + VERB = symbols.VERB + X = symbols.X + EOL = symbols.EOL + SPACE = symbols.SPACE diff --git a/spacy/schemas.py b/spacy/schemas.py index 048082134..a38421fa0 100644 --- a/spacy/schemas.py +++ b/spacy/schemas.py @@ -144,7 +144,7 @@ def validate_init_settings( def validate_token_pattern(obj: list) -> List[str]: # Try to convert non-string keys (e.g. {ORTH: "foo"} -> {"ORTH": "foo"}) - get_key = lambda k: NAMES[k] if isinstance(k, int) and k < len(NAMES) else k + get_key = lambda k: NAMES[k] if isinstance(k, int) and k in NAMES else k if isinstance(obj, list): converted = [] for pattern in obj: diff --git a/spacy/strings.pyx b/spacy/strings.pyx index c5f218342..e86682733 100644 --- a/spacy/strings.pyx +++ b/spacy/strings.pyx @@ -147,7 +147,7 @@ cdef class StringStore: elif _try_coerce_to_hash(string_or_id, &str_hash): if str_hash == 0: return "" - elif str_hash < len(SYMBOLS_BY_INT): + elif str_hash in SYMBOLS_BY_INT: return SYMBOLS_BY_INT[str_hash] else: utf8str = self._map.get(str_hash) @@ -223,7 +223,7 @@ cdef class StringStore: # TODO: Raise an error instead return self._map.get(string_or_id) is not NULL - if str_hash < len(SYMBOLS_BY_INT): + if str_hash in SYMBOLS_BY_INT: return True else: return self._map.get(str_hash) is not NULL diff --git a/spacy/symbols.pxd b/spacy/symbols.pxd index bc15d9b80..f5d7784dc 100644 --- a/spacy/symbols.pxd +++ b/spacy/symbols.pxd @@ -1,5 +1,6 @@ +# DO NOT EDIT! The symbols are frozen as of spaCy v3.0.0. 
cdef enum symbol_t: - NIL + NIL = 0 IS_ALPHA IS_ASCII IS_DIGIT @@ -65,7 +66,7 @@ cdef enum symbol_t: FLAG62 FLAG63 - ID + ID = 64 ORTH LOWER NORM @@ -385,7 +386,7 @@ cdef enum symbol_t: DEPRECATED275 DEPRECATED276 - PERSON + PERSON = 380 NORP FACILITY ORG @@ -405,7 +406,7 @@ cdef enum symbol_t: ORDINAL CARDINAL - acomp + acomp = 398 advcl advmod agent @@ -458,12 +459,12 @@ cdef enum symbol_t: rcmod root xcomp - acl - ENT_KB_ID + ENT_KB_ID = 452 MORPH ENT_ID IDX - _ + _ = 456 + # DO NOT ADD ANY NEW SYMBOLS! diff --git a/spacy/symbols.pyx b/spacy/symbols.pyx index b0345c710..fbfc6f10d 100644 --- a/spacy/symbols.pyx +++ b/spacy/symbols.pyx @@ -469,11 +469,7 @@ IDS = { } -def sort_nums(x): - return x[1] - - -NAMES = [it[0] for it in sorted(IDS.items(), key=sort_nums)] +NAMES = {v: k for k, v in IDS.items()} # Unfortunate hack here, to work around problem with long cpdef enum # (which is generating an enormous amount of C++ in Cython 0.24+) # We keep the enum cdef, and just make sure the names are available to Python diff --git a/spacy/tests/test_symbols.py b/spacy/tests/test_symbols.py new file mode 100644 index 000000000..fb034acca --- /dev/null +++ b/spacy/tests/test_symbols.py @@ -0,0 +1,467 @@ +import pytest +from spacy.symbols import IDS, NAMES + +V3_SYMBOLS = { + "": 0, + "IS_ALPHA": 1, + "IS_ASCII": 2, + "IS_DIGIT": 3, + "IS_LOWER": 4, + "IS_PUNCT": 5, + "IS_SPACE": 6, + "IS_TITLE": 7, + "IS_UPPER": 8, + "LIKE_URL": 9, + "LIKE_NUM": 10, + "LIKE_EMAIL": 11, + "IS_STOP": 12, + "IS_OOV_DEPRECATED": 13, + "IS_BRACKET": 14, + "IS_QUOTE": 15, + "IS_LEFT_PUNCT": 16, + "IS_RIGHT_PUNCT": 17, + "IS_CURRENCY": 18, + "FLAG19": 19, + "FLAG20": 20, + "FLAG21": 21, + "FLAG22": 22, + "FLAG23": 23, + "FLAG24": 24, + "FLAG25": 25, + "FLAG26": 26, + "FLAG27": 27, + "FLAG28": 28, + "FLAG29": 29, + "FLAG30": 30, + "FLAG31": 31, + "FLAG32": 32, + "FLAG33": 33, + "FLAG34": 34, + "FLAG35": 35, + "FLAG36": 36, + "FLAG37": 37, + "FLAG38": 38, + "FLAG39": 39, + "FLAG40": 40, + "FLAG41": 41, + "FLAG42": 42, + "FLAG43": 43, + "FLAG44": 44, + "FLAG45": 45, + "FLAG46": 46, + "FLAG47": 47, + "FLAG48": 48, + "FLAG49": 49, + "FLAG50": 50, + "FLAG51": 51, + "FLAG52": 52, + "FLAG53": 53, + "FLAG54": 54, + "FLAG55": 55, + "FLAG56": 56, + "FLAG57": 57, + "FLAG58": 58, + "FLAG59": 59, + "FLAG60": 60, + "FLAG61": 61, + "FLAG62": 62, + "FLAG63": 63, + "ID": 64, + "ORTH": 65, + "LOWER": 66, + "NORM": 67, + "SHAPE": 68, + "PREFIX": 69, + "SUFFIX": 70, + "LENGTH": 71, + "CLUSTER": 72, + "LEMMA": 73, + "POS": 74, + "TAG": 75, + "DEP": 76, + "ENT_IOB": 77, + "ENT_TYPE": 78, + "ENT_ID": 454, + "ENT_KB_ID": 452, + "HEAD": 79, + "SENT_START": 80, + "SPACY": 81, + "PROB": 82, + "LANG": 83, + "IDX": 455, + "ADJ": 84, + "ADP": 85, + "ADV": 86, + "AUX": 87, + "CONJ": 88, + "CCONJ": 89, + "DET": 90, + "INTJ": 91, + "NOUN": 92, + "NUM": 93, + "PART": 94, + "PRON": 95, + "PROPN": 96, + "PUNCT": 97, + "SCONJ": 98, + "SYM": 99, + "VERB": 100, + "X": 101, + "EOL": 102, + "SPACE": 103, + "DEPRECATED001": 104, + "DEPRECATED002": 105, + "DEPRECATED003": 106, + "DEPRECATED004": 107, + "DEPRECATED005": 108, + "DEPRECATED006": 109, + "DEPRECATED007": 110, + "DEPRECATED008": 111, + "DEPRECATED009": 112, + "DEPRECATED010": 113, + "DEPRECATED011": 114, + "DEPRECATED012": 115, + "DEPRECATED013": 116, + "DEPRECATED014": 117, + "DEPRECATED015": 118, + "DEPRECATED016": 119, + "DEPRECATED017": 120, + "DEPRECATED018": 121, + "DEPRECATED019": 122, + "DEPRECATED020": 123, + "DEPRECATED021": 124, + "DEPRECATED022": 125, + "DEPRECATED023": 126, + "DEPRECATED024": 
127, + "DEPRECATED025": 128, + "DEPRECATED026": 129, + "DEPRECATED027": 130, + "DEPRECATED028": 131, + "DEPRECATED029": 132, + "DEPRECATED030": 133, + "DEPRECATED031": 134, + "DEPRECATED032": 135, + "DEPRECATED033": 136, + "DEPRECATED034": 137, + "DEPRECATED035": 138, + "DEPRECATED036": 139, + "DEPRECATED037": 140, + "DEPRECATED038": 141, + "DEPRECATED039": 142, + "DEPRECATED040": 143, + "DEPRECATED041": 144, + "DEPRECATED042": 145, + "DEPRECATED043": 146, + "DEPRECATED044": 147, + "DEPRECATED045": 148, + "DEPRECATED046": 149, + "DEPRECATED047": 150, + "DEPRECATED048": 151, + "DEPRECATED049": 152, + "DEPRECATED050": 153, + "DEPRECATED051": 154, + "DEPRECATED052": 155, + "DEPRECATED053": 156, + "DEPRECATED054": 157, + "DEPRECATED055": 158, + "DEPRECATED056": 159, + "DEPRECATED057": 160, + "DEPRECATED058": 161, + "DEPRECATED059": 162, + "DEPRECATED060": 163, + "DEPRECATED061": 164, + "DEPRECATED062": 165, + "DEPRECATED063": 166, + "DEPRECATED064": 167, + "DEPRECATED065": 168, + "DEPRECATED066": 169, + "DEPRECATED067": 170, + "DEPRECATED068": 171, + "DEPRECATED069": 172, + "DEPRECATED070": 173, + "DEPRECATED071": 174, + "DEPRECATED072": 175, + "DEPRECATED073": 176, + "DEPRECATED074": 177, + "DEPRECATED075": 178, + "DEPRECATED076": 179, + "DEPRECATED077": 180, + "DEPRECATED078": 181, + "DEPRECATED079": 182, + "DEPRECATED080": 183, + "DEPRECATED081": 184, + "DEPRECATED082": 185, + "DEPRECATED083": 186, + "DEPRECATED084": 187, + "DEPRECATED085": 188, + "DEPRECATED086": 189, + "DEPRECATED087": 190, + "DEPRECATED088": 191, + "DEPRECATED089": 192, + "DEPRECATED090": 193, + "DEPRECATED091": 194, + "DEPRECATED092": 195, + "DEPRECATED093": 196, + "DEPRECATED094": 197, + "DEPRECATED095": 198, + "DEPRECATED096": 199, + "DEPRECATED097": 200, + "DEPRECATED098": 201, + "DEPRECATED099": 202, + "DEPRECATED100": 203, + "DEPRECATED101": 204, + "DEPRECATED102": 205, + "DEPRECATED103": 206, + "DEPRECATED104": 207, + "DEPRECATED105": 208, + "DEPRECATED106": 209, + "DEPRECATED107": 210, + "DEPRECATED108": 211, + "DEPRECATED109": 212, + "DEPRECATED110": 213, + "DEPRECATED111": 214, + "DEPRECATED112": 215, + "DEPRECATED113": 216, + "DEPRECATED114": 217, + "DEPRECATED115": 218, + "DEPRECATED116": 219, + "DEPRECATED117": 220, + "DEPRECATED118": 221, + "DEPRECATED119": 222, + "DEPRECATED120": 223, + "DEPRECATED121": 224, + "DEPRECATED122": 225, + "DEPRECATED123": 226, + "DEPRECATED124": 227, + "DEPRECATED125": 228, + "DEPRECATED126": 229, + "DEPRECATED127": 230, + "DEPRECATED128": 231, + "DEPRECATED129": 232, + "DEPRECATED130": 233, + "DEPRECATED131": 234, + "DEPRECATED132": 235, + "DEPRECATED133": 236, + "DEPRECATED134": 237, + "DEPRECATED135": 238, + "DEPRECATED136": 239, + "DEPRECATED137": 240, + "DEPRECATED138": 241, + "DEPRECATED139": 242, + "DEPRECATED140": 243, + "DEPRECATED141": 244, + "DEPRECATED142": 245, + "DEPRECATED143": 246, + "DEPRECATED144": 247, + "DEPRECATED145": 248, + "DEPRECATED146": 249, + "DEPRECATED147": 250, + "DEPRECATED148": 251, + "DEPRECATED149": 252, + "DEPRECATED150": 253, + "DEPRECATED151": 254, + "DEPRECATED152": 255, + "DEPRECATED153": 256, + "DEPRECATED154": 257, + "DEPRECATED155": 258, + "DEPRECATED156": 259, + "DEPRECATED157": 260, + "DEPRECATED158": 261, + "DEPRECATED159": 262, + "DEPRECATED160": 263, + "DEPRECATED161": 264, + "DEPRECATED162": 265, + "DEPRECATED163": 266, + "DEPRECATED164": 267, + "DEPRECATED165": 268, + "DEPRECATED166": 269, + "DEPRECATED167": 270, + "DEPRECATED168": 271, + "DEPRECATED169": 272, + "DEPRECATED170": 273, + "DEPRECATED171": 274, + "DEPRECATED172": 
275, + "DEPRECATED173": 276, + "DEPRECATED174": 277, + "DEPRECATED175": 278, + "DEPRECATED176": 279, + "DEPRECATED177": 280, + "DEPRECATED178": 281, + "DEPRECATED179": 282, + "DEPRECATED180": 283, + "DEPRECATED181": 284, + "DEPRECATED182": 285, + "DEPRECATED183": 286, + "DEPRECATED184": 287, + "DEPRECATED185": 288, + "DEPRECATED186": 289, + "DEPRECATED187": 290, + "DEPRECATED188": 291, + "DEPRECATED189": 292, + "DEPRECATED190": 293, + "DEPRECATED191": 294, + "DEPRECATED192": 295, + "DEPRECATED193": 296, + "DEPRECATED194": 297, + "DEPRECATED195": 298, + "DEPRECATED196": 299, + "DEPRECATED197": 300, + "DEPRECATED198": 301, + "DEPRECATED199": 302, + "DEPRECATED200": 303, + "DEPRECATED201": 304, + "DEPRECATED202": 305, + "DEPRECATED203": 306, + "DEPRECATED204": 307, + "DEPRECATED205": 308, + "DEPRECATED206": 309, + "DEPRECATED207": 310, + "DEPRECATED208": 311, + "DEPRECATED209": 312, + "DEPRECATED210": 313, + "DEPRECATED211": 314, + "DEPRECATED212": 315, + "DEPRECATED213": 316, + "DEPRECATED214": 317, + "DEPRECATED215": 318, + "DEPRECATED216": 319, + "DEPRECATED217": 320, + "DEPRECATED218": 321, + "DEPRECATED219": 322, + "DEPRECATED220": 323, + "DEPRECATED221": 324, + "DEPRECATED222": 325, + "DEPRECATED223": 326, + "DEPRECATED224": 327, + "DEPRECATED225": 328, + "DEPRECATED226": 329, + "DEPRECATED227": 330, + "DEPRECATED228": 331, + "DEPRECATED229": 332, + "DEPRECATED230": 333, + "DEPRECATED231": 334, + "DEPRECATED232": 335, + "DEPRECATED233": 336, + "DEPRECATED234": 337, + "DEPRECATED235": 338, + "DEPRECATED236": 339, + "DEPRECATED237": 340, + "DEPRECATED238": 341, + "DEPRECATED239": 342, + "DEPRECATED240": 343, + "DEPRECATED241": 344, + "DEPRECATED242": 345, + "DEPRECATED243": 346, + "DEPRECATED244": 347, + "DEPRECATED245": 348, + "DEPRECATED246": 349, + "DEPRECATED247": 350, + "DEPRECATED248": 351, + "DEPRECATED249": 352, + "DEPRECATED250": 353, + "DEPRECATED251": 354, + "DEPRECATED252": 355, + "DEPRECATED253": 356, + "DEPRECATED254": 357, + "DEPRECATED255": 358, + "DEPRECATED256": 359, + "DEPRECATED257": 360, + "DEPRECATED258": 361, + "DEPRECATED259": 362, + "DEPRECATED260": 363, + "DEPRECATED261": 364, + "DEPRECATED262": 365, + "DEPRECATED263": 366, + "DEPRECATED264": 367, + "DEPRECATED265": 368, + "DEPRECATED266": 369, + "DEPRECATED267": 370, + "DEPRECATED268": 371, + "DEPRECATED269": 372, + "DEPRECATED270": 373, + "DEPRECATED271": 374, + "DEPRECATED272": 375, + "DEPRECATED273": 376, + "DEPRECATED274": 377, + "DEPRECATED275": 378, + "DEPRECATED276": 379, + "PERSON": 380, + "NORP": 381, + "FACILITY": 382, + "ORG": 383, + "GPE": 384, + "LOC": 385, + "PRODUCT": 386, + "EVENT": 387, + "WORK_OF_ART": 388, + "LANGUAGE": 389, + "DATE": 391, + "TIME": 392, + "PERCENT": 393, + "MONEY": 394, + "QUANTITY": 395, + "ORDINAL": 396, + "CARDINAL": 397, + "acomp": 398, + "advcl": 399, + "advmod": 400, + "agent": 401, + "amod": 402, + "appos": 403, + "attr": 404, + "aux": 405, + "auxpass": 406, + "cc": 407, + "ccomp": 408, + "complm": 409, + "conj": 410, + "cop": 411, + "csubj": 412, + "csubjpass": 413, + "dep": 414, + "det": 415, + "dobj": 416, + "expl": 417, + "hmod": 418, + "hyph": 419, + "infmod": 420, + "intj": 421, + "iobj": 422, + "mark": 423, + "meta": 424, + "neg": 425, + "nmod": 426, + "nn": 427, + "npadvmod": 428, + "nsubj": 429, + "nsubjpass": 430, + "num": 431, + "number": 432, + "oprd": 433, + "obj": 434, + "obl": 435, + "parataxis": 436, + "partmod": 437, + "pcomp": 438, + "pobj": 439, + "poss": 440, + "possessive": 441, + "preconj": 442, + "prep": 443, + "prt": 444, + "punct": 445, + 
"quantmod": 446, + "rcmod": 448, + "relcl": 447, + "root": 449, + "xcomp": 450, + "acl": 451, + "LAW": 390, + "MORPH": 453, + "_": 456, +} + + +def test_frozen_symbols(): + assert IDS == V3_SYMBOLS + assert NAMES == {v: k for k, v in IDS.items()} diff --git a/spacy/tokens/doc.pyx b/spacy/tokens/doc.pyx index 2956f357c..85d76efb3 100644 --- a/spacy/tokens/doc.pyx +++ b/spacy/tokens/doc.pyx @@ -974,22 +974,26 @@ cdef class Doc: py_attr_ids = [(IDS[id_.upper()] if hasattr(id_, "upper") else id_) for id_ in py_attr_ids] except KeyError as msg: - keys = [k for k in IDS.keys() if not k.startswith("FLAG")] + keys = list(IDS.keys()) raise KeyError(Errors.E983.format(dict="IDS", key=msg, keys=keys)) from None # Make an array from the attributes --- otherwise our inner loop is # Python dict iteration. - cdef np.ndarray attr_ids = numpy.asarray(py_attr_ids, dtype="i") - output = numpy.ndarray(shape=(self.length, len(attr_ids)), dtype=numpy.uint64) + cdef Pool mem = Pool() + cdef int n_attrs = len(py_attr_ids) + cdef attr_id_t* c_attr_ids + if n_attrs > 0: + c_attr_ids = mem.alloc(n_attrs, sizeof(attr_id_t)) + for i, attr_id in enumerate(py_attr_ids): + c_attr_ids[i] = attr_id + output = numpy.ndarray(shape=(self.length, n_attrs), dtype=numpy.uint64) c_output = output.data - c_attr_ids = attr_ids.data cdef TokenC* token - cdef int nr_attr = attr_ids.shape[0] for i in range(self.length): token = &self.c[i] - for j in range(nr_attr): - c_output[i*nr_attr + j] = get_token_attr(token, c_attr_ids[j]) + for j in range(n_attrs): + c_output[i*n_attrs + j] = get_token_attr(token, c_attr_ids[j]) # Handle 1d case - return output if len(attr_ids) >= 2 else output.reshape((self.length,)) + return output if n_attrs >= 2 else output.reshape((self.length,)) def count_by(self, attr_id_t attr_id, exclude=None, object counts=None): """Count the frequencies of a given attribute. 
Produces a dict of From d1760ebe027852a10b3ba7c5c7a187859bdae76b Mon Sep 17 00:00:00 2001 From: Madeesh Kannan Date: Fri, 2 Sep 2022 09:09:48 +0200 Subject: [PATCH 11/82] Better handling of unexpected types in `SetPredicate` (#11312) * `Matcher`: Better type checking of values in `SetPredicate` `SetPredicate`: Emit warning and return `False` on unexpected value types * Rename `value_type_mismatch` variable * Inline warning * Remove unexpected type warning from `_SetPredicate` * Ensure that `str` values are not interpreted as sequences Check elements of sequence values for convertibility to `str` or `int` * Add more `INTERSECT` and `IN` test cases * Test for inputs with multiple characters * Return `False` early instead of using a boolean flag * Remove superfluous `int` check, parentheses * Apply suggestions from code review Co-authored-by: Adriane Boyd * Appy suggestions from code review * Clarify test comment Co-authored-by: Adriane Boyd --- spacy/matcher/matcher.pyx | 23 +++++++++++++++-------- spacy/tests/matcher/test_matcher_api.py | 20 +++++++++++++++++++- 2 files changed, 34 insertions(+), 9 deletions(-) diff --git a/spacy/matcher/matcher.pyx b/spacy/matcher/matcher.pyx index 5105f69ed..e1dba01a2 100644 --- a/spacy/matcher/matcher.pyx +++ b/spacy/matcher/matcher.pyx @@ -1,5 +1,5 @@ # cython: infer_types=True, cython: profile=True -from typing import List +from typing import List, Iterable from libcpp.vector cimport vector from libc.stdint cimport int32_t, int8_t @@ -867,20 +867,27 @@ class _SetPredicate: def __call__(self, Token token): if self.is_extension: - value = get_string_id(token._.get(self.attr)) + value = token._.get(self.attr) else: value = get_token_attr_for_matcher(token.c, self.attr) - if self.predicate in ("IS_SUBSET", "IS_SUPERSET", "INTERSECTS"): + if self.predicate in ("IN", "NOT_IN"): + if isinstance(value, (str, int)): + value = get_string_id(value) + else: + return False + elif self.predicate in ("IS_SUBSET", "IS_SUPERSET", "INTERSECTS"): + # ensure that all values are enclosed in a set if self.attr == MORPH: # break up MORPH into individual Feat=Val values value = set(get_string_id(v) for v in MorphAnalysis.from_id(self.vocab, value)) + elif isinstance(value, (str, int)): + value = set((get_string_id(value),)) + elif isinstance(value, Iterable) and all(isinstance(v, (str, int)) for v in value): + value = set(get_string_id(v) for v in value) else: - # treat a single value as a list - if isinstance(value, (str, int)): - value = set([get_string_id(value)]) - else: - value = set(get_string_id(v) for v in value) + return False + if self.predicate == "IN": return value in self.value elif self.predicate == "NOT_IN": diff --git a/spacy/tests/matcher/test_matcher_api.py b/spacy/tests/matcher/test_matcher_api.py index 7c16da9f8..ac905eeb4 100644 --- a/spacy/tests/matcher/test_matcher_api.py +++ b/spacy/tests/matcher/test_matcher_api.py @@ -368,6 +368,16 @@ def test_matcher_intersect_value_operator(en_vocab): doc[0]._.ext = ["A", "B"] assert len(matcher(doc)) == 1 + # INTERSECTS matches nothing for iterables that aren't all str or int + matcher = Matcher(en_vocab) + pattern = [{"_": {"ext": {"INTERSECTS": ["Abx", "C"]}}}] + matcher.add("M", [pattern]) + doc = Doc(en_vocab, words=["a", "b", "c"]) + doc[0]._.ext = [["Abx"], "B"] + assert len(matcher(doc)) == 0 + doc[0]._.ext = ["Abx", "B"] + assert len(matcher(doc)) == 1 + # INTERSECTS with an empty pattern list matches nothing matcher = Matcher(en_vocab) pattern = [{"_": {"ext": {"INTERSECTS": []}}}] @@ -476,14 +486,22 @@ 
def test_matcher_extension_set_membership(en_vocab): assert len(matches) == 0 -@pytest.mark.xfail(reason="IN predicate must handle sequence values in extensions") def test_matcher_extension_in_set_predicate(en_vocab): matcher = Matcher(en_vocab) Token.set_extension("ext", default=[]) pattern = [{"_": {"ext": {"IN": ["A", "C"]}}}] matcher.add("M", [pattern]) doc = Doc(en_vocab, words=["a", "b", "c"]) + + # The IN predicate expects an exact match between the + # extension value and one of the pattern's values. doc[0]._.ext = ["A", "B"] + assert len(matcher(doc)) == 0 + + doc[0]._.ext = ["A"] + assert len(matcher(doc)) == 0 + + doc[0]._.ext = "A" assert len(matcher(doc)) == 1 From 71884d0942c9b45f0ce5408496aec1aff2f0a4b7 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 2 Sep 2022 11:43:20 +0200 Subject: [PATCH 12/82] Auto-format code with black (#11427) Co-authored-by: explosion-bot --- spacy/lang/la/__init__.py | 2 +- spacy/lang/la/lex_attrs.py | 4 +- spacy/lang/la/tokenizer_exceptions.py | 70 ++++++++++++++++++++++----- spacy/tests/conftest.py | 2 +- spacy/tests/lang/la/test_exception.py | 1 + spacy/tests/lang/la/test_text.py | 4 +- 6 files changed, 67 insertions(+), 16 deletions(-) diff --git a/spacy/lang/la/__init__.py b/spacy/lang/la/__init__.py index 5f2cccee3..15b87c5b9 100644 --- a/spacy/lang/la/__init__.py +++ b/spacy/lang/la/__init__.py @@ -6,7 +6,7 @@ from .lex_attrs import LEX_ATTRS class LatinDefaults(BaseDefaults): tokenizer_exceptions = TOKENIZER_EXCEPTIONS - stop_words = STOP_WORDS + stop_words = STOP_WORDS lex_attr_getters = LEX_ATTRS diff --git a/spacy/lang/la/lex_attrs.py b/spacy/lang/la/lex_attrs.py index 9348a811a..9efb4dd3c 100644 --- a/spacy/lang/la/lex_attrs.py +++ b/spacy/lang/la/lex_attrs.py @@ -2,7 +2,9 @@ from ...attrs import LIKE_NUM import re # cf. 
Goyvaerts/Levithan 2009; case-insensitive, allow 4 -roman_numerals_compile = re.compile(r'(?i)^(?=[MDCLXVI])M*(C[MD]|D?C{0,4})(X[CL]|L?X{0,4})(I[XV]|V?I{0,4})$') +roman_numerals_compile = re.compile( + r"(?i)^(?=[MDCLXVI])M*(C[MD]|D?C{0,4})(X[CL]|L?X{0,4})(I[XV]|V?I{0,4})$" +) _num_words = set( """ diff --git a/spacy/lang/la/tokenizer_exceptions.py b/spacy/lang/la/tokenizer_exceptions.py index 905304188..060f6e085 100644 --- a/spacy/lang/la/tokenizer_exceptions.py +++ b/spacy/lang/la/tokenizer_exceptions.py @@ -9,21 +9,67 @@ _exc = { "tecum": [{ORTH: "te"}, {ORTH: "cum"}], "nobiscum": [{ORTH: "nobis"}, {ORTH: "cum"}], "vobiscum": [{ORTH: "vobis"}, {ORTH: "cum"}], - "uobiscum": [{ORTH: "uobis"}, {ORTH: "cum"}], + "uobiscum": [{ORTH: "uobis"}, {ORTH: "cum"}], } for orth in [ - - 'A.', 'Agr.', 'Ap.', 'C.', 'Cn.', 'D.', 'F.', 'K.', 'L.', "M'.", 'M.', 'Mam.', 'N.', 'Oct.', - 'Opet.', 'P.', 'Paul.', 'Post.', 'Pro.', 'Q.', 'S.', 'Ser.', 'Sert.', 'Sex.', 'St.', 'Sta.', - 'T.', 'Ti.', 'V.', 'Vol.', 'Vop.', 'U.', 'Uol.', 'Uop.', - - 'Ian.', 'Febr.', 'Mart.', 'Apr.', 'Mai.', 'Iun.', 'Iul.', 'Aug.', 'Sept.', 'Oct.', 'Nov.', 'Nou.', - 'Dec.', - - 'Non.', 'Id.', 'A.D.', - - 'Coll.', 'Cos.', 'Ord.', 'Pl.', 'S.C.', 'Suff.', 'Trib.', + "A.", + "Agr.", + "Ap.", + "C.", + "Cn.", + "D.", + "F.", + "K.", + "L.", + "M'.", + "M.", + "Mam.", + "N.", + "Oct.", + "Opet.", + "P.", + "Paul.", + "Post.", + "Pro.", + "Q.", + "S.", + "Ser.", + "Sert.", + "Sex.", + "St.", + "Sta.", + "T.", + "Ti.", + "V.", + "Vol.", + "Vop.", + "U.", + "Uol.", + "Uop.", + "Ian.", + "Febr.", + "Mart.", + "Apr.", + "Mai.", + "Iun.", + "Iul.", + "Aug.", + "Sept.", + "Oct.", + "Nov.", + "Nou.", + "Dec.", + "Non.", + "Id.", + "A.D.", + "Coll.", + "Cos.", + "Ord.", + "Pl.", + "S.C.", + "Suff.", + "Trib.", ]: _exc[orth] = [{ORTH: orth}] diff --git a/spacy/tests/conftest.py b/spacy/tests/conftest.py index 0395ba7ca..742bfcc6a 100644 --- a/spacy/tests/conftest.py +++ b/spacy/tests/conftest.py @@ -258,7 +258,7 @@ def ko_tokenizer_tokenizer(): @pytest.fixture(scope="module") def la_tokenizer(): - return get_lang_class("la")().tokenizer + return get_lang_class("la")().tokenizer @pytest.fixture(scope="session") diff --git a/spacy/tests/lang/la/test_exception.py b/spacy/tests/lang/la/test_exception.py index 04bc1d489..966ae22cf 100644 --- a/spacy/tests/lang/la/test_exception.py +++ b/spacy/tests/lang/la/test_exception.py @@ -1,5 +1,6 @@ import pytest + def test_la_tokenizer_handles_exc_in_text(la_tokenizer): text = "scio te omnia facturum, ut nobiscum quam primum sis" tokens = la_tokenizer(text) diff --git a/spacy/tests/lang/la/test_text.py b/spacy/tests/lang/la/test_text.py index 11676b92b..48e7359a4 100644 --- a/spacy/tests/lang/la/test_text.py +++ b/spacy/tests/lang/la/test_text.py @@ -1,6 +1,7 @@ import pytest from spacy.lang.la.lex_attrs import like_num + @pytest.mark.parametrize( "text,match", [ @@ -13,7 +14,7 @@ from spacy.lang.la.lex_attrs import like_num ("ix", True), ("MMXXII", True), ("0", True), - ("1", True), + ("1", True), ("quattuor", True), ("decem", True), ("tertius", True), @@ -27,6 +28,7 @@ def test_lex_attrs_like_number(la_tokenizer, text, match): assert len(tokens) == 1 assert tokens[0].like_num == match + @pytest.mark.parametrize("word", ["quinque"]) def test_la_lex_attrs_capitals(word): assert like_num(word) From 977dc33312dd189b5b4ae1d791031d090c169c24 Mon Sep 17 00:00:00 2001 From: Paul O'Leary McCann Date: Fri, 2 Sep 2022 18:58:21 +0900 Subject: [PATCH 13/82] Add a way to get the URL to download a pipeline to the CLI (#11175) 
* Add a dry run flag to download * Remove --dry-run, add --url option to `spacy info` instead * Make mypy happy * Print only the URL, so it's easier to use in scripts * Don't add the egg hash unless downloading an sdist * Update spacy/cli/info.py Co-authored-by: Sofie Van Landeghem * Add two implementations of requirements * Clean up requirements sample slightly This should make mypy happy * Update URL help string * Remove requirements option * Add url option to docs * Add URL to spacy info model output, when available * Add types-setuptools to testing reqs * Add types-setuptools to requirements * Add "compatible", expand docstring * Update spacy/cli/info.py Co-authored-by: Adriane Boyd * Run prettier on CLI docs * Update docs Add a sidebar about finding download URLs, with some examples of the new command. * Add download URLs to table on model page * Apply suggestions from code review Co-authored-by: Adriane Boyd * Updates from review * download url -> download link * Update docs Co-authored-by: Sofie Van Landeghem Co-authored-by: Adriane Boyd --- requirements.txt | 1 + spacy/cli/download.py | 32 ++++++++++--- spacy/cli/info.py | 58 +++++++++++++++++++++++- spacy/tests/package/test_requirements.py | 1 + website/docs/api/cli.md | 17 +++---- website/docs/usage/models.md | 36 ++++++++++----- website/src/templates/models.js | 10 ++++ 7 files changed, 127 insertions(+), 28 deletions(-) diff --git a/requirements.txt b/requirements.txt index 3b8d66e0e..3e8501b2f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -34,4 +34,5 @@ mypy>=0.910,<0.970; platform_machine!='aarch64' types-dataclasses>=0.1.3; python_version < "3.7" types-mock>=0.1.1 types-requests +types-setuptools>=57.0.0 black>=22.0,<23.0 diff --git a/spacy/cli/download.py b/spacy/cli/download.py index b7de88729..0c9a32b93 100644 --- a/spacy/cli/download.py +++ b/spacy/cli/download.py @@ -20,7 +20,7 @@ def download_cli( ctx: typer.Context, model: str = Arg(..., help="Name of pipeline package to download"), direct: bool = Opt(False, "--direct", "-d", "-D", help="Force direct download of name + version"), - sdist: bool = Opt(False, "--sdist", "-S", help="Download sdist (.tar.gz) archive instead of pre-built binary wheel") + sdist: bool = Opt(False, "--sdist", "-S", help="Download sdist (.tar.gz) archive instead of pre-built binary wheel"), # fmt: on ): """ @@ -36,7 +36,12 @@ def download_cli( download(model, direct, sdist, *ctx.args) -def download(model: str, direct: bool = False, sdist: bool = False, *pip_args) -> None: +def download( + model: str, + direct: bool = False, + sdist: bool = False, + *pip_args, +) -> None: if ( not (is_package("spacy") or is_package("spacy-nightly")) and "--no-deps" not in pip_args @@ -50,13 +55,10 @@ def download(model: str, direct: bool = False, sdist: bool = False, *pip_args) - "dependencies, you'll have to install them manually." 
) pip_args = pip_args + ("--no-deps",) - suffix = SDIST_SUFFIX if sdist else WHEEL_SUFFIX - dl_tpl = "{m}-{v}/{m}-{v}{s}#egg={m}=={v}" if direct: components = model.split("-") model_name = "".join(components[:-1]) version = components[-1] - download_model(dl_tpl.format(m=model_name, v=version, s=suffix), pip_args) else: model_name = model if model in OLD_MODEL_SHORTCUTS: @@ -67,13 +69,26 @@ def download(model: str, direct: bool = False, sdist: bool = False, *pip_args) - model_name = OLD_MODEL_SHORTCUTS[model] compatibility = get_compatibility() version = get_version(model_name, compatibility) - download_model(dl_tpl.format(m=model_name, v=version, s=suffix), pip_args) + + filename = get_model_filename(model_name, version, sdist) + + download_model(filename, pip_args) msg.good( "Download and installation successful", f"You can now load the package via spacy.load('{model_name}')", ) +def get_model_filename(model_name: str, version: str, sdist: bool = False) -> str: + dl_tpl = "{m}-{v}/{m}-{v}{s}" + egg_tpl = "#egg={m}=={v}" + suffix = SDIST_SUFFIX if sdist else WHEEL_SUFFIX + filename = dl_tpl.format(m=model_name, v=version, s=suffix) + if sdist: + filename += egg_tpl.format(m=model_name, v=version) + return filename + + def get_compatibility() -> dict: if is_prerelease_version(about.__version__): version: Optional[str] = about.__version__ @@ -105,6 +120,11 @@ def get_version(model: str, comp: dict) -> str: return comp[model][0] +def get_latest_version(model: str) -> str: + comp = get_compatibility() + return get_version(model, comp) + + def download_model( filename: str, user_pip_args: Optional[Sequence[str]] = None ) -> None: diff --git a/spacy/cli/info.py b/spacy/cli/info.py index e6a1cb616..e6ac4270f 100644 --- a/spacy/cli/info.py +++ b/spacy/cli/info.py @@ -1,10 +1,13 @@ from typing import Optional, Dict, Any, Union, List import platform +import pkg_resources +import json from pathlib import Path from wasabi import Printer, MarkdownRenderer import srsly from ._util import app, Arg, Opt, string_to_list +from .download import get_model_filename, get_latest_version from .. import util from .. import about @@ -16,6 +19,7 @@ def info_cli( markdown: bool = Opt(False, "--markdown", "-md", help="Generate Markdown for GitHub issues"), silent: bool = Opt(False, "--silent", "-s", "-S", help="Don't print anything (just return)"), exclude: str = Opt("labels", "--exclude", "-e", help="Comma-separated keys to exclude from the print-out"), + url: bool = Opt(False, "--url", "-u", help="Print the URL to download the most recent compatible version of the pipeline"), # fmt: on ): """ @@ -23,10 +27,19 @@ def info_cli( print its meta information. Flag --markdown prints details in Markdown for easy copy-pasting to GitHub issues. + Flag --url prints only the download URL of the most recent compatible + version of the pipeline. 
+ DOCS: https://spacy.io/api/cli#info """ exclude = string_to_list(exclude) - info(model, markdown=markdown, silent=silent, exclude=exclude) + info( + model, + markdown=markdown, + silent=silent, + exclude=exclude, + url=url, + ) def info( @@ -35,11 +48,20 @@ def info( markdown: bool = False, silent: bool = True, exclude: Optional[List[str]] = None, + url: bool = False, ) -> Union[str, dict]: msg = Printer(no_print=silent, pretty=not silent) if not exclude: exclude = [] - if model: + if url: + if model is not None: + title = f"Download info for pipeline '{model}'" + data = info_model_url(model) + print(data["download_url"]) + return data + else: + msg.fail("--url option requires a pipeline name", exits=1) + elif model: title = f"Info about pipeline '{model}'" data = info_model(model, silent=silent) else: @@ -99,11 +121,43 @@ def info_model(model: str, *, silent: bool = True) -> Dict[str, Any]: meta["source"] = str(model_path.resolve()) else: meta["source"] = str(model_path) + download_url = info_installed_model_url(model) + if download_url: + meta["download_url"] = download_url return { k: v for k, v in meta.items() if k not in ("accuracy", "performance", "speed") } +def info_installed_model_url(model: str) -> Optional[str]: + """Given a pipeline name, get the download URL if available, otherwise + return None. + + This is only available for pipelines installed as modules that have + dist-info available. + """ + try: + dist = pkg_resources.get_distribution(model) + data = json.loads(dist.get_metadata("direct_url.json")) + return data["url"] + except pkg_resources.DistributionNotFound: + # no such package + return None + except Exception: + # something else, like no file or invalid JSON + return None + +def info_model_url(model: str) -> Dict[str, Any]: + """Return the download URL for the latest version of a pipeline.""" + version = get_latest_version(model) + + filename = get_model_filename(model, version) + download_url = about.__download_url__ + "/" + filename + release_tpl = "https://github.com/explosion/spacy-models/releases/tag/{m}-{v}" + release_url = release_tpl.format(m=model, v=version) + return {"download_url": download_url, "release_url": release_url} + + def get_markdown( data: Dict[str, Any], title: Optional[str] = None, diff --git a/spacy/tests/package/test_requirements.py b/spacy/tests/package/test_requirements.py index e20227455..b403f274f 100644 --- a/spacy/tests/package/test_requirements.py +++ b/spacy/tests/package/test_requirements.py @@ -17,6 +17,7 @@ def test_build_dependencies(): "types-dataclasses", "types-mock", "types-requests", + "types-setuptools", ] # ignore language-specific packages that shouldn't be installed by all libs_ignore_setup = [ diff --git a/website/docs/api/cli.md b/website/docs/api/cli.md index cbd1f794a..e5cd3089b 100644 --- a/website/docs/api/cli.md +++ b/website/docs/api/cli.md @@ -77,14 +77,15 @@ $ python -m spacy info [--markdown] [--silent] [--exclude] $ python -m spacy info [model] [--markdown] [--silent] [--exclude] ``` -| Name | Description | -| ------------------------------------------------ | --------------------------------------------------------------------------------------------- | -| `model` | A trained pipeline, i.e. package name or path (optional). ~~Optional[str] \(option)~~ | -| `--markdown`, `-md` | Print information as Markdown. ~~bool (flag)~~ | -| `--silent`, `-s` 2.0.12 | Don't print anything, just return the values. ~~bool (flag)~~ | -| `--exclude`, `-e` | Comma-separated keys to exclude from the print-out. 
Defaults to `"labels"`. ~~Optional[str]~~ | -| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | -| **PRINTS** | Information about your spaCy installation. | +| Name | Description | +| ------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------- | +| `model` | A trained pipeline, i.e. package name or path (optional). ~~Optional[str] \(option)~~ | +| `--markdown`, `-md` | Print information as Markdown. ~~bool (flag)~~ | +| `--silent`, `-s` 2.0.12 | Don't print anything, just return the values. ~~bool (flag)~~ | +| `--exclude`, `-e` | Comma-separated keys to exclude from the print-out. Defaults to `"labels"`. ~~Optional[str]~~ | +| `--url`, `-u` 3.5.0 | Print the URL to download the most recent compatible version of the pipeline. Requires a pipeline name. ~~bool (flag)~~ | +| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | +| **PRINTS** | Information about your spaCy installation. | ## validate {#validate new="2" tag="command"} diff --git a/website/docs/usage/models.md b/website/docs/usage/models.md index 56992e7e3..6971ac8b4 100644 --- a/website/docs/usage/models.md +++ b/website/docs/usage/models.md @@ -365,15 +365,32 @@ pipeline package can be found. To download a trained pipeline directly using [pip](https://pypi.python.org/pypi/pip), point `pip install` to the URL or local path of the wheel file or archive. Installing the wheel is usually more -efficient. To find the direct link to a package, head over to the -[releases](https://github.com/explosion/spacy-models/releases), right click on -the archive link and copy it to your clipboard. +efficient. + +> #### Pipeline Package URLs {#pipeline-urls} +> +> Pretrained pipeline distributions are hosted on +> [Github Releases](https://github.com/explosion/spacy-models/releases), and you +> can find download links there, as well as on the model page. You can also get +> URLs directly from the command line by using `spacy info` with the `--url` +> flag, which may be useful for automation. +> +> ```bash +> spacy info en_core_web_sm --url +> ``` +> +> This command will print the URL for the latest version of a pipeline +> compatible with the version of spaCy you're using. Note that in order to look +> up the compatibility information an internet connection is required. ```bash # With external URL $ pip install https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.0.0/en_core_web_sm-3.0.0-py3-none-any.whl $ pip install https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.0.0/en_core_web_sm-3.0.0.tar.gz +# Using spacy info to get the external URL +$ pip install $(spacy info en_core_web_sm --url) + # With local file $ pip install /Users/you/en_core_web_sm-3.0.0-py3-none-any.whl $ pip install /Users/you/en_core_web_sm-3.0.0.tar.gz @@ -514,21 +531,16 @@ should be specifying them directly. Because pipeline packages are valid Python packages, you can add them to your application's `requirements.txt`. If you're running your own internal PyPi installation, you can upload the pipeline packages there. pip's -[requirements file format](https://pip.pypa.io/en/latest/reference/pip_install/#requirements-file-format) -supports both package names to download via a PyPi server, as well as direct -URLs. 
+[requirements file format](https://pip.pypa.io/en/latest/reference/requirements-file-format/) +supports both package names to download via a PyPi server, as well as +[direct URLs](#pipeline-urls). ```text ### requirements.txt spacy>=3.0.0,<4.0.0 -https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.0.0/en_core_web_sm-3.0.0.tar.gz#egg=en_core_web_sm +en_core_web_sm @ https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.4.0/en_core_web_sm-3.4.0-py3-none-any.whl ``` -Specifying `#egg=` with the package name tells pip which package to expect from -the download URL. This way, the package won't be re-downloaded and overwritten -if it's already installed - just like when you're downloading a package from -PyPi. - All pipeline packages are versioned and specify their spaCy dependency. This ensures cross-compatibility and lets you specify exact version requirements for each pipeline. If you've [trained](/usage/training) your own pipeline, you can diff --git a/website/src/templates/models.js b/website/src/templates/models.js index df53f8c3c..16a2360d5 100644 --- a/website/src/templates/models.js +++ b/website/src/templates/models.js @@ -76,6 +76,7 @@ const MODEL_META = { benchmark_ner: 'NER accuracy', benchmark_speed: 'Speed', compat: 'Latest compatible package version for your spaCy installation', + download_link: 'Download link for the pipeline', } const LABEL_SCHEME_META = { @@ -138,6 +139,13 @@ function formatAccuracy(data, lang) { .filter(item => item) } +function formatDownloadLink(lang, name, version) { + const fullName = `${lang}_${name}-${version}` + const filename = `${fullName}-py3-none-any.whl` + const url = `https://github.com/explosion/spacy-models/releases/download/${fullName}/${filename}` + return {filename} +} + function formatModelMeta(data) { return { fullName: `${data.lang}_${data.name}-${data.version}`, @@ -154,6 +162,7 @@ function formatModelMeta(data) { labels: isEmptyObj(data.labels) ? null : data.labels, vectors: formatVectors(data.vectors), accuracy: formatAccuracy(data.performance, data.lang), + download_link: formatDownloadLink(data.lang, data.name, data.version), } } @@ -244,6 +253,7 @@ const Model = ({ { label: 'Components', content: components, help: MODEL_META.components }, { label: 'Pipeline', content: pipeline, help: MODEL_META.pipeline }, { label: 'Vectors', content: meta.vectors, help: MODEL_META.vecs }, + { label: 'Download Link', content: meta.download_link, help: MODEL_META.download_link }, { label: 'Sources', content: sources, help: MODEL_META.sources }, { label: 'Author', content: author }, { label: 'License', content: license }, From ff0522f8daac603e4dfb2773e1a73da61acc621d Mon Sep 17 00:00:00 2001 From: Paul O'Leary McCann Date: Thu, 1 Sep 2022 11:35:52 +0900 Subject: [PATCH 14/82] Fix asent pip package name --- website/meta/universe.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/meta/universe.json b/website/meta/universe.json index 6c8caa6a6..9145855c6 100644 --- a/website/meta/universe.json +++ b/website/meta/universe.json @@ -1192,7 +1192,7 @@ "slogan": "Fast, flexible and transparent sentiment analysis", "description": "Asent is a rule-based sentiment analysis library for Python made using spaCy. It is inspired by VADER, but uses a more modular ruleset, that allows the user to change e.g. the method for finding negations. 
 Furthermore it includes visualisers to visualize the model predictions, making the model easily interpretable.",
         "github": "kennethenevoldsen/asent",
-        "pip": "aseny",
+        "pip": "asent",
         "code_example": [
             "import spacy",
             "import asent",

From 515d5c65d5f5d05eb8d2777e59cb5680dfcb4bd9 Mon Sep 17 00:00:00 2001
From: Paul O'Leary McCann
Date: Wed, 7 Sep 2022 22:24:22 +0900
Subject: [PATCH 15/82] Add dev docs on satellite packages (#11435)

* Add dev docs on satellite packages
* Apply suggestions from code review
Co-authored-by: Sofie Van Landeghem
* Add displacy link
Co-authored-by: Sofie Van Landeghem
---
 extra/DEVELOPER_DOCS/Satellite Packages.md | 82 ++++++++++++++++++++++
 1 file changed, 82 insertions(+)
 create mode 100644 extra/DEVELOPER_DOCS/Satellite Packages.md

diff --git a/extra/DEVELOPER_DOCS/Satellite Packages.md b/extra/DEVELOPER_DOCS/Satellite Packages.md
new file mode 100644
index 000000000..02b06a90e
--- /dev/null
+++ b/extra/DEVELOPER_DOCS/Satellite Packages.md
@@ -0,0 +1,82 @@
+# spaCy Satellite Packages
+
+This is a list of all the active repos relevant to spaCy besides the main one, with short descriptions, history, and current status. Archived repos will not be covered.
+
+## Always Included in spaCy
+
+These packages are always pulled in when you install spaCy. Most of them are direct dependencies, but some are transitive dependencies through other packages.
+
+- [spacy-legacy](https://github.com/explosion/spacy-legacy): When an architecture in spaCy changes enough to get a new version, the old version is frozen and moved to spacy-legacy. This allows us to keep the core library slim while also preserving backwards compatibility.
+- [thinc](https://github.com/explosion/thinc): Thinc is the machine learning library that powers trainable components in spaCy. It wraps backends like NumPy, PyTorch, and TensorFlow to provide a functional interface for specifying architectures.
+- [catalogue](https://github.com/explosion/catalogue): Small library for adding function registries, like those used for model architectures in spaCy.
+- [confection](https://github.com/explosion/confection): This library contains the functionality for config parsing that was formerly contained directly in Thinc.
+- [spacy-loggers](https://github.com/explosion/spacy-loggers): Contains loggers beyond the default logger available in spaCy's core code base. This includes loggers integrated with third-party services, which may differ in release cadence from spaCy itself.
+- [wasabi](https://github.com/explosion/wasabi): A command line formatting library, used for terminal output in spaCy.
+- [srsly](https://github.com/explosion/srsly): A wrapper that vendors several serialization libraries for spaCy. Includes parsers for JSON, JSONL, MessagePack, (extended) Pickle, and YAML.
+- [preshed](https://github.com/explosion/preshed): A Cython library for low-level data structures like hash maps, used for memory efficient data storage.
+- [cython-blis](https://github.com/explosion/cython-blis): Fast matrix multiplication using BLIS without depending on system libraries. Required by Thinc, rather than spaCy directly.
+- [murmurhash](https://github.com/explosion/murmurhash): A wrapper library for a C++ murmurhash implementation, used for string IDs in spaCy and preshed.
+- [cymem](https://github.com/explosion/cymem): A small library for RAII-style memory management in Cython.
+
+## Optional Extensions for spaCy
+
+These are repos that can be used by spaCy but aren't part of a default installation. Many of these are wrappers to integrate various kinds of third-party libraries.
+
+- [spacy-transformers](https://github.com/explosion/spacy-transformers): A wrapper for the [HuggingFace Transformers](https://huggingface.co/docs/transformers/index) library; it handles the extensive conversion necessary to coordinate spaCy's powerful `Doc` representation, training pipeline, and the Transformer embeddings. When released, this was known as `spacy-pytorch-transformers`, but it changed to the current name when HuggingFace updated the name of their library as well.
+- [spacy-huggingface-hub](https://github.com/explosion/spacy-huggingface-hub): This package has a CLI script for uploading a packaged spaCy pipeline (created with `spacy package`) to the [Hugging Face Hub](https://huggingface.co/models).
+- [spacy-alignments](https://github.com/explosion/spacy-alignments): A wrapper for the tokenizations library (mentioned below) with a modified build system to simplify cross-platform wheel creation. Used in spacy-transformers for aligning spaCy and HuggingFace tokenizations.
+- [spacy-experimental](https://github.com/explosion/spacy-experimental): Experimental components that are not quite ready for inclusion in the main spaCy library. Usually there are unresolved questions around their APIs, so the experimental library allows us to expose them to the community for feedback before fully integrating them.
+- [spacy-lookups-data](https://github.com/explosion/spacy-lookups-data): A repository of linguistic data, such as lemmas, that takes up a lot of disk space. Originally created to reduce the size of the spaCy core library. This is mainly useful if you want the data included but aren't using a pretrained pipeline; for the affected languages, the relevant data is included in pretrained pipelines directly.
+- [coreferee](https://github.com/explosion/coreferee): Coreference resolution for English, French, German and Polish, optimised for limited training data and easily extensible for further languages. Used as a spaCy pipeline component.
+- [spacy-stanza](https://github.com/explosion/spacy-stanza): This is a wrapper that allows the use of Stanford's Stanza library in spaCy.
+- [spacy-streamlit](https://github.com/explosion/spacy-streamlit): A wrapper for the Streamlit dashboard building library to help with integrating [displaCy](https://spacy.io/api/top-level/#displacy).
+- [spacymoji](https://github.com/explosion/spacymoji): A library to add extra support for emoji to spaCy, such as including character names.
+- [thinc-apple-ops](https://github.com/explosion/thinc-apple-ops): A special backend for OSX that uses Apple's native libraries for improved performance.
+- [os-signpost](https://github.com/explosion/os-signpost): A Python package that allows you to use the `OSSignposter` API in OSX for performance analysis.
+- [spacy-ray](https://github.com/explosion/spacy-ray): A wrapper to integrate spaCy with Ray, a distributed training framework. Currently a work in progress.
+
+## Prodigy
+
+[Prodigy](https://prodi.gy) is Explosion's easy-to-use and highly customizable tool for annotating data. Prodigy itself requires a license, but the repos below contain documentation, examples, and editor or notebook integrations.
+
+- [prodigy-recipes](https://github.com/explosion/prodigy-recipes): Sample recipes for Prodigy, along with notebooks and other examples of usage.
+- [vscode-prodigy](https://github.com/explosion/vscode-prodigy): A VS Code extension that lets you run Prodigy inside VS Code.
+- [jupyterlab-prodigy](https://github.com/explosion/jupyterlab-prodigy): An extension for JupyterLab that lets you run Prodigy inside JupyterLab.
+
+## Independent Tools or Projects
+
+These are tools that may be related to or use spaCy, but are functionally independent projects in their own right as well.
+
+- [floret](https://github.com/explosion/floret): A modification of fastText to use Bloom Embeddings. Can be used to add vectors with subword features to spaCy, and also works independently in the same manner as fastText.
+- [sense2vec](https://github.com/explosion/sense2vec): A library to make embeddings of noun phrases or words coupled with their part of speech. This library uses spaCy.
+- [spacy-vectors-builder](https://github.com/explosion/spacy-vectors-builder): This is a spaCy project that builds vectors using floret and a lot of input text. It handles downloading the input data as well as the actual building of vectors.
+- [holmes-extractor](https://github.com/explosion/holmes-extractor): Information extraction from English and German texts based on predicate logic. Uses spaCy.
+- [healthsea](https://github.com/explosion/healthsea): Healthsea is a project to extract information from comments about health supplements. Structurally, it's a self-contained, large spaCy project.
+- [spacy-pkuseg](https://github.com/explosion/spacy-pkuseg): A fork of the pkuseg Chinese tokenizer. Used for Chinese support in spaCy, but also works independently.
+- [ml-datasets](https://github.com/explosion/ml-datasets): This repo includes loaders for several standard machine learning datasets, like MNIST or WikiNER, and has historically been used in spaCy example code and documentation.
+
+## Documentation and Informational Repos
+
+These repos are used to support the spaCy docs or otherwise present information about spaCy or other Explosion projects.
+
+- [projects](https://github.com/explosion/projects): The projects repo is used to show detailed examples of spaCy usage. Individual projects can be checked out using the spaCy command line tool, rather than checking out the projects repo directly.
+- [spacy-course](https://github.com/explosion/spacy-course): Home to the interactive spaCy course for learning about how to use the library and some basic NLP principles.
+- [spacy-io-binder](https://github.com/explosion/spacy-io-binder): Home to the notebooks used for interactive examples in the documentation.
+
+## Organizational / Meta
+
+These repos are used for organizing data around spaCy, but are not something an end user would need to install as part of using the library.
+
+- [spacy-models](https://github.com/explosion/spacy-models): This repo contains metadata (but not training data) for all the spaCy models. This includes information about where their training data came from, version compatibility, and performance information. It also includes tests for the model packages, and the built models are hosted as releases of this repo.
+- [wheelwright](https://github.com/explosion/wheelwright): A tool for automating our PyPI builds and releases.
+- [ec2buildwheel](https://github.com/explosion/ec2buildwheel): A small project that allows you to build Python packages in the manner of cibuildwheel, but on any EC2 image. Used by wheelwright.
+
+## Other
+
+Repos that don't fit in any of the above categories.
+
+- [blis](https://github.com/explosion/blis): A fork of the official BLIS library. The main branch is not updated, but work continues in various branches. This is used for cython-blis.
+- [tokenizations](https://github.com/explosion/tokenizations): A library originally by Yohei Tamura to align strings with tolerance to some variations in features like case and diacritics, used for aligning tokens and wordpieces. Adopted and maintained by Explosion, but usually spacy-alignments is used instead. +- [conll-2012](https://github.com/explosion/conll-2012): A repo to hold some slightly cleaned up versions of the official scripts for the CoNLL 2012 shared task involving coreference resolution. Used in the coref project. +- [fastapi-explosion-extras](https://github.com/explosion/fastapi-explosion-extras): Some small tweaks to FastAPI used at Explosion. + From 1f23c615d7a7326ca5a38a7d768b8b70caaa0e17 Mon Sep 17 00:00:00 2001 From: Raphael Mitsch Date: Thu, 8 Sep 2022 10:38:07 +0200 Subject: [PATCH 16/82] Refactor KB for easier customization (#11268) * Add implementation of batching + backwards compatibility fixes. Tests indicate issue with batch disambiguation for custom singular entity lookups. * Fix tests. Add distinction w.r.t. batch size. * Remove redundant and add new comments. * Adjust comments. Fix variable naming in EL prediction. * Fix mypy errors. * Remove KB entity type config option. Change return types of candidate retrieval functions to Iterable from Iterator. Fix various other issues. * Update spacy/pipeline/entity_linker.py Co-authored-by: Paul O'Leary McCann * Update spacy/pipeline/entity_linker.py Co-authored-by: Paul O'Leary McCann * Update spacy/kb_base.pyx Co-authored-by: Paul O'Leary McCann * Update spacy/kb_base.pyx Co-authored-by: Paul O'Leary McCann * Update spacy/pipeline/entity_linker.py Co-authored-by: Paul O'Leary McCann * Add error messages to NotImplementedErrors. Remove redundant comment. * Fix imports. * Remove redundant comments. * Rename KnowledgeBase to InMemoryLookupKB and BaseKnowledgeBase to KnowledgeBase. * Fix tests. * Update spacy/errors.py Co-authored-by: Sofie Van Landeghem * Move KB into subdirectory. * Adjust imports after KB move to dedicated subdirectory. * Fix config imports. * Move Candidate + retrieval functions to separate module. Fix other, small issues. * Fix docstrings and error message w.r.t. class names. Fix typing for candidate retrieval functions. * Update spacy/kb/kb_in_memory.pyx Co-authored-by: Sofie Van Landeghem * Update spacy/ml/models/entity_linker.py Co-authored-by: Sofie Van Landeghem * Fix typing. * Change typing of mentions to be Span instead of Union[Span, str]. * Update docs. * Update EntityLinker and _architecture docs. * Update website/docs/api/entitylinker.md Co-authored-by: Paul O'Leary McCann * Adjust message for E1046. * Re-add section for Candidate in kb.md, add reference to dedicated page. * Update docs and docstrings. * Re-add section + reference for KnowledgeBase.get_alias_candidates() in docs. * Update spacy/kb/candidate.pyx * Update spacy/kb/kb_in_memory.pyx * Update spacy/pipeline/legacy/entity_linker.py * Remove canididate.md. Remove mistakenly added config snippet in entity_linker.py. 
Co-authored-by: Paul O'Leary McCann Co-authored-by: Sofie Van Landeghem --- setup.py | 4 +- spacy/errors.py | 10 +- spacy/kb/__init__.py | 3 + spacy/kb/candidate.pxd | 12 + spacy/kb/candidate.pyx | 74 +++++ spacy/kb/kb.pxd | 10 + spacy/kb/kb.pyx | 108 +++++++ spacy/{kb.pxd => kb/kb_in_memory.pxd} | 24 +- spacy/{kb.pyx => kb/kb_in_memory.pyx} | 96 ++---- spacy/ml/models/entity_linker.py | 30 +- spacy/pipeline/entity_linker.py | 184 +++++++---- spacy/pipeline/legacy/entity_linker.py | 5 +- spacy/tests/pipeline/test_entity_linker.py | 98 +++--- .../tests/serialize/test_resource_warning.py | 8 +- spacy/tests/serialize/test_serialize_kb.py | 16 +- website/docs/api/architectures.md | 14 +- website/docs/api/entitylinker.md | 5 +- website/docs/api/kb.md | 219 +++++-------- website/docs/api/kb_in_memory.md | 302 ++++++++++++++++++ website/docs/usage/101/_architecture.md | 4 +- 20 files changed, 854 insertions(+), 372 deletions(-) create mode 100644 spacy/kb/__init__.py create mode 100644 spacy/kb/candidate.pxd create mode 100644 spacy/kb/candidate.pyx create mode 100644 spacy/kb/kb.pxd create mode 100644 spacy/kb/kb.pyx rename spacy/{kb.pxd => kb/kb_in_memory.pxd} (92%) rename spacy/{kb.pyx => kb/kb_in_memory.pyx} (90%) create mode 100644 website/docs/api/kb_in_memory.md diff --git a/setup.py b/setup.py index ec1bd35fa..899d940ed 100755 --- a/setup.py +++ b/setup.py @@ -30,7 +30,9 @@ MOD_NAMES = [ "spacy.lexeme", "spacy.vocab", "spacy.attrs", - "spacy.kb", + "spacy.kb.candidate", + "spacy.kb.kb", + "spacy.kb.kb_in_memory", "spacy.ml.parser_model", "spacy.morphology", "spacy.pipeline.dep_parser", diff --git a/spacy/errors.py b/spacy/errors.py index 5ee1476c2..e2201284f 100644 --- a/spacy/errors.py +++ b/spacy/errors.py @@ -709,9 +709,9 @@ class Errors(metaclass=ErrorsWithCodes): "`nlp.enable_pipe` instead.") E927 = ("Can't write to frozen list Maybe you're trying to modify a computed " "property or default function argument?") - E928 = ("A KnowledgeBase can only be serialized to/from from a directory, " + E928 = ("An InMemoryLookupKB can only be serialized to/from from a directory, " "but the provided argument {loc} points to a file.") - E929 = ("Couldn't read KnowledgeBase from {loc}. The path does not seem to exist.") + E929 = ("Couldn't read InMemoryLookupKB from {loc}. The path does not seem to exist.") E930 = ("Received invalid get_examples callback in `{method}`. " "Expected function that returns an iterable of Example objects but " "got: {obj}") @@ -941,6 +941,12 @@ class Errors(metaclass=ErrorsWithCodes): "`{arg2}`={arg2_values} but these arguments are conflicting.") E1043 = ("Expected None or a value in range [{range_start}, {range_end}] for entity linker threshold, but got " "{value}.") + E1044 = ("Expected `candidates_batch_size` to be >= 1, but got: {value}") + E1045 = ("Encountered {parent} subclass without `{parent}.{method}` " + "method in '{name}'. If you want to use this method, make " + "sure it's overwritten on the subclass.") + E1046 = ("{cls_name} is an abstract class and cannot be instantiated. 
If you are looking for spaCy's default " + "knowledge base, use `InMemoryLookupKB`.") # Deprecated model shortcuts, only used in errors and warnings diff --git a/spacy/kb/__init__.py b/spacy/kb/__init__.py new file mode 100644 index 000000000..1d70a9b34 --- /dev/null +++ b/spacy/kb/__init__.py @@ -0,0 +1,3 @@ +from .kb import KnowledgeBase +from .kb_in_memory import InMemoryLookupKB +from .candidate import Candidate, get_candidates, get_candidates_batch diff --git a/spacy/kb/candidate.pxd b/spacy/kb/candidate.pxd new file mode 100644 index 000000000..942ce9dd0 --- /dev/null +++ b/spacy/kb/candidate.pxd @@ -0,0 +1,12 @@ +from .kb cimport KnowledgeBase +from libcpp.vector cimport vector +from ..typedefs cimport hash_t + +# Object used by the Entity Linker that summarizes one entity-alias candidate combination. +cdef class Candidate: + cdef readonly KnowledgeBase kb + cdef hash_t entity_hash + cdef float entity_freq + cdef vector[float] entity_vector + cdef hash_t alias_hash + cdef float prior_prob diff --git a/spacy/kb/candidate.pyx b/spacy/kb/candidate.pyx new file mode 100644 index 000000000..c89efeb03 --- /dev/null +++ b/spacy/kb/candidate.pyx @@ -0,0 +1,74 @@ +# cython: infer_types=True, profile=True + +from typing import Iterable +from .kb cimport KnowledgeBase +from ..tokens import Span + +cdef class Candidate: + """A `Candidate` object refers to a textual mention (`alias`) that may or may not be resolved + to a specific `entity` from a Knowledge Base. This will be used as input for the entity linking + algorithm which will disambiguate the various candidates to the correct one. + Each candidate (alias, entity) pair is assigned a certain prior probability. + + DOCS: https://spacy.io/api/kb/#candidate-init + """ + + def __init__(self, KnowledgeBase kb, entity_hash, entity_freq, entity_vector, alias_hash, prior_prob): + self.kb = kb + self.entity_hash = entity_hash + self.entity_freq = entity_freq + self.entity_vector = entity_vector + self.alias_hash = alias_hash + self.prior_prob = prior_prob + + @property + def entity(self) -> int: + """RETURNS (uint64): hash of the entity's KB ID/name""" + return self.entity_hash + + @property + def entity_(self) -> str: + """RETURNS (str): ID/name of this entity in the KB""" + return self.kb.vocab.strings[self.entity_hash] + + @property + def alias(self) -> int: + """RETURNS (uint64): hash of the alias""" + return self.alias_hash + + @property + def alias_(self) -> str: + """RETURNS (str): ID of the original alias""" + return self.kb.vocab.strings[self.alias_hash] + + @property + def entity_freq(self) -> float: + return self.entity_freq + + @property + def entity_vector(self) -> Iterable[float]: + return self.entity_vector + + @property + def prior_prob(self) -> float: + return self.prior_prob + + +def get_candidates(kb: KnowledgeBase, mention: Span) -> Iterable[Candidate]: + """ + Return candidate entities for a given mention and fetching appropriate entries from the index. + kb (KnowledgeBase): Knowledge base to query. + mention (Span): Entity mention for which to identify candidates. + RETURNS (Iterable[Candidate]): Identified candidates. + """ + return kb.get_candidates(mention) + + +def get_candidates_batch(kb: KnowledgeBase, mentions: Iterable[Span]) -> Iterable[Iterable[Candidate]]: + """ + Return candidate entities for the given mentions and fetching appropriate entries from the index. + kb (KnowledgeBase): Knowledge base to query. + mention (Iterable[Span]): Entity mentions for which to identify candidates. 
+ RETURNS (Iterable[Iterable[Candidate]]): Identified candidates. + """ + return kb.get_candidates_batch(mentions) diff --git a/spacy/kb/kb.pxd b/spacy/kb/kb.pxd new file mode 100644 index 000000000..1adeef8ae --- /dev/null +++ b/spacy/kb/kb.pxd @@ -0,0 +1,10 @@ +"""Knowledge-base for entity or concept linking.""" + +from cymem.cymem cimport Pool +from libc.stdint cimport int64_t +from ..vocab cimport Vocab + +cdef class KnowledgeBase: + cdef Pool mem + cdef readonly Vocab vocab + cdef readonly int64_t entity_vector_length diff --git a/spacy/kb/kb.pyx b/spacy/kb/kb.pyx new file mode 100644 index 000000000..ce4bc0138 --- /dev/null +++ b/spacy/kb/kb.pyx @@ -0,0 +1,108 @@ +# cython: infer_types=True, profile=True + +from pathlib import Path +from typing import Iterable, Tuple, Union +from cymem.cymem cimport Pool + +from .candidate import Candidate +from ..tokens import Span +from ..util import SimpleFrozenList +from ..errors import Errors + + +cdef class KnowledgeBase: + """A `KnowledgeBase` instance stores unique identifiers for entities and their textual aliases, + to support entity linking of named entities to real-world concepts. + This is an abstract class and requires its operations to be implemented. + + DOCS: https://spacy.io/api/kb + """ + + def __init__(self, vocab: Vocab, entity_vector_length: int): + """Create a KnowledgeBase.""" + # Make sure abstract KB is not instantiated. + if self.__class__ == KnowledgeBase: + raise TypeError( + Errors.E1046.format(cls_name=self.__class__.__name__) + ) + + self.vocab = vocab + self.entity_vector_length = entity_vector_length + self.mem = Pool() + + def get_candidates_batch(self, mentions: Iterable[Span]) -> Iterable[Iterable[Candidate]]: + """ + Return candidate entities for specified texts. Each candidate defines the entity, the original alias, + and the prior probability of that alias resolving to that entity. + If no candidate is found for a given text, an empty list is returned. + mentions (Iterable[Span]): Mentions for which to get candidates. + RETURNS (Iterable[Iterable[Candidate]]): Identified candidates. + """ + return [self.get_candidates(span) for span in mentions] + + def get_candidates(self, mention: Span) -> Iterable[Candidate]: + """ + Return candidate entities for specified text. Each candidate defines the entity, the original alias, + and the prior probability of that alias resolving to that entity. + If the no candidate is found for a given text, an empty list is returned. + mention (Span): Mention for which to get candidates. + RETURNS (Iterable[Candidate]): Identified candidates. + """ + raise NotImplementedError( + Errors.E1045.format(parent="KnowledgeBase", method="get_candidates", name=self.__name__) + ) + + def get_vectors(self, entities: Iterable[str]) -> Iterable[Iterable[float]]: + """ + Return vectors for entities. + entity (str): Entity name/ID. + RETURNS (Iterable[Iterable[float]]): Vectors for specified entities. + """ + return [self.get_vector(entity) for entity in entities] + + def get_vector(self, str entity) -> Iterable[float]: + """ + Return vector for entity. + entity (str): Entity name/ID. + RETURNS (Iterable[float]): Vector for specified entity. + """ + raise NotImplementedError( + Errors.E1045.format(parent="KnowledgeBase", method="get_vector", name=self.__name__) + ) + + def to_bytes(self, **kwargs) -> bytes: + """Serialize the current state to a binary string. + RETURNS (bytes): Current state as binary string. 
+ """ + raise NotImplementedError( + Errors.E1045.format(parent="KnowledgeBase", method="to_bytes", name=self.__name__) + ) + + def from_bytes(self, bytes_data: bytes, *, exclude: Tuple[str] = tuple()): + """Load state from a binary string. + bytes_data (bytes): KB state. + exclude (Tuple[str]): Properties to exclude when restoring KB. + """ + raise NotImplementedError( + Errors.E1045.format(parent="KnowledgeBase", method="from_bytes", name=self.__name__) + ) + + def to_disk(self, path: Union[str, Path], exclude: Iterable[str] = SimpleFrozenList()) -> None: + """ + Write KnowledgeBase content to disk. + path (Union[str, Path]): Target file path. + exclude (Iterable[str]): List of components to exclude. + """ + raise NotImplementedError( + Errors.E1045.format(parent="KnowledgeBase", method="to_disk", name=self.__name__) + ) + + def from_disk(self, path: Union[str, Path], exclude: Iterable[str] = SimpleFrozenList()) -> None: + """ + Load KnowledgeBase content from disk. + path (Union[str, Path]): Target file path. + exclude (Iterable[str]): List of components to exclude. + """ + raise NotImplementedError( + Errors.E1045.format(parent="KnowledgeBase", method="from_disk", name=self.__name__) + ) diff --git a/spacy/kb.pxd b/spacy/kb/kb_in_memory.pxd similarity index 92% rename from spacy/kb.pxd rename to spacy/kb/kb_in_memory.pxd index a823dbe1e..825a6bde9 100644 --- a/spacy/kb.pxd +++ b/spacy/kb/kb_in_memory.pxd @@ -1,14 +1,12 @@ """Knowledge-base for entity or concept linking.""" -from cymem.cymem cimport Pool from preshed.maps cimport PreshMap from libcpp.vector cimport vector from libc.stdint cimport int32_t, int64_t from libc.stdio cimport FILE -from .vocab cimport Vocab -from .typedefs cimport hash_t -from .structs cimport KBEntryC, AliasC - +from ..typedefs cimport hash_t +from ..structs cimport KBEntryC, AliasC +from .kb cimport KnowledgeBase ctypedef vector[KBEntryC] entry_vec ctypedef vector[AliasC] alias_vec @@ -16,21 +14,7 @@ ctypedef vector[float] float_vec ctypedef vector[float_vec] float_matrix -# Object used by the Entity Linker that summarizes one entity-alias candidate combination. -cdef class Candidate: - cdef readonly KnowledgeBase kb - cdef hash_t entity_hash - cdef float entity_freq - cdef vector[float] entity_vector - cdef hash_t alias_hash - cdef float prior_prob - - -cdef class KnowledgeBase: - cdef Pool mem - cdef readonly Vocab vocab - cdef int64_t entity_vector_length - +cdef class InMemoryLookupKB(KnowledgeBase): # This maps 64bit keys (hash of unique entity string) # to 64bit values (position of the _KBEntryC struct in the _entries vector). # The PreshMap is pretty space efficient, as it uses open addressing. So diff --git a/spacy/kb.pyx b/spacy/kb/kb_in_memory.pyx similarity index 90% rename from spacy/kb.pyx rename to spacy/kb/kb_in_memory.pyx index ae1983a8d..485e52c2f 100644 --- a/spacy/kb.pyx +++ b/spacy/kb/kb_in_memory.pyx @@ -1,8 +1,7 @@ # cython: infer_types=True, profile=True -from typing import Iterator, Iterable, Callable, Dict, Any +from typing import Iterable, Callable, Dict, Any, Union import srsly -from cymem.cymem cimport Pool from preshed.maps cimport PreshMap from cpython.exc cimport PyErr_SetFromErrno from libc.stdio cimport fopen, fclose, fread, fwrite, feof, fseek @@ -12,85 +11,28 @@ from libcpp.vector cimport vector from pathlib import Path import warnings -from .typedefs cimport hash_t -from .errors import Errors, Warnings -from . 
import util -from .util import SimpleFrozenList, ensure_path - -cdef class Candidate: - """A `Candidate` object refers to a textual mention (`alias`) that may or may not be resolved - to a specific `entity` from a Knowledge Base. This will be used as input for the entity linking - algorithm which will disambiguate the various candidates to the correct one. - Each candidate (alias, entity) pair is assigned to a certain prior probability. - - DOCS: https://spacy.io/api/kb/#candidate_init - """ - - def __init__(self, KnowledgeBase kb, entity_hash, entity_freq, entity_vector, alias_hash, prior_prob): - self.kb = kb - self.entity_hash = entity_hash - self.entity_freq = entity_freq - self.entity_vector = entity_vector - self.alias_hash = alias_hash - self.prior_prob = prior_prob - - @property - def entity(self): - """RETURNS (uint64): hash of the entity's KB ID/name""" - return self.entity_hash - - @property - def entity_(self): - """RETURNS (str): ID/name of this entity in the KB""" - return self.kb.vocab.strings[self.entity_hash] - - @property - def alias(self): - """RETURNS (uint64): hash of the alias""" - return self.alias_hash - - @property - def alias_(self): - """RETURNS (str): ID of the original alias""" - return self.kb.vocab.strings[self.alias_hash] - - @property - def entity_freq(self): - return self.entity_freq - - @property - def entity_vector(self): - return self.entity_vector - - @property - def prior_prob(self): - return self.prior_prob +from ..tokens import Span +from ..typedefs cimport hash_t +from ..errors import Errors, Warnings +from .. import util +from ..util import SimpleFrozenList, ensure_path +from ..vocab cimport Vocab +from .kb cimport KnowledgeBase +from .candidate import Candidate as Candidate -def get_candidates(KnowledgeBase kb, span) -> Iterator[Candidate]: - """ - Return candidate entities for a given span by using the text of the span as the alias - and fetching appropriate entries from the index. - This particular function is optimized to work with the built-in KB functionality, - but any other custom candidate generation method can be used in combination with the KB as well. - """ - return kb.get_alias_candidates(span.text) - - -cdef class KnowledgeBase: - """A `KnowledgeBase` instance stores unique identifiers for entities and their textual aliases, +cdef class InMemoryLookupKB(KnowledgeBase): + """An `InMemoryLookupKB` instance stores unique identifiers for entities and their textual aliases, to support entity linking of named entities to real-world concepts. 
- DOCS: https://spacy.io/api/kb + DOCS: https://spacy.io/api/kb_in_memory """ def __init__(self, Vocab vocab, entity_vector_length): - """Create a KnowledgeBase.""" - self.mem = Pool() - self.entity_vector_length = entity_vector_length + """Create an InMemoryLookupKB.""" + super().__init__(vocab, entity_vector_length) self._entry_index = PreshMap() self._alias_index = PreshMap() - self.vocab = vocab self._create_empty_vectors(dummy_hash=self.vocab.strings[""]) def _initialize_entities(self, int64_t nr_entities): @@ -104,11 +46,6 @@ cdef class KnowledgeBase: self._alias_index = PreshMap(nr_aliases + 1) self._aliases_table = alias_vec(nr_aliases + 1) - @property - def entity_vector_length(self): - """RETURNS (uint64): length of the entity vectors""" - return self.entity_vector_length - def __len__(self): return self.get_size_entities() @@ -286,7 +223,10 @@ cdef class KnowledgeBase: alias_entry.probs = probs self._aliases_table[alias_index] = alias_entry - def get_alias_candidates(self, str alias) -> Iterator[Candidate]: + def get_candidates(self, mention: Span) -> Iterable[Candidate]: + return self.get_alias_candidates(mention.text) # type: ignore + + def get_alias_candidates(self, str alias) -> Iterable[Candidate]: """ Return candidate entities for an alias. Each candidate defines the entity, the original alias, and the prior probability of that alias resolving to that entity. diff --git a/spacy/ml/models/entity_linker.py b/spacy/ml/models/entity_linker.py index d847342a3..4d18d216a 100644 --- a/spacy/ml/models/entity_linker.py +++ b/spacy/ml/models/entity_linker.py @@ -1,11 +1,12 @@ from pathlib import Path from typing import Optional, Callable, Iterable, List, Tuple from thinc.types import Floats2d -from thinc.api import chain, clone, list2ragged, reduce_mean, residual -from thinc.api import Model, Maxout, Linear, noop, tuplify, Ragged +from thinc.api import chain, list2ragged, reduce_mean, residual +from thinc.api import Model, Maxout, Linear, tuplify, Ragged from ...util import registry -from ...kb import KnowledgeBase, Candidate, get_candidates +from ...kb import KnowledgeBase, InMemoryLookupKB +from ...kb import Candidate, get_candidates, get_candidates_batch from ...vocab import Vocab from ...tokens import Span, Doc from ..extract_spans import extract_spans @@ -78,9 +79,11 @@ def span_maker_forward(model, docs: List[Doc], is_train) -> Tuple[Ragged, Callab @registry.misc("spacy.KBFromFile.v1") -def load_kb(kb_path: Path) -> Callable[[Vocab], KnowledgeBase]: - def kb_from_file(vocab): - kb = KnowledgeBase(vocab, entity_vector_length=1) +def load_kb( + kb_path: Path, +) -> Callable[[Vocab], KnowledgeBase]: + def kb_from_file(vocab: Vocab): + kb = InMemoryLookupKB(vocab, entity_vector_length=1) kb.from_disk(kb_path) return kb @@ -88,9 +91,11 @@ def load_kb(kb_path: Path) -> Callable[[Vocab], KnowledgeBase]: @registry.misc("spacy.EmptyKB.v1") -def empty_kb(entity_vector_length: int) -> Callable[[Vocab], KnowledgeBase]: - def empty_kb_factory(vocab): - return KnowledgeBase(vocab=vocab, entity_vector_length=entity_vector_length) +def empty_kb( + entity_vector_length: int, +) -> Callable[[Vocab], KnowledgeBase]: + def empty_kb_factory(vocab: Vocab): + return InMemoryLookupKB(vocab=vocab, entity_vector_length=entity_vector_length) return empty_kb_factory @@ -98,3 +103,10 @@ def empty_kb(entity_vector_length: int) -> Callable[[Vocab], KnowledgeBase]: @registry.misc("spacy.CandidateGenerator.v1") def create_candidates() -> Callable[[KnowledgeBase, Span], Iterable[Candidate]]: return 
get_candidates + + +@registry.misc("spacy.CandidateBatchGenerator.v1") +def create_candidates_batch() -> Callable[ + [KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]] +]: + return get_candidates_batch diff --git a/spacy/pipeline/entity_linker.py b/spacy/pipeline/entity_linker.py index 73a90b268..62845287b 100644 --- a/spacy/pipeline/entity_linker.py +++ b/spacy/pipeline/entity_linker.py @@ -53,9 +53,11 @@ DEFAULT_NEL_MODEL = Config().from_str(default_model_config)["model"] "incl_context": True, "entity_vector_length": 64, "get_candidates": {"@misc": "spacy.CandidateGenerator.v1"}, + "get_candidates_batch": {"@misc": "spacy.CandidateBatchGenerator.v1"}, "overwrite": True, "scorer": {"@scorers": "spacy.entity_linker_scorer.v1"}, "use_gold_ents": True, + "candidates_batch_size": 1, "threshold": None, }, default_score_weights={ @@ -75,9 +77,13 @@ def make_entity_linker( incl_context: bool, entity_vector_length: int, get_candidates: Callable[[KnowledgeBase, Span], Iterable[Candidate]], + get_candidates_batch: Callable[ + [KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]] + ], overwrite: bool, scorer: Optional[Callable], use_gold_ents: bool, + candidates_batch_size: int, threshold: Optional[float] = None, ): """Construct an EntityLinker component. @@ -90,17 +96,21 @@ def make_entity_linker( incl_prior (bool): Whether or not to include prior probabilities from the KB in the model. incl_context (bool): Whether or not to include the local context in the model. entity_vector_length (int): Size of encoding vectors in the KB. - get_candidates (Callable[[KnowledgeBase, "Span"], Iterable[Candidate]]): Function that + get_candidates (Callable[[KnowledgeBase, Span], Iterable[Candidate]]): Function that produces a list of candidates, given a certain knowledge base and a textual mention. + get_candidates_batch ( + Callable[[KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]], Iterable[Candidate]] + ): Function that produces a list of candidates, given a certain knowledge base and several textual mentions. scorer (Optional[Callable]): The scoring method. use_gold_ents (bool): Whether to copy entities from gold docs or not. If false, another component must provide entity annotations. + candidates_batch_size (int): Size of batches for entity candidate generation. threshold (Optional[float]): Confidence threshold for entity predictions. If confidence is below the threshold, prediction is discarded. If None, predictions are not filtered by any threshold. """ if not model.attrs.get("include_span_maker", False): - # The only difference in arguments here is that use_gold_ents is not available + # The only difference in arguments here is that use_gold_ents and threshold aren't available. 
return EntityLinker_v1( nlp.vocab, model, @@ -124,9 +134,11 @@ def make_entity_linker( incl_context=incl_context, entity_vector_length=entity_vector_length, get_candidates=get_candidates, + get_candidates_batch=get_candidates_batch, overwrite=overwrite, scorer=scorer, use_gold_ents=use_gold_ents, + candidates_batch_size=candidates_batch_size, threshold=threshold, ) @@ -160,9 +172,13 @@ class EntityLinker(TrainablePipe): incl_context: bool, entity_vector_length: int, get_candidates: Callable[[KnowledgeBase, Span], Iterable[Candidate]], + get_candidates_batch: Callable[ + [KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]] + ], overwrite: bool = BACKWARD_OVERWRITE, scorer: Optional[Callable] = entity_linker_score, use_gold_ents: bool, + candidates_batch_size: int, threshold: Optional[float] = None, ) -> None: """Initialize an entity linker. @@ -178,10 +194,14 @@ class EntityLinker(TrainablePipe): entity_vector_length (int): Size of encoding vectors in the KB. get_candidates (Callable[[KnowledgeBase, Span], Iterable[Candidate]]): Function that produces a list of candidates, given a certain knowledge base and a textual mention. - scorer (Optional[Callable]): The scoring method. Defaults to - Scorer.score_links. + get_candidates_batch ( + Callable[[KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]], + Iterable[Candidate]] + ): Function that produces a list of candidates, given a certain knowledge base and several textual mentions. + scorer (Optional[Callable]): The scoring method. Defaults to Scorer.score_links. use_gold_ents (bool): Whether to copy entities from gold docs or not. If false, another component must provide entity annotations. + candidates_batch_size (int): Size of batches for entity candidate generation. threshold (Optional[float]): Confidence threshold for entity predictions. If confidence is below the threshold, prediction is discarded. If None, predictions are not filtered by any threshold. DOCS: https://spacy.io/api/entitylinker#init @@ -204,22 +224,27 @@ class EntityLinker(TrainablePipe): self.incl_prior = incl_prior self.incl_context = incl_context self.get_candidates = get_candidates + self.get_candidates_batch = get_candidates_batch self.cfg: Dict[str, Any] = {"overwrite": overwrite} self.distance = CosineDistance(normalize=False) # how many neighbour sentences to take into account - # create an empty KB by default. If you want to load a predefined one, specify it in 'initialize'. + # create an empty KB by default self.kb = empty_kb(entity_vector_length)(self.vocab) self.scorer = scorer self.use_gold_ents = use_gold_ents + self.candidates_batch_size = candidates_batch_size self.threshold = threshold + if candidates_batch_size < 1: + raise ValueError(Errors.E1044) + def set_kb(self, kb_loader: Callable[[Vocab], KnowledgeBase]): """Define the KB of this pipe by providing a function that will create it using this object's vocab.""" if not callable(kb_loader): raise ValueError(Errors.E885.format(arg_type=type(kb_loader))) - self.kb = kb_loader(self.vocab) + self.kb = kb_loader(self.vocab) # type: ignore def validate_kb(self) -> None: # Raise an error if the knowledge base is not initialized. @@ -241,8 +266,8 @@ class EntityLinker(TrainablePipe): get_examples (Callable[[], Iterable[Example]]): Function that returns a representative sample of gold-standard Example objects. nlp (Language): The current nlp object the component is part of. - kb_loader (Callable[[Vocab], KnowledgeBase]): A function that creates a KnowledgeBase from a Vocab instance. 
- Note that providing this argument, will overwrite all data accumulated in the current KB. + kb_loader (Callable[[Vocab], KnowledgeBase]): A function that creates a KnowledgeBase from a Vocab + instance. Note that providing this argument will overwrite all data accumulated in the current KB. Use this only when loading a KB as-such from file. DOCS: https://spacy.io/api/entitylinker#initialize @@ -419,66 +444,93 @@ class EntityLinker(TrainablePipe): if len(doc) == 0: continue sentences = [s for s in doc.sents] - # Looping through each entity (TODO: rewrite) - for ent in doc.ents: - sent_index = sentences.index(ent.sent) - assert sent_index >= 0 - if self.incl_context: - # get n_neighbour sentences, clipped to the length of the document - start_sentence = max(0, sent_index - self.n_sents) - end_sentence = min(len(sentences) - 1, sent_index + self.n_sents) - start_token = sentences[start_sentence].start - end_token = sentences[end_sentence].end - sent_doc = doc[start_token:end_token].as_doc() - # currently, the context is the same for each entity in a sentence (should be refined) - sentence_encoding = self.model.predict([sent_doc])[0] - sentence_encoding_t = sentence_encoding.T - sentence_norm = xp.linalg.norm(sentence_encoding_t) - entity_count += 1 - if ent.label_ in self.labels_discard: - # ignoring this entity - setting to NIL - final_kb_ids.append(self.NIL) - else: - candidates = list(self.get_candidates(self.kb, ent)) - if not candidates: - # no prediction possible for this entity - setting to NIL - final_kb_ids.append(self.NIL) - elif len(candidates) == 1 and self.threshold is None: - # shortcut for efficiency reasons: take the 1 candidate - final_kb_ids.append(candidates[0].entity_) - else: - random.shuffle(candidates) - # set all prior probabilities to 0 if incl_prior=False - prior_probs = xp.asarray([c.prior_prob for c in candidates]) - if not self.incl_prior: - prior_probs = xp.asarray([0.0 for _ in candidates]) - scores = prior_probs - # add in similarity from the context - if self.incl_context: - entity_encodings = xp.asarray( - [c.entity_vector for c in candidates] - ) - entity_norm = xp.linalg.norm(entity_encodings, axis=1) - if len(entity_encodings) != len(prior_probs): - raise RuntimeError( - Errors.E147.format( - method="predict", - msg="vectors not of equal length", - ) - ) - # cosine similarity - sims = xp.dot(entity_encodings, sentence_encoding_t) / ( - sentence_norm * entity_norm - ) - if sims.shape != prior_probs.shape: - raise ValueError(Errors.E161) - scores = prior_probs + sims - (prior_probs * sims) - final_kb_ids.append( - candidates[scores.argmax().item()].entity_ - if self.threshold is None or scores.max() >= self.threshold - else EntityLinker.NIL + # Loop over entities in batches. + for ent_idx in range(0, len(doc.ents), self.candidates_batch_size): + ent_batch = doc.ents[ent_idx : ent_idx + self.candidates_batch_size] + + # Look up candidate entities. 
+ valid_ent_idx = [ + idx + for idx in range(len(ent_batch)) + if ent_batch[idx].label_ not in self.labels_discard + ] + + batch_candidates = list( + self.get_candidates_batch( + self.kb, [ent_batch[idx] for idx in valid_ent_idx] + ) + if self.candidates_batch_size > 1 + else [ + self.get_candidates(self.kb, ent_batch[idx]) + for idx in valid_ent_idx + ] + ) + + # Looping through each entity in batch (TODO: rewrite) + for j, ent in enumerate(ent_batch): + sent_index = sentences.index(ent.sent) + assert sent_index >= 0 + + if self.incl_context: + # get n_neighbour sentences, clipped to the length of the document + start_sentence = max(0, sent_index - self.n_sents) + end_sentence = min( + len(sentences) - 1, sent_index + self.n_sents ) + start_token = sentences[start_sentence].start + end_token = sentences[end_sentence].end + sent_doc = doc[start_token:end_token].as_doc() + # currently, the context is the same for each entity in a sentence (should be refined) + sentence_encoding = self.model.predict([sent_doc])[0] + sentence_encoding_t = sentence_encoding.T + sentence_norm = xp.linalg.norm(sentence_encoding_t) + entity_count += 1 + if ent.label_ in self.labels_discard: + # ignoring this entity - setting to NIL + final_kb_ids.append(self.NIL) + else: + candidates = list(batch_candidates[j]) + if not candidates: + # no prediction possible for this entity - setting to NIL + final_kb_ids.append(self.NIL) + elif len(candidates) == 1 and self.threshold is None: + # shortcut for efficiency reasons: take the 1 candidate + final_kb_ids.append(candidates[0].entity_) + else: + random.shuffle(candidates) + # set all prior probabilities to 0 if incl_prior=False + prior_probs = xp.asarray([c.prior_prob for c in candidates]) + if not self.incl_prior: + prior_probs = xp.asarray([0.0 for _ in candidates]) + scores = prior_probs + # add in similarity from the context + if self.incl_context: + entity_encodings = xp.asarray( + [c.entity_vector for c in candidates] + ) + entity_norm = xp.linalg.norm(entity_encodings, axis=1) + if len(entity_encodings) != len(prior_probs): + raise RuntimeError( + Errors.E147.format( + method="predict", + msg="vectors not of equal length", + ) + ) + # cosine similarity + sims = xp.dot(entity_encodings, sentence_encoding_t) / ( + sentence_norm * entity_norm + ) + if sims.shape != prior_probs.shape: + raise ValueError(Errors.E161) + scores = prior_probs + sims - (prior_probs * sims) + final_kb_ids.append( + candidates[scores.argmax().item()].entity_ + if self.threshold is None + or scores.max() >= self.threshold + else EntityLinker.NIL + ) + if not (len(final_kb_ids) == entity_count): err = Errors.E147.format( method="predict", msg="result variables not of equal length" diff --git a/spacy/pipeline/legacy/entity_linker.py b/spacy/pipeline/legacy/entity_linker.py index 2f8a1f8ea..c14dfa1db 100644 --- a/spacy/pipeline/legacy/entity_linker.py +++ b/spacy/pipeline/legacy/entity_linker.py @@ -68,8 +68,7 @@ class EntityLinker_v1(TrainablePipe): entity_vector_length (int): Size of encoding vectors in the KB. get_candidates (Callable[[KnowledgeBase, Span], Iterable[Candidate]]): Function that produces a list of candidates, given a certain knowledge base and a textual mention. - scorer (Optional[Callable]): The scoring method. Defaults to - Scorer.score_links. + scorer (Optional[Callable]): The scoring method. Defaults to Scorer.score_links. 
DOCS: https://spacy.io/api/entitylinker#init """ self.vocab = vocab @@ -115,7 +114,7 @@ class EntityLinker_v1(TrainablePipe): get_examples (Callable[[], Iterable[Example]]): Function that returns a representative sample of gold-standard Example objects. nlp (Language): The current nlp object the component is part of. - kb_loader (Callable[[Vocab], KnowledgeBase]): A function that creates a KnowledgeBase from a Vocab instance. + kb_loader (Callable[[Vocab], KnowledgeBase]): A function that creates an InMemoryLookupKB from a Vocab instance. Note that providing this argument, will overwrite all data accumulated in the current KB. Use this only when loading a KB as-such from file. diff --git a/spacy/tests/pipeline/test_entity_linker.py b/spacy/tests/pipeline/test_entity_linker.py index 82bc976bb..4d683acc5 100644 --- a/spacy/tests/pipeline/test_entity_linker.py +++ b/spacy/tests/pipeline/test_entity_linker.py @@ -6,7 +6,7 @@ from numpy.testing import assert_equal from spacy import registry, util from spacy.attrs import ENT_KB_ID from spacy.compat import pickle -from spacy.kb import Candidate, KnowledgeBase, get_candidates +from spacy.kb import Candidate, InMemoryLookupKB, get_candidates, KnowledgeBase from spacy.lang.en import English from spacy.ml import load_kb from spacy.pipeline import EntityLinker @@ -34,7 +34,7 @@ def assert_almost_equal(a, b): def test_issue4674(): """Test that setting entities with overlapping identifiers does not mess up IO""" nlp = English() - kb = KnowledgeBase(nlp.vocab, entity_vector_length=3) + kb = InMemoryLookupKB(nlp.vocab, entity_vector_length=3) vector1 = [0.9, 1.1, 1.01] vector2 = [1.8, 2.25, 2.01] with pytest.warns(UserWarning): @@ -51,7 +51,7 @@ def test_issue4674(): dir_path.mkdir() file_path = dir_path / "kb" kb.to_disk(str(file_path)) - kb2 = KnowledgeBase(nlp.vocab, entity_vector_length=3) + kb2 = InMemoryLookupKB(nlp.vocab, entity_vector_length=3) kb2.from_disk(str(file_path)) assert kb2.get_size_entities() == 1 @@ -59,9 +59,9 @@ def test_issue4674(): @pytest.mark.issue(6730) def test_issue6730(en_vocab): """Ensure that the KB does not accept empty strings, but otherwise IO works fine.""" - from spacy.kb import KnowledgeBase + from spacy.kb.kb_in_memory import InMemoryLookupKB - kb = KnowledgeBase(en_vocab, entity_vector_length=3) + kb = InMemoryLookupKB(en_vocab, entity_vector_length=3) kb.add_entity(entity="1", freq=148, entity_vector=[1, 2, 3]) with pytest.raises(ValueError): @@ -127,7 +127,7 @@ def test_issue7065_b(): def create_kb(vocab): # create artificial KB - mykb = KnowledgeBase(vocab, entity_vector_length=vector_length) + mykb = InMemoryLookupKB(vocab, entity_vector_length=vector_length) mykb.add_entity(entity="Q270853", freq=12, entity_vector=[9, 1, -7]) mykb.add_alias( alias="No. 
8", @@ -190,7 +190,7 @@ def test_no_entities(): def create_kb(vocab): # create artificial KB - mykb = KnowledgeBase(vocab, entity_vector_length=vector_length) + mykb = InMemoryLookupKB(vocab, entity_vector_length=vector_length) mykb.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3]) mykb.add_alias("Russ Cochran", ["Q2146908"], [0.9]) return mykb @@ -231,7 +231,7 @@ def test_partial_links(): def create_kb(vocab): # create artificial KB - mykb = KnowledgeBase(vocab, entity_vector_length=vector_length) + mykb = InMemoryLookupKB(vocab, entity_vector_length=vector_length) mykb.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3]) mykb.add_alias("Russ Cochran", ["Q2146908"], [0.9]) return mykb @@ -263,7 +263,7 @@ def test_partial_links(): def test_kb_valid_entities(nlp): """Test the valid construction of a KB with 3 entities and two aliases""" - mykb = KnowledgeBase(nlp.vocab, entity_vector_length=3) + mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=3) # adding entities mykb.add_entity(entity="Q1", freq=19, entity_vector=[8, 4, 3]) @@ -292,7 +292,7 @@ def test_kb_valid_entities(nlp): def test_kb_invalid_entities(nlp): """Test the invalid construction of a KB with an alias linked to a non-existing entity""" - mykb = KnowledgeBase(nlp.vocab, entity_vector_length=1) + mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=1) # adding entities mykb.add_entity(entity="Q1", freq=19, entity_vector=[1]) @@ -308,7 +308,7 @@ def test_kb_invalid_entities(nlp): def test_kb_invalid_probabilities(nlp): """Test the invalid construction of a KB with wrong prior probabilities""" - mykb = KnowledgeBase(nlp.vocab, entity_vector_length=1) + mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=1) # adding entities mykb.add_entity(entity="Q1", freq=19, entity_vector=[1]) @@ -322,7 +322,7 @@ def test_kb_invalid_probabilities(nlp): def test_kb_invalid_combination(nlp): """Test the invalid construction of a KB with non-matching entity and probability lists""" - mykb = KnowledgeBase(nlp.vocab, entity_vector_length=1) + mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=1) # adding entities mykb.add_entity(entity="Q1", freq=19, entity_vector=[1]) @@ -338,7 +338,7 @@ def test_kb_invalid_combination(nlp): def test_kb_invalid_entity_vector(nlp): """Test the invalid construction of a KB with non-matching entity vector lengths""" - mykb = KnowledgeBase(nlp.vocab, entity_vector_length=3) + mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=3) # adding entities mykb.add_entity(entity="Q1", freq=19, entity_vector=[1, 2, 3]) @@ -376,7 +376,7 @@ def test_kb_initialize_empty(nlp): def test_kb_serialize(nlp): """Test serialization of the KB""" - mykb = KnowledgeBase(nlp.vocab, entity_vector_length=1) + mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=1) with make_tempdir() as d: # normal read-write behaviour mykb.to_disk(d / "kb") @@ -393,12 +393,12 @@ def test_kb_serialize(nlp): @pytest.mark.issue(9137) def test_kb_serialize_2(nlp): v = [5, 6, 7, 8] - kb1 = KnowledgeBase(vocab=nlp.vocab, entity_vector_length=4) + kb1 = InMemoryLookupKB(vocab=nlp.vocab, entity_vector_length=4) kb1.set_entities(["E1"], [1], [v]) assert kb1.get_vector("E1") == v with make_tempdir() as d: kb1.to_disk(d / "kb") - kb2 = KnowledgeBase(vocab=nlp.vocab, entity_vector_length=4) + kb2 = InMemoryLookupKB(vocab=nlp.vocab, entity_vector_length=4) kb2.from_disk(d / "kb") assert kb2.get_vector("E1") == v @@ -408,7 +408,7 @@ def test_kb_set_entities(nlp): v = [5, 6, 7, 8] v1 = [1, 1, 1, 0] v2 = [2, 2, 2, 3] - kb1 
= KnowledgeBase(vocab=nlp.vocab, entity_vector_length=4) + kb1 = InMemoryLookupKB(vocab=nlp.vocab, entity_vector_length=4) kb1.set_entities(["E0"], [1], [v]) assert kb1.get_entity_strings() == ["E0"] kb1.set_entities(["E1", "E2"], [1, 9], [v1, v2]) @@ -417,7 +417,7 @@ def test_kb_set_entities(nlp): assert kb1.get_vector("E2") == v2 with make_tempdir() as d: kb1.to_disk(d / "kb") - kb2 = KnowledgeBase(vocab=nlp.vocab, entity_vector_length=4) + kb2 = InMemoryLookupKB(vocab=nlp.vocab, entity_vector_length=4) kb2.from_disk(d / "kb") assert set(kb2.get_entity_strings()) == {"E1", "E2"} assert kb2.get_vector("E1") == v1 @@ -428,7 +428,7 @@ def test_kb_serialize_vocab(nlp): """Test serialization of the KB and custom strings""" entity = "MyFunnyID" assert entity not in nlp.vocab.strings - mykb = KnowledgeBase(nlp.vocab, entity_vector_length=1) + mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=1) assert not mykb.contains_entity(entity) mykb.add_entity(entity, freq=342, entity_vector=[3]) assert mykb.contains_entity(entity) @@ -436,14 +436,14 @@ def test_kb_serialize_vocab(nlp): with make_tempdir() as d: # normal read-write behaviour mykb.to_disk(d / "kb") - mykb_new = KnowledgeBase(Vocab(), entity_vector_length=1) + mykb_new = InMemoryLookupKB(Vocab(), entity_vector_length=1) mykb_new.from_disk(d / "kb") assert entity in mykb_new.vocab.strings def test_candidate_generation(nlp): """Test correct candidate generation""" - mykb = KnowledgeBase(nlp.vocab, entity_vector_length=1) + mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=1) doc = nlp("douglas adam Adam shrubbery") douglas_ent = doc[0:1] @@ -481,7 +481,7 @@ def test_el_pipe_configuration(nlp): ruler.add_patterns([pattern]) def create_kb(vocab): - kb = KnowledgeBase(vocab, entity_vector_length=1) + kb = InMemoryLookupKB(vocab, entity_vector_length=1) kb.add_entity(entity="Q2", freq=12, entity_vector=[2]) kb.add_entity(entity="Q3", freq=5, entity_vector=[3]) kb.add_alias(alias="douglas", entities=["Q2", "Q3"], probabilities=[0.8, 0.1]) @@ -500,10 +500,21 @@ def test_el_pipe_configuration(nlp): def get_lowercased_candidates(kb, span): return kb.get_alias_candidates(span.text.lower()) + def get_lowercased_candidates_batch(kb, spans): + return [get_lowercased_candidates(kb, span) for span in spans] + @registry.misc("spacy.LowercaseCandidateGenerator.v1") - def create_candidates() -> Callable[[KnowledgeBase, "Span"], Iterable[Candidate]]: + def create_candidates() -> Callable[ + [InMemoryLookupKB, "Span"], Iterable[Candidate] + ]: return get_lowercased_candidates + @registry.misc("spacy.LowercaseCandidateBatchGenerator.v1") + def create_candidates_batch() -> Callable[ + [InMemoryLookupKB, Iterable["Span"]], Iterable[Iterable[Candidate]] + ]: + return get_lowercased_candidates_batch + # replace the pipe with a new one with with a different candidate generator entity_linker = nlp.replace_pipe( "entity_linker", @@ -511,6 +522,9 @@ def test_el_pipe_configuration(nlp): config={ "incl_context": False, "get_candidates": {"@misc": "spacy.LowercaseCandidateGenerator.v1"}, + "get_candidates_batch": { + "@misc": "spacy.LowercaseCandidateBatchGenerator.v1" + }, }, ) entity_linker.set_kb(create_kb) @@ -532,7 +546,7 @@ def test_nel_nsents(nlp): def test_vocab_serialization(nlp): """Test that string information is retained across storage""" - mykb = KnowledgeBase(nlp.vocab, entity_vector_length=1) + mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=1) # adding entities mykb.add_entity(entity="Q1", freq=27, entity_vector=[1]) @@ -552,7 +566,7 @@ 
def test_vocab_serialization(nlp): with make_tempdir() as d: mykb.to_disk(d / "kb") - kb_new_vocab = KnowledgeBase(Vocab(), entity_vector_length=1) + kb_new_vocab = InMemoryLookupKB(Vocab(), entity_vector_length=1) kb_new_vocab.from_disk(d / "kb") candidates = kb_new_vocab.get_alias_candidates("adam") @@ -568,7 +582,7 @@ def test_vocab_serialization(nlp): def test_append_alias(nlp): """Test that we can append additional alias-entity pairs""" - mykb = KnowledgeBase(nlp.vocab, entity_vector_length=1) + mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=1) # adding entities mykb.add_entity(entity="Q1", freq=27, entity_vector=[1]) @@ -599,7 +613,7 @@ def test_append_alias(nlp): @pytest.mark.filterwarnings("ignore:\\[W036") def test_append_invalid_alias(nlp): """Test that append an alias will throw an error if prior probs are exceeding 1""" - mykb = KnowledgeBase(nlp.vocab, entity_vector_length=1) + mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=1) # adding entities mykb.add_entity(entity="Q1", freq=27, entity_vector=[1]) @@ -621,7 +635,7 @@ def test_preserving_links_asdoc(nlp): vector_length = 1 def create_kb(vocab): - mykb = KnowledgeBase(vocab, entity_vector_length=vector_length) + mykb = InMemoryLookupKB(vocab, entity_vector_length=vector_length) # adding entities mykb.add_entity(entity="Q1", freq=19, entity_vector=[1]) mykb.add_entity(entity="Q2", freq=8, entity_vector=[1]) @@ -723,7 +737,7 @@ def test_overfitting_IO(): # create artificial KB - assign same prior weight to the two russ cochran's # Q2146908 (Russ Cochran): American golfer # Q7381115 (Russ Cochran): publisher - mykb = KnowledgeBase(vocab, entity_vector_length=vector_length) + mykb = InMemoryLookupKB(vocab, entity_vector_length=vector_length) mykb.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3]) mykb.add_entity(entity="Q7381115", freq=12, entity_vector=[9, 1, -7]) mykb.add_alias( @@ -805,7 +819,7 @@ def test_kb_serialization(): kb_dir = tmp_dir / "kb" nlp1 = English() assert "Q2146908" not in nlp1.vocab.strings - mykb = KnowledgeBase(nlp1.vocab, entity_vector_length=vector_length) + mykb = InMemoryLookupKB(nlp1.vocab, entity_vector_length=vector_length) mykb.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3]) mykb.add_alias(alias="Russ Cochran", entities=["Q2146908"], probabilities=[0.8]) assert "Q2146908" in nlp1.vocab.strings @@ -828,7 +842,7 @@ def test_kb_serialization(): def test_kb_pickle(): # Test that the KB can be pickled nlp = English() - kb_1 = KnowledgeBase(nlp.vocab, entity_vector_length=3) + kb_1 = InMemoryLookupKB(nlp.vocab, entity_vector_length=3) kb_1.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3]) assert not kb_1.contains_alias("Russ Cochran") kb_1.add_alias(alias="Russ Cochran", entities=["Q2146908"], probabilities=[0.8]) @@ -842,7 +856,7 @@ def test_kb_pickle(): def test_nel_pickle(): # Test that a pipeline with an EL component can be pickled def create_kb(vocab): - kb = KnowledgeBase(vocab, entity_vector_length=3) + kb = InMemoryLookupKB(vocab, entity_vector_length=3) kb.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3]) kb.add_alias(alias="Russ Cochran", entities=["Q2146908"], probabilities=[0.8]) return kb @@ -864,7 +878,7 @@ def test_nel_pickle(): def test_kb_to_bytes(): # Test that the KB's to_bytes method works correctly nlp = English() - kb_1 = KnowledgeBase(nlp.vocab, entity_vector_length=3) + kb_1 = InMemoryLookupKB(nlp.vocab, entity_vector_length=3) kb_1.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3]) 
kb_1.add_entity(entity="Q66", freq=9, entity_vector=[1, 2, 3]) kb_1.add_alias(alias="Russ Cochran", entities=["Q2146908"], probabilities=[0.8]) @@ -874,7 +888,7 @@ def test_kb_to_bytes(): ) assert kb_1.contains_alias("Russ Cochran") kb_bytes = kb_1.to_bytes() - kb_2 = KnowledgeBase(nlp.vocab, entity_vector_length=3) + kb_2 = InMemoryLookupKB(nlp.vocab, entity_vector_length=3) assert not kb_2.contains_alias("Russ Cochran") kb_2 = kb_2.from_bytes(kb_bytes) # check that both KBs are exactly the same @@ -897,7 +911,7 @@ def test_kb_to_bytes(): def test_nel_to_bytes(): # Test that a pipeline with an EL component can be converted to bytes def create_kb(vocab): - kb = KnowledgeBase(vocab, entity_vector_length=3) + kb = InMemoryLookupKB(vocab, entity_vector_length=3) kb.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3]) kb.add_alias(alias="Russ Cochran", entities=["Q2146908"], probabilities=[0.8]) return kb @@ -987,7 +1001,7 @@ def test_legacy_architectures(name, config): train_examples.append(Example.from_dict(doc, annotation)) def create_kb(vocab): - mykb = KnowledgeBase(vocab, entity_vector_length=vector_length) + mykb = InMemoryLookupKB(vocab, entity_vector_length=vector_length) mykb.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3]) mykb.add_entity(entity="Q7381115", freq=12, entity_vector=[9, 1, -7]) mykb.add_alias( @@ -1054,7 +1068,7 @@ def test_no_gold_ents(patterns): def create_kb(vocab): # create artificial KB - mykb = KnowledgeBase(vocab, entity_vector_length=vector_length) + mykb = InMemoryLookupKB(vocab, entity_vector_length=vector_length) mykb.add_entity(entity="Q613241", freq=12, entity_vector=[6, -4, 3]) mykb.add_alias("Kirby", ["Q613241"], [0.9]) # Placeholder @@ -1104,7 +1118,7 @@ def test_tokenization_mismatch(): def create_kb(vocab): # create placeholder KB - mykb = KnowledgeBase(vocab, entity_vector_length=vector_length) + mykb = InMemoryLookupKB(vocab, entity_vector_length=vector_length) mykb.add_entity(entity="Q613241", freq=12, entity_vector=[6, -4, 3]) mykb.add_alias("Kirby", ["Q613241"], [0.9]) return mykb @@ -1121,6 +1135,12 @@ def test_tokenization_mismatch(): nlp.evaluate(train_examples) +def test_abstract_kb_instantiation(): + """Test whether instantiation of abstract KB base class fails.""" + with pytest.raises(TypeError): + KnowledgeBase(None, 3) + + # fmt: off @pytest.mark.parametrize( "meet_threshold,config", @@ -1151,7 +1171,7 @@ def test_threshold(meet_threshold: bool, config: Dict[str, Any]): def create_kb(vocab): # create artificial KB - mykb = KnowledgeBase(vocab, entity_vector_length=3) + mykb = InMemoryLookupKB(vocab, entity_vector_length=3) mykb.add_entity(entity=entity_id, freq=12, entity_vector=[6, -4, 3]) mykb.add_alias( alias="Mahler", diff --git a/spacy/tests/serialize/test_resource_warning.py b/spacy/tests/serialize/test_resource_warning.py index a00b2a688..38701c6d9 100644 --- a/spacy/tests/serialize/test_resource_warning.py +++ b/spacy/tests/serialize/test_resource_warning.py @@ -3,7 +3,7 @@ from unittest import TestCase import pytest import srsly from numpy import zeros -from spacy.kb import KnowledgeBase, Writer +from spacy.kb.kb_in_memory import InMemoryLookupKB, Writer from spacy.vectors import Vectors from spacy.language import Language from spacy.pipeline import TrainablePipe @@ -71,7 +71,7 @@ def entity_linker(): nlp = Language() def create_kb(vocab): - kb = KnowledgeBase(vocab, entity_vector_length=1) + kb = InMemoryLookupKB(vocab, entity_vector_length=1) kb.add_entity("test", 0.0, zeros((1, 1), dtype="f")) 
return kb @@ -120,7 +120,7 @@ def test_writer_with_path_py35(): def test_save_and_load_knowledge_base(): nlp = Language() - kb = KnowledgeBase(nlp.vocab, entity_vector_length=1) + kb = InMemoryLookupKB(nlp.vocab, entity_vector_length=1) with make_tempdir() as d: path = d / "kb" try: @@ -129,7 +129,7 @@ def test_save_and_load_knowledge_base(): pytest.fail(str(e)) try: - kb_loaded = KnowledgeBase(nlp.vocab, entity_vector_length=1) + kb_loaded = InMemoryLookupKB(nlp.vocab, entity_vector_length=1) kb_loaded.from_disk(path) except Exception as e: pytest.fail(str(e)) diff --git a/spacy/tests/serialize/test_serialize_kb.py b/spacy/tests/serialize/test_serialize_kb.py index 1e0ae3c76..8d3653ab1 100644 --- a/spacy/tests/serialize/test_serialize_kb.py +++ b/spacy/tests/serialize/test_serialize_kb.py @@ -2,7 +2,7 @@ from typing import Callable from spacy import util from spacy.util import ensure_path, registry, load_model_from_config -from spacy.kb import KnowledgeBase +from spacy.kb.kb_in_memory import InMemoryLookupKB from spacy.vocab import Vocab from thinc.api import Config @@ -22,7 +22,7 @@ def test_serialize_kb_disk(en_vocab): dir_path.mkdir() file_path = dir_path / "kb" kb1.to_disk(str(file_path)) - kb2 = KnowledgeBase(vocab=en_vocab, entity_vector_length=3) + kb2 = InMemoryLookupKB(vocab=en_vocab, entity_vector_length=3) kb2.from_disk(str(file_path)) # final assertions @@ -30,7 +30,7 @@ def test_serialize_kb_disk(en_vocab): def _get_dummy_kb(vocab): - kb = KnowledgeBase(vocab, entity_vector_length=3) + kb = InMemoryLookupKB(vocab, entity_vector_length=3) kb.add_entity(entity="Q53", freq=33, entity_vector=[0, 5, 3]) kb.add_entity(entity="Q17", freq=2, entity_vector=[7, 1, 0]) kb.add_entity(entity="Q007", freq=7, entity_vector=[0, 0, 7]) @@ -104,7 +104,7 @@ def test_serialize_subclassed_kb(): custom_field = 666 """ - class SubKnowledgeBase(KnowledgeBase): + class SubInMemoryLookupKB(InMemoryLookupKB): def __init__(self, vocab, entity_vector_length, custom_field): super().__init__(vocab, entity_vector_length) self.custom_field = custom_field @@ -112,9 +112,9 @@ def test_serialize_subclassed_kb(): @registry.misc("spacy.CustomKB.v1") def custom_kb( entity_vector_length: int, custom_field: int - ) -> Callable[[Vocab], KnowledgeBase]: + ) -> Callable[[Vocab], InMemoryLookupKB]: def custom_kb_factory(vocab): - kb = SubKnowledgeBase( + kb = SubInMemoryLookupKB( vocab=vocab, entity_vector_length=entity_vector_length, custom_field=custom_field, @@ -129,7 +129,7 @@ def test_serialize_subclassed_kb(): nlp.initialize() entity_linker = nlp.get_pipe("entity_linker") - assert type(entity_linker.kb) == SubKnowledgeBase + assert type(entity_linker.kb) == SubInMemoryLookupKB assert entity_linker.kb.entity_vector_length == 342 assert entity_linker.kb.custom_field == 666 @@ -139,6 +139,6 @@ def test_serialize_subclassed_kb(): nlp2 = util.load_model_from_path(tmp_dir) entity_linker2 = nlp2.get_pipe("entity_linker") # After IO, the KB is the standard one - assert type(entity_linker2.kb) == KnowledgeBase + assert type(entity_linker2.kb) == InMemoryLookupKB assert entity_linker2.kb.entity_vector_length == 342 assert not hasattr(entity_linker2.kb, "custom_field") diff --git a/website/docs/api/architectures.md b/website/docs/api/architectures.md index 2537faff6..a3cb07b44 100644 --- a/website/docs/api/architectures.md +++ b/website/docs/api/architectures.md @@ -587,8 +587,8 @@ consists of either two or three subnetworks: run once for each batch. 
- **lower**: Construct a feature-specific vector for each `(token, feature)` pair. This is also run once for each batch. Constructing the state - representation is then a matter of summing the component features and - applying the non-linearity. + representation is then a matter of summing the component features and applying + the non-linearity. - **upper** (optional): A feed-forward network that predicts scores from the state representation. If not present, the output from the lower model is used as action scores directly. @@ -628,8 +628,8 @@ same signature, but the `use_upper` argument was `True` by default. > ``` Build a tagger model, using a provided token-to-vector component. The tagger -model adds a linear layer with softmax activation to predict scores given -the token vectors. +model adds a linear layer with softmax activation to predict scores given the +token vectors. | Name | Description | | ----------- | ------------------------------------------------------------------------------------------ | @@ -919,6 +919,6 @@ A function that reads an existing `KnowledgeBase` from file. A function that takes as input a [`KnowledgeBase`](/api/kb) and a [`Span`](/api/span) object denoting a named entity, and returns a list of -plausible [`Candidate`](/api/kb/#candidate) objects. The default -`CandidateGenerator` uses the text of a mention to find its potential -aliases in the `KnowledgeBase`. Note that this function is case-dependent. +plausible [`Candidate`](/api/kb#candidate) objects. The default +`CandidateGenerator` uses the text of a mention to find its potential aliases in +the `KnowledgeBase`. Note that this function is case-dependent. diff --git a/website/docs/api/entitylinker.md b/website/docs/api/entitylinker.md index 43e08a39c..40ec8afb5 100644 --- a/website/docs/api/entitylinker.md +++ b/website/docs/api/entitylinker.md @@ -14,7 +14,8 @@ entities) to unique identifiers, grounding the named entities into the "real world". It requires a `KnowledgeBase`, as well as a function to generate plausible candidates from that `KnowledgeBase` given a certain textual mention, and a machine learning model to pick the right candidate, given the local -context of the mention. +context of the mention. `EntityLinker` defaults to using the +[`InMemoryLookupKB`](/api/kb_in_memory) implementation. ## Assigned Attributes {#assigned-attributes} @@ -170,7 +171,7 @@ with the current vocab. > > ```python > def create_kb(vocab): -> kb = KnowledgeBase(vocab, entity_vector_length=128) +> kb = InMemoryLookupKB(vocab, entity_vector_length=128) > kb.add_entity(...) > kb.add_alias(...) > return kb diff --git a/website/docs/api/kb.md b/website/docs/api/kb.md index e7a8fcd6f..b217a1678 100644 --- a/website/docs/api/kb.md +++ b/website/docs/api/kb.md @@ -4,27 +4,45 @@ teaser: A storage class for entities and aliases of a specific knowledge base (ontology) tag: class -source: spacy/kb.pyx +source: spacy/kb/kb.pyx new: 2.2 --- -The `KnowledgeBase` object provides a method to generate -[`Candidate`](/api/kb/#candidate) objects, which are plausible external +The `KnowledgeBase` object is an abstract class providing a method to generate +[`Candidate`](/api/kb#candidate) objects, which are plausible external identifiers given a certain textual mention. Each such `Candidate` holds information from the relevant KB entities, such as its frequency in text and possible aliases. Each entity in the knowledge base also has a pretrained entity vector of a fixed size. 
+Beyond that, `KnowledgeBase` classes have to implement a number of utility +functions called by the [`EntityLinker`](/api/entitylinker) component. + + + +This class was not abstract up to spaCy version 3.5. The `KnowledgeBase` +implementation up to that point is available as `InMemoryLookupKB` from 3.5 +onwards. + + + ## KnowledgeBase.\_\_init\_\_ {#init tag="method"} -Create the knowledge base. +`KnowledgeBase` is an abstract class and cannot be instantiated. Its child +classes should call `__init__()` to set up some necessary attributes. > #### Example > > ```python > from spacy.kb import KnowledgeBase +> from spacy.vocab import Vocab +> +> class FullyImplementedKB(KnowledgeBase): +> def __init__(self, vocab: Vocab, entity_vector_length: int): +> super().__init__(vocab, entity_vector_length) +> ... > vocab = nlp.vocab -> kb = KnowledgeBase(vocab=vocab, entity_vector_length=64) +> kb = FullyImplementedKB(vocab=vocab, entity_vector_length=64) > ``` | Name | Description | @@ -40,133 +58,66 @@ The length of the fixed-size entity vectors in the knowledge base. | ----------- | ------------------------------------------------ | | **RETURNS** | Length of the fixed-size entity vectors. ~~int~~ | -## KnowledgeBase.add_entity {#add_entity tag="method"} +## KnowledgeBase.get_candidates {#get_candidates tag="method"} -Add an entity to the knowledge base, specifying its corpus frequency and entity -vector, which should be of length -[`entity_vector_length`](/api/kb#entity_vector_length). +Given a certain textual mention as input, retrieve a list of candidate entities +of type [`Candidate`](/api/kb#candidate). > #### Example > > ```python -> kb.add_entity(entity="Q42", freq=32, entity_vector=vector1) -> kb.add_entity(entity="Q463035", freq=111, entity_vector=vector2) +> from spacy.lang.en import English +> nlp = English() +> doc = nlp("Douglas Adams wrote 'The Hitchhiker's Guide to the Galaxy'.") +> candidates = kb.get_candidates(doc[0:2]) > ``` -| Name | Description | -| --------------- | ---------------------------------------------------------- | -| `entity` | The unique entity identifier. ~~str~~ | -| `freq` | The frequency of the entity in a typical corpus. ~~float~~ | -| `entity_vector` | The pretrained vector of the entity. ~~numpy.ndarray~~ | +| Name | Description | +| ----------- | -------------------------------------------------------------------- | +| `mention` | The textual mention or alias. ~~Span~~ | +| **RETURNS** | An iterable of relevant `Candidate` objects. ~~Iterable[Candidate]~~ | -## KnowledgeBase.set_entities {#set_entities tag="method"} +## KnowledgeBase.get_candidates_batch {#get_candidates_batch tag="method"} -Define the full list of entities in the knowledge base, specifying the corpus -frequency and entity vector for each entity. +Same as [`get_candidates()`](/api/kb#get_candidates), but for an arbitrary +number of mentions. The [`EntityLinker`](/api/entitylinker) component will call +`get_candidates_batch()` instead of `get_candidates()`, if the config parameter +`candidates_batch_size` is greater or equal than 1. + +The default implementation of `get_candidates_batch()` executes +`get_candidates()` in a loop. We recommend implementing a more efficient way to +retrieve candidates for multiple mentions at once, if performance is of concern +to you. 
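As a rough sketch of that recommendation: a custom `KnowledgeBase` subclass could gather all mention texts and query its backing store once. The class name `BatchedKB` and the `_lookup_many()` helper below are hypothetical placeholders rather than part of the spaCy API, the remaining abstract methods are omitted for brevity, and the imports assume `KnowledgeBase` and `Candidate` stay importable from `spacy.kb` after this refactor.

```python
from typing import Iterable, List

from spacy.kb import Candidate, KnowledgeBase
from spacy.tokens import Span


class BatchedKB(KnowledgeBase):
    # ... __init__ and the other abstract methods omitted ...

    def get_candidates_batch(
        self, mentions: Iterable[Span]
    ) -> Iterable[Iterable[Candidate]]:
        # Collect all mention texts first so the backing store can be queried
        # in a single round trip instead of once per mention.
        texts: List[str] = [mention.text for mention in mentions]
        # `_lookup_many` is a hypothetical helper that returns one list of
        # Candidate objects per input text.
        return self._lookup_many(texts)
```

With `candidates_batch_size` set in the `entity_linker` config, the component routes lookups through this batched method, as described above.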
> #### Example > > ```python -> kb.set_entities(entity_list=["Q42", "Q463035"], freq_list=[32, 111], vector_list=[vector1, vector2]) +> from spacy.lang.en import English +> nlp = English() +> doc = nlp("Douglas Adams wrote 'The Hitchhiker's Guide to the Galaxy'.") +> candidates = kb.get_candidates((doc[0:2], doc[3:])) > ``` -| Name | Description | -| ------------- | ---------------------------------------------------------------- | -| `entity_list` | List of unique entity identifiers. ~~Iterable[Union[str, int]]~~ | -| `freq_list` | List of entity frequencies. ~~Iterable[int]~~ | -| `vector_list` | List of entity vectors. ~~Iterable[numpy.ndarray]~~ | - -## KnowledgeBase.add_alias {#add_alias tag="method"} - -Add an alias or mention to the knowledge base, specifying its potential KB -identifiers and their prior probabilities. The entity identifiers should refer -to entities previously added with [`add_entity`](/api/kb#add_entity) or -[`set_entities`](/api/kb#set_entities). The sum of the prior probabilities -should not exceed 1. Note that an empty string can not be used as alias. - -> #### Example -> -> ```python -> kb.add_alias(alias="Douglas", entities=["Q42", "Q463035"], probabilities=[0.6, 0.3]) -> ``` - -| Name | Description | -| --------------- | --------------------------------------------------------------------------------- | -| `alias` | The textual mention or alias. Can not be the empty string. ~~str~~ | -| `entities` | The potential entities that the alias may refer to. ~~Iterable[Union[str, int]]~~ | -| `probabilities` | The prior probabilities of each entity. ~~Iterable[float]~~ | - -## KnowledgeBase.\_\_len\_\_ {#len tag="method"} - -Get the total number of entities in the knowledge base. - -> #### Example -> -> ```python -> total_entities = len(kb) -> ``` - -| Name | Description | -| ----------- | ----------------------------------------------------- | -| **RETURNS** | The number of entities in the knowledge base. ~~int~~ | - -## KnowledgeBase.get_entity_strings {#get_entity_strings tag="method"} - -Get a list of all entity IDs in the knowledge base. - -> #### Example -> -> ```python -> all_entities = kb.get_entity_strings() -> ``` - -| Name | Description | -| ----------- | --------------------------------------------------------- | -| **RETURNS** | The list of entities in the knowledge base. ~~List[str]~~ | - -## KnowledgeBase.get_size_aliases {#get_size_aliases tag="method"} - -Get the total number of aliases in the knowledge base. - -> #### Example -> -> ```python -> total_aliases = kb.get_size_aliases() -> ``` - -| Name | Description | -| ----------- | ---------------------------------------------------- | -| **RETURNS** | The number of aliases in the knowledge base. ~~int~~ | - -## KnowledgeBase.get_alias_strings {#get_alias_strings tag="method"} - -Get a list of all aliases in the knowledge base. - -> #### Example -> -> ```python -> all_aliases = kb.get_alias_strings() -> ``` - -| Name | Description | -| ----------- | -------------------------------------------------------- | -| **RETURNS** | The list of aliases in the knowledge base. ~~List[str]~~ | +| Name | Description | +| ----------- | -------------------------------------------------------------------------------------------- | +| `mentions` | The textual mention or alias. ~~Iterable[Span]~~ | +| **RETURNS** | An iterable of iterable with relevant `Candidate` objects. 
~~Iterable[Iterable[Candidate]]~~ | ## KnowledgeBase.get_alias_candidates {#get_alias_candidates tag="method"} -Given a certain textual mention as input, retrieve a list of candidate entities -of type [`Candidate`](/api/kb/#candidate). + +This method is _not_ available from spaCy 3.5 onwards. + -> #### Example -> -> ```python -> candidates = kb.get_alias_candidates("Douglas") -> ``` - -| Name | Description | -| ----------- | ------------------------------------------------------------- | -| `alias` | The textual mention or alias. ~~str~~ | -| **RETURNS** | The list of relevant `Candidate` objects. ~~List[Candidate]~~ | +From spaCy 3.5 on `KnowledgeBase` is an abstract class (with +[`InMemoryLookupKB`](/api/kb_in_memory) being a drop-in replacement) to allow +more flexibility in customizing knowledge bases. Some of its methods were moved +to [`InMemoryLookupKB`](/api/kb_in_memory) during this refactoring, one of those +being `get_alias_candidates()`. This method is now available as +[`InMemoryLookupKB.get_alias_candidates()`](/api/kb_in_memory#get_alias_candidates). +Note: [`InMemoryLookupKB.get_candidates()`](/api/kb_in_memory#get_candidates) +defaults to +[`InMemoryLookupKB.get_alias_candidates()`](/api/kb_in_memory#get_alias_candidates). ## KnowledgeBase.get_vector {#get_vector tag="method"} @@ -178,27 +129,30 @@ Given a certain entity ID, retrieve its pretrained entity vector. > vector = kb.get_vector("Q42") > ``` -| Name | Description | -| ----------- | ------------------------------------ | -| `entity` | The entity ID. ~~str~~ | -| **RETURNS** | The entity vector. ~~numpy.ndarray~~ | +| Name | Description | +| ----------- | -------------------------------------- | +| `entity` | The entity ID. ~~str~~ | +| **RETURNS** | The entity vector. ~~Iterable[float]~~ | -## KnowledgeBase.get_prior_prob {#get_prior_prob tag="method"} +## KnowledgeBase.get_vectors {#get_vectors tag="method"} -Given a certain entity ID and a certain textual mention, retrieve the prior -probability of the fact that the mention links to the entity ID. +Same as [`get_vector()`](/api/kb#get_vector), but for an arbitrary number of +entity IDs. + +The default implementation of `get_vectors()` executes `get_vector()` in a loop. +We recommend implementing a more efficient way to retrieve vectors for multiple +entities at once, if performance is of concern to you. > #### Example > > ```python -> probability = kb.get_prior_prob("Q42", "Douglas") +> vectors = kb.get_vectors(("Q42", "Q3107329")) > ``` -| Name | Description | -| ----------- | ------------------------------------------------------------------------- | -| `entity` | The entity ID. ~~str~~ | -| `alias` | The textual mention or alias. ~~str~~ | -| **RETURNS** | The prior probability of the `alias` referring to the `entity`. ~~float~~ | +| Name | Description | +| ----------- | --------------------------------------------------------- | +| `entities` | The entity IDs. ~~Iterable[str]~~ | +| **RETURNS** | The entity vectors. ~~Iterable[Iterable[numpy.ndarray]]~~ | ## KnowledgeBase.to_disk {#to_disk tag="method"} @@ -207,12 +161,13 @@ Save the current state of the knowledge base to a directory. > #### Example > > ```python -> kb.to_disk(loc) +> kb.to_disk(path) > ``` -| Name | Description | -| ----- | ------------------------------------------------------------------------------------------------------------------------------------------ | -| `loc` | A path to a directory, which will be created if it doesn't exist. Paths may be either strings or `Path`-like objects. 
~~Union[str, Path]~~ | +| Name | Description | +| --------- | ------------------------------------------------------------------------------------------------------------------------------------------ | +| `path` | A path to a directory, which will be created if it doesn't exist. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ | +| `exclude` | List of components to exclude. ~~Iterable[str]~~ | ## KnowledgeBase.from_disk {#from_disk tag="method"} @@ -222,16 +177,16 @@ Restore the state of the knowledge base from a given directory. Note that the > #### Example > > ```python -> from spacy.kb import KnowledgeBase > from spacy.vocab import Vocab > vocab = Vocab().from_disk("/path/to/vocab") -> kb = KnowledgeBase(vocab=vocab, entity_vector_length=64) +> kb = FullyImplementedKB(vocab=vocab, entity_vector_length=64) > kb.from_disk("/path/to/kb") > ``` | Name | Description | | ----------- | ----------------------------------------------------------------------------------------------- | | `loc` | A path to a directory. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ | +| `exclude` | List of components to exclude. ~~Iterable[str]~~ | | **RETURNS** | The modified `KnowledgeBase` object. ~~KnowledgeBase~~ | ## Candidate {#candidate tag="class"} diff --git a/website/docs/api/kb_in_memory.md b/website/docs/api/kb_in_memory.md new file mode 100644 index 000000000..c9ce624f0 --- /dev/null +++ b/website/docs/api/kb_in_memory.md @@ -0,0 +1,302 @@ +--- +title: InMemoryLookupKB +teaser: + The default implementation of the KnowledgeBase interface. Stores all + information in-memory. +tag: class +source: spacy/kb/kb_in_memory.pyx +new: 3.5 +--- + +The `InMemoryLookupKB` class inherits from [`KnowledgeBase`](/api/kb) and +implements all of its methods. It stores all KB data in-memory and generates +[`Candidate`](/api/kb#candidate) objects by exactly matching mentions with +entity names. It's highly optimized for both a low memory footprint and speed of +retrieval. + +## InMemoryLookupKB.\_\_init\_\_ {#init tag="method"} + +Create the knowledge base. + +> #### Example +> +> ```python +> from spacy.kb import KnowledgeBase +> vocab = nlp.vocab +> kb = KnowledgeBase(vocab=vocab, entity_vector_length=64) +> ``` + +| Name | Description | +| ---------------------- | ------------------------------------------------ | +| `vocab` | The shared vocabulary. ~~Vocab~~ | +| `entity_vector_length` | Length of the fixed-size entity vectors. ~~int~~ | + +## InMemoryLookupKB.entity_vector_length {#entity_vector_length tag="property"} + +The length of the fixed-size entity vectors in the knowledge base. + +| Name | Description | +| ----------- | ------------------------------------------------ | +| **RETURNS** | Length of the fixed-size entity vectors. ~~int~~ | + +## InMemoryLookupKB.add_entity {#add_entity tag="method"} + +Add an entity to the knowledge base, specifying its corpus frequency and entity +vector, which should be of length +[`entity_vector_length`](/api/kb_in_memory#entity_vector_length). + +> #### Example +> +> ```python +> kb.add_entity(entity="Q42", freq=32, entity_vector=vector1) +> kb.add_entity(entity="Q463035", freq=111, entity_vector=vector2) +> ``` + +| Name | Description | +| --------------- | ---------------------------------------------------------- | +| `entity` | The unique entity identifier. ~~str~~ | +| `freq` | The frequency of the entity in a typical corpus. ~~float~~ | +| `entity_vector` | The pretrained vector of the entity. 
~~numpy.ndarray~~ | + +## InMemoryLookupKB.set_entities {#set_entities tag="method"} + +Define the full list of entities in the knowledge base, specifying the corpus +frequency and entity vector for each entity. + +> #### Example +> +> ```python +> kb.set_entities(entity_list=["Q42", "Q463035"], freq_list=[32, 111], vector_list=[vector1, vector2]) +> ``` + +| Name | Description | +| ------------- | ---------------------------------------------------------------- | +| `entity_list` | List of unique entity identifiers. ~~Iterable[Union[str, int]]~~ | +| `freq_list` | List of entity frequencies. ~~Iterable[int]~~ | +| `vector_list` | List of entity vectors. ~~Iterable[numpy.ndarray]~~ | + +## InMemoryLookupKB.add_alias {#add_alias tag="method"} + +Add an alias or mention to the knowledge base, specifying its potential KB +identifiers and their prior probabilities. The entity identifiers should refer +to entities previously added with [`add_entity`](/api/kb_in_memory#add_entity) +or [`set_entities`](/api/kb_in_memory#set_entities). The sum of the prior +probabilities should not exceed 1. Note that an empty string can not be used as +alias. + +> #### Example +> +> ```python +> kb.add_alias(alias="Douglas", entities=["Q42", "Q463035"], probabilities=[0.6, 0.3]) +> ``` + +| Name | Description | +| --------------- | --------------------------------------------------------------------------------- | +| `alias` | The textual mention or alias. Can not be the empty string. ~~str~~ | +| `entities` | The potential entities that the alias may refer to. ~~Iterable[Union[str, int]]~~ | +| `probabilities` | The prior probabilities of each entity. ~~Iterable[float]~~ | + +## InMemoryLookupKB.\_\_len\_\_ {#len tag="method"} + +Get the total number of entities in the knowledge base. + +> #### Example +> +> ```python +> total_entities = len(kb) +> ``` + +| Name | Description | +| ----------- | ----------------------------------------------------- | +| **RETURNS** | The number of entities in the knowledge base. ~~int~~ | + +## InMemoryLookupKB.get_entity_strings {#get_entity_strings tag="method"} + +Get a list of all entity IDs in the knowledge base. + +> #### Example +> +> ```python +> all_entities = kb.get_entity_strings() +> ``` + +| Name | Description | +| ----------- | --------------------------------------------------------- | +| **RETURNS** | The list of entities in the knowledge base. ~~List[str]~~ | + +## InMemoryLookupKB.get_size_aliases {#get_size_aliases tag="method"} + +Get the total number of aliases in the knowledge base. + +> #### Example +> +> ```python +> total_aliases = kb.get_size_aliases() +> ``` + +| Name | Description | +| ----------- | ---------------------------------------------------- | +| **RETURNS** | The number of aliases in the knowledge base. ~~int~~ | + +## InMemoryLookupKB.get_alias_strings {#get_alias_strings tag="method"} + +Get a list of all aliases in the knowledge base. + +> #### Example +> +> ```python +> all_aliases = kb.get_alias_strings() +> ``` + +| Name | Description | +| ----------- | -------------------------------------------------------- | +| **RETURNS** | The list of aliases in the knowledge base. ~~List[str]~~ | + +## InMemoryLookupKB.get_candidates {#get_candidates tag="method"} + +Given a certain textual mention as input, retrieve a list of candidate entities +of type [`Candidate`](/api/kb#candidate). Wraps +[`get_alias_candidates()`](/api/kb_in_memory#get_alias_candidates). 
+ +> #### Example +> +> ```python +> from spacy.lang.en import English +> nlp = English() +> doc = nlp("Douglas Adams wrote 'The Hitchhiker's Guide to the Galaxy'.") +> candidates = kb.get_candidates(doc[0:2]) +> ``` + +| Name | Description | +| ----------- | -------------------------------------------------------------------- | +| `mention` | The textual mention or alias. ~~Span~~ | +| **RETURNS** | An iterable of relevant `Candidate` objects. ~~Iterable[Candidate]~~ | + +## InMemoryLookupKB.get_candidates_batch {#get_candidates_batch tag="method"} + +Same as [`get_candidates()`](/api/kb_in_memory#get_candidates), but for an +arbitrary number of mentions. The [`EntityLinker`](/api/entitylinker) component +will call `get_candidates_batch()` instead of `get_candidates()`, if the config +parameter `candidates_batch_size` is greater or equal than 1. + +The default implementation of `get_candidates_batch()` executes +`get_candidates()` in a loop. We recommend implementing a more efficient way to +retrieve candidates for multiple mentions at once, if performance is of concern +to you. + +> #### Example +> +> ```python +> from spacy.lang.en import English +> nlp = English() +> doc = nlp("Douglas Adams wrote 'The Hitchhiker's Guide to the Galaxy'.") +> candidates = kb.get_candidates((doc[0:2], doc[3:])) +> ``` + +| Name | Description | +| ----------- | -------------------------------------------------------------------------------------------- | +| `mentions` | The textual mention or alias. ~~Iterable[Span]~~ | +| **RETURNS** | An iterable of iterable with relevant `Candidate` objects. ~~Iterable[Iterable[Candidate]]~~ | + +## InMemoryLookupKB.get_alias_candidates {#get_alias_candidates tag="method"} + +Given a certain textual mention as input, retrieve a list of candidate entities +of type [`Candidate`](/api/kb#candidate). + +> #### Example +> +> ```python +> candidates = kb.get_alias_candidates("Douglas") +> ``` + +| Name | Description | +| ----------- | ------------------------------------------------------------- | +| `alias` | The textual mention or alias. ~~str~~ | +| **RETURNS** | The list of relevant `Candidate` objects. ~~List[Candidate]~~ | + +## InMemoryLookupKB.get_vector {#get_vector tag="method"} + +Given a certain entity ID, retrieve its pretrained entity vector. + +> #### Example +> +> ```python +> vector = kb.get_vector("Q42") +> ``` + +| Name | Description | +| ----------- | ------------------------------------ | +| `entity` | The entity ID. ~~str~~ | +| **RETURNS** | The entity vector. ~~numpy.ndarray~~ | + +## InMemoryLookupKB.get_vectors {#get_vectors tag="method"} + +Same as [`get_vector()`](/api/kb_in_memory#get_vector), but for an arbitrary +number of entity IDs. + +The default implementation of `get_vectors()` executes `get_vector()` in a loop. +We recommend implementing a more efficient way to retrieve vectors for multiple +entities at once, if performance is of concern to you. + +> #### Example +> +> ```python +> vectors = kb.get_vectors(("Q42", "Q3107329")) +> ``` + +| Name | Description | +| ----------- | --------------------------------------------------------- | +| `entities` | The entity IDs. ~~Iterable[str]~~ | +| **RETURNS** | The entity vectors. ~~Iterable[Iterable[numpy.ndarray]]~~ | + +## InMemoryLookupKB.get_prior_prob {#get_prior_prob tag="method"} + +Given a certain entity ID and a certain textual mention, retrieve the prior +probability of the fact that the mention links to the entity ID. 
+ +> #### Example +> +> ```python +> probability = kb.get_prior_prob("Q42", "Douglas") +> ``` + +| Name | Description | +| ----------- | ------------------------------------------------------------------------- | +| `entity` | The entity ID. ~~str~~ | +| `alias` | The textual mention or alias. ~~str~~ | +| **RETURNS** | The prior probability of the `alias` referring to the `entity`. ~~float~~ | + +## InMemoryLookupKB.to_disk {#to_disk tag="method"} + +Save the current state of the knowledge base to a directory. + +> #### Example +> +> ```python +> kb.to_disk(path) +> ``` + +| Name | Description | +| --------- | ------------------------------------------------------------------------------------------------------------------------------------------ | +| `path` | A path to a directory, which will be created if it doesn't exist. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ | +| `exclude` | List of components to exclude. ~~Iterable[str]~~ | + +## InMemoryLookupKB.from_disk {#from_disk tag="method"} + +Restore the state of the knowledge base from a given directory. Note that the +[`Vocab`](/api/vocab) should also be the same as the one used to create the KB. + +> #### Example +> +> ```python +> from spacy.vocab import Vocab +> vocab = Vocab().from_disk("/path/to/vocab") +> kb = FullyImplementedKB(vocab=vocab, entity_vector_length=64) +> kb.from_disk("/path/to/kb") +> ``` + +| Name | Description | +| ----------- | ----------------------------------------------------------------------------------------------- | +| `loc` | A path to a directory. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ | +| `exclude` | List of components to exclude. ~~Iterable[str]~~ | +| **RETURNS** | The modified `KnowledgeBase` object. ~~KnowledgeBase~~ | diff --git a/website/docs/usage/101/_architecture.md b/website/docs/usage/101/_architecture.md index 22e2b961e..4ebca2756 100644 --- a/website/docs/usage/101/_architecture.md +++ b/website/docs/usage/101/_architecture.md @@ -78,7 +78,9 @@ operates on a `Doc` and gives you access to the matched tokens **in context**. | Name | Description | | ------------------------------------------------ | -------------------------------------------------------------------------------------------------- | | [`Corpus`](/api/corpus) | Class for managing annotated corpora for training and evaluation data. | -| [`KnowledgeBase`](/api/kb) | Storage for entities and aliases of a knowledge base for entity linking. | +| [`KnowledgeBase`](/api/kb) | Abstract base class for storage and retrieval of data for entity linking. | +| [`InMemoryLookupKB`](/api/kb_in_memory) | Implementation of `KnowledgeBase` storing all data in memory. | +| [`Candidate`](/api/kb#candidate) | Object associating a textual mention with a specific entity contained in a `KnowledgeBase`. | | [`Lookups`](/api/lookups) | Container for convenient access to large lookup tables and dictionaries. | | [`MorphAnalysis`](/api/morphology#morphanalysis) | A morphological analysis. | | [`Morphology`](/api/morphology) | Store morphological analyses and map them to and from hash values. | From 2602a30d326e561776fecce95ac03cc5df55652b Mon Sep 17 00:00:00 2001 From: Paul O'Leary McCann Date: Thu, 8 Sep 2022 20:42:47 +0900 Subject: [PATCH 17/82] Fix DVC command example (#11457) This command doesn't have the project dir, but it's required. 
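For example, with a hypothetical project directory `./my_project` and a workflow named `all` defined in its `project.yml`, the corrected invocation would be:

```cli
$ python -m spacy project dvc ./my_project all
```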
--- website/docs/usage/projects.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/usage/projects.md b/website/docs/usage/projects.md index 566ae561b..35150035a 100644 --- a/website/docs/usage/projects.md +++ b/website/docs/usage/projects.md @@ -758,7 +758,7 @@ and [`dvc repro`](https://dvc.org/doc/command-reference/repro) to reproduce the workflow or individual commands. ```cli -$ python -m spacy project dvc [workflow_name] +$ python -m spacy project dvc [project_dir] [workflow_name] ``` From aac9a58c2935768c7751b8db7043e7c073362c90 Mon Sep 17 00:00:00 2001 From: Madeesh Kannan Date: Fri, 9 Sep 2022 10:46:01 +0200 Subject: [PATCH 18/82] Add docs for the `spacy.models_and_pipes_with_nvtx_range.v1` callback (#11463) * Add docs for the `spacy.models_and_pipes_with_nvtx_range.v1` callback * Add `new` tag --- website/docs/api/top-level.md | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/website/docs/api/top-level.md b/website/docs/api/top-level.md index 220b2d6e9..bc53fc868 100644 --- a/website/docs/api/top-level.md +++ b/website/docs/api/top-level.md @@ -887,6 +887,27 @@ backprop passes. | `backprop_color` | Color identifier for backpropagation passes. Defaults to `-1`. ~~int~~ | | **CREATES** | A function that takes the current `nlp` and wraps forward/backprop passes in NVTX ranges. ~~Callable[[Language], Language]~~ | +### spacy.models_and_pipes_with_nvtx_range.v1 {#models_and_pipes_with_nvtx_range tag="registered function" new="3.4"} + +> #### Example config +> +> ```ini +> [nlp] +> after_pipeline_creation = {"@callbacks":"spacy.models_and_pipes_with_nvtx_range.v1"} +> ``` + +Recursively wrap both the models and methods of each pipe using +[NVTX](https://nvidia.github.io/NVTX/) range markers. By default, the following +methods are wrapped: `pipe`, `predict`, `set_annotations`, `update`, `rehearse`, +`get_loss`, `initialize`, `begin_update`, `finish_update`, `update`. + +| Name | Description | +| --------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `forward_color` | Color identifier for model forward passes. Defaults to `-1`. ~~int~~ | +| `backprop_color` | Color identifier for model backpropagation passes. Defaults to `-1`. ~~int~~ | +| `additional_pipe_functions` | Additional pipeline methods to wrap. Keys are pipeline names and values are lists of method identifiers. Defaults to `None`. ~~Optional[Dict[str, List[str]]]~~ | +| **CREATES** | A function that takes the current `nlp` and wraps pipe models and methods in NVTX ranges. 
~~Callable[[Language], Language]~~ | + ## Training data and alignment {#gold source="spacy/training"} ### training.offsets_to_biluo_tags {#offsets_to_biluo_tags tag="function"} From 0c72c6bb2c04677654ffeda2a706e3df3a58b3cc Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 9 Sep 2022 11:21:17 +0200 Subject: [PATCH 19/82] Auto-format code with black (#11468) Co-authored-by: explosion-bot --- spacy/cli/info.py | 1 + 1 file changed, 1 insertion(+) diff --git a/spacy/cli/info.py b/spacy/cli/info.py index e6ac4270f..974bc0f4e 100644 --- a/spacy/cli/info.py +++ b/spacy/cli/info.py @@ -147,6 +147,7 @@ def info_installed_model_url(model: str) -> Optional[str]: # something else, like no file or invalid JSON return None + def info_model_url(model: str) -> Dict[str, Any]: """Return the download URL for the latest version of a pipeline.""" version = get_latest_version(model) From 8a86a35eab45a69d795c2950da61058047d1a516 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Fri, 9 Sep 2022 15:10:04 +0200 Subject: [PATCH 20/82] Remove has_letters in config template (#11465) Due to problems with the javascript conversion in the website quickstart, remove the `has_letters` setting to simplify generating `attrs` for the default `tok2vec`. Additionally reduce `PREFIX` as in the trained pipelines. --- spacy/cli/templates/quickstart_training.jinja | 7 +------ .../cli/templates/quickstart_training_recommendations.yml | 1 - 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/spacy/cli/templates/quickstart_training.jinja b/spacy/cli/templates/quickstart_training.jinja index ae11dcafc..58864883a 100644 --- a/spacy/cli/templates/quickstart_training.jinja +++ b/spacy/cli/templates/quickstart_training.jinja @@ -271,13 +271,8 @@ factory = "tok2vec" [components.tok2vec.model.embed] @architectures = "spacy.MultiHashEmbed.v2" width = ${components.tok2vec.model.encode.width} -{% if has_letters -%} attrs = ["NORM", "PREFIX", "SUFFIX", "SHAPE"] -rows = [5000, 2500, 2500, 2500] -{% else -%} -attrs = ["ORTH", "SHAPE"] -rows = [5000, 2500] -{% endif -%} +rows = [5000, 1000, 2500, 2500] include_static_vectors = {{ "true" if optimize == "accuracy" else "false" }} [components.tok2vec.model.encode] diff --git a/spacy/cli/templates/quickstart_training_recommendations.yml b/spacy/cli/templates/quickstart_training_recommendations.yml index a7bf9b74a..27945e27a 100644 --- a/spacy/cli/templates/quickstart_training_recommendations.yml +++ b/spacy/cli/templates/quickstart_training_recommendations.yml @@ -271,4 +271,3 @@ zh: accuracy: name: bert-base-chinese size_factor: 3 - has_letters: false From 6b83fee58db27cee70ef8d893cbbf7470db4e242 Mon Sep 17 00:00:00 2001 From: kadarakos Date: Fri, 9 Sep 2022 17:17:10 +0200 Subject: [PATCH 21/82] Assets message (#11458) * new error message when 'project run assets' * new error message when 'project run assets' * Update spacy/cli/project/run.py Co-authored-by: Sofie Van Landeghem Co-authored-by: Sofie Van Landeghem --- spacy/cli/project/run.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/spacy/cli/project/run.py b/spacy/cli/project/run.py index 734803bc4..d42d95465 100644 --- a/spacy/cli/project/run.py +++ b/spacy/cli/project/run.py @@ -195,6 +195,8 @@ def validate_subcommand( msg.fail(f"No commands or workflows defined in {PROJECT_FILE}", exits=1) if subcommand not in commands and subcommand not in workflows: help_msg = [] + if subcommand in ["assets", "asset"]: + help_msg.append("Did you mean to run: python -m spacy 
project assets?") if commands: help_msg.append(f"Available commands: {', '.join(commands)}") if workflows: From 0ec9a696e60933807c189c7be22623a81a840289 Mon Sep 17 00:00:00 2001 From: Madeesh Kannan Date: Mon, 12 Sep 2022 14:55:41 +0200 Subject: [PATCH 22/82] Fix config validation failures caused by NVTX pipeline wrappers (#11460) * Enable Cython<->Python bindings for `Pipe` and `TrainablePipe` methods * `pipes_with_nvtx_range`: Skip hooking methods whose signature cannot be ascertained When loading pipelines from a config file, the arguments passed to individual pipeline components is validated by `pydantic` during init. For this, the validation model attempts to parse the function signature of the component's c'tor/entry point so that it can check if all mandatory parameters are present in the config file. When using the `models_and_pipes_with_nvtx_range` as a `after_pipeline_creation` callback, the methods of all pipeline components get replaced by a NVTX range wrapper **before** the above-mentioned validation takes place. This can be problematic for components that are implemented as Cython extension types - if the extension type is not compiled with Python bindings for its methods, they will have no signatures at runtime. This resulted in `pydantic` matching the *wrapper's* parameters with the those in the config and raising errors. To avoid this, we now skip applying the wrapper to any (Cython) methods that do not have signatures. --- spacy/ml/callbacks.py | 7 +++++-- spacy/pipeline/pipe.pyx | 2 +- spacy/pipeline/trainable_pipe.pyx | 2 +- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/spacy/ml/callbacks.py b/spacy/ml/callbacks.py index 18290b947..3b60ec2ab 100644 --- a/spacy/ml/callbacks.py +++ b/spacy/ml/callbacks.py @@ -89,11 +89,14 @@ def pipes_with_nvtx_range( types.MethodType(nvtx_range_wrapper_for_pipe_method, pipe), func ) - # Try to preserve the original function signature. + # We need to preserve the original function signature so that + # the original parameters are passed to pydantic for validation downstream. try: wrapped_func.__signature__ = inspect.signature(func) # type: ignore except: - pass + # Can fail for Cython methods that do not have bindings. 
+ warnings.warn(Warnings.W122.format(method=name, pipe=pipe.name)) + continue try: setattr( diff --git a/spacy/pipeline/pipe.pyx b/spacy/pipeline/pipe.pyx index 4e3ae1cf0..8407acc45 100644 --- a/spacy/pipeline/pipe.pyx +++ b/spacy/pipeline/pipe.pyx @@ -1,4 +1,4 @@ -# cython: infer_types=True, profile=True +# cython: infer_types=True, profile=True, binding=True from typing import Optional, Tuple, Iterable, Iterator, Callable, Union, Dict import srsly import warnings diff --git a/spacy/pipeline/trainable_pipe.pyx b/spacy/pipeline/trainable_pipe.pyx index 76b0733cf..3f0507d4b 100644 --- a/spacy/pipeline/trainable_pipe.pyx +++ b/spacy/pipeline/trainable_pipe.pyx @@ -1,4 +1,4 @@ -# cython: infer_types=True, profile=True +# cython: infer_types=True, profile=True, binding=True from typing import Iterable, Iterator, Optional, Dict, Tuple, Callable import srsly from thinc.api import set_dropout_rate, Model, Optimizer From cc10a27c59a3e5fe3c2d08667534fcbf22908f06 Mon Sep 17 00:00:00 2001 From: Sofie Van Landeghem Date: Mon, 12 Sep 2022 15:36:48 +0200 Subject: [PATCH 23/82] Prevent tok2vec to broadcast to listeners when predicting (#11385) * replicate bug with tok2vec in annotating components * add overfitting test with a frozen tok2vec * remove broadcast from predict and check doc.tensor instead * remove broadcast * proper error * slight rephrase of documentation --- spacy/errors.py | 2 + spacy/pipeline/tok2vec.py | 20 ++++--- spacy/tests/pipeline/test_tok2vec.py | 81 ++++++++++++++++++++++++++++ website/docs/usage/training.md | 2 +- 4 files changed, 98 insertions(+), 7 deletions(-) diff --git a/spacy/errors.py b/spacy/errors.py index e2201284f..7e63dc76c 100644 --- a/spacy/errors.py +++ b/spacy/errors.py @@ -538,6 +538,8 @@ class Errors(metaclass=ErrorsWithCodes): E199 = ("Unable to merge 0-length span at `doc[{start}:{end}]`.") E200 = ("Can't set {attr} from Span.") E202 = ("Unsupported {name} mode '{mode}'. Supported modes: {modes}.") + E203 = ("If the {name} embedding layer is not updated " + "during training, make sure to include it in 'annotating components'") # New errors added in v3.x E853 = ("Unsupported component factory name '{name}'. The character '.' is " diff --git a/spacy/pipeline/tok2vec.py b/spacy/pipeline/tok2vec.py index 2e3dde3cb..c742aaeaa 100644 --- a/spacy/pipeline/tok2vec.py +++ b/spacy/pipeline/tok2vec.py @@ -123,9 +123,6 @@ class Tok2Vec(TrainablePipe): width = self.model.get_dim("nO") return [self.model.ops.alloc((0, width)) for doc in docs] tokvecs = self.model.predict(docs) - batch_id = Tok2VecListener.get_batch_id(docs) - for listener in self.listeners: - listener.receive(batch_id, tokvecs, _empty_backprop) return tokvecs def set_annotations(self, docs: Sequence[Doc], tokvecses) -> None: @@ -286,8 +283,19 @@ class Tok2VecListener(Model): def forward(model: Tok2VecListener, inputs, is_train: bool): """Supply the outputs from the upstream Tok2Vec component.""" if is_train: - model.verify_inputs(inputs) - return model._outputs, model._backprop + # This might occur during training when the tok2vec layer is frozen / hasn't been updated. + # In that case, it should be set to "annotating" so we can retrieve the embeddings from the doc. 
+ if model._batch_id is None: + outputs = [] + for doc in inputs: + if doc.tensor.size == 0: + raise ValueError(Errors.E203.format(name="tok2vec")) + else: + outputs.append(doc.tensor) + return outputs, _empty_backprop + else: + model.verify_inputs(inputs) + return model._outputs, model._backprop else: # This is pretty grim, but it's hard to do better :(. # It's hard to avoid relying on the doc.tensor attribute, because the @@ -306,7 +314,7 @@ def forward(model: Tok2VecListener, inputs, is_train: bool): outputs.append(model.ops.alloc2f(len(doc), width)) else: outputs.append(doc.tensor) - return outputs, lambda dX: [] + return outputs, _empty_backprop def _empty_backprop(dX): # for pickling diff --git a/spacy/tests/pipeline/test_tok2vec.py b/spacy/tests/pipeline/test_tok2vec.py index 64faf133d..659274db9 100644 --- a/spacy/tests/pipeline/test_tok2vec.py +++ b/spacy/tests/pipeline/test_tok2vec.py @@ -230,6 +230,87 @@ def test_tok2vec_listener_callback(): assert get_dX(Y) is not None +def test_tok2vec_listener_overfitting(): + """ Test that a pipeline with a listener properly overfits, even if 'tok2vec' is in the annotating components """ + orig_config = Config().from_str(cfg_string) + nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True) + train_examples = [] + for t in TRAIN_DATA: + train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1])) + optimizer = nlp.initialize(get_examples=lambda: train_examples) + + for i in range(50): + losses = {} + nlp.update(train_examples, sgd=optimizer, losses=losses, annotates=["tok2vec"]) + assert losses["tagger"] < 0.00001 + + # test the trained model + test_text = "I like blue eggs" + doc = nlp(test_text) + assert doc[0].tag_ == "N" + assert doc[1].tag_ == "V" + assert doc[2].tag_ == "J" + assert doc[3].tag_ == "N" + + # Also test the results are still the same after IO + with make_tempdir() as tmp_dir: + nlp.to_disk(tmp_dir) + nlp2 = util.load_model_from_path(tmp_dir) + doc2 = nlp2(test_text) + assert doc2[0].tag_ == "N" + assert doc2[1].tag_ == "V" + assert doc2[2].tag_ == "J" + assert doc2[3].tag_ == "N" + + +def test_tok2vec_frozen_not_annotating(): + """ Test that a pipeline with a frozen tok2vec raises an error when the tok2vec is not annotating """ + orig_config = Config().from_str(cfg_string) + nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True) + train_examples = [] + for t in TRAIN_DATA: + train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1])) + optimizer = nlp.initialize(get_examples=lambda: train_examples) + + for i in range(2): + losses = {} + with pytest.raises(ValueError, match=r"the tok2vec embedding layer is not updated"): + nlp.update(train_examples, sgd=optimizer, losses=losses, exclude=["tok2vec"]) + + +def test_tok2vec_frozen_overfitting(): + """ Test that a pipeline with a frozen & annotating tok2vec can still overfit """ + orig_config = Config().from_str(cfg_string) + nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True) + train_examples = [] + for t in TRAIN_DATA: + train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1])) + optimizer = nlp.initialize(get_examples=lambda: train_examples) + + for i in range(100): + losses = {} + nlp.update(train_examples, sgd=optimizer, losses=losses, exclude=["tok2vec"], annotates=["tok2vec"]) + assert losses["tagger"] < 0.0001 + + # test the trained model + test_text = "I like blue eggs" + doc = nlp(test_text) + assert doc[0].tag_ == "N" + assert doc[1].tag_ == "V" + assert doc[2].tag_ == "J" + 
assert doc[3].tag_ == "N" + + # Also test the results are still the same after IO + with make_tempdir() as tmp_dir: + nlp.to_disk(tmp_dir) + nlp2 = util.load_model_from_path(tmp_dir) + doc2 = nlp2(test_text) + assert doc2[0].tag_ == "N" + assert doc2[1].tag_ == "V" + assert doc2[2].tag_ == "J" + assert doc2[3].tag_ == "N" + + def test_replace_listeners(): orig_config = Config().from_str(cfg_string) nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True) diff --git a/website/docs/usage/training.md b/website/docs/usage/training.md index 5e064b269..27a8bbca7 100644 --- a/website/docs/usage/training.md +++ b/website/docs/usage/training.md @@ -480,7 +480,7 @@ as-is. They are also excluded when calling > parse. So the evaluation results should always reflect what your pipeline will > produce at runtime. If you want a frozen component to run (without updating) > during training as well, so that downstream components can use its -> **predictions**, you can add it to the list of +> **predictions**, you should add it to the list of > [`annotating_components`](/usage/training#annotating-components). ```ini From 6be6913ba5aaa7aa35deb1a9fcd4418d93824b24 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Tue, 13 Sep 2022 09:04:53 +0200 Subject: [PATCH 24/82] Update cupy extras (#11279) * Update cupy extras: * Extend to v11 * Add `cupy-cuda11x` and `cupy-wheel` * Update quickstart to use `cupy-wheel` for CUDA 10.2+ * Rename cuda-wheel to cuda-autodetect, remove repeated CUDA in menu --- setup.cfg | 36 +++++++++++++---------- website/src/widgets/quickstart-install.js | 12 ++------ 2 files changed, 22 insertions(+), 26 deletions(-) diff --git a/setup.cfg b/setup.cfg index 5fd820a96..2dc5e7042 100644 --- a/setup.cfg +++ b/setup.cfg @@ -76,37 +76,41 @@ transformers = ray = spacy_ray>=0.1.0,<1.0.0 cuda = - cupy>=5.0.0b4,<11.0.0 + cupy>=5.0.0b4,<12.0.0 cuda80 = - cupy-cuda80>=5.0.0b4,<11.0.0 + cupy-cuda80>=5.0.0b4,<12.0.0 cuda90 = - cupy-cuda90>=5.0.0b4,<11.0.0 + cupy-cuda90>=5.0.0b4,<12.0.0 cuda91 = - cupy-cuda91>=5.0.0b4,<11.0.0 + cupy-cuda91>=5.0.0b4,<12.0.0 cuda92 = - cupy-cuda92>=5.0.0b4,<11.0.0 + cupy-cuda92>=5.0.0b4,<12.0.0 cuda100 = - cupy-cuda100>=5.0.0b4,<11.0.0 + cupy-cuda100>=5.0.0b4,<12.0.0 cuda101 = - cupy-cuda101>=5.0.0b4,<11.0.0 + cupy-cuda101>=5.0.0b4,<12.0.0 cuda102 = - cupy-cuda102>=5.0.0b4,<11.0.0 + cupy-cuda102>=5.0.0b4,<12.0.0 cuda110 = - cupy-cuda110>=5.0.0b4,<11.0.0 + cupy-cuda110>=5.0.0b4,<12.0.0 cuda111 = - cupy-cuda111>=5.0.0b4,<11.0.0 + cupy-cuda111>=5.0.0b4,<12.0.0 cuda112 = - cupy-cuda112>=5.0.0b4,<11.0.0 + cupy-cuda112>=5.0.0b4,<12.0.0 cuda113 = - cupy-cuda113>=5.0.0b4,<11.0.0 + cupy-cuda113>=5.0.0b4,<12.0.0 cuda114 = - cupy-cuda114>=5.0.0b4,<11.0.0 + cupy-cuda114>=5.0.0b4,<12.0.0 cuda115 = - cupy-cuda115>=5.0.0b4,<11.0.0 + cupy-cuda115>=5.0.0b4,<12.0.0 cuda116 = - cupy-cuda116>=5.0.0b4,<11.0.0 + cupy-cuda116>=5.0.0b4,<12.0.0 cuda117 = - cupy-cuda117>=5.0.0b4,<11.0.0 + cupy-cuda117>=5.0.0b4,<12.0.0 +cuda11x = + cupy-cuda11x>=11.0.0,<12.0.0 +cuda-autodetect = + cupy-wheel>=11.0.0,<12.0.0 apple = thinc-apple-ops>=0.1.0.dev0,<1.0.0 # Language tokenizers with external dependencies diff --git a/website/src/widgets/quickstart-install.js b/website/src/widgets/quickstart-install.js index 61c0678dd..0d2186acb 100644 --- a/website/src/widgets/quickstart-install.js +++ b/website/src/widgets/quickstart-install.js @@ -9,7 +9,7 @@ const DEFAULT_PLATFORM = 'x86' const DEFAULT_MODELS = ['en'] const DEFAULT_OPT = 'efficiency' const DEFAULT_HARDWARE = 'cpu' -const DEFAULT_CUDA 
= 'cuda113' +const DEFAULT_CUDA = 'cuda-autodetect' const CUDA = { '8.0': 'cuda80', '9.0': 'cuda90', @@ -17,15 +17,7 @@ const CUDA = { '9.2': 'cuda92', '10.0': 'cuda100', '10.1': 'cuda101', - '10.2': 'cuda102', - '11.0': 'cuda110', - '11.1': 'cuda111', - '11.2': 'cuda112', - '11.3': 'cuda113', - '11.4': 'cuda114', - '11.5': 'cuda115', - '11.6': 'cuda116', - '11.7': 'cuda117', + '10.2, 11.0+': 'cuda-autodetect', } const LANG_EXTRAS = ['ja'] // only for languages with models From efdbb722c5072e2137f13408e0bc0e3976715a01 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dani=C3=ABl=20de=20Kok?= Date: Tue, 13 Sep 2022 09:51:12 +0200 Subject: [PATCH 25/82] Store activations in `Doc`s when `save_activations` is enabled (#11002) * Store activations in Doc when `store_activations` is enabled This change adds the new `activations` attribute to `Doc`. This attribute can be used by trainable pipes to store their activations, probabilities, and guesses for downstream users. As an example, this change modifies the `tagger` and `senter` pipes to add an `store_activations` option. When this option is enabled, the probabilities and guesses are stored in `set_annotations`. * Change type of `store_activations` to `Union[bool, List[str]]` When the value is: - A bool: all activations are stored when set to `True`. - A List[str]: the activations named in the list are stored * Formatting fixes in Tagger * Support store_activations in spancat and morphologizer * Make Doc.activations type visible to MyPy * textcat/textcat_multilabel: add store_activations option * trainable_lemmatizer/entity_linker: add store_activations option * parser/ner: do not currently support returning activations * Extend tagger and senter tests So that they, like the other tests, also check that we get no activations if no activations were requested. * Document `Doc.activations` and `store_activations` in the relevant pipes * Start errors/warnings at higher numbers to avoid merge conflicts Between the master and v4 branches. * Add `store_activations` to docstrings. * Replace store_activations setter by set_store_activations method Setters that take a different type than what the getter returns are still problematic for MyPy. Replace the setter by a method, so that type inference works everywhere. * Use dict comprehension suggested by @svlandeg * Revert "Use dict comprehension suggested by @svlandeg" This reverts commit 6e7b958f7060397965176c69649e5414f1f24988. * EntityLinker: add type annotations to _add_activations * _store_activations: make kwarg-only, remove doc_scores_lens arg * set_annotations: add type annotations * Apply suggestions from code review Co-authored-by: Sofie Van Landeghem * TextCat.predict: return dict * Make the `TrainablePipe.store_activations` property a bool This means that we can also bring back `store_activations` setter. * Remove `TrainablePipe.activations` We do not need to enumerate the activations anymore since `store_activations` is `bool`. * Add type annotations for activations in predict/set_annotations * Rename `TrainablePipe.store_activations` to `save_activations` * Error E1400 is not used anymore This error was used when activations were still `Union[bool, List[str]]`. 
* Change wording in API docs after store -> save change * docs: tag (save_)activations as new in spaCy 4.0 * Fix copied line in morphologizer activations test * Don't train in any test_save_activations test * Rename activations - "probs" -> "probabilities" - "guesses" -> "label_ids", except in the edit tree lemmatizer, where "guesses" -> "tree_ids". * Remove unused W400 warning. This warning was used when we still allowed the user to specify which activations to save. * Formatting fixes Co-authored-by: Sofie Van Landeghem * Replace "kb_ids" by a constant * spancat: replace a cast by an assertion * Fix EOF spacing * Fix comments in test_save_activations tests * Do not set RNG seed in activation saving tests * Revert "spancat: replace a cast by an assertion" This reverts commit 0bd5730d16432443a2b247316928d4f789ad8741. Co-authored-by: Sofie Van Landeghem --- spacy/pipeline/edit_tree_lemmatizer.py | 29 ++++- spacy/pipeline/entity_linker.py | 108 ++++++++++++++++-- spacy/pipeline/morphologizer.pyx | 29 ++++- spacy/pipeline/senter.pyx | 33 ++++-- spacy/pipeline/spancat.py | 30 ++++- spacy/pipeline/tagger.pyx | 37 ++++-- spacy/pipeline/textcat.py | 33 ++++-- spacy/pipeline/textcat_multilabel.py | 14 ++- spacy/pipeline/trainable_pipe.pxd | 1 + spacy/pipeline/trainable_pipe.pyx | 11 +- .../pipeline/test_edit_tree_lemmatizer.py | 25 ++++ spacy/tests/pipeline/test_entity_linker.py | 68 ++++++++++- spacy/tests/pipeline/test_morphologizer.py | 24 ++++ spacy/tests/pipeline/test_senter.py | 25 ++++ spacy/tests/pipeline/test_spancat.py | 20 ++++ spacy/tests/pipeline/test_tagger.py | 22 ++++ spacy/tests/pipeline/test_textcat.py | 43 ++++++- spacy/tokens/doc.pxd | 2 + spacy/tokens/doc.pyi | 3 +- spacy/tokens/doc.pyx | 1 + website/docs/api/doc.md | 33 +++--- website/docs/api/edittreelemmatizer.md | 17 +-- website/docs/api/entitylinker.md | 27 ++--- website/docs/api/morphologizer.md | 17 +-- website/docs/api/sentencerecognizer.md | 11 +- website/docs/api/spancategorizer.md | 17 +-- website/docs/api/tagger.md | 13 ++- website/docs/api/textcategorizer.md | 17 +-- 28 files changed, 580 insertions(+), 130 deletions(-) diff --git a/spacy/pipeline/edit_tree_lemmatizer.py b/spacy/pipeline/edit_tree_lemmatizer.py index b7d615f6d..37aa9663b 100644 --- a/spacy/pipeline/edit_tree_lemmatizer.py +++ b/spacy/pipeline/edit_tree_lemmatizer.py @@ -7,7 +7,7 @@ import numpy as np import srsly from thinc.api import Config, Model, SequenceCategoricalCrossentropy -from thinc.types import Floats2d, Ints1d, Ints2d +from thinc.types import ArrayXd, Floats2d, Ints1d from ._edit_tree_internals.edit_trees import EditTrees from ._edit_tree_internals.schemas import validate_edit_tree @@ -21,6 +21,9 @@ from ..vocab import Vocab from .. 
import util +ActivationsT = Dict[str, Union[List[Floats2d], List[Ints1d]]] + + default_model_config = """ [model] @architectures = "spacy.Tagger.v2" @@ -49,6 +52,7 @@ DEFAULT_EDIT_TREE_LEMMATIZER_MODEL = Config().from_str(default_model_config)["mo "overwrite": False, "top_k": 1, "scorer": {"@scorers": "spacy.lemmatizer_scorer.v1"}, + "save_activations": False, }, default_score_weights={"lemma_acc": 1.0}, ) @@ -61,6 +65,7 @@ def make_edit_tree_lemmatizer( overwrite: bool, top_k: int, scorer: Optional[Callable], + save_activations: bool, ): """Construct an EditTreeLemmatizer component.""" return EditTreeLemmatizer( @@ -72,6 +77,7 @@ def make_edit_tree_lemmatizer( overwrite=overwrite, top_k=top_k, scorer=scorer, + save_activations=save_activations, ) @@ -91,6 +97,7 @@ class EditTreeLemmatizer(TrainablePipe): overwrite: bool = False, top_k: int = 1, scorer: Optional[Callable] = lemmatizer_score, + save_activations: bool = False, ): """ Construct an edit tree lemmatizer. @@ -102,6 +109,7 @@ class EditTreeLemmatizer(TrainablePipe): frequency in the training data. overwrite (bool): overwrite existing lemma annotations. top_k (int): try to apply at most the k most probable edit trees. + save_activations (bool): save model activations in Doc when annotating. """ self.vocab = vocab self.model = model @@ -116,6 +124,7 @@ class EditTreeLemmatizer(TrainablePipe): self.cfg: Dict[str, Any] = {"labels": []} self.scorer = scorer + self.save_activations = save_activations def get_loss( self, examples: Iterable[Example], scores: List[Floats2d] @@ -144,21 +153,24 @@ class EditTreeLemmatizer(TrainablePipe): return float(loss), d_scores - def predict(self, docs: Iterable[Doc]) -> List[Ints2d]: + def predict(self, docs: Iterable[Doc]) -> ActivationsT: n_docs = len(list(docs)) if not any(len(doc) for doc in docs): # Handle cases where there are no tokens in any docs. 
n_labels = len(self.cfg["labels"]) - guesses: List[Ints2d] = [ + guesses: List[Ints1d] = [ + self.model.ops.alloc((0,), dtype="i") for doc in docs + ] + scores: List[Floats2d] = [ self.model.ops.alloc((0, n_labels), dtype="i") for doc in docs ] assert len(guesses) == n_docs - return guesses + return {"probabilities": scores, "tree_ids": guesses} scores = self.model.predict(docs) assert len(scores) == n_docs guesses = self._scores2guesses(docs, scores) assert len(guesses) == n_docs - return guesses + return {"probabilities": scores, "tree_ids": guesses} def _scores2guesses(self, docs, scores): guesses = [] @@ -186,8 +198,13 @@ class EditTreeLemmatizer(TrainablePipe): return guesses - def set_annotations(self, docs: Iterable[Doc], batch_tree_ids): + def set_annotations(self, docs: Iterable[Doc], activations: ActivationsT): + batch_tree_ids = activations["tree_ids"] for i, doc in enumerate(docs): + if self.save_activations: + doc.activations[self.name] = {} + for act_name, acts in activations.items(): + doc.activations[self.name][act_name] = acts[i] doc_tree_ids = batch_tree_ids[i] if hasattr(doc_tree_ids, "get"): doc_tree_ids = doc_tree_ids.get() diff --git a/spacy/pipeline/entity_linker.py b/spacy/pipeline/entity_linker.py index 73a90b268..ac05cb840 100644 --- a/spacy/pipeline/entity_linker.py +++ b/spacy/pipeline/entity_linker.py @@ -1,5 +1,7 @@ -from typing import Optional, Iterable, Callable, Dict, Union, List, Any -from thinc.types import Floats2d +from typing import Optional, Iterable, Callable, Dict, Sequence, Union, List, Any +from typing import cast +from numpy import dtype +from thinc.types import Floats1d, Floats2d, Ints1d, Ragged from pathlib import Path from itertools import islice import srsly @@ -21,6 +23,11 @@ from ..util import SimpleFrozenList, registry from .. import util from ..scorer import Scorer + +ActivationsT = Dict[str, Union[List[Ragged], List[str]]] + +KNOWLEDGE_BASE_IDS = "kb_ids" + # See #9050 BACKWARD_OVERWRITE = True @@ -57,6 +64,7 @@ DEFAULT_NEL_MODEL = Config().from_str(default_model_config)["model"] "scorer": {"@scorers": "spacy.entity_linker_scorer.v1"}, "use_gold_ents": True, "threshold": None, + "save_activations": False, }, default_score_weights={ "nel_micro_f": 1.0, @@ -79,6 +87,7 @@ def make_entity_linker( scorer: Optional[Callable], use_gold_ents: bool, threshold: Optional[float] = None, + save_activations: bool, ): """Construct an EntityLinker component. @@ -97,6 +106,7 @@ def make_entity_linker( component must provide entity annotations. threshold (Optional[float]): Confidence threshold for entity predictions. If confidence is below the threshold, prediction is discarded. If None, predictions are not filtered by any threshold. + save_activations (bool): save model activations in Doc when annotating. """ if not model.attrs.get("include_span_maker", False): @@ -128,6 +138,7 @@ def make_entity_linker( scorer=scorer, use_gold_ents=use_gold_ents, threshold=threshold, + save_activations=save_activations, ) @@ -164,6 +175,7 @@ class EntityLinker(TrainablePipe): scorer: Optional[Callable] = entity_linker_score, use_gold_ents: bool, threshold: Optional[float] = None, + save_activations: bool = False, ) -> None: """Initialize an entity linker. 
@@ -212,6 +224,7 @@ class EntityLinker(TrainablePipe): self.scorer = scorer self.use_gold_ents = use_gold_ents self.threshold = threshold + self.save_activations = save_activations def set_kb(self, kb_loader: Callable[[Vocab], KnowledgeBase]): """Define the KB of this pipe by providing a function that will @@ -397,7 +410,7 @@ class EntityLinker(TrainablePipe): loss = loss / len(entity_encodings) return float(loss), out - def predict(self, docs: Iterable[Doc]) -> List[str]: + def predict(self, docs: Iterable[Doc]) -> ActivationsT: """Apply the pipeline's model to a batch of docs, without modifying them. Returns the KB IDs for each entity in each doc, including NIL if there is no prediction. @@ -410,13 +423,20 @@ class EntityLinker(TrainablePipe): self.validate_kb() entity_count = 0 final_kb_ids: List[str] = [] - xp = self.model.ops.xp + ops = self.model.ops + xp = ops.xp + docs_ents: List[Ragged] = [] + docs_scores: List[Ragged] = [] if not docs: - return final_kb_ids + return {KNOWLEDGE_BASE_IDS: final_kb_ids, "ents": docs_ents, "scores": docs_scores} if isinstance(docs, Doc): docs = [docs] - for i, doc in enumerate(docs): + for doc in docs: + doc_ents: List[Ints1d] = [] + doc_scores: List[Floats1d] = [] if len(doc) == 0: + docs_scores.append(Ragged(ops.alloc1f(0), ops.alloc1i(0))) + docs_ents.append(Ragged(xp.zeros(0, dtype="uint64"), ops.alloc1i(0))) continue sentences = [s for s in doc.sents] # Looping through each entity (TODO: rewrite) @@ -439,14 +459,32 @@ class EntityLinker(TrainablePipe): if ent.label_ in self.labels_discard: # ignoring this entity - setting to NIL final_kb_ids.append(self.NIL) + self._add_activations( + doc_scores=doc_scores, + doc_ents=doc_ents, + scores=[0.0], + ents=[0], + ) else: candidates = list(self.get_candidates(self.kb, ent)) if not candidates: # no prediction possible for this entity - setting to NIL final_kb_ids.append(self.NIL) + self._add_activations( + doc_scores=doc_scores, + doc_ents=doc_ents, + scores=[0.0], + ents=[0], + ) elif len(candidates) == 1 and self.threshold is None: # shortcut for efficiency reasons: take the 1 candidate final_kb_ids.append(candidates[0].entity_) + self._add_activations( + doc_scores=doc_scores, + doc_ents=doc_ents, + scores=[1.0], + ents=[candidates[0].entity_], + ) else: random.shuffle(candidates) # set all prior probabilities to 0 if incl_prior=False @@ -479,27 +517,48 @@ class EntityLinker(TrainablePipe): if self.threshold is None or scores.max() >= self.threshold else EntityLinker.NIL ) + self._add_activations( + doc_scores=doc_scores, + doc_ents=doc_ents, + scores=scores, + ents=[c.entity for c in candidates], + ) + self._add_doc_activations( + docs_scores=docs_scores, + docs_ents=docs_ents, + doc_scores=doc_scores, + doc_ents=doc_ents, + ) if not (len(final_kb_ids) == entity_count): err = Errors.E147.format( method="predict", msg="result variables not of equal length" ) raise RuntimeError(err) - return final_kb_ids + return {KNOWLEDGE_BASE_IDS: final_kb_ids, "ents": docs_ents, "scores": docs_scores} - def set_annotations(self, docs: Iterable[Doc], kb_ids: List[str]) -> None: + def set_annotations(self, docs: Iterable[Doc], activations: ActivationsT) -> None: """Modify a batch of documents, using pre-computed scores. docs (Iterable[Doc]): The documents to modify. - kb_ids (List[str]): The IDs to set, produced by EntityLinker.predict. + activations (ActivationsT): The activations used for setting annotations, produced + by EntityLinker.predict. 
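For the entity linker, `predict` now returns the KB IDs alongside two `Ragged` activations, and only the latter are copied onto the `Doc` (see the `set_annotations` change just below). A sketch of reading them back, assuming a trained pipeline with an entity recognizer and a populated `KnowledgeBase` (placeholder model name):

```python
import spacy

nlp = spacy.load("my_nel_pipeline")  # placeholder
linker = nlp.get_pipe("entity_linker")
linker.save_activations = True

doc = nlp("Russ Cochran was a publisher.")
acts = doc.activations["entity_linker"]
ents, scores = acts["ents"], acts["scores"]  # one Ragged each for this doc
# ents.data holds candidate KB hashes (uint64), scores.data the matching scores
# (float32); lengths[i] is the number of candidates recorded for the i-th mention.
for i, n_candidates in enumerate(ents.lengths):
    print(f"mention {i}: {int(n_candidates)} candidate(s)")
```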
DOCS: https://spacy.io/api/entitylinker#set_annotations """ + kb_ids = cast(List[str], activations[KNOWLEDGE_BASE_IDS]) count_ents = len([ent for doc in docs for ent in doc.ents]) if count_ents != len(kb_ids): raise ValueError(Errors.E148.format(ents=count_ents, ids=len(kb_ids))) i = 0 overwrite = self.cfg["overwrite"] - for doc in docs: + for j, doc in enumerate(docs): + if self.save_activations: + doc.activations[self.name] = {} + for act_name, acts in activations.items(): + if act_name != KNOWLEDGE_BASE_IDS: + # We only copy activations that are Ragged. + doc.activations[self.name][act_name] = cast(Ragged, acts[j]) + for ent in doc.ents: kb_id = kb_ids[i] i += 1 @@ -598,3 +657,32 @@ class EntityLinker(TrainablePipe): def add_label(self, label): raise NotImplementedError + + def _add_doc_activations( + self, + *, + docs_scores: List[Ragged], + docs_ents: List[Ragged], + doc_scores: List[Floats1d], + doc_ents: List[Ints1d], + ): + if not self.save_activations: + return + ops = self.model.ops + lengths = ops.asarray1i([s.shape[0] for s in doc_scores]) + docs_scores.append(Ragged(ops.flatten(doc_scores), lengths)) + docs_ents.append(Ragged(ops.flatten(doc_ents), lengths)) + + def _add_activations( + self, + *, + doc_scores: List[Floats1d], + doc_ents: List[Ints1d], + scores: Sequence[float], + ents: Sequence[int], + ): + if not self.save_activations: + return + ops = self.model.ops + doc_scores.append(ops.asarray1f(scores)) + doc_ents.append(ops.asarray1i(ents, dtype="uint64")) diff --git a/spacy/pipeline/morphologizer.pyx b/spacy/pipeline/morphologizer.pyx index eec1e42e1..782a1dabe 100644 --- a/spacy/pipeline/morphologizer.pyx +++ b/spacy/pipeline/morphologizer.pyx @@ -1,7 +1,8 @@ # cython: infer_types=True, profile=True, binding=True -from typing import Optional, Union, Dict, Callable +from typing import Callable, Dict, Iterable, List, Optional, Union import srsly from thinc.api import SequenceCategoricalCrossentropy, Model, Config +from thinc.types import Floats2d, Ints1d from itertools import islice from ..tokens.doc cimport Doc @@ -13,7 +14,7 @@ from ..symbols import POS from ..language import Language from ..errors import Errors from .pipe import deserialize_config -from .tagger import Tagger +from .tagger import ActivationsT, Tagger from .. 
import util from ..scorer import Scorer from ..training import validate_examples, validate_get_examples @@ -52,7 +53,13 @@ DEFAULT_MORPH_MODEL = Config().from_str(default_model_config)["model"] @Language.factory( "morphologizer", assigns=["token.morph", "token.pos"], - default_config={"model": DEFAULT_MORPH_MODEL, "overwrite": True, "extend": False, "scorer": {"@scorers": "spacy.morphologizer_scorer.v1"}}, + default_config={ + "model": DEFAULT_MORPH_MODEL, + "overwrite": True, + "extend": False, + "scorer": {"@scorers": "spacy.morphologizer_scorer.v1"}, + "save_activations": False, + }, default_score_weights={"pos_acc": 0.5, "morph_acc": 0.5, "morph_per_feat": None}, ) def make_morphologizer( @@ -62,8 +69,10 @@ def make_morphologizer( overwrite: bool, extend: bool, scorer: Optional[Callable], + save_activations: bool, ): - return Morphologizer(nlp.vocab, model, name, overwrite=overwrite, extend=extend, scorer=scorer) + return Morphologizer(nlp.vocab, model, name, overwrite=overwrite, extend=extend, scorer=scorer, + save_activations=save_activations) def morphologizer_score(examples, **kwargs): @@ -95,6 +104,7 @@ class Morphologizer(Tagger): overwrite: bool = BACKWARD_OVERWRITE, extend: bool = BACKWARD_EXTEND, scorer: Optional[Callable] = morphologizer_score, + save_activations: bool = False, ): """Initialize a morphologizer. @@ -105,6 +115,7 @@ class Morphologizer(Tagger): scorer (Optional[Callable]): The scoring method. Defaults to Scorer.score_token_attr for the attributes "pos" and "morph" and Scorer.score_token_attr_per_feat for the attribute "morph". + save_activations (bool): save model activations in Doc when annotating. DOCS: https://spacy.io/api/morphologizer#init """ @@ -124,6 +135,7 @@ class Morphologizer(Tagger): } self.cfg = dict(sorted(cfg.items())) self.scorer = scorer + self.save_activations = save_activations @property def labels(self): @@ -217,14 +229,15 @@ class Morphologizer(Tagger): assert len(label_sample) > 0, Errors.E923.format(name=self.name) self.model.initialize(X=doc_sample, Y=label_sample) - def set_annotations(self, docs, batch_tag_ids): + def set_annotations(self, docs: Iterable[Doc], activations: ActivationsT): """Modify a batch of documents, using pre-computed scores. docs (Iterable[Doc]): The documents to modify. - batch_tag_ids: The IDs to set, produced by Morphologizer.predict. + activations (ActivationsT): The activations used for setting annotations, produced by Morphologizer.predict. DOCS: https://spacy.io/api/morphologizer#set_annotations """ + batch_tag_ids = activations["label_ids"] if isinstance(docs, Doc): docs = [docs] cdef Doc doc @@ -236,6 +249,10 @@ class Morphologizer(Tagger): # to allocate a compatible container out of the iterable. 
labels = tuple(self.labels) for i, doc in enumerate(docs): + if self.save_activations: + doc.activations[self.name] = {} + for act_name, acts in activations.items(): + doc.activations[self.name][act_name] = acts[i] doc_tag_ids = batch_tag_ids[i] if hasattr(doc_tag_ids, "get"): doc_tag_ids = doc_tag_ids.get() diff --git a/spacy/pipeline/senter.pyx b/spacy/pipeline/senter.pyx index 6808fe70e..93a7ee796 100644 --- a/spacy/pipeline/senter.pyx +++ b/spacy/pipeline/senter.pyx @@ -1,13 +1,14 @@ # cython: infer_types=True, profile=True, binding=True -from typing import Optional, Callable +from typing import Dict, Iterable, Optional, Callable, List, Union from itertools import islice import srsly from thinc.api import Model, SequenceCategoricalCrossentropy, Config +from thinc.types import Floats2d, Ints1d from ..tokens.doc cimport Doc -from .tagger import Tagger +from .tagger import ActivationsT, Tagger from ..language import Language from ..errors import Errors from ..scorer import Scorer @@ -38,11 +39,21 @@ DEFAULT_SENTER_MODEL = Config().from_str(default_model_config)["model"] @Language.factory( "senter", assigns=["token.is_sent_start"], - default_config={"model": DEFAULT_SENTER_MODEL, "overwrite": False, "scorer": {"@scorers": "spacy.senter_scorer.v1"}}, + default_config={ + "model": DEFAULT_SENTER_MODEL, + "overwrite": False, + "scorer": {"@scorers": "spacy.senter_scorer.v1"}, + "save_activations": False, + }, default_score_weights={"sents_f": 1.0, "sents_p": 0.0, "sents_r": 0.0}, ) -def make_senter(nlp: Language, name: str, model: Model, overwrite: bool, scorer: Optional[Callable]): - return SentenceRecognizer(nlp.vocab, model, name, overwrite=overwrite, scorer=scorer) +def make_senter(nlp: Language, + name: str, + model: Model, + overwrite: bool, + scorer: Optional[Callable], + save_activations: bool): + return SentenceRecognizer(nlp.vocab, model, name, overwrite=overwrite, scorer=scorer, save_activations=save_activations) def senter_score(examples, **kwargs): @@ -72,6 +83,7 @@ class SentenceRecognizer(Tagger): *, overwrite=BACKWARD_OVERWRITE, scorer=senter_score, + save_activations: bool = False, ): """Initialize a sentence recognizer. @@ -81,6 +93,7 @@ class SentenceRecognizer(Tagger): losses during training. scorer (Optional[Callable]): The scoring method. Defaults to Scorer.score_spans for the attribute "sents". + save_activations (bool): save model activations in Doc when annotating. DOCS: https://spacy.io/api/sentencerecognizer#init """ @@ -90,6 +103,7 @@ class SentenceRecognizer(Tagger): self._rehearsal_model = None self.cfg = {"overwrite": overwrite} self.scorer = scorer + self.save_activations = save_activations @property def labels(self): @@ -107,19 +121,24 @@ class SentenceRecognizer(Tagger): def label_data(self): return None - def set_annotations(self, docs, batch_tag_ids): + def set_annotations(self, docs: Iterable[Doc], activations: ActivationsT): """Modify a batch of documents, using pre-computed scores. docs (Iterable[Doc]): The documents to modify. - batch_tag_ids: The IDs to set, produced by SentenceRecognizer.predict. + activations (ActivationsT): The activations used for setting annotations, produced by SentenceRecognizer.predict. 
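The morphologizer reuses the tagger's activation layout, so the stored keys are `"probabilities"` and `"label_ids"`. A sketch with a placeholder pipeline name, assuming a trained morphologizer:

```python
import spacy

nlp = spacy.load("my_morph_pipeline")  # placeholder
nlp.get_pipe("morphologizer").save_activations = True

doc = nlp("This is a test.")
acts = doc.activations["morphologizer"]
# "probabilities": per-token scores over the morph/POS labels;
# "label_ids": the per-token argmax that set_annotations writes back.
print(acts["probabilities"].shape, acts["label_ids"])
```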
DOCS: https://spacy.io/api/sentencerecognizer#set_annotations """ + batch_tag_ids = activations["label_ids"] if isinstance(docs, Doc): docs = [docs] cdef Doc doc cdef bint overwrite = self.cfg["overwrite"] for i, doc in enumerate(docs): + if self.save_activations: + doc.activations[self.name] = {} + for act_name, acts in activations.items(): + doc.activations[self.name][act_name] = acts[i] doc_tag_ids = batch_tag_ids[i] if hasattr(doc_tag_ids, "get"): doc_tag_ids = doc_tag_ids.get() diff --git a/spacy/pipeline/spancat.py b/spacy/pipeline/spancat.py index 1b7a9eecb..c517991f5 100644 --- a/spacy/pipeline/spancat.py +++ b/spacy/pipeline/spancat.py @@ -1,4 +1,5 @@ from typing import List, Dict, Callable, Tuple, Optional, Iterable, Any, cast +from typing import Union from thinc.api import Config, Model, get_current_ops, set_dropout_rate, Ops from thinc.api import Optimizer from thinc.types import Ragged, Ints2d, Floats2d, Ints1d @@ -16,6 +17,9 @@ from ..errors import Errors from ..util import registry +ActivationsT = Dict[str, Union[Floats2d, Ragged]] + + spancat_default_config = """ [model] @architectures = "spacy.SpanCategorizer.v1" @@ -106,6 +110,7 @@ def build_ngram_range_suggester(min_size: int, max_size: int) -> Suggester: "model": DEFAULT_SPANCAT_MODEL, "suggester": {"@misc": "spacy.ngram_suggester.v1", "sizes": [1, 2, 3]}, "scorer": {"@scorers": "spacy.spancat_scorer.v1"}, + "save_activations": False, }, default_score_weights={"spans_sc_f": 1.0, "spans_sc_p": 0.0, "spans_sc_r": 0.0}, ) @@ -118,6 +123,7 @@ def make_spancat( scorer: Optional[Callable], threshold: float, max_positive: Optional[int], + save_activations: bool, ) -> "SpanCategorizer": """Create a SpanCategorizer component. The span categorizer consists of two parts: a suggester function that proposes candidate spans, and a labeller @@ -138,6 +144,7 @@ def make_spancat( 0.5. max_positive (Optional[int]): Maximum number of labels to consider positive per span. Defaults to None, indicating no limit. + save_activations (bool): save model activations in Doc when annotating. """ return SpanCategorizer( nlp.vocab, @@ -148,6 +155,7 @@ def make_spancat( max_positive=max_positive, name=name, scorer=scorer, + save_activations=save_activations, ) @@ -186,6 +194,7 @@ class SpanCategorizer(TrainablePipe): threshold: float = 0.5, max_positive: Optional[int] = None, scorer: Optional[Callable] = spancat_score, + save_activations: bool = False, ) -> None: """Initialize the span categorizer. vocab (Vocab): The shared vocabulary. @@ -218,6 +227,7 @@ class SpanCategorizer(TrainablePipe): self.model = model self.name = name self.scorer = scorer + self.save_activations = save_activations @property def key(self) -> str: @@ -260,7 +270,7 @@ class SpanCategorizer(TrainablePipe): """ return list(self.labels) - def predict(self, docs: Iterable[Doc]): + def predict(self, docs: Iterable[Doc]) -> ActivationsT: """Apply the pipeline's model to a batch of docs, without modifying them. docs (Iterable[Doc]): The documents to predict. 
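The sentence recognizer follows the same pattern; it only has the two internal labels `"I"` and `"S"`, so the probability matrix should have two columns per token. A sketch under the same placeholder assumption:

```python
import spacy

nlp = spacy.load("my_senter_pipeline")  # placeholder
senter = nlp.get_pipe("senter")
senter.save_activations = True

doc = nlp("This is a test. And another one.")
acts = doc.activations["senter"]
# "label_ids" are the per-token decisions behind token.is_sent_start.
print(senter.labels, acts["probabilities"].shape, acts["label_ids"])
```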
@@ -270,7 +280,7 @@ class SpanCategorizer(TrainablePipe): """ indices = self.suggester(docs, ops=self.model.ops) scores = self.model.predict((docs, indices)) # type: ignore - return indices, scores + return {"indices": indices, "scores": scores} def set_candidates( self, docs: Iterable[Doc], *, candidates_key: str = "candidates" @@ -290,19 +300,29 @@ class SpanCategorizer(TrainablePipe): for index in candidates.dataXd: doc.spans[candidates_key].append(doc[index[0] : index[1]]) - def set_annotations(self, docs: Iterable[Doc], indices_scores) -> None: + def set_annotations(self, docs: Iterable[Doc], activations: ActivationsT) -> None: """Modify a batch of Doc objects, using pre-computed scores. docs (Iterable[Doc]): The documents to modify. - scores: The scores to set, produced by SpanCategorizer.predict. + activations: ActivationsT: The activations, produced by SpanCategorizer.predict. DOCS: https://spacy.io/api/spancategorizer#set_annotations """ labels = self.labels - indices, scores = indices_scores + + indices = activations["indices"] + assert isinstance(indices, Ragged) + scores = cast(Floats2d, activations["scores"]) + offset = 0 for i, doc in enumerate(docs): indices_i = indices[i].dataXd + if self.save_activations: + doc.activations[self.name] = {} + doc.activations[self.name]["indices"] = indices_i + doc.activations[self.name]["scores"] = scores[ + offset : offset + indices.lengths[i] + ] doc.spans[self.key] = self._make_span_group( doc, indices_i, scores[offset : offset + indices.lengths[i]], labels # type: ignore[arg-type] ) diff --git a/spacy/pipeline/tagger.pyx b/spacy/pipeline/tagger.pyx index d6ecbf084..3b4715ce5 100644 --- a/spacy/pipeline/tagger.pyx +++ b/spacy/pipeline/tagger.pyx @@ -1,9 +1,9 @@ # cython: infer_types=True, profile=True, binding=True -from typing import Callable, Optional +from typing import Callable, Dict, Iterable, List, Optional, Union import numpy import srsly from thinc.api import Model, set_dropout_rate, SequenceCategoricalCrossentropy, Config -from thinc.types import Floats2d +from thinc.types import Floats2d, Ints1d import warnings from itertools import islice @@ -22,6 +22,9 @@ from ..training import validate_examples, validate_get_examples from ..util import registry from .. import util + +ActivationsT = Dict[str, Union[List[Floats2d], List[Ints1d]]] + # See #9050 BACKWARD_OVERWRITE = False @@ -45,7 +48,13 @@ DEFAULT_TAGGER_MODEL = Config().from_str(default_model_config)["model"] @Language.factory( "tagger", assigns=["token.tag"], - default_config={"model": DEFAULT_TAGGER_MODEL, "overwrite": False, "scorer": {"@scorers": "spacy.tagger_scorer.v1"}, "neg_prefix": "!"}, + default_config={ + "model": DEFAULT_TAGGER_MODEL, + "overwrite": False, + "scorer": {"@scorers": "spacy.tagger_scorer.v1"}, + "neg_prefix": "!", + "save_activations": False, + }, default_score_weights={"tag_acc": 1.0}, ) def make_tagger( @@ -55,6 +64,7 @@ def make_tagger( overwrite: bool, scorer: Optional[Callable], neg_prefix: str, + save_activations: bool, ): """Construct a part-of-speech tagger component. @@ -63,7 +73,8 @@ def make_tagger( in size, and be normalized as probabilities (all scores between 0 and 1, with the rows summing to 1). 
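For the span categorizer the activations keep the suggester output next to the scores, so each row of `"indices"` lines up with a row of `"scores"` (and with the spans written to `doc.spans` when they clear the threshold). A sketch, assuming a trained spancat pipeline (placeholder name):

```python
import spacy

nlp = spacy.load("my_spancat_pipeline")  # placeholder
spancat = nlp.get_pipe("spancat")
spancat.save_activations = True

doc = nlp("This is a test.")
acts = doc.activations["spancat"]
# "indices": (n_candidate_spans, 2) token offsets from the suggester;
# "scores": (n_candidate_spans, n_labels) probabilities for the same rows.
for (start, end), row in zip(acts["indices"], acts["scores"]):
    print(doc[int(start):int(end)].text, row)
```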
""" - return Tagger(nlp.vocab, model, name, overwrite=overwrite, scorer=scorer, neg_prefix=neg_prefix) + return Tagger(nlp.vocab, model, name, overwrite=overwrite, scorer=scorer, neg_prefix=neg_prefix, + save_activations=save_activations) def tagger_score(examples, **kwargs): @@ -89,6 +100,7 @@ class Tagger(TrainablePipe): overwrite=BACKWARD_OVERWRITE, scorer=tagger_score, neg_prefix="!", + save_activations: bool = False, ): """Initialize a part-of-speech tagger. @@ -98,6 +110,7 @@ class Tagger(TrainablePipe): losses during training. scorer (Optional[Callable]): The scoring method. Defaults to Scorer.score_token_attr for the attribute "tag". + save_activations (bool): save model activations in Doc when annotating. DOCS: https://spacy.io/api/tagger#init """ @@ -108,6 +121,7 @@ class Tagger(TrainablePipe): cfg = {"labels": [], "overwrite": overwrite, "neg_prefix": neg_prefix} self.cfg = dict(sorted(cfg.items())) self.scorer = scorer + self.save_activations = save_activations @property def labels(self): @@ -126,7 +140,7 @@ class Tagger(TrainablePipe): """Data about the labels currently added to the component.""" return tuple(self.cfg["labels"]) - def predict(self, docs): + def predict(self, docs) -> ActivationsT: """Apply the pipeline's model to a batch of docs, without modifying them. docs (Iterable[Doc]): The documents to predict. @@ -139,12 +153,12 @@ class Tagger(TrainablePipe): n_labels = len(self.labels) guesses = [self.model.ops.alloc((0, n_labels)) for doc in docs] assert len(guesses) == len(docs) - return guesses + return {"probabilities": guesses, "label_ids": guesses} scores = self.model.predict(docs) assert len(scores) == len(docs), (len(scores), len(docs)) guesses = self._scores2guesses(scores) assert len(guesses) == len(docs) - return guesses + return {"probabilities": scores, "label_ids": guesses} def _scores2guesses(self, scores): guesses = [] @@ -155,14 +169,15 @@ class Tagger(TrainablePipe): guesses.append(doc_guesses) return guesses - def set_annotations(self, docs, batch_tag_ids): + def set_annotations(self, docs: Iterable[Doc], activations: ActivationsT): """Modify a batch of documents, using pre-computed scores. docs (Iterable[Doc]): The documents to modify. - batch_tag_ids: The IDs to set, produced by Tagger.predict. + activations (ActivationsT): The activations used for setting annotations, produced by Tagger.predict. 
DOCS: https://spacy.io/api/tagger#set_annotations """ + batch_tag_ids = activations["label_ids"] if isinstance(docs, Doc): docs = [docs] cdef Doc doc @@ -170,6 +185,10 @@ class Tagger(TrainablePipe): cdef bint overwrite = self.cfg["overwrite"] labels = self.labels for i, doc in enumerate(docs): + if self.save_activations: + doc.activations[self.name] = {} + for act_name, acts in activations.items(): + doc.activations[self.name][act_name] = acts[i] doc_tag_ids = batch_tag_ids[i] if hasattr(doc_tag_ids, "get"): doc_tag_ids = doc_tag_ids.get() diff --git a/spacy/pipeline/textcat.py b/spacy/pipeline/textcat.py index c45f819fc..506cdb61c 100644 --- a/spacy/pipeline/textcat.py +++ b/spacy/pipeline/textcat.py @@ -1,4 +1,4 @@ -from typing import Iterable, Tuple, Optional, Dict, List, Callable, Any +from typing import Iterable, Tuple, Optional, Dict, List, Callable, Any, Union from thinc.api import get_array_module, Model, Optimizer, set_dropout_rate, Config from thinc.types import Floats2d import numpy @@ -14,6 +14,9 @@ from ..util import registry from ..vocab import Vocab +ActivationsT = Dict[str, Floats2d] + + single_label_default_config = """ [model] @architectures = "spacy.TextCatEnsemble.v2" @@ -75,6 +78,7 @@ subword_features = true "threshold": 0.5, "model": DEFAULT_SINGLE_TEXTCAT_MODEL, "scorer": {"@scorers": "spacy.textcat_scorer.v1"}, + "save_activations": False, }, default_score_weights={ "cats_score": 1.0, @@ -96,6 +100,7 @@ def make_textcat( model: Model[List[Doc], List[Floats2d]], threshold: float, scorer: Optional[Callable], + save_activations: bool, ) -> "TextCategorizer": """Create a TextCategorizer component. The text categorizer predicts categories over a whole document. It can learn one or more labels, and the labels are considered @@ -105,8 +110,16 @@ def make_textcat( scores for each category. threshold (float): Cutoff to consider a prediction "positive". scorer (Optional[Callable]): The scoring method. + save_activations (bool): save model activations in Doc when annotating. """ - return TextCategorizer(nlp.vocab, model, name, threshold=threshold, scorer=scorer) + return TextCategorizer( + nlp.vocab, + model, + name, + threshold=threshold, + scorer=scorer, + save_activations=save_activations, + ) def textcat_score(examples: Iterable[Example], **kwargs) -> Dict[str, Any]: @@ -137,6 +150,7 @@ class TextCategorizer(TrainablePipe): *, threshold: float, scorer: Optional[Callable] = textcat_score, + save_activations: bool = False, ) -> None: """Initialize a text categorizer for single-label classification. @@ -157,6 +171,7 @@ class TextCategorizer(TrainablePipe): cfg = {"labels": [], "threshold": threshold, "positive_label": None} self.cfg = dict(cfg) self.scorer = scorer + self.save_activations = save_activations @property def support_missing_values(self): @@ -181,7 +196,7 @@ class TextCategorizer(TrainablePipe): """ return self.labels # type: ignore[return-value] - def predict(self, docs: Iterable[Doc]): + def predict(self, docs: Iterable[Doc]) -> ActivationsT: """Apply the pipeline's model to a batch of docs, without modifying them. docs (Iterable[Doc]): The documents to predict. 
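To make the tagger's two arrays concrete: `"label_ids"` are indices into `Tagger.labels`, while `"probabilities"` keeps the full score row per token. A sketch assuming any pipeline with a trained tagger (placeholder name):

```python
import spacy

nlp = spacy.load("my_tagger_pipeline")  # placeholder
tagger = nlp.get_pipe("tagger")
tagger.save_activations = True

doc = nlp("This is a test.")
acts = doc.activations["tagger"]
for token, label_id, probs in zip(doc, acts["label_ids"], acts["probabilities"]):
    print(token.text, tagger.labels[int(label_id)], float(probs.max()))
```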
@@ -194,12 +209,12 @@ class TextCategorizer(TrainablePipe): tensors = [doc.tensor for doc in docs] xp = self.model.ops.xp scores = xp.zeros((len(list(docs)), len(self.labels))) - return scores + return {"probabilities": scores} scores = self.model.predict(docs) scores = self.model.ops.asarray(scores) - return scores + return {"probabilities": scores} - def set_annotations(self, docs: Iterable[Doc], scores) -> None: + def set_annotations(self, docs: Iterable[Doc], activations: ActivationsT) -> None: """Modify a batch of Doc objects, using pre-computed scores. docs (Iterable[Doc]): The documents to modify. @@ -207,9 +222,13 @@ class TextCategorizer(TrainablePipe): DOCS: https://spacy.io/api/textcategorizer#set_annotations """ + probs = activations["probabilities"] for i, doc in enumerate(docs): + if self.save_activations: + doc.activations[self.name] = {} + doc.activations[self.name]["probabilities"] = probs[i] for j, label in enumerate(self.labels): - doc.cats[label] = float(scores[i, j]) + doc.cats[label] = float(probs[i, j]) def update( self, diff --git a/spacy/pipeline/textcat_multilabel.py b/spacy/pipeline/textcat_multilabel.py index e33a885f8..3a6dd0b0c 100644 --- a/spacy/pipeline/textcat_multilabel.py +++ b/spacy/pipeline/textcat_multilabel.py @@ -1,4 +1,4 @@ -from typing import Iterable, Optional, Dict, List, Callable, Any +from typing import Iterable, Optional, Dict, List, Callable, Any, Union from thinc.types import Floats2d from thinc.api import Model, Config @@ -75,6 +75,7 @@ subword_features = true "threshold": 0.5, "model": DEFAULT_MULTI_TEXTCAT_MODEL, "scorer": {"@scorers": "spacy.textcat_multilabel_scorer.v1"}, + "save_activations": False, }, default_score_weights={ "cats_score": 1.0, @@ -96,6 +97,7 @@ def make_multilabel_textcat( model: Model[List[Doc], List[Floats2d]], threshold: float, scorer: Optional[Callable], + save_activations: bool, ) -> "TextCategorizer": """Create a TextCategorizer component. The text categorizer predicts categories over a whole document. It can learn one or more labels, and the labels are considered @@ -107,7 +109,12 @@ def make_multilabel_textcat( threshold (float): Cutoff to consider a prediction "positive". """ return MultiLabel_TextCategorizer( - nlp.vocab, model, name, threshold=threshold, scorer=scorer + nlp.vocab, + model, + name, + threshold=threshold, + scorer=scorer, + save_activations=save_activations, ) @@ -139,6 +146,7 @@ class MultiLabel_TextCategorizer(TextCategorizer): *, threshold: float, scorer: Optional[Callable] = textcat_multilabel_score, + save_activations: bool = False, ) -> None: """Initialize a text categorizer for multi-label classification. @@ -147,6 +155,7 @@ class MultiLabel_TextCategorizer(TextCategorizer): name (str): The component instance name, used to add entries to the losses during training. threshold (float): Cutoff to consider a prediction "positive". + save_activations (bool): save model activations in Doc when annotating. 
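The single-label text categorizer stores only the class probabilities; they are the same floats that `set_annotations` writes into `doc.cats`, ordered like `textcat.labels`. A sketch with a placeholder pipeline name:

```python
import spacy

nlp = spacy.load("my_textcat_pipeline")  # placeholder
textcat = nlp.get_pipe("textcat")
textcat.save_activations = True

doc = nlp("This is a test.")
probs = doc.activations["textcat"]["probabilities"]  # shape: (n_labels,)
for label, score in zip(textcat.labels, probs):
    # doc.cats[label] holds the same value, cast to float
    print(label, float(score), doc.cats[label])
```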
DOCS: https://spacy.io/api/textcategorizer#init """ @@ -157,6 +166,7 @@ class MultiLabel_TextCategorizer(TextCategorizer): cfg = {"labels": [], "threshold": threshold} self.cfg = dict(cfg) self.scorer = scorer + self.save_activations = save_activations @property def support_missing_values(self): diff --git a/spacy/pipeline/trainable_pipe.pxd b/spacy/pipeline/trainable_pipe.pxd index 65daa8b22..180f86f45 100644 --- a/spacy/pipeline/trainable_pipe.pxd +++ b/spacy/pipeline/trainable_pipe.pxd @@ -6,3 +6,4 @@ cdef class TrainablePipe(Pipe): cdef public object model cdef public object cfg cdef public object scorer + cdef bint _save_activations diff --git a/spacy/pipeline/trainable_pipe.pyx b/spacy/pipeline/trainable_pipe.pyx index 76b0733cf..c82f2830c 100644 --- a/spacy/pipeline/trainable_pipe.pyx +++ b/spacy/pipeline/trainable_pipe.pyx @@ -2,11 +2,12 @@ from typing import Iterable, Iterator, Optional, Dict, Tuple, Callable import srsly from thinc.api import set_dropout_rate, Model, Optimizer +import warnings from ..tokens.doc cimport Doc from ..training import validate_examples -from ..errors import Errors +from ..errors import Errors, Warnings from .pipe import Pipe, deserialize_config from .. import util from ..vocab import Vocab @@ -342,3 +343,11 @@ cdef class TrainablePipe(Pipe): deserialize["model"] = load_model util.from_disk(path, deserialize, exclude) return self + + @property + def save_activations(self): + return self._save_activations + + @save_activations.setter + def save_activations(self, save_activations: bool): + self._save_activations = save_activations diff --git a/spacy/tests/pipeline/test_edit_tree_lemmatizer.py b/spacy/tests/pipeline/test_edit_tree_lemmatizer.py index cf541e301..ad2e56729 100644 --- a/spacy/tests/pipeline/test_edit_tree_lemmatizer.py +++ b/spacy/tests/pipeline/test_edit_tree_lemmatizer.py @@ -1,3 +1,4 @@ +from typing import cast import pickle import pytest from hypothesis import given @@ -6,6 +7,7 @@ from spacy import util from spacy.lang.en import English from spacy.language import Language from spacy.pipeline._edit_tree_internals.edit_trees import EditTrees +from spacy.pipeline.trainable_pipe import TrainablePipe from spacy.training import Example from spacy.strings import StringStore from spacy.util import make_tempdir @@ -278,3 +280,26 @@ def test_empty_strings(): no_change = trees.add("xyz", "xyz") empty = trees.add("", "") assert no_change == empty + + +def test_save_activations(): + nlp = English() + lemmatizer = cast(TrainablePipe, nlp.add_pipe("trainable_lemmatizer")) + lemmatizer.min_tree_freq = 1 + train_examples = [] + for t in TRAIN_DATA: + train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1])) + nlp.initialize(get_examples=lambda: train_examples) + nO = lemmatizer.model.get_dim("nO") + + doc = nlp("This is a test.") + assert "trainable_lemmatizer" not in doc.activations + + lemmatizer.save_activations = True + doc = nlp("This is a test.") + assert list(doc.activations["trainable_lemmatizer"].keys()) == [ + "probabilities", + "tree_ids", + ] + assert doc.activations["trainable_lemmatizer"]["probabilities"].shape == (5, nO) + assert doc.activations["trainable_lemmatizer"]["tree_ids"].shape == (5,) diff --git a/spacy/tests/pipeline/test_entity_linker.py b/spacy/tests/pipeline/test_entity_linker.py index 82bc976bb..75d1feea5 100644 --- a/spacy/tests/pipeline/test_entity_linker.py +++ b/spacy/tests/pipeline/test_entity_linker.py @@ -1,7 +1,8 @@ -from typing import Callable, Iterable, Dict, Any +from typing import Callable, Iterable, 
Dict, Any, cast import pytest from numpy.testing import assert_equal +from thinc.types import Ragged from spacy import registry, util from spacy.attrs import ENT_KB_ID @@ -9,7 +10,7 @@ from spacy.compat import pickle from spacy.kb import Candidate, KnowledgeBase, get_candidates from spacy.lang.en import English from spacy.ml import load_kb -from spacy.pipeline import EntityLinker +from spacy.pipeline import EntityLinker, TrainablePipe from spacy.pipeline.legacy import EntityLinker_v1 from spacy.pipeline.tok2vec import DEFAULT_TOK2VEC_MODEL from spacy.scorer import Scorer @@ -1176,3 +1177,66 @@ def test_threshold(meet_threshold: bool, config: Dict[str, Any]): assert len(doc.ents) == 1 assert doc.ents[0].kb_id_ == entity_id if meet_threshold else EntityLinker.NIL + + +def test_save_activations(): + nlp = English() + vector_length = 3 + assert "Q2146908" not in nlp.vocab.strings + + # Convert the texts to docs to make sure we have doc.ents set for the training examples + train_examples = [] + for text, annotation in TRAIN_DATA: + doc = nlp(text) + train_examples.append(Example.from_dict(doc, annotation)) + + def create_kb(vocab): + # create artificial KB - assign same prior weight to the two russ cochran's + # Q2146908 (Russ Cochran): American golfer + # Q7381115 (Russ Cochran): publisher + mykb = KnowledgeBase(vocab, entity_vector_length=vector_length) + mykb.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3]) + mykb.add_entity(entity="Q7381115", freq=12, entity_vector=[9, 1, -7]) + mykb.add_alias( + alias="Russ Cochran", + entities=["Q2146908", "Q7381115"], + probabilities=[0.5, 0.5], + ) + return mykb + + # Create the Entity Linker component and add it to the pipeline + entity_linker = cast(TrainablePipe, nlp.add_pipe("entity_linker", last=True)) + assert isinstance(entity_linker, EntityLinker) + entity_linker.set_kb(create_kb) + assert "Q2146908" in entity_linker.vocab.strings + assert "Q2146908" in entity_linker.kb.vocab.strings + + # initialize the NEL pipe + nlp.initialize(get_examples=lambda: train_examples) + + nO = entity_linker.model.get_dim("nO") + + nlp.add_pipe("sentencizer", first=True) + patterns = [ + {"label": "PERSON", "pattern": [{"LOWER": "russ"}, {"LOWER": "cochran"}]}, + {"label": "ORG", "pattern": [{"LOWER": "ec"}, {"LOWER": "comics"}]}, + ] + ruler = nlp.add_pipe("entity_ruler", before="entity_linker") + ruler.add_patterns(patterns) + + doc = nlp("Russ Cochran was a publisher") + assert "entity_linker" not in doc.activations + + entity_linker.save_activations = True + doc = nlp("Russ Cochran was a publisher") + assert set(doc.activations["entity_linker"].keys()) == {"ents", "scores"} + ents = doc.activations["entity_linker"]["ents"] + assert isinstance(ents, Ragged) + assert ents.data.shape == (2, 1) + assert ents.data.dtype == "uint64" + assert ents.lengths.shape == (1,) + scores = doc.activations["entity_linker"]["scores"] + assert isinstance(scores, Ragged) + assert scores.data.shape == (2, 1) + assert scores.data.dtype == "float32" + assert scores.lengths.shape == (1,) diff --git a/spacy/tests/pipeline/test_morphologizer.py b/spacy/tests/pipeline/test_morphologizer.py index 33696bfd8..70fc77304 100644 --- a/spacy/tests/pipeline/test_morphologizer.py +++ b/spacy/tests/pipeline/test_morphologizer.py @@ -1,3 +1,4 @@ +from typing import cast import pytest from numpy.testing import assert_equal @@ -7,6 +8,7 @@ from spacy.lang.en import English from spacy.language import Language from spacy.tests.util import make_tempdir from spacy.morphology import 
Morphology +from spacy.pipeline import TrainablePipe from spacy.attrs import MORPH from spacy.tokens import Doc @@ -197,3 +199,25 @@ def test_overfitting_IO(): gold_pos_tags = ["NOUN", "NOUN", "NOUN", "NOUN"] assert [str(t.morph) for t in doc] == gold_morphs assert [t.pos_ for t in doc] == gold_pos_tags + + +def test_save_activations(): + nlp = English() + morphologizer = cast(TrainablePipe, nlp.add_pipe("morphologizer")) + train_examples = [] + for inst in TRAIN_DATA: + train_examples.append(Example.from_dict(nlp.make_doc(inst[0]), inst[1])) + nlp.initialize(get_examples=lambda: train_examples) + + doc = nlp("This is a test.") + assert "morphologizer" not in doc.activations + + morphologizer.save_activations = True + doc = nlp("This is a test.") + assert "morphologizer" in doc.activations + assert set(doc.activations["morphologizer"].keys()) == { + "label_ids", + "probabilities", + } + assert doc.activations["morphologizer"]["probabilities"].shape == (5, 6) + assert doc.activations["morphologizer"]["label_ids"].shape == (5,) diff --git a/spacy/tests/pipeline/test_senter.py b/spacy/tests/pipeline/test_senter.py index 047f59bef..3deac9e9a 100644 --- a/spacy/tests/pipeline/test_senter.py +++ b/spacy/tests/pipeline/test_senter.py @@ -1,3 +1,4 @@ +from typing import cast import pytest from numpy.testing import assert_equal from spacy.attrs import SENT_START @@ -6,6 +7,7 @@ from spacy import util from spacy.training import Example from spacy.lang.en import English from spacy.language import Language +from spacy.pipeline import TrainablePipe from spacy.tests.util import make_tempdir @@ -101,3 +103,26 @@ def test_overfitting_IO(): # test internal pipe labels vs. Language.pipe_labels with hidden labels assert nlp.get_pipe("senter").labels == ("I", "S") assert "senter" not in nlp.pipe_labels + + +def test_save_activations(): + # Test if activations are correctly added to Doc when requested. + nlp = English() + senter = cast(TrainablePipe, nlp.add_pipe("senter")) + + train_examples = [] + for t in TRAIN_DATA: + train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1])) + + nlp.initialize(get_examples=lambda: train_examples) + nO = senter.model.get_dim("nO") + + doc = nlp("This is a test.") + assert "senter" not in doc.activations + + senter.save_activations = True + doc = nlp("This is a test.") + assert "senter" in doc.activations + assert set(doc.activations["senter"].keys()) == {"label_ids", "probabilities"} + assert doc.activations["senter"]["probabilities"].shape == (5, nO) + assert doc.activations["senter"]["label_ids"].shape == (5,) diff --git a/spacy/tests/pipeline/test_spancat.py b/spacy/tests/pipeline/test_spancat.py index 95e9aeb57..4fb26c7e7 100644 --- a/spacy/tests/pipeline/test_spancat.py +++ b/spacy/tests/pipeline/test_spancat.py @@ -419,3 +419,23 @@ def test_set_candidates(): assert len(docs[0].spans["candidates"]) == 9 assert docs[0].spans["candidates"][0].text == "Just" assert docs[0].spans["candidates"][4].text == "Just a" + + +def test_save_activations(): + # Test if activations are correctly added to Doc when requested. 
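All of these tests flip the same switch, the `save_activations` property that `TrainablePipe` now exposes, so the option can also be enabled uniformly after loading and the resulting `Doc.activations` dict walked generically. A sketch under the usual placeholder assumptions:

```python
import spacy
from spacy.pipeline import TrainablePipe
from thinc.types import Ragged

nlp = spacy.load("my_pipeline")  # placeholder for any trained pipeline
for _, pipe in nlp.pipeline:
    if isinstance(pipe, TrainablePipe):
        pipe.save_activations = True  # harmless for components that do not store activations

doc = nlp("This is a test.")
# Doc.activations maps component name -> activation name -> array (Ragged for entity_linker)
for pipe_name, acts in doc.activations.items():
    for act_name, value in acts.items():
        data = value.data if isinstance(value, Ragged) else value
        print(pipe_name, act_name, data.shape)
```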
+ nlp = English() + spancat = nlp.add_pipe("spancat", config={"spans_key": SPAN_KEY}) + train_examples = make_examples(nlp) + nlp.initialize(get_examples=lambda: train_examples) + nO = spancat.model.get_dim("nO") + assert nO == 2 + assert set(spancat.labels) == {"LOC", "PERSON"} + + doc = nlp("This is a test.") + assert "spancat" not in doc.activations + + spancat.save_activations = True + doc = nlp("This is a test.") + assert set(doc.activations["spancat"].keys()) == {"indices", "scores"} + assert doc.activations["spancat"]["indices"].shape == (12, 2) + assert doc.activations["spancat"]["scores"].shape == (12, nO) diff --git a/spacy/tests/pipeline/test_tagger.py b/spacy/tests/pipeline/test_tagger.py index 96e75851e..a0c71198e 100644 --- a/spacy/tests/pipeline/test_tagger.py +++ b/spacy/tests/pipeline/test_tagger.py @@ -1,3 +1,4 @@ +from typing import cast import pytest from numpy.testing import assert_equal from spacy.attrs import TAG @@ -6,6 +7,7 @@ from spacy import util from spacy.training import Example from spacy.lang.en import English from spacy.language import Language +from spacy.pipeline import TrainablePipe from thinc.api import compounding from ..util import make_tempdir @@ -211,6 +213,26 @@ def test_overfitting_IO(): assert doc3[0].tag_ != "N" +def test_save_activations(): + # Test if activations are correctly added to Doc when requested. + nlp = English() + tagger = cast(TrainablePipe, nlp.add_pipe("tagger")) + train_examples = [] + for t in TRAIN_DATA: + train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1])) + nlp.initialize(get_examples=lambda: train_examples) + + doc = nlp("This is a test.") + assert "tagger" not in doc.activations + + tagger.save_activations = True + doc = nlp("This is a test.") + assert "tagger" in doc.activations + assert set(doc.activations["tagger"].keys()) == {"label_ids", "probabilities"} + assert doc.activations["tagger"]["probabilities"].shape == (5, len(TAGS)) + assert doc.activations["tagger"]["label_ids"].shape == (5,) + + def test_tagger_requires_labels(): nlp = English() nlp.add_pipe("tagger") diff --git a/spacy/tests/pipeline/test_textcat.py b/spacy/tests/pipeline/test_textcat.py index 0bb036a33..c1f61a3c0 100644 --- a/spacy/tests/pipeline/test_textcat.py +++ b/spacy/tests/pipeline/test_textcat.py @@ -1,3 +1,4 @@ +from typing import cast import random import numpy.random @@ -11,7 +12,7 @@ from spacy import util from spacy.cli.evaluate import print_prf_per_type, print_textcats_auc_per_cat from spacy.lang.en import English from spacy.language import Language -from spacy.pipeline import TextCategorizer +from spacy.pipeline import TextCategorizer, TrainablePipe from spacy.pipeline.textcat import single_label_bow_config from spacy.pipeline.textcat import single_label_cnn_config from spacy.pipeline.textcat import single_label_default_config @@ -285,7 +286,7 @@ def test_issue9904(): nlp.initialize(get_examples) examples = get_examples() - scores = textcat.predict([eg.predicted for eg in examples]) + scores = textcat.predict([eg.predicted for eg in examples])["probabilities"] loss = textcat.get_loss(examples, scores)[0] loss_double_bs = textcat.get_loss(examples * 2, scores.repeat(2, axis=0))[0] @@ -871,3 +872,41 @@ def test_textcat_multi_threshold(): scores = nlp.evaluate(train_examples, scorer_cfg={"threshold": 0}) assert scores["cats_f_per_type"]["POSITIVE"]["r"] == 1.0 + + +def test_save_activations(): + nlp = English() + textcat = cast(TrainablePipe, nlp.add_pipe("textcat")) + + train_examples = [] + for text, annotations in 
TRAIN_DATA_SINGLE_LABEL: + train_examples.append(Example.from_dict(nlp.make_doc(text), annotations)) + nlp.initialize(get_examples=lambda: train_examples) + nO = textcat.model.get_dim("nO") + + doc = nlp("This is a test.") + assert "textcat" not in doc.activations + + textcat.save_activations = True + doc = nlp("This is a test.") + assert list(doc.activations["textcat"].keys()) == ["probabilities"] + assert doc.activations["textcat"]["probabilities"].shape == (nO,) + + +def test_save_activations_multi(): + nlp = English() + textcat = cast(TrainablePipe, nlp.add_pipe("textcat_multilabel")) + + train_examples = [] + for text, annotations in TRAIN_DATA_MULTI_LABEL: + train_examples.append(Example.from_dict(nlp.make_doc(text), annotations)) + nlp.initialize(get_examples=lambda: train_examples) + nO = textcat.model.get_dim("nO") + + doc = nlp("This is a test.") + assert "textcat_multilabel" not in doc.activations + + textcat.save_activations = True + doc = nlp("This is a test.") + assert list(doc.activations["textcat_multilabel"].keys()) == ["probabilities"] + assert doc.activations["textcat_multilabel"]["probabilities"].shape == (nO,) diff --git a/spacy/tokens/doc.pxd b/spacy/tokens/doc.pxd index 57d087958..83a940cbb 100644 --- a/spacy/tokens/doc.pxd +++ b/spacy/tokens/doc.pxd @@ -50,6 +50,8 @@ cdef class Doc: cdef public float sentiment + cdef public dict activations + cdef public dict user_hooks cdef public dict user_token_hooks cdef public dict user_span_hooks diff --git a/spacy/tokens/doc.pyi b/spacy/tokens/doc.pyi index ae1324a8a..763c1fd2f 100644 --- a/spacy/tokens/doc.pyi +++ b/spacy/tokens/doc.pyi @@ -1,7 +1,7 @@ from typing import Callable, Protocol, Iterable, Iterator, Optional from typing import Union, Tuple, List, Dict, Any, overload from cymem.cymem import Pool -from thinc.types import Floats1d, Floats2d, Ints2d +from thinc.types import ArrayXd, Floats1d, Floats2d, Ints2d, Ragged from .span import Span from .token import Token from .span_groups import SpanGroups @@ -22,6 +22,7 @@ class Doc: max_length: int length: int sentiment: float + activations: Dict[str, Dict[str, Union[ArrayXd, Ragged]]] cats: Dict[str, float] user_hooks: Dict[str, Callable[..., Any]] user_token_hooks: Dict[str, Callable[..., Any]] diff --git a/spacy/tokens/doc.pyx b/spacy/tokens/doc.pyx index 85d76efb3..6969515c3 100644 --- a/spacy/tokens/doc.pyx +++ b/spacy/tokens/doc.pyx @@ -245,6 +245,7 @@ cdef class Doc: self.length = 0 self.sentiment = 0.0 self.cats = {} + self.activations = {} self.user_hooks = {} self.user_token_hooks = {} self.user_span_hooks = {} diff --git a/website/docs/api/doc.md b/website/docs/api/doc.md index f97f4ad83..136e7785d 100644 --- a/website/docs/api/doc.md +++ b/website/docs/api/doc.md @@ -751,22 +751,23 @@ The L2 norm of the document's vector representation. ## Attributes {#attributes} -| Name | Description | -| ------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------- | -| `text` | A string representation of the document text. ~~str~~ | -| `text_with_ws` | An alias of `Doc.text`, provided for duck-type compatibility with `Span` and `Token`. ~~str~~ | -| `mem` | The document's local memory heap, for all C data it owns. ~~cymem.Pool~~ | -| `vocab` | The store of lexical types. ~~Vocab~~ | -| `tensor` 2 | Container for dense vector representations. ~~numpy.ndarray~~ | -| `user_data` | A generic storage area, for user custom data. 
~~Dict[str, Any]~~ | -| `lang` 2.1 | Language of the document's vocabulary. ~~int~~ | -| `lang_` 2.1 | Language of the document's vocabulary. ~~str~~ | -| `sentiment` | The document's positivity/negativity score, if available. ~~float~~ | -| `user_hooks` | A dictionary that allows customization of the `Doc`'s properties. ~~Dict[str, Callable]~~ | -| `user_token_hooks` | A dictionary that allows customization of properties of `Token` children. ~~Dict[str, Callable]~~ | -| `user_span_hooks` | A dictionary that allows customization of properties of `Span` children. ~~Dict[str, Callable]~~ | -| `has_unknown_spaces` | Whether the document was constructed without known spacing between tokens (typically when created from gold tokenization). ~~bool~~ | -| `_` | User space for adding custom [attribute extensions](/usage/processing-pipelines#custom-components-attributes). ~~Underscore~~ | +| Name | Description | +| ------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------- | +| `text` | A string representation of the document text. ~~str~~ | +| `text_with_ws` | An alias of `Doc.text`, provided for duck-type compatibility with `Span` and `Token`. ~~str~~ | +| `mem` | The document's local memory heap, for all C data it owns. ~~cymem.Pool~~ | +| `vocab` | The store of lexical types. ~~Vocab~~ | +| `tensor` 2 | Container for dense vector representations. ~~numpy.ndarray~~ | +| `user_data` | A generic storage area, for user custom data. ~~Dict[str, Any]~~ | +| `lang` 2.1 | Language of the document's vocabulary. ~~int~~ | +| `lang_` 2.1 | Language of the document's vocabulary. ~~str~~ | +| `sentiment` | The document's positivity/negativity score, if available. ~~float~~ | +| `user_hooks` | A dictionary that allows customization of the `Doc`'s properties. ~~Dict[str, Callable]~~ | +| `user_token_hooks` | A dictionary that allows customization of properties of `Token` children. ~~Dict[str, Callable]~~ | +| `user_span_hooks` | A dictionary that allows customization of properties of `Span` children. ~~Dict[str, Callable]~~ | +| `has_unknown_spaces` | Whether the document was constructed without known spacing between tokens (typically when created from gold tokenization). ~~bool~~ | +| `_` | User space for adding custom [attribute extensions](/usage/processing-pipelines#custom-components-attributes). ~~Underscore~~ | +| `activations` 4.0 | A dictionary of activations per trainable pipe (available when the `save_activations` option of a pipe is enabled). ~~Dict[str, Option[Any]]~~ | ## Serialization fields {#serialization-fields} diff --git a/website/docs/api/edittreelemmatizer.md b/website/docs/api/edittreelemmatizer.md index 63e4bf910..8bee74316 100644 --- a/website/docs/api/edittreelemmatizer.md +++ b/website/docs/api/edittreelemmatizer.md @@ -44,14 +44,15 @@ architectures and their arguments and hyperparameters. > nlp.add_pipe("trainable_lemmatizer", config=config, name="lemmatizer") > ``` -| Setting | Description | -| --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `model` | A model instance that predicts the edit tree probabilities. 
The output vectors should match the number of edit trees in size, and be normalized as probabilities (all scores between 0 and 1, with the rows summing to `1`). Defaults to [Tagger](/api/architectures#Tagger). ~~Model[List[Doc], List[Floats2d]]~~ | -| `backoff` | ~~Token~~ attribute to use when no applicable edit tree is found. Defaults to `orth`. ~~str~~ | -| `min_tree_freq` | Minimum frequency of an edit tree in the training set to be used. Defaults to `3`. ~~int~~ | -| `overwrite` | Whether existing annotation is overwritten. Defaults to `False`. ~~bool~~ | -| `top_k` | The number of most probable edit trees to try before resorting to `backoff`. Defaults to `1`. ~~int~~ | -| `scorer` | The scoring method. Defaults to [`Scorer.score_token_attr`](/api/scorer#score_token_attr) for the attribute `"lemma"`. ~~Optional[Callable]~~ | +| Setting | Description | +| ----------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `model` | A model instance that predicts the edit tree probabilities. The output vectors should match the number of edit trees in size, and be normalized as probabilities (all scores between 0 and 1, with the rows summing to `1`). Defaults to [Tagger](/api/architectures#Tagger). ~~Model[List[Doc], List[Floats2d]]~~ | +| `backoff` | ~~Token~~ attribute to use when no applicable edit tree is found. Defaults to `orth`. ~~str~~ | +| `min_tree_freq` | Minimum frequency of an edit tree in the training set to be used. Defaults to `3`. ~~int~~ | +| `overwrite` | Whether existing annotation is overwritten. Defaults to `False`. ~~bool~~ | +| `top_k` | The number of most probable edit trees to try before resorting to `backoff`. Defaults to `1`. ~~int~~ | +| `scorer` | The scoring method. Defaults to [`Scorer.score_token_attr`](/api/scorer#score_token_attr) for the attribute `"lemma"`. ~~Optional[Callable]~~ | +| `save_activations` 4.0 | Save activations in `Doc` when annotating. Saved activations are `"probabilities"` and `"tree_ids"`. ~~Union[bool, list[str]]~~ | ```python %%GITHUB_SPACY/spacy/pipeline/edit_tree_lemmatizer.py diff --git a/website/docs/api/entitylinker.md b/website/docs/api/entitylinker.md index 43e08a39c..07dd02634 100644 --- a/website/docs/api/entitylinker.md +++ b/website/docs/api/entitylinker.md @@ -52,19 +52,20 @@ architectures and their arguments and hyperparameters. > nlp.add_pipe("entity_linker", config=config) > ``` -| Setting | Description | -| ---------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `labels_discard` | NER labels that will automatically get a "NIL" prediction. Defaults to `[]`. ~~Iterable[str]~~ | -| `n_sents` | The number of neighbouring sentences to take into account. Defaults to 0. ~~int~~ | -| `incl_prior` | Whether or not to include prior probabilities from the KB in the model. Defaults to `True`. ~~bool~~ | -| `incl_context` | Whether or not to include the local context in the model. Defaults to `True`. 
~~bool~~ | -| `model` | The [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. Defaults to [EntityLinker](/api/architectures#EntityLinker). ~~Model~~ | -| `entity_vector_length` | Size of encoding vectors in the KB. Defaults to `64`. ~~int~~ | -| `use_gold_ents` | Whether to copy entities from the gold docs or not. Defaults to `True`. If `False`, entities must be set in the training data or by an annotating component in the pipeline. ~~int~~ | -| `get_candidates` | Function that generates plausible candidates for a given `Span` object. Defaults to [CandidateGenerator](/api/architectures#CandidateGenerator), a function looking up exact, case-dependent aliases in the KB. ~~Callable[[KnowledgeBase, Span], Iterable[Candidate]]~~ | -| `overwrite` 3.2 | Whether existing annotation is overwritten. Defaults to `True`. ~~bool~~ | -| `scorer` 3.2 | The scoring method. Defaults to [`Scorer.score_links`](/api/scorer#score_links). ~~Optional[Callable]~~ | -| `threshold` 3.4 | Confidence threshold for entity predictions. The default of `None` implies that all predictions are accepted, otherwise those with a score beneath the treshold are discarded. If there are no predictions with scores above the threshold, the linked entity is `NIL`. ~~Optional[float]~~ | +| Setting | Description | +| ----------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `labels_discard` | NER labels that will automatically get a "NIL" prediction. Defaults to `[]`. ~~Iterable[str]~~ | +| `n_sents` | The number of neighbouring sentences to take into account. Defaults to 0. ~~int~~ | +| `incl_prior` | Whether or not to include prior probabilities from the KB in the model. Defaults to `True`. ~~bool~~ | +| `incl_context` | Whether or not to include the local context in the model. Defaults to `True`. ~~bool~~ | +| `model` | The [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. Defaults to [EntityLinker](/api/architectures#EntityLinker). ~~Model~~ | +| `entity_vector_length` | Size of encoding vectors in the KB. Defaults to `64`. ~~int~~ | +| `use_gold_ents` | Whether to copy entities from the gold docs or not. Defaults to `True`. If `False`, entities must be set in the training data or by an annotating component in the pipeline. ~~int~~ | +| `get_candidates` | Function that generates plausible candidates for a given `Span` object. Defaults to [CandidateGenerator](/api/architectures#CandidateGenerator), a function looking up exact, case-dependent aliases in the KB. ~~Callable[[KnowledgeBase, Span], Iterable[Candidate]]~~ | +| `overwrite` 3.2 | Whether existing annotation is overwritten. Defaults to `True`. ~~bool~~ | +| `scorer` 3.2 | The scoring method. Defaults to [`Scorer.score_links`](/api/scorer#score_links). ~~Optional[Callable]~~ | +| `save_activations` 4.0 | Save activations in `Doc` when annotating. Saved activations are `"ents"` and `"scores"`. ~~Union[bool, list[str]]~~ | +| `threshold` 3.4 | Confidence threshold for entity predictions. The default of `None` implies that all predictions are accepted, otherwise those with a score beneath the treshold are discarded. If there are no predictions with scores above the threshold, the linked entity is `NIL`. 
~~Optional[float]~~ | ```python %%GITHUB_SPACY/spacy/pipeline/entity_linker.py diff --git a/website/docs/api/morphologizer.md b/website/docs/api/morphologizer.md index fda6d1fa6..97444b157 100644 --- a/website/docs/api/morphologizer.md +++ b/website/docs/api/morphologizer.md @@ -42,12 +42,13 @@ architectures and their arguments and hyperparameters. > nlp.add_pipe("morphologizer", config=config) > ``` -| Setting | Description | -| ---------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | The model to use. Defaults to [Tagger](/api/architectures#Tagger). ~~Model[List[Doc], List[Floats2d]]~~ | -| `overwrite` 3.2 | Whether the values of existing features are overwritten. Defaults to `True`. ~~bool~~ | -| `extend` 3.2 | Whether existing feature types (whose values may or may not be overwritten depending on `overwrite`) are preserved. Defaults to `False`. ~~bool~~ | -| `scorer` 3.2 | The scoring method. Defaults to [`Scorer.score_token_attr`](/api/scorer#score_token_attr) for the attributes `"pos"` and `"morph"` and [`Scorer.score_token_attr_per_feat`](/api/scorer#score_token_attr_per_feat) for the attribute `"morph"`. ~~Optional[Callable]~~ | +| Setting | Description | +| ----------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | The model to use. Defaults to [Tagger](/api/architectures#Tagger). ~~Model[List[Doc], List[Floats2d]]~~ | +| `overwrite` 3.2 | Whether the values of existing features are overwritten. Defaults to `True`. ~~bool~~ | +| `extend` 3.2 | Whether existing feature types (whose values may or may not be overwritten depending on `overwrite`) are preserved. Defaults to `False`. ~~bool~~ | +| `scorer` 3.2 | The scoring method. Defaults to [`Scorer.score_token_attr`](/api/scorer#score_token_attr) for the attributes `"pos"` and `"morph"` and [`Scorer.score_token_attr_per_feat`](/api/scorer#score_token_attr_per_feat) for the attribute `"morph"`. ~~Optional[Callable]~~ | +| `save_activations` 4.0 | Save activations in `Doc` when annotating. Saved activations are `"probabilities"` and `"label_ids"`. ~~Union[bool, list[str]]~~ | ```python %%GITHUB_SPACY/spacy/pipeline/morphologizer.pyx @@ -399,8 +400,8 @@ coarse-grained POS as the feature `POS`. > assert "Mood=Ind|POS=VERB|Tense=Past|VerbForm=Fin" in morphologizer.labels > ``` -| Name | Description | -| ----------- | ------------------------------------------------------ | +| Name | Description | +| ----------- | --------------------------------------------------------- | | **RETURNS** | The labels added to the component. ~~Iterable[str, ...]~~ | ## Morphologizer.label_data {#label_data tag="property" new="3"} diff --git a/website/docs/api/sentencerecognizer.md b/website/docs/api/sentencerecognizer.md index 2f50350ae..03744e1b5 100644 --- a/website/docs/api/sentencerecognizer.md +++ b/website/docs/api/sentencerecognizer.md @@ -39,11 +39,12 @@ architectures and their arguments and hyperparameters. 
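The settings tables treat `save_activations` like any other factory option, so besides the runtime property it can be switched on when a component is added; a minimal sketch (the pipeline still needs to be initialized or trained before it can annotate):

```python
import spacy

nlp = spacy.blank("en")
nlp.add_pipe("senter", config={"save_activations": True})
nlp.add_pipe("morphologizer", config={"save_activations": True})
# ... initialize / train before calling nlp() on text ...
```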
> nlp.add_pipe("senter", config=config) > ``` -| Setting | Description | -| ---------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | The [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. Defaults to [Tagger](/api/architectures#Tagger). ~~Model[List[Doc], List[Floats2d]]~~ | -| `overwrite` 3.2 | Whether existing annotation is overwritten. Defaults to `False`. ~~bool~~ | -| `scorer` 3.2 | The scoring method. Defaults to [`Scorer.score_spans`](/api/scorer#score_spans) for the attribute `"sents"`. ~~Optional[Callable]~~ | +| Setting | Description | +| ----------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | The [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. Defaults to [Tagger](/api/architectures#Tagger). ~~Model[List[Doc], List[Floats2d]]~~ | +| `overwrite` 3.2 | Whether existing annotation is overwritten. Defaults to `False`. ~~bool~~ | +| `scorer` 3.2 | The scoring method. Defaults to [`Scorer.score_spans`](/api/scorer#score_spans) for the attribute `"sents"`. ~~Optional[Callable]~~ | +| `save_activations` 4.0 | Save activations in `Doc` when annotating. Saved activations are `"probabilities"` and `"label_ids"`. ~~Union[bool, list[str]]~~ | ```python %%GITHUB_SPACY/spacy/pipeline/senter.pyx diff --git a/website/docs/api/spancategorizer.md b/website/docs/api/spancategorizer.md index 58a06bcf5..e07ad3577 100644 --- a/website/docs/api/spancategorizer.md +++ b/website/docs/api/spancategorizer.md @@ -52,14 +52,15 @@ architectures and their arguments and hyperparameters. > nlp.add_pipe("spancat", config=config) > ``` -| Setting | Description | -| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `suggester` | A function that [suggests spans](#suggesters). Spans are returned as a ragged array with two integer columns, for the start and end positions. Defaults to [`ngram_suggester`](#ngram_suggester). ~~Callable[[Iterable[Doc], Optional[Ops]], Ragged]~~ | -| `model` | A model instance that is given a a list of documents and `(start, end)` indices representing candidate span offsets. The model predicts a probability for each category for each span. Defaults to [SpanCategorizer](/api/architectures#SpanCategorizer). ~~Model[Tuple[List[Doc], Ragged], Floats2d]~~ | -| `spans_key` | Key of the [`Doc.spans`](/api/doc#spans) dict to save the spans under. During initialization and training, the component will look for spans on the reference document under the same key. Defaults to `"sc"`. ~~str~~ | -| `threshold` | Minimum probability to consider a prediction positive. Spans with a positive prediction will be saved on the Doc. Defaults to `0.5`. ~~float~~ | -| `max_positive` | Maximum number of labels to consider positive per span. Defaults to `None`, indicating no limit. ~~Optional[int]~~ | -| `scorer` | The scoring method. Defaults to [`Scorer.score_spans`](/api/scorer#score_spans) for `Doc.spans[spans_key]` with overlapping spans allowed. 
~~Optional[Callable]~~ | +| Setting | Description | +| ----------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `suggester` | A function that [suggests spans](#suggesters). Spans are returned as a ragged array with two integer columns, for the start and end positions. Defaults to [`ngram_suggester`](#ngram_suggester). ~~Callable[[Iterable[Doc], Optional[Ops]], Ragged]~~ | +| `model` | A model instance that is given a a list of documents and `(start, end)` indices representing candidate span offsets. The model predicts a probability for each category for each span. Defaults to [SpanCategorizer](/api/architectures#SpanCategorizer). ~~Model[Tuple[List[Doc], Ragged], Floats2d]~~ | +| `spans_key` | Key of the [`Doc.spans`](/api/doc#spans) dict to save the spans under. During initialization and training, the component will look for spans on the reference document under the same key. Defaults to `"sc"`. ~~str~~ | +| `threshold` | Minimum probability to consider a prediction positive. Spans with a positive prediction will be saved on the Doc. Defaults to `0.5`. ~~float~~ | +| `max_positive` | Maximum number of labels to consider positive per span. Defaults to `None`, indicating no limit. ~~Optional[int]~~ | +| `scorer` | The scoring method. Defaults to [`Scorer.score_spans`](/api/scorer#score_spans) for `Doc.spans[spans_key]` with overlapping spans allowed. ~~Optional[Callable]~~ | +| `save_activations` 4.0 | Save activations in `Doc` when annotating. Saved activations are `"indices"` and `"scores"`. ~~Union[bool, list[str]]~~ | ```python %%GITHUB_SPACY/spacy/pipeline/spancat.py diff --git a/website/docs/api/tagger.md b/website/docs/api/tagger.md index 90a49b197..0d77d9bf4 100644 --- a/website/docs/api/tagger.md +++ b/website/docs/api/tagger.md @@ -40,12 +40,13 @@ architectures and their arguments and hyperparameters. > nlp.add_pipe("tagger", config=config) > ``` -| Setting | Description | -| ------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `model` | A model instance that predicts the tag probabilities. The output vectors should match the number of tags in size, and be normalized as probabilities (all scores between 0 and 1, with the rows summing to `1`). Defaults to [Tagger](/api/architectures#Tagger). ~~Model[List[Doc], List[Floats2d]]~~ | -| `overwrite` 3.2 | Whether existing annotation is overwritten. Defaults to `False`. ~~bool~~ | -| `scorer` 3.2 | The scoring method. Defaults to [`Scorer.score_token_attr`](/api/scorer#score_token_attr) for the attribute `"tag"`. ~~Optional[Callable]~~ | -| `neg_prefix` 3.2.1 | The prefix used to specify incorrect tags while training. The tagger will learn not to predict exactly this tag. Defaults to `!`. 
~~str~~ | +| Setting | Description | +| ----------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `model` | A model instance that predicts the tag probabilities. The output vectors should match the number of tags in size, and be normalized as probabilities (all scores between 0 and 1, with the rows summing to `1`). Defaults to [Tagger](/api/architectures#Tagger). ~~Model[List[Doc], List[Floats2d]]~~ | +| `overwrite` 3.2 | Whether existing annotation is overwritten. Defaults to `False`. ~~bool~~ | +| `scorer` 3.2 | The scoring method. Defaults to [`Scorer.score_token_attr`](/api/scorer#score_token_attr) for the attribute `"tag"`. ~~Optional[Callable]~~ | +| `neg_prefix` 3.2.1 | The prefix used to specify incorrect tags while training. The tagger will learn not to predict exactly this tag. Defaults to `!`. ~~str~~ | +| `save_activations` 4.0 | Save activations in `Doc` when annotating. Saved activations are `"probabilities"` and `"label_ids"`. ~~Union[bool, list[str]]~~ | ```python %%GITHUB_SPACY/spacy/pipeline/tagger.pyx diff --git a/website/docs/api/textcategorizer.md b/website/docs/api/textcategorizer.md index 042b4ab76..d8a609693 100644 --- a/website/docs/api/textcategorizer.md +++ b/website/docs/api/textcategorizer.md @@ -117,14 +117,15 @@ Create a new pipeline instance. In your application, you would normally use a shortcut for this and instantiate the component using its string name and [`nlp.add_pipe`](/api/language#create_pipe). -| Name | Description | -| -------------- | -------------------------------------------------------------------------------------------------------------------------------- | -| `vocab` | The shared vocabulary. ~~Vocab~~ | -| `model` | The Thinc [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. ~~Model[List[Doc], List[Floats2d]]~~ | -| `name` | String name of the component instance. Used to add entries to the `losses` during training. ~~str~~ | -| _keyword-only_ | | -| `threshold` | Cutoff to consider a prediction "positive", relevant when printing accuracy results. ~~float~~ | -| `scorer` | The scoring method. Defaults to [`Scorer.score_cats`](/api/scorer#score_cats) for the attribute `"cats"`. ~~Optional[Callable]~~ | +| Name | Description | +| ----------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | +| `vocab` | The shared vocabulary. ~~Vocab~~ | +| `model` | The Thinc [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. ~~Model[List[Doc], List[Floats2d]]~~ | +| `name` | String name of the component instance. Used to add entries to the `losses` during training. ~~str~~ | +| _keyword-only_ | | +| `threshold` | Cutoff to consider a prediction "positive", relevant when printing accuracy results. ~~float~~ | +| `scorer` | The scoring method. Defaults to [`Scorer.score_cats`](/api/scorer#score_cats) for the attribute `"cats"`. ~~Optional[Callable]~~ | +| `save_activations` 4.0 | Save activations in `Doc` when annotating. The supported activations is `"probabilities"`. 
~~Union[bool, list[str]]~~ | ## TextCategorizer.\_\_call\_\_ {#call tag="method"} From 3f0c3ad7d30d493cd017b6bb41b174d991bbcdc1 Mon Sep 17 00:00:00 2001 From: Richard Hudson Date: Wed, 14 Sep 2022 09:36:55 +0200 Subject: [PATCH 26/82] Correct alignment example and documentation (#11491) * Correct example and documentation * Added altered example.md * Changes based on review + apply prettier * Remote unnecessary 'the' Co-authored-by: Madeesh Kannan Co-authored-by: Madeesh Kannan --- website/docs/api/example.md | 16 ++++++++++------ website/docs/usage/linguistic-features.md | 10 +++++----- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/website/docs/api/example.md b/website/docs/api/example.md index ca9d3c056..0228e8935 100644 --- a/website/docs/api/example.md +++ b/website/docs/api/example.md @@ -286,10 +286,14 @@ Calculate alignment tables between two tokenizations. ### Alignment attributes {#alignment-attributes"} -| Name | Description | -| ----- | --------------------------------------------------------------------- | -| `x2y` | The `Ragged` object holding the alignment from `x` to `y`. ~~Ragged~~ | -| `y2x` | The `Ragged` object holding the alignment from `y` to `x`. ~~Ragged~~ | +Alignment attributes are managed using `AlignmentArray`, which is a +simplified version of Thinc's [Ragged](https://thinc.ai/docs/api-types#ragged) +type that only supports the `data` and `length` attributes. + +| Name | Description | +| ----- | ------------------------------------------------------------------------------------- | +| `x2y` | The `AlignmentArray` object holding the alignment from `x` to `y`. ~~AlignmentArray~~ | +| `y2x` | The `AlignmentArray` object holding the alignment from `y` to `x`. ~~AlignmentArray~~ | @@ -309,10 +313,10 @@ tokenizations add up to the same string. For example, you'll be able to align > spacy_tokens = ["obama", "'s", "podcast"] > alignment = Alignment.from_strings(bert_tokens, spacy_tokens) > a2b = alignment.x2y -> assert list(a2b.dataXd) == [0, 1, 1, 2] +> assert list(a2b.data) == [0, 1, 1, 2] > ``` > -> If `a2b.dataXd[1] == a2b.dataXd[2] == 1`, that means that `A[1]` (`"'"`) and +> If `a2b.data[1] == a2b.data[2] == 1`, that means that `A[1]` (`"'"`) and > `A[2]` (`"s"`) both align to `B[1]` (`"'s"`). 
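To make the renamed attributes concrete, here is a small illustrative sketch (the `expand` helper is not part of spaCy or of this patch); it assumes a version in which `Alignment.x2y` and `Alignment.y2x` expose `data` and `lengths` as described above:

```python
from spacy.training import Alignment

def expand(side):
    # Hypothetical helper: walk the ragged (lengths, data) pair and collect,
    # for each source token, the target token indices it aligns to.
    out, offset = [], 0
    for length in side.lengths:
        out.append([int(i) for i in side.data[offset : offset + length]])
        offset += int(length)
    return out

bert_tokens = ["obama", "'", "s", "podcast"]
spacy_tokens = ["obama", "'s", "podcast"]
align = Alignment.from_strings(bert_tokens, spacy_tokens)
print(expand(align.x2y))  # [[0], [1], [1], [2]]
print(expand(align.y2x))  # [[0], [1, 2], [3]]
```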
### Alignment.from_strings {#classmethod tag="function"} diff --git a/website/docs/usage/linguistic-features.md b/website/docs/usage/linguistic-features.md index 82472c67e..099678c40 100644 --- a/website/docs/usage/linguistic-features.md +++ b/website/docs/usage/linguistic-features.md @@ -1422,9 +1422,9 @@ other_tokens = ["i", "listened", "to", "obama", "'", "s", "podcasts", "."] spacy_tokens = ["i", "listened", "to", "obama", "'s", "podcasts", "."] align = Alignment.from_strings(other_tokens, spacy_tokens) print(f"a -> b, lengths: {align.x2y.lengths}") # array([1, 1, 1, 1, 1, 1, 1, 1]) -print(f"a -> b, mapping: {align.x2y.dataXd}") # array([0, 1, 2, 3, 4, 4, 5, 6]) : two tokens both refer to "'s" +print(f"a -> b, mapping: {align.x2y.data}") # array([0, 1, 2, 3, 4, 4, 5, 6]) : two tokens both refer to "'s" print(f"b -> a, lengths: {align.y2x.lengths}") # array([1, 1, 1, 1, 2, 1, 1]) : the token "'s" refers to two tokens -print(f"b -> a, mappings: {align.y2x.dataXd}") # array([0, 1, 2, 3, 4, 5, 6, 7]) +print(f"b -> a, mappings: {align.y2x.data}") # array([0, 1, 2, 3, 4, 5, 6, 7]) ``` Here are some insights from the alignment information generated in the example @@ -1433,10 +1433,10 @@ above: - The one-to-one mappings for the first four tokens are identical, which means they map to each other. This makes sense because they're also identical in the input: `"i"`, `"listened"`, `"to"` and `"obama"`. -- The value of `x2y.dataXd[6]` is `5`, which means that `other_tokens[6]` +- The value of `x2y.data[6]` is `5`, which means that `other_tokens[6]` (`"podcasts"`) aligns to `spacy_tokens[5]` (also `"podcasts"`). -- `x2y.dataXd[4]` and `x2y.dataXd[5]` are both `4`, which means that both tokens - 4 and 5 of `other_tokens` (`"'"` and `"s"`) align to token 4 of `spacy_tokens` +- `x2y.data[4]` and `x2y.data[5]` are both `4`, which means that both tokens 4 + and 5 of `other_tokens` (`"'"` and `"s"`) align to token 4 of `spacy_tokens` (`"'s"`). From 7c98245c0c0f9c6c0c4a523c0bf1a75690e58620 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Wed, 14 Sep 2022 17:05:22 +0200 Subject: [PATCH 27/82] Add levenshtein from polyleven (#11418) Add a simple levenshtein distance function using the implementation from the polyleven library as `spacy.matcher.levenshtein`. --- .gitignore | 1 + licenses/3rd_party_licenses.txt | 31 ++ setup.py | 11 + spacy/matcher/__init__.py | 3 +- spacy/matcher/levenshtein.pyx | 15 + spacy/matcher/polyleven.c | 384 ++++++++++++++++++++++++ spacy/tests/matcher/test_levenshtein.py | 36 +++ 7 files changed, 480 insertions(+), 1 deletion(-) create mode 100644 spacy/matcher/levenshtein.pyx create mode 100644 spacy/matcher/polyleven.c create mode 100644 spacy/tests/matcher/test_levenshtein.py diff --git a/.gitignore b/.gitignore index ac72f2bbf..ac333f958 100644 --- a/.gitignore +++ b/.gitignore @@ -24,6 +24,7 @@ quickstart-training-generator.js cythonize.json spacy/*.html *.cpp +*.c *.so # Vim / VSCode / editors diff --git a/licenses/3rd_party_licenses.txt b/licenses/3rd_party_licenses.txt index d58da9c4a..851e09585 100644 --- a/licenses/3rd_party_licenses.txt +++ b/licenses/3rd_party_licenses.txt @@ -127,3 +127,34 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
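For orientation, a minimal usage sketch of the new `spacy.matcher.levenshtein` helper added by this patch (illustrative only; the expected value for the Japanese pair is taken from the test cases further down, and the bounded behaviour of the optional `k` argument follows the polyleven implementation included below):

```python
from spacy.matcher import levenshtein

print(levenshtein("kitten", "sitting"))    # 3
print(levenshtein("うあい", "いいうい"))    # 3, works on full Unicode strings
# With a bound k, a value greater than k is returned once the true
# distance (here 6) exceeds the bound, per the polyleven code below.
print(levenshtein("abcdef", "uvwxyz", k=2))
```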
+ + +polyleven +--------- + +* Files: spacy/matcher/polyleven.c + +MIT License + +Copyright (c) 2021 Fujimoto Seiji +Copyright (c) 2021 Max Bachmann +Copyright (c) 2022 Nick Mazuk +Copyright (c) 2022 Michael Weiss + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/setup.py b/setup.py index ec1bd35fa..c4138aa93 100755 --- a/setup.py +++ b/setup.py @@ -205,6 +205,17 @@ def setup_package(): get_python_inc(plat_specific=True), ] ext_modules = [] + ext_modules.append( + Extension( + "spacy.matcher.levenshtein", + [ + "spacy/matcher/levenshtein.pyx", + "spacy/matcher/polyleven.c", + ], + language="c", + include_dirs=include_dirs, + ) + ) for name in MOD_NAMES: mod_path = name.replace(".", "/") + ".pyx" ext = Extension( diff --git a/spacy/matcher/__init__.py b/spacy/matcher/__init__.py index 286844787..a4f164847 100644 --- a/spacy/matcher/__init__.py +++ b/spacy/matcher/__init__.py @@ -1,5 +1,6 @@ from .matcher import Matcher from .phrasematcher import PhraseMatcher from .dependencymatcher import DependencyMatcher +from .levenshtein import levenshtein -__all__ = ["Matcher", "PhraseMatcher", "DependencyMatcher"] +__all__ = ["Matcher", "PhraseMatcher", "DependencyMatcher", "levenshtein"] diff --git a/spacy/matcher/levenshtein.pyx b/spacy/matcher/levenshtein.pyx new file mode 100644 index 000000000..8463d913d --- /dev/null +++ b/spacy/matcher/levenshtein.pyx @@ -0,0 +1,15 @@ +# cython: profile=True, binding=True, infer_types=True +from cpython.object cimport PyObject +from libc.stdint cimport int64_t + +from typing import Optional + + +cdef extern from "polyleven.c": + int64_t polyleven(PyObject *o1, PyObject *o2, int64_t k) + + +cpdef int64_t levenshtein(a: str, b: str, k: Optional[int] = None): + if k is None: + k = -1 + return polyleven(a, b, k) diff --git a/spacy/matcher/polyleven.c b/spacy/matcher/polyleven.c new file mode 100644 index 000000000..2f2b8826c --- /dev/null +++ b/spacy/matcher/polyleven.c @@ -0,0 +1,384 @@ +/* + * Adapted from Polyleven (https://ceptord.net/) + * + * Source: https://github.com/fujimotos/polyleven/blob/c3f95a080626c5652f0151a2e449963288ccae84/polyleven.c + * + * Copyright (c) 2021 Fujimoto Seiji + * Copyright (c) 2021 Max Bachmann + * Copyright (c) 2022 Nick Mazuk + * Copyright (c) 2022 Michael Weiss + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, 
publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include + +#define MIN(a,b) ((a) < (b) ? (a) : (b)) +#define MAX(a,b) ((a) > (b) ? (a) : (b)) +#define CDIV(a,b) ((a) / (b) + ((a) % (b) > 0)) +#define BIT(i,n) (((i) >> (n)) & 1) +#define FLIP(i,n) ((i) ^ ((uint64_t) 1 << (n))) +#define ISASCII(kd) ((kd) == PyUnicode_1BYTE_KIND) + +/* + * Bare bone of PyUnicode + */ +struct strbuf { + void *ptr; + int kind; + int64_t len; +}; + +static void strbuf_init(struct strbuf *s, PyObject *o) +{ + s->ptr = PyUnicode_DATA(o); + s->kind = PyUnicode_KIND(o); + s->len = PyUnicode_GET_LENGTH(o); +} + +#define strbuf_read(s, i) PyUnicode_READ((s)->kind, (s)->ptr, (i)) + +/* + * An encoded mbleven model table. + * + * Each 8-bit integer represents an edit sequence, with using two + * bits for a single operation. + * + * 01 = DELETE, 10 = INSERT, 11 = REPLACE + * + * For example, 13 is '1101' in binary notation, so it means + * DELETE + REPLACE. + */ +static const uint8_t MBLEVEN_MATRIX[] = { + 3, 0, 0, 0, 0, 0, 0, 0, + 1, 0, 0, 0, 0, 0, 0, 0, + 15, 9, 6, 0, 0, 0, 0, 0, + 13, 7, 0, 0, 0, 0, 0, 0, + 5, 0, 0, 0, 0, 0, 0, 0, + 63, 39, 45, 57, 54, 30, 27, 0, + 61, 55, 31, 37, 25, 22, 0, 0, + 53, 29, 23, 0, 0, 0, 0, 0, + 21, 0, 0, 0, 0, 0, 0, 0, +}; + +#define MBLEVEN_MATRIX_GET(k, d) ((((k) + (k) * (k)) / 2 - 1) + (d)) * 8 + +static int64_t mbleven_ascii(char *s1, int64_t len1, + char *s2, int64_t len2, int k) +{ + int pos; + uint8_t m; + int64_t i, j, c, r; + + pos = MBLEVEN_MATRIX_GET(k, len1 - len2); + r = k + 1; + + while (MBLEVEN_MATRIX[pos]) { + m = MBLEVEN_MATRIX[pos++]; + i = j = c = 0; + while (i < len1 && j < len2) { + if (s1[i] != s2[j]) { + c++; + if (!m) break; + if (m & 1) i++; + if (m & 2) j++; + m >>= 2; + } else { + i++; + j++; + } + } + c += (len1 - i) + (len2 - j); + r = MIN(r, c); + if (r < 2) { + return r; + } + } + return r; +} + +static int64_t mbleven(PyObject *o1, PyObject *o2, int64_t k) +{ + int pos; + uint8_t m; + int64_t i, j, c, r; + struct strbuf s1, s2; + + strbuf_init(&s1, o1); + strbuf_init(&s2, o2); + + if (s1.len < s2.len) + return mbleven(o2, o1, k); + + if (k > 3) + return -1; + + if (k < s1.len - s2.len) + return k + 1; + + if (ISASCII(s1.kind) && ISASCII(s2.kind)) + return mbleven_ascii(s1.ptr, s1.len, s2.ptr, s2.len, k); + + pos = MBLEVEN_MATRIX_GET(k, s1.len - s2.len); + r = k + 1; + + while (MBLEVEN_MATRIX[pos]) { + m = MBLEVEN_MATRIX[pos++]; + i = j = c = 0; + while (i < s1.len && j < s2.len) { + if (strbuf_read(&s1, i) != strbuf_read(&s2, j)) { + c++; + if (!m) break; + if (m & 1) i++; + if (m & 2) j++; + m >>= 2; + } else { + i++; + j++; + } + } + c += (s1.len - i) + (s2.len - j); + r = MIN(r, c); + if (r < 2) { + return r; + } + } + return r; +} + +/* + * Data structure to store Peq (equality 
bit-vector). + */ +struct blockmap_entry { + uint32_t key[128]; + uint64_t val[128]; +}; + +struct blockmap { + int64_t nr; + struct blockmap_entry *list; +}; + +#define blockmap_key(c) ((c) | 0x80000000U) +#define blockmap_hash(c) ((c) % 128) + +static int blockmap_init(struct blockmap *map, struct strbuf *s) +{ + int64_t i; + struct blockmap_entry *be; + uint32_t c, k; + uint8_t h; + + map->nr = CDIV(s->len, 64); + map->list = calloc(1, map->nr * sizeof(struct blockmap_entry)); + if (map->list == NULL) { + PyErr_NoMemory(); + return -1; + } + + for (i = 0; i < s->len; i++) { + be = &(map->list[i / 64]); + c = strbuf_read(s, i); + h = blockmap_hash(c); + k = blockmap_key(c); + + while (be->key[h] && be->key[h] != k) + h = blockmap_hash(h + 1); + be->key[h] = k; + be->val[h] |= (uint64_t) 1 << (i % 64); + } + return 0; +} + +static void blockmap_clear(struct blockmap *map) +{ + if (map->list) + free(map->list); + map->list = NULL; + map->nr = 0; +} + +static uint64_t blockmap_get(struct blockmap *map, int block, uint32_t c) +{ + struct blockmap_entry *be; + uint8_t h; + uint32_t k; + + h = blockmap_hash(c); + k = blockmap_key(c); + + be = &(map->list[block]); + while (be->key[h] && be->key[h] != k) + h = blockmap_hash(h + 1); + return be->key[h] == k ? be->val[h] : 0; +} + +/* + * Myers' bit-parallel algorithm + * + * See: G. Myers. "A fast bit-vector algorithm for approximate string + * matching based on dynamic programming." Journal of the ACM, 1999. + */ +static int64_t myers1999_block(struct strbuf *s1, struct strbuf *s2, + struct blockmap *map) +{ + uint64_t Eq, Xv, Xh, Ph, Mh, Pv, Mv, Last; + uint64_t *Mhc, *Phc; + int64_t i, b, hsize, vsize, Score; + uint8_t Pb, Mb; + + hsize = CDIV(s1->len, 64); + vsize = CDIV(s2->len, 64); + Score = s2->len; + + Phc = malloc(hsize * 2 * sizeof(uint64_t)); + if (Phc == NULL) { + PyErr_NoMemory(); + return -1; + } + Mhc = Phc + hsize; + memset(Phc, -1, hsize * sizeof(uint64_t)); + memset(Mhc, 0, hsize * sizeof(uint64_t)); + Last = (uint64_t)1 << ((s2->len - 1) % 64); + + for (b = 0; b < vsize; b++) { + Mv = 0; + Pv = (uint64_t) -1; + Score = s2->len; + + for (i = 0; i < s1->len; i++) { + Eq = blockmap_get(map, b, strbuf_read(s1, i)); + + Pb = BIT(Phc[i / 64], i % 64); + Mb = BIT(Mhc[i / 64], i % 64); + + Xv = Eq | Mv; + Xh = ((((Eq | Mb) & Pv) + Pv) ^ Pv) | Eq | Mb; + + Ph = Mv | ~ (Xh | Pv); + Mh = Pv & Xh; + + if (Ph & Last) Score++; + if (Mh & Last) Score--; + + if ((Ph >> 63) ^ Pb) + Phc[i / 64] = FLIP(Phc[i / 64], i % 64); + + if ((Mh >> 63) ^ Mb) + Mhc[i / 64] = FLIP(Mhc[i / 64], i % 64); + + Ph = (Ph << 1) | Pb; + Mh = (Mh << 1) | Mb; + + Pv = Mh | ~ (Xv | Ph); + Mv = Ph & Xv; + } + } + free(Phc); + return Score; +} + +static int64_t myers1999_simple(uint8_t *s1, int64_t len1, uint8_t *s2, int64_t len2) +{ + uint64_t Peq[256]; + uint64_t Eq, Xv, Xh, Ph, Mh, Pv, Mv, Last; + int64_t i; + int64_t Score = len2; + + memset(Peq, 0, sizeof(Peq)); + + for (i = 0; i < len2; i++) + Peq[s2[i]] |= (uint64_t) 1 << i; + + Mv = 0; + Pv = (uint64_t) -1; + Last = (uint64_t) 1 << (len2 - 1); + + for (i = 0; i < len1; i++) { + Eq = Peq[s1[i]]; + + Xv = Eq | Mv; + Xh = (((Eq & Pv) + Pv) ^ Pv) | Eq; + + Ph = Mv | ~ (Xh | Pv); + Mh = Pv & Xh; + + if (Ph & Last) Score++; + if (Mh & Last) Score--; + + Ph = (Ph << 1) | 1; + Mh = (Mh << 1); + + Pv = Mh | ~ (Xv | Ph); + Mv = Ph & Xv; + } + return Score; +} + +static int64_t myers1999(PyObject *o1, PyObject *o2) +{ + struct strbuf s1, s2; + struct blockmap map; + int64_t ret; + + strbuf_init(&s1, o1); + strbuf_init(&s2, 
o2); + + if (s1.len < s2.len) + return myers1999(o2, o1); + + if (ISASCII(s1.kind) && ISASCII(s2.kind) && s2.len < 65) + return myers1999_simple(s1.ptr, s1.len, s2.ptr, s2.len); + + if (blockmap_init(&map, &s2)) + return -1; + + ret = myers1999_block(&s1, &s2, &map); + blockmap_clear(&map); + return ret; +} + +/* + * Interface functions + */ +static int64_t polyleven(PyObject *o1, PyObject *o2, int64_t k) +{ + int64_t len1, len2; + + len1 = PyUnicode_GET_LENGTH(o1); + len2 = PyUnicode_GET_LENGTH(o2); + + if (len1 < len2) + return polyleven(o2, o1, k); + + if (k == 0) + return PyUnicode_Compare(o1, o2) ? 1 : 0; + + if (0 < k && k < len1 - len2) + return k + 1; + + if (len2 == 0) + return len1; + + if (0 < k && k < 4) + return mbleven(o1, o2, k); + + return myers1999(o1, o2); +} diff --git a/spacy/tests/matcher/test_levenshtein.py b/spacy/tests/matcher/test_levenshtein.py new file mode 100644 index 000000000..6c7793f63 --- /dev/null +++ b/spacy/tests/matcher/test_levenshtein.py @@ -0,0 +1,36 @@ +import pytest +from spacy.matcher import levenshtein + + +# empty string plus 10 random ASCII, 10 random unicode, and 2 random long tests +# from polyleven +@pytest.mark.parametrize( + "dist,a,b", + [ + (0, "", ""), + (4, "bbcb", "caba"), + (3, "abcb", "cacc"), + (3, "aa", "ccc"), + (1, "cca", "ccac"), + (1, "aba", "aa"), + (4, "bcbb", "abac"), + (3, "acbc", "bba"), + (3, "cbba", "a"), + (2, "bcc", "ba"), + (4, "aaa", "ccbb"), + (3, "うあい", "いいうい"), + (2, "あううい", "うあい"), + (3, "いういい", "うううあ"), + (2, "うい", "あいあ"), + (2, "いあい", "いう"), + (1, "いい", "あいい"), + (3, "あうあ", "いいああ"), + (4, "いあうう", "ううああ"), + (3, "いあいい", "ういああ"), + (3, "いいああ", "ううあう"), + (166,"TCTGGGCACGGATTCGTCAGATTCCATGTCCATATTTGAGGCTCTTGCAGGCAAAATTTGGGCATGTGAACTCCTTATAGTCCCCGTGC","ATATGGATTGGGGGCATTCAAAGATACGGTTTCCCTTTCTTCAGTTTCGCGCGGCGCACGTCCGGGTGCGAGCCAGTTCGTCTTACTCACATTGTCGACTTCACGAATCGCGCATGATGTGCTTAGCCTGTACTTACGAACGAACTTTCGGTCCAAATACATTCTATCAACACCGAGGTATCCGTGCCACACGCCGAAGCTCGACCGTGTTCGTTGAGAGGTGGAAATGGTAAAAGATGAACATAGTC"), + (111,"GGTTCGGCCGAATTCATAGAGCGTGGTAGTCGACGGTATCCCGCCTGGTAGGGGCCCCTTCTACCTAGCGGAAGTTTGTCAGTACTCTATAACACGAGGGCCTCTCACACCCTAGATCGTCCAGCCACTCGAAGATCGCAGCACCCTTACAGAAAGGCATTAATGTTTCTCCTAGCACTTGTGCAATGGTGAAGGAGTGATG","CGTAACACTTCGCGCTACTGGGCTGCAACGTCTTGGGCATACATGCAAGATTATCTAATGCAAGCTTGAGCCCCGCTTGCGGAATTTCCCTAATCGGGGTCCCTTCCTGTTACGATAAGGACGCGTGCACT"), + ], +) +def test_levenshtein(dist, a, b): + assert levenshtein(a, b) == dist From ca1ad67458d96562ab28d03892e926908cb583e1 Mon Sep 17 00:00:00 2001 From: Sofie Van Landeghem Date: Thu, 15 Sep 2022 15:51:19 +0200 Subject: [PATCH 28/82] disable mypy run for Python 3.10 (#11508) --- .github/azure-steps.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/azure-steps.yml b/.github/azure-steps.yml index 18224ba8c..c7722391f 100644 --- a/.github/azure-steps.yml +++ b/.github/azure-steps.yml @@ -27,6 +27,7 @@ steps: - script: python -m mypy spacy displayName: 'Run mypy' + condition: ne(variables['python_version'], '3.10') - task: DeleteFiles@1 inputs: From 5157e4e8235786438c6c463fa7003de17c43b649 Mon Sep 17 00:00:00 2001 From: Sofie Van Landeghem Date: Thu, 15 Sep 2022 17:06:58 +0200 Subject: [PATCH 29/82] disable mypy run for Python 3.10 (#11508) (#11512) --- .github/azure-steps.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/azure-steps.yml b/.github/azure-steps.yml index 18224ba8c..c7722391f 100644 --- a/.github/azure-steps.yml +++ b/.github/azure-steps.yml @@ -27,6 +27,7 @@ steps: - script: python -m mypy spacy displayName: 'Run mypy' + condition: 
ne(variables['python_version'], '3.10') - task: DeleteFiles@1 inputs: From 0509f908743afc86a185346ca6cb2e4789041732 Mon Sep 17 00:00:00 2001 From: Sofie Van Landeghem Date: Thu, 15 Sep 2022 17:29:42 +0200 Subject: [PATCH 30/82] add dot (#11500) --- spacy/errors.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spacy/errors.py b/spacy/errors.py index 5ee1476c2..f55b378e9 100644 --- a/spacy/errors.py +++ b/spacy/errors.py @@ -707,7 +707,7 @@ class Errors(metaclass=ErrorsWithCodes): "need to modify the pipeline, use the built-in methods like " "`nlp.add_pipe`, `nlp.remove_pipe`, `nlp.disable_pipe` or " "`nlp.enable_pipe` instead.") - E927 = ("Can't write to frozen list Maybe you're trying to modify a computed " + E927 = ("Can't write to frozen list. Maybe you're trying to modify a computed " "property or default function argument?") E928 = ("A KnowledgeBase can only be serialized to/from from a directory, " "but the provided argument {loc} points to a file.") From d5c8498f2f8c26fdfb1f18aafeeebbe94e6126bb Mon Sep 17 00:00:00 2001 From: Sofie Van Landeghem Date: Thu, 15 Sep 2022 17:41:25 +0200 Subject: [PATCH 31/82] disable mypy run for Python 3.10 (#11508) (#11511) --- .github/azure-steps.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/azure-steps.yml b/.github/azure-steps.yml index 18224ba8c..c7722391f 100644 --- a/.github/azure-steps.yml +++ b/.github/azure-steps.yml @@ -27,6 +27,7 @@ steps: - script: python -m mypy spacy displayName: 'Run mypy' + condition: ne(variables['python_version'], '3.10') - task: DeleteFiles@1 inputs: From df0b815c2382f572a127e4a35dba30cf1fa9fe45 Mon Sep 17 00:00:00 2001 From: Sofie Van Landeghem Date: Fri, 16 Sep 2022 09:26:33 +0200 Subject: [PATCH 32/82] more explicit Example constructor example (#11489) * make constructor example for Example more explicit * shorten example and add spaces --- website/docs/api/example.md | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/website/docs/api/example.md b/website/docs/api/example.md index 0228e8935..63768d58f 100644 --- a/website/docs/api/example.md +++ b/website/docs/api/example.md @@ -23,11 +23,13 @@ both documents. 
> ```python > from spacy.tokens import Doc > from spacy.training import Example -> -> words = ["hello", "world", "!"] -> spaces = [True, False, False] -> predicted = Doc(nlp.vocab, words=words, spaces=spaces) -> reference = parse_gold_doc(my_data) +> pred_words = ["Apply", "some", "sunscreen"] +> pred_spaces = [True, True, False] +> gold_words = ["Apply", "some", "sun", "screen"] +> gold_spaces = [True, True, False, False] +> gold_tags = ["VERB", "DET", "NOUN", "NOUN"] +> predicted = Doc(nlp.vocab, words=pred_words, spaces=pred_spaces) +> reference = Doc(nlp.vocab, words=gold_words, spaces=gold_spaces, tags=gold_tags) > example = Example(predicted, reference) > ``` From 279358be63a6f32c49c2c89d4657a5239f238d9e Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 16 Sep 2022 11:50:19 +0200 Subject: [PATCH 33/82] Auto-format code with black (#11513) Co-authored-by: explosion-bot --- spacy/tests/matcher/test_levenshtein.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/spacy/tests/matcher/test_levenshtein.py b/spacy/tests/matcher/test_levenshtein.py index 6c7793f63..d30e36132 100644 --- a/spacy/tests/matcher/test_levenshtein.py +++ b/spacy/tests/matcher/test_levenshtein.py @@ -28,8 +28,16 @@ from spacy.matcher import levenshtein (4, "いあうう", "ううああ"), (3, "いあいい", "ういああ"), (3, "いいああ", "ううあう"), - (166,"TCTGGGCACGGATTCGTCAGATTCCATGTCCATATTTGAGGCTCTTGCAGGCAAAATTTGGGCATGTGAACTCCTTATAGTCCCCGTGC","ATATGGATTGGGGGCATTCAAAGATACGGTTTCCCTTTCTTCAGTTTCGCGCGGCGCACGTCCGGGTGCGAGCCAGTTCGTCTTACTCACATTGTCGACTTCACGAATCGCGCATGATGTGCTTAGCCTGTACTTACGAACGAACTTTCGGTCCAAATACATTCTATCAACACCGAGGTATCCGTGCCACACGCCGAAGCTCGACCGTGTTCGTTGAGAGGTGGAAATGGTAAAAGATGAACATAGTC"), - (111,"GGTTCGGCCGAATTCATAGAGCGTGGTAGTCGACGGTATCCCGCCTGGTAGGGGCCCCTTCTACCTAGCGGAAGTTTGTCAGTACTCTATAACACGAGGGCCTCTCACACCCTAGATCGTCCAGCCACTCGAAGATCGCAGCACCCTTACAGAAAGGCATTAATGTTTCTCCTAGCACTTGTGCAATGGTGAAGGAGTGATG","CGTAACACTTCGCGCTACTGGGCTGCAACGTCTTGGGCATACATGCAAGATTATCTAATGCAAGCTTGAGCCCCGCTTGCGGAATTTCCCTAATCGGGGTCCCTTCCTGTTACGATAAGGACGCGTGCACT"), + ( + 166, + "TCTGGGCACGGATTCGTCAGATTCCATGTCCATATTTGAGGCTCTTGCAGGCAAAATTTGGGCATGTGAACTCCTTATAGTCCCCGTGC", + "ATATGGATTGGGGGCATTCAAAGATACGGTTTCCCTTTCTTCAGTTTCGCGCGGCGCACGTCCGGGTGCGAGCCAGTTCGTCTTACTCACATTGTCGACTTCACGAATCGCGCATGATGTGCTTAGCCTGTACTTACGAACGAACTTTCGGTCCAAATACATTCTATCAACACCGAGGTATCCGTGCCACACGCCGAAGCTCGACCGTGTTCGTTGAGAGGTGGAAATGGTAAAAGATGAACATAGTC", + ), + ( + 111, + "GGTTCGGCCGAATTCATAGAGCGTGGTAGTCGACGGTATCCCGCCTGGTAGGGGCCCCTTCTACCTAGCGGAAGTTTGTCAGTACTCTATAACACGAGGGCCTCTCACACCCTAGATCGTCCAGCCACTCGAAGATCGCAGCACCCTTACAGAAAGGCATTAATGTTTCTCCTAGCACTTGTGCAATGGTGAAGGAGTGATG", + "CGTAACACTTCGCGCTACTGGGCTGCAACGTCTTGGGCATACATGCAAGATTATCTAATGCAAGCTTGAGCCCCGCTTGCGGAATTTCCCTAATCGGGGTCCCTTCCTGTTACGATAAGGACGCGTGCACT", + ), ], ) def test_levenshtein(dist, a, b): From af9b01ef97d934a8601aff46d8341fdaf78b88df Mon Sep 17 00:00:00 2001 From: Raphael Mitsch Date: Fri, 16 Sep 2022 16:54:31 +0200 Subject: [PATCH 34/82] Add dependency check to project step runs (#11226) * Add dependency check to project step running. * Fix dependency mismatch warning. * Remove newline. * Add types-setuptools to setup.cfg. * Move types-setuptools to test requirements. Move warnings into _validate_requirements(). Handle file reading in project_run(). * Remove newline formatting for output of package conflicts. * Show full version conflict message instead of just package name. * Update spacy/cli/project/run.py Co-authored-by: Adriane Boyd * Fix typo. 
* Re-add rephrasing of message for conflicting packages. Remove requirements path redundancy. * Update spacy/cli/project/run.py Co-authored-by: Adriane Boyd * Update spacy/cli/project/run.py Co-authored-by: Adriane Boyd * Print unified message for requirement conflicts and missing requirements. * Update spacy/cli/project/run.py Co-authored-by: Adriane Boyd * Fix warning message. * Print conflict/missing messages individually. * Print conflict/missing messages individually. * Add check_requirements setting in project.yml to disable requirements check. * Update website/docs/usage/projects.md Co-authored-by: Adriane Boyd * Update website/docs/usage/projects.md Co-authored-by: Adriane Boyd * Update description of project.yml structure in projects.md. * Update website/docs/usage/projects.md Co-authored-by: Sofie Van Landeghem * Prettify projects docs. Co-authored-by: Adriane Boyd Co-authored-by: Sofie Van Landeghem --- requirements.txt | 1 + spacy/cli/project/run.py | 40 +++++++++++++++++++++++++++++++++- website/docs/usage/projects.md | 35 ++++++++++++++++++----------- 3 files changed, 62 insertions(+), 14 deletions(-) diff --git a/requirements.txt b/requirements.txt index 3e8501b2f..e45fde787 100644 --- a/requirements.txt +++ b/requirements.txt @@ -33,6 +33,7 @@ hypothesis>=3.27.0,<7.0.0 mypy>=0.910,<0.970; platform_machine!='aarch64' types-dataclasses>=0.1.3; python_version < "3.7" types-mock>=0.1.1 +types-setuptools>=57.0.0 types-requests types-setuptools>=57.0.0 black>=22.0,<23.0 diff --git a/spacy/cli/project/run.py b/spacy/cli/project/run.py index d42d95465..ebab7471e 100644 --- a/spacy/cli/project/run.py +++ b/spacy/cli/project/run.py @@ -1,5 +1,8 @@ -from typing import Optional, List, Dict, Sequence, Any, Iterable +from typing import Optional, List, Dict, Sequence, Any, Iterable, Tuple +import os.path from pathlib import Path + +import pkg_resources from wasabi import msg from wasabi.util import locale_escape import sys @@ -71,6 +74,12 @@ def project_run( commands = {cmd["name"]: cmd for cmd in config.get("commands", [])} workflows = config.get("workflows", {}) validate_subcommand(list(commands.keys()), list(workflows.keys()), subcommand) + + req_path = project_dir / "requirements.txt" + if config.get("check_requirements", True) and os.path.exists(req_path): + with req_path.open() as requirements_file: + _check_requirements([req.replace("\n", "") for req in requirements_file]) + if subcommand in workflows: msg.info(f"Running workflow '{subcommand}'") for cmd in workflows[subcommand]: @@ -310,3 +319,32 @@ def get_fileinfo(project_dir: Path, paths: List[str]) -> List[Dict[str, Optional md5 = get_checksum(file_path) if file_path.exists() else None data.append({"path": path, "md5": md5}) return data + + +def _check_requirements(requirements: List[str]) -> Tuple[bool, bool]: + """Checks whether requirements are installed and free of version conflicts. + requirements (List[str]): List of requirements. + RETURNS (Tuple[bool, bool]): Whether (1) any packages couldn't be imported, (2) any packages with version conflicts + exist. + """ + + failed_pkgs_msgs: List[str] = [] + conflicting_pkgs_msgs: List[str] = [] + + for req in requirements: + try: + pkg_resources.require(req) + except pkg_resources.DistributionNotFound as dnf: + failed_pkgs_msgs.append(dnf.report()) + except pkg_resources.VersionConflict as vc: + conflicting_pkgs_msgs.append(vc.report()) + + if len(failed_pkgs_msgs) or len(conflicting_pkgs_msgs): + msg.warn( + title="Missing requirements or requirement conflicts detected. 
Make sure your Python environment is set up " + "correctly and you installed all requirements specified in your project's requirements.txt: " + ) + for pgk_msg in failed_pkgs_msgs + conflicting_pkgs_msgs: + msg.text(pgk_msg) + + return len(failed_pkgs_msgs) > 0, len(conflicting_pkgs_msgs) > 0 diff --git a/website/docs/usage/projects.md b/website/docs/usage/projects.md index 35150035a..4797bbfe3 100644 --- a/website/docs/usage/projects.md +++ b/website/docs/usage/projects.md @@ -148,6 +148,13 @@ skipped. You can also set `--force` to force re-running a command, or `--dry` to perform a "dry run" and see what would happen (without actually running the script). +Since spaCy v3.4.2, `spacy projects run` checks your installed dependencies to +verify that your environment is properly set up and aligns with the project's +`requirements.txt`, if there is one. If missing or conflicting dependencies are +detected, a corresponding warning is displayed. If you'd like to disable the +dependency check, set `check_requirements: false` in your project's +`project.yml`. + ### 4. Run a workflow {#run-workfow} > #### project.yml @@ -226,26 +233,28 @@ pipelines. ```yaml %%GITHUB_PROJECTS/pipelines/tagger_parser_ud/project.yml ``` + > #### Tip: Overriding variables on the CLI > -> If you want to override one or more variables on the CLI and are not already specifying a -> project directory, you need to add `.` as a placeholder: +> If you want to override one or more variables on the CLI and are not already +> specifying a project directory, you need to add `.` as a placeholder: > > ``` > python -m spacy project run test . --vars.foo bar > ``` -| Section | Description | -| --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `title` | An optional project title used in `--help` message and [auto-generated docs](#custom-docs). | -| `description` | An optional project description used in [auto-generated docs](#custom-docs). | -| `vars` | A dictionary of variables that can be referenced in paths, URLs and scripts and overriden on the CLI, just like [`config.cfg` variables](/usage/training#config-interpolation). For example, `${vars.name}` will use the value of the variable `name`. Variables need to be defined in the section `vars`, but can be a nested dict, so you're able to reference `${vars.model.name}`. | -| `env` | A dictionary of variables, mapped to the names of environment variables that will be read in when running the project. For example, `${env.name}` will use the value of the environment variable defined as `name`. | -| `directories` | An optional list of [directories](#project-files) that should be created in the project for assets, training outputs, metrics etc. spaCy will make sure that these directories always exist. | -| `assets` | A list of assets that can be fetched with the [`project assets`](/api/cli#project-assets) command. `url` defines a URL or local path, `dest` is the destination file relative to the project directory, and an optional `checksum` ensures that an error is raised if the file's checksum doesn't match. 
Instead of `url`, you can also provide a `git` block with the keys `repo`, `branch` and `path`, to download from a Git repo. | -| `workflows` | A dictionary of workflow names, mapped to a list of command names, to execute in order. Workflows can be run with the [`project run`](/api/cli#project-run) command. | -| `commands` | A list of named commands. A command can define an optional help message (shown in the CLI when the user adds `--help`) and the `script`, a list of commands to run. The `deps` and `outputs` let you define the created file the command depends on and produces, respectively. This lets spaCy determine whether a command needs to be re-run because its dependencies or outputs changed. Commands can be run as part of a workflow, or separately with the [`project run`](/api/cli#project-run) command. | -| `spacy_version` | Optional spaCy version range like `>=3.0.0,<3.1.0` that the project is compatible with. If it's loaded with an incompatible version, an error is raised when the project is loaded. | +| Section | Description | +| --------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `title` | An optional project title used in `--help` message and [auto-generated docs](#custom-docs). | +| `description` | An optional project description used in [auto-generated docs](#custom-docs). | +| `vars` | A dictionary of variables that can be referenced in paths, URLs and scripts and overriden on the CLI, just like [`config.cfg` variables](/usage/training#config-interpolation). For example, `${vars.name}` will use the value of the variable `name`. Variables need to be defined in the section `vars`, but can be a nested dict, so you're able to reference `${vars.model.name}`. | +| `env` | A dictionary of variables, mapped to the names of environment variables that will be read in when running the project. For example, `${env.name}` will use the value of the environment variable defined as `name`. | +| `directories` | An optional list of [directories](#project-files) that should be created in the project for assets, training outputs, metrics etc. spaCy will make sure that these directories always exist. | +| `assets` | A list of assets that can be fetched with the [`project assets`](/api/cli#project-assets) command. `url` defines a URL or local path, `dest` is the destination file relative to the project directory, and an optional `checksum` ensures that an error is raised if the file's checksum doesn't match. Instead of `url`, you can also provide a `git` block with the keys `repo`, `branch` and `path`, to download from a Git repo. | +| `workflows` | A dictionary of workflow names, mapped to a list of command names, to execute in order. Workflows can be run with the [`project run`](/api/cli#project-run) command. | +| `commands` | A list of named commands. A command can define an optional help message (shown in the CLI when the user adds `--help`) and the `script`, a list of commands to run. The `deps` and `outputs` let you define the created file the command depends on and produces, respectively. 
This lets spaCy determine whether a command needs to be re-run because its dependencies or outputs changed. Commands can be run as part of a workflow, or separately with the [`project run`](/api/cli#project-run) command. | +| `spacy_version` | Optional spaCy version range like `>=3.0.0,<3.1.0` that the project is compatible with. If it's loaded with an incompatible version, an error is raised when the project is loaded. | +| `check_requirements` 3.4.2 | A flag determining whether to verify that the installed dependencies align with the project's `requirements.txt`. Defaults to `true`. | ### Data assets {#data-assets} From f40d2fac29678111ec600eb7def9d58b174f14a2 Mon Sep 17 00:00:00 2001 From: Basile Dura Date: Fri, 23 Sep 2022 13:18:51 +0200 Subject: [PATCH 35/82] fix: remove duplicate v3.2 (#11530) --- website/meta/sidebars.json | 1 - 1 file changed, 1 deletion(-) diff --git a/website/meta/sidebars.json b/website/meta/sidebars.json index 1b743636c..06fce7742 100644 --- a/website/meta/sidebars.json +++ b/website/meta/sidebars.json @@ -12,7 +12,6 @@ { "text": "New in v3.0", "url": "/usage/v3" }, { "text": "New in v3.1", "url": "/usage/v3-1" }, { "text": "New in v3.2", "url": "/usage/v3-2" }, - { "text": "New in v3.2", "url": "/usage/v3-2" }, { "text": "New in v3.3", "url": "/usage/v3-3" }, { "text": "New in v3.4", "url": "/usage/v3-4" } ] From 6f692a06d54d53f702def1a2ca20a649b7a1b644 Mon Sep 17 00:00:00 2001 From: Richard Hudson Date: Mon, 26 Sep 2022 15:58:21 +0200 Subject: [PATCH 36/82] Remove side effects from Doc.__init__() (#11506) * Remove side effects from Doc.__init__() * Changes based on review comment * Readd test * Change interface of Doc.__init__() * Simplify test Co-authored-by: Adriane Boyd * Update doc.md Co-authored-by: Adriane Boyd --- spacy/tests/doc/test_doc_api.py | 15 +++++++++++++++ spacy/tokens/doc.pyi | 2 +- spacy/tokens/doc.pyx | 12 ++++++------ website/docs/api/doc.md | 30 +++++++++++++++--------------- 4 files changed, 37 insertions(+), 22 deletions(-) diff --git a/spacy/tests/doc/test_doc_api.py b/spacy/tests/doc/test_doc_api.py index a64ab2ba8..38003dea9 100644 --- a/spacy/tests/doc/test_doc_api.py +++ b/spacy/tests/doc/test_doc_api.py @@ -82,6 +82,21 @@ def test_issue2396(en_vocab): assert (span.get_lca_matrix() == matrix).all() +@pytest.mark.issue(11499) +def test_init_args_unmodified(en_vocab): + words = ["A", "sentence"] + ents = ["B-TYPE1", ""] + sent_starts = [True, False] + Doc( + vocab=en_vocab, + words=words, + ents=ents, + sent_starts=sent_starts, + ) + assert ents == ["B-TYPE1", ""] + assert sent_starts == [True, False] + + @pytest.mark.parametrize("text", ["-0.23", "+123,456", "±1"]) @pytest.mark.parametrize("lang_cls", [English, MultiLanguage]) @pytest.mark.issue(2782) diff --git a/spacy/tokens/doc.pyi b/spacy/tokens/doc.pyi index a40fa74aa..f0cdaee87 100644 --- a/spacy/tokens/doc.pyi +++ b/spacy/tokens/doc.pyi @@ -72,7 +72,7 @@ class Doc: lemmas: Optional[List[str]] = ..., heads: Optional[List[int]] = ..., deps: Optional[List[str]] = ..., - sent_starts: Optional[List[Union[bool, None]]] = ..., + sent_starts: Optional[List[Union[bool, int, None]]] = ..., ents: Optional[List[str]] = ..., ) -> None: ... @property diff --git a/spacy/tokens/doc.pyx b/spacy/tokens/doc.pyx index 7ba9a3341..d7d2fd8e6 100644 --- a/spacy/tokens/doc.pyx +++ b/spacy/tokens/doc.pyx @@ -217,9 +217,9 @@ cdef class Doc: head in the doc. Defaults to None. deps (Optional[List[str]]): A list of unicode strings, of the same length as words, to assign as token.dep. Defaults to None. 
- sent_starts (Optional[List[Union[bool, None]]]): A list of values, of - the same length as words, to assign as token.is_sent_start. Will be - overridden by heads if heads is provided. Defaults to None. + sent_starts (Optional[List[Union[bool, int, None]]]): A list of values, + of the same length as words, to assign as token.is_sent_start. Will + be overridden by heads if heads is provided. Defaults to None. ents (Optional[List[str]]): A list of unicode strings, of the same length as words, as IOB tags to assign as token.ent_iob and token.ent_type. Defaults to None. @@ -285,6 +285,7 @@ cdef class Doc: heads = [0] * len(deps) if heads and not deps: raise ValueError(Errors.E1017) + sent_starts = list(sent_starts) if sent_starts is not None else None if sent_starts is not None: for i in range(len(sent_starts)): if sent_starts[i] is True: @@ -300,12 +301,11 @@ cdef class Doc: ent_iobs = None ent_types = None if ents is not None: + ents = [ent if ent != "" else None for ent in ents] iob_strings = Token.iob_strings() # make valid IOB2 out of IOB1 or IOB2 for i, ent in enumerate(ents): - if ent is "": - ents[i] = None - elif ent is not None and not isinstance(ent, str): + if ent is not None and not isinstance(ent, str): raise ValueError(Errors.E177.format(tag=ent)) if i < len(ents) - 1: # OI -> OB diff --git a/website/docs/api/doc.md b/website/docs/api/doc.md index f97f4ad83..f97ed4547 100644 --- a/website/docs/api/doc.md +++ b/website/docs/api/doc.md @@ -31,21 +31,21 @@ Construct a `Doc` object. The most common way to get a `Doc` object is via the > doc = Doc(nlp.vocab, words=words, spaces=spaces) > ``` -| Name | Description | -| ---------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `vocab` | A storage container for lexical types. ~~Vocab~~ | -| `words` | A list of strings or integer hash values to add to the document as words. ~~Optional[List[Union[str,int]]]~~ | -| `spaces` | A list of boolean values indicating whether each word has a subsequent space. Must have the same length as `words`, if specified. Defaults to a sequence of `True`. ~~Optional[List[bool]]~~ | -| _keyword-only_ | | -| `user\_data` | Optional extra data to attach to the Doc. ~~Dict~~ | -| `tags` 3 | A list of strings, of the same length as `words`, to assign as `token.tag` for each word. Defaults to `None`. ~~Optional[List[str]]~~ | -| `pos` 3 | A list of strings, of the same length as `words`, to assign as `token.pos` for each word. Defaults to `None`. ~~Optional[List[str]]~~ | -| `morphs` 3 | A list of strings, of the same length as `words`, to assign as `token.morph` for each word. Defaults to `None`. ~~Optional[List[str]]~~ | -| `lemmas` 3 | A list of strings, of the same length as `words`, to assign as `token.lemma` for each word. Defaults to `None`. ~~Optional[List[str]]~~ | -| `heads` 3 | A list of values, of the same length as `words`, to assign as the head for each word. Head indices are the absolute position of the head in the `Doc`. Defaults to `None`. ~~Optional[List[int]]~~ | -| `deps` 3 | A list of strings, of the same length as `words`, to assign as `token.dep` for each word. Defaults to `None`. ~~Optional[List[str]]~~ | -| `sent_starts` 3 | A list of values, of the same length as `words`, to assign as `token.is_sent_start`. Will be overridden by heads if `heads` is provided. Defaults to `None`. 
~~Optional[List[Optional[bool]]]~~ | -| `ents` 3 | A list of strings, of the same length of `words`, to assign the token-based IOB tag. Defaults to `None`. ~~Optional[List[str]]~~ | +| Name | Description | +| ---------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `vocab` | A storage container for lexical types. ~~Vocab~~ | +| `words` | A list of strings or integer hash values to add to the document as words. ~~Optional[List[Union[str,int]]]~~ | +| `spaces` | A list of boolean values indicating whether each word has a subsequent space. Must have the same length as `words`, if specified. Defaults to a sequence of `True`. ~~Optional[List[bool]]~~ | +| _keyword-only_ | | +| `user\_data` | Optional extra data to attach to the Doc. ~~Dict~~ | +| `tags` 3 | A list of strings, of the same length as `words`, to assign as `token.tag` for each word. Defaults to `None`. ~~Optional[List[str]]~~ | +| `pos` 3 | A list of strings, of the same length as `words`, to assign as `token.pos` for each word. Defaults to `None`. ~~Optional[List[str]]~~ | +| `morphs` 3 | A list of strings, of the same length as `words`, to assign as `token.morph` for each word. Defaults to `None`. ~~Optional[List[str]]~~ | +| `lemmas` 3 | A list of strings, of the same length as `words`, to assign as `token.lemma` for each word. Defaults to `None`. ~~Optional[List[str]]~~ | +| `heads` 3 | A list of values, of the same length as `words`, to assign as the head for each word. Head indices are the absolute position of the head in the `Doc`. Defaults to `None`. ~~Optional[List[int]]~~ | +| `deps` 3 | A list of strings, of the same length as `words`, to assign as `token.dep` for each word. Defaults to `None`. ~~Optional[List[str]]~~ | +| `sent_starts` 3 | A list of values, of the same length as `words`, to assign as `token.is_sent_start`. Will be overridden by heads if `heads` is provided. Defaults to `None`. ~~Optional[List[Union[bool, int, None]]]~~ | +| `ents` 3 | A list of strings, of the same length of `words`, to assign the token-based IOB tag. Defaults to `None`. 
~~Optional[List[str]]~~ | ## Doc.\_\_getitem\_\_ {#getitem tag="method"} From 936a5f0506d5a117aeae000481560e1fc0031036 Mon Sep 17 00:00:00 2001 From: Paul O'Leary McCann Date: Tue, 27 Sep 2022 15:25:24 +0900 Subject: [PATCH 37/82] Fix English pipeline names in 3.4 release notes (#11542) --- website/docs/usage/v3-4.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/docs/usage/v3-4.md b/website/docs/usage/v3-4.md index 7cc4570d5..597fc3cc8 100644 --- a/website/docs/usage/v3-4.md +++ b/website/docs/usage/v3-4.md @@ -65,10 +65,10 @@ The English CNN pipelines have new word vectors: | Package | Model Version | TAG | Parser LAS | NER F | | ----------------------------------------------- | ------------- | ---: | ---------: | ----: | -| [`en_core_news_md`](/models/en#en_core_news_md) | v3.3.0 | 97.3 | 90.1 | 84.6 | -| [`en_core_news_md`](/models/en#en_core_news_lg) | v3.4.0 | 97.2 | 90.3 | 85.5 | -| [`en_core_news_lg`](/models/en#en_core_news_md) | v3.3.0 | 97.4 | 90.1 | 85.3 | -| [`en_core_news_lg`](/models/en#en_core_news_lg) | v3.4.0 | 97.3 | 90.2 | 85.6 | +| [`en_core_web_md`](/models/en#en_core_web_md) | v3.3.0 | 97.3 | 90.1 | 84.6 | +| [`en_core_web_md`](/models/en#en_core_web_lg) | v3.4.0 | 97.2 | 90.3 | 85.5 | +| [`en_core_web_lg`](/models/en#en_core_web_md) | v3.3.0 | 97.4 | 90.1 | 85.3 | +| [`en_core_web_lg`](/models/en#en_core_web_lg) | v3.4.0 | 97.3 | 90.2 | 85.6 | ## Notes about upgrading from v3.3 {#upgrading} From 877671e09a0a72ca20ccbbcd65d7073f588cd320 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Tue, 27 Sep 2022 10:16:51 +0200 Subject: [PATCH 38/82] Preserve missing entity annotation in augmenters (#11540) Preserve both `-` and `O` annotation in augmenters rather than relying on `Example.to_dict`'s default support for one option outside of labeled entity spans. This is intended as a temporary workaround for augmenters for v3.4.x. The behavior of `Example` and related IOB utils could be improved in the general case for v3.5. 
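To see the preserved missing-value behaviour in action, here is an illustrative sketch (not part of this patch); it assumes the registered `spacy.lower_case.v1` augmenter and the convention that an empty string in `ents` marks a token whose entity annotation is missing:

```python
import spacy
from spacy.tokens import Doc
from spacy.training import Example

nlp = spacy.blank("en")
words = ["Sarah", "flew", "to", "London"]
# "" = missing annotation (ent_iob 0), "O" = explicitly outside any entity
ents = ["B-PERSON", "", "O", "B-GPE"]
reference = Doc(nlp.vocab, words=words, ents=ents)
example = Example(nlp.make_doc(reference.text), reference)

make_augmenter = spacy.registry.augmenters.get("spacy.lower_case.v1")
augmenter = make_augmenter(level=1.0)
for aug in augmenter(nlp, example):
    # With this change the missing value survives augmentation instead of
    # being converted to "O" by Example.to_dict's default handling.
    print([t.ent_iob_ for t in aug.reference])  # ['B', '', 'O', 'B']
```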
--- spacy/tests/training/test_augmenters.py | 7 +++++-- spacy/training/augment.py | 14 +++++++++++++- spacy/training/iob_utils.py | 8 ++++++++ 3 files changed, 26 insertions(+), 3 deletions(-) diff --git a/spacy/tests/training/test_augmenters.py b/spacy/tests/training/test_augmenters.py index e3639c5da..35860a199 100644 --- a/spacy/tests/training/test_augmenters.py +++ b/spacy/tests/training/test_augmenters.py @@ -31,7 +31,7 @@ def doc(nlp): words = ["Sarah", "'s", "sister", "flew", "to", "Silicon", "Valley", "via", "London", "."] tags = ["NNP", "POS", "NN", "VBD", "IN", "NNP", "NNP", "IN", "NNP", "."] pos = ["PROPN", "PART", "NOUN", "VERB", "ADP", "PROPN", "PROPN", "ADP", "PROPN", "PUNCT"] - ents = ["B-PERSON", "I-PERSON", "O", "O", "O", "B-LOC", "I-LOC", "O", "B-GPE", "O"] + ents = ["B-PERSON", "I-PERSON", "O", "", "O", "B-LOC", "I-LOC", "O", "B-GPE", "O"] cats = {"TRAVEL": 1.0, "BAKING": 0.0} # fmt: on doc = Doc(nlp.vocab, words=words, tags=tags, pos=pos, ents=ents) @@ -106,6 +106,7 @@ def test_lowercase_augmenter(nlp, doc): assert [(e.start, e.end, e.label) for e in eg.reference.ents] == ents for ref_ent, orig_ent in zip(eg.reference.ents, doc.ents): assert ref_ent.text == orig_ent.text.lower() + assert [t.ent_iob for t in doc] == [t.ent_iob for t in eg.reference] assert [t.pos_ for t in eg.reference] == [t.pos_ for t in doc] # check that augmentation works when lowercasing leads to different @@ -166,7 +167,7 @@ def test_make_whitespace_variant(nlp): lemmas = ["they", "fly", "to", "New", "York", "City", ".", "\n", "then", "they", "drive", "to", "Washington", ",", "D.C."] heads = [1, 1, 1, 4, 5, 2, 1, 10, 10, 10, 10, 10, 11, 12, 12] deps = ["nsubj", "ROOT", "prep", "compound", "compound", "pobj", "punct", "dep", "advmod", "nsubj", "ROOT", "prep", "pobj", "punct", "appos"] - ents = ["O", "O", "O", "B-GPE", "I-GPE", "I-GPE", "O", "O", "O", "O", "O", "O", "B-GPE", "O", "B-GPE"] + ents = ["O", "", "O", "B-GPE", "I-GPE", "I-GPE", "O", "O", "O", "O", "O", "O", "B-GPE", "O", "B-GPE"] # fmt: on doc = Doc( nlp.vocab, @@ -215,6 +216,8 @@ def test_make_whitespace_variant(nlp): assert mod_ex2.reference[j].head.i == j - 1 # entities are well-formed assert len(doc.ents) == len(mod_ex.reference.ents) + # there is one token with missing entity information + assert any(t.ent_iob == 0 for t in mod_ex.reference) for ent in mod_ex.reference.ents: assert not ent[0].is_space assert not ent[-1].is_space diff --git a/spacy/training/augment.py b/spacy/training/augment.py index 55d780ba4..2fe8c24fb 100644 --- a/spacy/training/augment.py +++ b/spacy/training/augment.py @@ -6,7 +6,7 @@ from functools import partial from ..util import registry from .example import Example -from .iob_utils import split_bilu_label +from .iob_utils import split_bilu_label, _doc_to_biluo_tags_with_partial if TYPE_CHECKING: from ..language import Language # noqa: F401 @@ -62,6 +62,9 @@ def combined_augmenter( if orth_variants and random.random() < orth_level: raw_text = example.text orig_dict = example.to_dict() + orig_dict["doc_annotation"]["entities"] = _doc_to_biluo_tags_with_partial( + example.reference + ) variant_text, variant_token_annot = make_orth_variants( nlp, raw_text, @@ -128,6 +131,9 @@ def lower_casing_augmenter( def make_lowercase_variant(nlp: "Language", example: Example): example_dict = example.to_dict() + example_dict["doc_annotation"]["entities"] = _doc_to_biluo_tags_with_partial( + example.reference + ) doc = nlp.make_doc(example.text.lower()) example_dict["token_annotation"]["ORTH"] = [t.lower_ for t in 
example.reference] return example.from_dict(doc, example_dict) @@ -146,6 +152,9 @@ def orth_variants_augmenter( else: raw_text = example.text orig_dict = example.to_dict() + orig_dict["doc_annotation"]["entities"] = _doc_to_biluo_tags_with_partial( + example.reference + ) variant_text, variant_token_annot = make_orth_variants( nlp, raw_text, @@ -248,6 +257,9 @@ def make_whitespace_variant( RETURNS (Example): Example with one additional space token. """ example_dict = example.to_dict() + example_dict["doc_annotation"]["entities"] = _doc_to_biluo_tags_with_partial( + example.reference + ) doc_dict = example_dict.get("doc_annotation", {}) token_dict = example_dict.get("token_annotation", {}) # returned unmodified if: diff --git a/spacy/training/iob_utils.py b/spacy/training/iob_utils.py index 61f83a1c3..0d4d246b0 100644 --- a/spacy/training/iob_utils.py +++ b/spacy/training/iob_utils.py @@ -60,6 +60,14 @@ def doc_to_biluo_tags(doc: Doc, missing: str = "O"): ) +def _doc_to_biluo_tags_with_partial(doc: Doc) -> List[str]: + ents = doc_to_biluo_tags(doc, missing="-") + for i, token in enumerate(doc): + if token.ent_iob == 2: + ents[i] = "O" + return ents + + def offsets_to_biluo_tags( doc: Doc, entities: Iterable[Tuple[int, int, Union[str, int]]], missing: str = "O" ) -> List[str]: From a44b7d4622108a42ddb95b62b642df6f142a3450 Mon Sep 17 00:00:00 2001 From: Paul O'Leary McCann Date: Tue, 27 Sep 2022 18:11:23 +0900 Subject: [PATCH 39/82] Add experimental coref docs (#11291) * Add experimental coref docs * Docs cleanup * Apply suggestions from code review Co-authored-by: Sofie Van Landeghem * Apply changes from code review * Fix prettier formatting It seems a period after a number made this think it was a list? * Update docs on examples for initialize * Add docs for coref scorers * Remove 3.4 notes from coref There won't be a "new" tag until it's in core. * Add docs for span cleaner * Fix docs * Fix docs to match spacy-experimental These weren't properly updated when the code was moved out of spacy core. * More doc fixes * Formatting * Update architectures * Fix links * Fix another link Co-authored-by: Sofie Van Landeghem Co-authored-by: svlandeg --- website/docs/api/architectures.md | 92 ++++++- website/docs/api/coref.md | 353 ++++++++++++++++++++++++ website/docs/api/pipeline-functions.md | 33 +++ website/docs/api/scorer.md | 59 ++++ website/docs/api/span-resolver.md | 356 +++++++++++++++++++++++++ website/meta/sidebars.json | 2 + 6 files changed, 889 insertions(+), 6 deletions(-) create mode 100644 website/docs/api/coref.md create mode 100644 website/docs/api/span-resolver.md diff --git a/website/docs/api/architectures.md b/website/docs/api/architectures.md index 2537faff6..4c5447f75 100644 --- a/website/docs/api/architectures.md +++ b/website/docs/api/architectures.md @@ -11,6 +11,7 @@ menu: - ['Text Classification', 'textcat'] - ['Span Classification', 'spancat'] - ['Entity Linking', 'entitylinker'] + - ['Coreference', 'coref-architectures'] --- A **model architecture** is a function that wires up a @@ -587,8 +588,8 @@ consists of either two or three subnetworks: run once for each batch. - **lower**: Construct a feature-specific vector for each `(token, feature)` pair. This is also run once for each batch. Constructing the state - representation is then a matter of summing the component features and - applying the non-linearity. + representation is then a matter of summing the component features and applying + the non-linearity. 
- **upper** (optional): A feed-forward network that predicts scores from the state representation. If not present, the output from the lower model is used as action scores directly. @@ -628,8 +629,8 @@ same signature, but the `use_upper` argument was `True` by default. > ``` Build a tagger model, using a provided token-to-vector component. The tagger -model adds a linear layer with softmax activation to predict scores given -the token vectors. +model adds a linear layer with softmax activation to predict scores given the +token vectors. | Name | Description | | ----------- | ------------------------------------------------------------------------------------------ | @@ -920,5 +921,84 @@ A function that reads an existing `KnowledgeBase` from file. A function that takes as input a [`KnowledgeBase`](/api/kb) and a [`Span`](/api/span) object denoting a named entity, and returns a list of plausible [`Candidate`](/api/kb/#candidate) objects. The default -`CandidateGenerator` uses the text of a mention to find its potential -aliases in the `KnowledgeBase`. Note that this function is case-dependent. +`CandidateGenerator` uses the text of a mention to find its potential aliases in +the `KnowledgeBase`. Note that this function is case-dependent. + +## Coreference {#coref-architectures tag="experimental"} + +A [`CoreferenceResolver`](/api/coref) component identifies tokens that refer to +the same entity. A [`SpanResolver`](/api/span-resolver) component infers spans +from single tokens. Together these components can be used to reproduce +traditional coreference models. You can also omit the `SpanResolver` if working +with only token-level clusters is acceptable. + +### spacy-experimental.Coref.v1 {#Coref tag="experimental"} + +> #### Example Config +> +> ```ini +> +> [model] +> @architectures = "spacy-experimental.Coref.v1" +> distance_embedding_size = 20 +> dropout = 0.3 +> hidden_size = 1024 +> depth = 2 +> antecedent_limit = 50 +> antecedent_batch_size = 512 +> +> [model.tok2vec] +> @architectures = "spacy-transformers.TransformerListener.v1" +> grad_factor = 1.0 +> upstream = "transformer" +> pooling = {"@layers":"reduce_mean.v1"} +> ``` + +The `Coref` model architecture is a Thinc `Model`. + +| Name | Description | +| ------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `tok2vec` | The [`tok2vec`](#tok2vec) layer of the model. ~~Model~~ | +| `distance_embedding_size` | A representation of the distance between candidates. ~~int~~ | +| `dropout` | The dropout to use internally. Unlike some Thinc models, this has separate dropout for the internal PyTorch layers. ~~float~~ | +| `hidden_size` | Size of the main internal layers. ~~int~~ | +| `depth` | Depth of the internal network. ~~int~~ | +| `antecedent_limit` | How many candidate antecedents to keep after rough scoring. This has a significant effect on memory usage. Typical values would be 50 to 200, or higher for very long documents. ~~int~~ | +| `antecedent_batch_size` | Internal batch size. ~~int~~ | +| **CREATES** | The model using the architecture. 
~~Model[List[Doc], Floats2d]~~ | + +### spacy-experimental.SpanResolver.v1 {#SpanResolver tag="experimental"} + +> #### Example Config +> +> ```ini +> +> [model] +> @architectures = "spacy-experimental.SpanResolver.v1" +> hidden_size = 1024 +> distance_embedding_size = 64 +> conv_channels = 4 +> window_size = 1 +> max_distance = 128 +> prefix = "coref_head_clusters" +> +> [model.tok2vec] +> @architectures = "spacy-transformers.TransformerListener.v1" +> grad_factor = 1.0 +> upstream = "transformer" +> pooling = {"@layers":"reduce_mean.v1"} +> ``` + +The `SpanResolver` model architecture is a Thinc `Model`. Note that +`MentionClusters` is `List[List[Tuple[int, int]]]`. + +| Name | Description | +| ------------------------- | -------------------------------------------------------------------------------------------------------------------- | +| `tok2vec` | The [`tok2vec`](#tok2vec) layer of the model. ~~Model~~ | +| `hidden_size` | Size of the main internal layers. ~~int~~ | +| `distance_embedding_size` | A representation of the distance between two candidates. ~~int~~ | +| `conv_channels` | The number of channels in the internal CNN. ~~int~~ | +| `window_size` | The number of neighboring tokens to consider in the internal CNN. `1` means consider one token on each side. ~~int~~ | +| `max_distance` | The longest possible length of a predicted span. ~~int~~ | +| `prefix` | The prefix that indicates spans to use for input data. ~~string~~ | +| **CREATES** | The model using the architecture. ~~Model[List[Doc], List[MentionClusters]]~~ | diff --git a/website/docs/api/coref.md b/website/docs/api/coref.md new file mode 100644 index 000000000..8f54422d6 --- /dev/null +++ b/website/docs/api/coref.md @@ -0,0 +1,353 @@ +--- +title: CoreferenceResolver +tag: class,experimental +source: spacy-experimental/coref/coref_component.py +teaser: 'Pipeline component for word-level coreference resolution' +api_base_class: /api/pipe +api_string_name: coref +api_trainable: true +--- + +> #### Installation +> +> ```bash +> $ pip install -U spacy-experimental +> ``` + + + +This component is not yet integrated into spaCy core, and is available via the +extension package +[`spacy-experimental`](https://github.com/explosion/spacy-experimental) starting +in version 0.6.0. It exposes the component via +[entry points](/usage/saving-loading/#entry-points), so if you have the package +installed, using `factory = "experimental_coref"` in your +[training config](/usage/training#config) or +`nlp.add_pipe("experimental_coref")` will work out-of-the-box. + + + +A `CoreferenceResolver` component groups tokens into clusters that refer to the +same thing. Clusters are represented as SpanGroups that start with a prefix +(`coref_clusters` by default). + +A `CoreferenceResolver` component can be paired with a +[`SpanResolver`](/api/span-resolver) to expand single tokens to spans. + +## Assigned Attributes {#assigned-attributes} + +Predictions will be saved to `Doc.spans` as a [`SpanGroup`](/api/spangroup). The +span key will be a prefix plus a serial number referring to the coreference +cluster, starting from zero. + +The span key prefix defaults to `"coref_clusters"`, but can be passed as a +parameter. + +| Location | Value | +| ------------------------------------------ | ------------------------------------------------------------------------------------------------------- | +| `Doc.spans[prefix + "_" + cluster_number]` | One coreference cluster, represented as single-token spans. Cluster numbers start from 1. 
~~SpanGroup~~ | + +## Config and implementation {#config} + +The default config is defined by the pipeline component factory and describes +how the component should be configured. You can override its settings via the +`config` argument on [`nlp.add_pipe`](/api/language#add_pipe) or in your +[`config.cfg` for training](/usage/training#config). See the +[model architectures](/api/architectures#coref-architectures) documentation for +details on the architectures and their arguments and hyperparameters. + +> #### Example +> +> ```python +> from spacy_experimental.coref.coref_component import DEFAULT_COREF_MODEL +> from spacy_experimental.coref.coref_util import DEFAULT_CLUSTER_PREFIX +> config={ +> "model": DEFAULT_COREF_MODEL, +> "span_cluster_prefix": DEFAULT_CLUSTER_PREFIX, +> }, +> nlp.add_pipe("experimental_coref", config=config) +> ``` + +| Setting | Description | +| --------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | The [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. Defaults to [Coref](/api/architectures#Coref). ~~Model~~ | +| `span_cluster_prefix` | The prefix for the keys for clusters saved to `doc.spans`. Defaults to `coref_clusters`. ~~str~~ | + +## CoreferenceResolver.\_\_init\_\_ {#init tag="method"} + +> #### Example +> +> ```python +> # Construction via add_pipe with default model +> coref = nlp.add_pipe("experimental_coref") +> +> # Construction via add_pipe with custom model +> config = {"model": {"@architectures": "my_coref.v1"}} +> coref = nlp.add_pipe("experimental_coref", config=config) +> +> # Construction from class +> from spacy_experimental.coref.coref_component import CoreferenceResolver +> coref = CoreferenceResolver(nlp.vocab, model) +> ``` + +Create a new pipeline instance. In your application, you would normally use a +shortcut for this and instantiate the component using its string name and +[`nlp.add_pipe`](/api/language#add_pipe). + +| Name | Description | +| --------------------- | --------------------------------------------------------------------------------------------------- | +| `vocab` | The shared vocabulary. ~~Vocab~~ | +| `model` | The [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. ~~Model~~ | +| `name` | String name of the component instance. Used to add entries to the `losses` during training. ~~str~~ | +| _keyword-only_ | | +| `span_cluster_prefix` | The prefix for the key for saving clusters of spans. ~~bool~~ | + +## CoreferenceResolver.\_\_call\_\_ {#call tag="method"} + +Apply the pipe to one document. The document is modified in place and returned. +This usually happens under the hood when the `nlp` object is called on a text +and all pipeline components are applied to the `Doc` in order. Both +[`__call__`](/api/coref#call) and [`pipe`](/api/coref#pipe) delegate to the +[`predict`](/api/coref#predict) and +[`set_annotations`](/api/coref#set_annotations) methods. + +> #### Example +> +> ```python +> doc = nlp("This is a sentence.") +> coref = nlp.add_pipe("experimental_coref") +> # This usually happens under the hood +> processed = coref(doc) +> ``` + +| Name | Description | +| ----------- | -------------------------------- | +| `doc` | The document to process. ~~Doc~~ | +| **RETURNS** | The processed document. ~~Doc~~ | + +## CoreferenceResolver.pipe {#pipe tag="method"} + +Apply the pipe to a stream of documents. 
This usually happens under the hood +when the `nlp` object is called on a text and all pipeline components are +applied to the `Doc` in order. Both [`__call__`](/api/coref#call) and +[`pipe`](/api/coref#pipe) delegate to the [`predict`](/api/coref#predict) and +[`set_annotations`](/api/coref#set_annotations) methods. + +> #### Example +> +> ```python +> coref = nlp.add_pipe("experimental_coref") +> for doc in coref.pipe(docs, batch_size=50): +> pass +> ``` + +| Name | Description | +| -------------- | ------------------------------------------------------------- | +| `stream` | A stream of documents. ~~Iterable[Doc]~~ | +| _keyword-only_ | | +| `batch_size` | The number of documents to buffer. Defaults to `128`. ~~int~~ | +| **YIELDS** | The processed documents in order. ~~Doc~~ | + +## CoreferenceResolver.initialize {#initialize tag="method"} + +Initialize the component for training. `get_examples` should be a function that +returns an iterable of [`Example`](/api/example) objects. **At least one example +should be supplied.** The data examples are used to **initialize the model** of +the component and can either be the full training data or a representative +sample. Initialization includes validating the network, +[inferring missing shapes](https://thinc.ai/docs/usage-models#validation) and +setting up the label scheme based on the data. This method is typically called +by [`Language.initialize`](/api/language#initialize). + +> #### Example +> +> ```python +> coref = nlp.add_pipe("experimental_coref") +> coref.initialize(lambda: examples, nlp=nlp) +> ``` + +| Name | Description | +| -------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `get_examples` | Function that returns gold-standard annotations in the form of [`Example`](/api/example) objects. Must contain at least one `Example`. ~~Callable[[], Iterable[Example]]~~ | +| _keyword-only_ | | +| `nlp` | The current `nlp` object. Defaults to `None`. ~~Optional[Language]~~ | + +## CoreferenceResolver.predict {#predict tag="method"} + +Apply the component's model to a batch of [`Doc`](/api/doc) objects, without +modifying them. Clusters are returned as a list of `MentionClusters`, one for +each input `Doc`. A `MentionClusters` instance is just a list of lists of pairs +of `int`s, where each item corresponds to a cluster, and the `int`s correspond +to token indices. + +> #### Example +> +> ```python +> coref = nlp.add_pipe("experimental_coref") +> clusters = coref.predict([doc1, doc2]) +> ``` + +| Name | Description | +| ----------- | ---------------------------------------------------------------------------- | +| `docs` | The documents to predict. ~~Iterable[Doc]~~ | +| **RETURNS** | The predicted coreference clusters for the `docs`. ~~List[MentionClusters]~~ | + +## CoreferenceResolver.set_annotations {#set_annotations tag="method"} + +Modify a batch of documents, saving coreference clusters in `Doc.spans`. + +> #### Example +> +> ```python +> coref = nlp.add_pipe("experimental_coref") +> clusters = coref.predict([doc1, doc2]) +> coref.set_annotations([doc1, doc2], clusters) +> ``` + +| Name | Description | +| ---------- | ---------------------------------------------------------------------------- | +| `docs` | The documents to modify. ~~Iterable[Doc]~~ | +| `clusters` | The predicted coreference clusters for the `docs`. 
~~List[MentionClusters]~~ | + +## CoreferenceResolver.update {#update tag="method"} + +Learn from a batch of [`Example`](/api/example) objects. Delegates to +[`predict`](/api/coref#predict). + +> #### Example +> +> ```python +> coref = nlp.add_pipe("experimental_coref") +> optimizer = nlp.initialize() +> losses = coref.update(examples, sgd=optimizer) +> ``` + +| Name | Description | +| -------------- | ------------------------------------------------------------------------------------------------------------------------ | +| `examples` | A batch of [`Example`](/api/example) objects to learn from. ~~Iterable[Example]~~ | +| _keyword-only_ | | +| `drop` | The dropout rate. ~~float~~ | +| `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ | +| `losses` | Optional record of the loss during training. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ | +| **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ | + +## CoreferenceResolver.create_optimizer {#create_optimizer tag="method"} + +Create an optimizer for the pipeline component. + +> #### Example +> +> ```python +> coref = nlp.add_pipe("experimental_coref") +> optimizer = coref.create_optimizer() +> ``` + +| Name | Description | +| ----------- | ---------------------------- | +| **RETURNS** | The optimizer. ~~Optimizer~~ | + +## CoreferenceResolver.use_params {#use_params tag="method, contextmanager"} + +Modify the pipe's model, to use the given parameter values. At the end of the +context, the original parameters are restored. + +> #### Example +> +> ```python +> coref = nlp.add_pipe("experimental_coref") +> with coref.use_params(optimizer.averages): +> coref.to_disk("/best_model") +> ``` + +| Name | Description | +| -------- | -------------------------------------------------- | +| `params` | The parameter values to use in the model. ~~dict~~ | + +## CoreferenceResolver.to_disk {#to_disk tag="method"} + +Serialize the pipe to disk. + +> #### Example +> +> ```python +> coref = nlp.add_pipe("experimental_coref") +> coref.to_disk("/path/to/coref") +> ``` + +| Name | Description | +| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------ | +| `path` | A path to a directory, which will be created if it doesn't exist. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ | +| _keyword-only_ | | +| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ | + +## CoreferenceResolver.from_disk {#from_disk tag="method"} + +Load the pipe from disk. Modifies the object in place and returns it. + +> #### Example +> +> ```python +> coref = nlp.add_pipe("experimental_coref") +> coref.from_disk("/path/to/coref") +> ``` + +| Name | Description | +| -------------- | ----------------------------------------------------------------------------------------------- | +| `path` | A path to a directory. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ | +| _keyword-only_ | | +| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ | +| **RETURNS** | The modified `CoreferenceResolver` object. 
~~CoreferenceResolver~~ | + +## CoreferenceResolver.to_bytes {#to_bytes tag="method"} + +> #### Example +> +> ```python +> coref = nlp.add_pipe("experimental_coref") +> coref_bytes = coref.to_bytes() +> ``` + +Serialize the pipe to a bytestring, including the `KnowledgeBase`. + +| Name | Description | +| -------------- | ------------------------------------------------------------------------------------------- | +| _keyword-only_ | | +| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ | +| **RETURNS** | The serialized form of the `CoreferenceResolver` object. ~~bytes~~ | + +## CoreferenceResolver.from_bytes {#from_bytes tag="method"} + +Load the pipe from a bytestring. Modifies the object in place and returns it. + +> #### Example +> +> ```python +> coref_bytes = coref.to_bytes() +> coref = nlp.add_pipe("experimental_coref") +> coref.from_bytes(coref_bytes) +> ``` + +| Name | Description | +| -------------- | ------------------------------------------------------------------------------------------- | +| `bytes_data` | The data to load from. ~~bytes~~ | +| _keyword-only_ | | +| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ | +| **RETURNS** | The `CoreferenceResolver` object. ~~CoreferenceResolver~~ | + +## Serialization fields {#serialization-fields} + +During serialization, spaCy will export several data fields used to restore +different aspects of the object. If needed, you can exclude them from +serialization by passing in the string names via the `exclude` argument. + +> #### Example +> +> ```python +> data = coref.to_disk("/path", exclude=["vocab"]) +> ``` + +| Name | Description | +| ------- | -------------------------------------------------------------- | +| `vocab` | The shared [`Vocab`](/api/vocab). | +| `cfg` | The config file. You usually don't want to exclude this. | +| `model` | The binary model data. You usually don't want to exclude this. | diff --git a/website/docs/api/pipeline-functions.md b/website/docs/api/pipeline-functions.md index 1b7017ca7..070292782 100644 --- a/website/docs/api/pipeline-functions.md +++ b/website/docs/api/pipeline-functions.md @@ -153,3 +153,36 @@ whole pipeline has run. | `attrs` | A dict of the `Doc` attributes and the values to set them to. Defaults to `{"tensor": None, "_.trf_data": None}` to clean up after `tok2vec` and `transformer` components. ~~dict~~ | | `silent` | If `False`, show warnings if attributes aren't found or can't be set. Defaults to `True`. ~~bool~~ | | **RETURNS** | The modified `Doc` with the modified attributes. ~~Doc~~ | + +## span_cleaner {#span_cleaner tag="function,experimental"} + +Remove `SpanGroup`s from `doc.spans` based on a key prefix. This is used to +clean up after the [`CoreferenceResolver`](/api/coref) when it's paired with a +[`SpanResolver`](/api/span-resolver). + + + +This pipeline function is not yet integrated into spaCy core, and is available +via the extension package +[`spacy-experimental`](https://github.com/explosion/spacy-experimental) starting +in version 0.6.0. It exposes the component via +[entry points](/usage/saving-loading/#entry-points), so if you have the package +installed, using `factory = "span_cleaner"` in your +[training config](/usage/training#config) or `nlp.add_pipe("span_cleaner")` will +work out-of-the-box. 
+ + + +> #### Example +> +> ```python +> config = {"prefix": "coref_head_clusters"} +> nlp.add_pipe("span_cleaner", config=config) +> doc = nlp("text") +> assert "coref_head_clusters_1" not in doc.spans +> ``` + +| Setting | Description | +| ----------- | ------------------------------------------------------------------------------------------------------------------------- | +| `prefix` | A prefix to check `SpanGroup` keys for. Any matching groups will be removed. Defaults to `"coref_head_clusters"`. ~~str~~ | +| **RETURNS** | The modified `Doc` with any matching spans removed. ~~Doc~~ | diff --git a/website/docs/api/scorer.md b/website/docs/api/scorer.md index 8dbe3b276..ca3462aa9 100644 --- a/website/docs/api/scorer.md +++ b/website/docs/api/scorer.md @@ -270,3 +270,62 @@ Compute micro-PRF and per-entity PRF scores. | Name | Description | | ---------- | ------------------------------------------------------------------------------------------------------------------- | | `examples` | The `Example` objects holding both the predictions and the correct gold-standard annotations. ~~Iterable[Example]~~ | + +## score_coref_clusters {#score_coref_clusters tag="experimental"} + +Returns LEA ([Moosavi and Strube, 2016](https://aclanthology.org/P16-1060/)) PRF +scores for coreference clusters. + + + +Note this scoring function is not yet included in spaCy core - for details, see +the [CoreferenceResolver](/api/coref) docs. + + + +> #### Example +> +> ```python +> scores = score_coref_clusters( +> examples, +> span_cluster_prefix="coref_clusters", +> ) +> print(scores["coref_f"]) +> ``` + +| Name | Description | +| --------------------- | ------------------------------------------------------------------------------------------------------------------- | +| `examples` | The `Example` objects holding both the predictions and the correct gold-standard annotations. ~~Iterable[Example]~~ | +| _keyword-only_ | | +| `span_cluster_prefix` | The prefix used for spans representing coreference clusters. ~~str~~ | +| **RETURNS** | A dictionary containing the scores. ~~Dict[str, Optional[float]]~~ | + +## score_span_predictions {#score_span_predictions tag="experimental"} + +Return accuracy for reconstructions of spans from single tokens. Only exactly +correct predictions are counted as correct, there is no partial credit for near +answers. Used by the [SpanResolver](/api/span-resolver). + + + +Note this scoring function is not yet included in spaCy core - for details, see +the [SpanResolver](/api/span-resolver) docs. + + + +> #### Example +> +> ```python +> scores = score_span_predictions( +> examples, +> output_prefix="coref_clusters", +> ) +> print(scores["span_coref_clusters_accuracy"]) +> ``` + +| Name | Description | +| --------------- | ------------------------------------------------------------------------------------------------------------------- | +| `examples` | The `Example` objects holding both the predictions and the correct gold-standard annotations. ~~Iterable[Example]~~ | +| _keyword-only_ | | +| `output_prefix` | The prefix used for spans representing the final predicted spans. ~~str~~ | +| **RETURNS** | A dictionary containing the scores. 
~~Dict[str, Optional[float]]~~ | diff --git a/website/docs/api/span-resolver.md b/website/docs/api/span-resolver.md new file mode 100644 index 000000000..3e992cd03 --- /dev/null +++ b/website/docs/api/span-resolver.md @@ -0,0 +1,356 @@ +--- +title: SpanResolver +tag: class,experimental +source: spacy-experimental/coref/span_resolver_component.py +teaser: 'Pipeline component for resolving tokens into spans' +api_base_class: /api/pipe +api_string_name: span_resolver +api_trainable: true +--- + +> #### Installation +> +> ```bash +> $ pip install -U spacy-experimental +> ``` + + + +This component not yet integrated into spaCy core, and is available via the +extension package +[`spacy-experimental`](https://github.com/explosion/spacy-experimental) starting +in version 0.6.0. It exposes the component via +[entry points](/usage/saving-loading/#entry-points), so if you have the package +installed, using `factory = "experimental_span_resolver"` in your +[training config](/usage/training#config) or +`nlp.add_pipe("experimental_span_resolver")` will work out-of-the-box. + + + +A `SpanResolver` component takes in tokens (represented as `Span` objects of +length 1) and resolves them into `Span` objects of arbitrary length. The initial +use case is as a post-processing step on word-level +[coreference resolution](/api/coref). The input and output keys used to store +`Span` objects are configurable. + +## Assigned Attributes {#assigned-attributes} + +Predictions will be saved to `Doc.spans` as [`SpanGroup`s](/api/spangroup). + +Input token spans will be read in using an input prefix, by default +`"coref_head_clusters"`, and output spans will be saved using an output prefix +(default `"coref_clusters"`) plus a serial number starting from one. The +prefixes are configurable. + +| Location | Value | +| ------------------------------------------------- | ------------------------------------------------------------------------- | +| `Doc.spans[output_prefix + "_" + cluster_number]` | One group of predicted spans. Cluster number starts from 1. ~~SpanGroup~~ | + +## Config and implementation {#config} + +The default config is defined by the pipeline component factory and describes +how the component should be configured. You can override its settings via the +`config` argument on [`nlp.add_pipe`](/api/language#add_pipe) or in your +[`config.cfg` for training](/usage/training#config). See the +[model architectures](/api/architectures#coref-architectures) documentation for +details on the architectures and their arguments and hyperparameters. + +> #### Example +> +> ```python +> from spacy_experimental.coref.span_resolver_component import DEFAULT_SPAN_RESOLVER_MODEL +> from spacy_experimental.coref.coref_util import DEFAULT_CLUSTER_PREFIX, DEFAULT_CLUSTER_HEAD_PREFIX +> config={ +> "model": DEFAULT_SPAN_RESOLVER_MODEL, +> "input_prefix": DEFAULT_CLUSTER_HEAD_PREFIX, +> "output_prefix": DEFAULT_CLUSTER_PREFIX, +> }, +> nlp.add_pipe("experimental_span_resolver", config=config) +> ``` + +| Setting | Description | +| --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `model` | The [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. Defaults to [SpanResolver](/api/architectures#SpanResolver). ~~Model~~ | +| `input_prefix` | The prefix to use for input `SpanGroup`s. Defaults to `coref_head_clusters`. ~~str~~ | +| `output_prefix` | The prefix for predicted `SpanGroup`s. 
Defaults to `coref_clusters`. ~~str~~ | + +## SpanResolver.\_\_init\_\_ {#init tag="method"} + +> #### Example +> +> ```python +> # Construction via add_pipe with default model +> span_resolver = nlp.add_pipe("experimental_span_resolver") +> +> # Construction via add_pipe with custom model +> config = {"model": {"@architectures": "my_span_resolver.v1"}} +> span_resolver = nlp.add_pipe("experimental_span_resolver", config=config) +> +> # Construction from class +> from spacy_experimental.coref.span_resolver_component import SpanResolver +> span_resolver = SpanResolver(nlp.vocab, model) +> ``` + +Create a new pipeline instance. In your application, you would normally use a +shortcut for this and instantiate the component using its string name and +[`nlp.add_pipe`](/api/language#add_pipe). + +| Name | Description | +| --------------- | --------------------------------------------------------------------------------------------------- | +| `vocab` | The shared vocabulary. ~~Vocab~~ | +| `model` | The [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. ~~Model~~ | +| `name` | String name of the component instance. Used to add entries to the `losses` during training. ~~str~~ | +| _keyword-only_ | | +| `input_prefix` | The prefix to use for input `SpanGroup`s. Defaults to `coref_head_clusters`. ~~str~~ | +| `output_prefix` | The prefix for predicted `SpanGroup`s. Defaults to `coref_clusters`. ~~str~~ | + +## SpanResolver.\_\_call\_\_ {#call tag="method"} + +Apply the pipe to one document. The document is modified in place and returned. +This usually happens under the hood when the `nlp` object is called on a text +and all pipeline components are applied to the `Doc` in order. Both +[`__call__`](#call) and [`pipe`](#pipe) delegate to the [`predict`](#predict) +and [`set_annotations`](#set_annotations) methods. + +> #### Example +> +> ```python +> doc = nlp("This is a sentence.") +> span_resolver = nlp.add_pipe("experimental_span_resolver") +> # This usually happens under the hood +> processed = span_resolver(doc) +> ``` + +| Name | Description | +| ----------- | -------------------------------- | +| `doc` | The document to process. ~~Doc~~ | +| **RETURNS** | The processed document. ~~Doc~~ | + +## SpanResolver.pipe {#pipe tag="method"} + +Apply the pipe to a stream of documents. This usually happens under the hood +when the `nlp` object is called on a text and all pipeline components are +applied to the `Doc` in order. Both [`__call__`](/api/span-resolver#call) and +[`pipe`](/api/span-resolver#pipe) delegate to the +[`predict`](/api/span-resolver#predict) and +[`set_annotations`](/api/span-resolver#set_annotations) methods. + +> #### Example +> +> ```python +> span_resolver = nlp.add_pipe("experimental_span_resolver") +> for doc in span_resolver.pipe(docs, batch_size=50): +> pass +> ``` + +| Name | Description | +| -------------- | ------------------------------------------------------------- | +| `stream` | A stream of documents. ~~Iterable[Doc]~~ | +| _keyword-only_ | | +| `batch_size` | The number of documents to buffer. Defaults to `128`. ~~int~~ | +| **YIELDS** | The processed documents in order. ~~Doc~~ | + +## SpanResolver.initialize {#initialize tag="method"} + +Initialize the component for training. `get_examples` should be a function that +returns an iterable of [`Example`](/api/example) objects. 
**At least one example +should be supplied.** The data examples are used to **initialize the model** of +the component and can either be the full training data or a representative +sample. Initialization includes validating the network, +[inferring missing shapes](https://thinc.ai/docs/usage-models#validation) and +setting up the label scheme based on the data. This method is typically called +by [`Language.initialize`](/api/language#initialize). + +> #### Example +> +> ```python +> span_resolver = nlp.add_pipe("experimental_span_resolver") +> span_resolver.initialize(lambda: examples, nlp=nlp) +> ``` + +| Name | Description | +| -------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `get_examples` | Function that returns gold-standard annotations in the form of [`Example`](/api/example) objects. Must contain at least one `Example`. ~~Callable[[], Iterable[Example]]~~ | +| _keyword-only_ | | +| `nlp` | The current `nlp` object. Defaults to `None`. ~~Optional[Language]~~ | + +## SpanResolver.predict {#predict tag="method"} + +Apply the component's model to a batch of [`Doc`](/api/doc) objects, without +modifying them. Predictions are returned as a list of `MentionClusters`, one for +each input `Doc`. A `MentionClusters` instance is just a list of lists of pairs +of `int`s, where each item corresponds to an input `SpanGroup`, and the `int`s +correspond to token indices. + +> #### Example +> +> ```python +> span_resolver = nlp.add_pipe("experimental_span_resolver") +> spans = span_resolver.predict([doc1, doc2]) +> ``` + +| Name | Description | +| ----------- | ------------------------------------------------------------- | +| `docs` | The documents to predict. ~~Iterable[Doc]~~ | +| **RETURNS** | The predicted spans for the `Doc`s. ~~List[MentionClusters]~~ | + +## SpanResolver.set_annotations {#set_annotations tag="method"} + +Modify a batch of documents, saving predictions using the output prefix in +`Doc.spans`. + +> #### Example +> +> ```python +> span_resolver = nlp.add_pipe("experimental_span_resolver") +> spans = span_resolver.predict([doc1, doc2]) +> span_resolver.set_annotations([doc1, doc2], spans) +> ``` + +| Name | Description | +| ------- | ------------------------------------------------------------- | +| `docs` | The documents to modify. ~~Iterable[Doc]~~ | +| `spans` | The predicted spans for the `docs`. ~~List[MentionClusters]~~ | + +## SpanResolver.update {#update tag="method"} + +Learn from a batch of [`Example`](/api/example) objects. Delegates to +[`predict`](/api/span-resolver#predict). + +> #### Example +> +> ```python +> span_resolver = nlp.add_pipe("experimental_span_resolver") +> optimizer = nlp.initialize() +> losses = span_resolver.update(examples, sgd=optimizer) +> ``` + +| Name | Description | +| -------------- | ------------------------------------------------------------------------------------------------------------------------ | +| `examples` | A batch of [`Example`](/api/example) objects to learn from. ~~Iterable[Example]~~ | +| _keyword-only_ | | +| `drop` | The dropout rate. ~~float~~ | +| `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ | +| `losses` | Optional record of the loss during training. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ | +| **RETURNS** | The updated `losses` dictionary. 
~~Dict[str, float]~~ | + +## SpanResolver.create_optimizer {#create_optimizer tag="method"} + +Create an optimizer for the pipeline component. + +> #### Example +> +> ```python +> span_resolver = nlp.add_pipe("experimental_span_resolver") +> optimizer = span_resolver.create_optimizer() +> ``` + +| Name | Description | +| ----------- | ---------------------------- | +| **RETURNS** | The optimizer. ~~Optimizer~~ | + +## SpanResolver.use_params {#use_params tag="method, contextmanager"} + +Modify the pipe's model, to use the given parameter values. At the end of the +context, the original parameters are restored. + +> #### Example +> +> ```python +> span_resolver = nlp.add_pipe("experimental_span_resolver") +> with span_resolver.use_params(optimizer.averages): +> span_resolver.to_disk("/best_model") +> ``` + +| Name | Description | +| -------- | -------------------------------------------------- | +| `params` | The parameter values to use in the model. ~~dict~~ | + +## SpanResolver.to_disk {#to_disk tag="method"} + +Serialize the pipe to disk. + +> #### Example +> +> ```python +> span_resolver = nlp.add_pipe("experimental_span_resolver") +> span_resolver.to_disk("/path/to/span_resolver") +> ``` + +| Name | Description | +| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------ | +| `path` | A path to a directory, which will be created if it doesn't exist. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ | +| _keyword-only_ | | +| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ | + +## SpanResolver.from_disk {#from_disk tag="method"} + +Load the pipe from disk. Modifies the object in place and returns it. + +> #### Example +> +> ```python +> span_resolver = nlp.add_pipe("experimental_span_resolver") +> span_resolver.from_disk("/path/to/span_resolver") +> ``` + +| Name | Description | +| -------------- | ----------------------------------------------------------------------------------------------- | +| `path` | A path to a directory. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ | +| _keyword-only_ | | +| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ | +| **RETURNS** | The modified `SpanResolver` object. ~~SpanResolver~~ | + +## SpanResolver.to_bytes {#to_bytes tag="method"} + +> #### Example +> +> ```python +> span_resolver = nlp.add_pipe("experimental_span_resolver") +> span_resolver_bytes = span_resolver.to_bytes() +> ``` + +Serialize the pipe to a bytestring. + +| Name | Description | +| -------------- | ------------------------------------------------------------------------------------------- | +| _keyword-only_ | | +| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ | +| **RETURNS** | The serialized form of the `SpanResolver` object. ~~bytes~~ | + +## SpanResolver.from_bytes {#from_bytes tag="method"} + +Load the pipe from a bytestring. Modifies the object in place and returns it. + +> #### Example +> +> ```python +> span_resolver_bytes = span_resolver.to_bytes() +> span_resolver = nlp.add_pipe("experimental_span_resolver") +> span_resolver.from_bytes(span_resolver_bytes) +> ``` + +| Name | Description | +| -------------- | ------------------------------------------------------------------------------------------- | +| `bytes_data` | The data to load from. 
~~bytes~~ | +| _keyword-only_ | | +| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ | +| **RETURNS** | The `SpanResolver` object. ~~SpanResolver~~ | + +## Serialization fields {#serialization-fields} + +During serialization, spaCy will export several data fields used to restore +different aspects of the object. If needed, you can exclude them from +serialization by passing in the string names via the `exclude` argument. + +> #### Example +> +> ```python +> data = span_resolver.to_disk("/path", exclude=["vocab"]) +> ``` + +| Name | Description | +| ------- | -------------------------------------------------------------- | +| `vocab` | The shared [`Vocab`](/api/vocab). | +| `cfg` | The config file. You usually don't want to exclude this. | +| `model` | The binary model data. You usually don't want to exclude this. | diff --git a/website/meta/sidebars.json b/website/meta/sidebars.json index 06fce7742..2d8745d77 100644 --- a/website/meta/sidebars.json +++ b/website/meta/sidebars.json @@ -94,6 +94,7 @@ "label": "Pipeline", "items": [ { "text": "AttributeRuler", "url": "/api/attributeruler" }, + { "text": "CoreferenceResolver", "url": "/api/coref" }, { "text": "DependencyParser", "url": "/api/dependencyparser" }, { "text": "EditTreeLemmatizer", "url": "/api/edittreelemmatizer" }, { "text": "EntityLinker", "url": "/api/entitylinker" }, @@ -104,6 +105,7 @@ { "text": "SentenceRecognizer", "url": "/api/sentencerecognizer" }, { "text": "Sentencizer", "url": "/api/sentencizer" }, { "text": "SpanCategorizer", "url": "/api/spancategorizer" }, + { "text": "SpanResolver", "url": "/api/span-resolver" }, { "text": "SpanRuler", "url": "/api/spanruler" }, { "text": "Tagger", "url": "/api/tagger" }, { "text": "TextCategorizer", "url": "/api/textcategorizer" }, From 3e8bc1272f95c89e0aa9e5a19f51e286a7934ffa Mon Sep 17 00:00:00 2001 From: Jacobo Myerston <43222279+jmyerston@users.noreply.github.com> Date: Tue, 27 Sep 2022 02:38:56 -0700 Subject: [PATCH 40/82] add punctuation to grc (#11426) * add punctuation to grc Add support for special editorial punctuation that is common in ancient Greek texts. Ancient Greek texts, as found in digital and print form, have been largely edited by scholars. Restorations and improvements are normally marked with special characters that need to be handled properly by the tokenizer. * add unit tests * simplify regex * move generic quotes to char classes * rename unit test * fix regex Co-authored-by: Adriane Boyd Co-authored-by: svlandeg Co-authored-by: Sofie Van Landeghem Co-authored-by: Adriane Boyd --- spacy/lang/char_classes.py | 2 +- spacy/lang/grc/__init__.py | 4 +++ spacy/lang/grc/punctuation.py | 46 ++++++++++++++++++++++++++ spacy/tests/lang/grc/test_tokenizer.py | 18 ++++++++++ 4 files changed, 69 insertions(+), 1 deletion(-) create mode 100644 spacy/lang/grc/punctuation.py create mode 100644 spacy/tests/lang/grc/test_tokenizer.py diff --git a/spacy/lang/char_classes.py b/spacy/lang/char_classes.py index 1d204c46c..37c58c85f 100644 --- a/spacy/lang/char_classes.py +++ b/spacy/lang/char_classes.py @@ -280,7 +280,7 @@ _currency = ( _punct = ( r"… …… , : ; \! \? ¿ ؟ ¡ \( \) \[ \] \{ \} < > _ # \* & 。 ? ! 
, 、 ; : ~ · । ، ۔ ؛ ٪" ) -_quotes = r'\' " ” “ ` ‘ ´ ’ ‚ , „ » « 「 」 『 』 ( ) 〔 〕 【 】 《 》 〈 〉' +_quotes = r'\' " ” “ ` ‘ ´ ’ ‚ , „ » « 「 」 『 』 ( ) 〔 〕 【 】 《 》 〈 〉 〈 〉 ⟦ ⟧' _hyphens = "- – — -- --- —— ~" # Various symbols like dingbats, but also emoji diff --git a/spacy/lang/grc/__init__.py b/spacy/lang/grc/__init__.py index e83f0c5a5..019b3802e 100644 --- a/spacy/lang/grc/__init__.py +++ b/spacy/lang/grc/__init__.py @@ -1,11 +1,15 @@ from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS from .stop_words import STOP_WORDS from .lex_attrs import LEX_ATTRS +from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES, TOKENIZER_INFIXES from ...language import Language, BaseDefaults class AncientGreekDefaults(BaseDefaults): tokenizer_exceptions = TOKENIZER_EXCEPTIONS + prefixes = TOKENIZER_PREFIXES + suffixes = TOKENIZER_SUFFIXES + infixes = TOKENIZER_INFIXES lex_attr_getters = LEX_ATTRS stop_words = STOP_WORDS diff --git a/spacy/lang/grc/punctuation.py b/spacy/lang/grc/punctuation.py new file mode 100644 index 000000000..8f3589e9a --- /dev/null +++ b/spacy/lang/grc/punctuation.py @@ -0,0 +1,46 @@ +from ..char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES, LIST_CURRENCY +from ..char_classes import LIST_ICONS, ALPHA_LOWER, ALPHA_UPPER, ALPHA, HYPHENS +from ..char_classes import CONCAT_QUOTES + +_prefixes = ( + [ + "†", + "⸏", + ] + + LIST_PUNCT + + LIST_ELLIPSES + + LIST_QUOTES + + LIST_CURRENCY + + LIST_ICONS +) + +_suffixes = ( + LIST_PUNCT + + LIST_ELLIPSES + + LIST_QUOTES + + LIST_ICONS + + [ + "†", + "⸎", + r"(?<=[\u1F00-\u1FFF\u0370-\u03FF])[\-\.⸏]", + ] +) + +_infixes = ( + LIST_ELLIPSES + + LIST_ICONS + + [ + r"(?<=[0-9])[+\-\*^](?=[0-9-])", + r"(?<=[{al}{q}])\.(?=[{au}{q}])".format( + al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES + ), + r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA), + r"(?<=[{a}0-9])(?:{h})(?=[{a}])".format(a=ALPHA, h=HYPHENS), + r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA), + r"(?<=[\u1F00-\u1FFF\u0370-\u03FF])—", + ] +) + +TOKENIZER_PREFIXES = _prefixes +TOKENIZER_SUFFIXES = _suffixes +TOKENIZER_INFIXES = _infixes diff --git a/spacy/tests/lang/grc/test_tokenizer.py b/spacy/tests/lang/grc/test_tokenizer.py new file mode 100644 index 000000000..3df5b546b --- /dev/null +++ b/spacy/tests/lang/grc/test_tokenizer.py @@ -0,0 +1,18 @@ +import pytest + + +# fmt: off +GRC_TOKEN_EXCEPTION_TESTS = [ + ("τὸ 〈τῆς〉 φιλοσοφίας ἔργον ἔνιοί φασιν ἀπὸ ⟦βαρβάρων⟧ ἄρξαι.", ["τὸ", "〈", "τῆς", "〉", "φιλοσοφίας", "ἔργον", "ἔνιοί", "φασιν", "ἀπὸ", "⟦", "βαρβάρων", "⟧", "ἄρξαι", "."]), + ("τὴν δὲ τῶν Αἰγυπτίων φιλοσοφίαν εἶναι τοιαύτην περί τε †θεῶν† καὶ ὑπὲρ δικαιοσύνης.", ["τὴν", "δὲ", "τῶν", "Αἰγυπτίων", "φιλοσοφίαν", "εἶναι", "τοιαύτην", "περί", "τε", "†", "θεῶν", "†", "καὶ", "ὑπὲρ", "δικαιοσύνης", "."]), + ("⸏πόσις δ' Ἐρεχθεύς ἐστί μοι σεσωσμένος⸏", ["⸏", "πόσις", "δ'", "Ἐρεχθεύς", "ἐστί", "μοι", "σεσωσμένος", "⸏"]), + ("⸏ὔπνον ἴδωμεν⸎", ["⸏", "ὔπνον", "ἴδωμεν", "⸎"]), +] +# fmt: on + + +@pytest.mark.parametrize("text,expected_tokens", GRC_TOKEN_EXCEPTION_TESTS) +def test_grc_tokenizer(grc_tokenizer, text, expected_tokens): + tokens = grc_tokenizer(text) + token_list = [token.text for token in tokens if not token.is_space] + assert expected_tokens == token_list From 9557b0fb01612f5b32823dfc52cae71af37f0bd8 Mon Sep 17 00:00:00 2001 From: Taniguchi Yasufumi Date: Tue, 27 Sep 2022 21:11:50 +0900 Subject: [PATCH 41/82] Add spacy-partial-tagger to spaCy Universe (#11538) --- website/meta/universe.json | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git 
a/website/meta/universe.json b/website/meta/universe.json index 9145855c6..9ec0d6c0e 100644 --- a/website/meta/universe.json +++ b/website/meta/universe.json @@ -3984,7 +3984,21 @@ }, "category": ["pipeline"], "tags": ["interpretation", "ja"] + }, + { + "id": "spacy-partial-tagger", + "title": "spaCy - Partial Tagger", + "slogan": "Sequence Tagger for Partially Annotated Dataset in spaCy", + "description": "This is a library to build a CRF tagger with a partially annotated dataset in spaCy. You can build your own tagger only from dictionary.", + "github": "doccano/spacy-partial-tagger", + "pip": "spacy-partial-tagger", + "category": ["pipeline", "training"], + "author": "Yasufumi Taniguchi", + "author_links": { + "github": "yasufumy" + } } + ], "categories": [ From aea16719be04d4d6ab889cd20fe0e323b2c7ffee Mon Sep 17 00:00:00 2001 From: Raphael Mitsch Date: Tue, 27 Sep 2022 14:22:36 +0200 Subject: [PATCH 42/82] Simplify and clarify enable/disable behavior of spacy.load() (#11459) * Change enable/disable behavior so that arguments take precedence over config options. Extend error message on conflict. Add warning message in case of overwriting config option with arguments. * Fix tests in test_serialize_pipeline.py to reflect changes to handling of enable/disable. * Fix type issue. * Move comment. * Move comment. * Issue UserWarning instead of printing wasabi message. Adjust test. * Added pytest.warns(UserWarning) for expected warning to fix tests. * Update warning message. * Move type handling out of fetch_pipes_status(). * Add global variable for default value. Use id() to determine whether used values are default value. * Fix default value for disable. * Rename DEFAULT_PIPE_STATUS to _DEFAULT_EMPTY_PIPES. --- spacy/__init__.py | 6 +- spacy/errors.py | 7 ++- spacy/language.py | 59 ++++++++++++------- spacy/tests/pipeline/test_pipe_methods.py | 33 +++++++++-- .../serialize/test_serialize_pipeline.py | 7 ++- spacy/util.py | 23 ++++---- 6 files changed, 92 insertions(+), 43 deletions(-) diff --git a/spacy/__init__.py b/spacy/__init__.py index d60f46b96..c3568bc5c 100644 --- a/spacy/__init__.py +++ b/spacy/__init__.py @@ -31,9 +31,9 @@ def load( name: Union[str, Path], *, vocab: Union[Vocab, bool] = True, - disable: Union[str, Iterable[str]] = util.SimpleFrozenList(), - enable: Union[str, Iterable[str]] = util.SimpleFrozenList(), - exclude: Union[str, Iterable[str]] = util.SimpleFrozenList(), + disable: Union[str, Iterable[str]] = util._DEFAULT_EMPTY_PIPES, + enable: Union[str, Iterable[str]] = util._DEFAULT_EMPTY_PIPES, + exclude: Union[str, Iterable[str]] = util._DEFAULT_EMPTY_PIPES, config: Union[Dict[str, Any], Config] = util.SimpleFrozenDict(), ) -> Language: """Load a spaCy model from an installed package or a local path. diff --git a/spacy/errors.py b/spacy/errors.py index f55b378e9..c035f684d 100644 --- a/spacy/errors.py +++ b/spacy/errors.py @@ -212,6 +212,8 @@ class Warnings(metaclass=ErrorsWithCodes): W121 = ("Attempting to trace non-existent method '{method}' in pipe '{pipe}'") W122 = ("Couldn't trace method '{method}' in pipe '{pipe}'. This can happen if the pipe class " "is a Cython extension type.") + W123 = ("Argument {arg} with value {arg_value} is used instead of {config_value} as specified in the config. Be " + "aware that this might affect other components in your pipeline.") class Errors(metaclass=ErrorsWithCodes): @@ -937,8 +939,9 @@ class Errors(metaclass=ErrorsWithCodes): E1040 = ("Doc.from_json requires all tokens to have the same attributes. 
" "Some tokens do not contain annotation for: {partial_attrs}") E1041 = ("Expected a string, Doc, or bytes as input, but got: {type}") - E1042 = ("Function was called with `{arg1}`={arg1_values} and " - "`{arg2}`={arg2_values} but these arguments are conflicting.") + E1042 = ("`enable={enable}` and `disable={disable}` are inconsistent with each other.\nIf you only passed " + "one of `enable` or `disable`, the other argument is specified in your pipeline's configuration.\nIn that " + "case pass an empty list for the previously not specified argument to avoid this error.") E1043 = ("Expected None or a value in range [{range_start}, {range_end}] for entity linker threshold, but got " "{value}.") diff --git a/spacy/language.py b/spacy/language.py index 34a06e576..d391f15ab 100644 --- a/spacy/language.py +++ b/spacy/language.py @@ -1,4 +1,4 @@ -from typing import Iterator, Optional, Any, Dict, Callable, Iterable, Collection +from typing import Iterator, Optional, Any, Dict, Callable, Iterable from typing import Union, Tuple, List, Set, Pattern, Sequence from typing import NoReturn, TYPE_CHECKING, TypeVar, cast, overload @@ -10,6 +10,7 @@ from contextlib import contextmanager from copy import deepcopy from pathlib import Path import warnings + from thinc.api import get_current_ops, Config, CupyOps, Optimizer import srsly import multiprocessing as mp @@ -24,7 +25,7 @@ from .pipe_analysis import validate_attrs, analyze_pipes, print_pipe_analysis from .training import Example, validate_examples from .training.initialize import init_vocab, init_tok2vec from .scorer import Scorer -from .util import registry, SimpleFrozenList, _pipe, raise_error +from .util import registry, SimpleFrozenList, _pipe, raise_error, _DEFAULT_EMPTY_PIPES from .util import SimpleFrozenDict, combine_score_weights, CONFIG_SECTION_ORDER from .util import warn_if_jupyter_cupy from .lang.tokenizer_exceptions import URL_MATCH, BASE_EXCEPTIONS @@ -1698,9 +1699,9 @@ class Language: config: Union[Dict[str, Any], Config] = {}, *, vocab: Union[Vocab, bool] = True, - disable: Union[str, Iterable[str]] = SimpleFrozenList(), - enable: Union[str, Iterable[str]] = SimpleFrozenList(), - exclude: Union[str, Iterable[str]] = SimpleFrozenList(), + disable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES, + enable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES, + exclude: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES, meta: Dict[str, Any] = SimpleFrozenDict(), auto_fill: bool = True, validate: bool = True, @@ -1727,12 +1728,6 @@ class Language: DOCS: https://spacy.io/api/language#from_config """ - if isinstance(disable, str): - disable = [disable] - if isinstance(enable, str): - enable = [enable] - if isinstance(exclude, str): - exclude = [exclude] if auto_fill: config = Config( cls.default_config, section_order=CONFIG_SECTION_ORDER @@ -1877,9 +1872,38 @@ class Language: nlp.vocab.from_bytes(vocab_b) # Resolve disabled/enabled settings. + if isinstance(disable, str): + disable = [disable] + if isinstance(enable, str): + enable = [enable] + if isinstance(exclude, str): + exclude = [exclude] + + def fetch_pipes_status(value: Iterable[str], key: str) -> Iterable[str]: + """Fetch value for `enable` or `disable` w.r.t. the specified config and passed arguments passed to + .load(). If both arguments and config specified values for this field, the passed arguments take precedence + and a warning is printed. + value (Iterable[str]): Passed value for `enable` or `disable`. + key (str): Key for field in config (either "enabled" or "disabled"). 
+ RETURN (Iterable[str]): + """ + # We assume that no argument was passed if the value is the specified default value. + if id(value) == id(_DEFAULT_EMPTY_PIPES): + return config["nlp"].get(key, []) + else: + if len(config["nlp"].get(key, [])): + warnings.warn( + Warnings.W123.format( + arg=key[:-1], + arg_value=value, + config_value=config["nlp"][key], + ) + ) + return value + disabled_pipes = cls._resolve_component_status( - [*config["nlp"]["disabled"], *disable], - [*config["nlp"].get("enabled", []), *enable], + fetch_pipes_status(disable, "disabled"), + fetch_pipes_status(enable, "enabled"), config["nlp"]["pipeline"], ) nlp._disabled = set(p for p in disabled_pipes if p not in exclude) @@ -2064,14 +2088,7 @@ class Language: pipe_name for pipe_name in pipe_names if pipe_name not in enable ] if disable and disable != to_disable: - raise ValueError( - Errors.E1042.format( - arg1="enable", - arg2="disable", - arg1_values=enable, - arg2_values=disable, - ) - ) + raise ValueError(Errors.E1042.format(enable=enable, disable=disable)) return tuple(to_disable) diff --git a/spacy/tests/pipeline/test_pipe_methods.py b/spacy/tests/pipeline/test_pipe_methods.py index b946061f6..14a7a36e5 100644 --- a/spacy/tests/pipeline/test_pipe_methods.py +++ b/spacy/tests/pipeline/test_pipe_methods.py @@ -605,10 +605,35 @@ def test_update_with_annotates(): assert results[component] == "" -def test_load_disable_enable() -> None: - """ - Tests spacy.load() with dis-/enabling components. - """ +@pytest.mark.issue(11443) +def test_enable_disable_conflict_with_config(): + """Test conflict between enable/disable w.r.t. `nlp.disabled` set in the config.""" + nlp = English() + nlp.add_pipe("tagger") + nlp.add_pipe("senter") + nlp.add_pipe("sentencizer") + + with make_tempdir() as tmp_dir: + nlp.to_disk(tmp_dir) + # Expected to fail, as config and arguments conflict. + with pytest.raises(ValueError): + spacy.load( + tmp_dir, enable=["tagger"], config={"nlp": {"disabled": ["senter"]}} + ) + # Expected to succeed without warning due to the lack of a conflicting config option. + spacy.load(tmp_dir, enable=["tagger"]) + # Expected to succeed with a warning, as disable=[] should override the config setting. 
+ with pytest.warns(UserWarning): + spacy.load( + tmp_dir, + enable=["tagger"], + disable=[], + config={"nlp": {"disabled": ["senter"]}}, + ) + + +def test_load_disable_enable(): + """Tests spacy.load() with dis-/enabling components.""" base_nlp = English() for pipe in ("sentencizer", "tagger", "parser"): diff --git a/spacy/tests/serialize/test_serialize_pipeline.py b/spacy/tests/serialize/test_serialize_pipeline.py index 9fcf18e2d..b948bb76c 100644 --- a/spacy/tests/serialize/test_serialize_pipeline.py +++ b/spacy/tests/serialize/test_serialize_pipeline.py @@ -404,10 +404,11 @@ def test_serialize_pipeline_disable_enable(): assert nlp3.component_names == ["ner", "tagger"] with make_tempdir() as d: nlp3.to_disk(d) - nlp4 = spacy.load(d, disable=["ner"]) - assert nlp4.pipe_names == [] + with pytest.warns(UserWarning): + nlp4 = spacy.load(d, disable=["ner"]) + assert nlp4.pipe_names == ["tagger"] assert nlp4.component_names == ["ner", "tagger"] - assert nlp4.disabled == ["ner", "tagger"] + assert nlp4.disabled == ["ner"] with make_tempdir() as d: nlp.to_disk(d) nlp5 = spacy.load(d, exclude=["tagger"]) diff --git a/spacy/util.py b/spacy/util.py index 4e1a62d05..3034808ba 100644 --- a/spacy/util.py +++ b/spacy/util.py @@ -67,7 +67,6 @@ LEXEME_NORM_LANGS = ["cs", "da", "de", "el", "en", "id", "lb", "mk", "pt", "ru", CONFIG_SECTION_ORDER = ["paths", "variables", "system", "nlp", "components", "corpora", "training", "pretraining", "initialize"] # fmt: on - logger = logging.getLogger("spacy") logger_stream_handler = logging.StreamHandler() logger_stream_handler.setFormatter( @@ -394,13 +393,17 @@ def get_module_path(module: ModuleType) -> Path: return file_path.parent +# Default value for passed enable/disable values. +_DEFAULT_EMPTY_PIPES = SimpleFrozenList() + + def load_model( name: Union[str, Path], *, vocab: Union["Vocab", bool] = True, - disable: Union[str, Iterable[str]] = SimpleFrozenList(), - enable: Union[str, Iterable[str]] = SimpleFrozenList(), - exclude: Union[str, Iterable[str]] = SimpleFrozenList(), + disable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES, + enable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES, + exclude: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES, config: Union[Dict[str, Any], Config] = SimpleFrozenDict(), ) -> "Language": """Load a model from a package or data path. @@ -470,9 +473,9 @@ def load_model_from_path( *, meta: Optional[Dict[str, Any]] = None, vocab: Union["Vocab", bool] = True, - disable: Union[str, Iterable[str]] = SimpleFrozenList(), - enable: Union[str, Iterable[str]] = SimpleFrozenList(), - exclude: Union[str, Iterable[str]] = SimpleFrozenList(), + disable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES, + enable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES, + exclude: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES, config: Union[Dict[str, Any], Config] = SimpleFrozenDict(), ) -> "Language": """Load a model from a data directory path. 
Creates Language class with @@ -516,9 +519,9 @@ def load_model_from_config( *, meta: Dict[str, Any] = SimpleFrozenDict(), vocab: Union["Vocab", bool] = True, - disable: Union[str, Iterable[str]] = SimpleFrozenList(), - enable: Union[str, Iterable[str]] = SimpleFrozenList(), - exclude: Union[str, Iterable[str]] = SimpleFrozenList(), + disable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES, + enable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES, + exclude: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES, auto_fill: bool = False, validate: bool = True, ) -> "Language": From e794d4ae39b65aed341fa588ed4d473644aec672 Mon Sep 17 00:00:00 2001 From: Peter Baumgartner <5107405+pmbaumgartner@users.noreply.github.com> Date: Wed, 28 Sep 2022 11:16:05 -0400 Subject: [PATCH 43/82] `debug data` Spancat Table Improvements (#11504) * update * fix format function * pull out _format_number * format with black --- spacy/cli/_util.py | 9 +++++++++ spacy/cli/debug_data.py | 29 ++++++++++++++++++++++++----- 2 files changed, 33 insertions(+), 5 deletions(-) diff --git a/spacy/cli/_util.py b/spacy/cli/_util.py index ae43b991b..897964a88 100644 --- a/spacy/cli/_util.py +++ b/spacy/cli/_util.py @@ -573,3 +573,12 @@ def setup_gpu(use_gpu: int, silent=None) -> None: local_msg.info("Using CPU") if gpu_is_available(): local_msg.info("To switch to GPU 0, use the option: --gpu-id 0") + + +def _format_number(number: Union[int, float], ndigits: int = 2) -> str: + """Formats a number (float or int) rounding to `ndigits`, without truncating trailing 0s, + as happens with `round(number, ndigits)`""" + if isinstance(number, float): + return f"{number:.{ndigits}f}" + else: + return str(number) diff --git a/spacy/cli/debug_data.py b/spacy/cli/debug_data.py index bd05471b1..963d5b926 100644 --- a/spacy/cli/debug_data.py +++ b/spacy/cli/debug_data.py @@ -9,7 +9,7 @@ import typer import math from ._util import app, Arg, Opt, show_validation_error, parse_config_overrides -from ._util import import_code, debug_cli +from ._util import import_code, debug_cli, _format_number from ..training import Example, remove_bilu_prefix from ..training.initialize import get_sourced_components from ..schemas import ConfigSchemaTraining @@ -989,7 +989,8 @@ def _get_kl_divergence(p: Counter, q: Counter) -> float: def _format_span_row(span_data: List[Dict], labels: List[str]) -> List[Any]: """Compile into one list for easier reporting""" d = { - label: [label] + list(round(d[label], 2) for d in span_data) for label in labels + label: [label] + list(_format_number(d[label]) for d in span_data) + for label in labels } return list(d.values()) @@ -1004,6 +1005,10 @@ def _get_span_characteristics( label: _gmean(l) for label, l in compiled_gold["spans_length"][spans_key].items() } + spans_per_type = { + label: len(spans) + for label, spans in compiled_gold["spans_per_type"][spans_key].items() + } min_lengths = [min(l) for l in compiled_gold["spans_length"][spans_key].values()] max_lengths = [max(l) for l in compiled_gold["spans_length"][spans_key].values()] @@ -1031,6 +1036,7 @@ def _get_span_characteristics( return { "sd": span_distinctiveness, "bd": sb_distinctiveness, + "spans_per_type": spans_per_type, "lengths": span_length, "min_length": min(min_lengths), "max_length": max(max_lengths), @@ -1045,12 +1051,15 @@ def _get_span_characteristics( def _print_span_characteristics(span_characteristics: Dict[str, Any]): """Print all span characteristics into a table""" - headers = ("Span Type", "Length", "SD", "BD") + headers = ("Span Type", "Length", 
"SD", "BD", "N") + # Wasabi has this at 30 by default, but we might have some long labels + max_col = max(30, max(len(label) for label in span_characteristics["labels"])) # Prepare table data with all span characteristics table_data = [ span_characteristics["lengths"], span_characteristics["sd"], span_characteristics["bd"], + span_characteristics["spans_per_type"], ] table = _format_span_row( span_data=table_data, labels=span_characteristics["labels"] @@ -1061,8 +1070,18 @@ def _print_span_characteristics(span_characteristics: Dict[str, Any]): span_characteristics["avg_sd"], span_characteristics["avg_bd"], ] - footer = ["Wgt. Average"] + [str(round(f, 2)) for f in footer_data] - msg.table(table, footer=footer, header=headers, divider=True) + + footer = ( + ["Wgt. Average"] + ["{:.2f}".format(round(f, 2)) for f in footer_data] + ["-"] + ) + msg.table( + table, + footer=footer, + header=headers, + divider=True, + aligns=["l"] + ["r"] * (len(footer_data) + 1), + max_col=max_col, + ) def _get_spans_length_freq_dist( From 6d7630c5d372cda53b88a18b10bb893ce478d294 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Thu, 29 Sep 2022 10:44:06 +0200 Subject: [PATCH 44/82] Allow overriding spacy_version in spacy package meta (#11552) --- spacy/cli/package.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spacy/cli/package.py b/spacy/cli/package.py index b8c8397b6..324c5d1bb 100644 --- a/spacy/cli/package.py +++ b/spacy/cli/package.py @@ -299,8 +299,8 @@ def get_meta( } nlp = util.load_model_from_path(Path(model_path)) meta.update(nlp.meta) - meta.update(existing_meta) meta["spacy_version"] = util.get_minor_version_range(about.__version__) + meta.update(existing_meta) meta["vectors"] = { "width": nlp.vocab.vectors_length, "vectors": len(nlp.vocab.vectors), From ba63f57f81441d049da52c5d398e5b226019a1a6 Mon Sep 17 00:00:00 2001 From: Paul O'Leary McCann Date: Thu, 29 Sep 2022 18:50:29 +0900 Subject: [PATCH 45/82] Update docs to reflect Doc input to Language (#11555) --- website/docs/api/language.md | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/website/docs/api/language.md b/website/docs/api/language.md index ed763e36a..767a7450a 100644 --- a/website/docs/api/language.md +++ b/website/docs/api/language.md @@ -164,6 +164,9 @@ examples, see the Apply the pipeline to some text. The text can span multiple sentences, and can contain arbitrary whitespace. Alignment into the original string is preserved. +Instead of text, a `Doc` can be passed as input, in which case tokenization is +skipped, but the rest of the pipeline is run. + > #### Example > > ```python @@ -173,7 +176,7 @@ contain arbitrary whitespace. Alignment into the original string is preserved. | Name | Description | | --------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- | -| `text` | The text to be processed. ~~str~~ | +| `text` | The text to be processed, or a Doc. ~~Union[str, Doc]~~ | | _keyword-only_ | | | `disable` | Names of pipeline components to [disable](/usage/processing-pipelines#disabling). ~~List[str]~~ | | `component_cfg` | Optional dictionary of keyword arguments for components, keyed by component names. Defaults to `None`. ~~Optional[Dict[str, Dict[str, Any]]]~~ | @@ -184,6 +187,9 @@ contain arbitrary whitespace. Alignment into the original string is preserved. Process texts as a stream, and yield `Doc` objects in order. 
This is usually more efficient than processing texts one-by-one. +Instead of text, a `Doc` object can be passed as input. In this case +tokenization is skipped but the rest of the pipeline is run. + > #### Example > > ```python @@ -194,7 +200,7 @@ more efficient than processing texts one-by-one. | Name | Description | | ------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `texts` | A sequence of strings. ~~Iterable[str]~~ | +| `texts` | A sequence of strings (or `Doc` objects). ~~Iterable[Union[str, Doc]]~~ | | _keyword-only_ | | | `as_tuples` | If set to `True`, inputs should be a sequence of `(text, context)` tuples. Output will then be a sequence of `(doc, context)` tuples. Defaults to `False`. ~~bool~~ | | `batch_size` | The number of texts to buffer. ~~Optional[int]~~ | From bcda8bc1e720e999243d23ce620181fcad7e8e46 Mon Sep 17 00:00:00 2001 From: Sofie Van Landeghem Date: Thu, 29 Sep 2022 14:24:40 +0200 Subject: [PATCH 46/82] update mypy to latest version (#11546) * update mypy and disable it for python 3.6 * ignoring mypy's type redefinition error --- .github/azure-steps.yml | 2 +- requirements.txt | 2 +- spacy/pipeline/entityruler.py | 5 ++--- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/.github/azure-steps.yml b/.github/azure-steps.yml index c7722391f..9d57219ca 100644 --- a/.github/azure-steps.yml +++ b/.github/azure-steps.yml @@ -27,7 +27,7 @@ steps: - script: python -m mypy spacy displayName: 'Run mypy' - condition: ne(variables['python_version'], '3.10') + condition: ne(variables['python_version'], '3.6') - task: DeleteFiles@1 inputs: diff --git a/requirements.txt b/requirements.txt index e45fde787..446560c06 100644 --- a/requirements.txt +++ b/requirements.txt @@ -30,7 +30,7 @@ pytest-timeout>=1.3.0,<2.0.0 mock>=2.0.0,<3.0.0 flake8>=3.8.0,<3.10.0 hypothesis>=3.27.0,<7.0.0 -mypy>=0.910,<0.970; platform_machine!='aarch64' +mypy>=0.980,<0.990; platform_machine != "aarch64" and python_version >= "3.7" types-dataclasses>=0.1.3; python_version < "3.7" types-mock>=0.1.1 types-setuptools>=57.0.0 diff --git a/spacy/pipeline/entityruler.py b/spacy/pipeline/entityruler.py index 3cb1ca676..8154a077d 100644 --- a/spacy/pipeline/entityruler.py +++ b/spacy/pipeline/entityruler.py @@ -1,6 +1,5 @@ -import warnings from typing import Optional, Union, List, Dict, Tuple, Iterable, Any, Callable, Sequence -from typing import cast +import warnings from collections import defaultdict from pathlib import Path import srsly @@ -317,7 +316,7 @@ class EntityRuler(Pipe): phrase_pattern["id"] = ent_id phrase_patterns.append(phrase_pattern) for entry in token_patterns + phrase_patterns: # type: ignore[operator] - label = entry["label"] + label = entry["label"] # type: ignore if "id" in entry: ent_label = label label = self._create_label(label, entry["id"]) From ff9002b726cfdae083a9a0206e1ef615f19a6088 Mon Sep 17 00:00:00 2001 From: Gabriele Picco Date: Thu, 29 Sep 2022 16:34:44 +0100 Subject: [PATCH 47/82] Add Zshot Spacy plugin (#11557) * Add Zshot Spacy plugin Add Zshot (Zero and Few shot named entity & relationships recognition) Spacy plugin * Update website/meta/universe.json Co-authored-by: Adriane Boyd * Update website/meta/universe.json Co-authored-by: Adriane Boyd Co-authored-by: Adriane Boyd --- website/meta/universe.json | 57 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) diff --git 
a/website/meta/universe.json b/website/meta/universe.json index 9ec0d6c0e..a6a1a0fc7 100644 --- a/website/meta/universe.json +++ b/website/meta/universe.json @@ -1,5 +1,62 @@ { "resources": [ + { + "id": "Zshot", + "title": "Zshot", + "slogan": "Zero and Few shot named entity & relationships recognition", + "github": "ibm/zshot", + "pip": "zshot", + "code_example": [ + "import spacy", + "from zshot import PipelineConfig, displacy", + "from zshot.linker import LinkerRegen", + "from zshot.mentions_extractor import MentionsExtractorSpacy", + "from zshot.utils.data_models import Entity", + "", + "nlp = spacy.load('en_core_web_sm')", + "# zero shot definition of entities", + "nlp_config = PipelineConfig(", + " mentions_extractor=MentionsExtractorSpacy(),", + " linker=LinkerRegen(),", + " entities=[", + " Entity(name='Paris',", + " description='Paris is located in northern central France, in a north-bending arc of the river Seine'),", + " Entity(name='IBM',", + " description='International Business Machines Corporation (IBM) is an American multinational technology corporation headquartered in Armonk, New York'),", + " Entity(name='New York', description='New York is a city in U.S. state'),", + " Entity(name='Florida', description='southeasternmost U.S. state'),", + " Entity(name='American',", + " description='American, something of, from, or related to the United States of America, commonly known as the United States or America'),", + " Entity(name='Chemical formula',", + " description='In chemistry, a chemical formula is a way of presenting information about the chemical proportions of atoms that constitute a particular chemical compound or molecul'),", + " Entity(name='Acetamide',", + " description='Acetamide (systematic name: ethanamide) is an organic compound with the formula CH3CONH2. It is the simplest amide derived from acetic acid. It finds some use as a plasticizer and as an industrial solvent.'),", + " Entity(name='Armonk',", + " description='Armonk is a hamlet and census-designated place (CDP) in the town of North Castle, located in Westchester County, New York, United States.'),", + " Entity(name='Acetic Acid',", + " description='Acetic acid, systematically named ethanoic acid, is an acidic, colourless liquid and organic compound with the chemical formula CH3COOH'),", + " Entity(name='Industrial solvent',", + " description='Acetamide (systematic name: ethanamide) is an organic compound with the formula CH3CONH2. It is the simplest amide derived from acetic acid. 
It finds some use as a plasticizer and as an industrial solvent.'),", + " ]", + ")", + "nlp.add_pipe('zshot', config=nlp_config, last=True)", + "", + "text = 'International Business Machines Corporation (IBM) is an American multinational technology corporation' \\", + " ' headquartered in Armonk, New York, with operations in over 171 countries.'", + "", + "doc = nlp(text)", + "displacy.serve(doc, style='ent')" + ], + "thumb": "https://ibm.github.io/zshot/img/graph.png", + "url": "https://ibm.github.io/zshot/", + "author": "IBM Research", + "author_links": { + "github": "ibm", + "twitter": "IBMResearch", + "website": "https://research.ibm.com/labs/ireland/" + }, + "category": ["scientific", "models", "research"] + }, { "id": "concepcy", "title": "concepCy", From 087cc74c6abdd43e04e4313cdcf292edf6187f4b Mon Sep 17 00:00:00 2001 From: Paul O'Leary McCann Date: Mon, 3 Oct 2022 18:53:21 +0900 Subject: [PATCH 48/82] Remove mention of 1.7 from issue template (#11570) It's rare to have anyone using v1 anymore, so this message is no longer helpful. --- .github/ISSUE_TEMPLATE/01_bugs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/01_bugs.md b/.github/ISSUE_TEMPLATE/01_bugs.md index 255a5241e..f0d0ba912 100644 --- a/.github/ISSUE_TEMPLATE/01_bugs.md +++ b/.github/ISSUE_TEMPLATE/01_bugs.md @@ -10,7 +10,7 @@ about: Use this template if you came across a bug or unexpected behaviour differ ## Your Environment - + * Operating System: * Python Version Used: * spaCy Version Used: From 70e21dfcad28b044903ba33b2b8831d925151b76 Mon Sep 17 00:00:00 2001 From: Sofie Van Landeghem Date: Mon, 3 Oct 2022 13:04:03 +0200 Subject: [PATCH 49/82] PR to test importlib-metadata (#11569) * empty commit * restrict importlib-metadata to lower than 5.0.0 * restrict importlib-metadata also for validate CI step * set fixed version for CI * try flake8 5.0.4 in CI validation step * from importlib-metadata from requirements again --- azure-pipelines.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index f475b7fdd..2f5201614 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -31,7 +31,7 @@ jobs: inputs: versionSpec: "3.7" - script: | - pip install flake8==3.9.2 + pip install flake8==5.0.4 python -m flake8 spacy --count --select=E901,E999,F821,F822,F823,W605 --show-source --statistics displayName: "flake8" From d4922f25fc182a51d71375a979f99cf27b08ecd9 Mon Sep 17 00:00:00 2001 From: svlandeg Date: Mon, 3 Oct 2022 14:41:15 +0200 Subject: [PATCH 50/82] fix test for EL activations with refactored KB --- spacy/tests/pipeline/test_entity_linker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spacy/tests/pipeline/test_entity_linker.py b/spacy/tests/pipeline/test_entity_linker.py index c03c7008a..a6baa1ff4 100644 --- a/spacy/tests/pipeline/test_entity_linker.py +++ b/spacy/tests/pipeline/test_entity_linker.py @@ -1214,7 +1214,7 @@ def test_save_activations(): # create artificial KB - assign same prior weight to the two russ cochran's # Q2146908 (Russ Cochran): American golfer # Q7381115 (Russ Cochran): publisher - mykb = KnowledgeBase(vocab, entity_vector_length=vector_length) + mykb = InMemoryLookupKB(vocab, entity_vector_length=vector_length) mykb.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3]) mykb.add_entity(entity="Q7381115", freq=12, entity_vector=[9, 1, -7]) mykb.add_alias( From b187076a2dd0f034c1a8918c9b332711688b5dc2 Mon Sep 17 00:00:00 2001 From: Sofie Van Landeghem Date: Mon, 
3 Oct 2022 17:01:04 +0200 Subject: [PATCH 51/82] fix docs (#11573) --- website/docs/api/kb_in_memory.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/docs/api/kb_in_memory.md b/website/docs/api/kb_in_memory.md index c9ce624f0..9e3279e6a 100644 --- a/website/docs/api/kb_in_memory.md +++ b/website/docs/api/kb_in_memory.md @@ -21,9 +21,9 @@ Create the knowledge base. > #### Example > > ```python -> from spacy.kb import KnowledgeBase +> from spacy.kb import InMemoryLookupKB > vocab = nlp.vocab -> kb = KnowledgeBase(vocab=vocab, entity_vector_length=64) +> kb = InMemoryLookupKB(vocab=vocab, entity_vector_length=64) > ``` | Name | Description | From 8cd77dd54cfc89c2f67ca2412490ef9b49a98518 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Tue, 4 Oct 2022 11:23:04 +0200 Subject: [PATCH 52/82] Sync flake8 version across requirements (#11580) --- .pre-commit-config.yaml | 2 +- requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b959262e3..df59697b1 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -6,7 +6,7 @@ repos: language_version: python3.7 additional_dependencies: ['click==8.0.4'] - repo: https://gitlab.com/pycqa/flake8 - rev: 3.9.2 + rev: 5.0.4 hooks: - id: flake8 args: diff --git a/requirements.txt b/requirements.txt index 446560c06..14847ff21 100644 --- a/requirements.txt +++ b/requirements.txt @@ -28,7 +28,7 @@ cython>=0.25,<3.0 pytest>=5.2.0,!=7.1.0 pytest-timeout>=1.3.0,<2.0.0 mock>=2.0.0,<3.0.0 -flake8>=3.8.0,<3.10.0 +flake8>=3.8.0,<6.0.0 hypothesis>=3.27.0,<7.0.0 mypy>=0.980,<0.990; platform_machine != "aarch64" and python_version >= "3.7" types-dataclasses>=0.1.3; python_version < "3.7" From 446a3ecf34f3b0c139a326d7471b9b1b5346bcb0 Mon Sep 17 00:00:00 2001 From: Madeesh Kannan Date: Thu, 6 Oct 2022 10:51:06 +0200 Subject: [PATCH 53/82] `StringStore` refactoring (#11344) * `strings`: Remove unused `hash32_utf8` function * `strings`: Make `hash_utf8` and `decode_Utf8Str` private * `strings`: Reorganize private functions * 'strings': Raise error when non-string/-int types are passed to functions that don't accept them * `strings`: Add `items()` method, add type hints, remove unused methods, restrict inputs to specific types, reorganize methods * `Morphology`: Use `StringStore.items()` to enumerate features when pickling * `test_stringstore`: Update pre-Python 3 tests * Update `StringStore` docs * Fix `get_string_id` imports * Replace redundant test with tests for type checking * Rename `_retrieve_interned_str`, remove `.get` default arg * Add `get_string_id` to `strings.pyi` Remove `mypy` ignore directives from imports of the above * `strings.pyi`: Replace functions that consume `Union`-typed params with overloads * `strings.pyi`: Revert some function signatures * Update `SYMBOLS_BY_INT` lookups and error codes post-merge * Revert clobbered change introduced in a previous merge * Remove unnecessary type hint * Invert tuple order in `StringStore.items()` * Add test for `StringStore.items()` * Revert "`Morphology`: Use `StringStore.items()` to enumerate features when pickling" This reverts commit 1af9510ceb6b08cfdcfbf26df6896f26709fac0d. 
* Rename `keys` and `key_map` * Add `keys()` and `values()` * Add comment about the inverted key-value semantics in the API * Fix type hints * Implement `keys()`, `values()`, `items()` without generators * Fix type hints, remove unnecessary boxing * Update docs * Simplify `keys/values/items()` impl * `mypy` fix * Fix error message, doc fixes --- spacy/errors.py | 4 +- spacy/matcher/matcher.pyx | 2 +- spacy/strings.pxd | 21 +- spacy/strings.pyi | 23 +- spacy/strings.pyx | 426 +++++++++--------- spacy/tests/vocab_vectors/test_stringstore.py | 41 +- spacy/tokens/graph.pyx | 2 +- spacy/tokens/retokenizer.pyx | 2 +- website/docs/api/stringstore.md | 82 +++- 9 files changed, 339 insertions(+), 264 deletions(-) diff --git a/spacy/errors.py b/spacy/errors.py index 5fb59e2c5..856660106 100644 --- a/spacy/errors.py +++ b/spacy/errors.py @@ -252,7 +252,7 @@ class Errors(metaclass=ErrorsWithCodes): E012 = ("Cannot add pattern for zero tokens to matcher.\nKey: {key}") E016 = ("MultitaskObjective target should be function or one of: dep, " "tag, ent, dep_tag_offset, ent_tag.") - E017 = ("Can only add unicode or bytes. Got type: {value_type}") + E017 = ("Can only add 'str' inputs to StringStore. Got type: {value_type}") E018 = ("Can't retrieve string for hash '{hash_value}'. This usually " "refers to an issue with the `Vocab` or `StringStore`.") E019 = ("Can't create transition with unknown action ID: {action}. Action " @@ -955,6 +955,8 @@ class Errors(metaclass=ErrorsWithCodes): # v4 error strings E4000 = ("Expected a Doc as input, but got: '{type}'") + E4001 = ("Expected input to be one of the following types: ({expected_types}), " + "but got '{received_type}'") # Deprecated model shortcuts, only used in errors and warnings diff --git a/spacy/matcher/matcher.pyx b/spacy/matcher/matcher.pyx index 865e7594e..8bd05f25f 100644 --- a/spacy/matcher/matcher.pyx +++ b/spacy/matcher/matcher.pyx @@ -22,7 +22,7 @@ from ..attrs cimport ID, attr_id_t, NULL_ATTR, ORTH, POS, TAG, DEP, LEMMA, MORPH from ..schemas import validate_token_pattern from ..errors import Errors, MatchPatternError, Warnings -from ..strings import get_string_id +from ..strings cimport get_string_id from ..attrs import IDS diff --git a/spacy/strings.pxd b/spacy/strings.pxd index 5f03a9a28..0c1a30fe3 100644 --- a/spacy/strings.pxd +++ b/spacy/strings.pxd @@ -1,4 +1,4 @@ -from libc.stdint cimport int64_t +from libc.stdint cimport int64_t, uint32_t from libcpp.vector cimport vector from libcpp.set cimport set from cymem.cymem cimport Pool @@ -7,13 +7,6 @@ from murmurhash.mrmr cimport hash64 from .typedefs cimport attr_t, hash_t - -cpdef hash_t hash_string(str string) except 0 -cdef hash_t hash_utf8(char* utf8_string, int length) nogil - -cdef str decode_Utf8Str(const Utf8Str* string) - - ctypedef union Utf8Str: unsigned char[8] s unsigned char* p @@ -21,9 +14,13 @@ ctypedef union Utf8Str: cdef class StringStore: cdef Pool mem + cdef vector[hash_t] _keys + cdef PreshMap _map - cdef vector[hash_t] keys - cdef public PreshMap _map + cdef hash_t _intern_str(self, str string) + cdef Utf8Str* _allocate_str_repr(self, const unsigned char* chars, uint32_t length) except * + cdef str _decode_str_repr(self, const Utf8Str* string) - cdef const Utf8Str* intern_unicode(self, str py_string) - cdef const Utf8Str* _intern_utf8(self, char* utf8_string, int length, hash_t* precalculated_hash) + +cpdef hash_t hash_string(object string) except -1 +cpdef hash_t get_string_id(object string_or_hash) except -1 diff --git a/spacy/strings.pyi b/spacy/strings.pyi index 
b29389b9a..d9509ff57 100644 --- a/spacy/strings.pyi +++ b/spacy/strings.pyi @@ -1,21 +1,20 @@ -from typing import Optional, Iterable, Iterator, Union, Any, overload +from typing import List, Optional, Iterable, Iterator, Union, Any, Tuple, overload from pathlib import Path -def get_string_id(key: Union[str, int]) -> int: ... - class StringStore: - def __init__( - self, strings: Optional[Iterable[str]] = ..., freeze: bool = ... - ) -> None: ... + def __init__(self, strings: Optional[Iterable[str]]) -> None: ... @overload - def __getitem__(self, string_or_id: Union[bytes, str]) -> int: ... + def __getitem__(self, string_or_hash: str) -> int: ... @overload - def __getitem__(self, string_or_id: int) -> str: ... - def as_int(self, key: Union[bytes, str, int]) -> int: ... - def as_string(self, key: Union[bytes, str, int]) -> str: ... + def __getitem__(self, string_or_hash: int) -> str: ... + def as_int(self, string_or_hash: Union[str, int]) -> int: ... + def as_string(self, string_or_hash: Union[str, int]) -> str: ... def add(self, string: str) -> int: ... + def items(self) -> List[Tuple[str, int]]: ... + def keys(self) -> List[str]: ... + def values(self) -> List[int]: ... def __len__(self) -> int: ... - def __contains__(self, string: str) -> bool: ... + def __contains__(self, string_or_hash: Union[str, int]) -> bool: ... def __iter__(self) -> Iterator[str]: ... def __reduce__(self) -> Any: ... def to_disk(self, path: Union[str, Path]) -> None: ... @@ -23,3 +22,5 @@ class StringStore: def to_bytes(self, **kwargs: Any) -> bytes: ... def from_bytes(self, bytes_data: bytes, **kwargs: Any) -> StringStore: ... def _reset_and_load(self, strings: Iterable[str]) -> None: ... + +def get_string_id(string_or_hash: Union[str, int]) -> int: ... diff --git a/spacy/strings.pyx b/spacy/strings.pyx index e86682733..5a037eb9a 100644 --- a/spacy/strings.pyx +++ b/spacy/strings.pyx @@ -1,9 +1,10 @@ # cython: infer_types=True +from typing import Optional, Union, Iterable, Tuple, Callable, Any, List, Iterator cimport cython from libc.string cimport memcpy from libcpp.set cimport set from libc.stdint cimport uint32_t -from murmurhash.mrmr cimport hash64, hash32 +from murmurhash.mrmr cimport hash64 import srsly @@ -14,105 +15,13 @@ from .symbols import NAMES as SYMBOLS_BY_INT from .errors import Errors from . import util -# Not particularly elegant, but this is faster than `isinstance(key, numbers.Integral)` -cdef inline bint _try_coerce_to_hash(object key, hash_t* out_hash): - try: - out_hash[0] = key - return True - except: - return False - -def get_string_id(key): - """Get a string ID, handling the reserved symbols correctly. If the key is - already an ID, return it. - - This function optimises for convenience over performance, so shouldn't be - used in tight loops. - """ - cdef hash_t str_hash - if isinstance(key, str): - if len(key) == 0: - return 0 - - symbol = SYMBOLS_BY_STR.get(key, None) - if symbol is not None: - return symbol - else: - chars = key.encode("utf8") - return hash_utf8(chars, len(chars)) - elif _try_coerce_to_hash(key, &str_hash): - # Coerce the integral key to the expected primitive hash type. - # This ensures that custom/overloaded "primitive" data types - # such as those implemented by numpy are not inadvertently used - # downsteam (as these are internally implemented as custom PyObjects - # whose comparison operators can incur a significant overhead). 
- return str_hash - else: - # TODO: Raise an error instead - return key - - -cpdef hash_t hash_string(str string) except 0: - chars = string.encode("utf8") - return hash_utf8(chars, len(chars)) - - -cdef hash_t hash_utf8(char* utf8_string, int length) nogil: - return hash64(utf8_string, length, 1) - - -cdef uint32_t hash32_utf8(char* utf8_string, int length) nogil: - return hash32(utf8_string, length, 1) - - -cdef str decode_Utf8Str(const Utf8Str* string): - cdef int i, length - if string.s[0] < sizeof(string.s) and string.s[0] != 0: - return string.s[1:string.s[0]+1].decode("utf8") - elif string.p[0] < 255: - return string.p[1:string.p[0]+1].decode("utf8") - else: - i = 0 - length = 0 - while string.p[i] == 255: - i += 1 - length += 255 - length += string.p[i] - i += 1 - return string.p[i:length + i].decode("utf8") - - -cdef Utf8Str* _allocate(Pool mem, const unsigned char* chars, uint32_t length) except *: - cdef int n_length_bytes - cdef int i - cdef Utf8Str* string = mem.alloc(1, sizeof(Utf8Str)) - cdef uint32_t ulength = length - if length < sizeof(string.s): - string.s[0] = length - memcpy(&string.s[1], chars, length) - return string - elif length < 255: - string.p = mem.alloc(length + 1, sizeof(unsigned char)) - string.p[0] = length - memcpy(&string.p[1], chars, length) - return string - else: - i = 0 - n_length_bytes = (length // 255) + 1 - string.p = mem.alloc(length + n_length_bytes, sizeof(unsigned char)) - for i in range(n_length_bytes-1): - string.p[i] = 255 - string.p[n_length_bytes-1] = length % 255 - memcpy(&string.p[n_length_bytes], chars, length) - return string - cdef class StringStore: - """Look up strings by 64-bit hashes. + """Look up strings by 64-bit hashes. Implicitly handles reserved symbols. DOCS: https://spacy.io/api/stringstore """ - def __init__(self, strings=None, freeze=False): + def __init__(self, strings: Optional[Iterable[str]] = None): """Create the StringStore. strings (iterable): A sequence of unicode strings to add to the store. @@ -123,128 +32,127 @@ cdef class StringStore: for string in strings: self.add(string) - def __getitem__(self, object string_or_id): - """Retrieve a string from a given hash, or vice versa. + def __getitem__(self, string_or_hash: Union[str, int]) -> Union[str, int]: + """Retrieve a string from a given hash. If a string + is passed as the input, add it to the store and return + its hash. - string_or_id (bytes, str or uint64): The value to encode. - Returns (str / uint64): The value to be retrieved. + string_or_hash (int / str): The hash value to lookup or the string to store. + RETURNS (str / int): The stored string or the hash of the newly added string. """ - cdef hash_t str_hash - cdef Utf8Str* utf8str = NULL - - if isinstance(string_or_id, str): - if len(string_or_id) == 0: - return 0 - - # Return early if the string is found in the symbols LUT. 
- symbol = SYMBOLS_BY_STR.get(string_or_id, None) - if symbol is not None: - return symbol - else: - return hash_string(string_or_id) - elif isinstance(string_or_id, bytes): - return hash_utf8(string_or_id, len(string_or_id)) - elif _try_coerce_to_hash(string_or_id, &str_hash): - if str_hash == 0: - return "" - elif str_hash in SYMBOLS_BY_INT: - return SYMBOLS_BY_INT[str_hash] - else: - utf8str = self._map.get(str_hash) + if isinstance(string_or_hash, str): + return self.add(string_or_hash) else: - # TODO: Raise an error instead - utf8str = self._map.get(string_or_id) + return self._get_interned_str(string_or_hash) - if utf8str is NULL: - raise KeyError(Errors.E018.format(hash_value=string_or_id)) - else: - return decode_Utf8Str(utf8str) + def __contains__(self, string_or_hash: Union[str, int]) -> bool: + """Check whether a string or a hash is in the store. - def as_int(self, key): - """If key is an int, return it; otherwise, get the int value.""" - if not isinstance(key, str): - return key - else: - return self[key] - - def as_string(self, key): - """If key is a string, return it; otherwise, get the string value.""" - if isinstance(key, str): - return key - else: - return self[key] - - def add(self, string): - """Add a string to the StringStore. - - string (str): The string to add. - RETURNS (uint64): The string's hash value. - """ - cdef hash_t str_hash - if isinstance(string, str): - if string in SYMBOLS_BY_STR: - return SYMBOLS_BY_STR[string] - - string = string.encode("utf8") - str_hash = hash_utf8(string, len(string)) - self._intern_utf8(string, len(string), &str_hash) - elif isinstance(string, bytes): - if string in SYMBOLS_BY_STR: - return SYMBOLS_BY_STR[string] - str_hash = hash_utf8(string, len(string)) - self._intern_utf8(string, len(string), &str_hash) - else: - raise TypeError(Errors.E017.format(value_type=type(string))) - return str_hash - - def __len__(self): - """The number of strings in the store. - - RETURNS (int): The number of strings in the store. - """ - return self.keys.size() - - def __contains__(self, string_or_id not None): - """Check whether a string or ID is in the store. - - string_or_id (str or int): The string to check. + string (str / int): The string/hash to check. RETURNS (bool): Whether the store contains the string. """ - cdef hash_t str_hash - if isinstance(string_or_id, str): - if len(string_or_id) == 0: - return True - elif string_or_id in SYMBOLS_BY_STR: - return True - str_hash = hash_string(string_or_id) - elif _try_coerce_to_hash(string_or_id, &str_hash): - pass - else: - # TODO: Raise an error instead - return self._map.get(string_or_id) is not NULL - + cdef hash_t str_hash = get_string_id(string_or_hash) if str_hash in SYMBOLS_BY_INT: return True else: return self._map.get(str_hash) is not NULL - def __iter__(self): - """Iterate over the strings in the store, in order. + def __iter__(self) -> Iterator[str]: + """Iterate over the strings in the store in insertion order. - YIELDS (str): A string in the store. + RETURNS: An iterable collection of strings. """ - cdef int i - cdef hash_t key - for i in range(self.keys.size()): - key = self.keys[i] - utf8str = self._map.get(key) - yield decode_Utf8Str(utf8str) - # TODO: Iterate OOV here? + return iter(self.keys()) def __reduce__(self): strings = list(self) return (StringStore, (strings,), None, None, None) + def __len__(self) -> int: + """The number of strings in the store. + + RETURNS (int): The number of strings in the store. 
+ """ + return self._keys.size() + + def add(self, string: str) -> int: + """Add a string to the StringStore. + + string (str): The string to add. + RETURNS (uint64): The string's hash value. + """ + if not isinstance(string, str): + raise TypeError(Errors.E017.format(value_type=type(string))) + + if string in SYMBOLS_BY_STR: + return SYMBOLS_BY_STR[string] + else: + return self._intern_str(string) + + def as_int(self, string_or_hash: Union[str, int]) -> str: + """If a hash value is passed as the input, return it as-is. If the input + is a string, return its corresponding hash. + + string_or_hash (str / int): The string to hash or a hash value. + RETURNS (int): The hash of the string or the input hash value. + """ + if isinstance(string_or_hash, int): + return string_or_hash + else: + return get_string_id(string_or_hash) + + def as_string(self, string_or_hash: Union[str, int]) -> str: + """If a string is passed as the input, return it as-is. If the input + is a hash value, return its corresponding string. + + string_or_hash (str / int): The hash value to lookup or a string. + RETURNS (str): The stored string or the input string. + """ + if isinstance(string_or_hash, str): + return string_or_hash + else: + return self._get_interned_str(string_or_hash) + + def items(self) -> List[Tuple[str, int]]: + """Iterate over the stored strings and their hashes in insertion order. + + RETURNS: A list of string-hash pairs. + """ + # Even though we internally store the hashes as keys and the strings as + # values, we invert the order in the public API to keep it consistent with + # the implementation of the `__iter__` method (where we wish to iterate over + # the strings in the store). + cdef int i + pairs = [None] * self._keys.size() + for i in range(self._keys.size()): + str_hash = self._keys[i] + utf8str = self._map.get(str_hash) + pairs[i] = (self._decode_str_repr(utf8str), str_hash) + return pairs + + def keys(self) -> List[str]: + """Iterate over the stored strings in insertion order. + + RETURNS: A list of strings. + """ + cdef int i + strings = [None] * self._keys.size() + for i in range(self._keys.size()): + utf8str = self._map.get(self._keys[i]) + strings[i] = self._decode_str_repr(utf8str) + return strings + + def values(self) -> List[int]: + """Iterate over the stored strings hashes in insertion order. + + RETURNS: A list of string hashs. + """ + cdef int i + hashes = [None] * self._keys.size() + for i in range(self._keys.size()): + hashes[i] = self._keys[i] + return hashes + def to_disk(self, path): """Save the current state to a directory. @@ -294,24 +202,122 @@ cdef class StringStore: def _reset_and_load(self, strings): self.mem = Pool() self._map = PreshMap() - self.keys.clear() + self._keys.clear() for string in strings: self.add(string) - cdef const Utf8Str* intern_unicode(self, str py_string): - # 0 means missing, but we don't bother offsetting the index. - cdef bytes byte_string = py_string.encode("utf8") - return self._intern_utf8(byte_string, len(byte_string), NULL) + def _get_interned_str(self, hash_value: int) -> str: + cdef hash_t str_hash + if not _try_coerce_to_hash(hash_value, &str_hash): + raise TypeError(Errors.E4001.format(expected_types="'int'", received_type=type(hash_value))) - @cython.final - cdef const Utf8Str* _intern_utf8(self, char* utf8_string, int length, hash_t* precalculated_hash): + # Handle reserved symbols and empty strings correctly. 
+ if str_hash == 0: + return "" + + symbol = SYMBOLS_BY_INT.get(str_hash) + if symbol is not None: + return symbol + + utf8str = self._map.get(str_hash) + if utf8str is NULL: + raise KeyError(Errors.E018.format(hash_value=str_hash)) + else: + return self._decode_str_repr(utf8str) + + cdef hash_t _intern_str(self, str string): # TODO: This function's API/behaviour is an unholy mess... # 0 means missing, but we don't bother offsetting the index. - cdef hash_t key = precalculated_hash[0] if precalculated_hash is not NULL else hash_utf8(utf8_string, length) + chars = string.encode('utf-8') + cdef hash_t key = hash64(chars, len(chars), 1) cdef Utf8Str* value = self._map.get(key) if value is not NULL: - return value - value = _allocate(self.mem, utf8_string, length) + return key + + value = self._allocate_str_repr(chars, len(chars)) self._map.set(key, value) - self.keys.push_back(key) - return value + self._keys.push_back(key) + return key + + cdef Utf8Str* _allocate_str_repr(self, const unsigned char* chars, uint32_t length) except *: + cdef int n_length_bytes + cdef int i + cdef Utf8Str* string = self.mem.alloc(1, sizeof(Utf8Str)) + cdef uint32_t ulength = length + if length < sizeof(string.s): + string.s[0] = length + memcpy(&string.s[1], chars, length) + return string + elif length < 255: + string.p = self.mem.alloc(length + 1, sizeof(unsigned char)) + string.p[0] = length + memcpy(&string.p[1], chars, length) + return string + else: + i = 0 + n_length_bytes = (length // 255) + 1 + string.p = self.mem.alloc(length + n_length_bytes, sizeof(unsigned char)) + for i in range(n_length_bytes-1): + string.p[i] = 255 + string.p[n_length_bytes-1] = length % 255 + memcpy(&string.p[n_length_bytes], chars, length) + return string + + cdef str _decode_str_repr(self, const Utf8Str* string): + cdef int i, length + if string.s[0] < sizeof(string.s) and string.s[0] != 0: + return string.s[1:string.s[0]+1].decode('utf-8') + elif string.p[0] < 255: + return string.p[1:string.p[0]+1].decode('utf-8') + else: + i = 0 + length = 0 + while string.p[i] == 255: + i += 1 + length += 255 + length += string.p[i] + i += 1 + return string.p[i:length + i].decode('utf-8') + + +cpdef hash_t hash_string(object string) except -1: + if not isinstance(string, str): + raise TypeError(Errors.E4001.format(expected_types="'str'", received_type=type(string))) + + # Handle reserved symbols and empty strings correctly. + if len(string) == 0: + return 0 + + symbol = SYMBOLS_BY_STR.get(string) + if symbol is not None: + return symbol + + chars = string.encode('utf-8') + return hash64(chars, len(chars), 1) + + +cpdef hash_t get_string_id(object string_or_hash) except -1: + cdef hash_t str_hash + + try: + return hash_string(string_or_hash) + except: + if _try_coerce_to_hash(string_or_hash, &str_hash): + # Coerce the integral key to the expected primitive hash type. + # This ensures that custom/overloaded "primitive" data types + # such as those implemented by numpy are not inadvertently used + # downsteam (as these are internally implemented as custom PyObjects + # whose comparison operators can incur a significant overhead). 
+ return str_hash + else: + raise TypeError(Errors.E4001.format(expected_types="'str','int'", received_type=type(string_or_hash))) + + +# Not particularly elegant, but this is faster than `isinstance(key, numbers.Integral)` +cdef inline bint _try_coerce_to_hash(object key, hash_t* out_hash): + try: + out_hash[0] = key + return True + except: + return False + diff --git a/spacy/tests/vocab_vectors/test_stringstore.py b/spacy/tests/vocab_vectors/test_stringstore.py index a0f8016af..f86c0f10d 100644 --- a/spacy/tests/vocab_vectors/test_stringstore.py +++ b/spacy/tests/vocab_vectors/test_stringstore.py @@ -24,6 +24,14 @@ def test_stringstore_from_api_docs(stringstore): stringstore.add("orange") all_strings = [s for s in stringstore] assert all_strings == ["apple", "orange"] + assert all_strings == list(stringstore.keys()) + all_strings_and_hashes = list(stringstore.items()) + assert all_strings_and_hashes == [ + ("apple", 8566208034543834098), + ("orange", 2208928596161743350), + ] + all_hashes = list(stringstore.values()) + assert all_hashes == [8566208034543834098, 2208928596161743350] banana_hash = stringstore.add("banana") assert len(stringstore) == 3 assert banana_hash == 2525716904149915114 @@ -31,12 +39,25 @@ def test_stringstore_from_api_docs(stringstore): assert stringstore["banana"] == banana_hash -@pytest.mark.parametrize("text1,text2,text3", [(b"Hello", b"goodbye", b"hello")]) -def test_stringstore_save_bytes(stringstore, text1, text2, text3): - key = stringstore.add(text1) - assert stringstore[text1] == key - assert stringstore[text2] != key - assert stringstore[text3] != key +@pytest.mark.parametrize( + "val_bytes,val_float,val_list,val_text,val_hash", + [(b"Hello", 1.1, ["abc"], "apple", 8566208034543834098)], +) +def test_stringstore_type_checking( + stringstore, val_bytes, val_float, val_list, val_text, val_hash +): + with pytest.raises(TypeError): + assert stringstore[val_bytes] + + with pytest.raises(TypeError): + stringstore.add(val_float) + + with pytest.raises(TypeError): + assert val_list not in stringstore + + key = stringstore.add(val_text) + assert val_hash == key + assert stringstore[val_hash] == val_text @pytest.mark.parametrize("text1,text2,text3", [("Hello", "goodbye", "hello")]) @@ -47,19 +68,19 @@ def test_stringstore_save_unicode(stringstore, text1, text2, text3): assert stringstore[text3] != key -@pytest.mark.parametrize("text", [b"A"]) +@pytest.mark.parametrize("text", ["A"]) def test_stringstore_retrieve_id(stringstore, text): key = stringstore.add(text) assert len(stringstore) == 1 - assert stringstore[key] == text.decode("utf8") + assert stringstore[key] == text with pytest.raises(KeyError): stringstore[20000] -@pytest.mark.parametrize("text1,text2", [(b"0123456789", b"A")]) +@pytest.mark.parametrize("text1,text2", [("0123456789", "A")]) def test_stringstore_med_string(stringstore, text1, text2): store = stringstore.add(text1) - assert stringstore[store] == text1.decode("utf8") + assert stringstore[store] == text1 stringstore.add(text2) assert stringstore[text1] == store diff --git a/spacy/tokens/graph.pyx b/spacy/tokens/graph.pyx index adc4d23c8..0ae0d94c7 100644 --- a/spacy/tokens/graph.pyx +++ b/spacy/tokens/graph.pyx @@ -12,7 +12,7 @@ from murmurhash.mrmr cimport hash64 from .. 
import Errors from ..typedefs cimport hash_t -from ..strings import get_string_id +from ..strings cimport get_string_id from ..structs cimport EdgeC, GraphC from .token import Token diff --git a/spacy/tokens/retokenizer.pyx b/spacy/tokens/retokenizer.pyx index 43e6d4aa7..29143bed3 100644 --- a/spacy/tokens/retokenizer.pyx +++ b/spacy/tokens/retokenizer.pyx @@ -18,7 +18,7 @@ from .underscore import is_writable_attr from ..attrs import intify_attrs from ..util import SimpleFrozenDict from ..errors import Errors -from ..strings import get_string_id +from ..strings cimport get_string_id cdef class Retokenizer: diff --git a/website/docs/api/stringstore.md b/website/docs/api/stringstore.md index cd414b1f0..b509659ef 100644 --- a/website/docs/api/stringstore.md +++ b/website/docs/api/stringstore.md @@ -40,7 +40,8 @@ Get the number of strings in the store. ## StringStore.\_\_getitem\_\_ {#getitem tag="method"} -Retrieve a string from a given hash, or vice versa. +Retrieve a string from a given hash. If a string is passed as the input, add it +to the store and return its hash. > #### Example > @@ -51,14 +52,14 @@ Retrieve a string from a given hash, or vice versa. > assert stringstore[apple_hash] == "apple" > ``` -| Name | Description | -| -------------- | ----------------------------------------------- | -| `string_or_id` | The value to encode. ~~Union[bytes, str, int]~~ | -| **RETURNS** | The value to be retrieved. ~~Union[str, int]~~ | +| Name | Description | +| ---------------- | ---------------------------------------------------------------------------- | +| `string_or_hash` | The hash value to lookup or the string to store. ~~Union[str, int]~~ | +| **RETURNS** | The stored string or the hash of the newly added string. ~~Union[str, int]~~ | ## StringStore.\_\_contains\_\_ {#contains tag="method"} -Check whether a string is in the store. +Check whether a string or a hash is in the store. > #### Example > @@ -68,15 +69,14 @@ Check whether a string is in the store. > assert not "cherry" in stringstore > ``` -| Name | Description | -| ----------- | ----------------------------------------------- | -| `string` | The string to check. ~~str~~ | -| **RETURNS** | Whether the store contains the string. ~~bool~~ | +| Name | Description | +| ---------------- | ------------------------------------------------------- | +| `string_or_hash` | The string or hash to check. ~~Union[str, int]~~ | +| **RETURNS** | Whether the store contains the string or hash. ~~bool~~ | ## StringStore.\_\_iter\_\_ {#iter tag="method"} -Iterate over the strings in the store, in order. Note that a newly initialized -store will always include an empty string `""` at position `0`. +Iterate over the stored strings in insertion order. > #### Example > @@ -86,11 +86,59 @@ store will always include an empty string `""` at position `0`. > assert all_strings == ["apple", "orange"] > ``` -| Name | Description | -| ---------- | ------------------------------ | -| **YIELDS** | A string in the store. ~~str~~ | +| Name | Description | +| ----------- | ------------------------------ | +| **RETURNS** | A string in the store. ~~str~~ | -## StringStore.add {#add tag="method" new="2"} +## StringStore.items {#iter tag="method" new="4"} + +Iterate over the stored string-hash pairs in insertion order. 
+ +> #### Example +> +> ```python +> stringstore = StringStore(["apple", "orange"]) +> all_strings_and_hashes = stringstore.items() +> assert all_strings_and_hashes == [("apple", 8566208034543834098), ("orange", 2208928596161743350)] +> ``` + +| Name | Description | +| ----------- | ------------------------------------------------------ | +| **RETURNS** | A list of string-hash pairs. ~~List[Tuple[str, int]]~~ | + +## StringStore.keys {#iter tag="method" new="4"} + +Iterate over the stored strings in insertion order. + +> #### Example +> +> ```python +> stringstore = StringStore(["apple", "orange"]) +> all_strings = stringstore.keys() +> assert all_strings == ["apple", "orange"] +> ``` + +| Name | Description | +| ----------- | -------------------------------- | +| **RETURNS** | A list of strings. ~~List[str]~~ | + +## StringStore.values {#iter tag="method" new="4"} + +Iterate over the stored string hashes in insertion order. + +> #### Example +> +> ```python +> stringstore = StringStore(["apple", "orange"]) +> all_hashes = stringstore.values() +> assert all_hashes == [8566208034543834098, 2208928596161743350] +> ``` + +| Name | Description | +| ----------- | -------------------------------------- | +| **RETURNS** | A list of string hashes. ~~List[int]~~ | + +## StringStore.add {#add tag="method"} Add a string to the `StringStore`. @@ -110,7 +158,7 @@ Add a string to the `StringStore`. | `string` | The string to add. ~~str~~ | | **RETURNS** | The string's hash value. ~~int~~ | -## StringStore.to_disk {#to_disk tag="method" new="2"} +## StringStore.to_disk {#to_disk tag="method"} Save the current state to a directory. From ef74f8f5e447dec10ab69d2a7e94f0e09165db75 Mon Sep 17 00:00:00 2001 From: Sofie Van Landeghem Date: Tue, 11 Oct 2022 14:15:22 +0200 Subject: [PATCH 54/82] Fix mypy error in edittree lemmatizer (#11612) * cleanup imports * try limiting Thinc to previous release * remove Model specification * fix code and revert Thinc constraint --- spacy/pipeline/edit_tree_lemmatizer.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/spacy/pipeline/edit_tree_lemmatizer.py b/spacy/pipeline/edit_tree_lemmatizer.py index b7d615f6d..7f6367c75 100644 --- a/spacy/pipeline/edit_tree_lemmatizer.py +++ b/spacy/pipeline/edit_tree_lemmatizer.py @@ -1,7 +1,6 @@ from typing import cast, Any, Callable, Dict, Iterable, List, Optional -from typing import Sequence, Tuple, Union +from typing import Tuple from collections import Counter -from copy import deepcopy from itertools import islice import numpy as np @@ -150,7 +149,7 @@ class EditTreeLemmatizer(TrainablePipe): # Handle cases where there are no tokens in any docs. n_labels = len(self.cfg["labels"]) guesses: List[Ints2d] = [ - self.model.ops.alloc((0, n_labels), dtype="i") for doc in docs + self.model.ops.alloc2i(0, n_labels, dtype="i") for _ in docs ] assert len(guesses) == n_docs return guesses From 29649589fc889a58c8b631d569d4ae378a10aa2b Mon Sep 17 00:00:00 2001 From: Sofie Van Landeghem Date: Tue, 11 Oct 2022 15:25:05 +0200 Subject: [PATCH 55/82] remove dtype (#11615) --- spacy/pipeline/edit_tree_lemmatizer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spacy/pipeline/edit_tree_lemmatizer.py b/spacy/pipeline/edit_tree_lemmatizer.py index 7f6367c75..76b0e0bc9 100644 --- a/spacy/pipeline/edit_tree_lemmatizer.py +++ b/spacy/pipeline/edit_tree_lemmatizer.py @@ -149,7 +149,7 @@ class EditTreeLemmatizer(TrainablePipe): # Handle cases where there are no tokens in any docs. 
n_labels = len(self.cfg["labels"]) guesses: List[Ints2d] = [ - self.model.ops.alloc2i(0, n_labels, dtype="i") for _ in docs + self.model.ops.alloc2i(0, n_labels) for _ in docs ] assert len(guesses) == n_docs return guesses From 2e52479eec987367117d27fb4f049df2efb2518d Mon Sep 17 00:00:00 2001 From: Paul O'Leary McCann Date: Tue, 11 Oct 2022 23:45:05 +0900 Subject: [PATCH 56/82] Fix example code for spacy-wordnet (#11593) * Fix example code for spacy-wordnet It looks like in the most recent version, 0.1.0, it's no longer possible to pass the lang parameter to the component separately. Doing so will raise an error. * Apply suggestions from code review Co-authored-by: Sofie Van Landeghem * Cleanup * More cleanup Co-authored-by: Sofie Van Landeghem --- website/meta/universe.json | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/meta/universe.json b/website/meta/universe.json index a6a1a0fc7..637e9d6ce 100644 --- a/website/meta/universe.json +++ b/website/meta/universe.json @@ -2460,20 +2460,20 @@ "import spacy", "from spacy_wordnet.wordnet_annotator import WordnetAnnotator ", "", - "# Load an spacy model (supported models are \"es\" and \"en\") ", - "nlp = spacy.load('en')", - "# Spacy 3.x", - "nlp.add_pipe(\"spacy_wordnet\", after='tagger', config={'lang': nlp.lang})", - "# Spacy 2.x", + "# Load a spaCy model (supported languages are \"es\" and \"en\") ", + "nlp = spacy.load('en_core_web_sm')", + "# spaCy 3.x", + "nlp.add_pipe(\"spacy_wordnet\", after='tagger')", + "# spaCy 2.x", "# nlp.add_pipe(WordnetAnnotator(nlp.lang), after='tagger')", "token = nlp('prices')[0]", "", - "# wordnet object link spacy token with nltk wordnet interface by giving acces to", + "# WordNet object links spaCy token with NLTK WordNet interface by giving access to", "# synsets and lemmas ", "token._.wordnet.synsets()", "token._.wordnet.lemmas()", "", - "# And automatically tags with wordnet domains", + "# And automatically add info about WordNet domains", "token._.wordnet.wordnet_domains()" ], "author": "recognai", From fe06e037bcd733708401bce082863994b1fc48bd Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Wed, 12 Oct 2022 12:18:39 +0200 Subject: [PATCH 57/82] Fix init for pymorphy2_lookup lemmatizer mode (#11631) --- spacy/lang/ru/lemmatizer.py | 2 +- spacy/lang/uk/lemmatizer.py | 2 +- spacy/tests/conftest.py | 17 +++++++++++++++++ spacy/tests/lang/ru/test_lemmatizer.py | 14 ++++++++++++++ spacy/tests/lang/uk/test_lemmatizer.py | 8 ++++++++ 5 files changed, 41 insertions(+), 2 deletions(-) diff --git a/spacy/lang/ru/lemmatizer.py b/spacy/lang/ru/lemmatizer.py index 85180b1e4..5bf685d44 100644 --- a/spacy/lang/ru/lemmatizer.py +++ b/spacy/lang/ru/lemmatizer.py @@ -23,7 +23,7 @@ class RussianLemmatizer(Lemmatizer): overwrite: bool = False, scorer: Optional[Callable] = lemmatizer_score, ) -> None: - if mode == "pymorphy2": + if mode in {"pymorphy2", "pymorphy2_lookup"}: try: from pymorphy2 import MorphAnalyzer except ImportError: diff --git a/spacy/lang/uk/lemmatizer.py b/spacy/lang/uk/lemmatizer.py index a8bc56057..d4f8cc9e5 100644 --- a/spacy/lang/uk/lemmatizer.py +++ b/spacy/lang/uk/lemmatizer.py @@ -18,7 +18,7 @@ class UkrainianLemmatizer(RussianLemmatizer): overwrite: bool = False, scorer: Optional[Callable] = lemmatizer_score, ) -> None: - if mode == "pymorphy2": + if mode in {"pymorphy2", "pymorphy2_lookup"}: try: from pymorphy2 import MorphAnalyzer except ImportError: diff --git a/spacy/tests/conftest.py b/spacy/tests/conftest.py index 742bfcc6a..394ef00d3 100644 --- 
a/spacy/tests/conftest.py +++ b/spacy/tests/conftest.py @@ -343,6 +343,14 @@ def ru_lemmatizer(): return get_lang_class("ru")().add_pipe("lemmatizer") +@pytest.fixture +def ru_lookup_lemmatizer(): + pytest.importorskip("pymorphy2") + return get_lang_class("ru")().add_pipe( + "lemmatizer", config={"mode": "pymorphy2_lookup"} + ) + + @pytest.fixture(scope="session") def sa_tokenizer(): return get_lang_class("sa")().tokenizer @@ -422,6 +430,15 @@ def uk_lemmatizer(): return get_lang_class("uk")().add_pipe("lemmatizer") +@pytest.fixture +def uk_lookup_lemmatizer(): + pytest.importorskip("pymorphy2") + pytest.importorskip("pymorphy2_dicts_uk") + return get_lang_class("uk")().add_pipe( + "lemmatizer", config={"mode": "pymorphy2_lookup"} + ) + + @pytest.fixture(scope="session") def ur_tokenizer(): return get_lang_class("ur")().tokenizer diff --git a/spacy/tests/lang/ru/test_lemmatizer.py b/spacy/tests/lang/ru/test_lemmatizer.py index 9ca7f441b..e82fd4f8c 100644 --- a/spacy/tests/lang/ru/test_lemmatizer.py +++ b/spacy/tests/lang/ru/test_lemmatizer.py @@ -78,3 +78,17 @@ def test_ru_lemmatizer_punct(ru_lemmatizer): assert ru_lemmatizer.pymorphy2_lemmatize(doc[0]) == ['"'] doc = Doc(ru_lemmatizer.vocab, words=["»"], pos=["PUNCT"]) assert ru_lemmatizer.pymorphy2_lemmatize(doc[0]) == ['"'] + + +def test_ru_doc_lookup_lemmatization(ru_lookup_lemmatizer): + words = ["мама", "мыла", "раму"] + pos = ["NOUN", "VERB", "NOUN"] + morphs = [ + "Animacy=Anim|Case=Nom|Gender=Fem|Number=Sing", + "Aspect=Imp|Gender=Fem|Mood=Ind|Number=Sing|Tense=Past|VerbForm=Fin|Voice=Act", + "Animacy=Anim|Case=Acc|Gender=Fem|Number=Sing", + ] + doc = Doc(ru_lookup_lemmatizer.vocab, words=words, pos=pos, morphs=morphs) + doc = ru_lookup_lemmatizer(doc) + lemmas = [token.lemma_ for token in doc] + assert lemmas == ["мама", "мыла", "раму"] diff --git a/spacy/tests/lang/uk/test_lemmatizer.py b/spacy/tests/lang/uk/test_lemmatizer.py index 57dd4198a..788744aa1 100644 --- a/spacy/tests/lang/uk/test_lemmatizer.py +++ b/spacy/tests/lang/uk/test_lemmatizer.py @@ -9,3 +9,11 @@ def test_uk_lemmatizer(uk_lemmatizer): """Check that the default uk lemmatizer runs.""" doc = Doc(uk_lemmatizer.vocab, words=["a", "b", "c"]) uk_lemmatizer(doc) + assert [token.lemma for token in doc] + + +def test_uk_lookup_lemmatizer(uk_lookup_lemmatizer): + """Check that the lookup uk lemmatizer runs.""" + doc = Doc(uk_lookup_lemmatizer.vocab, words=["a", "b", "c"]) + uk_lookup_lemmatizer(doc) + assert [token.lemma for token in doc] From 4d869fcc111151bcefa08ee1a2b7b49dc5ecd677 Mon Sep 17 00:00:00 2001 From: Sofie Van Landeghem Date: Wed, 12 Oct 2022 15:17:40 +0200 Subject: [PATCH 58/82] Small fixes to docstrings (#11610) * add missing scorer arg to docstring * fix class names in textcat_multilabel * add missing scorer to docstrings --- spacy/pipeline/spancat.py | 3 +++ spacy/pipeline/textcat_multilabel.py | 6 ++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/spacy/pipeline/spancat.py b/spacy/pipeline/spancat.py index 1b7a9eecb..ca9f1dab0 100644 --- a/spacy/pipeline/spancat.py +++ b/spacy/pipeline/spancat.py @@ -133,6 +133,9 @@ def make_spancat( spans_key (str): Key of the doc.spans dict to save the spans under. During initialization and training, the component will look for spans on the reference document under the same key. + scorer (Optional[Callable]): The scoring method. Defaults to + Scorer.score_spans for the Doc.spans[spans_key] with overlapping + spans allowed. threshold (float): Minimum probability to consider a prediction positive. 
Spans with a positive prediction will be saved on the Doc. Defaults to 0.5. diff --git a/spacy/pipeline/textcat_multilabel.py b/spacy/pipeline/textcat_multilabel.py index e33a885f8..119ae3310 100644 --- a/spacy/pipeline/textcat_multilabel.py +++ b/spacy/pipeline/textcat_multilabel.py @@ -96,8 +96,8 @@ def make_multilabel_textcat( model: Model[List[Doc], List[Floats2d]], threshold: float, scorer: Optional[Callable], -) -> "TextCategorizer": - """Create a TextCategorizer component. The text categorizer predicts categories +) -> "MultiLabel_TextCategorizer": + """Create a MultiLabel_TextCategorizer component. The text categorizer predicts categories over a whole document. It can learn one or more labels, and the labels are considered to be non-mutually exclusive, which means that there can be zero or more labels per doc). @@ -105,6 +105,7 @@ def make_multilabel_textcat( model (Model[List[Doc], List[Floats2d]]): A model instance that predicts scores for each category. threshold (float): Cutoff to consider a prediction "positive". + scorer (Optional[Callable]): The scoring method. """ return MultiLabel_TextCategorizer( nlp.vocab, model, name, threshold=threshold, scorer=scorer @@ -147,6 +148,7 @@ class MultiLabel_TextCategorizer(TextCategorizer): name (str): The component instance name, used to add entries to the losses during training. threshold (float): Cutoff to consider a prediction "positive". + scorer (Optional[Callable]): The scoring method. DOCS: https://spacy.io/api/textcategorizer#init """ From 6b5a3e72198aa9735587b0712e3eb2c24234b463 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Fri, 14 Oct 2022 08:16:49 +0200 Subject: [PATCH 59/82] Extend to pydantic v1.10 (#11635) * Update types in `spacy.schemas` for updated pydantic+mypy --- requirements.txt | 2 +- setup.cfg | 2 +- spacy/schemas.py | 18 +++++++++--------- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/requirements.txt b/requirements.txt index 14847ff21..9d6bbb2c4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,7 +15,7 @@ pathy>=0.3.5 numpy>=1.15.0 requests>=2.13.0,<3.0.0 tqdm>=4.38.0,<5.0.0 -pydantic>=1.7.4,!=1.8,!=1.8.1,<1.10.0 +pydantic>=1.7.4,!=1.8,!=1.8.1,<1.11.0 jinja2 langcodes>=3.2.0,<4.0.0 # Official Python utilities diff --git a/setup.cfg b/setup.cfg index 2dc5e7042..c2653feba 100644 --- a/setup.cfg +++ b/setup.cfg @@ -56,7 +56,7 @@ install_requires = tqdm>=4.38.0,<5.0.0 numpy>=1.15.0 requests>=2.13.0,<3.0.0 - pydantic>=1.7.4,!=1.8,!=1.8.1,<1.10.0 + pydantic>=1.7.4,!=1.8,!=1.8.1,<1.11.0 jinja2 # Official Python utilities setuptools diff --git a/spacy/schemas.py b/spacy/schemas.py index 048082134..ab71b2016 100644 --- a/spacy/schemas.py +++ b/spacy/schemas.py @@ -181,12 +181,12 @@ class TokenPatternNumber(BaseModel): IS_SUBSET: Optional[List[StrictInt]] = Field(None, alias="is_subset") IS_SUPERSET: Optional[List[StrictInt]] = Field(None, alias="is_superset") INTERSECTS: Optional[List[StrictInt]] = Field(None, alias="intersects") - EQ: Union[StrictInt, StrictFloat] = Field(None, alias="==") - NEQ: Union[StrictInt, StrictFloat] = Field(None, alias="!=") - GEQ: Union[StrictInt, StrictFloat] = Field(None, alias=">=") - LEQ: Union[StrictInt, StrictFloat] = Field(None, alias="<=") - GT: Union[StrictInt, StrictFloat] = Field(None, alias=">") - LT: Union[StrictInt, StrictFloat] = Field(None, alias="<") + EQ: Optional[Union[StrictInt, StrictFloat]] = Field(None, alias="==") + NEQ: Optional[Union[StrictInt, StrictFloat]] = Field(None, alias="!=") + GEQ: Optional[Union[StrictInt, StrictFloat]] = 
Field(None, alias=">=") + LEQ: Optional[Union[StrictInt, StrictFloat]] = Field(None, alias="<=") + GT: Optional[Union[StrictInt, StrictFloat]] = Field(None, alias=">") + LT: Optional[Union[StrictInt, StrictFloat]] = Field(None, alias="<") class Config: extra = "forbid" @@ -430,7 +430,7 @@ class ProjectConfigAssetURL(BaseModel): # fmt: off dest: StrictStr = Field(..., title="Destination of downloaded asset") url: Optional[StrictStr] = Field(None, title="URL of asset") - checksum: str = Field(None, title="MD5 hash of file", regex=r"([a-fA-F\d]{32})") + checksum: Optional[str] = Field(None, title="MD5 hash of file", regex=r"([a-fA-F\d]{32})") description: StrictStr = Field("", title="Description of asset") # fmt: on @@ -438,7 +438,7 @@ class ProjectConfigAssetURL(BaseModel): class ProjectConfigAssetGit(BaseModel): # fmt: off git: ProjectConfigAssetGitItem = Field(..., title="Git repo information") - checksum: str = Field(None, title="MD5 hash of file", regex=r"([a-fA-F\d]{32})") + checksum: Optional[str] = Field(None, title="MD5 hash of file", regex=r"([a-fA-F\d]{32})") description: Optional[StrictStr] = Field(None, title="Description of asset") # fmt: on @@ -508,7 +508,7 @@ class DocJSONSchema(BaseModel): None, title="Indices of sentences' start and end indices" ) text: StrictStr = Field(..., title="Document text") - spans: Dict[StrictStr, List[Dict[StrictStr, Union[StrictStr, StrictInt]]]] = Field( + spans: Optional[Dict[StrictStr, List[Dict[StrictStr, Union[StrictStr, StrictInt]]]]] = Field( None, title="Span information - end/start indices, label, KB ID" ) tokens: List[Dict[StrictStr, Union[StrictStr, StrictInt]]] = Field( From ceb62352bfcad49b3ad63e3e65ef12dabab645b3 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 14 Oct 2022 18:04:55 +0900 Subject: [PATCH 60/82] Auto-format code with black (#11649) Co-authored-by: explosion-bot --- spacy/pipeline/edit_tree_lemmatizer.py | 4 +--- spacy/schemas.py | 6 +++--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/spacy/pipeline/edit_tree_lemmatizer.py b/spacy/pipeline/edit_tree_lemmatizer.py index 76b0e0bc9..12f9b73a3 100644 --- a/spacy/pipeline/edit_tree_lemmatizer.py +++ b/spacy/pipeline/edit_tree_lemmatizer.py @@ -148,9 +148,7 @@ class EditTreeLemmatizer(TrainablePipe): if not any(len(doc) for doc in docs): # Handle cases where there are no tokens in any docs. 
n_labels = len(self.cfg["labels"]) - guesses: List[Ints2d] = [ - self.model.ops.alloc2i(0, n_labels) for _ in docs - ] + guesses: List[Ints2d] = [self.model.ops.alloc2i(0, n_labels) for _ in docs] assert len(guesses) == n_docs return guesses scores = self.model.predict(docs) diff --git a/spacy/schemas.py b/spacy/schemas.py index ab71b2016..a67d96d9d 100644 --- a/spacy/schemas.py +++ b/spacy/schemas.py @@ -508,9 +508,9 @@ class DocJSONSchema(BaseModel): None, title="Indices of sentences' start and end indices" ) text: StrictStr = Field(..., title="Document text") - spans: Optional[Dict[StrictStr, List[Dict[StrictStr, Union[StrictStr, StrictInt]]]]] = Field( - None, title="Span information - end/start indices, label, KB ID" - ) + spans: Optional[ + Dict[StrictStr, List[Dict[StrictStr, Union[StrictStr, StrictInt]]]] + ] = Field(None, title="Span information - end/start indices, label, KB ID") tokens: List[Dict[StrictStr, Union[StrictStr, StrictInt]]] = Field( ..., title="Token information - ID, start, annotations" ) From 2ce6aadda2d455cf2f2a1aef494b2bafe3e07119 Mon Sep 17 00:00:00 2001 From: Sofie Van Landeghem Date: Mon, 17 Oct 2022 12:10:03 +0200 Subject: [PATCH 61/82] update default configs to recent versions (#11618) --- spacy/pipeline/spancat.py | 6 +++--- spacy/pipeline/textcat_multilabel.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/spacy/pipeline/spancat.py b/spacy/pipeline/spancat.py index 1b7a9eecb..5ede622c2 100644 --- a/spacy/pipeline/spancat.py +++ b/spacy/pipeline/spancat.py @@ -26,17 +26,17 @@ scorer = {"@layers": "spacy.LinearLogistic.v1"} hidden_size = 128 [model.tok2vec] -@architectures = "spacy.Tok2Vec.v1" +@architectures = "spacy.Tok2Vec.v2" [model.tok2vec.embed] -@architectures = "spacy.MultiHashEmbed.v1" +@architectures = "spacy.MultiHashEmbed.v2" width = 96 rows = [5000, 2000, 1000, 1000] attrs = ["ORTH", "PREFIX", "SUFFIX", "SHAPE"] include_static_vectors = false [model.tok2vec.encode] -@architectures = "spacy.MaxoutWindowEncoder.v1" +@architectures = "spacy.MaxoutWindowEncoder.v2" width = ${model.tok2vec.embed.width} window_size = 1 maxout_pieces = 3 diff --git a/spacy/pipeline/textcat_multilabel.py b/spacy/pipeline/textcat_multilabel.py index e33a885f8..10aef46aa 100644 --- a/spacy/pipeline/textcat_multilabel.py +++ b/spacy/pipeline/textcat_multilabel.py @@ -19,7 +19,7 @@ multi_label_default_config = """ @architectures = "spacy.TextCatEnsemble.v2" [model.tok2vec] -@architectures = "spacy.Tok2Vec.v1" +@architectures = "spacy.Tok2Vec.v2" [model.tok2vec.embed] @architectures = "spacy.MultiHashEmbed.v2" @@ -29,7 +29,7 @@ attrs = ["ORTH", "LOWER", "PREFIX", "SUFFIX", "SHAPE", "ID"] include_static_vectors = false [model.tok2vec.encode] -@architectures = "spacy.MaxoutWindowEncoder.v1" +@architectures = "spacy.MaxoutWindowEncoder.v2" width = ${model.tok2vec.embed.width} window_size = 1 maxout_pieces = 3 From 858565a5671de61334443d6a2348164bc39216e1 Mon Sep 17 00:00:00 2001 From: Paul O'Leary McCann Date: Tue, 18 Oct 2022 15:11:39 +0900 Subject: [PATCH 62/82] Fix issues with DVC commands (#11592) * Fix flag handling in dvc Prior to this commit, if a flag (--verbose or --quiet) was passed to DVC, it would be added to the end of the generated dvc command line. This would result in the command being interpreted as part of the actual command to run, rather than an argument to dvc. This would result in command lines like: spacy project run preprocess --verbose That would fail with an error that there's no such directory as `--verbose`. 
This change puts the flags at the front of the dvc command so that they
are interpreted correctly. It removes the `run_dvc_commands`
function, which had been reduced to just a for loop and wasn't used
elsewhere.

A separate problem is that there's no way to specify the quiet behaviour
to dvc from the command line, though it's unclear if that's a bug.

* Add dvc quiet flag to docs

* Handle case in DVC where no commands are appropriate

If you only have commands with no deps or outputs (admittedly unlikely),
you get a weird error about the dvc file not existing. This gives
explicit output instead.

* Add support for quiet flag

* Fix command execution

Commands are strings now because they're joined further up.
---
 spacy/cli/project/dvc.py | 57 +++++++++++++++++++++-------------
 website/docs/api/cli.md  |  3 ++-
 2 files changed, 32 insertions(+), 28 deletions(-)

diff --git a/spacy/cli/project/dvc.py b/spacy/cli/project/dvc.py
index 83dc5efbf..a15353855 100644
--- a/spacy/cli/project/dvc.py
+++ b/spacy/cli/project/dvc.py
@@ -25,6 +25,7 @@ def project_update_dvc_cli(
     project_dir: Path = Arg(Path.cwd(), help="Location of project directory. Defaults to current working directory.", exists=True, file_okay=False),
     workflow: Optional[str] = Arg(None, help=f"Name of workflow defined in {PROJECT_FILE}. Defaults to first workflow if not set."),
     verbose: bool = Opt(False, "--verbose", "-V", help="Print more info"),
+    quiet: bool = Opt(False, "--quiet", "-q", help="Print less info"),
     force: bool = Opt(False, "--force", "-F", help="Force update DVC config"),
     # fmt: on
 ):
@@ -36,7 +37,7 @@ def project_update_dvc_cli(
 
     DOCS: https://spacy.io/api/cli#project-dvc
     """
-    project_update_dvc(project_dir, workflow, verbose=verbose, force=force)
+    project_update_dvc(project_dir, workflow, verbose=verbose, quiet=quiet, force=force)
 
 
 def project_update_dvc(
@@ -44,6 +45,7 @@ def project_update_dvc(
     workflow: Optional[str] = None,
     *,
     verbose: bool = False,
+    quiet: bool = False,
     force: bool = False,
 ) -> None:
     """Update the auto-generated Data Version Control (DVC) config file. A DVC
@@ -54,11 +56,12 @@ def project_update_dvc(
     workflow (Optional[str]): Optional name of workflow defined in project.yml.
        If not set, the first workflow will be used.
     verbose (bool): Print more info.
+    quiet (bool): Print less info.
     force (bool): Force update DVC config.
     """
     config = load_project_config(project_dir)
     updated = update_dvc_config(
-        project_dir, config, workflow, verbose=verbose, force=force
+        project_dir, config, workflow, verbose=verbose, quiet=quiet, force=force
     )
     help_msg = "To execute the workflow with DVC, run: dvc repro"
     if updated:
@@ -72,7 +75,7 @@ def update_dvc_config(
     config: Dict[str, Any],
     workflow: Optional[str] = None,
     verbose: bool = False,
-    silent: bool = False,
+    quiet: bool = False,
     force: bool = False,
 ) -> bool:
     """Re-run the DVC commands in dry mode and update dvc.yaml file in the
@@ -83,7 +86,7 @@ def update_dvc_config(
     path (Path): The path to the project directory.
     config (Dict[str, Any]): The loaded project.yml.
     verbose (bool): Whether to print additional info (via DVC).
-    silent (bool): Don't output anything (via DVC).
+    quiet (bool): Don't output anything (via DVC).
     force (bool): Force update, even if hashes match.
     RETURNS (bool): Whether the DVC config file was updated.
""" @@ -105,6 +108,14 @@ def update_dvc_config( dvc_config_path.unlink() dvc_commands = [] config_commands = {cmd["name"]: cmd for cmd in config.get("commands", [])} + + # some flags that apply to every command + flags = [] + if verbose: + flags.append("--verbose") + if quiet: + flags.append("--quiet") + for name in workflows[workflow]: command = config_commands[name] deps = command.get("deps", []) @@ -118,14 +129,26 @@ def update_dvc_config( deps_cmd = [c for cl in [["-d", p] for p in deps] for c in cl] outputs_cmd = [c for cl in [["-o", p] for p in outputs] for c in cl] outputs_nc_cmd = [c for cl in [["-O", p] for p in outputs_no_cache] for c in cl] - dvc_cmd = ["run", "-n", name, "-w", str(path), "--no-exec"] + + dvc_cmd = ["run", *flags, "-n", name, "-w", str(path), "--no-exec"] if command.get("no_skip"): dvc_cmd.append("--always-changed") full_cmd = [*dvc_cmd, *deps_cmd, *outputs_cmd, *outputs_nc_cmd, *project_cmd] dvc_commands.append(join_command(full_cmd)) + + if not dvc_commands: + # If we don't check for this, then there will be an error when reading the + # config, since DVC wouldn't create it. + msg.fail( + "No usable commands for DVC found. This can happen if none of your " + "commands have dependencies or outputs.", + exits=1, + ) + with working_dir(path): - dvc_flags = {"--verbose": verbose, "--quiet": silent} - run_dvc_commands(dvc_commands, flags=dvc_flags) + for c in dvc_commands: + dvc_command = "dvc " + c + run_command(dvc_command) with dvc_config_path.open("r+", encoding="utf8") as f: content = f.read() f.seek(0, 0) @@ -133,26 +156,6 @@ def update_dvc_config( return True -def run_dvc_commands( - commands: Iterable[str] = SimpleFrozenList(), flags: Dict[str, bool] = {} -) -> None: - """Run a sequence of DVC commands in a subprocess, in order. - - commands (List[str]): The string commands without the leading "dvc". - flags (Dict[str, bool]): Conditional flags to be added to command. Makes it - easier to pass flags like --quiet that depend on a variable or - command-line setting while avoiding lots of nested conditionals. - """ - for c in commands: - command = split_command(c) - dvc_command = ["dvc", *command] - # Add the flags if they are set to True - for flag, is_active in flags.items(): - if is_active: - dvc_command.append(flag) - run_command(dvc_command) - - def check_workflows(workflows: List[str], workflow: Optional[str] = None) -> None: """Validate workflows provided in project.yml and check that a given workflow can be used to generate a DVC config. diff --git a/website/docs/api/cli.md b/website/docs/api/cli.md index e5cd3089b..fc2c46022 100644 --- a/website/docs/api/cli.md +++ b/website/docs/api/cli.md @@ -1482,7 +1482,7 @@ You'll also need to add the assets you want to track with ```cli -$ python -m spacy project dvc [project_dir] [workflow] [--force] [--verbose] +$ python -m spacy project dvc [project_dir] [workflow] [--force] [--verbose] [--quiet] ``` > #### Example @@ -1499,6 +1499,7 @@ $ python -m spacy project dvc [project_dir] [workflow] [--force] [--verbose] | `workflow` | Name of workflow defined in `project.yml`. Defaults to first workflow if not set. ~~Optional[str] \(option)~~ | | `--force`, `-F` | Force-updating config file. ~~bool (flag)~~ | | `--verbose`, `-V` | Print more output generated by DVC. ~~bool (flag)~~ | +| `--quiet`, `-q` | Print no output generated by DVC. ~~bool (flag)~~ | | `--help`, `-h` | Show help message and available arguments. 
~~bool (flag)~~ | | **CREATES** | A `dvc.yaml` file in the project directory, based on the steps defined in the given workflow. | From a1eacaa8db055322d4a066a08b730243a2f5b969 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Tue, 18 Oct 2022 14:36:06 +0200 Subject: [PATCH 63/82] Add python 3.11.0rc2 to CI (#11667) --- .github/azure-steps.yml | 1 + azure-pipelines.yml | 9 +++++++++ 2 files changed, 10 insertions(+) diff --git a/.github/azure-steps.yml b/.github/azure-steps.yml index 9d57219ca..cc0247b3a 100644 --- a/.github/azure-steps.yml +++ b/.github/azure-steps.yml @@ -10,6 +10,7 @@ steps: inputs: versionSpec: ${{ parameters.python_version }} architecture: ${{ parameters.architecture }} + allowUnstable: true - bash: | echo "##vso[task.setvariable variable=python_version]${{ parameters.python_version }}" diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 2f5201614..357cce835 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -85,6 +85,15 @@ jobs: Python310Mac: imageName: "macos-latest" python.version: "3.10" + Python311Linux: + imageName: 'ubuntu-latest' + python.version: '3.11.0-rc.2' + Python311Windows: + imageName: 'windows-latest' + python.version: '3.11.0-rc.2' + Python311Mac: + imageName: 'macos-latest' + python.version: '3.11.0-rc.2' maxParallel: 4 pool: vmImage: $(imageName) From d66ccb8eb08cd515904045de84351546065fb3ed Mon Sep 17 00:00:00 2001 From: Edward <43848523+thomashacker@users.noreply.github.com> Date: Wed, 19 Oct 2022 15:52:47 +0200 Subject: [PATCH 64/82] Fix multiple entries per custom extension in doc json (#11551) * Fix multiple extensions and character offset * Rename token_start/end to start/end * Refactor Doc.from_json based on review * Iterate over user_data items * Only add non-empty underscore entries Co-authored-by: Adriane Boyd --- spacy/schemas.py | 4 +- spacy/tests/doc/test_json_doc_conversion.py | 25 +++++++---- spacy/tokens/doc.pyx | 48 ++++++++++----------- 3 files changed, 42 insertions(+), 35 deletions(-) diff --git a/spacy/schemas.py b/spacy/schemas.py index a67d96d9d..c824d76b9 100644 --- a/spacy/schemas.py +++ b/spacy/schemas.py @@ -519,9 +519,9 @@ class DocJSONSchema(BaseModel): title="Any custom data stored in the document's _ attribute", alias="_", ) - underscore_token: Optional[Dict[StrictStr, Dict[StrictStr, Any]]] = Field( + underscore_token: Optional[Dict[StrictStr, List[Dict[StrictStr, Any]]]] = Field( None, title="Any custom data stored in the token's _ attribute" ) - underscore_span: Optional[Dict[StrictStr, Dict[StrictStr, Any]]] = Field( + underscore_span: Optional[Dict[StrictStr, List[Dict[StrictStr, Any]]]] = Field( None, title="Any custom data stored in the span's _ attribute" ) diff --git a/spacy/tests/doc/test_json_doc_conversion.py b/spacy/tests/doc/test_json_doc_conversion.py index 0d7c061c9..19698cfb2 100644 --- a/spacy/tests/doc/test_json_doc_conversion.py +++ b/spacy/tests/doc/test_json_doc_conversion.py @@ -128,7 +128,9 @@ def test_doc_to_json_with_token_span_attributes(doc): doc._.json_test1 = "hello world" doc._.json_test2 = [1, 2, 3] doc[0:1]._.span_test = "span_attribute" + doc[0:2]._.span_test = "span_attribute_2" doc[0]._.token_test = 117 + doc[1]._.token_test = 118 doc.spans["span_group"] = [doc[0:1]] json_doc = doc.to_json( underscore=["json_test1", "json_test2", "token_test", "span_test"] @@ -139,8 +141,10 @@ def test_doc_to_json_with_token_span_attributes(doc): assert json_doc["_"]["json_test2"] == [1, 2, 3] assert "underscore_token" in json_doc assert "underscore_span" in json_doc - assert 
json_doc["underscore_token"]["token_test"]["value"] == 117 - assert json_doc["underscore_span"]["span_test"]["value"] == "span_attribute" + assert json_doc["underscore_token"]["token_test"][0]["value"] == 117 + assert json_doc["underscore_token"]["token_test"][1]["value"] == 118 + assert json_doc["underscore_span"]["span_test"][0]["value"] == "span_attribute" + assert json_doc["underscore_span"]["span_test"][1]["value"] == "span_attribute_2" assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0 assert srsly.json_loads(srsly.json_dumps(json_doc)) == json_doc @@ -161,8 +165,8 @@ def test_doc_to_json_with_custom_user_data(doc): assert json_doc["_"]["json_test"] == "hello world" assert "underscore_token" in json_doc assert "underscore_span" in json_doc - assert json_doc["underscore_token"]["token_test"]["value"] == 117 - assert json_doc["underscore_span"]["span_test"]["value"] == "span_attribute" + assert json_doc["underscore_token"]["token_test"][0]["value"] == 117 + assert json_doc["underscore_span"]["span_test"][0]["value"] == "span_attribute" assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0 assert srsly.json_loads(srsly.json_dumps(json_doc)) == json_doc @@ -181,8 +185,8 @@ def test_doc_to_json_with_token_span_same_identifier(doc): assert json_doc["_"]["my_ext"] == "hello world" assert "underscore_token" in json_doc assert "underscore_span" in json_doc - assert json_doc["underscore_token"]["my_ext"]["value"] == 117 - assert json_doc["underscore_span"]["my_ext"]["value"] == "span_attribute" + assert json_doc["underscore_token"]["my_ext"][0]["value"] == 117 + assert json_doc["underscore_span"]["my_ext"][0]["value"] == "span_attribute" assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0 assert srsly.json_loads(srsly.json_dumps(json_doc)) == json_doc @@ -195,10 +199,9 @@ def test_doc_to_json_with_token_attributes_missing(doc): doc[0]._.token_test = 117 json_doc = doc.to_json(underscore=["span_test"]) - assert "underscore_token" in json_doc assert "underscore_span" in json_doc - assert json_doc["underscore_span"]["span_test"]["value"] == "span_attribute" - assert "token_test" not in json_doc["underscore_token"] + assert json_doc["underscore_span"]["span_test"][0]["value"] == "span_attribute" + assert "underscore_token" not in json_doc assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0 @@ -283,7 +286,9 @@ def test_json_to_doc_with_token_span_attributes(doc): doc._.json_test1 = "hello world" doc._.json_test2 = [1, 2, 3] doc[0:1]._.span_test = "span_attribute" + doc[0:2]._.span_test = "span_attribute_2" doc[0]._.token_test = 117 + doc[1]._.token_test = 118 json_doc = doc.to_json( underscore=["json_test1", "json_test2", "token_test", "span_test"] @@ -295,7 +300,9 @@ def test_json_to_doc_with_token_span_attributes(doc): assert new_doc._.json_test1 == "hello world" assert new_doc._.json_test2 == [1, 2, 3] assert new_doc[0]._.token_test == 117 + assert new_doc[1]._.token_test == 118 assert new_doc[0:1]._.span_test == "span_attribute" + assert new_doc[0:2]._.span_test == "span_attribute_2" assert new_doc.user_data == doc.user_data assert new_doc.to_bytes(exclude=["user_data"]) == doc.to_bytes( exclude=["user_data"] diff --git a/spacy/tokens/doc.pyx b/spacy/tokens/doc.pyx index d7d2fd8e6..295f91c28 100644 --- a/spacy/tokens/doc.pyx +++ b/spacy/tokens/doc.pyx @@ -1608,24 +1608,20 @@ cdef class Doc: Doc.set_extension(attr) self._.set(attr, doc_json["_"][attr]) - if doc_json.get("underscore_token", {}): - for token_attr in 
doc_json["underscore_token"]: - token_start = doc_json["underscore_token"][token_attr]["token_start"] - value = doc_json["underscore_token"][token_attr]["value"] - - if not Token.has_extension(token_attr): - Token.set_extension(token_attr) - self[token_start]._.set(token_attr, value) + for token_attr in doc_json.get("underscore_token", {}): + if not Token.has_extension(token_attr): + Token.set_extension(token_attr) + for token_data in doc_json["underscore_token"][token_attr]: + start = token_by_char(self.c, self.length, token_data["start"]) + value = token_data["value"] + self[start]._.set(token_attr, value) - if doc_json.get("underscore_span", {}): - for span_attr in doc_json["underscore_span"]: - token_start = doc_json["underscore_span"][span_attr]["token_start"] - token_end = doc_json["underscore_span"][span_attr]["token_end"] - value = doc_json["underscore_span"][span_attr]["value"] - - if not Span.has_extension(span_attr): - Span.set_extension(span_attr) - self[token_start:token_end]._.set(span_attr, value) + for span_attr in doc_json.get("underscore_span", {}): + if not Span.has_extension(span_attr): + Span.set_extension(span_attr) + for span_data in doc_json["underscore_span"][span_attr]: + value = span_data["value"] + self.char_span(span_data["start"], span_data["end"])._.set(span_attr, value) return self def to_json(self, underscore=None): @@ -1673,30 +1669,34 @@ cdef class Doc: if underscore: user_keys = set() if self.user_data: - data["_"] = {} - data["underscore_token"] = {} - data["underscore_span"] = {} - for data_key in self.user_data: + for data_key, value in self.user_data.copy().items(): if type(data_key) == tuple and len(data_key) >= 4 and data_key[0] == "._.": attr = data_key[1] start = data_key[2] end = data_key[3] if attr in underscore: user_keys.add(attr) - value = self.user_data[data_key] if not srsly.is_json_serializable(value): raise ValueError(Errors.E107.format(attr=attr, value=repr(value))) # Check if doc attribute if start is None: + if "_" not in data: + data["_"] = {} data["_"][attr] = value # Check if token attribute elif end is None: + if "underscore_token" not in data: + data["underscore_token"] = {} if attr not in data["underscore_token"]: - data["underscore_token"][attr] = {"token_start": start, "value": value} + data["underscore_token"][attr] = [] + data["underscore_token"][attr].append({"start": start, "value": value}) # Else span attribute else: + if "underscore_span" not in data: + data["underscore_span"] = {} if attr not in data["underscore_span"]: - data["underscore_span"][attr] = {"token_start": start, "token_end": end, "value": value} + data["underscore_span"][attr] = [] + data["underscore_span"][attr].append({"start": start, "end": end, "value": value}) for attr in underscore: if attr not in user_keys: From 3d0e895363921d4acb7f89a5b708472681e6fc1b Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Wed, 19 Oct 2022 17:33:55 +0200 Subject: [PATCH 65/82] Set version to v3.4.2 (#11672) --- spacy/about.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spacy/about.py b/spacy/about.py index 843c15aba..ce86e6294 100644 --- a/spacy/about.py +++ b/spacy/about.py @@ -1,6 +1,6 @@ # fmt: off __title__ = "spacy" -__version__ = "3.4.1" +__version__ = "3.4.2" __download_url__ = "https://github.com/explosion/spacy-models/releases/download" __compatibility__ = "https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json" __projects__ = "https://github.com/explosion/projects" From bf83f6872a55e307da289fb901db3c16dd35e8d1 
Mon Sep 17 00:00:00 2001 From: Paul O'Leary McCann Date: Thu, 20 Oct 2022 20:35:03 +0900 Subject: [PATCH 66/82] Add detailed example of env dict usage (#11677) * Add detailed example of env dict usage * Mark code blocks as yaml --- website/docs/usage/projects.md | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/website/docs/usage/projects.md b/website/docs/usage/projects.md index 4797bbfe3..90b612358 100644 --- a/website/docs/usage/projects.md +++ b/website/docs/usage/projects.md @@ -243,6 +243,27 @@ pipelines. > python -m spacy project run test . --vars.foo bar > ``` +> #### Tip: Environment Variables +> +> Commands in a project file are not executed in a shell, so they don't have +> direct access to environment variables. But you can insert environment +> variables using the `env` dictionary to make values available for +> interpolation, just like values in `vars`. Here's an example `env` dict that +> makes `$PATH` available as `ENV_PATH`: +> +> ```yaml +> env: +> ENV_PATH: PATH +> ``` +> +> This can be used in a project command like so: +> +> ```yaml +> - name: "echo-path" +> script: +> - "echo ${env.ENV_PATH}" +> ``` + | Section | Description | | --------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | | `title` | An optional project title used in `--help` message and [auto-generated docs](#custom-docs). | From b69d249a223fa4e633e11babc0830f3b68df57e2 Mon Sep 17 00:00:00 2001 From: Cellan Hall <60790416+Ce11an@users.noreply.github.com> Date: Thu, 20 Oct 2022 12:38:29 +0100 Subject: [PATCH 67/82] Adding `spacy-cleaner` to the spaCy universe (#11674) * added spacy-cleaner to the spaCy universe * Move data to righ section of universe.json * Cleanup - fix typo ("replacers") - spaCy doesn't need to be marked as code - lemma of "Hello" is lower case Co-authored-by: Paul O'Leary McCann --- website/meta/universe.json | 41 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/website/meta/universe.json b/website/meta/universe.json index 637e9d6ce..d7c99956b 100644 --- a/website/meta/universe.json +++ b/website/meta/universe.json @@ -1,5 +1,46 @@ { "resources": [ + { + "id": "spacy-cleaner", + "title": "spacy-cleaner", + "slogan": "Easily clean text with spaCy!", + "description": "**spacy-cleaner** utilises spaCy `Language` models to replace, remove, and \n mutate spaCy tokens. 
Cleaning actions available are:\n\n* Remove/replace stopwords.\n* Remove/replace punctuation.\n* Remove/replace numbers.\n* Remove/replace emails.\n* Remove/replace URLs.\n* Perform lemmatisation.\n\nSee our [docs](https://ce11an.github.io/spacy-cleaner/) for more information.", + "github": "Ce11an/spacy-cleaner", + "pip": "spacy-cleaner", + "code_example": [ + "import spacy", + "import spacy_cleaner", + "from spacy_cleaner.processing import removers, replacers, mutators", + "", + "model = spacy.load(\"en_core_web_sm\")", + "pipeline = spacy_cleaner.Pipeline(", + " model,", + " removers.remove_stopword_token,", + " replacers.replace_punctuation_token,", + " mutators.mutate_lemma_token,", + ")", + "", + "texts = [\"Hello, my name is Cellan! I love to swim!\"]", + "", + "pipeline.clean(texts)", + "# ['hello _IS_PUNCT_ Cellan _IS_PUNCT_ love swim _IS_PUNCT_']" + ], + "code_language": "python", + "url": "https://ce11an.github.io/spacy-cleaner/", + "image": "https://raw.githubusercontent.com/Ce11an/spacy-cleaner/main/docs/assets/images/spacemen.png", + "author": "Cellan Hall", + "author_links": { + "twitter": "Ce11an", + "github": "Ce11an", + "website": "https://www.linkedin.com/in/cellan-hall/" + }, + "category": [ + "extension" + ], + "tags": [ + "text-processing" + ] + }, { "id": "Zshot", "title": "Zshot", From 0e2b7fb28be1eb7a2950ae341b7956c4b0ab7af8 Mon Sep 17 00:00:00 2001 From: Paul O'Leary McCann Date: Fri, 21 Oct 2022 18:01:18 +0900 Subject: [PATCH 68/82] Remove thinc util reimports (#11665) * Remove imports marked as v2 leftovers There are a few functions that were in `spacy.util` in v2, but were moved to Thinc. In v3 these were imported in `spacy.util` so that code could be used unchanged, but the comment over them indicates they should always be imported from Thinc. This commit removes those imports. It doesn't look like any DeprecationWarning was ever thrown for using these, but it is probably fine to remove them anyway with a major version. It is not clear that they were widely used. * Import fix_random_seed correctly This seems to be the only place in spaCy that was using the old import. --- spacy/tests/pipeline/test_spancat.py | 4 ++-- spacy/util.py | 6 ------ 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/spacy/tests/pipeline/test_spancat.py b/spacy/tests/pipeline/test_spancat.py index 4fb26c7e7..d7da6eb23 100644 --- a/spacy/tests/pipeline/test_spancat.py +++ b/spacy/tests/pipeline/test_spancat.py @@ -1,7 +1,7 @@ import pytest import numpy from numpy.testing import assert_array_equal, assert_almost_equal -from thinc.api import get_current_ops, Ragged +from thinc.api import get_current_ops, Ragged, fix_random_seed from spacy import util from spacy.lang.en import English @@ -9,7 +9,7 @@ from spacy.language import Language from spacy.tokens import SpanGroup from spacy.tokens.span_groups import SpanGroups from spacy.training import Example -from spacy.util import fix_random_seed, registry, make_tempdir +from spacy.util import registry, make_tempdir OPS = get_current_ops() diff --git a/spacy/util.py b/spacy/util.py index 3034808ba..809bc1814 100644 --- a/spacy/util.py +++ b/spacy/util.py @@ -37,12 +37,6 @@ try: except ImportError: cupy = None -# These are functions that were previously (v2.x) available from spacy.util -# and have since moved to Thinc. We're importing them here so people's code -# doesn't break, but they should always be imported from Thinc from now on, -# not from spacy.util. 
-from thinc.api import fix_random_seed, compounding, decaying # noqa: F401 - from .symbols import ORTH from .compat import cupy, CudaStream, is_windows, importlib_metadata From 84d9cb6b387572293c8bcf26b0e71b508104b165 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 21 Oct 2022 11:54:17 +0200 Subject: [PATCH 69/82] Auto-format code with black (#11687) Co-authored-by: explosion-bot --- spacy/tests/pipeline/test_tok2vec.py | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/spacy/tests/pipeline/test_tok2vec.py b/spacy/tests/pipeline/test_tok2vec.py index 659274db9..e423d9a19 100644 --- a/spacy/tests/pipeline/test_tok2vec.py +++ b/spacy/tests/pipeline/test_tok2vec.py @@ -231,7 +231,7 @@ def test_tok2vec_listener_callback(): def test_tok2vec_listener_overfitting(): - """ Test that a pipeline with a listener properly overfits, even if 'tok2vec' is in the annotating components """ + """Test that a pipeline with a listener properly overfits, even if 'tok2vec' is in the annotating components""" orig_config = Config().from_str(cfg_string) nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True) train_examples = [] @@ -264,7 +264,7 @@ def test_tok2vec_listener_overfitting(): def test_tok2vec_frozen_not_annotating(): - """ Test that a pipeline with a frozen tok2vec raises an error when the tok2vec is not annotating """ + """Test that a pipeline with a frozen tok2vec raises an error when the tok2vec is not annotating""" orig_config = Config().from_str(cfg_string) nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True) train_examples = [] @@ -274,12 +274,16 @@ def test_tok2vec_frozen_not_annotating(): for i in range(2): losses = {} - with pytest.raises(ValueError, match=r"the tok2vec embedding layer is not updated"): - nlp.update(train_examples, sgd=optimizer, losses=losses, exclude=["tok2vec"]) + with pytest.raises( + ValueError, match=r"the tok2vec embedding layer is not updated" + ): + nlp.update( + train_examples, sgd=optimizer, losses=losses, exclude=["tok2vec"] + ) def test_tok2vec_frozen_overfitting(): - """ Test that a pipeline with a frozen & annotating tok2vec can still overfit """ + """Test that a pipeline with a frozen & annotating tok2vec can still overfit""" orig_config = Config().from_str(cfg_string) nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True) train_examples = [] @@ -289,7 +293,13 @@ def test_tok2vec_frozen_overfitting(): for i in range(100): losses = {} - nlp.update(train_examples, sgd=optimizer, losses=losses, exclude=["tok2vec"], annotates=["tok2vec"]) + nlp.update( + train_examples, + sgd=optimizer, + losses=losses, + exclude=["tok2vec"], + annotates=["tok2vec"], + ) assert losses["tagger"] < 0.0001 # test the trained model From cae4589f5a229ab4688338474d93d33512226fd8 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Mon, 24 Oct 2022 09:11:35 +0200 Subject: [PATCH 70/82] Replace EntityRuler with SpanRuler implementation (#11320) * Replace EntityRuler with SpanRuler implementation Remove `EntityRuler` and rename the `SpanRuler`-based `future_entity_ruler` to `entity_ruler`. Main changes: * It is no longer possible to load patterns on init as with `EntityRuler(patterns=)`. * The older serialization formats (`patterns.jsonl`) are no longer supported and the related tests are removed. 
* The config settings are only stored in the config, not in the serialized component (in particular the `phrase_matcher_attr` and overwrite settings). * Add migration guide to EntityRuler API docs * docs update * Minor edit Co-authored-by: svlandeg --- spacy/errors.py | 8 +- spacy/pipeline/__init__.py | 2 - spacy/pipeline/entity_ruler.py | 525 ------------------ spacy/pipeline/span_ruler.py | 17 +- spacy/tests/matcher/test_phrase_matcher.py | 9 +- spacy/tests/pipeline/test_entity_ruler.py | 255 +++------ .../serialize/test_serialize_pipeline.py | 57 +- website/docs/api/entityruler.md | 313 ++--------- website/docs/api/spanruler.md | 11 + website/docs/usage/101/_architecture.md | 40 +- website/docs/usage/101/_pipelines.md | 6 +- website/docs/usage/processing-pipelines.md | 5 +- website/docs/usage/rule-based-matching.md | 45 +- website/docs/usage/saving-loading.md | 10 +- website/docs/usage/training.md | 2 +- 15 files changed, 233 insertions(+), 1072 deletions(-) delete mode 100644 spacy/pipeline/entity_ruler.py diff --git a/spacy/errors.py b/spacy/errors.py index 856660106..820f7352e 100644 --- a/spacy/errors.py +++ b/spacy/errors.py @@ -460,13 +460,13 @@ class Errors(metaclass=ErrorsWithCodes): "same, but found '{nlp}' and '{vocab}' respectively.") E152 = ("The attribute {attr} is not supported for token patterns. " "Please use the option `validate=True` with the Matcher, PhraseMatcher, " - "EntityRuler or AttributeRuler for more details.") + "SpanRuler or AttributeRuler for more details.") E153 = ("The value type {vtype} is not supported for token patterns. " "Please use the option validate=True with Matcher, PhraseMatcher, " - "EntityRuler or AttributeRuler for more details.") + "SpanRuler or AttributeRuler for more details.") E154 = ("One of the attributes or values is not supported for token " "patterns. Please use the option `validate=True` with the Matcher, " - "PhraseMatcher, or EntityRuler for more details.") + "PhraseMatcher, or SpanRuler for more details.") E155 = ("The pipeline needs to include a {pipe} in order to use " "Matcher or PhraseMatcher with the attribute {attr}. " "Try using `nlp()` instead of `nlp.make_doc()` or `list(nlp.pipe())` " @@ -917,8 +917,6 @@ class Errors(metaclass=ErrorsWithCodes): E1021 = ("`pos` value \"{pp}\" is not a valid Universal Dependencies tag. " "Non-UD tags should use the `tag` property.") E1022 = ("Words must be of type str or int, but input is of type '{wtype}'") - E1023 = ("Couldn't read EntityRuler from the {path}. This file doesn't " - "exist.") E1024 = ("A pattern with {attr_type} '{label}' is not present in " "'{component}' patterns.") E1025 = ("Cannot intify the value '{value}' as an IOB string. 
The only " diff --git a/spacy/pipeline/__init__.py b/spacy/pipeline/__init__.py index 4744a989b..14dfed949 100644 --- a/spacy/pipeline/__init__.py +++ b/spacy/pipeline/__init__.py @@ -3,7 +3,6 @@ from .dep_parser import DependencyParser from .edit_tree_lemmatizer import EditTreeLemmatizer from .entity_linker import EntityLinker from .ner import EntityRecognizer -from .entity_ruler import EntityRuler from .lemmatizer import Lemmatizer from .morphologizer import Morphologizer from .pipe import Pipe @@ -23,7 +22,6 @@ __all__ = [ "DependencyParser", "EntityLinker", "EntityRecognizer", - "EntityRuler", "Morphologizer", "Lemmatizer", "MultiLabel_TextCategorizer", diff --git a/spacy/pipeline/entity_ruler.py b/spacy/pipeline/entity_ruler.py deleted file mode 100644 index 8154a077d..000000000 --- a/spacy/pipeline/entity_ruler.py +++ /dev/null @@ -1,525 +0,0 @@ -from typing import Optional, Union, List, Dict, Tuple, Iterable, Any, Callable, Sequence -import warnings -from collections import defaultdict -from pathlib import Path -import srsly - -from .pipe import Pipe -from ..training import Example -from ..language import Language -from ..errors import Errors, Warnings -from ..util import ensure_path, to_disk, from_disk, SimpleFrozenList, registry -from ..tokens import Doc, Span -from ..matcher import Matcher, PhraseMatcher -from ..scorer import get_ner_prf - - -DEFAULT_ENT_ID_SEP = "||" -PatternType = Dict[str, Union[str, List[Dict[str, Any]]]] - - -@Language.factory( - "entity_ruler", - assigns=["doc.ents", "token.ent_type", "token.ent_iob"], - default_config={ - "phrase_matcher_attr": None, - "validate": False, - "overwrite_ents": False, - "ent_id_sep": DEFAULT_ENT_ID_SEP, - "scorer": {"@scorers": "spacy.entity_ruler_scorer.v1"}, - }, - default_score_weights={ - "ents_f": 1.0, - "ents_p": 0.0, - "ents_r": 0.0, - "ents_per_type": None, - }, -) -def make_entity_ruler( - nlp: Language, - name: str, - phrase_matcher_attr: Optional[Union[int, str]], - validate: bool, - overwrite_ents: bool, - ent_id_sep: str, - scorer: Optional[Callable], -): - return EntityRuler( - nlp, - name, - phrase_matcher_attr=phrase_matcher_attr, - validate=validate, - overwrite_ents=overwrite_ents, - ent_id_sep=ent_id_sep, - scorer=scorer, - ) - - -def entity_ruler_score(examples, **kwargs): - return get_ner_prf(examples) - - -@registry.scorers("spacy.entity_ruler_scorer.v1") -def make_entity_ruler_scorer(): - return entity_ruler_score - - -class EntityRuler(Pipe): - """The EntityRuler lets you add spans to the `Doc.ents` using token-based - rules or exact phrase matches. It can be combined with the statistical - `EntityRecognizer` to boost accuracy, or used on its own to implement a - purely rule-based entity recognition system. After initialization, the - component is typically added to the pipeline using `nlp.add_pipe`. - - DOCS: https://spacy.io/api/entityruler - USAGE: https://spacy.io/usage/rule-based-matching#entityruler - """ - - def __init__( - self, - nlp: Language, - name: str = "entity_ruler", - *, - phrase_matcher_attr: Optional[Union[int, str]] = None, - validate: bool = False, - overwrite_ents: bool = False, - ent_id_sep: str = DEFAULT_ENT_ID_SEP, - patterns: Optional[List[PatternType]] = None, - scorer: Optional[Callable] = entity_ruler_score, - ) -> None: - """Initialize the entity ruler. If patterns are supplied here, they - need to be a list of dictionaries with a `"label"` and `"pattern"` - key. A pattern can either be a token pattern (list) or a phrase pattern - (string). 
For example: `{'label': 'ORG', 'pattern': 'Apple'}`. - - nlp (Language): The shared nlp object to pass the vocab to the matchers - and process phrase patterns. - name (str): Instance name of the current pipeline component. Typically - passed in automatically from the factory when the component is - added. Used to disable the current entity ruler while creating - phrase patterns with the nlp object. - phrase_matcher_attr (int / str): Token attribute to match on, passed - to the internal PhraseMatcher as `attr` - validate (bool): Whether patterns should be validated, passed to - Matcher and PhraseMatcher as `validate` - patterns (iterable): Optional patterns to load in. - overwrite_ents (bool): If existing entities are present, e.g. entities - added by the model, overwrite them by matches if necessary. - ent_id_sep (str): Separator used internally for entity IDs. - scorer (Optional[Callable]): The scoring method. Defaults to - spacy.scorer.get_ner_prf. - - DOCS: https://spacy.io/api/entityruler#init - """ - self.nlp = nlp - self.name = name - self.overwrite = overwrite_ents - self.token_patterns = defaultdict(list) # type: ignore - self.phrase_patterns = defaultdict(list) # type: ignore - self._validate = validate - self.matcher = Matcher(nlp.vocab, validate=validate) - self.phrase_matcher_attr = phrase_matcher_attr - self.phrase_matcher = PhraseMatcher( - nlp.vocab, attr=self.phrase_matcher_attr, validate=validate - ) - self.ent_id_sep = ent_id_sep - self._ent_ids = defaultdict(tuple) # type: ignore - if patterns is not None: - self.add_patterns(patterns) - self.scorer = scorer - - def __len__(self) -> int: - """The number of all patterns added to the entity ruler.""" - n_token_patterns = sum(len(p) for p in self.token_patterns.values()) - n_phrase_patterns = sum(len(p) for p in self.phrase_patterns.values()) - return n_token_patterns + n_phrase_patterns - - def __contains__(self, label: str) -> bool: - """Whether a label is present in the patterns.""" - return label in self.token_patterns or label in self.phrase_patterns - - def __call__(self, doc: Doc) -> Doc: - """Find matches in document and add them as entities. - - doc (Doc): The Doc object in the pipeline. - RETURNS (Doc): The Doc with added entities, if available. 
- - DOCS: https://spacy.io/api/entityruler#call - """ - error_handler = self.get_error_handler() - try: - matches = self.match(doc) - self.set_annotations(doc, matches) - return doc - except Exception as e: - return error_handler(self.name, self, [doc], e) - - def match(self, doc: Doc): - self._require_patterns() - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", message="\\[W036") - matches = list(self.matcher(doc)) + list(self.phrase_matcher(doc)) - - final_matches = set( - [(m_id, start, end) for m_id, start, end in matches if start != end] - ) - get_sort_key = lambda m: (m[2] - m[1], -m[1]) - final_matches = sorted(final_matches, key=get_sort_key, reverse=True) - return final_matches - - def set_annotations(self, doc, matches): - """Modify the document in place""" - entities = list(doc.ents) - new_entities = [] - seen_tokens = set() - for match_id, start, end in matches: - if any(t.ent_type for t in doc[start:end]) and not self.overwrite: - continue - # check for end - 1 here because boundaries are inclusive - if start not in seen_tokens and end - 1 not in seen_tokens: - if match_id in self._ent_ids: - label, ent_id = self._ent_ids[match_id] - span = Span(doc, start, end, label=label, span_id=ent_id) - else: - span = Span(doc, start, end, label=match_id) - new_entities.append(span) - entities = [ - e for e in entities if not (e.start < end and e.end > start) - ] - seen_tokens.update(range(start, end)) - doc.ents = entities + new_entities - - @property - def labels(self) -> Tuple[str, ...]: - """All labels present in the match patterns. - - RETURNS (set): The string labels. - - DOCS: https://spacy.io/api/entityruler#labels - """ - keys = set(self.token_patterns.keys()) - keys.update(self.phrase_patterns.keys()) - all_labels = set() - - for l in keys: - if self.ent_id_sep in l: - label, _ = self._split_label(l) - all_labels.add(label) - else: - all_labels.add(l) - return tuple(sorted(all_labels)) - - def initialize( - self, - get_examples: Callable[[], Iterable[Example]], - *, - nlp: Optional[Language] = None, - patterns: Optional[Sequence[PatternType]] = None, - ): - """Initialize the pipe for training. - - get_examples (Callable[[], Iterable[Example]]): Function that - returns a representative sample of gold-standard Example objects. - nlp (Language): The current nlp object the component is part of. - patterns Optional[Iterable[PatternType]]: The list of patterns. - - DOCS: https://spacy.io/api/entityruler#initialize - """ - self.clear() - if patterns: - self.add_patterns(patterns) # type: ignore[arg-type] - - @property - def ent_ids(self) -> Tuple[Optional[str], ...]: - """All entity ids present in the match patterns `id` properties - - RETURNS (set): The string entity ids. - - DOCS: https://spacy.io/api/entityruler#ent_ids - """ - keys = set(self.token_patterns.keys()) - keys.update(self.phrase_patterns.keys()) - all_ent_ids = set() - - for l in keys: - if self.ent_id_sep in l: - _, ent_id = self._split_label(l) - all_ent_ids.add(ent_id) - return tuple(all_ent_ids) - - @property - def patterns(self) -> List[PatternType]: - """Get all patterns that were added to the entity ruler. - - RETURNS (list): The original patterns, one dictionary per pattern. 
- - DOCS: https://spacy.io/api/entityruler#patterns - """ - all_patterns = [] - for label, patterns in self.token_patterns.items(): - for pattern in patterns: - ent_label, ent_id = self._split_label(label) - p = {"label": ent_label, "pattern": pattern} - if ent_id: - p["id"] = ent_id - all_patterns.append(p) - for label, patterns in self.phrase_patterns.items(): - for pattern in patterns: - ent_label, ent_id = self._split_label(label) - p = {"label": ent_label, "pattern": pattern.text} - if ent_id: - p["id"] = ent_id - all_patterns.append(p) - return all_patterns - - def add_patterns(self, patterns: List[PatternType]) -> None: - """Add patterns to the entity ruler. A pattern can either be a token - pattern (list of dicts) or a phrase pattern (string). For example: - {'label': 'ORG', 'pattern': 'Apple'} - {'label': 'GPE', 'pattern': [{'lower': 'san'}, {'lower': 'francisco'}]} - - patterns (list): The patterns to add. - - DOCS: https://spacy.io/api/entityruler#add_patterns - """ - - # disable the nlp components after this one in case they hadn't been initialized / deserialised yet - try: - current_index = -1 - for i, (name, pipe) in enumerate(self.nlp.pipeline): - if self == pipe: - current_index = i - break - subsequent_pipes = [pipe for pipe in self.nlp.pipe_names[current_index:]] - except ValueError: - subsequent_pipes = [] - with self.nlp.select_pipes(disable=subsequent_pipes): - token_patterns = [] - phrase_pattern_labels = [] - phrase_pattern_texts = [] - phrase_pattern_ids = [] - for entry in patterns: - if isinstance(entry["pattern"], str): - phrase_pattern_labels.append(entry["label"]) - phrase_pattern_texts.append(entry["pattern"]) - phrase_pattern_ids.append(entry.get("id")) - elif isinstance(entry["pattern"], list): - token_patterns.append(entry) - phrase_patterns = [] - for label, pattern, ent_id in zip( - phrase_pattern_labels, - self.nlp.pipe(phrase_pattern_texts), - phrase_pattern_ids, - ): - phrase_pattern = {"label": label, "pattern": pattern} - if ent_id: - phrase_pattern["id"] = ent_id - phrase_patterns.append(phrase_pattern) - for entry in token_patterns + phrase_patterns: # type: ignore[operator] - label = entry["label"] # type: ignore - if "id" in entry: - ent_label = label - label = self._create_label(label, entry["id"]) - key = self.matcher._normalize_key(label) - self._ent_ids[key] = (ent_label, entry["id"]) - pattern = entry["pattern"] # type: ignore - if isinstance(pattern, Doc): - self.phrase_patterns[label].append(pattern) - self.phrase_matcher.add(label, [pattern]) # type: ignore - elif isinstance(pattern, list): - self.token_patterns[label].append(pattern) - self.matcher.add(label, [pattern]) - else: - raise ValueError(Errors.E097.format(pattern=pattern)) - - def clear(self) -> None: - """Reset all patterns.""" - self.token_patterns = defaultdict(list) - self.phrase_patterns = defaultdict(list) - self._ent_ids = defaultdict(tuple) - self.matcher = Matcher(self.nlp.vocab, validate=self._validate) - self.phrase_matcher = PhraseMatcher( - self.nlp.vocab, attr=self.phrase_matcher_attr, validate=self._validate - ) - - def remove(self, ent_id: str) -> None: - """Remove a pattern by its ent_id if a pattern with this ent_id was added before - - ent_id (str): id of the pattern to be removed - RETURNS: None - DOCS: https://spacy.io/api/entityruler#remove - """ - label_id_pairs = [ - (label, eid) for (label, eid) in self._ent_ids.values() if eid == ent_id - ] - if not label_id_pairs: - raise ValueError( - Errors.E1024.format(attr_type="ID", label=ent_id, 
component=self.name) - ) - created_labels = [ - self._create_label(label, eid) for (label, eid) in label_id_pairs - ] - # remove the patterns from self.phrase_patterns - self.phrase_patterns = defaultdict( - list, - { - label: val - for (label, val) in self.phrase_patterns.items() - if label not in created_labels - }, - ) - # remove the patterns from self.token_pattern - self.token_patterns = defaultdict( - list, - { - label: val - for (label, val) in self.token_patterns.items() - if label not in created_labels - }, - ) - # remove the patterns from self.token_pattern - for label in created_labels: - if label in self.phrase_matcher: - self.phrase_matcher.remove(label) - else: - self.matcher.remove(label) - - def _require_patterns(self) -> None: - """Raise a warning if this component has no patterns defined.""" - if len(self) == 0: - warnings.warn(Warnings.W036.format(name=self.name)) - - def _split_label(self, label: str) -> Tuple[str, Optional[str]]: - """Split Entity label into ent_label and ent_id if it contains self.ent_id_sep - - label (str): The value of label in a pattern entry - RETURNS (tuple): ent_label, ent_id - """ - if self.ent_id_sep in label: - ent_label, ent_id = label.rsplit(self.ent_id_sep, 1) - else: - ent_label = label - ent_id = None # type: ignore - return ent_label, ent_id - - def _create_label(self, label: Any, ent_id: Any) -> str: - """Join Entity label with ent_id if the pattern has an `id` attribute - If ent_id is not a string, the label is returned as is. - - label (str): The label to set for ent.label_ - ent_id (str): The label - RETURNS (str): The ent_label joined with configured `ent_id_sep` - """ - if isinstance(ent_id, str): - label = f"{label}{self.ent_id_sep}{ent_id}" - return label - - def from_bytes( - self, patterns_bytes: bytes, *, exclude: Iterable[str] = SimpleFrozenList() - ) -> "EntityRuler": - """Load the entity ruler from a bytestring. - - patterns_bytes (bytes): The bytestring to load. - RETURNS (EntityRuler): The loaded entity ruler. - - DOCS: https://spacy.io/api/entityruler#from_bytes - """ - cfg = srsly.msgpack_loads(patterns_bytes) - self.clear() - if isinstance(cfg, dict): - self.add_patterns(cfg.get("patterns", cfg)) - self.overwrite = cfg.get("overwrite", False) - self.phrase_matcher_attr = cfg.get("phrase_matcher_attr", None) - self.phrase_matcher = PhraseMatcher( - self.nlp.vocab, attr=self.phrase_matcher_attr - ) - self.ent_id_sep = cfg.get("ent_id_sep", DEFAULT_ENT_ID_SEP) - else: - self.add_patterns(cfg) - return self - - def to_bytes(self, *, exclude: Iterable[str] = SimpleFrozenList()) -> bytes: - """Serialize the entity ruler patterns to a bytestring. - - RETURNS (bytes): The serialized patterns. - - DOCS: https://spacy.io/api/entityruler#to_bytes - """ - serial = { - "overwrite": self.overwrite, - "ent_id_sep": self.ent_id_sep, - "phrase_matcher_attr": self.phrase_matcher_attr, - "patterns": self.patterns, - } - return srsly.msgpack_dumps(serial) - - def from_disk( - self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList() - ) -> "EntityRuler": - """Load the entity ruler from a file. Expects a file containing - newline-delimited JSON (JSONL) with one entry per line. - - path (str / Path): The JSONL file to load. - RETURNS (EntityRuler): The loaded entity ruler. 
- - DOCS: https://spacy.io/api/entityruler#from_disk - """ - path = ensure_path(path) - self.clear() - depr_patterns_path = path.with_suffix(".jsonl") - if path.suffix == ".jsonl": # user provides a jsonl - if path.is_file: - patterns = srsly.read_jsonl(path) - self.add_patterns(patterns) - else: - raise ValueError(Errors.E1023.format(path=path)) - elif depr_patterns_path.is_file(): - patterns = srsly.read_jsonl(depr_patterns_path) - self.add_patterns(patterns) - elif path.is_dir(): # path is a valid directory - cfg = {} - deserializers_patterns = { - "patterns": lambda p: self.add_patterns( - srsly.read_jsonl(p.with_suffix(".jsonl")) - ) - } - deserializers_cfg = {"cfg": lambda p: cfg.update(srsly.read_json(p))} - from_disk(path, deserializers_cfg, {}) - self.overwrite = cfg.get("overwrite", False) - self.phrase_matcher_attr = cfg.get("phrase_matcher_attr") - self.ent_id_sep = cfg.get("ent_id_sep", DEFAULT_ENT_ID_SEP) - - self.phrase_matcher = PhraseMatcher( - self.nlp.vocab, attr=self.phrase_matcher_attr - ) - from_disk(path, deserializers_patterns, {}) - else: # path is not a valid directory or file - raise ValueError(Errors.E146.format(path=path)) - return self - - def to_disk( - self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList() - ) -> None: - """Save the entity ruler patterns to a directory. The patterns will be - saved as newline-delimited JSON (JSONL). - - path (str / Path): The JSONL file to save. - - DOCS: https://spacy.io/api/entityruler#to_disk - """ - path = ensure_path(path) - cfg = { - "overwrite": self.overwrite, - "phrase_matcher_attr": self.phrase_matcher_attr, - "ent_id_sep": self.ent_id_sep, - } - serializers = { - "patterns": lambda p: srsly.write_jsonl( - p.with_suffix(".jsonl"), self.patterns - ), - "cfg": lambda p: srsly.write_json(p, cfg), - } - if path.suffix == ".jsonl": # user wants to save only JSONL - srsly.write_jsonl(path, self.patterns) - else: - to_disk(path, serializers, {}) diff --git a/spacy/pipeline/span_ruler.py b/spacy/pipeline/span_ruler.py index 807a4ffe5..e39b89073 100644 --- a/spacy/pipeline/span_ruler.py +++ b/spacy/pipeline/span_ruler.py @@ -11,7 +11,7 @@ from ..language import Language from ..errors import Errors, Warnings from ..util import ensure_path, SimpleFrozenList, registry from ..tokens import Doc, Span -from ..scorer import Scorer +from ..scorer import Scorer, get_ner_prf from ..matcher import Matcher, PhraseMatcher from .. import util @@ -20,7 +20,7 @@ DEFAULT_SPANS_KEY = "ruler" @Language.factory( - "future_entity_ruler", + "entity_ruler", assigns=["doc.ents"], default_config={ "phrase_matcher_attr": None, @@ -63,6 +63,15 @@ def make_entity_ruler( ) +def entity_ruler_score(examples, **kwargs): + return get_ner_prf(examples) + + +@registry.scorers("spacy.entity_ruler_scorer.v1") +def make_entity_ruler_scorer(): + return entity_ruler_score + + @Language.factory( "span_ruler", assigns=["doc.spans"], @@ -117,7 +126,7 @@ def prioritize_new_ents_filter( ) -> List[Span]: """Merge entities and spans into one list without overlaps by allowing spans to overwrite any entities that they overlap with. Intended to - replicate the overwrite_ents=True behavior from the EntityRuler. + replicate the overwrite_ents=True behavior from the v3 EntityRuler. entities (Iterable[Span]): The entities, already filtered for overlaps. spans (Iterable[Span]): The spans to merge, may contain overlaps. 
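To make the filter's behavior concrete, here is a minimal sketch that calls it directly on one overlapping entity/span pair; it assumes a spaCy install where `prioritize_new_ents_filter` is importable from `spacy.pipeline.span_ruler` as defined above, and the text and labels are purely illustrative:

```python
# Minimal sketch: calling the filter directly on one overlapping entity/span pair.
# Assumes prioritize_new_ents_filter is importable from spacy.pipeline.span_ruler
# as defined above; the text and labels are illustrative only.
import spacy
from spacy.pipeline.span_ruler import prioritize_new_ents_filter

nlp = spacy.blank("en")
doc = nlp.make_doc("Apple is opening a store in San Francisco")

existing_ents = [doc.char_span(0, 5, label="ORG")]   # e.g. set by a statistical NER
ruler_spans = [doc.char_span(0, 5, label="FRUIT")]   # overlapping span from the ruler

merged = prioritize_new_ents_filter(existing_ents, ruler_spans)
print([(span.text, span.label_) for span in merged])
# The ruler span replaces the overlapping entity, mirroring overwrite_ents=True.
```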
@@ -148,7 +157,7 @@ def prioritize_existing_ents_filter( ) -> List[Span]: """Merge entities and spans into one list without overlaps by prioritizing existing entities. Intended to replicate the overwrite_ents=False behavior - from the EntityRuler. + from the v3 EntityRuler. entities (Iterable[Span]): The entities, already filtered for overlaps. spans (Iterable[Span]): The spans to merge, may contain overlaps. diff --git a/spacy/tests/matcher/test_phrase_matcher.py b/spacy/tests/matcher/test_phrase_matcher.py index b462b1878..20d0febb8 100644 --- a/spacy/tests/matcher/test_phrase_matcher.py +++ b/spacy/tests/matcher/test_phrase_matcher.py @@ -87,14 +87,15 @@ def test_issue4373(): @pytest.mark.issue(4651) def test_issue4651_with_phrase_matcher_attr(): - """Test that the EntityRuler PhraseMatcher is deserialized correctly using - the method from_disk when the EntityRuler argument phrase_matcher_attr is + """Test that the entity_ruler PhraseMatcher is deserialized correctly using + the method from_disk when the entity_ruler argument phrase_matcher_attr is specified. """ text = "Spacy is a python library for nlp" nlp = English() patterns = [{"label": "PYTHON_LIB", "pattern": "spacy", "id": "spaCy"}] - ruler = nlp.add_pipe("entity_ruler", config={"phrase_matcher_attr": "LOWER"}) + config = {"phrase_matcher_attr": "LOWER"} + ruler = nlp.add_pipe("entity_ruler", config=config) ruler.add_patterns(patterns) doc = nlp(text) res = [(ent.text, ent.label_, ent.ent_id_) for ent in doc.ents] @@ -102,7 +103,7 @@ def test_issue4651_with_phrase_matcher_attr(): with make_tempdir() as d: file_path = d / "entityruler" ruler.to_disk(file_path) - nlp_reloaded.add_pipe("entity_ruler").from_disk(file_path) + nlp_reloaded.add_pipe("entity_ruler", config=config).from_disk(file_path) doc_reloaded = nlp_reloaded(text) res_reloaded = [(ent.text, ent.label_, ent.ent_id_) for ent in doc_reloaded.ents] assert res == res_reloaded diff --git a/spacy/tests/pipeline/test_entity_ruler.py b/spacy/tests/pipeline/test_entity_ruler.py index 6851e2a7c..440849e84 100644 --- a/spacy/tests/pipeline/test_entity_ruler.py +++ b/spacy/tests/pipeline/test_entity_ruler.py @@ -4,7 +4,7 @@ from spacy import registry from spacy.tokens import Doc, Span from spacy.language import Language from spacy.lang.en import English -from spacy.pipeline import EntityRuler, EntityRecognizer, merge_entities +from spacy.pipeline import EntityRecognizer, merge_entities from spacy.pipeline import SpanRuler from spacy.pipeline.ner import DEFAULT_NER_MODEL from spacy.errors import MatchPatternError @@ -12,8 +12,6 @@ from spacy.tests.util import make_tempdir from thinc.api import NumpyOps, get_current_ops -ENTITY_RULERS = ["entity_ruler", "future_entity_ruler"] - @pytest.fixture def nlp(): @@ -40,13 +38,12 @@ def add_ent_component(doc): @pytest.mark.issue(3345) -@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS) -def test_issue3345(entity_ruler_factory): +def test_issue3345(): """Test case where preset entity crosses sentence boundary.""" nlp = English() doc = Doc(nlp.vocab, words=["I", "live", "in", "New", "York"]) doc[4].is_sent_start = True - ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler") + ruler = nlp.add_pipe("entity_ruler") ruler.add_patterns([{"label": "GPE", "pattern": "New York"}]) cfg = {"model": DEFAULT_NER_MODEL} model = registry.resolve(cfg, validate=True)["model"] @@ -65,15 +62,14 @@ def test_issue3345(entity_ruler_factory): @pytest.mark.issue(4849) -@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS) -def 
test_issue4849(entity_ruler_factory): +def test_issue4849(): nlp = English() patterns = [ {"label": "PERSON", "pattern": "joe biden", "id": "joe-biden"}, {"label": "PERSON", "pattern": "bernie sanders", "id": "bernie-sanders"}, ] ruler = nlp.add_pipe( - entity_ruler_factory, + "entity_ruler", name="entity_ruler", config={"phrase_matcher_attr": "LOWER"}, ) @@ -96,11 +92,10 @@ def test_issue4849(entity_ruler_factory): @pytest.mark.issue(5918) -@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS) -def test_issue5918(entity_ruler_factory): +def test_issue5918(): # Test edge case when merging entities. nlp = English() - ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler") + ruler = nlp.add_pipe("entity_ruler") patterns = [ {"label": "ORG", "pattern": "Digicon Inc"}, {"label": "ORG", "pattern": "Rotan Mosle Inc's"}, @@ -125,10 +120,9 @@ def test_issue5918(entity_ruler_factory): @pytest.mark.issue(8168) -@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS) -def test_issue8168(entity_ruler_factory): +def test_issue8168(): nlp = English() - ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler") + ruler = nlp.add_pipe("entity_ruler") patterns = [ {"label": "ORG", "pattern": "Apple"}, { @@ -148,12 +142,9 @@ def test_issue8168(entity_ruler_factory): @pytest.mark.issue(8216) -@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS) -def test_entity_ruler_fix8216(nlp, patterns, entity_ruler_factory): +def test_entity_ruler_fix8216(nlp, patterns): """Test that patterns don't get added excessively.""" - ruler = nlp.add_pipe( - entity_ruler_factory, name="entity_ruler", config={"validate": True} - ) + ruler = nlp.add_pipe("entity_ruler", config={"validate": True}) ruler.add_patterns(patterns) pattern_count = sum(len(mm) for mm in ruler.matcher._patterns.values()) assert pattern_count > 0 @@ -162,16 +153,15 @@ def test_entity_ruler_fix8216(nlp, patterns, entity_ruler_factory): assert after_count == pattern_count -@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS) -def test_entity_ruler_init(nlp, patterns, entity_ruler_factory): - ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler") +def test_entity_ruler_init(nlp, patterns): + ruler = nlp.add_pipe("entity_ruler") ruler.add_patterns(patterns) assert len(ruler) == len(patterns) assert len(ruler.labels) == 4 assert "HELLO" in ruler assert "BYE" in ruler nlp.remove_pipe("entity_ruler") - ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler") + ruler = nlp.add_pipe("entity_ruler") ruler.add_patterns(patterns) doc = nlp("hello world bye bye") assert len(doc.ents) == 2 @@ -179,23 +169,21 @@ def test_entity_ruler_init(nlp, patterns, entity_ruler_factory): assert doc.ents[1].label_ == "BYE" -@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS) -def test_entity_ruler_no_patterns_warns(nlp, entity_ruler_factory): - ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler") +def test_entity_ruler_no_patterns_warns(nlp): + ruler = nlp.add_pipe("entity_ruler") assert len(ruler) == 0 assert len(ruler.labels) == 0 nlp.remove_pipe("entity_ruler") - nlp.add_pipe(entity_ruler_factory, name="entity_ruler") + nlp.add_pipe("entity_ruler") assert nlp.pipe_names == ["entity_ruler"] with pytest.warns(UserWarning): doc = nlp("hello world bye bye") assert len(doc.ents) == 0 -@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS) -def test_entity_ruler_init_patterns(nlp, patterns, entity_ruler_factory): +def test_entity_ruler_init_patterns(nlp, patterns): # initialize with 
patterns - ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler") + ruler = nlp.add_pipe("entity_ruler") assert len(ruler.labels) == 0 ruler.initialize(lambda: [], patterns=patterns) assert len(ruler.labels) == 4 @@ -207,7 +195,7 @@ def test_entity_ruler_init_patterns(nlp, patterns, entity_ruler_factory): nlp.config["initialize"]["components"]["entity_ruler"] = { "patterns": {"@misc": "entity_ruler_patterns"} } - ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler") + ruler = nlp.add_pipe("entity_ruler") assert len(ruler.labels) == 0 nlp.initialize() assert len(ruler.labels) == 4 @@ -216,20 +204,18 @@ def test_entity_ruler_init_patterns(nlp, patterns, entity_ruler_factory): assert doc.ents[1].label_ == "BYE" -@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS) -def test_entity_ruler_init_clear(nlp, patterns, entity_ruler_factory): +def test_entity_ruler_init_clear(nlp, patterns): """Test that initialization clears patterns.""" - ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler") + ruler = nlp.add_pipe("entity_ruler") ruler.add_patterns(patterns) assert len(ruler.labels) == 4 ruler.initialize(lambda: []) assert len(ruler.labels) == 0 -@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS) -def test_entity_ruler_clear(nlp, patterns, entity_ruler_factory): +def test_entity_ruler_clear(nlp, patterns): """Test that initialization clears patterns.""" - ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler") + ruler = nlp.add_pipe("entity_ruler") ruler.add_patterns(patterns) assert len(ruler.labels) == 4 doc = nlp("hello world") @@ -241,9 +227,8 @@ def test_entity_ruler_clear(nlp, patterns, entity_ruler_factory): assert len(doc.ents) == 0 -@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS) -def test_entity_ruler_existing(nlp, patterns, entity_ruler_factory): - ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler") +def test_entity_ruler_existing(nlp, patterns): + ruler = nlp.add_pipe("entity_ruler") ruler.add_patterns(patterns) nlp.add_pipe("add_ent", before="entity_ruler") doc = nlp("OH HELLO WORLD bye bye") @@ -252,11 +237,8 @@ def test_entity_ruler_existing(nlp, patterns, entity_ruler_factory): assert doc.ents[1].label_ == "BYE" -@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS) -def test_entity_ruler_existing_overwrite(nlp, patterns, entity_ruler_factory): - ruler = nlp.add_pipe( - entity_ruler_factory, name="entity_ruler", config={"overwrite_ents": True} - ) +def test_entity_ruler_existing_overwrite(nlp, patterns): + ruler = nlp.add_pipe("entity_ruler", config={"overwrite_ents": True}) ruler.add_patterns(patterns) nlp.add_pipe("add_ent", before="entity_ruler") doc = nlp("OH HELLO WORLD bye bye") @@ -266,11 +248,8 @@ def test_entity_ruler_existing_overwrite(nlp, patterns, entity_ruler_factory): assert doc.ents[1].label_ == "BYE" -@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS) -def test_entity_ruler_existing_complex(nlp, patterns, entity_ruler_factory): - ruler = nlp.add_pipe( - entity_ruler_factory, name="entity_ruler", config={"overwrite_ents": True} - ) +def test_entity_ruler_existing_complex(nlp, patterns): + ruler = nlp.add_pipe("entity_ruler", config={"overwrite_ents": True}) ruler.add_patterns(patterns) nlp.add_pipe("add_ent", before="entity_ruler") doc = nlp("foo foo bye bye") @@ -281,11 +260,8 @@ def test_entity_ruler_existing_complex(nlp, patterns, entity_ruler_factory): assert len(doc.ents[1]) == 2 -@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS) -def 
test_entity_ruler_entity_id(nlp, patterns, entity_ruler_factory): - ruler = nlp.add_pipe( - entity_ruler_factory, name="entity_ruler", config={"overwrite_ents": True} - ) +def test_entity_ruler_entity_id(nlp, patterns): + ruler = nlp.add_pipe("entity_ruler", config={"overwrite_ents": True}) ruler.add_patterns(patterns) doc = nlp("Apple is a technology company") assert len(doc.ents) == 1 @@ -293,26 +269,23 @@ def test_entity_ruler_entity_id(nlp, patterns, entity_ruler_factory): assert doc.ents[0].ent_id_ == "a1" -@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS) -def test_entity_ruler_cfg_ent_id_sep(nlp, patterns, entity_ruler_factory): +def test_entity_ruler_cfg_ent_id_sep(nlp, patterns): config = {"overwrite_ents": True, "ent_id_sep": "**"} - ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler", config=config) + ruler = nlp.add_pipe("entity_ruler", config=config) ruler.add_patterns(patterns) doc = nlp("Apple is a technology company") - if isinstance(ruler, EntityRuler): - assert "TECH_ORG**a1" in ruler.phrase_patterns assert len(doc.ents) == 1 assert doc.ents[0].label_ == "TECH_ORG" assert doc.ents[0].ent_id_ == "a1" -@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS) -def test_entity_ruler_serialize_bytes(nlp, patterns, entity_ruler_factory): - ruler = EntityRuler(nlp, patterns=patterns) +def test_entity_ruler_serialize_bytes(nlp, patterns): + ruler = nlp.add_pipe("entity_ruler") + ruler.add_patterns(patterns) assert len(ruler) == len(patterns) assert len(ruler.labels) == 4 ruler_bytes = ruler.to_bytes() - new_ruler = EntityRuler(nlp) + new_ruler = nlp.add_pipe("entity_ruler", name="new_ruler") assert len(new_ruler) == 0 assert len(new_ruler.labels) == 0 new_ruler = new_ruler.from_bytes(ruler_bytes) @@ -324,28 +297,27 @@ def test_entity_ruler_serialize_bytes(nlp, patterns, entity_ruler_factory): assert sorted(new_ruler.labels) == sorted(ruler.labels) -@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS) -def test_entity_ruler_serialize_phrase_matcher_attr_bytes( - nlp, patterns, entity_ruler_factory -): - ruler = EntityRuler(nlp, phrase_matcher_attr="LOWER", patterns=patterns) +def test_entity_ruler_serialize_phrase_matcher_attr_bytes(nlp, patterns): + ruler = nlp.add_pipe("entity_ruler", config={"phrase_matcher_attr": "LOWER"}) + ruler.add_patterns(patterns) assert len(ruler) == len(patterns) assert len(ruler.labels) == 4 ruler_bytes = ruler.to_bytes() - new_ruler = EntityRuler(nlp) + new_ruler = nlp.add_pipe( + "entity_ruler", name="new_ruler", config={"phrase_matcher_attr": "LOWER"} + ) assert len(new_ruler) == 0 assert len(new_ruler.labels) == 0 - assert new_ruler.phrase_matcher_attr is None new_ruler = new_ruler.from_bytes(ruler_bytes) assert len(new_ruler) == len(patterns) assert len(new_ruler.labels) == 4 - assert new_ruler.phrase_matcher_attr == "LOWER" -@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS) -def test_entity_ruler_validate(nlp, entity_ruler_factory): - ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler") - validated_ruler = EntityRuler(nlp, validate=True) +def test_entity_ruler_validate(nlp): + ruler = nlp.add_pipe("entity_ruler") + validated_ruler = nlp.add_pipe( + "entity_ruler", name="validated_ruler", config={"validate": True} + ) valid_pattern = {"label": "HELLO", "pattern": [{"LOWER": "HELLO"}]} invalid_pattern = {"label": "HELLO", "pattern": [{"ASDF": "HELLO"}]} @@ -362,16 +334,15 @@ def test_entity_ruler_validate(nlp, entity_ruler_factory): validated_ruler.add_patterns([invalid_pattern]) 
-@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS) -def test_entity_ruler_properties(nlp, patterns, entity_ruler_factory): - ruler = EntityRuler(nlp, patterns=patterns, overwrite_ents=True) +def test_entity_ruler_properties(nlp, patterns): + ruler = nlp.add_pipe("entity_ruler", config={"overwrite_ents": True}) + ruler.add_patterns(patterns) assert sorted(ruler.labels) == sorted(["HELLO", "BYE", "COMPLEX", "TECH_ORG"]) - assert sorted(ruler.ent_ids) == ["a1", "a2"] + assert sorted(ruler.ids) == ["a1", "a2"] -@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS) -def test_entity_ruler_overlapping_spans(nlp, entity_ruler_factory): - ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler") +def test_entity_ruler_overlapping_spans(nlp): + ruler = nlp.add_pipe("entity_ruler") patterns = [ {"label": "FOOBAR", "pattern": "foo bar"}, {"label": "BARBAZ", "pattern": "bar baz"}, @@ -383,14 +354,13 @@ def test_entity_ruler_overlapping_spans(nlp, entity_ruler_factory): @pytest.mark.parametrize("n_process", [1, 2]) -@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS) -def test_entity_ruler_multiprocessing(nlp, n_process, entity_ruler_factory): +def test_entity_ruler_multiprocessing(nlp, n_process): if isinstance(get_current_ops, NumpyOps) or n_process < 2: texts = ["I enjoy eating Pizza Hut pizza."] patterns = [{"label": "FASTFOOD", "pattern": "Pizza Hut", "id": "1234"}] - ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler") + ruler = nlp.add_pipe("entity_ruler") ruler.add_patterns(patterns) for doc in nlp.pipe(texts, n_process=2): @@ -398,9 +368,8 @@ def test_entity_ruler_multiprocessing(nlp, n_process, entity_ruler_factory): assert ent.ent_id_ == "1234" -@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS) -def test_entity_ruler_serialize_jsonl(nlp, patterns, entity_ruler_factory): - ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler") +def test_entity_ruler_serialize_jsonl(nlp, patterns): + ruler = nlp.add_pipe("entity_ruler") ruler.add_patterns(patterns) with make_tempdir() as d: ruler.to_disk(d / "test_ruler.jsonl") @@ -409,9 +378,8 @@ def test_entity_ruler_serialize_jsonl(nlp, patterns, entity_ruler_factory): ruler.from_disk(d / "non_existing.jsonl") # read from a bad jsonl file -@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS) -def test_entity_ruler_serialize_dir(nlp, patterns, entity_ruler_factory): - ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler") +def test_entity_ruler_serialize_dir(nlp, patterns): + ruler = nlp.add_pipe("entity_ruler") ruler.add_patterns(patterns) with make_tempdir() as d: ruler.to_disk(d / "test_ruler") @@ -420,9 +388,8 @@ def test_entity_ruler_serialize_dir(nlp, patterns, entity_ruler_factory): ruler.from_disk(d / "non_existing_dir") # read from a bad directory -@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS) -def test_entity_ruler_remove_basic(nlp, entity_ruler_factory): - ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler") +def test_entity_ruler_remove_basic(nlp): + ruler = nlp.add_pipe("entity_ruler") patterns = [ {"label": "PERSON", "pattern": "Dina", "id": "dina"}, {"label": "ORG", "pattern": "ACME", "id": "acme"}, @@ -432,24 +399,16 @@ def test_entity_ruler_remove_basic(nlp, entity_ruler_factory): doc = nlp("Dina went to school") assert len(ruler.patterns) == 3 assert len(doc.ents) == 1 - if isinstance(ruler, EntityRuler): - assert "PERSON||dina" in ruler.phrase_matcher assert doc.ents[0].label_ == "PERSON" assert doc.ents[0].text == 
"Dina" - if isinstance(ruler, EntityRuler): - ruler.remove("dina") - else: - ruler.remove_by_id("dina") + ruler.remove_by_id("dina") doc = nlp("Dina went to school") assert len(doc.ents) == 0 - if isinstance(ruler, EntityRuler): - assert "PERSON||dina" not in ruler.phrase_matcher assert len(ruler.patterns) == 2 -@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS) -def test_entity_ruler_remove_same_id_multiple_patterns(nlp, entity_ruler_factory): - ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler") +def test_entity_ruler_remove_same_id_multiple_patterns(nlp): + ruler = nlp.add_pipe("entity_ruler") patterns = [ {"label": "PERSON", "pattern": "Dina", "id": "dina"}, {"label": "ORG", "pattern": "DinaCorp", "id": "dina"}, @@ -458,25 +417,15 @@ def test_entity_ruler_remove_same_id_multiple_patterns(nlp, entity_ruler_factory ruler.add_patterns(patterns) doc = nlp("Dina founded DinaCorp and ACME.") assert len(ruler.patterns) == 3 - if isinstance(ruler, EntityRuler): - assert "PERSON||dina" in ruler.phrase_matcher - assert "ORG||dina" in ruler.phrase_matcher assert len(doc.ents) == 3 - if isinstance(ruler, EntityRuler): - ruler.remove("dina") - else: - ruler.remove_by_id("dina") + ruler.remove_by_id("dina") doc = nlp("Dina founded DinaCorp and ACME.") assert len(ruler.patterns) == 1 - if isinstance(ruler, EntityRuler): - assert "PERSON||dina" not in ruler.phrase_matcher - assert "ORG||dina" not in ruler.phrase_matcher assert len(doc.ents) == 1 -@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS) -def test_entity_ruler_remove_nonexisting_pattern(nlp, entity_ruler_factory): - ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler") +def test_entity_ruler_remove_nonexisting_pattern(nlp): + ruler = nlp.add_pipe("entity_ruler") patterns = [ {"label": "PERSON", "pattern": "Dina", "id": "dina"}, {"label": "ORG", "pattern": "ACME", "id": "acme"}, @@ -491,9 +440,8 @@ def test_entity_ruler_remove_nonexisting_pattern(nlp, entity_ruler_factory): ruler.remove_by_id("nepattern") -@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS) -def test_entity_ruler_remove_several_patterns(nlp, entity_ruler_factory): - ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler") +def test_entity_ruler_remove_several_patterns(nlp): + ruler = nlp.add_pipe("entity_ruler") patterns = [ {"label": "PERSON", "pattern": "Dina", "id": "dina"}, {"label": "ORG", "pattern": "ACME", "id": "acme"}, @@ -507,27 +455,20 @@ def test_entity_ruler_remove_several_patterns(nlp, entity_ruler_factory): assert doc.ents[0].text == "Dina" assert doc.ents[1].label_ == "ORG" assert doc.ents[1].text == "ACME" - if isinstance(ruler, EntityRuler): - ruler.remove("dina") - else: - ruler.remove_by_id("dina") + ruler.remove_by_id("dina") doc = nlp("Dina founded her company ACME") assert len(ruler.patterns) == 2 assert len(doc.ents) == 1 assert doc.ents[0].label_ == "ORG" assert doc.ents[0].text == "ACME" - if isinstance(ruler, EntityRuler): - ruler.remove("acme") - else: - ruler.remove_by_id("acme") + ruler.remove_by_id("acme") doc = nlp("Dina founded her company ACME") assert len(ruler.patterns) == 1 assert len(doc.ents) == 0 -@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS) -def test_entity_ruler_remove_patterns_in_a_row(nlp, entity_ruler_factory): - ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler") +def test_entity_ruler_remove_patterns_in_a_row(nlp): + ruler = nlp.add_pipe("entity_ruler") patterns = [ {"label": "PERSON", "pattern": "Dina", "id": "dina"}, {"label": 
"ORG", "pattern": "ACME", "id": "acme"}, @@ -543,21 +484,15 @@ def test_entity_ruler_remove_patterns_in_a_row(nlp, entity_ruler_factory): assert doc.ents[1].text == "ACME" assert doc.ents[2].label_ == "DATE" assert doc.ents[2].text == "her birthday" - if isinstance(ruler, EntityRuler): - ruler.remove("dina") - ruler.remove("acme") - ruler.remove("bday") - else: - ruler.remove_by_id("dina") - ruler.remove_by_id("acme") - ruler.remove_by_id("bday") + ruler.remove_by_id("dina") + ruler.remove_by_id("acme") + ruler.remove_by_id("bday") doc = nlp("Dina went to school") assert len(doc.ents) == 0 -@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS) -def test_entity_ruler_remove_all_patterns(nlp, entity_ruler_factory): - ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler") +def test_entity_ruler_remove_all_patterns(nlp): + ruler = nlp.add_pipe("entity_ruler") patterns = [ {"label": "PERSON", "pattern": "Dina", "id": "dina"}, {"label": "ORG", "pattern": "ACME", "id": "acme"}, @@ -565,29 +500,19 @@ def test_entity_ruler_remove_all_patterns(nlp, entity_ruler_factory): ] ruler.add_patterns(patterns) assert len(ruler.patterns) == 3 - if isinstance(ruler, EntityRuler): - ruler.remove("dina") - else: - ruler.remove_by_id("dina") + ruler.remove_by_id("dina") assert len(ruler.patterns) == 2 - if isinstance(ruler, EntityRuler): - ruler.remove("acme") - else: - ruler.remove_by_id("acme") + ruler.remove_by_id("acme") assert len(ruler.patterns) == 1 - if isinstance(ruler, EntityRuler): - ruler.remove("bday") - else: - ruler.remove_by_id("bday") + ruler.remove_by_id("bday") assert len(ruler.patterns) == 0 with pytest.warns(UserWarning): doc = nlp("Dina founded her company ACME on her birthday") assert len(doc.ents) == 0 -@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS) -def test_entity_ruler_remove_and_add(nlp, entity_ruler_factory): - ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler") +def test_entity_ruler_remove_and_add(nlp): + ruler = nlp.add_pipe("entity_ruler") patterns = [{"label": "DATE", "pattern": "last time"}] ruler.add_patterns(patterns) doc = ruler( @@ -608,10 +533,7 @@ def test_entity_ruler_remove_and_add(nlp, entity_ruler_factory): assert doc.ents[0].text == "last time" assert doc.ents[1].label_ == "DATE" assert doc.ents[1].text == "this time" - if isinstance(ruler, EntityRuler): - ruler.remove("ttime") - else: - ruler.remove_by_id("ttime") + ruler.remove_by_id("ttime") doc = ruler( nlp.make_doc("I saw him last time we met, this time he brought some flowers") ) @@ -634,10 +556,7 @@ def test_entity_ruler_remove_and_add(nlp, entity_ruler_factory): ) assert len(ruler.patterns) == 3 assert len(doc.ents) == 3 - if isinstance(ruler, EntityRuler): - ruler.remove("ttime") - else: - ruler.remove_by_id("ttime") + ruler.remove_by_id("ttime") doc = ruler( nlp.make_doc( "I saw him last time we met, this time he brought some flowers, another time some chocolate." 
diff --git a/spacy/tests/serialize/test_serialize_pipeline.py b/spacy/tests/serialize/test_serialize_pipeline.py index b948bb76c..e49882441 100644 --- a/spacy/tests/serialize/test_serialize_pipeline.py +++ b/spacy/tests/serialize/test_serialize_pipeline.py @@ -8,7 +8,7 @@ import spacy from spacy import Vocab, load, registry from spacy.lang.en import English from spacy.language import Language -from spacy.pipeline import DependencyParser, EntityRecognizer, EntityRuler +from spacy.pipeline import DependencyParser, EntityRecognizer from spacy.pipeline import SentenceRecognizer, Tagger, TextCategorizer from spacy.pipeline import TrainablePipe from spacy.pipeline.dep_parser import DEFAULT_PARSER_MODEL @@ -85,58 +85,17 @@ def test_issue_3526_1(en_vocab): {"label": "TECH_ORG", "pattern": "Apple", "id": "a1"}, ] nlp = Language(vocab=en_vocab) - ruler = EntityRuler(nlp, patterns=patterns, overwrite_ents=True) + ruler = nlp.add_pipe("entity_ruler", config={"overwrite_ents": True}) + ruler.add_patterns(patterns) ruler_bytes = ruler.to_bytes() assert len(ruler) == len(patterns) assert len(ruler.labels) == 4 - assert ruler.overwrite - new_ruler = EntityRuler(nlp) + new_ruler = nlp.add_pipe( + "entity_ruler", name="new_ruler", config={"overwrite_ents": True} + ) new_ruler = new_ruler.from_bytes(ruler_bytes) assert len(new_ruler) == len(ruler) assert len(new_ruler.labels) == 4 - assert new_ruler.overwrite == ruler.overwrite - assert new_ruler.ent_id_sep == ruler.ent_id_sep - - -@pytest.mark.issue(3526) -def test_issue_3526_2(en_vocab): - patterns = [ - {"label": "HELLO", "pattern": "hello world"}, - {"label": "BYE", "pattern": [{"LOWER": "bye"}, {"LOWER": "bye"}]}, - {"label": "HELLO", "pattern": [{"ORTH": "HELLO"}]}, - {"label": "COMPLEX", "pattern": [{"ORTH": "foo", "OP": "*"}]}, - {"label": "TECH_ORG", "pattern": "Apple", "id": "a1"}, - ] - nlp = Language(vocab=en_vocab) - ruler = EntityRuler(nlp, patterns=patterns, overwrite_ents=True) - bytes_old_style = srsly.msgpack_dumps(ruler.patterns) - new_ruler = EntityRuler(nlp) - new_ruler = new_ruler.from_bytes(bytes_old_style) - assert len(new_ruler) == len(ruler) - for pattern in ruler.patterns: - assert pattern in new_ruler.patterns - assert new_ruler.overwrite is not ruler.overwrite - - -@pytest.mark.issue(3526) -def test_issue_3526_3(en_vocab): - patterns = [ - {"label": "HELLO", "pattern": "hello world"}, - {"label": "BYE", "pattern": [{"LOWER": "bye"}, {"LOWER": "bye"}]}, - {"label": "HELLO", "pattern": [{"ORTH": "HELLO"}]}, - {"label": "COMPLEX", "pattern": [{"ORTH": "foo", "OP": "*"}]}, - {"label": "TECH_ORG", "pattern": "Apple", "id": "a1"}, - ] - nlp = Language(vocab=en_vocab) - ruler = EntityRuler(nlp, patterns=patterns, overwrite_ents=True) - with make_tempdir() as tmpdir: - out_file = tmpdir / "entity_ruler" - srsly.write_jsonl(out_file.with_suffix(".jsonl"), ruler.patterns) - new_ruler = EntityRuler(nlp).from_disk(out_file) - for pattern in ruler.patterns: - assert pattern in new_ruler.patterns - assert len(new_ruler) == len(ruler) - assert new_ruler.overwrite is not ruler.overwrite @pytest.mark.issue(3526) @@ -150,16 +109,14 @@ def test_issue_3526_4(en_vocab): nlp.to_disk(tmpdir) ruler = nlp.get_pipe("entity_ruler") assert ruler.patterns == [{"label": "ORG", "pattern": "Apple"}] - assert ruler.overwrite is True nlp2 = load(tmpdir) new_ruler = nlp2.get_pipe("entity_ruler") assert new_ruler.patterns == [{"label": "ORG", "pattern": "Apple"}] - assert new_ruler.overwrite is True @pytest.mark.issue(4042) def test_issue4042(): - """Test that 
serialization of an EntityRuler before NER works fine.""" + """Test that serialization of an entity_ruler before NER works fine.""" nlp = English() # add ner pipe ner = nlp.add_pipe("ner") diff --git a/website/docs/api/entityruler.md b/website/docs/api/entityruler.md index ef7acbbf1..651c87585 100644 --- a/website/docs/api/entityruler.md +++ b/website/docs/api/entityruler.md @@ -1,13 +1,24 @@ --- title: EntityRuler -tag: class -source: spacy/pipeline/entity_ruler.py new: 2.1 teaser: 'Pipeline component for rule-based named entity recognition' api_string_name: entity_ruler api_trainable: false --- + + +As of spaCy v4, there is no separate `EntityRuler` class. The entity ruler is +implemented as a special case of the `SpanRuler` component. + +See the [migration guide](#migrating) below for differences between the v3 +`EntityRuler` and v4 `SpanRuler` implementations of the `entity_ruler` +component. + +See the [`SpanRuler`](/api/spanruler) API docs for the full API. + + + The entity ruler lets you add spans to the [`Doc.ents`](/api/doc#ents) using token-based rules or exact phrase matches. It can be combined with the statistical [`EntityRecognizer`](/api/entityrecognizer) to boost accuracy, or @@ -63,271 +74,51 @@ how the component should be configured. You can override its settings via the | `ent_id_sep` | Separator used internally for entity IDs. Defaults to `"\|\|"`. ~~str~~ | | `scorer` | The scoring method. Defaults to [`spacy.scorer.get_ner_prf`](/api/scorer#get_ner_prf). ~~Optional[Callable]~~ | -```python -%%GITHUB_SPACY/spacy/pipeline/entity_ruler.py +## Migrating from v3 {#migrating} + +### Loading patterns + +Unlike the v3 `EntityRuler`, the `SpanRuler` cannot load patterns on +initialization with `SpanRuler(patterns=patterns)` or directly from a JSONL file +path with `SpanRuler.from_disk(jsonl_path)`. Patterns should be loaded from the +JSONL file separately and then added through +[`SpanRuler.initialize`](/api/spanruler#initialize]) or +[`SpanRuler.add_patterns`](/api/spanruler#add_patterns). + +```diff + ruler = nlp.get_pipe("entity_ruler") +- ruler.from_disk("patterns.jsonl") ++ import srsly ++ patterns = srsly.read_jsonl("patterns.jsonl") ++ ruler.add_patterns(patterns) ``` -## EntityRuler.\_\_init\_\_ {#init tag="method"} +### Saving patterns -Initialize the entity ruler. If patterns are supplied here, they need to be a -list of dictionaries with a `"label"` and `"pattern"` key. A pattern can either -be a token pattern (list) or a phrase pattern (string). For example: -`{"label": "ORG", "pattern": "Apple"}`. +`SpanRuler.to_disk` always saves the full component data to a directory and does +not include an option to save the patterns to a single JSONL file. -> #### Example -> -> ```python -> # Construction via add_pipe -> ruler = nlp.add_pipe("entity_ruler") -> -> # Construction from class -> from spacy.pipeline import EntityRuler -> ruler = EntityRuler(nlp, overwrite_ents=True) -> ``` +```diff + ruler = nlp.get_pipe("entity_ruler") +- ruler.to_disk("patterns.jsonl") ++ import srsly ++ srsly.write_jsonl("patterns.jsonl", ruler.patterns) +``` -| Name | Description | -| --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `nlp` | The shared nlp object to pass the vocab to the matchers and process phrase patterns. 
~~Language~~ | -| `name` 3 | Instance name of the current pipeline component. Typically passed in automatically from the factory when the component is added. Used to disable the current entity ruler while creating phrase patterns with the nlp object. ~~str~~ | -| _keyword-only_ | | -| `phrase_matcher_attr` | Optional attribute name match on for the internal [`PhraseMatcher`](/api/phrasematcher), e.g. `LOWER` to match on the lowercase token text. Defaults to `None`. ~~Optional[Union[int, str]]~~ | -| `validate` | Whether patterns should be validated, passed to Matcher and PhraseMatcher as `validate`. Defaults to `False`. ~~bool~~ | -| `overwrite_ents` | If existing entities are present, e.g. entities added by the model, overwrite them by matches if necessary. Defaults to `False`. ~~bool~~ | -| `ent_id_sep` | Separator used internally for entity IDs. Defaults to `"\|\|"`. ~~str~~ | -| `patterns` | Optional patterns to load in on initialization. ~~Optional[List[Dict[str, Union[str, List[dict]]]]]~~ | +### Accessing token and phrase patterns -## EntityRuler.initialize {#initialize tag="method" new="3"} +The separate token patterns and phrase patterns are no longer accessible under +`ruler.token_patterns` or `ruler.phrase_patterns`. You can access the combined +patterns in their original format using the property +[`SpanRuler.patterns`](/api/spanruler#patterns). -Initialize the component with data and used before training to load in rules -from a [pattern file](/usage/rule-based-matching/#entityruler-files). This method -is typically called by [`Language.initialize`](/api/language#initialize) and -lets you customize arguments it receives via the -[`[initialize.components]`](/api/data-formats#config-initialize) block in the -config. +### Removing patterns by ID -> #### Example -> -> ```python -> entity_ruler = nlp.add_pipe("entity_ruler") -> entity_ruler.initialize(lambda: [], nlp=nlp, patterns=patterns) -> ``` -> -> ```ini -> ### config.cfg -> [initialize.components.entity_ruler] -> -> [initialize.components.entity_ruler.patterns] -> @readers = "srsly.read_jsonl.v1" -> path = "corpus/entity_ruler_patterns.jsonl -> ``` +[`SpanRuler.remove`](/api/spanruler#remove) removes by label rather than ID. To +remove by ID, use [`SpanRuler.remove_by_id`](/api/spanruler#remove_by_id): -| Name | Description | -| -------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `get_examples` | Function that returns gold-standard annotations in the form of [`Example`](/api/example) objects. Not used by the `EntityRuler`. ~~Callable[[], Iterable[Example]]~~ | -| _keyword-only_ | | -| `nlp` | The current `nlp` object. Defaults to `None`. ~~Optional[Language]~~ | -| `patterns` | The list of patterns. Defaults to `None`. ~~Optional[Sequence[Dict[str, Union[str, List[Dict[str, Any]]]]]]~~ | - -## EntityRuler.\_\len\_\_ {#len tag="method"} - -The number of all patterns added to the entity ruler. - -> #### Example -> -> ```python -> ruler = nlp.add_pipe("entity_ruler") -> assert len(ruler) == 0 -> ruler.add_patterns([{"label": "ORG", "pattern": "Apple"}]) -> assert len(ruler) == 1 -> ``` - -| Name | Description | -| ----------- | ------------------------------- | -| **RETURNS** | The number of patterns. ~~int~~ | - -## EntityRuler.\_\_contains\_\_ {#contains tag="method"} - -Whether a label is present in the patterns. 
- -> #### Example -> -> ```python -> ruler = nlp.add_pipe("entity_ruler") -> ruler.add_patterns([{"label": "ORG", "pattern": "Apple"}]) -> assert "ORG" in ruler -> assert not "PERSON" in ruler -> ``` - -| Name | Description | -| ----------- | ----------------------------------------------------- | -| `label` | The label to check. ~~str~~ | -| **RETURNS** | Whether the entity ruler contains the label. ~~bool~~ | - -## EntityRuler.\_\_call\_\_ {#call tag="method"} - -Find matches in the `Doc` and add them to the `doc.ents`. Typically, this -happens automatically after the component has been added to the pipeline using -[`nlp.add_pipe`](/api/language#add_pipe). If the entity ruler was initialized -with `overwrite_ents=True`, existing entities will be replaced if they overlap -with the matches. When matches overlap in a Doc, the entity ruler prioritizes -longer patterns over shorter, and if equal the match occuring first in the Doc -is chosen. - -> #### Example -> -> ```python -> ruler = nlp.add_pipe("entity_ruler") -> ruler.add_patterns([{"label": "ORG", "pattern": "Apple"}]) -> -> doc = nlp("A text about Apple.") -> ents = [(ent.text, ent.label_) for ent in doc.ents] -> assert ents == [("Apple", "ORG")] -> ``` - -| Name | Description | -| ----------- | -------------------------------------------------------------------- | -| `doc` | The `Doc` object to process, e.g. the `Doc` in the pipeline. ~~Doc~~ | -| **RETURNS** | The modified `Doc` with added entities, if available. ~~Doc~~ | - -## EntityRuler.add_patterns {#add_patterns tag="method"} - -Add patterns to the entity ruler. A pattern can either be a token pattern (list -of dicts) or a phrase pattern (string). For more details, see the usage guide on -[rule-based matching](/usage/rule-based-matching). - -> #### Example -> -> ```python -> patterns = [ -> {"label": "ORG", "pattern": "Apple"}, -> {"label": "GPE", "pattern": [{"lower": "san"}, {"lower": "francisco"}]} -> ] -> ruler = nlp.add_pipe("entity_ruler") -> ruler.add_patterns(patterns) -> ``` - -| Name | Description | -| ---------- | ---------------------------------------------------------------- | -| `patterns` | The patterns to add. ~~List[Dict[str, Union[str, List[dict]]]]~~ | - - -## EntityRuler.remove {#remove tag="method" new="3.2.1"} - -Remove a pattern by its ID from the entity ruler. A `ValueError` is raised if the ID does not exist. - -> #### Example -> -> ```python -> patterns = [{"label": "ORG", "pattern": "Apple", "id": "apple"}] -> ruler = nlp.add_pipe("entity_ruler") -> ruler.add_patterns(patterns) -> ruler.remove("apple") -> ``` - -| Name | Description | -| ---------- | ---------------------------------------------------------------- | -| `id` | The ID of the pattern rule. ~~str~~ | - -## EntityRuler.to_disk {#to_disk tag="method"} - -Save the entity ruler patterns to a directory. The patterns will be saved as -newline-delimited JSON (JSONL). If a file with the suffix `.jsonl` is provided, -only the patterns are saved as JSONL. If a directory name is provided, a -`patterns.jsonl` and `cfg` file with the component configuration is exported. 
- -> #### Example -> -> ```python -> ruler = nlp.add_pipe("entity_ruler") -> ruler.to_disk("/path/to/patterns.jsonl") # saves patterns only -> ruler.to_disk("/path/to/entity_ruler") # saves patterns and config -> ``` - -| Name | Description | -| ------ | -------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `path` | A path to a JSONL file or directory, which will be created if it doesn't exist. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ | - -## EntityRuler.from_disk {#from_disk tag="method"} - -Load the entity ruler from a path. Expects either a file containing -newline-delimited JSON (JSONL) with one entry per line, or a directory -containing a `patterns.jsonl` file and a `cfg` file with the component -configuration. - -> #### Example -> -> ```python -> ruler = nlp.add_pipe("entity_ruler") -> ruler.from_disk("/path/to/patterns.jsonl") # loads patterns only -> ruler.from_disk("/path/to/entity_ruler") # loads patterns and config -> ``` - -| Name | Description | -| ----------- | ------------------------------------------------------------------------------------------------------------- | -| `path` | A path to a JSONL file or directory. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ | -| **RETURNS** | The modified `EntityRuler` object. ~~EntityRuler~~ | - -## EntityRuler.to_bytes {#to_bytes tag="method"} - -Serialize the entity ruler patterns to a bytestring. - -> #### Example -> -> ```python -> ruler = nlp.add_pipe("entity_ruler") -> ruler_bytes = ruler.to_bytes() -> ``` - -| Name | Description | -| ----------- | ---------------------------------- | -| **RETURNS** | The serialized patterns. ~~bytes~~ | - -## EntityRuler.from_bytes {#from_bytes tag="method"} - -Load the pipe from a bytestring. Modifies the object in place and returns it. - -> #### Example -> -> ```python -> ruler_bytes = ruler.to_bytes() -> ruler = nlp.add_pipe("entity_ruler") -> ruler.from_bytes(ruler_bytes) -> ``` - -| Name | Description | -| ------------ | -------------------------------------------------- | -| `bytes_data` | The bytestring to load. ~~bytes~~ | -| **RETURNS** | The modified `EntityRuler` object. ~~EntityRuler~~ | - -## EntityRuler.labels {#labels tag="property"} - -All labels present in the match patterns. - -| Name | Description | -| ----------- | -------------------------------------- | -| **RETURNS** | The string labels. ~~Tuple[str, ...]~~ | - -## EntityRuler.ent_ids {#ent_ids tag="property" new="2.2.2"} - -All entity IDs present in the `id` properties of the match patterns. - -| Name | Description | -| ----------- | ----------------------------------- | -| **RETURNS** | The string IDs. ~~Tuple[str, ...]~~ | - -## EntityRuler.patterns {#patterns tag="property"} - -Get all patterns that were added to the entity ruler. - -| Name | Description | -| ----------- | ---------------------------------------------------------------------------------------- | -| **RETURNS** | The original patterns, one dictionary per pattern. ~~List[Dict[str, Union[str, dict]]]~~ | - -## Attributes {#attributes} - -| Name | Description | -| ----------------- | --------------------------------------------------------------------------------------------------------------------- | -| `matcher` | The underlying matcher used to process token patterns. ~~Matcher~~ | -| `phrase_matcher` | The underlying phrase matcher used to process phrase patterns. 
~~PhraseMatcher~~ | -| `token_patterns` | The token patterns present in the entity ruler, keyed by label. ~~Dict[str, List[Dict[str, Union[str, List[dict]]]]~~ | -| `phrase_patterns` | The phrase patterns present in the entity ruler, keyed by label. ~~Dict[str, List[Doc]]~~ | +```diff + ruler = nlp.get_pipe("entity_ruler") +- ruler.remove("id") ++ ruler.remove_by_id("id") +``` diff --git a/website/docs/api/spanruler.md b/website/docs/api/spanruler.md index b573f7c58..1339d0967 100644 --- a/website/docs/api/spanruler.md +++ b/website/docs/api/spanruler.md @@ -13,6 +13,17 @@ The span ruler lets you add spans to [`Doc.spans`](/api/doc#spans) and/or usage examples, see the docs on [rule-based span matching](/usage/rule-based-matching#spanruler). + + +As of spaCy v4, there is no separate `EntityRuler` class. The entity ruler is +implemented as a special case of the `SpanRuler` component. + +See the [migration guide](/api/entityruler#migrating) for differences between +the v3 `EntityRuler` and v4 `SpanRuler` implementations of the `entity_ruler` +component. + + + ## Assigned Attributes {#assigned-attributes} Matches will be saved to `Doc.spans[spans_key]` as a diff --git a/website/docs/usage/101/_architecture.md b/website/docs/usage/101/_architecture.md index 4ebca2756..ecc7f2fd9 100644 --- a/website/docs/usage/101/_architecture.md +++ b/website/docs/usage/101/_architecture.md @@ -41,25 +41,27 @@ components for different language processing tasks and also allows adding ![The processing pipeline](../../images/pipeline.svg) -| Name | Description | -| ----------------------------------------------- | ------------------------------------------------------------------------------------------- | -| [`AttributeRuler`](/api/attributeruler) | Set token attributes using matcher rules. | -| [`DependencyParser`](/api/dependencyparser) | Predict syntactic dependencies. | -| [`EditTreeLemmatizer`](/api/edittreelemmatizer) | Predict base forms of words. | -| [`EntityLinker`](/api/entitylinker) | Disambiguate named entities to nodes in a knowledge base. | -| [`EntityRecognizer`](/api/entityrecognizer) | Predict named entities, e.g. persons or products. | -| [`EntityRuler`](/api/entityruler) | Add entity spans to the `Doc` using token-based rules or exact phrase matches. | -| [`Lemmatizer`](/api/lemmatizer) | Determine the base forms of words using rules and lookups. | -| [`Morphologizer`](/api/morphologizer) | Predict morphological features and coarse-grained part-of-speech tags. | -| [`SentenceRecognizer`](/api/sentencerecognizer) | Predict sentence boundaries. | -| [`Sentencizer`](/api/sentencizer) | Implement rule-based sentence boundary detection that doesn't require the dependency parse. | -| [`Tagger`](/api/tagger) | Predict part-of-speech tags. | -| [`TextCategorizer`](/api/textcategorizer) | Predict categories or labels over the whole document. | -| [`Tok2Vec`](/api/tok2vec) | Apply a "token-to-vector" model and set its outputs. | -| [`Tokenizer`](/api/tokenizer) | Segment raw text and create `Doc` objects from the words. | -| [`TrainablePipe`](/api/pipe) | Class that all trainable pipeline components inherit from. | -| [`Transformer`](/api/transformer) | Use a transformer model and set its outputs. | -| [Other functions](/api/pipeline-functions) | Automatically apply something to the `Doc`, e.g. to merge spans of tokens. 
| +| Component name | Component class | Description | +| ---------------------- | ---------------------------------------------------- | ------------------------------------------------------------------------------------------- | +| `attribute_ruler` | [`AttributeRuler`](/api/attributeruler) | Set token attributes using matcher rules. | +| `entity_linker` | [`EntityLinker`](/api/entitylinker) | Disambiguate named entities to nodes in a knowledge base. | +| `entity_ruler` | [`SpanRuler`](/api/spanruler) | Add entity spans to the `Doc` using token-based rules or exact phrase matches. | +| `lemmatizer` | [`Lemmatizer`](/api/lemmatizer) | Determine the base forms of words using rules and lookups. | +| `morphologizer` | [`Morphologizer`](/api/morphologizer) | Predict morphological features and coarse-grained part-of-speech tags. | +| `ner` | [`EntityRecognizer`](/api/entityrecognizer) | Predict named entities, e.g. persons or products. | +| `parser` | [`DependencyParser`](/api/dependencyparser) | Predict syntactic dependencies. | +| `senter` | [`SentenceRecognizer`](/api/sentencerecognizer) | Predict sentence boundaries. | +| `sentencizer` | [`Sentencizer`](/api/sentencizer) | Implement rule-based sentence boundary detection that doesn't require the dependency parse. | +| `span_ruler` | [`SpanRuler`](/api/spanruler) | Add spans to the `Doc` using token-based rules or exact phrase matches. | +| `tagger` | [`Tagger`](/api/tagger) | Predict part-of-speech tags. | +| `textcat` | [`TextCategorizer`](/api/textcategorizer) | Predict exactly one category or label over a whole document. | +| `textcat_multilabel` | [`MultiLabel_TextCategorizer`](/api/textcategorizer) | Predict 0, 1 or more categories or labels over a whole document. | +| `tok2vec` | [`Tok2Vec`](/api/tok2vec) | Apply a "token-to-vector" model and set its outputs. | +| `tokenizer` | [`Tokenizer`](/api/tokenizer) | Segment raw text and create `Doc` objects from the words. | +| `trainable_lemmatizer` | [`EditTreeLemmatizer`](/api/edittreelemmatizer) | Predict base forms of words. | +| `transformer` | [`Transformer`](/api/transformer) | Use a transformer model and set its outputs. | +| - | [`TrainablePipe`](/api/pipe) | Class that all trainable pipeline components inherit from. | +| - | [Other functions](/api/pipeline-functions) | Automatically apply something to the `Doc`, e.g. to merge spans of tokens. | ### Matchers {#architecture-matchers} diff --git a/website/docs/usage/101/_pipelines.md b/website/docs/usage/101/_pipelines.md index f43219f41..3a6d67a37 100644 --- a/website/docs/usage/101/_pipelines.md +++ b/website/docs/usage/101/_pipelines.md @@ -53,9 +53,9 @@ example, a custom lemmatizer may need the part-of-speech tags assigned, so it'll only work if it's added after the tagger. The parser will respect pre-defined sentence boundaries, so if a previous component in the pipeline sets them, its dependency predictions may be different. Similarly, it matters if you add the -[`EntityRuler`](/api/entityruler) before or after the statistical entity -recognizer: if it's added before, the entity recognizer will take the existing -entities into account when making predictions. The +[`SpanRuler`](/api/spanruler) before or after the statistical entity recognizer: +if it's added before and it is writing to `doc.ents`, then the entity recognizer +will take those existing entities into account when making predictions. 
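For example, a minimal sketch of adding the ruler before the entity recognizer (this assumes the trained `en_core_web_sm` pipeline is installed; the pattern and text are illustrative):

```python
# Minimal sketch: the ruler runs before "ner", so its spans are already in
# doc.ents when the statistical entity recognizer makes its predictions.
import spacy

nlp = spacy.load("en_core_web_sm")
ruler = nlp.add_pipe("entity_ruler", before="ner")
ruler.add_patterns([{"label": "ORG", "pattern": "MyCorp Inc."}])

doc = nlp("MyCorp Inc. hired new engineers in Berlin.")
print([(ent.text, ent.label_) for ent in doc.ents])
# "MyCorp Inc." keeps the ruler's ORG label; remaining entities come from the model.
```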
The [`EntityLinker`](/api/entitylinker), which resolves named entities to knowledge base IDs, should be preceded by a pipeline component that recognizes entities such as the [`EntityRecognizer`](/api/entityrecognizer). diff --git a/website/docs/usage/processing-pipelines.md b/website/docs/usage/processing-pipelines.md index bd28810ae..2463b523f 100644 --- a/website/docs/usage/processing-pipelines.md +++ b/website/docs/usage/processing-pipelines.md @@ -303,13 +303,14 @@ available pipeline components and component functions. > ruler = nlp.add_pipe("entity_ruler") > ``` -| String name | Component | Description | +| Component name | Component class | Description | | ---------------------- | ---------------------------------------------------- | ----------------------------------------------------------------------------------------- | | `tagger` | [`Tagger`](/api/tagger) | Assign part-of-speech-tags. | | `parser` | [`DependencyParser`](/api/dependencyparser) | Assign dependency labels. | | `ner` | [`EntityRecognizer`](/api/entityrecognizer) | Assign named entities. | | `entity_linker` | [`EntityLinker`](/api/entitylinker) | Assign knowledge base IDs to named entities. Should be added after the entity recognizer. | -| `entity_ruler` | [`EntityRuler`](/api/entityruler) | Assign named entities based on pattern rules and dictionaries. | +| `span_ruler` | [`SpanRuler`](/api/spanruler) | Assign spans based on pattern rules and dictionaries. | +| `entity_ruler` | [`SpanRuler`](/api/spanruler) | Assign named entities based on pattern rules and dictionaries. | | `textcat` | [`TextCategorizer`](/api/textcategorizer) | Assign text categories: exactly one category is predicted per document. | | `textcat_multilabel` | [`MultiLabel_TextCategorizer`](/api/textcategorizer) | Assign text categories in a multi-label setting: zero, one or more labels per document. | | `lemmatizer` | [`Lemmatizer`](/api/lemmatizer) | Assign base forms to words using rules and lookups. | diff --git a/website/docs/usage/rule-based-matching.md b/website/docs/usage/rule-based-matching.md index bf1891df1..d9f551820 100644 --- a/website/docs/usage/rule-based-matching.md +++ b/website/docs/usage/rule-based-matching.md @@ -375,7 +375,7 @@ scoped quantifiers – instead, you can build those behaviors with `on_match` callbacks. | OP | Description | -|---------|------------------------------------------------------------------------| +| ------- | ---------------------------------------------------------------------- | | `!` | Negate the pattern, by requiring it to match exactly 0 times. | | `?` | Make the pattern optional, by allowing it to match 0 or 1 times. | | `+` | Require the pattern to match 1 or more times. | @@ -471,7 +471,7 @@ matches = matcher(doc) ``` A very similar logic has been implemented in the built-in -[`EntityRuler`](/api/entityruler) by the way. It also takes care of handling +[`entity_ruler`](/api/entityruler) by the way. It also takes care of handling overlapping matches, which you would otherwise have to take care of yourself. > #### Tip: Visualizing matches @@ -1270,7 +1270,7 @@ of patterns such as `{}` that match any token in the sentence. ## Rule-based entity recognition {#entityruler new="2.1"} -The [`EntityRuler`](/api/entityruler) is a component that lets you add named +The [`entity_ruler`](/api/entityruler) is a component that lets you add named entities based on pattern dictionaries, which makes it easy to combine rule-based and statistical named entity recognition for even more powerful pipelines. 
@@ -1295,13 +1295,12 @@ pattern. The entity ruler accepts two types of patterns: ### Using the entity ruler {#entityruler-usage} -The [`EntityRuler`](/api/entityruler) is a pipeline component that's typically -added via [`nlp.add_pipe`](/api/language#add_pipe). When the `nlp` object is -called on a text, it will find matches in the `doc` and add them as entities to -the `doc.ents`, using the specified pattern label as the entity label. If any -matches were to overlap, the pattern matching most tokens takes priority. If -they also happen to be equally long, then the match occurring first in the `Doc` -is chosen. +The `entity_ruler` is a pipeline component that's typically added via +[`nlp.add_pipe`](/api/language#add_pipe). When the `nlp` object is called on a +text, it will find matches in the `doc` and add them as entities to `doc.ents`, +using the specified pattern label as the entity label. If any matches were to +overlap, the pattern matching most tokens takes priority. If they also happen to +be equally long, then the match occurring first in the `Doc` is chosen. ```python ### {executable="true"} @@ -1339,7 +1338,7 @@ doc = nlp("MyCorp Inc. is a company in the U.S.") print([(ent.text, ent.label_) for ent in doc.ents]) ``` -#### Validating and debugging EntityRuler patterns {#entityruler-pattern-validation new="2.1.8"} +#### Validating and debugging entity ruler patterns {#entityruler-pattern-validation new="2.1.8"} The entity ruler can validate patterns against a JSON schema with the config setting `"validate"`. See details under @@ -1351,9 +1350,9 @@ ruler = nlp.add_pipe("entity_ruler", config={"validate": True}) ### Adding IDs to patterns {#entityruler-ent-ids new="2.2.2"} -The [`EntityRuler`](/api/entityruler) can also accept an `id` attribute for each -pattern. Using the `id` attribute allows multiple patterns to be associated with -the same entity. +The [`entity_ruler`](/api/entityruler) can also accept an `id` attribute for +each pattern. Using the `id` attribute allows multiple patterns to be associated +with the same entity. ```python ### {executable="true"} @@ -1373,10 +1372,10 @@ doc2 = nlp("Apple is opening its first big office in San Fran.") print([(ent.text, ent.label_, ent.id_) for ent in doc2.ents]) ``` -If the `id` attribute is included in the [`EntityRuler`](/api/entityruler) -patterns, the `id_` property of the matched entity is set to the `id` given -in the patterns. So in the example above it's easy to identify that "San -Francisco" and "San Fran" are both the same entity. +If the `id` attribute is included in the [`entity_ruler`](/api/entityruler) +patterns, the `id_` property of the matched entity is set to the `id` given in +the patterns. So in the example above it's easy to identify that "San Francisco" +and "San Fran" are both the same entity. ### Using pattern files {#entityruler-files} @@ -1400,13 +1399,13 @@ new_ruler = nlp.add_pipe("entity_ruler").from_disk("./patterns.jsonl") If you're using the [Prodigy](https://prodi.gy) annotation tool, you might recognize these pattern files from bootstrapping your named entity and text -classification labelling. The patterns for the `EntityRuler` follow the same +classification labelling. The patterns for the `entity_ruler` follow the same syntax, so you can use your existing Prodigy pattern files in spaCy, and vice versa. 
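A minimal sketch of that workflow under the v4 behavior described earlier: the pattern file name is a placeholder, and the patterns are read with `srsly` and added explicitly rather than loaded via `from_disk`:

```python
# Minimal sketch: load an existing JSONL pattern file (e.g. exported from Prodigy)
# and add the patterns to the ruler. "patterns.jsonl" is a placeholder path.
import srsly
import spacy

nlp = spacy.blank("en")
ruler = nlp.add_pipe("entity_ruler")
patterns = srsly.read_jsonl("patterns.jsonl")  # one {"label": ..., "pattern": ...} per line
ruler.add_patterns(patterns)
```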
-When you save out an `nlp` object that has an `EntityRuler` added to its +When you save out an `nlp` object that has an `entity_ruler` added to its pipeline, its patterns are automatically exported to the pipeline directory: ```python @@ -1429,9 +1428,9 @@ rules included! When using a large amount of **phrase patterns** (roughly > 10000) it's useful to understand how the `add_patterns` function of the entity ruler works. For -each **phrase pattern**, the EntityRuler calls the nlp object to construct a doc -object. This happens in case you try to add the EntityRuler at the end of an -existing pipeline with, for example, a POS tagger and want to extract matches +each **phrase pattern**, the entity ruler calls the nlp object to construct a +doc object. This happens in case you try to add the entity ruler at the end of +an existing pipeline with, for example, a POS tagger and want to extract matches based on the pattern's POS signature. In this case you would pass a config value of `"phrase_matcher_attr": "POS"` for the entity ruler. diff --git a/website/docs/usage/saving-loading.md b/website/docs/usage/saving-loading.md index 9a4b584a3..d2b67b199 100644 --- a/website/docs/usage/saving-loading.md +++ b/website/docs/usage/saving-loading.md @@ -193,13 +193,13 @@ the data to and from a JSON file. > #### Real-world example > -> To see custom serialization methods in action, check out the new -> [`EntityRuler`](/api/entityruler) component and its -> [source](%%GITHUB_SPACY/spacy/pipeline/entity_ruler.py). Patterns added to the +> To see custom serialization methods in action, check out the +> [`SpanRuler`](/api/spanruler) component and its +> [source](%%GITHUB_SPACY/spacy/pipeline/span_ruler.py). Patterns added to the > component will be saved to a `.jsonl` file if the pipeline is serialized to > disk, and to a bytestring if the pipeline is serialized to bytes. This allows -> saving out a pipeline with a rule-based entity recognizer and including all -> rules _with_ the component data. +> saving out a pipeline with rule-based components _with_ all the component +> data. ```python ### {highlight="16-23,25-30"} diff --git a/website/docs/usage/training.md b/website/docs/usage/training.md index 27a8bbca7..5ee148224 100644 --- a/website/docs/usage/training.md +++ b/website/docs/usage/training.md @@ -424,7 +424,7 @@ your components during training, and the most common scenarios are: 2. Update an existing **trained component** with more examples. 3. Include an existing trained component without updating it. 4. Include a non-trainable component, like a rule-based - [`EntityRuler`](/api/entityruler) or [`Sentencizer`](/api/sentencizer), or a + [`SpanRuler`](/api/spanruler) or [`Sentencizer`](/api/sentencizer), or a fully [custom component](/usage/processing-pipelines#custom-components). 
If a component block defines a `factory`, spaCy will look it up in the From 88d35450dcedd89fa739640d8a8d3e62f3643b4a Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Tue, 25 Oct 2022 14:53:18 +0200 Subject: [PATCH 71/82] Rename test helper method with non-test_ name (#11701) --- spacy/tests/test_models.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/spacy/tests/test_models.py b/spacy/tests/test_models.py index 2306cabb7..d91ed1201 100644 --- a/spacy/tests/test_models.py +++ b/spacy/tests/test_models.py @@ -23,7 +23,7 @@ def get_textcat_bow_kwargs(): def get_textcat_cnn_kwargs(): - return {"tok2vec": test_tok2vec(), "exclusive_classes": False, "nO": 13} + return {"tok2vec": make_test_tok2vec(), "exclusive_classes": False, "nO": 13} def get_all_params(model): @@ -65,7 +65,7 @@ def get_tok2vec_kwargs(): } -def test_tok2vec(): +def make_test_tok2vec(): return build_Tok2Vec_model(**get_tok2vec_kwargs()) From 8740e4341f03fe2720f50c64e2f94a339d6bd4be Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Tue, 25 Oct 2022 14:54:54 +0200 Subject: [PATCH 72/82] Update languages and version in README and website (#11694) --- README.md | 6 +++--- website/meta/languages.json | 28 ++++++++++++++++++++++++++-- 2 files changed, 29 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index d9ef83e01..abfc3da67 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ be used in real products. spaCy comes with [pretrained pipelines](https://spacy.io/models) and -currently supports tokenization and training for **60+ languages**. It features +currently supports tokenization and training for **70+ languages**. It features state-of-the-art speed and **neural network models** for tagging, parsing, **named entity recognition**, **text classification** and more, multi-task learning with pretrained **transformers** like BERT, as well as a @@ -16,7 +16,7 @@ production-ready [**training system**](https://spacy.io/usage/training) and easy model packaging, deployment and workflow management. spaCy is commercial open-source software, released under the MIT license. -💫 **Version 3.4.0 out now!** +💫 **Version 3.4 out now!** [Check out the release notes here.](https://github.com/explosion/spaCy/releases) [![Azure Pipelines](https://img.shields.io/azure-devops/build/explosion-ai/public/8/master.svg?logo=azure-pipelines&style=flat-square&label=build)](https://dev.azure.com/explosion-ai/public/_build?definitionId=8) @@ -79,7 +79,7 @@ more people can benefit from it. 
## Features -- Support for **60+ languages** +- Support for **70+ languages** - **Trained pipelines** for different languages and tasks - Multi-task learning with pretrained **transformers** like BERT - Support for pretrained **word vectors** and embeddings diff --git a/website/meta/languages.json b/website/meta/languages.json index 0028b4a5f..bd1535c90 100644 --- a/website/meta/languages.json +++ b/website/meta/languages.json @@ -4,12 +4,22 @@ "code": "af", "name": "Afrikaans" }, + { + "code": "am", + "name": "Amharic", + "has_examples": true + }, { "code": "ar", "name": "Arabic", "example": "هذه جملة", "has_examples": true }, + { + "code": "az", + "name": "Azerbaijani", + "has_examples": true + }, { "code": "bg", "name": "Bulgarian", @@ -65,7 +75,7 @@ { "code": "dsb", "name": "Lower Sorbian", - "has_examples": true + "has_examples": true }, { "code": "el", @@ -142,6 +152,11 @@ "code": "ga", "name": "Irish" }, + { + "code": "grc", + "name": "Ancient Greek", + "has_examples": true + }, { "code": "gu", "name": "Gujarati", @@ -172,7 +187,7 @@ { "code": "hsb", "name": "Upper Sorbian", - "has_examples": true + "has_examples": true }, { "code": "hu", @@ -260,6 +275,10 @@ "example": "Адамга эң кыйыны — күн сайын адам болуу", "has_examples": true }, + { + "code": "la", + "name": "Latin" + }, { "code": "lb", "name": "Luxembourgish", @@ -448,6 +467,11 @@ "example": "นี่คือประโยค", "has_examples": true }, + { + "code": "ti", + "name": "Tigrinya", + "has_examples": true + }, { "code": "tl", "name": "Tagalog" From 0a9859ba01c8a51842218e1817dff7ff784951df Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Tue, 25 Oct 2022 19:38:23 +0200 Subject: [PATCH 73/82] Reduce python 3.10 in CI to one OS (#11703) --- azure-pipelines.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 357cce835..eea07cb7a 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -76,15 +76,15 @@ jobs: # Python39Mac: # imageName: "macos-latest" # python.version: "3.9" - Python310Linux: - imageName: "ubuntu-latest" - python.version: "3.10" + # Python310Linux: + # imageName: "ubuntu-latest" + # python.version: "3.10" Python310Windows: imageName: "windows-latest" python.version: "3.10" - Python310Mac: - imageName: "macos-latest" - python.version: "3.10" + # Python310Mac: + # imageName: "macos-latest" + # python.version: "3.10" Python311Linux: imageName: 'ubuntu-latest' python.version: '3.11.0-rc.2' From a9139907a943f0cc91dac0338aa43caa38939778 Mon Sep 17 00:00:00 2001 From: Ryn Daniels <397565+ryndaniels@users.noreply.github.com> Date: Wed, 26 Oct 2022 09:15:13 +0300 Subject: [PATCH 74/82] update github actions to deal with deprecations (#11702) --- .github/workflows/autoblack.yml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/autoblack.yml b/.github/workflows/autoblack.yml index 8d0282650..3ad4cf408 100644 --- a/.github/workflows/autoblack.yml +++ b/.github/workflows/autoblack.yml @@ -12,10 +12,10 @@ jobs: if: github.repository_owner == 'explosion' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: ref: ${{ github.head_ref }} - - uses: actions/setup-python@v2 + - uses: actions/setup-python@v3 - run: pip install black - name: Auto-format code if needed run: black spacy @@ -23,10 +23,11 @@ jobs: # code and makes GitHub think the action failed - name: Check for modified files id: git-check - run: echo ::set-output name=modified::$(if git diff-index --quiet HEAD --; 
then echo "false"; else echo "true"; fi) + run: echo modified=$(if git diff-index --quiet HEAD --; then echo "false"; else echo "true"; fi) >> $GITHUB_OUTPUT + - name: Create Pull Request if: steps.git-check.outputs.modified == 'true' - uses: peter-evans/create-pull-request@v3 + uses: peter-evans/create-pull-request@v4 with: title: Auto-format code with black labels: meta From 865691d169c3be413007f0d7324e03a7aac3b3cb Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Wed, 26 Oct 2022 08:43:00 +0200 Subject: [PATCH 75/82] Adjust default attrs for textcat configs (#11698) --- spacy/pipeline/textcat.py | 4 ++-- spacy/pipeline/textcat_multilabel.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/spacy/pipeline/textcat.py b/spacy/pipeline/textcat.py index c45f819fc..59549ad99 100644 --- a/spacy/pipeline/textcat.py +++ b/spacy/pipeline/textcat.py @@ -24,8 +24,8 @@ single_label_default_config = """ [model.tok2vec.embed] @architectures = "spacy.MultiHashEmbed.v2" width = 64 -rows = [2000, 2000, 1000, 1000, 1000, 1000] -attrs = ["ORTH", "LOWER", "PREFIX", "SUFFIX", "SHAPE", "ID"] +rows = [2000, 2000, 500, 1000, 500] +attrs = ["NORM", "LOWER", "PREFIX", "SUFFIX", "SHAPE"] include_static_vectors = false [model.tok2vec.encode] diff --git a/spacy/pipeline/textcat_multilabel.py b/spacy/pipeline/textcat_multilabel.py index 493c440c3..eb83d9cb7 100644 --- a/spacy/pipeline/textcat_multilabel.py +++ b/spacy/pipeline/textcat_multilabel.py @@ -24,8 +24,8 @@ multi_label_default_config = """ [model.tok2vec.embed] @architectures = "spacy.MultiHashEmbed.v2" width = 64 -rows = [2000, 2000, 1000, 1000, 1000, 1000] -attrs = ["ORTH", "LOWER", "PREFIX", "SUFFIX", "SHAPE", "ID"] +rows = [2000, 2000, 500, 1000, 500] +attrs = ["NORM", "LOWER", "PREFIX", "SUFFIX", "SHAPE"] include_static_vectors = false [model.tok2vec.encode] From 6b78135b9e158e5bc02e39c1a73ef28bb360a44f Mon Sep 17 00:00:00 2001 From: Paul O'Leary McCann Date: Thu, 27 Oct 2022 22:08:24 +0900 Subject: [PATCH 76/82] Add warning to install widget for M1 GPUs (#11666) * Add warning to install widget for M1 GPUs * Use Thinc tracking issue instead * Update website/src/widgets/quickstart-install.js Co-authored-by: Adriane Boyd * Underline URL in warning * Update website/src/widgets/quickstart-install.js Co-authored-by: Adriane Boyd * Don't install cupy on m1 gpus Co-authored-by: Adriane Boyd --- website/src/styles/quickstart.module.sass | 3 +++ website/src/widgets/quickstart-install.js | 11 ++++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/website/src/styles/quickstart.module.sass b/website/src/styles/quickstart.module.sass index 8ad106a78..d0f9db551 100644 --- a/website/src/styles/quickstart.module.sass +++ b/website/src/styles/quickstart.module.sass @@ -149,6 +149,9 @@ & > span display: block + a + text-decoration: underline + .small font-size: var(--font-size-code) line-height: 1.65 diff --git a/website/src/widgets/quickstart-install.js b/website/src/widgets/quickstart-install.js index 0d2186acb..28dd14ecc 100644 --- a/website/src/widgets/quickstart-install.js +++ b/website/src/widgets/quickstart-install.js @@ -159,6 +159,9 @@ const QuickstartInstall = ({ id, title }) => { setters={setters} showDropdown={showDropdown} > + + # Note M1 GPU support is experimental, see Thinc issue #792 + python -m venv .env @@ -198,7 +201,13 @@ const QuickstartInstall = ({ id, title }) => { {nightly ? 
' --pre' : ''} conda install -c conda-forge spacy - + + conda install -c conda-forge cupy + + + conda install -c conda-forge cupy + + conda install -c conda-forge cupy From d61e742960ef230b423dfa157449b291a03bd119 Mon Sep 17 00:00:00 2001 From: Paul O'Leary McCann Date: Fri, 28 Oct 2022 17:25:34 +0900 Subject: [PATCH 77/82] Handle Docs with no entities in EntityLinker (#11640) * Handle docs with no entities If a whole batch contains no entities it won't make it to the model, but it's possible for individual Docs to have no entities. Before this commit, those Docs would cause an error when attempting to concatenate arrays because the dimensions didn't match. It turns out the process of preparing the Ragged at the end of the span maker forward was a little different from list2ragged, which just uses the flatten function directly. Letting list2ragged do the conversion avoids the dimension issue. This did not come up before because in NEL demo projects it's typical for data with no entities to be discarded before it reaches the NEL component. This includes a simple direct test that shows the issue and checks it's resolved. It doesn't check if there are any downstream changes, so a more complete test could be added. A full run was tested by adding an example with no entities to the Emerson sample project. * Add a blank instance to default training data in tests Rather than adding a specific test, since not failing on instances with no entities is basic functionality, it makes sense to add it to the default set. * Fix without modifying architecture If the architecture is modified this would have to be a new version, but this change isn't big enough to merit that. --- spacy/ml/models/entity_linker.py | 7 +++---- spacy/tests/pipeline/test_entity_linker.py | 22 +++++++++++++++++++++- 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/spacy/ml/models/entity_linker.py b/spacy/ml/models/entity_linker.py index 4d18d216a..299b6bb52 100644 --- a/spacy/ml/models/entity_linker.py +++ b/spacy/ml/models/entity_linker.py @@ -71,11 +71,10 @@ def span_maker_forward(model, docs: List[Doc], is_train) -> Tuple[Ragged, Callab cands.append((start_token, end_token)) candidates.append(ops.asarray2i(cands)) - candlens = ops.asarray1i([len(cands) for cands in candidates]) - candidates = ops.xp.concatenate(candidates) - outputs = Ragged(candidates, candlens) + lengths = model.ops.asarray1i([len(cands) for cands in candidates]) + out = Ragged(model.ops.flatten(candidates), lengths) # because this is just rearranging docs, the backprop does nothing - return outputs, lambda x: [] + return out, lambda x: [] @registry.misc("spacy.KBFromFile.v1") diff --git a/spacy/tests/pipeline/test_entity_linker.py b/spacy/tests/pipeline/test_entity_linker.py index 4d683acc5..99f164f15 100644 --- a/spacy/tests/pipeline/test_entity_linker.py +++ b/spacy/tests/pipeline/test_entity_linker.py @@ -9,6 +9,7 @@ from spacy.compat import pickle from spacy.kb import Candidate, InMemoryLookupKB, get_candidates, KnowledgeBase from spacy.lang.en import English from spacy.ml import load_kb +from spacy.ml.models.entity_linker import build_span_maker from spacy.pipeline import EntityLinker from spacy.pipeline.legacy import EntityLinker_v1 from spacy.pipeline.tok2vec import DEFAULT_TOK2VEC_MODEL @@ -715,7 +716,11 @@ TRAIN_DATA = [ ("Russ Cochran was a member of University of Kentucky's golf team.", {"links": {(0, 12): {"Q7381115": 0.0, "Q2146908": 1.0}}, "entities": [(0, 12, "PERSON"), (43, 51, "LOC")], - "sent_starts": [1, -1, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0]}) + "sent_starts": [1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}), + # having a blank instance shouldn't break things + ("The weather is nice today.", + {"links": {}, "entities": [], + "sent_starts": [1, -1, 0, 0, 0, 0]}) ] GOLD_entities = ["Q2146908", "Q7381115", "Q7381115", "Q2146908"] # fmt: on @@ -1196,3 +1201,18 @@ def test_threshold(meet_threshold: bool, config: Dict[str, Any]): assert len(doc.ents) == 1 assert doc.ents[0].kb_id_ == entity_id if meet_threshold else EntityLinker.NIL + + +def test_span_maker_forward_with_empty(): + """The forward pass of the span maker may have a doc with no entities.""" + nlp = English() + doc1 = nlp("a b c") + ent = doc1[0:1] + ent.label_ = "X" + doc1.ents = [ent] + # no entities + doc2 = nlp("x y z") + + # just to get a model + span_maker = build_span_maker() + span_maker([doc1, doc2], False) From d25f09468c4eca20eb464d78d35e439474ed2dbc Mon Sep 17 00:00:00 2001 From: Aaron Zipp <15341396+aaronzipp@users.noreply.github.com> Date: Mon, 31 Oct 2022 05:27:12 +0100 Subject: [PATCH 78/82] Spelling mistake in rule-based-matching.md (#11717) Changed retokenize to retokenizer --- website/docs/usage/rule-based-matching.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/usage/rule-based-matching.md b/website/docs/usage/rule-based-matching.md index f096890cb..64bbf8e7b 100644 --- a/website/docs/usage/rule-based-matching.md +++ b/website/docs/usage/rule-based-matching.md @@ -1792,7 +1792,7 @@ the entity `Span` – for example `._.orgs` or `._.prev_orgs` and > [`Doc.retokenize`](/api/doc#retokenize) context manager: > > ```python -> with doc.retokenize() as retokenize: +> with doc.retokenize() as retokenizer: > for ent in doc.ents: > retokenizer.merge(ent) > ``` From f7edd84b44a37b78d87fe6815399a576f1980b8b Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Wed, 2 Nov 2022 13:42:20 +0100 Subject: [PATCH 79/82] Switch CI to Python 3.11.0 (#11737) --- azure-pipelines.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index eea07cb7a..bf3672b8b 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -87,13 +87,13 @@ jobs: # python.version: "3.10" Python311Linux: imageName: 'ubuntu-latest' - python.version: '3.11.0-rc.2' + python.version: '3.11.0' Python311Windows: imageName: 'windows-latest' - python.version: '3.11.0-rc.2' + python.version: '3.11.0' Python311Mac: imageName: 'macos-latest' - python.version: '3.11.0-rc.2' + python.version: '3.11.0' maxParallel: 4 pool: vmImage: $(imageName) From 420b1d854be86e899088bb136f1daf23fc61ed1d Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Wed, 2 Nov 2022 15:35:04 +0100 Subject: [PATCH 80/82] Update textcat scorer threshold behavior (#11696) * Update textcat scorer threshold behavior For `textcat` (with exclusive classes) the scorer should always use a threshold of 0.0 because there should be one predicted label per doc and the numeric score for that particular label should not matter. 
* Rename to test_textcat_multilabel_threshold * Remove all uses of threshold for multi_label=False * Update Scorer.score_cats API docs * Add tests for score_cats with thresholds * Update textcat API docs * Fix types * Convert threshold back to float * Fix threshold type in docstring * Improve formatting in Scorer API docs --- spacy/pipeline/textcat.py | 7 +++-- spacy/scorer.py | 12 +++---- spacy/tests/pipeline/test_textcat.py | 6 ++-- spacy/tests/test_scorer.py | 47 ++++++++++++++++++++++++++++ website/docs/api/scorer.md | 21 +++++++------ website/docs/api/textcategorizer.md | 5 ++- 6 files changed, 73 insertions(+), 25 deletions(-) diff --git a/spacy/pipeline/textcat.py b/spacy/pipeline/textcat.py index 59549ad99..238a768ed 100644 --- a/spacy/pipeline/textcat.py +++ b/spacy/pipeline/textcat.py @@ -72,7 +72,7 @@ subword_features = true "textcat", assigns=["doc.cats"], default_config={ - "threshold": 0.5, + "threshold": 0.0, "model": DEFAULT_SINGLE_TEXTCAT_MODEL, "scorer": {"@scorers": "spacy.textcat_scorer.v1"}, }, @@ -144,7 +144,8 @@ class TextCategorizer(TrainablePipe): model (thinc.api.Model): The Thinc Model powering the pipeline component. name (str): The component instance name, used to add entries to the losses during training. - threshold (float): Cutoff to consider a prediction "positive". + threshold (float): Unused, not needed for single-label (exclusive + classes) classification. scorer (Optional[Callable]): The scoring method. Defaults to Scorer.score_cats for the attribute "cats". @@ -154,7 +155,7 @@ class TextCategorizer(TrainablePipe): self.model = model self.name = name self._rehearsal_model = None - cfg = {"labels": [], "threshold": threshold, "positive_label": None} + cfg: Dict[str, Any] = {"labels": [], "threshold": threshold, "positive_label": None} self.cfg = dict(cfg) self.scorer = scorer diff --git a/spacy/scorer.py b/spacy/scorer.py index 8cd755ac4..16fc303a0 100644 --- a/spacy/scorer.py +++ b/spacy/scorer.py @@ -446,7 +446,7 @@ class Scorer: labels (Iterable[str]): The set of possible labels. Defaults to []. multi_label (bool): Whether the attribute allows multiple labels. Defaults to True. When set to False (exclusive labels), missing - gold labels are interpreted as 0.0. + gold labels are interpreted as 0.0 and the threshold is set to 0.0. positive_label (str): The positive label for a binary task with exclusive classes. Defaults to None. threshold (float): Cutoff to consider a prediction "positive". Defaults @@ -471,6 +471,8 @@ class Scorer: """ if threshold is None: threshold = 0.5 if multi_label else 0.0 + if not multi_label: + threshold = 0.0 f_per_type = {label: PRFScore() for label in labels} auc_per_type = {label: ROCAUCScore() for label in labels} labels = set(labels) @@ -505,20 +507,18 @@ class Scorer: # Get the highest-scoring for each. 
pred_label, pred_score = max(pred_cats.items(), key=lambda it: it[1]) gold_label, gold_score = max(gold_cats.items(), key=lambda it: it[1]) - if pred_label == gold_label and pred_score >= threshold: + if pred_label == gold_label: f_per_type[pred_label].tp += 1 else: f_per_type[gold_label].fn += 1 - if pred_score >= threshold: - f_per_type[pred_label].fp += 1 + f_per_type[pred_label].fp += 1 elif gold_cats: gold_label, gold_score = max(gold_cats, key=lambda it: it[1]) if gold_score > 0: f_per_type[gold_label].fn += 1 elif pred_cats: pred_label, pred_score = max(pred_cats.items(), key=lambda it: it[1]) - if pred_score >= threshold: - f_per_type[pred_label].fp += 1 + f_per_type[pred_label].fp += 1 micro_prf = PRFScore() for label_prf in f_per_type.values(): micro_prf.tp += label_prf.tp diff --git a/spacy/tests/pipeline/test_textcat.py b/spacy/tests/pipeline/test_textcat.py index 0bb036a33..d359b77db 100644 --- a/spacy/tests/pipeline/test_textcat.py +++ b/spacy/tests/pipeline/test_textcat.py @@ -823,10 +823,10 @@ def test_textcat_loss(multi_label: bool, expected_loss: float): assert loss == expected_loss -def test_textcat_threshold(): +def test_textcat_multilabel_threshold(): # Ensure the scorer can be called with a different threshold nlp = English() - nlp.add_pipe("textcat") + nlp.add_pipe("textcat_multilabel") train_examples = [] for text, annotations in TRAIN_DATA_SINGLE_LABEL: @@ -849,7 +849,7 @@ def test_textcat_threshold(): ) pos_f = scores["cats_score"] assert scores["cats_f_per_type"]["POSITIVE"]["r"] == 1.0 - assert pos_f > macro_f + assert pos_f >= macro_f def test_textcat_multi_threshold(): diff --git a/spacy/tests/test_scorer.py b/spacy/tests/test_scorer.py index 6e15fa2de..b903f1669 100644 --- a/spacy/tests/test_scorer.py +++ b/spacy/tests/test_scorer.py @@ -474,3 +474,50 @@ def test_prf_score(): assert (a.precision, a.recall, a.fscore) == approx( (c.precision, c.recall, c.fscore) ) + + +def test_score_cats(en_tokenizer): + text = "some text" + gold_doc = en_tokenizer(text) + gold_doc.cats = {"POSITIVE": 1.0, "NEGATIVE": 0.0} + pred_doc = en_tokenizer(text) + pred_doc.cats = {"POSITIVE": 0.75, "NEGATIVE": 0.25} + example = Example(pred_doc, gold_doc) + # threshold is ignored for multi_label=False + scores1 = Scorer.score_cats( + [example], + "cats", + labels=list(gold_doc.cats.keys()), + multi_label=False, + positive_label="POSITIVE", + threshold=0.1, + ) + scores2 = Scorer.score_cats( + [example], + "cats", + labels=list(gold_doc.cats.keys()), + multi_label=False, + positive_label="POSITIVE", + threshold=0.9, + ) + assert scores1["cats_score"] == 1.0 + assert scores2["cats_score"] == 1.0 + assert scores1 == scores2 + # threshold is relevant for multi_label=True + scores = Scorer.score_cats( + [example], + "cats", + labels=list(gold_doc.cats.keys()), + multi_label=True, + threshold=0.9, + ) + assert scores["cats_macro_f"] == 0.0 + # threshold is relevant for multi_label=True + scores = Scorer.score_cats( + [example], + "cats", + labels=list(gold_doc.cats.keys()), + multi_label=True, + threshold=0.1, + ) + assert scores["cats_macro_f"] == 0.5 diff --git a/website/docs/api/scorer.md b/website/docs/api/scorer.md index ca3462aa9..9ef36e6fc 100644 --- a/website/docs/api/scorer.md +++ b/website/docs/api/scorer.md @@ -229,16 +229,17 @@ The reported `{attr}_score` depends on the classification properties: > print(scores["cats_macro_auc"]) > ``` -| Name | Description | -| ---------------- | 
-------------------------------------------------------------------------------------------------------------------------------------------------- | -| `examples` | The `Example` objects holding both the predictions and the correct gold-standard annotations. ~~Iterable[Example]~~ | -| `attr` | The attribute to score. ~~str~~ | -| _keyword-only_ | | -| `getter` | Defaults to `getattr`. If provided, `getter(doc, attr)` should return the cats for an individual `Doc`. ~~Callable[[Doc, str], Dict[str, float]]~~ | -| labels | The set of possible labels. Defaults to `[]`. ~~Iterable[str]~~ | -| `multi_label` | Whether the attribute allows multiple labels. Defaults to `True`. ~~bool~~ | -| `positive_label` | The positive label for a binary task with exclusive classes. Defaults to `None`. ~~Optional[str]~~ | -| **RETURNS** | A dictionary containing the scores, with inapplicable scores as `None`. ~~Dict[str, Optional[float]]~~ | +| Name | Description | +| ---------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `examples` | The `Example` objects holding both the predictions and the correct gold-standard annotations. ~~Iterable[Example]~~ | +| `attr` | The attribute to score. ~~str~~ | +| _keyword-only_ | | +| `getter` | Defaults to `getattr`. If provided, `getter(doc, attr)` should return the cats for an individual `Doc`. ~~Callable[[Doc, str], Dict[str, float]]~~ | +| labels | The set of possible labels. Defaults to `[]`. ~~Iterable[str]~~ | +| `multi_label` | Whether the attribute allows multiple labels. Defaults to `True`. When set to `False` (exclusive labels), missing gold labels are interpreted as `0.0` and the threshold is set to `0.0`. ~~bool~~ | +| `positive_label` | The positive label for a binary task with exclusive classes. Defaults to `None`. ~~Optional[str]~~ | +| `threshold` | Cutoff to consider a prediction "positive". Defaults to `0.5` for multi-label, and `0.0` (i.e. whatever's highest scoring) otherwise. ~~float~~ | +| **RETURNS** | A dictionary containing the scores, with inapplicable scores as `None`. ~~Dict[str, Optional[float]]~~ | ## Scorer.score_links {#score_links tag="staticmethod" new="3"} diff --git a/website/docs/api/textcategorizer.md b/website/docs/api/textcategorizer.md index 042b4ab76..f5f8706ec 100644 --- a/website/docs/api/textcategorizer.md +++ b/website/docs/api/textcategorizer.md @@ -63,7 +63,6 @@ architectures and their arguments and hyperparameters. > ```python > from spacy.pipeline.textcat import DEFAULT_SINGLE_TEXTCAT_MODEL > config = { -> "threshold": 0.5, > "model": DEFAULT_SINGLE_TEXTCAT_MODEL, > } > nlp.add_pipe("textcat", config=config) @@ -82,7 +81,7 @@ architectures and their arguments and hyperparameters. | Setting | Description | | ----------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `threshold` | Cutoff to consider a prediction "positive", relevant when printing accuracy results. ~~float~~ | +| `threshold` | Cutoff to consider a prediction "positive", relevant for `textcat_multilabel` when calculating accuracy scores. ~~float~~ | | `model` | A model instance that predicts scores for each category. Defaults to [TextCatEnsemble](/api/architectures#TextCatEnsemble). ~~Model[List[Doc], List[Floats2d]]~~ | | `scorer` | The scoring method. 
Defaults to [`Scorer.score_cats`](/api/scorer#score_cats) for the attribute `"cats"`. ~~Optional[Callable]~~ | @@ -123,7 +122,7 @@ shortcut for this and instantiate the component using its string name and | `model` | The Thinc [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. ~~Model[List[Doc], List[Floats2d]]~~ | | `name` | String name of the component instance. Used to add entries to the `losses` during training. ~~str~~ | | _keyword-only_ | | -| `threshold` | Cutoff to consider a prediction "positive", relevant when printing accuracy results. ~~float~~ | +| `threshold` | Cutoff to consider a prediction "positive", relevant for `textcat_multilabel` when calculating accuracy scores. ~~float~~ | | `scorer` | The scoring method. Defaults to [`Scorer.score_cats`](/api/scorer#score_cats) for the attribute `"cats"`. ~~Optional[Callable]~~ | ## TextCategorizer.\_\_call\_\_ {#call tag="method"} From 2fb7e4dc74bd491ecec43971b2b29b0d28efd492 Mon Sep 17 00:00:00 2001 From: Ryn Daniels <397565+ryndaniels@users.noreply.github.com> Date: Wed, 2 Nov 2022 16:36:30 +0200 Subject: [PATCH 81/82] More version updates for github action deprecation warnings (#11705) * More version updates for github action deprecation warnings * fix the deprecated set-output commands * bump explosion-bot to run on ubuntu-latest --- .github/workflows/autoblack.yml | 2 +- .github/workflows/explosionbot.yml | 6 +++--- .github/workflows/slowtests.yml | 6 +++--- .github/workflows/spacy_universe_alert.yml | 4 ++-- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/autoblack.yml b/.github/workflows/autoblack.yml index 3ad4cf408..70882c3cc 100644 --- a/.github/workflows/autoblack.yml +++ b/.github/workflows/autoblack.yml @@ -15,7 +15,7 @@ jobs: - uses: actions/checkout@v3 with: ref: ${{ github.head_ref }} - - uses: actions/setup-python@v3 + - uses: actions/setup-python@v4 - run: pip install black - name: Auto-format code if needed run: black spacy diff --git a/.github/workflows/explosionbot.yml b/.github/workflows/explosionbot.yml index d585ecd9c..6b472cd12 100644 --- a/.github/workflows/explosionbot.yml +++ b/.github/workflows/explosionbot.yml @@ -8,14 +8,14 @@ on: jobs: explosion-bot: - runs-on: ubuntu-18.04 + runs-on: ubuntu-latest steps: - name: Dump GitHub context env: GITHUB_CONTEXT: ${{ toJson(github) }} run: echo "$GITHUB_CONTEXT" - - uses: actions/checkout@v1 - - uses: actions/setup-python@v1 + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 - name: Install and run explosion-bot run: | pip install git+https://${{ secrets.EXPLOSIONBOT_TOKEN }}@github.com/explosion/explosion-bot diff --git a/.github/workflows/slowtests.yml b/.github/workflows/slowtests.yml index 38ceb18c6..f9fd3e817 100644 --- a/.github/workflows/slowtests.yml +++ b/.github/workflows/slowtests.yml @@ -14,7 +14,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v1 + uses: actions/checkout@v3 with: ref: ${{ matrix.branch }} - name: Get commits from past 24 hours @@ -23,9 +23,9 @@ jobs: today=$(date '+%Y-%m-%d %H:%M:%S') yesterday=$(date -d "yesterday" '+%Y-%m-%d %H:%M:%S') if git log --after="$yesterday" --before="$today" | grep commit ; then - echo "::set-output name=run_tests::true" + echo run_tests=true >> $GITHUB_OUTPUT else - echo "::set-output name=run_tests::false" + echo run_tests=false >> $GITHUB_OUTPUT fi - name: Trigger buildkite build diff --git a/.github/workflows/spacy_universe_alert.yml b/.github/workflows/spacy_universe_alert.yml index 
cbbf14c6e..f507e0594 100644 --- a/.github/workflows/spacy_universe_alert.yml +++ b/.github/workflows/spacy_universe_alert.yml @@ -17,8 +17,8 @@ jobs: run: | echo "$GITHUB_CONTEXT" - - uses: actions/checkout@v1 - - uses: actions/setup-python@v1 + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 - name: Install Bernadette app dependency and send an alert env: SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} From 1211552f0ec84aef0b55f834d76899ab07e2c5cc Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Thu, 3 Nov 2022 09:29:46 +0100 Subject: [PATCH 82/82] Modernize and simplify CI steps (#11738) * Use `build` instead of `python setup.py sdist` * Remove in-place build with `setup.py` * Remove `gpu` parameter and GPU tests * Keep `architecture` and `num_build_jobs` in azure steps with CI defaults * Fix use of `num_build_jobs` parameters * Remove now-unused `prefix` parameter * Test imports and CLI before installing test requirements * Remove `*.egg-info` directory in addition to source directory for an warning-free `import spacy` * Switch `thinc-apple-ops` test to python 3.11 (as most recent python that is tested across platforms) --- .github/azure-steps.yml | 70 +++++++++++++++++++---------------------- azure-pipelines.yml | 17 ---------- 2 files changed, 33 insertions(+), 54 deletions(-) diff --git a/.github/azure-steps.yml b/.github/azure-steps.yml index cc0247b3a..b2bc80dd6 100644 --- a/.github/azure-steps.yml +++ b/.github/azure-steps.yml @@ -1,9 +1,7 @@ parameters: python_version: '' - architecture: '' - prefix: '' - gpu: false - num_build_jobs: 1 + architecture: 'x64' + num_build_jobs: 2 steps: - task: UsePythonVersion@0 @@ -17,16 +15,16 @@ steps: displayName: 'Set variables' - script: | - ${{ parameters.prefix }} python -m pip install -U pip setuptools - ${{ parameters.prefix }} python -m pip install -U -r requirements.txt + python -m pip install -U build pip setuptools + python -m pip install -U -r requirements.txt displayName: "Install dependencies" - script: | - ${{ parameters.prefix }} python setup.py build_ext --inplace -j ${{ parameters.num_build_jobs }} - ${{ parameters.prefix }} python setup.py sdist --formats=gztar - displayName: "Compile and build sdist" + python -m build --sdist + displayName: "Build sdist" - - script: python -m mypy spacy + - script: | + python -m mypy spacy displayName: 'Run mypy' condition: ne(variables['python_version'], '3.6') @@ -35,35 +33,24 @@ steps: contents: "spacy" displayName: "Delete source directory" + - task: DeleteFiles@1 + inputs: + contents: "*.egg-info" + displayName: "Delete egg-info directory" + - script: | - ${{ parameters.prefix }} python -m pip freeze --exclude torch --exclude cupy-cuda110 > installed.txt - ${{ parameters.prefix }} python -m pip uninstall -y -r installed.txt + python -m pip freeze > installed.txt + python -m pip uninstall -y -r installed.txt displayName: "Uninstall all packages" - bash: | - ${{ parameters.prefix }} SDIST=$(python -c "import os;print(os.listdir('./dist')[-1])" 2>&1) - ${{ parameters.prefix }} SPACY_NUM_BUILD_JOBS=2 python -m pip install dist/$SDIST + SDIST=$(python -c "import os;print(os.listdir('./dist')[-1])" 2>&1) + SPACY_NUM_BUILD_JOBS=${{ parameters.num_build_jobs }} python -m pip install dist/$SDIST displayName: "Install from sdist" - script: | - ${{ parameters.prefix }} python -m pip install -U -r requirements.txt - displayName: "Install test requirements" - - - script: | - ${{ parameters.prefix }} python -m pip install -U cupy-cuda110 -f https://github.com/cupy/cupy/releases/v9.0.0 - 
${{ parameters.prefix }} python -m pip install "torch==1.7.1+cu110" -f https://download.pytorch.org/whl/torch_stable.html - displayName: "Install GPU requirements" - condition: eq(${{ parameters.gpu }}, true) - - - script: | - ${{ parameters.prefix }} python -m pytest --pyargs spacy -W error - displayName: "Run CPU tests" - condition: eq(${{ parameters.gpu }}, false) - - - script: | - ${{ parameters.prefix }} python -m pytest --pyargs spacy -W error -p spacy.tests.enable_gpu - displayName: "Run GPU tests" - condition: eq(${{ parameters.gpu }}, true) + python -W error -c "import spacy" + displayName: "Test import" - script: | python -m spacy download ca_core_news_sm @@ -106,13 +93,22 @@ steps: displayName: 'Test assemble CLI vectors warning' condition: eq(variables['python_version'], '3.8') + - script: | + python -m pip install -U -r requirements.txt + displayName: "Install test requirements" + + - script: | + python -m pytest --pyargs spacy -W error + displayName: "Run CPU tests" + + - script: | + python -m pip install --pre thinc-apple-ops + python -m pytest --pyargs spacy + displayName: "Run CPU tests with thinc-apple-ops" + condition: and(startsWith(variables['imageName'], 'macos'), eq(variables['python.version'], '3.11')) + - script: | python .github/validate_universe_json.py website/meta/universe.json displayName: 'Test website/meta/universe.json' condition: eq(variables['python_version'], '3.8') - - script: | - ${{ parameters.prefix }} python -m pip install --pre thinc-apple-ops - ${{ parameters.prefix }} python -m pytest --pyargs spacy - displayName: "Run CPU tests with thinc-apple-ops" - condition: and(startsWith(variables['imageName'], 'macos'), eq(variables['python.version'], '3.10')) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index bf3672b8b..3499042cb 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -101,20 +101,3 @@ jobs: - template: .github/azure-steps.yml parameters: python_version: '$(python.version)' - architecture: 'x64' - -# - job: "TestGPU" -# dependsOn: "Validate" -# strategy: -# matrix: -# Python38LinuxX64_GPU: -# python.version: '3.8' -# pool: -# name: "LinuxX64_GPU" -# steps: -# - template: .github/azure-steps.yml -# parameters: -# python_version: '$(python.version)' -# architecture: 'x64' -# gpu: true -# num_build_jobs: 24
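
For reference, a rough local equivalent of the streamlined sdist check in the steps above (run in a fresh virtual environment; the build-job count is arbitrary) might look like:

```bash
python -m pip install -U build pip setuptools
python -m build --sdist
# Install the freshly built sdist and check that spaCy imports without warnings
SDIST=$(python -c "import os; print(os.listdir('./dist')[-1])")
SPACY_NUM_BUILD_JOBS=2 python -m pip install "dist/$SDIST"
python -W error -c "import spacy"
```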