From db56600536e2d615a766fc2fc973a6cc9e0f1a52 Mon Sep 17 00:00:00 2001 From: Paul O'Leary McCann Date: Thu, 3 Nov 2022 18:52:59 +0900 Subject: [PATCH 01/55] Fix default parameters for load functions (fix #11706) (#11713) * Fix default parameters for load functions Some load functions used SimpleFrozenList() directly instead of the _DEFAULT_EMPTY_PIPES parameter. That mostly worked as intended, but the changes in #11459 check for equality using identity, not value, so a warning is incorrectly raised sometimes, as in #11706. This change just has all the load functions use the singleton value instead. * Add test that there are no warnings on module-based load This will succeed due to changes in this branch, but local tests with the latest release failed as intended. * Try reverting commit and see if CI changes There is an error in CI that is probably unrelated. Revert "Fix default parameters for load functions" This reverts commit dc46b35687e92e4793e64edb11997d44b88c6a8b. * Revert "Try reverting commit and see if CI changes" This reverts commit 2514ed07ef29851b5ac60015442a7ce44c69decc. Co-authored-by: Adriane Boyd --- .github/azure-steps.yml | 5 +++++ spacy/util.py | 12 ++++++------ 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/.github/azure-steps.yml b/.github/azure-steps.yml index b2bc80dd6..e8bd0d212 100644 --- a/.github/azure-steps.yml +++ b/.github/azure-steps.yml @@ -59,6 +59,11 @@ steps: displayName: 'Test download CLI' condition: eq(variables['python_version'], '3.8') + - script: | + python -W error -c "import ca_core_news_sm; nlp = ca_core_news_sm.load(); doc=nlp('test')" + displayName: 'Test no warnings on load (#11713)' + condition: eq(variables['python_version'], '3.8') + - script: | python -m spacy convert extra/example_data/ner_example_data/ner-token-per-line-conll2003.json . displayName: 'Test convert CLI' diff --git a/spacy/util.py b/spacy/util.py index 3034808ba..76a1e0bfa 100644 --- a/spacy/util.py +++ b/spacy/util.py @@ -443,9 +443,9 @@ def load_model_from_package( name: str, *, vocab: Union["Vocab", bool] = True, - disable: Union[str, Iterable[str]] = SimpleFrozenList(), - enable: Union[str, Iterable[str]] = SimpleFrozenList(), - exclude: Union[str, Iterable[str]] = SimpleFrozenList(), + disable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES, + enable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES, + exclude: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES, config: Union[Dict[str, Any], Config] = SimpleFrozenDict(), ) -> "Language": """Load a model from an installed package. 
@@ -619,9 +619,9 @@ def load_model_from_init_py( init_file: Union[Path, str], *, vocab: Union["Vocab", bool] = True, - disable: Union[str, Iterable[str]] = SimpleFrozenList(), - enable: Union[str, Iterable[str]] = SimpleFrozenList(), - exclude: Union[str, Iterable[str]] = SimpleFrozenList(), + disable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES, + enable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES, + exclude: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES, config: Union[Dict[str, Any], Config] = SimpleFrozenDict(), ) -> "Language": """Helper function to use in the `load()` method of a model package's From 40e1000db08858e8c928efacab8f710e027dde61 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Thu, 3 Nov 2022 11:49:08 +0100 Subject: [PATCH 02/55] Restore Doc attr getter values in Doc.to_json (#11700) --- spacy/tests/doc/test_json_doc_conversion.py | 9 +++++++ spacy/tokens/doc.pyx | 27 ++++++++++++++------- 2 files changed, 27 insertions(+), 9 deletions(-) diff --git a/spacy/tests/doc/test_json_doc_conversion.py b/spacy/tests/doc/test_json_doc_conversion.py index 19698cfb2..11a1817e6 100644 --- a/spacy/tests/doc/test_json_doc_conversion.py +++ b/spacy/tests/doc/test_json_doc_conversion.py @@ -370,3 +370,12 @@ def test_json_to_doc_validation_error(doc): doc_json.pop("tokens") with pytest.raises(ValueError): Doc(doc.vocab).from_json(doc_json, validate=True) + + +def test_to_json_underscore_doc_getters(doc): + def get_text_length(doc): + return len(doc.text) + + Doc.set_extension("text_length", getter=get_text_length) + doc_json = doc.to_json(underscore=["text_length"]) + assert doc_json["_"]["text_length"] == get_text_length(doc) diff --git a/spacy/tokens/doc.pyx b/spacy/tokens/doc.pyx index 295f91c28..f2621292c 100644 --- a/spacy/tokens/doc.pyx +++ b/spacy/tokens/doc.pyx @@ -1668,6 +1668,20 @@ cdef class Doc: if underscore: user_keys = set() + # Handle doc attributes with .get to include values from getters + # and not only values stored in user_data, for backwards + # compatibility + for attr in underscore: + if self.has_extension(attr): + if "_" not in data: + data["_"] = {} + value = self._.get(attr) + if not srsly.is_json_serializable(value): + raise ValueError(Errors.E107.format(attr=attr, value=repr(value))) + data["_"][attr] = value + user_keys.add(attr) + # Token and span attributes only include values stored in user_data + # and not values generated by getters if self.user_data: for data_key, value in self.user_data.copy().items(): if type(data_key) == tuple and len(data_key) >= 4 and data_key[0] == "._.": @@ -1678,20 +1692,15 @@ cdef class Doc: user_keys.add(attr) if not srsly.is_json_serializable(value): raise ValueError(Errors.E107.format(attr=attr, value=repr(value))) - # Check if doc attribute - if start is None: - if "_" not in data: - data["_"] = {} - data["_"][attr] = value - # Check if token attribute - elif end is None: + # Token attribute + if start is not None and end is None: if "underscore_token" not in data: data["underscore_token"] = {} if attr not in data["underscore_token"]: data["underscore_token"][attr] = [] data["underscore_token"][attr].append({"start": start, "value": value}) - # Else span attribute - else: + # Span attribute + elif start is not None and end is not None: if "underscore_span" not in data: data["underscore_span"] = {} if attr not in data["underscore_span"]: From bbf64cfc4391cccba447346badaacca4d42e583d Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 4 Nov 2022 
11:17:43 +0100 Subject: [PATCH 03/55] Auto-format code with black (#11749) Co-authored-by: explosion-bot --- spacy/pipeline/textcat.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/spacy/pipeline/textcat.py b/spacy/pipeline/textcat.py index 238a768ed..4023c4456 100644 --- a/spacy/pipeline/textcat.py +++ b/spacy/pipeline/textcat.py @@ -155,7 +155,11 @@ class TextCategorizer(TrainablePipe): self.model = model self.name = name self._rehearsal_model = None - cfg: Dict[str, Any] = {"labels": [], "threshold": threshold, "positive_label": None} + cfg: Dict[str, Any] = { + "labels": [], + "threshold": threshold, + "positive_label": None, + } self.cfg = dict(cfg) self.scorer = scorer From ea326cf47d5324cff14bef983b0da122b9f0d1ed Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Mon, 7 Nov 2022 08:11:13 +0100 Subject: [PATCH 04/55] Fix types for Span.id and Span.id_ (#11744) --- spacy/tokens/span.pyi | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/spacy/tokens/span.pyi b/spacy/tokens/span.pyi index 617e3d19d..0a6f306a6 100644 --- a/spacy/tokens/span.pyi +++ b/spacy/tokens/span.pyi @@ -117,15 +117,13 @@ class Span: end_char: int label: int kb_id: int + id: int ent_id: int ent_id_: str @property - def id(self) -> int: ... - @property - def id_(self) -> str: ... - @property def orth_(self) -> str: ... @property def lemma_(self) -> str: ... label_: str kb_id_: str + id_: str From b76222e56adb49e33d7d0471674dfe2f207b2020 Mon Sep 17 00:00:00 2001 From: Paul O'Leary McCann Date: Mon, 7 Nov 2022 16:11:55 +0900 Subject: [PATCH 05/55] Raise Typer limit (#11720) * Raise typer limit to <0.7.0 * Raise limit to <0.8.0 --- requirements.txt | 2 +- setup.cfg | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 9d6bbb2c4..d91a3b3d4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,7 +9,7 @@ murmurhash>=0.28.0,<1.1.0 wasabi>=0.9.1,<1.1.0 srsly>=2.4.3,<3.0.0 catalogue>=2.0.6,<2.1.0 -typer>=0.3.0,<0.5.0 +typer>=0.3.0,<0.8.0 pathy>=0.3.5 # Third party dependencies numpy>=1.15.0 diff --git a/setup.cfg b/setup.cfg index c2653feba..82d4d2758 100644 --- a/setup.cfg +++ b/setup.cfg @@ -51,7 +51,7 @@ install_requires = srsly>=2.4.3,<3.0.0 catalogue>=2.0.6,<2.1.0 # Third-party dependencies - typer>=0.3.0,<0.5.0 + typer>=0.3.0,<0.8.0 pathy>=0.3.5 tqdm>=4.38.0,<5.0.0 numpy>=1.15.0 From e91b47a22655c0384202f797e9d50d3660596d32 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Mon, 7 Nov 2022 10:43:34 +0100 Subject: [PATCH 06/55] Check for unsafe paths in tarfile.extractall (CVE-2007-4559) (#11746) * Adding tarfile member sanitization to extractall() * Format * Simplify and add error message * Fix import * Add comment about CVE Co-authored-by: TrellixVulnTeam --- spacy/cli/project/remote_storage.py | 19 ++++++++++++++++++- spacy/errors.py | 2 ++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/spacy/cli/project/remote_storage.py b/spacy/cli/project/remote_storage.py index 336a4bcb3..12e252b3c 100644 --- a/spacy/cli/project/remote_storage.py +++ b/spacy/cli/project/remote_storage.py @@ -10,6 +10,7 @@ from .._util import get_hash, get_checksum, download_file, ensure_pathy from ...util import make_tempdir, get_minor_version, ENV_VARS, check_bool_env_var from ...git_info import GIT_VERSION from ... 
import about +from ...errors import Errors if TYPE_CHECKING: from pathy import Pathy # noqa: F401 @@ -84,7 +85,23 @@ class RemoteStorage: with tarfile.open(tar_loc, mode=mode_string) as tar_file: # This requires that the path is added correctly, relative # to root. This is how we set things up in push() - tar_file.extractall(self.root) + + # Disallow paths outside the current directory for the tar + # file (CVE-2007-4559, directory traversal vulnerability) + def is_within_directory(directory, target): + abs_directory = os.path.abspath(directory) + abs_target = os.path.abspath(target) + prefix = os.path.commonprefix([abs_directory, abs_target]) + return prefix == abs_directory + + def safe_extract(tar, path): + for member in tar.getmembers(): + member_path = os.path.join(path, member.name) + if not is_within_directory(path, member_path): + raise ValueError(Errors.E852) + tar.extractall(path) + + safe_extract(tar_file, self.root) return url def find( diff --git a/spacy/errors.py b/spacy/errors.py index e0628819d..2f8a3996f 100644 --- a/spacy/errors.py +++ b/spacy/errors.py @@ -544,6 +544,8 @@ class Errors(metaclass=ErrorsWithCodes): "during training, make sure to include it in 'annotating components'") # New errors added in v3.x + E852 = ("The tar file pulled from the remote attempted an unsafe path " + "traversal.") E853 = ("Unsupported component factory name '{name}'. The character '.' is " "not permitted in factory names.") E854 = ("Unable to set doc.ents. Check that the 'ents_filter' does not " From 6105f20d8a10a18a0e5985d310664812198840a8 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Mon, 7 Nov 2022 13:25:40 +0100 Subject: [PATCH 07/55] Switch CI to python 3.11 (#11765) --- azure-pipelines.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 3499042cb..9c3b92f06 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -87,13 +87,13 @@ jobs: # python.version: "3.10" Python311Linux: imageName: 'ubuntu-latest' - python.version: '3.11.0' + python.version: '3.11' Python311Windows: imageName: 'windows-latest' - python.version: '3.11.0' + python.version: '3.11' Python311Mac: imageName: 'macos-latest' - python.version: '3.11.0' + python.version: '3.11' maxParallel: 4 pool: vmImage: $(imageName) From e116395f890a70447c75109026e7b37f20c142c2 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Mon, 7 Nov 2022 14:46:08 +0100 Subject: [PATCH 08/55] Add fallback in requirements check, only check once (#11735) * Add fallback in requirements check, only check once * Rename to skip_requirements_check * Update spacy/cli/project/run.py Co-authored-by: Paul O'Leary McCann Co-authored-by: Paul O'Leary McCann --- spacy/cli/project/run.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/spacy/cli/project/run.py b/spacy/cli/project/run.py index ebab7471e..638e7fab1 100644 --- a/spacy/cli/project/run.py +++ b/spacy/cli/project/run.py @@ -53,6 +53,7 @@ def project_run( force: bool = False, dry: bool = False, capture: bool = False, + skip_requirements_check: bool = False, ) -> None: """Run a named script defined in the project.yml. If the script is part of the default pipeline (defined in the "run" section), DVC is used to @@ -69,6 +70,7 @@ def project_run( sys.exit will be called with the return code. You should use capture=False when you want to turn over execution to the command, and capture=True when you want to run the command more like a function. 
+ skip_requirements_check (bool): Whether to skip the requirements check. """ config = load_project_config(project_dir, overrides=overrides) commands = {cmd["name"]: cmd for cmd in config.get("commands", [])} @@ -76,9 +78,10 @@ def project_run( validate_subcommand(list(commands.keys()), list(workflows.keys()), subcommand) req_path = project_dir / "requirements.txt" - if config.get("check_requirements", True) and os.path.exists(req_path): - with req_path.open() as requirements_file: - _check_requirements([req.replace("\n", "") for req in requirements_file]) + if not skip_requirements_check: + if config.get("check_requirements", True) and os.path.exists(req_path): + with req_path.open() as requirements_file: + _check_requirements([req.strip() for req in requirements_file]) if subcommand in workflows: msg.info(f"Running workflow '{subcommand}'") @@ -90,6 +93,7 @@ def project_run( force=force, dry=dry, capture=capture, + skip_requirements_check=True, ) else: cmd = commands[subcommand] @@ -338,6 +342,11 @@ def _check_requirements(requirements: List[str]) -> Tuple[bool, bool]: failed_pkgs_msgs.append(dnf.report()) except pkg_resources.VersionConflict as vc: conflicting_pkgs_msgs.append(vc.report()) + except Exception: + msg.warn(f"Unable to check requirement: {req} " + "Check that the requirement is formatted according to PEP " + "440, in particular that URLs are formatted as " + "'package_name @ URL'") if len(failed_pkgs_msgs) or len(conflicting_pkgs_msgs): msg.warn( From 2e3cfd758ea414497802843970666a18ed4d123e Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Tue, 8 Nov 2022 04:46:19 +0100 Subject: [PATCH 09/55] Use python 3.10 for GHA universe alert (#11768) --- .github/workflows/spacy_universe_alert.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/spacy_universe_alert.yml b/.github/workflows/spacy_universe_alert.yml index f507e0594..837aaeb33 100644 --- a/.github/workflows/spacy_universe_alert.yml +++ b/.github/workflows/spacy_universe_alert.yml @@ -19,6 +19,8 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-python@v4 + with: + python-version: '3.10' - name: Install Bernadette app dependency and send an alert env: SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} From 20bbbe3e44f14d42a4861d1399ad98d6e1707d84 Mon Sep 17 00:00:00 2001 From: Raphael Mitsch Date: Tue, 8 Nov 2022 14:58:10 +0100 Subject: [PATCH 10/55] Revert disable/disabled merging behavior (#11745) * Merge disable with disabled. Adjust warnings, errors and tests. * Replace any() with set operation. * Update spacy/tests/pipeline/test_pipe_methods.py Co-authored-by: Adriane Boyd * Update docs. * Remve reference to config entry nlp.enabled from docs. Co-authored-by: Adriane Boyd --- spacy/errors.py | 4 +- spacy/language.py | 45 ++++++++----------- spacy/tests/pipeline/test_pipe_methods.py | 18 ++++---- .../serialize/test_serialize_pipeline.py | 7 ++- website/docs/api/language.md | 24 +++++----- website/docs/api/top-level.md | 20 ++++----- website/docs/usage/processing-pipelines.md | 3 +- 7 files changed, 56 insertions(+), 65 deletions(-) diff --git a/spacy/errors.py b/spacy/errors.py index 2f8a3996f..278e5496a 100644 --- a/spacy/errors.py +++ b/spacy/errors.py @@ -212,8 +212,8 @@ class Warnings(metaclass=ErrorsWithCodes): W121 = ("Attempting to trace non-existent method '{method}' in pipe '{pipe}'") W122 = ("Couldn't trace method '{method}' in pipe '{pipe}'. 
This can happen if the pipe class " "is a Cython extension type.") - W123 = ("Argument {arg} with value {arg_value} is used instead of {config_value} as specified in the config. Be " - "aware that this might affect other components in your pipeline.") + W123 = ("Argument `enable` with value {enable} does not contain all values specified in the config option " + "`enabled` ({enabled}). Be aware that this might affect other components in your pipeline.") class Errors(metaclass=ErrorsWithCodes): diff --git a/spacy/language.py b/spacy/language.py index d391f15ab..967af1e62 100644 --- a/spacy/language.py +++ b/spacy/language.py @@ -1879,31 +1879,22 @@ class Language: if isinstance(exclude, str): exclude = [exclude] - def fetch_pipes_status(value: Iterable[str], key: str) -> Iterable[str]: - """Fetch value for `enable` or `disable` w.r.t. the specified config and passed arguments passed to - .load(). If both arguments and config specified values for this field, the passed arguments take precedence - and a warning is printed. - value (Iterable[str]): Passed value for `enable` or `disable`. - key (str): Key for field in config (either "enabled" or "disabled"). - RETURN (Iterable[str]): - """ - # We assume that no argument was passed if the value is the specified default value. - if id(value) == id(_DEFAULT_EMPTY_PIPES): - return config["nlp"].get(key, []) - else: - if len(config["nlp"].get(key, [])): - warnings.warn( - Warnings.W123.format( - arg=key[:-1], - arg_value=value, - config_value=config["nlp"][key], - ) + # `enable` should not be merged with `enabled` (the opposite is true for `disable`/`disabled`). If the config + # specifies values for `enabled` not included in `enable`, emit warning. + if id(enable) != id(_DEFAULT_EMPTY_PIPES): + enabled = config["nlp"].get("enabled", []) + if len(enabled) and not set(enabled).issubset(enable): + warnings.warn( + Warnings.W123.format( + enable=enable, + enabled=enabled, ) - return value + ) + # Ensure sets of disabled/enabled pipe names are not contradictory. disabled_pipes = cls._resolve_component_status( - fetch_pipes_status(disable, "disabled"), - fetch_pipes_status(enable, "enabled"), + list({*disable, *config["nlp"].get("disabled", [])}), + enable, config["nlp"]["pipeline"], ) nlp._disabled = set(p for p in disabled_pipes if p not in exclude) @@ -2084,10 +2075,12 @@ class Language: if enable: if isinstance(enable, str): enable = [enable] - to_disable = [ - pipe_name for pipe_name in pipe_names if pipe_name not in enable - ] - if disable and disable != to_disable: + to_disable = { + *[pipe_name for pipe_name in pipe_names if pipe_name not in enable], + *disable, + } + # If any pipe to be enabled is in to_disable, the specification is inconsistent. + if len(set(enable) & to_disable): raise ValueError(Errors.E1042.format(enable=enable, disable=disable)) return tuple(to_disable) diff --git a/spacy/tests/pipeline/test_pipe_methods.py b/spacy/tests/pipeline/test_pipe_methods.py index 14a7a36e5..4dd7bae16 100644 --- a/spacy/tests/pipeline/test_pipe_methods.py +++ b/spacy/tests/pipeline/test_pipe_methods.py @@ -615,20 +615,18 @@ def test_enable_disable_conflict_with_config(): with make_tempdir() as tmp_dir: nlp.to_disk(tmp_dir) - # Expected to fail, as config and arguments conflict. - with pytest.raises(ValueError): - spacy.load( - tmp_dir, enable=["tagger"], config={"nlp": {"disabled": ["senter"]}} - ) + # Expected to succeed, as config and arguments do not conflict. 
+ assert spacy.load( + tmp_dir, enable=["tagger"], config={"nlp": {"disabled": ["senter"]}} + ).disabled == ["senter", "sentencizer"] # Expected to succeed without warning due to the lack of a conflicting config option. spacy.load(tmp_dir, enable=["tagger"]) - # Expected to succeed with a warning, as disable=[] should override the config setting. - with pytest.warns(UserWarning): + # Expected to fail due to conflict between enable and disabled. + with pytest.raises(ValueError): spacy.load( tmp_dir, - enable=["tagger"], - disable=[], - config={"nlp": {"disabled": ["senter"]}}, + enable=["senter"], + config={"nlp": {"disabled": ["senter", "tagger"]}}, ) diff --git a/spacy/tests/serialize/test_serialize_pipeline.py b/spacy/tests/serialize/test_serialize_pipeline.py index b948bb76c..9fcf18e2d 100644 --- a/spacy/tests/serialize/test_serialize_pipeline.py +++ b/spacy/tests/serialize/test_serialize_pipeline.py @@ -404,11 +404,10 @@ def test_serialize_pipeline_disable_enable(): assert nlp3.component_names == ["ner", "tagger"] with make_tempdir() as d: nlp3.to_disk(d) - with pytest.warns(UserWarning): - nlp4 = spacy.load(d, disable=["ner"]) - assert nlp4.pipe_names == ["tagger"] + nlp4 = spacy.load(d, disable=["ner"]) + assert nlp4.pipe_names == [] assert nlp4.component_names == ["ner", "tagger"] - assert nlp4.disabled == ["ner"] + assert nlp4.disabled == ["ner", "tagger"] with make_tempdir() as d: nlp.to_disk(d) nlp5 = spacy.load(d, exclude=["tagger"]) diff --git a/website/docs/api/language.md b/website/docs/api/language.md index 767a7450a..504640d57 100644 --- a/website/docs/api/language.md +++ b/website/docs/api/language.md @@ -63,18 +63,18 @@ spaCy loads a model under the hood based on its > nlp = Language.from_config(config) > ``` -| Name | Description | -| ------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `config` | The loaded config. ~~Union[Dict[str, Any], Config]~~ | -| _keyword-only_ | | -| `vocab` | A `Vocab` object. If `True`, a vocab is created using the default language data settings. ~~Vocab~~ | -| `disable` | Name(s) of pipeline component(s) to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [`nlp.enable_pipe`](/api/language#enable_pipe). ~~Union[str, Iterable[str]]~~ | -| `enable` 3.4 | Name(s) of pipeline component(s) to [enable](/usage/processing-pipelines#disabling). All other pipes will be disabled, but can be enabled again using [`nlp.enable_pipe`](/api/language#enable_pipe). ~~Union[str, Iterable[str]]~~ | -| `exclude` | Name(s) of pipeline component(s) to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~Union[str, Iterable[str]]~~ | -| `meta` | [Meta data](/api/data-formats#meta) overrides. ~~Dict[str, Any]~~ | -| `auto_fill` | Whether to automatically fill in missing values in the config, based on defaults and function argument annotations. Defaults to `True`. ~~bool~~ | -| `validate` | Whether to validate the component config and arguments against the types expected by the factory. Defaults to `True`. ~~bool~~ | -| **RETURNS** | The initialized object. 
~~Language~~ | +| Name | Description | +| ------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `config` | The loaded config. ~~Union[Dict[str, Any], Config]~~ | +| _keyword-only_ | | +| `vocab` | A `Vocab` object. If `True`, a vocab is created using the default language data settings. ~~Vocab~~ | +| `disable` | Name(s) of pipeline component(s) to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [nlp.enable_pipe](/api/language#enable_pipe). Is merged with the config entry `nlp.disabled`. ~~Union[str, Iterable[str]]~~ | +| `enable` 3.4 | Name(s) of pipeline component(s) to [enable](/usage/processing-pipelines#disabling). All other pipes will be disabled, but can be enabled again using [nlp.enable_pipe](/api/language#enable_pipe). ~~Union[str, Iterable[str]]~~ | +| `exclude` | Name(s) of pipeline component(s) to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~Union[str, Iterable[str]]~~ | +| `meta` | [Meta data](/api/data-formats#meta) overrides. ~~Dict[str, Any]~~ | +| `auto_fill` | Whether to automatically fill in missing values in the config, based on defaults and function argument annotations. Defaults to `True`. ~~bool~~ | +| `validate` | Whether to validate the component config and arguments against the types expected by the factory. Defaults to `True`. ~~bool~~ | +| **RETURNS** | The initialized object. ~~Language~~ | ## Language.component {#component tag="classmethod" new="3"} diff --git a/website/docs/api/top-level.md b/website/docs/api/top-level.md index bc53fc868..c798f2a8d 100644 --- a/website/docs/api/top-level.md +++ b/website/docs/api/top-level.md @@ -45,16 +45,16 @@ specified separately using the new `exclude` keyword argument. > nlp = spacy.load("en_core_web_sm", exclude=["parser", "tagger"]) > ``` -| Name | Description | -| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `name` | Pipeline to load, i.e. package name or path. ~~Union[str, Path]~~ | -| _keyword-only_ | | -| `vocab` | Optional shared vocab to pass in on initialization. If `True` (default), a new `Vocab` object will be created. ~~Union[Vocab, bool]~~ | -| `disable` | Name(s) of pipeline component(s) to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [nlp.enable_pipe](/api/language#enable_pipe). ~~Union[str, Iterable[str]]~~ | -| `enable` 3.4 | Name(s) of pipeline component(s) to [enable](/usage/processing-pipelines#disabling). All other pipes will be disabled. ~~Union[str, Iterable[str]]~~ | -| `exclude` 3 | Name(s) of pipeline component(s) to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~Union[str, Iterable[str]]~~ | -| `config` 3 | Optional config overrides, either as nested dict or dict keyed by section value in dot notation, e.g. `"components.name.value"`. 
~~Union[Dict[str, Any], Config]~~ | -| **RETURNS** | A `Language` object with the loaded pipeline. ~~Language~~ | +| Name | Description | +| ------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `name` | Pipeline to load, i.e. package name or path. ~~Union[str, Path]~~ | +| _keyword-only_ | | +| `vocab` | Optional shared vocab to pass in on initialization. If `True` (default), a new `Vocab` object will be created. ~~Union[Vocab, bool]~~ | +| `disable` | Name(s) of pipeline component(s) to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [nlp.enable_pipe](/api/language#enable_pipe). Is merged with the config entry `nlp.disabled`. ~~Union[str, Iterable[str]]~~ | +| `enable` 3.4 | Name(s) of pipeline component(s) to [enable](/usage/processing-pipelines#disabling). All other pipes will be disabled. ~~Union[str, Iterable[str]]~~ | +| `exclude` 3 | Name(s) of pipeline component(s) to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~Union[str, Iterable[str]]~~ | +| `config` 3 | Optional config overrides, either as nested dict or dict keyed by section value in dot notation, e.g. `"components.name.value"`. ~~Union[Dict[str, Any], Config]~~ | +| **RETURNS** | A `Language` object with the loaded pipeline. ~~Language~~ | Essentially, `spacy.load()` is a convenience wrapper that reads the pipeline's [`config.cfg`](/api/data-formats#config), uses the language and pipeline diff --git a/website/docs/usage/processing-pipelines.md b/website/docs/usage/processing-pipelines.md index bd28810ae..0b63cdcb8 100644 --- a/website/docs/usage/processing-pipelines.md +++ b/website/docs/usage/processing-pipelines.md @@ -363,7 +363,8 @@ nlp.enable_pipe("tagger") ``` In addition to `disable`, `spacy.load()` also accepts `enable`. If `enable` is -set, all components except for those in `enable` are disabled. +set, all components except for those in `enable` are disabled. If `enable` and +`disable` conflict (i.e. the same component is included in both), an error is raised. 
```python # Load the complete pipeline, but disable all components except for tok2vec and tagger From 03eebe9d1c79d39a632876205e93f023fc096d85 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Wed, 9 Nov 2022 10:59:28 +0100 Subject: [PATCH 11/55] Update warning, add tests for project requirements check (#11777) * Update warning, add tests for project requirements check * Make warning more general for differences between PEP 508 and pip * Add tests for _check_requirements * Parameterize test --- spacy/cli/project/run.py | 5 ++--- spacy/tests/test_cli.py | 41 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+), 3 deletions(-) diff --git a/spacy/cli/project/run.py b/spacy/cli/project/run.py index 638e7fab1..5db9e14f4 100644 --- a/spacy/cli/project/run.py +++ b/spacy/cli/project/run.py @@ -344,9 +344,8 @@ def _check_requirements(requirements: List[str]) -> Tuple[bool, bool]: conflicting_pkgs_msgs.append(vc.report()) except Exception: msg.warn(f"Unable to check requirement: {req} " - "Check that the requirement is formatted according to PEP " - "440, in particular that URLs are formatted as " - "'package_name @ URL'") + "Checks are currently limited to requirement specifiers " + "(PEP 508)") if len(failed_pkgs_msgs) or len(conflicting_pkgs_msgs): msg.warn( diff --git a/spacy/tests/test_cli.py b/spacy/tests/test_cli.py index 838e00369..8225e14f1 100644 --- a/spacy/tests/test_cli.py +++ b/spacy/tests/test_cli.py @@ -1,5 +1,6 @@ import os import math +import pkg_resources from random import sample from typing import Counter @@ -25,6 +26,7 @@ from spacy.cli.download import get_compatibility, get_version from spacy.cli.init_config import RECOMMENDATIONS, init_config, fill_config from spacy.cli.package import get_third_party_dependencies from spacy.cli.package import _is_permitted_package_name +from spacy.cli.project.run import _check_requirements from spacy.cli.validate import get_model_pkgs from spacy.lang.en import English from spacy.lang.nl import Dutch @@ -855,3 +857,42 @@ def test_span_length_freq_dist_output_must_be_correct(): span_freqs = _get_spans_length_freq_dist(sample_span_lengths, threshold) assert sum(span_freqs.values()) >= threshold assert list(span_freqs.keys()) == [3, 1, 4, 5, 2] + + +@pytest.mark.parametrize( + "reqs,output", + [ + [ + """ + spacy + + # comment + + thinc""", + (False, False), + ], + [ + """# comment + --some-flag + spacy""", + (False, False), + ], + [ + """# comment + --some-flag + spacy; python_version >= '3.6'""", + (False, False), + ], + [ + """# comment + spacyunknowndoesnotexist12345""", + (True, False), + ], + ], +) +def test_project_check_requirements(reqs, output): + # excessive guard against unlikely package name + try: + pkg_resources.require("spacyunknowndoesnotexist12345") + except pkg_resources.DistributionNotFound: + assert output == _check_requirements([req.strip() for req in reqs.split("\n")]) From 322b5dc1df7031139780963cebaa081a75384834 Mon Sep 17 00:00:00 2001 From: Jacobo Myerston <43222279+jmyerston@users.noreply.github.com> Date: Wed, 9 Nov 2022 20:21:20 -0800 Subject: [PATCH 12/55] Add greCy to Universe (#11774) * Update universe.json * Update universe.json fixes Github value --- website/meta/universe.json | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/website/meta/universe.json b/website/meta/universe.json index d7c99956b..fa765f640 100644 --- a/website/meta/universe.json +++ b/website/meta/universe.json @@ -1,5 +1,31 @@ { "resources": [ + { + "id": "grecy", + "title": "greCy", + 
"slogan": "Ancient Greek pipelines for spaCy", + "description": "greCy offers state-of-the-art pipelines for ancient Greek NLP. The repository makes language models available in various sizes, some of them containing floret word vectors and a BERT transformer layer.", + "github": "jmyerston/greCy", + "code_example": [ + "import spacy", + "#After installing the grc_ud_proiel_trf wheel package from the greCy repository", + "", + "nlp = spacy.load('grc_ud_proiel_trf')", + "doc = nlp('δοκῶ μοι περὶ ὧν πυνθάνεσθε οὐκ ἀμελέτητος εἶναι.')", + "", + "for token in doc:", + " print(token.text, token.norm_, token.lemma_, token.pos_, token.tag_)" + ], + "code_language": "python", + "author": "Jacobo Myerston", + "author_links": { + "twitter": "@jcbmyrstn", + "github": "jmyerston", + "website": "https://huggingface.co/spaces/Jacobo/syntax" + }, + "category": ["pipeline", "research"], + "tags": ["ancient Greek"] + }, { "id": "spacy-cleaner", "title": "spacy-cleaner", From 188a7d00eb552faaa70ba6ee3032757eecefbb5a Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 11 Nov 2022 09:58:31 +0100 Subject: [PATCH 13/55] Auto-format code with black (#11792) Co-authored-by: explosion-bot --- spacy/cli/project/run.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/spacy/cli/project/run.py b/spacy/cli/project/run.py index 5db9e14f4..a109c4a5a 100644 --- a/spacy/cli/project/run.py +++ b/spacy/cli/project/run.py @@ -343,9 +343,11 @@ def _check_requirements(requirements: List[str]) -> Tuple[bool, bool]: except pkg_resources.VersionConflict as vc: conflicting_pkgs_msgs.append(vc.report()) except Exception: - msg.warn(f"Unable to check requirement: {req} " - "Checks are currently limited to requirement specifiers " - "(PEP 508)") + msg.warn( + f"Unable to check requirement: {req} " + "Checks are currently limited to requirement specifiers " + "(PEP 508)" + ) if len(failed_pkgs_msgs) or len(conflicting_pkgs_msgs): msg.warn( From 3478ff1eb0fd57c48a332e7787efe6ea47492e13 Mon Sep 17 00:00:00 2001 From: Edward <43848523+thomashacker@users.noreply.github.com> Date: Mon, 14 Nov 2022 09:41:01 +0100 Subject: [PATCH 14/55] remove new v2 tags (#11780) --- website/README.md | 4 +- website/docs/api/cli.md | 81 ++++++------ website/docs/api/doc.md | 50 ++++---- website/docs/api/language.md | 72 +++++------ website/docs/api/lexeme.md | 82 ++++++------ website/docs/api/matcher.md | 14 +-- website/docs/api/phrasematcher.md | 10 +- website/docs/api/span.md | 62 +++++----- website/docs/api/token.md | 144 +++++++++++----------- website/docs/api/top-level.md | 52 ++++---- website/docs/api/vocab.md | 22 ++-- website/docs/usage/rule-based-matching.md | 6 +- website/docs/usage/saving-loading.md | 12 +- 13 files changed, 305 insertions(+), 306 deletions(-) diff --git a/website/README.md b/website/README.md index db050cf03..66bc20ad9 100644 --- a/website/README.md +++ b/website/README.md @@ -155,7 +155,7 @@ import Tag from 'components/tag' > ```jsx > method -> 2.1 +> 4 > tagger, parser > ``` @@ -170,7 +170,7 @@ installed. -method 2 tagger, +method 4 tagger, parser diff --git a/website/docs/api/cli.md b/website/docs/api/cli.md index fc2c46022..024450920 100644 --- a/website/docs/api/cli.md +++ b/website/docs/api/cli.md @@ -53,7 +53,7 @@ $ python -m spacy download [model] [--direct] [--sdist] [pip_args] | `--direct`, `-D` | Force direct download of exact package version. 
~~bool (flag)~~ | | `--sdist`, `-S` 3 | Download the source package (`.tar.gz` archive) instead of the default pre-built binary wheel. ~~bool (flag)~~ | | `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | -| pip args 2.1 | Additional installation options to be passed to `pip install` when installing the pipeline package. For example, `--user` to install to the user home directory or `--no-deps` to not install package dependencies. ~~Any (option/flag)~~ | +| pip args | Additional installation options to be passed to `pip install` when installing the pipeline package. For example, `--user` to install to the user home directory or `--no-deps` to not install package dependencies. ~~Any (option/flag)~~ | | **CREATES** | The installed pipeline package in your `site-packages` directory. | ## info {#info tag="command"} @@ -77,15 +77,15 @@ $ python -m spacy info [--markdown] [--silent] [--exclude] $ python -m spacy info [model] [--markdown] [--silent] [--exclude] ``` -| Name | Description | -| ------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------- | -| `model` | A trained pipeline, i.e. package name or path (optional). ~~Optional[str] \(option)~~ | -| `--markdown`, `-md` | Print information as Markdown. ~~bool (flag)~~ | -| `--silent`, `-s` 2.0.12 | Don't print anything, just return the values. ~~bool (flag)~~ | -| `--exclude`, `-e` | Comma-separated keys to exclude from the print-out. Defaults to `"labels"`. ~~Optional[str]~~ | -| `--url`, `-u` 3.5.0 | Print the URL to download the most recent compatible version of the pipeline. Requires a pipeline name. ~~bool (flag)~~ | -| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | -| **PRINTS** | Information about your spaCy installation. | +| Name | Description | +| -------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | +| `model` | A trained pipeline, i.e. package name or path (optional). ~~Optional[str] \(option)~~ | +| `--markdown`, `-md` | Print information as Markdown. ~~bool (flag)~~ | +| `--silent`, `-s` | Don't print anything, just return the values. ~~bool (flag)~~ | +| `--exclude`, `-e` | Comma-separated keys to exclude from the print-out. Defaults to `"labels"`. ~~Optional[str]~~ | +| `--url`, `-u` 3.5.0 | Print the URL to download the most recent compatible version of the pipeline. Requires a pipeline name. ~~bool (flag)~~ | +| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | +| **PRINTS** | Information about your spaCy installation. | ## validate {#validate new="2" tag="command"} @@ -260,22 +260,22 @@ chosen based on the file extension of the input file. $ python -m spacy convert [input_file] [output_dir] [--converter] [--file-type] [--n-sents] [--seg-sents] [--base] [--morphology] [--merge-subtokens] [--ner-map] [--lang] ``` -| Name | Description | -| ------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------- | -| `input_path` | Input file or directory. ~~Path (positional)~~ | -| `output_dir` | Output directory for converted file. Defaults to `"-"`, meaning data will be written to `stdout`. ~~Optional[Path] \(option)~~ | -| `--converter`, `-c` 2 | Name of converter to use (see below). 
~~str (option)~~ | -| `--file-type`, `-t` 2.1 | Type of file to create. Either `spacy` (default) for binary [`DocBin`](/api/docbin) data or `json` for v2.x JSON format. ~~str (option)~~ | -| `--n-sents`, `-n` | Number of sentences per document. Supported for: `conll`, `conllu`, `iob`, `ner` ~~int (option)~~ | -| `--seg-sents`, `-s` 2.2 | Segment sentences. Supported for: `conll`, `ner` ~~bool (flag)~~ | -| `--base`, `-b`, `--model` | Trained spaCy pipeline for sentence segmentation to use as base (for `--seg-sents`). ~~Optional[str](option)~~ | -| `--morphology`, `-m` | Enable appending morphology to tags. Supported for: `conllu` ~~bool (flag)~~ | -| `--merge-subtokens`, `-T` | Merge CoNLL-U subtokens ~~bool (flag)~~ | -| `--ner-map`, `-nm` | NER tag mapping (as JSON-encoded dict of entity types). Supported for: `conllu` ~~Optional[Path](option)~~ | -| `--lang`, `-l` 2.1 | Language code (if tokenizer required). ~~Optional[str] \(option)~~ | -| `--concatenate`, `-C` | Concatenate output to a single file ~~bool (flag)~~ | -| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | -| **CREATES** | Binary [`DocBin`](/api/docbin) training data that can be used with [`spacy train`](/api/cli#train). | +| Name | Description | +| ------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | +| `input_path` | Input file or directory. ~~Path (positional)~~ | +| `output_dir` | Output directory for converted file. Defaults to `"-"`, meaning data will be written to `stdout`. ~~Optional[Path] \(option)~~ | +| `--converter`, `-c` | Name of converter to use (see below). ~~str (option)~~ | +| `--file-type`, `-t` | Type of file to create. Either `spacy` (default) for binary [`DocBin`](/api/docbin) data or `json` for v2.x JSON format. ~~str (option)~~ | +| `--n-sents`, `-n` | Number of sentences per document. Supported for: `conll`, `conllu`, `iob`, `ner` ~~int (option)~~ | +| `--seg-sents`, `-s` | Segment sentences. Supported for: `conll`, `ner` ~~bool (flag)~~ | +| `--base`, `-b`, `--model` | Trained spaCy pipeline for sentence segmentation to use as base (for `--seg-sents`). ~~Optional[str](option)~~ | +| `--morphology`, `-m` | Enable appending morphology to tags. Supported for: `conllu` ~~bool (flag)~~ | +| `--merge-subtokens`, `-T` | Merge CoNLL-U subtokens ~~bool (flag)~~ | +| `--ner-map`, `-nm` | NER tag mapping (as JSON-encoded dict of entity types). Supported for: `conllu` ~~Optional[Path](option)~~ | +| `--lang`, `-l` | Language code (if tokenizer required). ~~Optional[str] \(option)~~ | +| `--concatenate`, `-C` | Concatenate output to a single file ~~bool (flag)~~ | +| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | +| **CREATES** | Binary [`DocBin`](/api/docbin) training data that can be used with [`spacy train`](/api/cli#train). | ### Converters {#converters} @@ -474,8 +474,7 @@ report span characteristics such as the average span length and the span (or span boundary) distinctiveness. The distinctiveness measure shows how different the tokens are with respect to the rest of the corpus using the KL-divergence of the token distributions. To learn more, you can check out Papay et al.'s work on -[*Dissecting Span Identification Tasks with Performance Prediction* (EMNLP -2020)](https://aclanthology.org/2020.emnlp-main.396/). 
+[*Dissecting Span Identification Tasks with Performance Prediction* (EMNLP 2020)](https://aclanthology.org/2020.emnlp-main.396/). @@ -1229,19 +1228,19 @@ $ python -m spacy package [input_dir] [output_dir] [--code] [--meta-path] [--cre > $ pip install dist/en_pipeline-0.0.0.tar.gz > ``` -| Name | Description | -| ------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `input_dir` | Path to directory containing pipeline data. ~~Path (positional)~~ | -| `output_dir` | Directory to create package folder in. ~~Path (positional)~~ | -| `--code`, `-c` 3 | Comma-separated paths to Python files to be included in the package and imported in its `__init__.py`. This allows including [registering functions](/usage/training#custom-functions) and [custom components](/usage/processing-pipelines#custom-components). ~~str (option)~~ | -| `--meta-path`, `-m` 2 | Path to [`meta.json`](/api/data-formats#meta) file (optional). ~~Optional[Path] \(option)~~ | -| `--create-meta`, `-C` 2 | Create a `meta.json` file on the command line, even if one already exists in the directory. If an existing file is found, its entries will be shown as the defaults in the command line prompt. ~~bool (flag)~~ | -| `--build`, `-b` 3 | Comma-separated artifact formats to build. Can be `sdist` (for a `.tar.gz` archive) and/or `wheel` (for a binary `.whl` file), or `none` if you want to run this step manually. The generated artifacts can be installed by `pip install`. Defaults to `sdist`. ~~str (option)~~ | -| `--name`, `-n` 3 | Package name to override in meta. ~~Optional[str] \(option)~~ | -| `--version`, `-v` 3 | Package version to override in meta. Useful when training new versions, as it doesn't require editing the meta template. ~~Optional[str] \(option)~~ | -| `--force`, `-f` | Force overwriting of existing folder in output directory. ~~bool (flag)~~ | -| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | -| **CREATES** | A Python package containing the spaCy pipeline. | +| Name | Description | +| -------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `input_dir` | Path to directory containing pipeline data. ~~Path (positional)~~ | +| `output_dir` | Directory to create package folder in. ~~Path (positional)~~ | +| `--code`, `-c` 3 | Comma-separated paths to Python files to be included in the package and imported in its `__init__.py`. This allows including [registering functions](/usage/training#custom-functions) and [custom components](/usage/processing-pipelines#custom-components). ~~str (option)~~ | +| `--meta-path`, `-m` | Path to [`meta.json`](/api/data-formats#meta) file (optional). ~~Optional[Path] \(option)~~ | +| `--create-meta`, `-C` | Create a `meta.json` file on the command line, even if one already exists in the directory. If an existing file is found, its entries will be shown as the defaults in the command line prompt. ~~bool (flag)~~ | +| `--build`, `-b` 3 | Comma-separated artifact formats to build. 
Can be `sdist` (for a `.tar.gz` archive) and/or `wheel` (for a binary `.whl` file), or `none` if you want to run this step manually. The generated artifacts can be installed by `pip install`. Defaults to `sdist`. ~~str (option)~~ | +| `--name`, `-n` 3 | Package name to override in meta. ~~Optional[str] \(option)~~ | +| `--version`, `-v` 3 | Package version to override in meta. Useful when training new versions, as it doesn't require editing the meta template. ~~Optional[str] \(option)~~ | +| `--force`, `-f` | Force overwriting of existing folder in output directory. ~~bool (flag)~~ | +| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | +| **CREATES** | A Python package containing the spaCy pipeline. | ## project {#project new="3"} diff --git a/website/docs/api/doc.md b/website/docs/api/doc.md index f97ed4547..090489d83 100644 --- a/website/docs/api/doc.md +++ b/website/docs/api/doc.md @@ -209,15 +209,15 @@ alignment mode `"strict". > assert span.text == "New York" > ``` -| Name | Description | -| ------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `start` | The index of the first character of the span. ~~int~~ | -| `end` | The index of the last character after the span. ~~int~~ | -| `label` | A label to attach to the span, e.g. for named entities. ~~Union[int, str]~~ | -| `kb_id` 2.2 | An ID from a knowledge base to capture the meaning of a named entity. ~~Union[int, str]~~ | -| `vector` | A meaning representation of the span. ~~numpy.ndarray[ndim=1, dtype=float32]~~ | -| `alignment_mode` | How character indices snap to token boundaries. Options: `"strict"` (no snapping), `"contract"` (span of all tokens completely within the character span), `"expand"` (span of all tokens at least partially covered by the character span). Defaults to `"strict"`. ~~str~~ | -| **RETURNS** | The newly constructed object or `None`. ~~Optional[Span]~~ | +| Name | Description | +| ---------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `start` | The index of the first character of the span. ~~int~~ | +| `end` | The index of the last character after the span. ~~int~~ | +| `label` | A label to attach to the span, e.g. for named entities. ~~Union[int, str]~~ | +| `kb_id` | An ID from a knowledge base to capture the meaning of a named entity. ~~Union[int, str]~~ | +| `vector` | A meaning representation of the span. ~~numpy.ndarray[ndim=1, dtype=float32]~~ | +| `alignment_mode` | How character indices snap to token boundaries. Options: `"strict"` (no snapping), `"contract"` (span of all tokens completely within the character span), `"expand"` (span of all tokens at least partially covered by the character span). Defaults to `"strict"`. ~~str~~ | +| **RETURNS** | The newly constructed object or `None`. ~~Optional[Span]~~ | ## Doc.set_ents {#set_ents tag="method" new="3"} @@ -751,22 +751,22 @@ The L2 norm of the document's vector representation. 
## Attributes {#attributes} -| Name | Description | -| ------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------- | -| `text` | A string representation of the document text. ~~str~~ | -| `text_with_ws` | An alias of `Doc.text`, provided for duck-type compatibility with `Span` and `Token`. ~~str~~ | -| `mem` | The document's local memory heap, for all C data it owns. ~~cymem.Pool~~ | -| `vocab` | The store of lexical types. ~~Vocab~~ | -| `tensor` 2 | Container for dense vector representations. ~~numpy.ndarray~~ | -| `user_data` | A generic storage area, for user custom data. ~~Dict[str, Any]~~ | -| `lang` 2.1 | Language of the document's vocabulary. ~~int~~ | -| `lang_` 2.1 | Language of the document's vocabulary. ~~str~~ | -| `sentiment` | The document's positivity/negativity score, if available. ~~float~~ | -| `user_hooks` | A dictionary that allows customization of the `Doc`'s properties. ~~Dict[str, Callable]~~ | -| `user_token_hooks` | A dictionary that allows customization of properties of `Token` children. ~~Dict[str, Callable]~~ | -| `user_span_hooks` | A dictionary that allows customization of properties of `Span` children. ~~Dict[str, Callable]~~ | -| `has_unknown_spaces` | Whether the document was constructed without known spacing between tokens (typically when created from gold tokenization). ~~bool~~ | -| `_` | User space for adding custom [attribute extensions](/usage/processing-pipelines#custom-components-attributes). ~~Underscore~~ | +| Name | Description | +| -------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | +| `text` | A string representation of the document text. ~~str~~ | +| `text_with_ws` | An alias of `Doc.text`, provided for duck-type compatibility with `Span` and `Token`. ~~str~~ | +| `mem` | The document's local memory heap, for all C data it owns. ~~cymem.Pool~~ | +| `vocab` | The store of lexical types. ~~Vocab~~ | +| `tensor` | Container for dense vector representations. ~~numpy.ndarray~~ | +| `user_data` | A generic storage area, for user custom data. ~~Dict[str, Any]~~ | +| `lang` | Language of the document's vocabulary. ~~int~~ | +| `lang_` | Language of the document's vocabulary. ~~str~~ | +| `sentiment` | The document's positivity/negativity score, if available. ~~float~~ | +| `user_hooks` | A dictionary that allows customization of the `Doc`'s properties. ~~Dict[str, Callable]~~ | +| `user_token_hooks` | A dictionary that allows customization of properties of `Token` children. ~~Dict[str, Callable]~~ | +| `user_span_hooks` | A dictionary that allows customization of properties of `Span` children. ~~Dict[str, Callable]~~ | +| `has_unknown_spaces` | Whether the document was constructed without known spacing between tokens (typically when created from gold tokenization). ~~bool~~ | +| `_` | User space for adding custom [attribute extensions](/usage/processing-pipelines#custom-components-attributes). 
~~Underscore~~ | ## Serialization fields {#serialization-fields} diff --git a/website/docs/api/language.md b/website/docs/api/language.md index 504640d57..ad0ac2a46 100644 --- a/website/docs/api/language.md +++ b/website/docs/api/language.md @@ -63,18 +63,18 @@ spaCy loads a model under the hood based on its > nlp = Language.from_config(config) > ``` -| Name | Description | -| ------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `config` | The loaded config. ~~Union[Dict[str, Any], Config]~~ | -| _keyword-only_ | | -| `vocab` | A `Vocab` object. If `True`, a vocab is created using the default language data settings. ~~Vocab~~ | +| Name | Description | +| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `config` | The loaded config. ~~Union[Dict[str, Any], Config]~~ | +| _keyword-only_ | | +| `vocab` | A `Vocab` object. If `True`, a vocab is created using the default language data settings. ~~Vocab~~ | | `disable` | Name(s) of pipeline component(s) to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [nlp.enable_pipe](/api/language#enable_pipe). Is merged with the config entry `nlp.disabled`. ~~Union[str, Iterable[str]]~~ | -| `enable` 3.4 | Name(s) of pipeline component(s) to [enable](/usage/processing-pipelines#disabling). All other pipes will be disabled, but can be enabled again using [nlp.enable_pipe](/api/language#enable_pipe). ~~Union[str, Iterable[str]]~~ | -| `exclude` | Name(s) of pipeline component(s) to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~Union[str, Iterable[str]]~~ | -| `meta` | [Meta data](/api/data-formats#meta) overrides. ~~Dict[str, Any]~~ | -| `auto_fill` | Whether to automatically fill in missing values in the config, based on defaults and function argument annotations. Defaults to `True`. ~~bool~~ | -| `validate` | Whether to validate the component config and arguments against the types expected by the factory. Defaults to `True`. ~~bool~~ | -| **RETURNS** | The initialized object. ~~Language~~ | +| `enable` 3.4 | Name(s) of pipeline component(s) to [enable](/usage/processing-pipelines#disabling). All other pipes will be disabled, but can be enabled again using [nlp.enable_pipe](/api/language#enable_pipe). ~~Union[str, Iterable[str]]~~ | +| `exclude` | Name(s) of pipeline component(s) to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~Union[str, Iterable[str]]~~ | +| `meta` | [Meta data](/api/data-formats#meta) overrides. ~~Dict[str, Any]~~ | +| `auto_fill` | Whether to automatically fill in missing values in the config, based on defaults and function argument annotations. Defaults to `True`. ~~bool~~ | +| `validate` | Whether to validate the component config and arguments against the types expected by the factory. Defaults to `True`. ~~bool~~ | +| **RETURNS** | The initialized object. 
~~Language~~ | ## Language.component {#component tag="classmethod" new="3"} @@ -198,16 +198,16 @@ tokenization is skipped but the rest of the pipeline is run. > assert doc.has_annotation("DEP") > ``` -| Name | Description | -| ------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `texts` | A sequence of strings (or `Doc` objects). ~~Iterable[Union[str, Doc]]~~ | -| _keyword-only_ | | -| `as_tuples` | If set to `True`, inputs should be a sequence of `(text, context)` tuples. Output will then be a sequence of `(doc, context)` tuples. Defaults to `False`. ~~bool~~ | -| `batch_size` | The number of texts to buffer. ~~Optional[int]~~ | -| `disable` | Names of pipeline components to [disable](/usage/processing-pipelines#disabling). ~~List[str]~~ | -| `component_cfg` | Optional dictionary of keyword arguments for components, keyed by component names. Defaults to `None`. ~~Optional[Dict[str, Dict[str, Any]]]~~ | -| `n_process` 2.2.2 | Number of processors to use. Defaults to `1`. ~~int~~ | -| **YIELDS** | Documents in the order of the original text. ~~Doc~~ | +| Name | Description | +| --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `texts` | A sequence of strings (or `Doc` objects). ~~Iterable[Union[str, Doc]]~~ | +| _keyword-only_ | | +| `as_tuples` | If set to `True`, inputs should be a sequence of `(text, context)` tuples. Output will then be a sequence of `(doc, context)` tuples. Defaults to `False`. ~~bool~~ | +| `batch_size` | The number of texts to buffer. ~~Optional[int]~~ | +| `disable` | Names of pipeline components to [disable](/usage/processing-pipelines#disabling). ~~List[str]~~ | +| `component_cfg` | Optional dictionary of keyword arguments for components, keyed by component names. Defaults to `None`. ~~Optional[Dict[str, Dict[str, Any]]]~~ | +| `n_process` | Number of processors to use. Defaults to `1`. ~~int~~ | +| **YIELDS** | Documents in the order of the original text. ~~Doc~~ | ## Language.set_error_handler {#set_error_handler tag="method" new="3"} @@ -1030,21 +1030,21 @@ details. ## Attributes {#attributes} -| Name | Description | -| --------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- | -| `vocab` | A container for the lexical types. ~~Vocab~~ | -| `tokenizer` | The tokenizer. ~~Tokenizer~~ | -| `make_doc` | Callable that takes a string and returns a `Doc`. ~~Callable[[str], Doc]~~ | -| `pipeline` | List of `(name, component)` tuples describing the current processing pipeline, in order. ~~List[Tuple[str, Callable[[Doc], Doc]]]~~ | -| `pipe_names` 2 | List of pipeline component names, in order. ~~List[str]~~ | -| `pipe_labels` 2.2 | List of labels set by the pipeline components, if available, keyed by component name. ~~Dict[str, List[str]]~~ | -| `pipe_factories` 2.2 | Dictionary of pipeline component names, mapped to their factory names. ~~Dict[str, str]~~ | -| `factories` | All available factory functions, keyed by name. ~~Dict[str, Callable[[...], Callable[[Doc], Doc]]]~~ | -| `factory_names` 3 | List of all available factory names. 
~~List[str]~~ | -| `components` 3 | List of all available `(name, component)` tuples, including components that are currently disabled. ~~List[Tuple[str, Callable[[Doc], Doc]]]~~ | -| `component_names` 3 | List of all available component names, including components that are currently disabled. ~~List[str]~~ | -| `disabled` 3 | Names of components that are currently disabled and don't run as part of the pipeline. ~~List[str]~~ | -| `path` 2 | Path to the pipeline data directory, if a pipeline is loaded from a path or package. Otherwise `None`. ~~Optional[Path]~~ | +| Name | Description | +| -------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- | +| `vocab` | A container for the lexical types. ~~Vocab~~ | +| `tokenizer` | The tokenizer. ~~Tokenizer~~ | +| `make_doc` | Callable that takes a string and returns a `Doc`. ~~Callable[[str], Doc]~~ | +| `pipeline` | List of `(name, component)` tuples describing the current processing pipeline, in order. ~~List[Tuple[str, Callable[[Doc], Doc]]]~~ | +| `pipe_names` | List of pipeline component names, in order. ~~List[str]~~ | +| `pipe_labels` | List of labels set by the pipeline components, if available, keyed by component name. ~~Dict[str, List[str]]~~ | +| `pipe_factories` | Dictionary of pipeline component names, mapped to their factory names. ~~Dict[str, str]~~ | +| `factories` | All available factory functions, keyed by name. ~~Dict[str, Callable[[...], Callable[[Doc], Doc]]]~~ | +| `factory_names` 3 | List of all available factory names. ~~List[str]~~ | +| `components` 3 | List of all available `(name, component)` tuples, including components that are currently disabled. ~~List[Tuple[str, Callable[[Doc], Doc]]]~~ | +| `component_names` 3 | List of all available component names, including components that are currently disabled. ~~List[str]~~ | +| `disabled` 3 | Names of components that are currently disabled and don't run as part of the pipeline. ~~List[str]~~ | +| `path` | Path to the pipeline data directory, if a pipeline is loaded from a path or package. Otherwise `None`. ~~Optional[Path]~~ | ## Class attributes {#class-attributes} diff --git a/website/docs/api/lexeme.md b/website/docs/api/lexeme.md index c5d4b7544..eb76afa90 100644 --- a/website/docs/api/lexeme.md +++ b/website/docs/api/lexeme.md @@ -121,44 +121,44 @@ The L2 norm of the lexeme's vector representation. ## Attributes {#attributes} -| Name | Description | -| -------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `vocab` | The lexeme's vocabulary. ~~Vocab~~ | -| `text` | Verbatim text content. ~~str~~ | -| `orth` | ID of the verbatim text content. ~~int~~ | -| `orth_` | Verbatim text content (identical to `Lexeme.text`). Exists mostly for consistency with the other attributes. ~~str~~ | -| `rank` | Sequential ID of the lexeme's lexical type, used to index into tables, e.g. for word vectors. ~~int~~ | -| `flags` | Container of the lexeme's binary flags. ~~int~~ | -| `norm` | The lexeme's norm, i.e. a normalized form of the lexeme text. ~~int~~ | -| `norm_` | The lexeme's norm, i.e. a normalized form of the lexeme text. ~~str~~ | -| `lower` | Lowercase form of the word. 
~~int~~ | -| `lower_` | Lowercase form of the word. ~~str~~ | -| `shape` | Transform of the word's string, to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example,`"Xxxx"`or`"dd"`. ~~int~~ | -| `shape_` | Transform of the word's string, to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example,`"Xxxx"`or`"dd"`. ~~str~~ | -| `prefix` | Length-N substring from the start of the word. Defaults to `N=1`. ~~int~~ | -| `prefix_` | Length-N substring from the start of the word. Defaults to `N=1`. ~~str~~ | -| `suffix` | Length-N substring from the end of the word. Defaults to `N=3`. ~~int~~ | -| `suffix_` | Length-N substring from the start of the word. Defaults to `N=3`. ~~str~~ | -| `is_alpha` | Does the lexeme consist of alphabetic characters? Equivalent to `lexeme.text.isalpha()`. ~~bool~~ | -| `is_ascii` | Does the lexeme consist of ASCII characters? Equivalent to `[any(ord(c) >= 128 for c in lexeme.text)]`. ~~bool~~ | -| `is_digit` | Does the lexeme consist of digits? Equivalent to `lexeme.text.isdigit()`. ~~bool~~ | -| `is_lower` | Is the lexeme in lowercase? Equivalent to `lexeme.text.islower()`. ~~bool~~ | -| `is_upper` | Is the lexeme in uppercase? Equivalent to `lexeme.text.isupper()`. ~~bool~~ | -| `is_title` | Is the lexeme in titlecase? Equivalent to `lexeme.text.istitle()`. ~~bool~~ | -| `is_punct` | Is the lexeme punctuation? ~~bool~~ | -| `is_left_punct` | Is the lexeme a left punctuation mark, e.g. `(`? ~~bool~~ | -| `is_right_punct` | Is the lexeme a right punctuation mark, e.g. `)`? ~~bool~~ | -| `is_space` | Does the lexeme consist of whitespace characters? Equivalent to `lexeme.text.isspace()`. ~~bool~~ | -| `is_bracket` | Is the lexeme a bracket? ~~bool~~ | -| `is_quote` | Is the lexeme a quotation mark? ~~bool~~ | -| `is_currency` 2.0.8 | Is the lexeme a currency symbol? ~~bool~~ | -| `like_url` | Does the lexeme resemble a URL? ~~bool~~ | -| `like_num` | Does the lexeme represent a number? e.g. "10.9", "10", "ten", etc. ~~bool~~ | -| `like_email` | Does the lexeme resemble an email address? ~~bool~~ | -| `is_oov` | Is the lexeme out-of-vocabulary (i.e. does it not have a word vector)? ~~bool~~ | -| `is_stop` | Is the lexeme part of a "stop list"? ~~bool~~ | -| `lang` | Language of the parent vocabulary. ~~int~~ | -| `lang_` | Language of the parent vocabulary. ~~str~~ | -| `prob` | Smoothed log probability estimate of the lexeme's word type (context-independent entry in the vocabulary). ~~float~~ | -| `cluster` | Brown cluster ID. ~~int~~ | -| `sentiment` | A scalar value indicating the positivity or negativity of the lexeme. ~~float~~ | +| Name | Description | +| ---------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `vocab` | The lexeme's vocabulary. ~~Vocab~~ | +| `text` | Verbatim text content. ~~str~~ | +| `orth` | ID of the verbatim text content. ~~int~~ | +| `orth_` | Verbatim text content (identical to `Lexeme.text`). Exists mostly for consistency with the other attributes. 
~~str~~ | +| `rank` | Sequential ID of the lexeme's lexical type, used to index into tables, e.g. for word vectors. ~~int~~ | +| `flags` | Container of the lexeme's binary flags. ~~int~~ | +| `norm` | The lexeme's norm, i.e. a normalized form of the lexeme text. ~~int~~ | +| `norm_` | The lexeme's norm, i.e. a normalized form of the lexeme text. ~~str~~ | +| `lower` | Lowercase form of the word. ~~int~~ | +| `lower_` | Lowercase form of the word. ~~str~~ | +| `shape` | Transform of the word's string, to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example,`"Xxxx"`or`"dd"`. ~~int~~ | +| `shape_` | Transform of the word's string, to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example,`"Xxxx"`or`"dd"`. ~~str~~ | +| `prefix` | Length-N substring from the start of the word. Defaults to `N=1`. ~~int~~ | +| `prefix_` | Length-N substring from the start of the word. Defaults to `N=1`. ~~str~~ | +| `suffix` | Length-N substring from the end of the word. Defaults to `N=3`. ~~int~~ | +| `suffix_` | Length-N substring from the start of the word. Defaults to `N=3`. ~~str~~ | +| `is_alpha` | Does the lexeme consist of alphabetic characters? Equivalent to `lexeme.text.isalpha()`. ~~bool~~ | +| `is_ascii` | Does the lexeme consist of ASCII characters? Equivalent to `[any(ord(c) >= 128 for c in lexeme.text)]`. ~~bool~~ | +| `is_digit` | Does the lexeme consist of digits? Equivalent to `lexeme.text.isdigit()`. ~~bool~~ | +| `is_lower` | Is the lexeme in lowercase? Equivalent to `lexeme.text.islower()`. ~~bool~~ | +| `is_upper` | Is the lexeme in uppercase? Equivalent to `lexeme.text.isupper()`. ~~bool~~ | +| `is_title` | Is the lexeme in titlecase? Equivalent to `lexeme.text.istitle()`. ~~bool~~ | +| `is_punct` | Is the lexeme punctuation? ~~bool~~ | +| `is_left_punct` | Is the lexeme a left punctuation mark, e.g. `(`? ~~bool~~ | +| `is_right_punct` | Is the lexeme a right punctuation mark, e.g. `)`? ~~bool~~ | +| `is_space` | Does the lexeme consist of whitespace characters? Equivalent to `lexeme.text.isspace()`. ~~bool~~ | +| `is_bracket` | Is the lexeme a bracket? ~~bool~~ | +| `is_quote` | Is the lexeme a quotation mark? ~~bool~~ | +| `is_currency` | Is the lexeme a currency symbol? ~~bool~~ | +| `like_url` | Does the lexeme resemble a URL? ~~bool~~ | +| `like_num` | Does the lexeme represent a number? e.g. "10.9", "10", "ten", etc. ~~bool~~ | +| `like_email` | Does the lexeme resemble an email address? ~~bool~~ | +| `is_oov` | Is the lexeme out-of-vocabulary (i.e. does it not have a word vector)? ~~bool~~ | +| `is_stop` | Is the lexeme part of a "stop list"? ~~bool~~ | +| `lang` | Language of the parent vocabulary. ~~int~~ | +| `lang_` | Language of the parent vocabulary. ~~str~~ | +| `prob` | Smoothed log probability estimate of the lexeme's word type (context-independent entry in the vocabulary). ~~float~~ | +| `cluster` | Brown cluster ID. ~~int~~ | +| `sentiment` | A scalar value indicating the positivity or negativity of the lexeme. 
~~float~~ | diff --git a/website/docs/api/matcher.md b/website/docs/api/matcher.md index 8cc446c6a..cd7bfa070 100644 --- a/website/docs/api/matcher.md +++ b/website/docs/api/matcher.md @@ -33,7 +33,7 @@ rule-based matching are: | Attribute | Description | | ---------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | | `ORTH` | The exact verbatim text of a token. ~~str~~ | -| `TEXT` 2.1 | The exact verbatim text of a token. ~~str~~ | +| `TEXT` | The exact verbatim text of a token. ~~str~~ | | `NORM` | The normalized form of the token text. ~~str~~ | | `LOWER` | The lowercase form of the token text. ~~str~~ | | `LENGTH` | The length of the token text. ~~int~~ | @@ -48,7 +48,7 @@ rule-based matching are: | `ENT_IOB` | The IOB part of the token's entity tag. ~~str~~ | | `ENT_ID` | The token's entity ID (`ent_id`). ~~str~~ | | `ENT_KB_ID` | The token's entity knowledge base ID (`ent_kb_id`). ~~str~~ | -| `_` 2.1 | Properties in [custom extension attributes](/usage/processing-pipelines#custom-components-attributes). ~~Dict[str, Any]~~ | +| `_` | Properties in [custom extension attributes](/usage/processing-pipelines#custom-components-attributes). ~~Dict[str, Any]~~ | | `OP` | Operator or quantifier to determine how often to match a token pattern. ~~str~~ | Operators and quantifiers define **how often** a token pattern should be @@ -64,7 +64,7 @@ matched: > ``` | OP | Description | -|---------|------------------------------------------------------------------------| +| ------- | ---------------------------------------------------------------------- | | `!` | Negate the pattern, by requiring it to match exactly 0 times. | | `?` | Make the pattern optional, by allowing it to match 0 or 1 times. | | `+` | Require the pattern to match 1 or more times. | @@ -109,10 +109,10 @@ string where an integer is expected) or unexpected property names. > matcher = Matcher(nlp.vocab) > ``` -| Name | Description | -| --------------------------------------- | ----------------------------------------------------------------------------------------------------- | -| `vocab` | The vocabulary object, which must be shared with the documents the matcher will operate on. ~~Vocab~~ | -| `validate` 2.1 | Validate all patterns added to this matcher. ~~bool~~ | +| Name | Description | +| ---------- | ----------------------------------------------------------------------------------------------------- | +| `vocab` | The vocabulary object, which must be shared with the documents the matcher will operate on. ~~Vocab~~ | +| `validate` | Validate all patterns added to this matcher. ~~bool~~ | ## Matcher.\_\_call\_\_ {#call tag="method"} diff --git a/website/docs/api/phrasematcher.md b/website/docs/api/phrasematcher.md index 2cef9ac2a..cd419ae5c 100644 --- a/website/docs/api/phrasematcher.md +++ b/website/docs/api/phrasematcher.md @@ -36,11 +36,11 @@ be shown. > matcher = PhraseMatcher(nlp.vocab) > ``` -| Name | Description | -| --------------------------------------- | ------------------------------------------------------------------------------------------------------ | -| `vocab` | The vocabulary object, which must be shared with the documents the matcher will operate on. ~~Vocab~~ | -| `attr` 2.1 | The token attribute to match on. Defaults to `ORTH`, i.e. the verbatim token text. ~~Union[int, str]~~ | -| `validate` 2.1 | Validate patterns added to the matcher. 
~~bool~~ | +| Name | Description | +| ---------- | ------------------------------------------------------------------------------------------------------ | +| `vocab` | The vocabulary object, which must be shared with the documents the matcher will operate on. ~~Vocab~~ | +| `attr` | The token attribute to match on. Defaults to `ORTH`, i.e. the verbatim token text. ~~Union[int, str]~~ | +| `validate` | Validate patterns added to the matcher. ~~bool~~ | ## PhraseMatcher.\_\_call\_\_ {#call tag="method"} diff --git a/website/docs/api/span.md b/website/docs/api/span.md index 89f608994..69bbe8db1 100644 --- a/website/docs/api/span.md +++ b/website/docs/api/span.md @@ -186,14 +186,14 @@ the character indices don't map to a valid span. > assert span.text == "New York" > ``` -| Name | Description | -| ------------------------------------ | ----------------------------------------------------------------------------------------- | -| `start` | The index of the first character of the span. ~~int~~ | -| `end` | The index of the last character after the span. ~~int~~ | -| `label` | A label to attach to the span, e.g. for named entities. ~~Union[int, str]~~ | -| `kb_id` 2.2 | An ID from a knowledge base to capture the meaning of a named entity. ~~Union[int, str]~~ | -| `vector` | A meaning representation of the span. ~~numpy.ndarray[ndim=1, dtype=float32]~~ | -| **RETURNS** | The newly constructed object or `None`. ~~Optional[Span]~~ | +| Name | Description | +| ----------- | ----------------------------------------------------------------------------------------- | +| `start` | The index of the first character of the span. ~~int~~ | +| `end` | The index of the last character after the span. ~~int~~ | +| `label` | A label to attach to the span, e.g. for named entities. ~~Union[int, str]~~ | +| `kb_id` | An ID from a knowledge base to capture the meaning of a named entity. ~~Union[int, str]~~ | +| `vector` | A meaning representation of the span. ~~numpy.ndarray[ndim=1, dtype=float32]~~ | +| **RETURNS** | The newly constructed object or `None`. ~~Optional[Span]~~ | ## Span.similarity {#similarity tag="method" model="vectors"} @@ -544,26 +544,26 @@ overlaps with will be returned. ## Attributes {#attributes} -| Name | Description | -| --------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | -| `doc` | The parent document. ~~Doc~~ | -| `tensor` 2.1.7 | The span's slice of the parent `Doc`'s tensor. ~~numpy.ndarray~~ | -| `start` | The token offset for the start of the span. ~~int~~ | -| `end` | The token offset for the end of the span. ~~int~~ | -| `start_char` | The character offset for the start of the span. ~~int~~ | -| `end_char` | The character offset for the end of the span. ~~int~~ | -| `text` | A string representation of the span text. ~~str~~ | -| `text_with_ws` | The text content of the span with a trailing whitespace character if the last token has one. ~~str~~ | -| `orth` | ID of the verbatim text content. ~~int~~ | -| `orth_` | Verbatim text content (identical to `Span.text`). Exists mostly for consistency with the other attributes. ~~str~~ | -| `label` | The hash value of the span's label. ~~int~~ | -| `label_` | The span's label. ~~str~~ | -| `lemma_` | The span's lemma. Equivalent to `"".join(token.text_with_ws for token in span)`. ~~str~~ | -| `kb_id` | The hash value of the knowledge base ID referred to by the span. 
~~int~~ | -| `kb_id_` | The knowledge base ID referred to by the span. ~~str~~ | -| `ent_id` | The hash value of the named entity the root token is an instance of. ~~int~~ | -| `ent_id_` | The string ID of the named entity the root token is an instance of. ~~str~~ | -| `id` | The hash value of the span's ID. ~~int~~ | -| `id_` | The span's ID. ~~str~~ | -| `sentiment` | A scalar value indicating the positivity or negativity of the span. ~~float~~ | -| `_` | User space for adding custom [attribute extensions](/usage/processing-pipelines#custom-components-attributes). ~~Underscore~~ | +| Name | Description | +| -------------- | ----------------------------------------------------------------------------------------------------------------------------- | +| `doc` | The parent document. ~~Doc~~ | +| `tensor` | The span's slice of the parent `Doc`'s tensor. ~~numpy.ndarray~~ | +| `start` | The token offset for the start of the span. ~~int~~ | +| `end` | The token offset for the end of the span. ~~int~~ | +| `start_char` | The character offset for the start of the span. ~~int~~ | +| `end_char` | The character offset for the end of the span. ~~int~~ | +| `text` | A string representation of the span text. ~~str~~ | +| `text_with_ws` | The text content of the span with a trailing whitespace character if the last token has one. ~~str~~ | +| `orth` | ID of the verbatim text content. ~~int~~ | +| `orth_` | Verbatim text content (identical to `Span.text`). Exists mostly for consistency with the other attributes. ~~str~~ | +| `label` | The hash value of the span's label. ~~int~~ | +| `label_` | The span's label. ~~str~~ | +| `lemma_` | The span's lemma. Equivalent to `"".join(token.text_with_ws for token in span)`. ~~str~~ | +| `kb_id` | The hash value of the knowledge base ID referred to by the span. ~~int~~ | +| `kb_id_` | The knowledge base ID referred to by the span. ~~str~~ | +| `ent_id` | The hash value of the named entity the root token is an instance of. ~~int~~ | +| `ent_id_` | The string ID of the named entity the root token is an instance of. ~~str~~ | +| `id` | The hash value of the span's ID. ~~int~~ | +| `id_` | The span's ID. ~~str~~ | +| `sentiment` | A scalar value indicating the positivity or negativity of the span. ~~float~~ | +| `_` | User space for adding custom [attribute extensions](/usage/processing-pipelines#custom-components-attributes). ~~Underscore~~ | diff --git a/website/docs/api/token.md b/website/docs/api/token.md index d43cd3ff1..89bd77447 100644 --- a/website/docs/api/token.md +++ b/website/docs/api/token.md @@ -403,75 +403,75 @@ The L2 norm of the token's vector representation. ## Attributes {#attributes} -| Name | Description | -| -------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `doc` | The parent document. ~~Doc~~ | -| `lex` 3 | The underlying lexeme. ~~Lexeme~~ | -| `sent` 2.0.12 | The sentence span that this token is a part of. ~~Span~~ | -| `text` | Verbatim text content. ~~str~~ | -| `text_with_ws` | Text content, with trailing space character if present. ~~str~~ | -| `whitespace_` | Trailing space character if present. ~~str~~ | -| `orth` | ID of the verbatim text content. ~~int~~ | -| `orth_` | Verbatim text content (identical to `Token.text`). Exists mostly for consistency with the other attributes. 
~~str~~ | -| `vocab` | The vocab object of the parent `Doc`. ~~vocab~~ | -| `tensor` 2.1.7 | The token's slice of the parent `Doc`'s tensor. ~~numpy.ndarray~~ | -| `head` | The syntactic parent, or "governor", of this token. ~~Token~~ | -| `left_edge` | The leftmost token of this token's syntactic descendants. ~~Token~~ | -| `right_edge` | The rightmost token of this token's syntactic descendants. ~~Token~~ | -| `i` | The index of the token within the parent document. ~~int~~ | -| `ent_type` | Named entity type. ~~int~~ | -| `ent_type_` | Named entity type. ~~str~~ | -| `ent_iob` | IOB code of named entity tag. `3` means the token begins an entity, `2` means it is outside an entity, `1` means it is inside an entity, and `0` means no entity tag is set. ~~int~~ | -| `ent_iob_` | IOB code of named entity tag. "B" means the token begins an entity, "I" means it is inside an entity, "O" means it is outside an entity, and "" means no entity tag is set. ~~str~~ | -| `ent_kb_id` 2.2 | Knowledge base ID that refers to the named entity this token is a part of, if any. ~~int~~ | -| `ent_kb_id_` 2.2 | Knowledge base ID that refers to the named entity this token is a part of, if any. ~~str~~ | -| `ent_id` | ID of the entity the token is an instance of, if any. Currently not used, but potentially for coreference resolution. ~~int~~ | -| `ent_id_` | ID of the entity the token is an instance of, if any. Currently not used, but potentially for coreference resolution. ~~str~~ | -| `lemma` | Base form of the token, with no inflectional suffixes. ~~int~~ | -| `lemma_` | Base form of the token, with no inflectional suffixes. ~~str~~ | -| `norm` | The token's norm, i.e. a normalized form of the token text. Can be set in the language's [tokenizer exceptions](/usage/linguistic-features#language-data). ~~int~~ | -| `norm_` | The token's norm, i.e. a normalized form of the token text. Can be set in the language's [tokenizer exceptions](/usage/linguistic-features#language-data). ~~str~~ | -| `lower` | Lowercase form of the token. ~~int~~ | -| `lower_` | Lowercase form of the token text. Equivalent to `Token.text.lower()`. ~~str~~ | -| `shape` | Transform of the token's string to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example,`"Xxxx"`or`"dd"`. ~~int~~ | -| `shape_` | Transform of the token's string to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example,`"Xxxx"`or`"dd"`. ~~str~~ | -| `prefix` | Hash value of a length-N substring from the start of the token. Defaults to `N=1`. ~~int~~ | -| `prefix_` | A length-N substring from the start of the token. Defaults to `N=1`. ~~str~~ | -| `suffix` | Hash value of a length-N substring from the end of the token. Defaults to `N=3`. ~~int~~ | -| `suffix_` | Length-N substring from the end of the token. Defaults to `N=3`. ~~str~~ | -| `is_alpha` | Does the token consist of alphabetic characters? Equivalent to `token.text.isalpha()`. ~~bool~~ | -| `is_ascii` | Does the token consist of ASCII characters? Equivalent to `all(ord(c) < 128 for c in token.text)`. ~~bool~~ | -| `is_digit` | Does the token consist of digits? Equivalent to `token.text.isdigit()`. ~~bool~~ | -| `is_lower` | Is the token in lowercase? Equivalent to `token.text.islower()`. 
~~bool~~ | -| `is_upper` | Is the token in uppercase? Equivalent to `token.text.isupper()`. ~~bool~~ | -| `is_title` | Is the token in titlecase? Equivalent to `token.text.istitle()`. ~~bool~~ | -| `is_punct` | Is the token punctuation? ~~bool~~ | -| `is_left_punct` | Is the token a left punctuation mark, e.g. `"("` ? ~~bool~~ | -| `is_right_punct` | Is the token a right punctuation mark, e.g. `")"` ? ~~bool~~ | -| `is_sent_start` | Does the token start a sentence? ~~bool~~ or `None` if unknown. Defaults to `True` for the first token in the `Doc`. | -| `is_sent_end` | Does the token end a sentence? ~~bool~~ or `None` if unknown. | -| `is_space` | Does the token consist of whitespace characters? Equivalent to `token.text.isspace()`. ~~bool~~ | -| `is_bracket` | Is the token a bracket? ~~bool~~ | -| `is_quote` | Is the token a quotation mark? ~~bool~~ | -| `is_currency` 2.0.8 | Is the token a currency symbol? ~~bool~~ | -| `like_url` | Does the token resemble a URL? ~~bool~~ | -| `like_num` | Does the token represent a number? e.g. "10.9", "10", "ten", etc. ~~bool~~ | -| `like_email` | Does the token resemble an email address? ~~bool~~ | -| `is_oov` | Is the token out-of-vocabulary (i.e. does it not have a word vector)? ~~bool~~ | -| `is_stop` | Is the token part of a "stop list"? ~~bool~~ | -| `pos` | Coarse-grained part-of-speech from the [Universal POS tag set](https://universaldependencies.org/u/pos/). ~~int~~ | -| `pos_` | Coarse-grained part-of-speech from the [Universal POS tag set](https://universaldependencies.org/u/pos/). ~~str~~ | -| `tag` | Fine-grained part-of-speech. ~~int~~ | -| `tag_` | Fine-grained part-of-speech. ~~str~~ | -| `morph` 3 | Morphological analysis. ~~MorphAnalysis~~ | -| `dep` | Syntactic dependency relation. ~~int~~ | -| `dep_` | Syntactic dependency relation. ~~str~~ | -| `lang` | Language of the parent document's vocabulary. ~~int~~ | -| `lang_` | Language of the parent document's vocabulary. ~~str~~ | -| `prob` | Smoothed log probability estimate of token's word type (context-independent entry in the vocabulary). ~~float~~ | -| `idx` | The character offset of the token within the parent document. ~~int~~ | -| `sentiment` | A scalar value indicating the positivity or negativity of the token. ~~float~~ | -| `lex_id` | Sequential ID of the token's lexical type, used to index into tables, e.g. for word vectors. ~~int~~ | -| `rank` | Sequential ID of the token's lexical type, used to index into tables, e.g. for word vectors. ~~int~~ | -| `cluster` | Brown cluster ID. ~~int~~ | -| `_` | User space for adding custom [attribute extensions](/usage/processing-pipelines#custom-components-attributes). ~~Underscore~~ | +| Name | Description | +| ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `doc` | The parent document. ~~Doc~~ | +| `lex` 3 | The underlying lexeme. ~~Lexeme~~ | +| `sent` | The sentence span that this token is a part of. ~~Span~~ | +| `text` | Verbatim text content. ~~str~~ | +| `text_with_ws` | Text content, with trailing space character if present. ~~str~~ | +| `whitespace_` | Trailing space character if present. ~~str~~ | +| `orth` | ID of the verbatim text content. ~~int~~ | +| `orth_` | Verbatim text content (identical to `Token.text`). Exists mostly for consistency with the other attributes. 
~~str~~ | +| `vocab` | The vocab object of the parent `Doc`. ~~vocab~~ | +| `tensor` | The token's slice of the parent `Doc`'s tensor. ~~numpy.ndarray~~ | +| `head` | The syntactic parent, or "governor", of this token. ~~Token~~ | +| `left_edge` | The leftmost token of this token's syntactic descendants. ~~Token~~ | +| `right_edge` | The rightmost token of this token's syntactic descendants. ~~Token~~ | +| `i` | The index of the token within the parent document. ~~int~~ | +| `ent_type` | Named entity type. ~~int~~ | +| `ent_type_` | Named entity type. ~~str~~ | +| `ent_iob` | IOB code of named entity tag. `3` means the token begins an entity, `2` means it is outside an entity, `1` means it is inside an entity, and `0` means no entity tag is set. ~~int~~ | +| `ent_iob_` | IOB code of named entity tag. "B" means the token begins an entity, "I" means it is inside an entity, "O" means it is outside an entity, and "" means no entity tag is set. ~~str~~ | +| `ent_kb_id` | Knowledge base ID that refers to the named entity this token is a part of, if any. ~~int~~ | +| `ent_kb_id_` | Knowledge base ID that refers to the named entity this token is a part of, if any. ~~str~~ | +| `ent_id` | ID of the entity the token is an instance of, if any. Currently not used, but potentially for coreference resolution. ~~int~~ | +| `ent_id_` | ID of the entity the token is an instance of, if any. Currently not used, but potentially for coreference resolution. ~~str~~ | +| `lemma` | Base form of the token, with no inflectional suffixes. ~~int~~ | +| `lemma_` | Base form of the token, with no inflectional suffixes. ~~str~~ | +| `norm` | The token's norm, i.e. a normalized form of the token text. Can be set in the language's [tokenizer exceptions](/usage/linguistic-features#language-data). ~~int~~ | +| `norm_` | The token's norm, i.e. a normalized form of the token text. Can be set in the language's [tokenizer exceptions](/usage/linguistic-features#language-data). ~~str~~ | +| `lower` | Lowercase form of the token. ~~int~~ | +| `lower_` | Lowercase form of the token text. Equivalent to `Token.text.lower()`. ~~str~~ | +| `shape` | Transform of the token's string to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example,`"Xxxx"`or`"dd"`. ~~int~~ | +| `shape_` | Transform of the token's string to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example,`"Xxxx"`or`"dd"`. ~~str~~ | +| `prefix` | Hash value of a length-N substring from the start of the token. Defaults to `N=1`. ~~int~~ | +| `prefix_` | A length-N substring from the start of the token. Defaults to `N=1`. ~~str~~ | +| `suffix` | Hash value of a length-N substring from the end of the token. Defaults to `N=3`. ~~int~~ | +| `suffix_` | Length-N substring from the end of the token. Defaults to `N=3`. ~~str~~ | +| `is_alpha` | Does the token consist of alphabetic characters? Equivalent to `token.text.isalpha()`. ~~bool~~ | +| `is_ascii` | Does the token consist of ASCII characters? Equivalent to `all(ord(c) < 128 for c in token.text)`. ~~bool~~ | +| `is_digit` | Does the token consist of digits? Equivalent to `token.text.isdigit()`. ~~bool~~ | +| `is_lower` | Is the token in lowercase? Equivalent to `token.text.islower()`. ~~bool~~ | +| `is_upper` | Is the token in uppercase? 
Equivalent to `token.text.isupper()`. ~~bool~~ | +| `is_title` | Is the token in titlecase? Equivalent to `token.text.istitle()`. ~~bool~~ | +| `is_punct` | Is the token punctuation? ~~bool~~ | +| `is_left_punct` | Is the token a left punctuation mark, e.g. `"("` ? ~~bool~~ | +| `is_right_punct` | Is the token a right punctuation mark, e.g. `")"` ? ~~bool~~ | +| `is_sent_start` | Does the token start a sentence? ~~bool~~ or `None` if unknown. Defaults to `True` for the first token in the `Doc`. | +| `is_sent_end` | Does the token end a sentence? ~~bool~~ or `None` if unknown. | +| `is_space` | Does the token consist of whitespace characters? Equivalent to `token.text.isspace()`. ~~bool~~ | +| `is_bracket` | Is the token a bracket? ~~bool~~ | +| `is_quote` | Is the token a quotation mark? ~~bool~~ | +| `is_currency` | Is the token a currency symbol? ~~bool~~ | +| `like_url` | Does the token resemble a URL? ~~bool~~ | +| `like_num` | Does the token represent a number? e.g. "10.9", "10", "ten", etc. ~~bool~~ | +| `like_email` | Does the token resemble an email address? ~~bool~~ | +| `is_oov` | Is the token out-of-vocabulary (i.e. does it not have a word vector)? ~~bool~~ | +| `is_stop` | Is the token part of a "stop list"? ~~bool~~ | +| `pos` | Coarse-grained part-of-speech from the [Universal POS tag set](https://universaldependencies.org/u/pos/). ~~int~~ | +| `pos_` | Coarse-grained part-of-speech from the [Universal POS tag set](https://universaldependencies.org/u/pos/). ~~str~~ | +| `tag` | Fine-grained part-of-speech. ~~int~~ | +| `tag_` | Fine-grained part-of-speech. ~~str~~ | +| `morph` 3 | Morphological analysis. ~~MorphAnalysis~~ | +| `dep` | Syntactic dependency relation. ~~int~~ | +| `dep_` | Syntactic dependency relation. ~~str~~ | +| `lang` | Language of the parent document's vocabulary. ~~int~~ | +| `lang_` | Language of the parent document's vocabulary. ~~str~~ | +| `prob` | Smoothed log probability estimate of token's word type (context-independent entry in the vocabulary). ~~float~~ | +| `idx` | The character offset of the token within the parent document. ~~int~~ | +| `sentiment` | A scalar value indicating the positivity or negativity of the token. ~~float~~ | +| `lex_id` | Sequential ID of the token's lexical type, used to index into tables, e.g. for word vectors. ~~int~~ | +| `rank` | Sequential ID of the token's lexical type, used to index into tables, e.g. for word vectors. ~~int~~ | +| `cluster` | Brown cluster ID. ~~int~~ | +| `_` | User space for adding custom [attribute extensions](/usage/processing-pipelines#custom-components-attributes). ~~Underscore~~ | diff --git a/website/docs/api/top-level.md b/website/docs/api/top-level.md index c798f2a8d..211affa4a 100644 --- a/website/docs/api/top-level.md +++ b/website/docs/api/top-level.md @@ -45,16 +45,16 @@ specified separately using the new `exclude` keyword argument. > nlp = spacy.load("en_core_web_sm", exclude=["parser", "tagger"]) > ``` -| Name | Description | -| ------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `name` | Pipeline to load, i.e. package name or path. ~~Union[str, Path]~~ | -| _keyword-only_ | | -| `vocab` | Optional shared vocab to pass in on initialization. 
If `True` (default), a new `Vocab` object will be created. ~~Union[Vocab, bool]~~ | +| Name | Description | +| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `name` | Pipeline to load, i.e. package name or path. ~~Union[str, Path]~~ | +| _keyword-only_ | | +| `vocab` | Optional shared vocab to pass in on initialization. If `True` (default), a new `Vocab` object will be created. ~~Union[Vocab, bool]~~ | | `disable` | Name(s) of pipeline component(s) to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [nlp.enable_pipe](/api/language#enable_pipe). Is merged with the config entry `nlp.disabled`. ~~Union[str, Iterable[str]]~~ | -| `enable` 3.4 | Name(s) of pipeline component(s) to [enable](/usage/processing-pipelines#disabling). All other pipes will be disabled. ~~Union[str, Iterable[str]]~~ | -| `exclude` 3 | Name(s) of pipeline component(s) to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~Union[str, Iterable[str]]~~ | -| `config` 3 | Optional config overrides, either as nested dict or dict keyed by section value in dot notation, e.g. `"components.name.value"`. ~~Union[Dict[str, Any], Config]~~ | -| **RETURNS** | A `Language` object with the loaded pipeline. ~~Language~~ | +| `enable` 3.4 | Name(s) of pipeline component(s) to [enable](/usage/processing-pipelines#disabling). All other pipes will be disabled. ~~Union[str, Iterable[str]]~~ | +| `exclude` 3 | Name(s) of pipeline component(s) to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~Union[str, Iterable[str]]~~ | +| `config` 3 | Optional config overrides, either as nested dict or dict keyed by section value in dot notation, e.g. `"components.name.value"`. ~~Union[Dict[str, Any], Config]~~ | +| **RETURNS** | A `Language` object with the loaded pipeline. ~~Language~~ | Essentially, `spacy.load()` is a convenience wrapper that reads the pipeline's [`config.cfg`](/api/data-formats#config), uses the language and pipeline @@ -354,22 +354,22 @@ If a setting is not present in the options, the default value will be used. > displacy.serve(doc, style="dep", options=options) > ``` -| Name | Description | -| ------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------- | -| `fine_grained` | Use fine-grained part-of-speech tags (`Token.tag_`) instead of coarse-grained tags (`Token.pos_`). Defaults to `False`. ~~bool~~ | -| `add_lemma` 2.2.4 | Print the lemmas in a separate row below the token texts. Defaults to `False`. ~~bool~~ | -| `collapse_punct` | Attach punctuation to tokens. Can make the parse more readable, as it prevents long arcs to attach punctuation. Defaults to `True`. ~~bool~~ | -| `collapse_phrases` | Merge noun phrases into one token. Defaults to `False`. ~~bool~~ | -| `compact` | "Compact mode" with square arrows that takes up less space. Defaults to `False`. ~~bool~~ | -| `color` | Text color (HEX, RGB or color names). Defaults to `"#000000"`. ~~str~~ | -| `bg` | Background color (HEX, RGB or color names). Defaults to `"#ffffff"`. 
~~str~~ | -| `font` | Font name or font family for all text. Defaults to `"Arial"`. ~~str~~ | -| `offset_x` | Spacing on left side of the SVG in px. Defaults to `50`. ~~int~~ | -| `arrow_stroke` | Width of arrow path in px. Defaults to `2`. ~~int~~ | -| `arrow_width` | Width of arrow head in px. Defaults to `10` in regular mode and `8` in compact mode. ~~int~~ | -| `arrow_spacing` | Spacing between arrows in px to avoid overlaps. Defaults to `20` in regular mode and `12` in compact mode. ~~int~~ | -| `word_spacing` | Vertical spacing between words and arcs in px. Defaults to `45`. ~~int~~ | -| `distance` | Distance between words in px. Defaults to `175` in regular mode and `150` in compact mode. ~~int~~ | +| Name | Description | +| ------------------ | -------------------------------------------------------------------------------------------------------------------------------------------- | +| `fine_grained` | Use fine-grained part-of-speech tags (`Token.tag_`) instead of coarse-grained tags (`Token.pos_`). Defaults to `False`. ~~bool~~ | +| `add_lemma` | Print the lemmas in a separate row below the token texts. Defaults to `False`. ~~bool~~ | +| `collapse_punct` | Attach punctuation to tokens. Can make the parse more readable, as it prevents long arcs to attach punctuation. Defaults to `True`. ~~bool~~ | +| `collapse_phrases` | Merge noun phrases into one token. Defaults to `False`. ~~bool~~ | +| `compact` | "Compact mode" with square arrows that takes up less space. Defaults to `False`. ~~bool~~ | +| `color` | Text color (HEX, RGB or color names). Defaults to `"#000000"`. ~~str~~ | +| `bg` | Background color (HEX, RGB or color names). Defaults to `"#ffffff"`. ~~str~~ | +| `font` | Font name or font family for all text. Defaults to `"Arial"`. ~~str~~ | +| `offset_x` | Spacing on left side of the SVG in px. Defaults to `50`. ~~int~~ | +| `arrow_stroke` | Width of arrow path in px. Defaults to `2`. ~~int~~ | +| `arrow_width` | Width of arrow head in px. Defaults to `10` in regular mode and `8` in compact mode. ~~int~~ | +| `arrow_spacing` | Spacing between arrows in px to avoid overlaps. Defaults to `20` in regular mode and `12` in compact mode. ~~int~~ | +| `word_spacing` | Vertical spacing between words and arcs in px. Defaults to `45`. ~~int~~ | +| `distance` | Distance between words in px. Defaults to `175` in regular mode and `150` in compact mode. ~~int~~ | #### Named Entity Visualizer options {#displacy_options-ent} @@ -385,7 +385,7 @@ If a setting is not present in the options, the default value will be used. | ------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `ents` | Entity types to highlight or `None` for all types (default). ~~Optional[List[str]]~~ | | `colors` | Color overrides. Entity types should be mapped to color names or values. ~~Dict[str, str]~~ | -| `template` 2.2 | Optional template to overwrite the HTML used to render entity spans. Should be a format string and can use `{bg}`, `{text}` and `{label}`. See [`templates.py`](%%GITHUB_SPACY/spacy/displacy/templates.py) for examples. ~~Optional[str]~~ | +| `template` | Optional template to overwrite the HTML used to render entity spans. Should be a format string and can use `{bg}`, `{text}` and `{label}`. See [`templates.py`](%%GITHUB_SPACY/spacy/displacy/templates.py) for examples. 
~~Optional[str]~~ | | `kb_url_template` 3.2.1 | Optional template to construct the KB url for the entity to link to. Expects a python f-string format with single field to fill in. ~~Optional[str]~~ | #### Span Visualizer options {#displacy_options-span} diff --git a/website/docs/api/vocab.md b/website/docs/api/vocab.md index 2e4a206ec..afbd1301d 100644 --- a/website/docs/api/vocab.md +++ b/website/docs/api/vocab.md @@ -21,15 +21,15 @@ Create the vocabulary. > vocab = Vocab(strings=["hello", "world"]) > ``` -| Name | Description | -| ------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `lex_attr_getters` | A dictionary mapping attribute IDs to functions to compute them. Defaults to `None`. ~~Optional[Dict[str, Callable[[str], Any]]]~~ | -| `strings` | A [`StringStore`](/api/stringstore) that maps strings to hash values, and vice versa, or a list of strings. ~~Union[List[str], StringStore]~~ | -| `lookups` | A [`Lookups`](/api/lookups) that stores the `lexeme_norm` and other large lookup tables. Defaults to `None`. ~~Optional[Lookups]~~ | -| `oov_prob` | The default OOV probability. Defaults to `-20.0`. ~~float~~ | -| `vectors_name` 2.2 | A name to identify the vectors table. ~~str~~ | -| `writing_system` | A dictionary describing the language's writing system. Typically provided by [`Language.Defaults`](/api/language#defaults). ~~Dict[str, Any]~~ | -| `get_noun_chunks` | A function that yields base noun phrases used for [`Doc.noun_chunks`](/api/doc#noun_chunks). ~~Optional[Callable[[Union[Doc, Span], Iterator[Tuple[int, int, int]]]]]~~ | +| Name | Description | +| ------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `lex_attr_getters` | A dictionary mapping attribute IDs to functions to compute them. Defaults to `None`. ~~Optional[Dict[str, Callable[[str], Any]]]~~ | +| `strings` | A [`StringStore`](/api/stringstore) that maps strings to hash values, and vice versa, or a list of strings. ~~Union[List[str], StringStore]~~ | +| `lookups` | A [`Lookups`](/api/lookups) that stores the `lexeme_norm` and other large lookup tables. Defaults to `None`. ~~Optional[Lookups]~~ | +| `oov_prob` | The default OOV probability. Defaults to `-20.0`. ~~float~~ | +| `vectors_name` | A name to identify the vectors table. ~~str~~ | +| `writing_system` | A dictionary describing the language's writing system. Typically provided by [`Language.Defaults`](/api/language#defaults). ~~Dict[str, Any]~~ | +| `get_noun_chunks` | A function that yields base noun phrases used for [`Doc.noun_chunks`](/api/doc#noun_chunks). ~~Optional[Callable[[Union[Doc, Span], Iterator[Tuple[int, int, int]]]]]~~ | ## Vocab.\_\_len\_\_ {#len tag="method"} @@ -311,10 +311,10 @@ Load state from a binary string. | Name | Description | | ---------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `strings` | A table managing the string-to-int mapping. ~~StringStore~~ | -| `vectors` 2 | A table associating word IDs to word vectors. ~~Vectors~~ | +| `vectors` | A table associating word IDs to word vectors. ~~Vectors~~ | | `vectors_length` | Number of dimensions for each word vector. 
~~int~~ | | `lookups` | The available lookup tables in this vocab. ~~Lookups~~ | -| `writing_system` 2.1 | A dict with information about the language's writing system. ~~Dict[str, Any]~~ | +| `writing_system` | A dict with information about the language's writing system. ~~Dict[str, Any]~~ | | `get_noun_chunks` 3.0 | A function that yields base noun phrases used for [`Doc.noun_chunks`](/ap/doc#noun_chunks). ~~Optional[Callable[[Union[Doc, Span], Iterator[Tuple[int, int, int]]]]]~~ | ## Serialization fields {#serialization-fields} diff --git a/website/docs/usage/rule-based-matching.md b/website/docs/usage/rule-based-matching.md index 64bbf8e7b..ad8ea27f3 100644 --- a/website/docs/usage/rule-based-matching.md +++ b/website/docs/usage/rule-based-matching.md @@ -162,7 +162,7 @@ rule-based matching are: | Attribute | Description | | ---------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `ORTH` | The exact verbatim text of a token. ~~str~~ | -| `TEXT` 2.1 | The exact verbatim text of a token. ~~str~~ | +| `TEXT` | The exact verbatim text of a token. ~~str~~ | | `NORM` | The normalized form of the token text. ~~str~~ | | `LOWER` | The lowercase form of the token text. ~~str~~ | | `LENGTH` | The length of the token text. ~~int~~ | @@ -174,7 +174,7 @@ rule-based matching are: | `SPACY` | Token has a trailing space. ~~bool~~ | | `POS`, `TAG`, `MORPH`, `DEP`, `LEMMA`, `SHAPE` | The token's simple and extended part-of-speech tag, morphological analysis, dependency label, lemma, shape. Note that the values of these attributes are case-sensitive. For a list of available part-of-speech tags and dependency labels, see the [Annotation Specifications](/api/annotation). ~~str~~ | | `ENT_TYPE` | The token's entity label. ~~str~~ | -| `_` 2.1 | Properties in [custom extension attributes](/usage/processing-pipelines#custom-components-attributes). ~~Dict[str, Any]~~ | +| `_` | Properties in [custom extension attributes](/usage/processing-pipelines#custom-components-attributes). ~~Dict[str, Any]~~ | | `OP` | [Operator or quantifier](#quantifiers) to determine how often to match a token pattern. ~~str~~ | @@ -375,7 +375,7 @@ scoped quantifiers – instead, you can build those behaviors with `on_match` callbacks. | OP | Description | -|---------|------------------------------------------------------------------------| +| ------- | ---------------------------------------------------------------------- | | `!` | Negate the pattern, by requiring it to match exactly 0 times. | | `?` | Make the pattern optional, by allowing it to match 0 or 1 times. | | `+` | Require the pattern to match 1 or more times. | diff --git a/website/docs/usage/saving-loading.md b/website/docs/usage/saving-loading.md index 0fd713a49..29870a2e3 100644 --- a/website/docs/usage/saving-loading.md +++ b/website/docs/usage/saving-loading.md @@ -306,12 +306,12 @@ pipeline component factories, language classes and other settings. To make spaCy use your entry points, your package needs to expose them and it needs to be installed in the same environment – that's it. 
-| Entry point | Description | -| ------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| [`spacy_factories`](#entry-points-components) | Group of entry points for pipeline component factories, keyed by component name. Can be used to expose custom components defined by another package. | -| [`spacy_languages`](#entry-points-languages) | Group of entry points for custom [`Language` subclasses](/usage/linguistic-features#language-data), keyed by language shortcut. | -| `spacy_lookups` 2.2 | Group of entry points for custom [`Lookups`](/api/lookups), including lemmatizer data. Used by spaCy's [`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data) package. | -| [`spacy_displacy_colors`](#entry-points-displacy) 2.2 | Group of entry points of custom label colors for the [displaCy visualizer](/usage/visualizers#ent). The key name doesn't matter, but it should point to a dict of labels and color values. Useful for custom models that predict different entity types. | +| Entry point | Description | +| ------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| [`spacy_factories`](#entry-points-components) | Group of entry points for pipeline component factories, keyed by component name. Can be used to expose custom components defined by another package. | +| [`spacy_languages`](#entry-points-languages) | Group of entry points for custom [`Language` subclasses](/usage/linguistic-features#language-data), keyed by language shortcut. | +| `spacy_lookups` | Group of entry points for custom [`Lookups`](/api/lookups), including lemmatizer data. Used by spaCy's [`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data) package. | +| [`spacy_displacy_colors`](#entry-points-displacy) | Group of entry points of custom label colors for the [displaCy visualizer](/usage/visualizers#ent). The key name doesn't matter, but it should point to a dict of labels and color values. Useful for custom models that predict different entity types. 
| ### Custom components via entry points {#entry-points-components} From bb523d4d9105d417e240e6f8f83b63ed3dcc565e Mon Sep 17 00:00:00 2001 From: Paul O'Leary McCann Date: Mon, 14 Nov 2022 19:58:38 +0900 Subject: [PATCH 15/55] Remove spacy-ray from docs (#11781) * Remove spacy ray from cli docs * Remove more ray docs * Remove ray from universe --- website/docs/api/cli.md | 45 --------------------- website/docs/usage/index.md | 1 - website/docs/usage/projects.md | 48 ----------------------- website/docs/usage/training.md | 71 ---------------------------------- website/meta/universe.json | 11 ------ 5 files changed, 176 deletions(-) diff --git a/website/docs/api/cli.md b/website/docs/api/cli.md index 024450920..6e581b903 100644 --- a/website/docs/api/cli.md +++ b/website/docs/api/cli.md @@ -15,7 +15,6 @@ menu: - ['assemble', 'assemble'] - ['package', 'package'] - ['project', 'project'] - - ['ray', 'ray'] - ['huggingface-hub', 'huggingface-hub'] --- @@ -1502,50 +1501,6 @@ $ python -m spacy project dvc [project_dir] [workflow] [--force] [--verbose] [-- | `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | | **CREATES** | A `dvc.yaml` file in the project directory, based on the steps defined in the given workflow. | -## ray {#ray new="3"} - -The `spacy ray` CLI includes commands for parallel and distributed computing via -[Ray](https://ray.io). - - - -To use this command, you need the -[`spacy-ray`](https://github.com/explosion/spacy-ray) package installed. -Installing the package will automatically add the `ray` command to the spaCy -CLI. - - - -### ray train {#ray-train tag="command"} - -Train a spaCy pipeline using [Ray](https://ray.io) for parallel training. The -command works just like [`spacy train`](/api/cli#train). For more details and -examples, see the usage guide on -[parallel training](/usage/training#parallel-training) and the spaCy project -[integration](/usage/projects#ray). - -```cli -$ python -m spacy ray train [config_path] [--code] [--output] [--n-workers] [--address] [--gpu-id] [--verbose] [overrides] -``` - -> #### Example -> -> ```cli -> $ python -m spacy ray train config.cfg --n-workers 2 -> ``` - -| Name | Description | -| ------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `config_path` | Path to [training config](/api/data-formats#config) file containing all settings and hyperparameters. ~~Path (positional)~~ | -| `--code`, `-c` | Path to Python file with additional code to be imported. Allows [registering custom functions](/usage/training#custom-functions) for new architectures. ~~Optional[Path] \(option)~~ | -| `--output`, `-o` | Directory or remote storage URL for saving trained pipeline. The directory will be created if it doesn't exist. ~~Optional[Path] \(option)~~ | -| `--n-workers`, `-n` | The number of workers. Defaults to `1`. ~~int (option)~~ | -| `--address`, `-a` | Optional address of the Ray cluster. If not set (default), Ray will run locally. ~~Optional[str] \(option)~~ | -| `--gpu-id`, `-g` | GPU ID or `-1` for CPU. Defaults to `-1`. ~~int (option)~~ | -| `--verbose`, `-V` | Display more information for debugging purposes. ~~bool (flag)~~ | -| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | -| overrides | Config parameters to override. Should be options starting with `--` that correspond to the config section and value to override, e.g. 
`--paths.train ./train.spacy`. ~~Any (option/flag)~~ | - ## huggingface-hub {#huggingface-hub new="3.1"} The `spacy huggingface-cli` CLI includes commands for uploading your trained diff --git a/website/docs/usage/index.md b/website/docs/usage/index.md index 1f4869606..dff5a16ba 100644 --- a/website/docs/usage/index.md +++ b/website/docs/usage/index.md @@ -75,7 +75,6 @@ spaCy's [`setup.cfg`](%%GITHUB_SPACY/setup.cfg) for details on what's included. | ---------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `lookups` | Install [`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data) for data tables for lemmatization and lexeme normalization. The data is serialized with trained pipelines, so you only need this package if you want to train your own models. | | `transformers` | Install [`spacy-transformers`](https://github.com/explosion/spacy-transformers). The package will be installed automatically when you install a transformer-based pipeline. | -| `ray` | Install [`spacy-ray`](https://github.com/explosion/spacy-ray) to add CLI commands for [parallel training](/usage/training#parallel-training). | | `cuda`, ... | Install spaCy with GPU support provided by [CuPy](https://cupy.chainer.org) for your given CUDA version. See the GPU [installation instructions](#gpu) for details and options. | | `apple` | Install [`thinc-apple-ops`](https://github.com/explosion/thinc-apple-ops) to improve performance on an Apple M1. | | `ja`, `ko`, `th` | Install additional dependencies required for tokenization for the [languages](/usage/models#languages). | diff --git a/website/docs/usage/projects.md b/website/docs/usage/projects.md index 90b612358..34315e4e7 100644 --- a/website/docs/usage/projects.md +++ b/website/docs/usage/projects.md @@ -1014,54 +1014,6 @@ https://github.com/explosion/projects/blob/v3/integrations/fastapi/scripts/main. --- -### Ray {#ray} - -> #### Installation -> -> ```cli -> $ pip install -U %%SPACY_PKG_NAME[ray]%%SPACY_PKG_FLAGS -> # Check that the CLI is registered -> $ python -m spacy ray --help -> ``` - -[Ray](https://ray.io/) is a fast and simple framework for building and running -**distributed applications**. You can use Ray for parallel and distributed -training with spaCy via our lightweight -[`spacy-ray`](https://github.com/explosion/spacy-ray) extension package. If the -package is installed in the same environment as spaCy, it will automatically add -[`spacy ray`](/api/cli#ray) commands to your spaCy CLI. See the usage guide on -[parallel training](/usage/training#parallel-training) for more details on how -it works under the hood. - - - -Get started with parallel training using our project template. It trains a -simple model on a Universal Dependencies Treebank and lets you parallelize the -training with Ray. - - - -You can integrate [`spacy ray train`](/api/cli#ray-train) into your -`project.yml` just like the regular training command and pass it the config, and -optional output directory or remote storage URL and config overrides if needed. 
- - -```yaml -### project.yml -commands: - - name: "ray" - help: "Train a model via parallel training with Ray" - script: - - "python -m spacy ray train configs/config.cfg -o training/ --paths.train corpus/train.spacy --paths.dev corpus/dev.spacy" - deps: - - "corpus/train.spacy" - - "corpus/dev.spacy" - outputs: - - "training/model-best" -``` - ---- - ### Weights & Biases {#wandb} [Weights & Biases](https://www.wandb.com/) is a popular platform for experiment diff --git a/website/docs/usage/training.md b/website/docs/usage/training.md index 27a8bbca7..e40a395c4 100644 --- a/website/docs/usage/training.md +++ b/website/docs/usage/training.md @@ -1572,77 +1572,6 @@ token-based annotations like the dependency parse or entity labels, you'll need to take care to adjust the `Example` object so its annotations match and remain valid. -## Parallel & distributed training with Ray {#parallel-training} - -> #### Installation -> -> ```cli -> $ pip install -U %%SPACY_PKG_NAME[ray]%%SPACY_PKG_FLAGS -> # Check that the CLI is registered -> $ python -m spacy ray --help -> ``` - -[Ray](https://ray.io/) is a fast and simple framework for building and running -**distributed applications**. You can use Ray to train spaCy on one or more -remote machines, potentially speeding up your training process. Parallel -training won't always be faster though – it depends on your batch size, models, -and hardware. - - - -To use Ray with spaCy, you need the -[`spacy-ray`](https://github.com/explosion/spacy-ray) package installed. -Installing the package will automatically add the `ray` command to the spaCy -CLI. - - - -The [`spacy ray train`](/api/cli#ray-train) command follows the same API as -[`spacy train`](/api/cli#train), with a few extra options to configure the Ray -setup. You can optionally set the `--address` option to point to your Ray -cluster. If it's not set, Ray will run locally. - -```cli -python -m spacy ray train config.cfg --n-workers 2 -``` - - - -Get started with parallel training using our project template. It trains a -simple model on a Universal Dependencies Treebank and lets you parallelize the -training with Ray. - - - -### How parallel training works {#parallel-training-details} - -Each worker receives a shard of the **data** and builds a copy of the **model -and optimizer** from the [`config.cfg`](#config). It also has a communication -channel to **pass gradients and parameters** to the other workers. Additionally, -each worker is given ownership of a subset of the parameter arrays. Every -parameter array is owned by exactly one worker, and the workers are given a -mapping so they know which worker owns which parameter. - -![Illustration of setup](../images/spacy-ray.svg) - -As training proceeds, every worker will be computing gradients for **all** of -the model parameters. When they compute gradients for parameters they don't own, -they'll **send them to the worker** that does own that parameter, along with a -version identifier so that the owner can decide whether to discard the gradient. -Workers use the gradients they receive and the ones they compute locally to -update the parameters they own, and then broadcast the updated array and a new -version ID to the other workers. - -This training procedure is **asynchronous** and **non-blocking**. Workers always -push their gradient increments and parameter updates, they do not have to pull -them and block on the result, so the transfers can happen in the background, -overlapped with the actual training work. 
The workers also do not have to stop -and wait for each other ("synchronize") at the start of each batch. This is very -useful for spaCy, because spaCy is often trained on long documents, which means -**batches can vary in size** significantly. Uneven workloads make synchronous -gradient descent inefficient, because if one batch is slow, all of the other -workers are stuck waiting for it to complete before they can continue. - ## Internal training API {#api} diff --git a/website/meta/universe.json b/website/meta/universe.json index fa765f640..661f5da12 100644 --- a/website/meta/universe.json +++ b/website/meta/universe.json @@ -557,17 +557,6 @@ "tags": ["sentiment", "textblob"], "spacy_version": 3 }, - { - "id": "spacy-ray", - "title": "spacy-ray", - "slogan": "Parallel and distributed training with spaCy and Ray", - "description": "[Ray](https://ray.io/) is a fast and simple framework for building and running **distributed applications**. This very lightweight extension package lets you use Ray for parallel and distributed training with spaCy. If `spacy-ray` is installed in the same environment as spaCy, it will automatically add `spacy ray` commands to your spaCy CLI.", - "github": "explosion/spacy-ray", - "pip": "spacy-ray", - "category": ["training"], - "author": "Explosion / Anyscale", - "thumb": "https://i.imgur.com/7so6ZpS.png" - }, { "id": "spacy-sentence-bert", "title": "spaCy - sentence-transformers", From 9baa686f827eeaecf28bf6d75836eeaec090cd69 Mon Sep 17 00:00:00 2001 From: Peter Baumgartner <5107405+pmbaumgartner@users.noreply.github.com> Date: Mon, 14 Nov 2022 10:53:14 -0500 Subject: [PATCH 16/55] remove migration support form (#11802) --- website/docs/usage/v3.md | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/website/docs/usage/v3.md b/website/docs/usage/v3.md index 971779ed3..64f93b7c0 100644 --- a/website/docs/usage/v3.md +++ b/website/docs/usage/v3.md @@ -15,18 +15,6 @@ menu: > To help you make the transition from v2.x to v3.0, we've uploaded the old > website to [**v2.spacy.io**](https://v2.spacy.io/docs). - - -Want to make the transition from spaCy v2 to spaCy v3 as smooth as possible for -you and your organization? We're now offering commercial **migration support** -for your spaCy pipelines! We've put a lot of work into making it easy to upgrade -your existing code and training workflows – but custom projects may always need -some custom work, especially when it comes to taking advantage of the new -capabilities. -[**Details & application →**](https://form.typeform.com/to/vMs2zSjM) - - -
From 7e684ad691992e759e71026a11c1ddd77c401f39 Mon Sep 17 00:00:00 2001 From: Denis Bezykornov Date: Tue, 15 Nov 2022 13:37:25 +0300 Subject: [PATCH 17/55] Update russian tokenizer exceptions (#11753) * Fix typos, add couple of new abbreviations, remove nonbreaking spaces * Remove space from abbreviation Co-authored-by: Adriane Boyd --- spacy/lang/ru/tokenizer_exceptions.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/spacy/lang/ru/tokenizer_exceptions.py b/spacy/lang/ru/tokenizer_exceptions.py index f3756e26c..e1889f785 100644 --- a/spacy/lang/ru/tokenizer_exceptions.py +++ b/spacy/lang/ru/tokenizer_exceptions.py @@ -61,6 +61,11 @@ for abbr in [ {ORTH: "2к23", NORM: "2023"}, {ORTH: "2к24", NORM: "2024"}, {ORTH: "2к25", NORM: "2025"}, + {ORTH: "2к26", NORM: "2026"}, + {ORTH: "2к27", NORM: "2027"}, + {ORTH: "2к28", NORM: "2028"}, + {ORTH: "2к29", NORM: "2029"}, + {ORTH: "2к30", NORM: "2030"}, ]: _exc[abbr[ORTH]] = [abbr] @@ -268,8 +273,8 @@ for abbr in [ {ORTH: "з-ка", NORM: "заимка"}, {ORTH: "п-к", NORM: "починок"}, {ORTH: "киш.", NORM: "кишлак"}, - {ORTH: "п. ст. ", NORM: "поселок станция"}, - {ORTH: "п. ж/д ст. ", NORM: "поселок при железнодорожной станции"}, + {ORTH: "п. ст.", NORM: "поселок станция"}, + {ORTH: "п. ж/д ст.", NORM: "поселок при железнодорожной станции"}, {ORTH: "ж/д бл-ст", NORM: "железнодорожный блокпост"}, {ORTH: "ж/д б-ка", NORM: "железнодорожная будка"}, {ORTH: "ж/д в-ка", NORM: "железнодорожная ветка"}, @@ -280,12 +285,12 @@ for abbr in [ {ORTH: "ж/д п.п.", NORM: "железнодорожный путевой пост"}, {ORTH: "ж/д о.п.", NORM: "железнодорожный остановочный пункт"}, {ORTH: "ж/д рзд.", NORM: "железнодорожный разъезд"}, - {ORTH: "ж/д ст. ", NORM: "железнодорожная станция"}, + {ORTH: "ж/д ст.", NORM: "железнодорожная станция"}, {ORTH: "м-ко", NORM: "местечко"}, {ORTH: "д.", NORM: "деревня"}, {ORTH: "с.", NORM: "село"}, {ORTH: "сл.", NORM: "слобода"}, - {ORTH: "ст. ", NORM: "станция"}, + {ORTH: "ст.", NORM: "станция"}, {ORTH: "ст-ца", NORM: "станица"}, {ORTH: "у.", NORM: "улус"}, {ORTH: "х.", NORM: "хутор"}, @@ -388,8 +393,9 @@ for abbr in [ {ORTH: "прим.", NORM: "примечание"}, {ORTH: "прим.ред.", NORM: "примечание редакции"}, {ORTH: "см. также", NORM: "смотри также"}, - {ORTH: "кв.м.", NORM: "квадрантный метр"}, - {ORTH: "м2", NORM: "квадрантный метр"}, + {ORTH: "см.", NORM: "смотри"}, + {ORTH: "кв.м.", NORM: "квадратный метр"}, + {ORTH: "м2", NORM: "квадратный метр"}, {ORTH: "б/у", NORM: "бывший в употреблении"}, {ORTH: "сокр.", NORM: "сокращение"}, {ORTH: "чел.", NORM: "человек"}, From caa9efad5991d574cf2bdc69fabfc6d952d5cba9 Mon Sep 17 00:00:00 2001 From: Sofie Van Landeghem Date: Tue, 15 Nov 2022 14:15:00 +0100 Subject: [PATCH 18/55] prevent rewriting an already raw URL (#11810) --- spacy/cli/project/assets.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/spacy/cli/project/assets.py b/spacy/cli/project/assets.py index 61438d1a8..8f35b2d23 100644 --- a/spacy/cli/project/assets.py +++ b/spacy/cli/project/assets.py @@ -189,7 +189,11 @@ def convert_asset_url(url: str) -> str: RETURNS (str): The converted URL. 
""" # If the asset URL is a regular GitHub URL it's likely a mistake - if re.match(r"(http(s?)):\/\/github.com", url) and "releases/download" not in url: + if ( + re.match(r"(http(s?)):\/\/github.com", url) + and "releases/download" not in url + and "/raw/" not in url + ): converted = url.replace("github.com", "raw.githubusercontent.com") converted = re.sub(r"/(tree|blob)/", "/", converted) msg.warn( From c0c54e44bc70ca737b421def1f6ce3c30809a54b Mon Sep 17 00:00:00 2001 From: Paul O'Leary McCann Date: Wed, 16 Nov 2022 17:44:42 +0900 Subject: [PATCH 19/55] Add equality definition for vectors (#11806) * Add equality definition for vectors This re-uses the check from sourcing components. * Use the equality check * Format Co-authored-by: Adriane Boyd --- spacy/language.py | 8 +------- spacy/tests/vocab_vectors/test_vectors.py | 20 ++++++++++++++++++++ spacy/vectors.pyx | 9 +++++++++ 3 files changed, 30 insertions(+), 7 deletions(-) diff --git a/spacy/language.py b/spacy/language.py index 967af1e62..836f3abf9 100644 --- a/spacy/language.py +++ b/spacy/language.py @@ -706,13 +706,7 @@ class Language: # Check source type if not isinstance(source, Language): raise ValueError(Errors.E945.format(name=source_name, source=type(source))) - # Check vectors, with faster checks first - if ( - self.vocab.vectors.shape != source.vocab.vectors.shape - or self.vocab.vectors.key2row != source.vocab.vectors.key2row - or self.vocab.vectors.to_bytes(exclude=["strings"]) - != source.vocab.vectors.to_bytes(exclude=["strings"]) - ): + if self.vocab.vectors != source.vocab.vectors: warnings.warn(Warnings.W113.format(name=source_name)) if source_name not in source.component_names: raise KeyError( diff --git a/spacy/tests/vocab_vectors/test_vectors.py b/spacy/tests/vocab_vectors/test_vectors.py index dd2cfc596..70835816d 100644 --- a/spacy/tests/vocab_vectors/test_vectors.py +++ b/spacy/tests/vocab_vectors/test_vectors.py @@ -626,3 +626,23 @@ def test_floret_vectors(floret_vectors_vec_str, floret_vectors_hashvec_str): OPS.to_numpy(vocab_r[word].vector), decimal=6, ) + + +def test_equality(): + vectors1 = Vectors(shape=(10, 10)) + vectors2 = Vectors(shape=(10, 8)) + + assert vectors1 != vectors2 + + vectors2 = Vectors(shape=(10, 10)) + assert vectors1 == vectors2 + + vectors1.add("hello", row=2) + assert vectors1 != vectors2 + + vectors2.add("hello", row=2) + assert vectors1 == vectors2 + + vectors1.resize((5, 9)) + vectors2.resize((5, 9)) + assert vectors1 == vectors2 diff --git a/spacy/vectors.pyx b/spacy/vectors.pyx index 8300220c1..be0f6db09 100644 --- a/spacy/vectors.pyx +++ b/spacy/vectors.pyx @@ -243,6 +243,15 @@ cdef class Vectors: else: return key in self.key2row + def __eq__(self, other): + # Check for equality, with faster checks first + return ( + self.shape == other.shape + and self.key2row == other.key2row + and self.to_bytes(exclude=["strings"]) + == other.to_bytes(exclude=["strings"]) + ) + def resize(self, shape, inplace=False): """Resize the underlying vectors array. If inplace=True, the memory is reallocated. 
This may cause other references to the data to become From 317b6ef99c0e3512466d31a8274f9fe6a2894355 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Wed, 16 Nov 2022 14:09:10 +0100 Subject: [PATCH 20/55] Update to mypy 0.990 (#11801) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index d91a3b3d4..23bfa6f14 100644 --- a/requirements.txt +++ b/requirements.txt @@ -30,7 +30,7 @@ pytest-timeout>=1.3.0,<2.0.0 mock>=2.0.0,<3.0.0 flake8>=3.8.0,<6.0.0 hypothesis>=3.27.0,<7.0.0 -mypy>=0.980,<0.990; platform_machine != "aarch64" and python_version >= "3.7" +mypy>=0.990,<0.1000; platform_machine != "aarch64" and python_version >= "3.7" types-dataclasses>=0.1.3; python_version < "3.7" types-mock>=0.1.1 types-setuptools>=57.0.0 From 75bb7ad541a94c74127b57ffd6d674841767478c Mon Sep 17 00:00:00 2001 From: Paul O'Leary McCann Date: Thu, 17 Nov 2022 18:25:01 +0900 Subject: [PATCH 21/55] Check textcat values for validity (#11763) * Check textcat values for validity * Fix error numbers * Clean up vals reference * Check category value validity through training The _validate_categories is called in update, which for multilabel is inherited from the single label component. * Formatting --- spacy/errors.py | 2 ++ spacy/pipeline/textcat.py | 10 +++++++--- spacy/pipeline/textcat_multilabel.py | 8 +++++++- spacy/tests/pipeline/test_textcat.py | 24 ++++++++++++++++++++++++ 4 files changed, 40 insertions(+), 4 deletions(-) diff --git a/spacy/errors.py b/spacy/errors.py index 278e5496a..1d29f0e17 100644 --- a/spacy/errors.py +++ b/spacy/errors.py @@ -544,6 +544,8 @@ class Errors(metaclass=ErrorsWithCodes): "during training, make sure to include it in 'annotating components'") # New errors added in v3.x + E851 = ("The 'textcat' component labels should only have values of 0 or 1, " + "but found value of '{val}'.") E852 = ("The tar file pulled from the remote attempted an unsafe path " "traversal.") E853 = ("Unsupported component factory name '{name}'. The character '.' 
is " diff --git a/spacy/pipeline/textcat.py b/spacy/pipeline/textcat.py index 4023c4456..a86eb99d2 100644 --- a/spacy/pipeline/textcat.py +++ b/spacy/pipeline/textcat.py @@ -293,7 +293,7 @@ class TextCategorizer(TrainablePipe): bp_scores(gradient) if sgd is not None: self.finish_update(sgd) - losses[self.name] += (gradient**2).sum() + losses[self.name] += (gradient ** 2).sum() return losses def _examples_to_truth( @@ -327,7 +327,7 @@ class TextCategorizer(TrainablePipe): not_missing = self.model.ops.asarray(not_missing) # type: ignore d_scores = scores - truths d_scores *= not_missing - mean_square_error = (d_scores**2).mean() + mean_square_error = (d_scores ** 2).mean() return float(mean_square_error), d_scores def add_label(self, label: str) -> int: @@ -401,5 +401,9 @@ class TextCategorizer(TrainablePipe): def _validate_categories(self, examples: Iterable[Example]): """Check whether the provided examples all have single-label cats annotations.""" for ex in examples: - if list(ex.reference.cats.values()).count(1.0) > 1: + vals = list(ex.reference.cats.values()) + if vals.count(1.0) > 1: raise ValueError(Errors.E895.format(value=ex.reference.cats)) + for val in vals: + if not (val == 1.0 or val == 0.0): + raise ValueError(Errors.E851.format(val=val)) diff --git a/spacy/pipeline/textcat_multilabel.py b/spacy/pipeline/textcat_multilabel.py index eb83d9cb7..ef9bd6557 100644 --- a/spacy/pipeline/textcat_multilabel.py +++ b/spacy/pipeline/textcat_multilabel.py @@ -192,6 +192,8 @@ class MultiLabel_TextCategorizer(TextCategorizer): for label in labels: self.add_label(label) subbatch = list(islice(get_examples(), 10)) + self._validate_categories(subbatch) + doc_sample = [eg.reference for eg in subbatch] label_sample, _ = self._examples_to_truth(subbatch) self._require_labels() @@ -202,4 +204,8 @@ class MultiLabel_TextCategorizer(TextCategorizer): def _validate_categories(self, examples: Iterable[Example]): """This component allows any type of single- or multi-label annotations. This method overwrites the more strict one from 'textcat'.""" - pass + # check that annotation values are valid + for ex in examples: + for val in ex.reference.cats.values(): + if not (val == 1.0 or val == 0.0): + raise ValueError(Errors.E851.format(val=val)) diff --git a/spacy/tests/pipeline/test_textcat.py b/spacy/tests/pipeline/test_textcat.py index d359b77db..2eda9deaf 100644 --- a/spacy/tests/pipeline/test_textcat.py +++ b/spacy/tests/pipeline/test_textcat.py @@ -360,6 +360,30 @@ def test_label_types(name): nlp.initialize() +@pytest.mark.parametrize( + "name,get_examples", + [ + ("textcat", make_get_examples_single_label), + ("textcat_multilabel", make_get_examples_multi_label), + ], +) +def test_invalid_label_value(name, get_examples): + nlp = Language() + textcat = nlp.add_pipe(name) + example_getter = get_examples(nlp) + + def invalid_examples(): + # make one example with an invalid score + examples = example_getter() + ref = examples[0].reference + key = list(ref.cats.keys())[0] + ref.cats[key] = 2.0 + return examples + + with pytest.raises(ValueError): + nlp.initialize(get_examples=invalid_examples) + + @pytest.mark.parametrize("name", ["textcat", "textcat_multilabel"]) def test_no_label(name): nlp = Language() From a83463c5e07035ae5832e6790a0c0170e3746bd1 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Fri, 18 Nov 2022 08:15:27 +0100 Subject: [PATCH 22/55] Add transformer recommendation for ca (#11819) Model recommendation from @cayorodriguez. 
--- .../templates/quickstart_training_recommendations.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/spacy/cli/templates/quickstart_training_recommendations.yml b/spacy/cli/templates/quickstart_training_recommendations.yml index 27945e27a..4f214d22d 100644 --- a/spacy/cli/templates/quickstart_training_recommendations.yml +++ b/spacy/cli/templates/quickstart_training_recommendations.yml @@ -37,6 +37,15 @@ bn: accuracy: name: sagorsarker/bangla-bert-base size_factor: 3 +ca: + word_vectors: null + transformer: + efficiency: + name: projecte-aina/roberta-base-ca-v2 + size_factor: 3 + accuracy: + name: projecte-aina/roberta-base-ca-v2 + size_factor: 3 da: word_vectors: da_core_news_lg transformer: From e3173bd86d65a534f92578b85b0e5058a5c845f4 Mon Sep 17 00:00:00 2001 From: Paul O'Leary McCann Date: Fri, 18 Nov 2022 16:24:22 +0900 Subject: [PATCH 23/55] Remove spikex from Universe (#11825) --- website/meta/universe.json | 31 ------------------------------- 1 file changed, 31 deletions(-) diff --git a/website/meta/universe.json b/website/meta/universe.json index 661f5da12..57bf2d3e3 100644 --- a/website/meta/universe.json +++ b/website/meta/universe.json @@ -461,37 +461,6 @@ }, "category": ["standalone"] }, - { - "id": "spikex", - "title": "SpikeX - SpaCy Pipes for Knowledge Extraction", - "slogan": "Use SpikeX to build knowledge extraction tools with almost-zero effort", - "description": "SpikeX is a collection of pipes ready to be plugged in a spaCy pipeline. It aims to help in building knowledge extraction tools with almost-zero effort.", - "github": "erre-quadro/spikex", - "pip": "spikex", - "code_example": [ - "from spacy import load as spacy_load", - "from spikex.wikigraph import load as wg_load", - "from spikex.pipes import WikiPageX", - "", - "# load a spacy model and get a doc", - "nlp = spacy_load('en_core_web_sm')", - "doc = nlp('An apple a day keeps the doctor away')", - "# load a WikiGraph", - "wg = wg_load('simplewiki_core')", - "# get a WikiPageX and extract all pages", - "wikipagex = WikiPageX(wg)", - "doc = wikipagex(doc)", - "# see all pages extracted from the doc", - "for span in doc._.wiki_spans:", - " print(span._.wiki_pages)" - ], - "category": ["pipeline", "standalone"], - "author": "Erre Quadro", - "author_links": { - "github": "erre-quadro", - "website": "https://www.errequadrosrl.com" - } - }, { "id": "spacy-dbpedia-spotlight", "title": "DBpedia Spotlight for SpaCy", From 89bfd06fbd89cc00ca2007bf795326538126f937 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 18 Nov 2022 18:24:13 +0900 Subject: [PATCH 24/55] Auto-format code with black (#11826) Co-authored-by: explosion-bot --- spacy/pipeline/textcat.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/spacy/pipeline/textcat.py b/spacy/pipeline/textcat.py index a86eb99d2..9490e3cb1 100644 --- a/spacy/pipeline/textcat.py +++ b/spacy/pipeline/textcat.py @@ -293,7 +293,7 @@ class TextCategorizer(TrainablePipe): bp_scores(gradient) if sgd is not None: self.finish_update(sgd) - losses[self.name] += (gradient ** 2).sum() + losses[self.name] += (gradient**2).sum() return losses def _examples_to_truth( @@ -327,7 +327,7 @@ class TextCategorizer(TrainablePipe): not_missing = self.model.ops.asarray(not_missing) # type: ignore d_scores = scores - truths d_scores *= not_missing - mean_square_error = (d_scores ** 2).mean() + mean_square_error = (d_scores**2).mean() return float(mean_square_error), d_scores def add_label(self, 
label: str) -> int: From f0d8309a289015ae44f994e8c0207cdfe41583ec Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Mon, 21 Nov 2022 07:12:03 +0000 Subject: [PATCH 25/55] fix comparison of constants (#11834) Co-authored-by: MarcoGorelli <> --- .pre-commit-config.yaml | 2 +- spacy/tests/vocab_vectors/test_vocab_api.py | 21 +++++++++++++++++++++ spacy/vocab.pyx | 4 ++-- 3 files changed, 24 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index df59697b1..e2c5e98fd 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -5,7 +5,7 @@ repos: - id: black language_version: python3.7 additional_dependencies: ['click==8.0.4'] -- repo: https://gitlab.com/pycqa/flake8 +- repo: https://github.com/pycqa/flake8 rev: 5.0.4 hooks: - id: flake8 diff --git a/spacy/tests/vocab_vectors/test_vocab_api.py b/spacy/tests/vocab_vectors/test_vocab_api.py index 16cf80a08..b9c386eb8 100644 --- a/spacy/tests/vocab_vectors/test_vocab_api.py +++ b/spacy/tests/vocab_vectors/test_vocab_api.py @@ -1,8 +1,13 @@ +import os + import pytest from spacy.attrs import IS_ALPHA, LEMMA, ORTH +from spacy.lang.en import English from spacy.parts_of_speech import NOUN, VERB from spacy.vocab import Vocab +from ..util import make_tempdir + @pytest.mark.issue(1868) def test_issue1868(): @@ -59,3 +64,19 @@ def test_vocab_api_contains(en_vocab, text): def test_vocab_writing_system(en_vocab): assert en_vocab.writing_system["direction"] == "ltr" assert en_vocab.writing_system["has_case"] is True + + +def test_to_disk(): + nlp = English() + with make_tempdir() as d: + nlp.vocab.to_disk(d) + assert "vectors" in os.listdir(d) + assert "lookups.bin" in os.listdir(d) + + +def test_to_disk_exclude(): + nlp = English() + with make_tempdir() as d: + nlp.vocab.to_disk(d, exclude=("vectors", "lookups")) + assert "vectors" not in os.listdir(d) + assert "lookups.bin" not in os.listdir(d) diff --git a/spacy/vocab.pyx b/spacy/vocab.pyx index 428cadd82..27f8e5f98 100644 --- a/spacy/vocab.pyx +++ b/spacy/vocab.pyx @@ -468,9 +468,9 @@ cdef class Vocab: setters = ["strings", "vectors"] if "strings" not in exclude: self.strings.to_disk(path / "strings.json") - if "vectors" not in "exclude": + if "vectors" not in exclude: self.vectors.to_disk(path, exclude=["strings"]) - if "lookups" not in "exclude": + if "lookups" not in exclude: self.lookups.to_disk(path) def from_disk(self, path, *, exclude=tuple()): From 9d96e44a87fc2646bddc7e4e8d0357d48caf42e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marcus=20Bl=C3=A4ttermann?= Date: Sun, 11 Sep 2022 22:04:00 +0200 Subject: [PATCH 26/55] Apply Prettier to `README.md` --- website/README.md | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/website/README.md b/website/README.md index 66bc20ad9..42419fbb7 100644 --- a/website/README.md +++ b/website/README.md @@ -555,13 +555,13 @@ extensions for your code editor. The file in the root defines the settings used in this codebase. ## Building & developing the site with Docker {#docker} -Sometimes it's hard to get a local environment working due to rapid updates to node dependencies, -so it may be easier to use docker for building the docs. -If you'd like to do this, -**be sure you do *not* include your local `node_modules` folder**, -since there are some dependencies that need to be built for the image system. -Rename it before using. 
+Sometimes it's hard to get a local environment working due to rapid updates to +node dependencies, so it may be easier to use docker for building the docs. + +If you'd like to do this, **be sure you do _not_ include your local +`node_modules` folder**, since there are some dependencies that need to be built +for the image system. Rename it before using. ```bash docker run -it \ @@ -571,13 +571,13 @@ docker run -it \ gatsby develop -H 0.0.0.0 ``` -This will allow you to access the built website at http://0.0.0.0:8000/ -in your browser, and still edit code in your editor while having the site -reflect those changes. +This will allow you to access the built website at http://0.0.0.0:8000/ in your +browser, and still edit code in your editor while having the site reflect those +changes. -**Note**: If you're working on a Mac with an M1 processor, -you might see segfault errors from `qemu` if you use the default image. -To fix this use the `arm64` tagged image in the `docker run` command +**Note**: If you're working on a Mac with an M1 processor, you might see +segfault errors from `qemu` if you use the default image. To fix this use the +`arm64` tagged image in the `docker run` command (ghcr.io/explosion/spacy-io:arm64). ### Building the Docker image {#docker-build} @@ -588,7 +588,8 @@ If you'd like to build the image locally, you can do so like this: docker build -t spacy-io . ``` -This will take some time, so if you want to use the prebuilt image you'll save a bit of time. +This will take some time, so if you want to use the prebuilt image you'll save a +bit of time. ## Markdown reference {#markdown} From 96218a1e8f3af6a2ebcfaf6a28c7cadb4a60c203 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marcus=20Bl=C3=A4ttermann?= Date: Tue, 22 Nov 2022 15:55:39 +0100 Subject: [PATCH 27/55] Delete `styleguide.md` This is in intermediate commit, so the content of `/README.md`can be moved to the styleguid, but the history is kept --- website/docs/styleguide.md | 30 ------------------------------ 1 file changed, 30 deletions(-) delete mode 100644 website/docs/styleguide.md diff --git a/website/docs/styleguide.md b/website/docs/styleguide.md deleted file mode 100644 index ed6f9d99b..000000000 --- a/website/docs/styleguide.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: Styleguide -section: styleguide -search_exclude: true -menu: - - ['Logo', 'logo'] - - ['Colors', 'colors'] - - ['Typography', 'typography'] - - ['Elements', 'elements'] - - ['Components', 'components'] - - ['Setup & Installation', 'setup'] - - ['Markdown Reference', 'markdown'] - - ['Project Structure', 'structure'] - - ['Editorial', 'editorial'] -sidebar: - - label: Styleguide - items: - - text: '' - url: '/styleguide' - - label: Resources - items: - - text: Website Source - url: https://github.com/explosion/spacy/tree/master/website - - text: Contributing Guide - url: https://github.com/explosion/spaCy/blob/master/CONTRIBUTING.md ---- - -import Readme from 'README.md' - - From 0794e5c6cce6282434576cd311126b0c3bfebc35 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marcus=20Bl=C3=A4ttermann?= Date: Wed, 23 Nov 2022 01:22:27 +0100 Subject: [PATCH 28/55] Add missing files to project structure in `README.md` --- website/README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/website/README.md b/website/README.md index 42419fbb7..b2f640543 100644 --- a/website/README.md +++ b/website/README.md @@ -648,8 +648,10 @@ In addition to the native markdown elements, you can use the components | ├── languages.json # supported languages and statistical models | 
├── sidebars.json # sidebar navigations for different sections | ├── site.json # general site metadata +| ├── type-annotations.json # Type annotations | └── universe.json # data for the spaCy universe section ├── public # compiled site +├── setup # Jinja setup ├── src # source | ├── components # React components | ├── fonts # webfonts @@ -662,6 +664,8 @@ In addition to the native markdown elements, you can use the components | | ├── models.js # layout template for model pages | | └── universe.js # layout templates for universe | └── widgets # non-reusable components with content, e.g. changelog +├── .eslintrc.json # ESLint config file +├── .prettierrc # Prettier config file ├── gatsby-browser.js # browser-specific hooks for Gatsby ├── gatsby-config.js # Gatsby configuration ├── gatsby-node.js # Node-specific hooks for Gatsby From 8c0ceca637d486624cb32fbf9c875e69f81dcf83 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marcus=20Bl=C3=A4ttermann?= Date: Tue, 22 Nov 2022 15:56:21 +0100 Subject: [PATCH 29/55] Move `README.md` content to styleguide --- website/{README.md => docs/styleguide.md} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename website/{README.md => docs/styleguide.md} (100%) diff --git a/website/README.md b/website/docs/styleguide.md similarity index 100% rename from website/README.md rename to website/docs/styleguide.md From 5659eeaadd750e404152c731728ebf922c824226 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marcus=20Bl=C3=A4ttermann?= Date: Tue, 22 Nov 2022 15:58:39 +0100 Subject: [PATCH 30/55] Remove styleguide content from `README.md` --- website/README.md | 608 ---------------------------------------------- 1 file changed, 608 deletions(-) diff --git a/website/README.md b/website/README.md index b2f640543..743e61acd 100644 --- a/website/README.md +++ b/website/README.md @@ -11,520 +11,6 @@ rendered version is available at https://spacy.io/styleguide._ -The [spacy.io](https://spacy.io) website is implemented using -[Gatsby](https://www.gatsbyjs.org) with -[Remark](https://github.com/remarkjs/remark) and [MDX](https://mdxjs.com/). This -allows authoring content in **straightforward Markdown** without the usual -limitations. Standard elements can be overwritten with powerful -[React](http://reactjs.org/) components and wherever Markdown syntax isn't -enough, JSX components can be used. - -> #### Contributing to the site -> -> The docs can always use another example or more detail, and they should always -> be up to date and not misleading. We always appreciate a -> [pull request](https://github.com/explosion/spaCy/pulls). To quickly find the -> correct file to edit, simply click on the "Suggest edits" button at the bottom -> of a page. -> -> For more details on editing the site locally, see the installation -> instructions and markdown reference below. - -## Logo {#logo source="website/src/images/logo.svg"} - -import { Logos } from 'widgets/styleguide' - -If you would like to use the spaCy logo on your site, please get in touch and -ask us first. However, if you want to show support and tell others that your -project is using spaCy, you can grab one of our -[spaCy badges](/usage/spacy-101#faq-project-with-spacy). 
- - - -## Colors {#colors} - -import { Colors, Patterns } from 'widgets/styleguide' - - - -### Patterns - - - -## Typography {#typography} - -import { H1, H2, H3, H4, H5, Label, InlineList, Comment } from -'components/typography' - -> #### Markdown -> -> ```markdown_ -> ## Headline 2 -> ## Headline 2 {#some_id} -> ## Headline 2 {#some_id tag="method"} -> ``` -> -> #### JSX -> -> ```jsx ->

-> <H2>Headline 2</H2>
-> <H2 id="some_id">Headline 2</H2>
-> <H2 id="some_id" tag="method">Headline 2</H2>
-> ```
-
-Headlines are set in
-[HK Grotesk](http://cargocollective.com/hanken/HK-Grotesk-Open-Source-Font) by
-Hanken Design. All other body text and code uses the best-matching default
-system font to provide a "native" reading experience. All code uses the
-[JetBrains Mono](https://www.jetbrains.com/lp/mono/) typeface by JetBrains.
-
-
-
-Level 2 headings are automatically wrapped in `<section>` elements at compile
-time, using a custom
-[Markdown transformer](https://github.com/explosion/spaCy/tree/master/website/plugins/remark-wrap-section.js).
-This makes it easier to highlight the section that's currently in the viewpoint
-in the sidebar menu.
-
-
-<H1>Headline 1</H1>
-<H2>Headline 2</H2>
-<H3>Headline 3</H3>
-<H4>Headline 4</H4>
-<H5>Headline 5</H5>
- ---- - -The following optional attributes can be set on the headline to modify it. For -example, to add a tag for the documented type or mark features that have been -introduced in a specific version or require statistical models to be loaded. -Tags are also available as standalone `` components. - -| Argument | Example | Result | -| -------- | -------------------------- | ----------------------------------------- | -| `tag` | `{tag="method"}` | method | -| `new` | `{new="3"}` | 3 | -| `model` | `{model="tagger, parser"}` | tagger, parser | -| `hidden` | `{hidden="true"}` | | - -## Elements {#elements} - -### Links {#links} - -> #### Markdown -> -> ```markdown -> [I am a link](https://spacy.io) -> ``` -> -> #### JSX -> -> ```jsx -> I am a link -> ``` - -Special link styles are used depending on the link URL. - -- [I am a regular external link](https://explosion.ai) -- [I am a link to the documentation](/api/doc) -- [I am a link to an architecture](/api/architectures#HashEmbedCNN) -- [I am a link to a model](/models/en#en_core_web_sm) -- [I am a link to GitHub](https://github.com/explosion/spaCy) - -### Abbreviations {#abbr} - -import { Abbr } from 'components/typography' - -> #### JSX -> -> ```jsx -> Abbreviation -> ``` - -Some text with an abbreviation. On small -screens, I collapse and the explanation text is displayed next to the -abbreviation. - -### Tags {#tags} - -import Tag from 'components/tag' - -> ```jsx -> method -> 4 -> tagger, parser -> ``` - -Tags can be used together with headlines, or next to properties across the -documentation, and combined with tooltips to provide additional information. An -optional `variant` argument can be used for special tags. `variant="new"` makes -the tag take a version number to mark new features. Using the component, -visibility of this tag can later be toggled once the feature isn't considered -new anymore. Setting `variant="model"` takes a description of model capabilities -and can be used to mark features that require a respective model to be -installed. - - - -method 4 tagger, -parser - - - -### Buttons {#buttons} - -import Button from 'components/button' - -> ```jsx -> -> -> ``` - -Link buttons come in two variants, `primary` and `secondary` and two sizes, with -an optional `large` size modifier. Since they're mostly used as enhanced links, -the buttons are implemented as styled links instead of native button elements. - - - - -
- - - - -## Components - -### Table {#table} - -> #### Markdown -> -> ```markdown_ -> | Header 1 | Header 2 | -> | -------- | -------- | -> | Column 1 | Column 2 | -> ``` -> -> #### JSX -> -> ```markup -> -> -> ->
-> <Table>
->     <Tr><Td>Header 1</Td><Td>Header 2</Td></Tr>
->     <Tr><Td>Column 1</Td><Td>Column 2</Td></Tr>
-> </Table>
-> ``` - -Tables are used to present data and API documentation. Certain keywords can be -used to mark a footer row with a distinct style, for example to visualize the -return values of a documented function. - -| Header 1 | Header 2 | Header 3 | Header 4 | -| ----------- | -------- | :------: | -------: | -| Column 1 | Column 2 | Column 3 | Column 4 | -| Column 1 | Column 2 | Column 3 | Column 4 | -| Column 1 | Column 2 | Column 3 | Column 4 | -| Column 1 | Column 2 | Column 3 | Column 4 | -| **RETURNS** | Column 2 | Column 3 | Column 4 | - -Tables also support optional "divider" rows that are typically used to denote -keyword-only arguments in API documentation. To turn a row into a dividing -headline, it should only include content in its first cell, and its value should -be italicized: - -> #### Markdown -> -> ```markdown_ -> | Header 1 | Header 2 | Header 3 | -> | -------- | -------- | -------- | -> | Column 1 | Column 2 | Column 3 | -> | _Hello_ | | | -> | Column 1 | Column 2 | Column 3 | -> ``` - -| Header 1 | Header 2 | Header 3 | -| -------- | -------- | -------- | -| Column 1 | Column 2 | Column 3 | -| _Hello_ | | | -| Column 1 | Column 2 | Column 3 | - -### Type Annotations {#type-annotations} - -> #### Markdown -> -> ```markdown_ -> ~~Model[List[Doc], Floats2d]~~ -> ``` -> -> #### JSX -> -> ```markup -> Model[List[Doc], Floats2d] -> ``` - -Type annotations are special inline code blocks are used to describe Python -types in the [type hints](https://docs.python.org/3/library/typing.html) format. -The special component will split the type, apply syntax highlighting and link -all types that specify links in `meta/type-annotations.json`. Types can link to -internal or external documentation pages. To make it easy to represent the type -annotations in Markdown, the rendering "hijacks" the `~~` tags that would -typically be converted to a `` element – but in this case, text surrounded -by `~~` becomes a type annotation. - -- ~~Dict[str, List[Union[Doc, Span]]]~~ -- ~~Model[List[Doc], List[numpy.ndarray]]~~ - -Type annotations support a special visual style in tables and will render as a -separate row, under the cell text. This allows the API docs to display complex -types without taking up too much space in the cell. The type annotation should -always be the **last element** in the row. - -> #### Markdown -> -> ```markdown_ -> | Header 1 | Header 2 | -> | -------- | ----------------------- | -> | Column 1 | Column 2 ~~List[Doc]~~ | -> ``` - -| Name | Description | -| ----------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `vocab` | The shared vocabulary. ~~Vocab~~ | -| `model` | The Thinc [`Model`](https://thinc.ai/docs/api-model) wrapping the transformer. ~~Model[List[Doc], FullTransformerBatch]~~ | -| `set_extra_annotations` | Function that takes a batch of `Doc` objects and transformer outputs and can set additional annotations on the `Doc`. ~~Callable[[List[Doc], FullTransformerBatch], None]~~ | - -### List {#list} - -> #### Markdown -> -> ```markdown_ -> 1. One -> 2. Two -> ``` -> -> #### JSX -> -> ```markup ->
-> <Ol>
->   <Li>One</Li>
->   <Li>Two</Li>
-> </Ol>
-> ``` - -Lists are available as bulleted and numbered. Markdown lists are transformed -automatically. - -- I am a bulleted list -- I have nice bullets -- Lorem ipsum dolor -- consectetur adipiscing elit - -1. I am an ordered list -2. I have nice numbers -3. Lorem ipsum dolor -4. consectetur adipiscing elit - -### Aside {#aside} - -> #### Markdown -> -> ```markdown_ -> > #### Aside title -> > This is aside text. -> ``` -> -> #### JSX -> -> ```jsx -> -> ``` - -Asides can be used to display additional notes and content in the right-hand -column. Asides can contain text, code and other elements if needed. Visually, -asides are moved to the side on the X-axis, and displayed at the same level they -were inserted. On small screens, they collapse and are rendered in their -original position, in between the text. - -To make them easier to use in Markdown, paragraphs formatted as blockquotes will -turn into asides by default. Level 4 headlines (with a leading `####`) will -become aside titles. - -### Code Block {#code-block} - -> #### Markdown -> -> ````markdown_ -> ```python -> ### This is a title -> import spacy -> ``` -> ```` -> -> #### JSX -> -> ```jsx -> -> import spacy -> -> ``` - -Code blocks use the [Prism](http://prismjs.com/) syntax highlighter with a -custom theme. The language can be set individually on each block, and defaults -to raw text with no highlighting. An optional label can be added as the first -line with the prefix `####` (Python-like) and `///` (JavaScript-like). the -indented block as plain text and preserve whitespace. - -```python -### Using spaCy -import spacy -nlp = spacy.load("en_core_web_sm") -doc = nlp("This is a sentence.") -for token in doc: - print(token.text, token.pos_) -``` - -Code blocks and also specify an optional range of line numbers to highlight by -adding `{highlight="..."}` to the headline. Acceptable ranges are spans like -`5-7`, but also `5-7,10` or `5-7,10,13-14`. - -> #### Markdown -> -> ````markdown_ -> ```python -> ### This is a title {highlight="1-2"} -> import spacy -> nlp = spacy.load("en_core_web_sm") -> ``` -> ```` - -```python -### Using the matcher {highlight="5-7"} -import spacy -from spacy.matcher import Matcher - -nlp = spacy.load('en_core_web_sm') -matcher = Matcher(nlp.vocab) -pattern = [{"LOWER": "hello"}, {"IS_PUNCT": True}, {"LOWER": "world"}] -matcher.add("HelloWorld", None, pattern) -doc = nlp("Hello, world! Hello world!") -matches = matcher(doc) -``` - -Adding `{executable="true"}` to the title turns the code into an executable -block, powered by [Binder](https://mybinder.org) and -[Juniper](https://github.com/ines/juniper). If JavaScript is disabled, the -interactive widget defaults to a regular code block. - -> #### Markdown -> -> ````markdown_ -> ```python -> ### {executable="true"} -> import spacy -> nlp = spacy.load("en_core_web_sm") -> ``` -> ```` - -```python -### {executable="true"} -import spacy -nlp = spacy.load("en_core_web_sm") -doc = nlp("This is a sentence.") -for token in doc: - print(token.text, token.pos_) -``` - -If a code block only contains a URL to a GitHub file, the raw file contents are -embedded automatically and syntax highlighting is applied. The link to the -original file is shown at the top of the widget. - -> #### Markdown -> -> ````markdown_ -> ```python -> https://github.com/... 
-> ``` -> ```` -> -> #### JSX -> -> ```jsx -> -> ``` - -```python -https://github.com/explosion/spaCy/tree/master/spacy/language.py -``` - -### Infobox {#infobox} - -import Infobox from 'components/infobox' - -> #### JSX -> -> ```jsx -> Regular infobox -> This is a warning. -> This is dangerous. -> ``` - -Infoboxes can be used to add notes, updates, warnings or additional information -to a page or section. Semantically, they're implemented and interpreted as an -`aside` element. Infoboxes can take an optional `title` argument, as well as an -optional `variant` (either `"warning"` or `"danger"`). - - - -If needed, an infobox can contain regular text, `inline code`, lists and other -blocks. - - - - - -If needed, an infobox can contain regular text, `inline code`, lists and other -blocks. - - - - - -If needed, an infobox can contain regular text, `inline code`, lists and other -blocks. - - - -### Accordion {#accordion} - -import Accordion from 'components/accordion' - -> #### JSX -> -> ```jsx -> -> Accordion content goes here. -> -> ``` - -Accordions are collapsible sections that are mostly used for lengthy tables, -like the tag and label annotation schemes for different languages. They all need -to be presented – but chances are the user doesn't actually care about _all_ of -them, especially not at the same time. So it's fairly reasonable to hide them -begin a click. This particular implementation was inspired by the amazing -[Inclusive Components blog](https://inclusive-components.design/collapsible-sections/). - - - -Lorem ipsum dolor sit amet, consectetur adipiscing elit. Quisque enim ante, -pretium a orci eget, varius dignissim augue. Nam eu dictum mauris, id tincidunt -nisi. Integer commodo pellentesque tincidunt. Nam at turpis finibus tortor -gravida sodales tincidunt sit amet est. Nullam euismod arcu in tortor auctor, -sit amet dignissim justo congue. - - - ## Setup and installation {#setup} Before running the setup, make sure your versions of @@ -591,54 +77,6 @@ docker build -t spacy-io . This will take some time, so if you want to use the prebuilt image you'll save a bit of time. -## Markdown reference {#markdown} - -All page content and page meta lives in the `.md` files in the `/docs` -directory. The frontmatter block at the top of each file defines the page title -and other settings like the sidebar menu. - -````markdown ---- -title: Page title ---- - -## Headline starting a section {#some_id} - -This is a regular paragraph with a [link](https://spacy.io) and **bold text**. - -> #### This is an aside title -> -> This is aside text. - -### Subheadline - -| Header 1 | Header 2 | -| -------- | -------- | -| Column 1 | Column 2 | - -```python -### Code block title {highlight="2-3"} -import spacy -nlp = spacy.load("en_core_web_sm") -doc = nlp("Hello world") -``` - - - -This is content in the infobox. - - -```` - -In addition to the native markdown elements, you can use the components -[``][infobox], [``][accordion], [``][abbr] and -[``][tag] via their JSX syntax. 
- -[infobox]: https://spacy.io/styleguide#infobox -[accordion]: https://spacy.io/styleguide#accordion -[abbr]: https://spacy.io/styleguide#abbr -[tag]: https://spacy.io/styleguide#tag - ## Project structure {#structure} ```yaml @@ -671,49 +109,3 @@ In addition to the native markdown elements, you can use the components ├── gatsby-node.js # Node-specific hooks for Gatsby └── package.json # package settings and dependencies ``` - -## Editorial {#editorial} - -- "spaCy" should always be spelled with a lowercase "s" and a capital "C", - unless it specifically refers to the Python package or Python import `spacy` - (in which case it should be formatted as code). - - ✅ spaCy is a library for advanced NLP in Python. - - ❌ Spacy is a library for advanced NLP in Python. - - ✅ First, you need to install the `spacy` package from pip. -- Mentions of code, like function names, classes, variable names etc. in inline - text should be formatted as `code`. - - ✅ "Calling the `nlp` object on a text returns a `Doc`." -- Objects that have pages in the [API docs](/api) should be linked – for - example, [`Doc`](/api/doc) or [`Language.to_disk`](/api/language#to_disk). The - mentions should still be formatted as code within the link. Links pointing to - the API docs will automatically receive a little icon. However, if a paragraph - includes many references to the API, the links can easily get messy. In that - case, we typically only link the first mention of an object and not any - subsequent ones. - - ✅ The [`Span`](/api/span) and [`Token`](/api/token) objects are views of a - [`Doc`](/api/doc). [`Span.as_doc`](/api/span#as_doc) creates a `Doc` object - from a `Span`. - - ❌ The [`Span`](/api/span) and [`Token`](/api/token) objects are views of a - [`Doc`](/api/doc). [`Span.as_doc`](/api/span#as_doc) creates a - [`Doc`](/api/doc) object from a [`Span`](/api/span). - -* Other things we format as code are: references to trained pipeline packages - like `en_core_web_sm` or file names like `code.py` or `meta.json`. - - - ✅ After training, the `config.cfg` is saved to disk. - -* [Type annotations](#type-annotations) are a special type of code formatting, - expressed by wrapping the text in `~~` instead of backticks. The result looks - like this: ~~List[Doc]~~. All references to known types will be linked - automatically. - - - ✅ The model has the input type ~~List[Doc]~~ and it outputs a - ~~List[Array2d]~~. - -* We try to keep links meaningful but short. - - ✅ For details, see the usage guide on - [training with custom code](/usage/training#custom-code). - - ❌ For details, see - [the usage guide on training with custom code](/usage/training#custom-code). - - ❌ For details, see the usage guide on training with custom code - [here](/usage/training#custom-code). From ecbf052abde2ab9373be1d7652e20d50b096e49d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marcus=20Bl=C3=A4ttermann?= Date: Tue, 22 Nov 2022 16:00:08 +0100 Subject: [PATCH 31/55] Remove `README.md` content from styleguide --- website/docs/styleguide.md | 112 ------------------------------------- 1 file changed, 112 deletions(-) diff --git a/website/docs/styleguide.md b/website/docs/styleguide.md index b2f640543..dbc5258db 100644 --- a/website/docs/styleguide.md +++ b/website/docs/styleguide.md @@ -1,16 +1,3 @@ - - -# spacy.io website and docs - -![Netlify Status](https://api.netlify.com/api/v1/badges/d65fe97d-99ab-47f8-a339-1d8987251da0/deploy-status) - -_This page contains the documentation and styleguide for the spaCy website. 
Its -rendered version is available at https://spacy.io/styleguide._ - ---- - - - The [spacy.io](https://spacy.io) website is implemented using [Gatsby](https://www.gatsbyjs.org) with [Remark](https://github.com/remarkjs/remark) and [MDX](https://mdxjs.com/). This @@ -525,72 +512,6 @@ sit amet dignissim justo congue. -## Setup and installation {#setup} - -Before running the setup, make sure your versions of -[Node](https://nodejs.org/en/) and [npm](https://www.npmjs.com/) are up to date. -Node v10.15 or later is required. - -```bash -# Clone the repository -git clone https://github.com/explosion/spaCy -cd spaCy/website - -# Install Gatsby's command-line tool -npm install --global gatsby-cli - -# Install the dependencies -npm install - -# Start the development server -npm run dev -``` - -If you are planning on making edits to the site, you should also set up the -[Prettier](https://prettier.io/) code formatter. It takes care of formatting -Markdown and other files automatically. -[See here](https://prettier.io/docs/en/editors.html) for the available -extensions for your code editor. The -[`.prettierrc`](https://github.com/explosion/spaCy/tree/master/website/.prettierrc) -file in the root defines the settings used in this codebase. - -## Building & developing the site with Docker {#docker} - -Sometimes it's hard to get a local environment working due to rapid updates to -node dependencies, so it may be easier to use docker for building the docs. - -If you'd like to do this, **be sure you do _not_ include your local -`node_modules` folder**, since there are some dependencies that need to be built -for the image system. Rename it before using. - -```bash -docker run -it \ - -v $(pwd):/spacy-io/website \ - -p 8000:8000 \ - ghcr.io/explosion/spacy-io \ - gatsby develop -H 0.0.0.0 -``` - -This will allow you to access the built website at http://0.0.0.0:8000/ in your -browser, and still edit code in your editor while having the site reflect those -changes. - -**Note**: If you're working on a Mac with an M1 processor, you might see -segfault errors from `qemu` if you use the default image. To fix this use the -`arm64` tagged image in the `docker run` command -(ghcr.io/explosion/spacy-io:arm64). - -### Building the Docker image {#docker-build} - -If you'd like to build the image locally, you can do so like this: - -```bash -docker build -t spacy-io . -``` - -This will take some time, so if you want to use the prebuilt image you'll save a -bit of time. 
- ## Markdown reference {#markdown} All page content and page meta lives in the `.md` files in the `/docs` @@ -639,39 +560,6 @@ In addition to the native markdown elements, you can use the components [abbr]: https://spacy.io/styleguide#abbr [tag]: https://spacy.io/styleguide#tag -## Project structure {#structure} - -```yaml -### Directory structure -├── docs # the actual markdown content -├── meta # JSON-formatted site metadata -| ├── languages.json # supported languages and statistical models -| ├── sidebars.json # sidebar navigations for different sections -| ├── site.json # general site metadata -| ├── type-annotations.json # Type annotations -| └── universe.json # data for the spaCy universe section -├── public # compiled site -├── setup # Jinja setup -├── src # source -| ├── components # React components -| ├── fonts # webfonts -| ├── images # images used in the layout -| ├── plugins # custom plugins to transform Markdown -| ├── styles # CSS modules and global styles -| ├── templates # page layouts -| | ├── docs.js # layout template for documentation pages -| | ├── index.js # global layout template -| | ├── models.js # layout template for model pages -| | └── universe.js # layout templates for universe -| └── widgets # non-reusable components with content, e.g. changelog -├── .eslintrc.json # ESLint config file -├── .prettierrc # Prettier config file -├── gatsby-browser.js # browser-specific hooks for Gatsby -├── gatsby-config.js # Gatsby configuration -├── gatsby-node.js # Node-specific hooks for Gatsby -└── package.json # package settings and dependencies -``` - ## Editorial {#editorial} - "spaCy" should always be spelled with a lowercase "s" and a capital "C", From f1ddac187de7e67923e8ee63192787179f70fa4c Mon Sep 17 00:00:00 2001 From: Paul O'Leary McCann Date: Wed, 23 Nov 2022 18:51:31 +0900 Subject: [PATCH 32/55] Remove unused error object (#11837) --- spacy/language.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/spacy/language.py b/spacy/language.py index 836f3abf9..2789b6690 100644 --- a/spacy/language.py +++ b/spacy/language.py @@ -784,14 +784,6 @@ class Language: factory_name, source, name=name ) else: - if not self.has_factory(factory_name): - err = Errors.E002.format( - name=factory_name, - opts=", ".join(self.factory_names), - method="add_pipe", - lang=util.get_object_name(self), - lang_code=self.lang, - ) pipe_component = self.create_pipe( factory_name, name=name, From 8271cfb4cd8a907ff11f12841ee1ceb171b3f528 Mon Sep 17 00:00:00 2001 From: Paul O'Leary McCann Date: Wed, 23 Nov 2022 19:03:18 +0900 Subject: [PATCH 33/55] Remove Learning Path spaCy (#11846) --- website/meta/universe.json | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/website/meta/universe.json b/website/meta/universe.json index 57bf2d3e3..97b53e9c5 100644 --- a/website/meta/universe.json +++ b/website/meta/universe.json @@ -1993,17 +1993,6 @@ }, "category": ["books"] }, - { - "type": "education", - "id": "learning-path-spacy", - "title": "Learning Path: Mastering spaCy for Natural Language Processing", - "slogan": "O'Reilly, 2017", - "description": "spaCy, a fast, user-friendly library for teaching computers to understand text, simplifies NLP techniques, such as speech tagging and syntactic dependencies, so you can easily extract information, attributes, and objects from massive amounts of text to then document, measure, and analyze. This Learning Path is a hands-on introduction to using spaCy to discover insights through natural language processing. 
While end-to-end natural language processing solutions can be complex, you’ll learn the linguistics, algorithms, and machine learning skills to get the job done.", - "url": "https://www.safaribooksonline.com/library/view/learning-path-mastering/9781491986653/", - "thumb": "https://i.imgur.com/9MIgMAc.jpg", - "author": "Aaron Kramer", - "category": ["courses"] - }, { "type": "education", "id": "introduction-into-spacy-3", From 5ea14af32b4203bc3087dec63091e63fe4ac95b7 Mon Sep 17 00:00:00 2001 From: Madeesh Kannan Date: Wed, 23 Nov 2022 17:54:58 +0100 Subject: [PATCH 34/55] Add `training.before_update` callback (#11739) * Add `training.before_update` callback This callback can be used to implement training paradigms like gradual (un)freezing of components (e.g: the Transformer) after a certain number of training steps to mitigate catastrophic forgetting during fine-tuning. * Fix type annotation, default config value * Generalize arguments passed to the callback * Update schema * Pass `epoch` to callback, rename `current_step` to `step` * Add test * Simplify test * Replace config string with `spacy.blank` * Apply suggestions from code review Co-authored-by: Adriane Boyd * Cleanup imports Co-authored-by: Adriane Boyd --- spacy/default_config.cfg | 2 ++ spacy/schemas.py | 1 + spacy/tests/training/test_training.py | 40 ++++++++++++++++++++++++++- spacy/training/loop.py | 6 ++++ website/docs/api/data-formats.md | 1 + 5 files changed, 49 insertions(+), 1 deletion(-) diff --git a/spacy/default_config.cfg b/spacy/default_config.cfg index 86a72926e..694fb732f 100644 --- a/spacy/default_config.cfg +++ b/spacy/default_config.cfg @@ -90,6 +90,8 @@ dev_corpus = "corpora.dev" train_corpus = "corpora.train" # Optional callback before nlp object is saved to disk after training before_to_disk = null +# Optional callback that is invoked at the start of each training step +before_update = null [training.logger] @loggers = "spacy.ConsoleLogger.v1" diff --git a/spacy/schemas.py b/spacy/schemas.py index c824d76b9..e48fe1702 100644 --- a/spacy/schemas.py +++ b/spacy/schemas.py @@ -329,6 +329,7 @@ class ConfigSchemaTraining(BaseModel): frozen_components: List[str] = Field(..., title="Pipeline components that shouldn't be updated during training") annotating_components: List[str] = Field(..., title="Pipeline components that should set annotations during training") before_to_disk: Optional[Callable[["Language"], "Language"]] = Field(..., title="Optional callback to modify nlp object after training, before it's saved to disk") + before_update: Optional[Callable[["Language", Dict[str, Any]], None]] = Field(..., title="Optional callback that is invoked at the start of each training step") # fmt: on class Config: diff --git a/spacy/tests/training/test_training.py b/spacy/tests/training/test_training.py index 4384a796d..7933ea31f 100644 --- a/spacy/tests/training/test_training.py +++ b/spacy/tests/training/test_training.py @@ -2,6 +2,7 @@ import random import numpy import pytest +import spacy import srsly from spacy.lang.en import English from spacy.tokens import Doc, DocBin @@ -11,9 +12,10 @@ from spacy.training import offsets_to_biluo_tags from spacy.training.alignment_array import AlignmentArray from spacy.training.align import get_alignments from spacy.training.converters import json_to_docs +from spacy.training.loop import train_while_improving from spacy.util import get_words_and_spaces, load_model_from_path, minibatch from spacy.util import load_config_from_str -from thinc.api import compounding +from thinc.api 
import compounding, Adam from ..util import make_tempdir @@ -1112,3 +1114,39 @@ def test_retokenized_docs(doc): retokenizer.merge(doc1[0:2]) retokenizer.merge(doc1[5:7]) assert example.get_aligned("ORTH", as_string=True) == expected2 + + +def test_training_before_update(doc): + def before_update(nlp, args): + assert args["step"] == 0 + assert args["epoch"] == 1 + + # Raise an error here as the rest of the loop + # will not run to completion due to uninitialized + # models. + raise ValueError("ran_before_update") + + def generate_batch(): + yield 1, [Example(doc, doc)] + + nlp = spacy.blank("en") + nlp.add_pipe("tagger") + optimizer = Adam() + generator = train_while_improving( + nlp, + optimizer, + generate_batch(), + lambda: None, + dropout=0.1, + eval_frequency=100, + accumulate_gradient=10, + patience=10, + max_steps=100, + exclude=[], + annotating_components=[], + before_update=before_update, + ) + + with pytest.raises(ValueError, match="ran_before_update"): + for _ in generator: + pass diff --git a/spacy/training/loop.py b/spacy/training/loop.py index 06372cbb0..885257772 100644 --- a/spacy/training/loop.py +++ b/spacy/training/loop.py @@ -59,6 +59,7 @@ def train( batcher = T["batcher"] train_logger = T["logger"] before_to_disk = create_before_to_disk_callback(T["before_to_disk"]) + before_update = T["before_update"] # Helper function to save checkpoints. This is a closure for convenience, # to avoid passing in all the args all the time. @@ -89,6 +90,7 @@ def train( eval_frequency=T["eval_frequency"], exclude=frozen_components, annotating_components=annotating_components, + before_update=before_update, ) clean_output_dir(output_path) stdout.write(msg.info(f"Pipeline: {nlp.pipe_names}") + "\n") @@ -150,6 +152,7 @@ def train_while_improving( max_steps: int, exclude: List[str], annotating_components: List[str], + before_update: Optional[Callable[["Language", Dict[str, Any]], None]], ): """Train until an evaluation stops improving. Works as a generator, with each iteration yielding a tuple `(batch, info, is_best_checkpoint)`, @@ -198,6 +201,9 @@ def train_while_improving( words_seen = 0 start_time = timer() for step, (epoch, batch) in enumerate(train_data): + if before_update: + before_update_args = {"step": step, "epoch": epoch} + before_update(nlp, before_update_args) dropout = next(dropouts) # type: ignore for subbatch in subdivide_batch(batch, accumulate_gradient): nlp.update( diff --git a/website/docs/api/data-formats.md b/website/docs/api/data-formats.md index ce06c4ea8..768844cf3 100644 --- a/website/docs/api/data-formats.md +++ b/website/docs/api/data-formats.md @@ -186,6 +186,7 @@ process that are used when you run [`spacy train`](/api/cli#train). | `accumulate_gradient` | Whether to divide the batch up into substeps. Defaults to `1`. ~~int~~ | | `batcher` | Callable that takes an iterator of [`Doc`](/api/doc) objects and yields batches of `Doc`s. Defaults to [`batch_by_words`](/api/top-level#batch_by_words). ~~Callable[[Iterator[Doc], Iterator[List[Doc]]]]~~ | | `before_to_disk` | Optional callback to modify `nlp` object right before it is saved to disk during and after training. Can be used to remove or reset config values or disable components. Defaults to `null`. ~~Optional[Callable[[Language], Language]]~~ | +| `before_update` | Optional callback that is invoked at the start of each training step with the `nlp` object and a `Dict` containing the following entries: `step`, `epoch`. Can be used to make deferred changes to components. Defaults to `null`. 
~~Optional[Callable[[Language, Dict[str, Any]], None]]~~ | | `dev_corpus` | Dot notation of the config location defining the dev corpus. Defaults to `corpora.dev`. ~~str~~ | | `dropout` | The dropout rate. Defaults to `0.1`. ~~float~~ | | `eval_frequency` | How often to evaluate during training (steps). Defaults to `200`. ~~int~~ | From 8f062b849c846ecdf59263c82632b9fbd4eca9d0 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Thu, 24 Nov 2022 16:03:42 +0100 Subject: [PATCH 35/55] Fix Matcher cython profile=True header (#11867) --- spacy/matcher/matcher.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spacy/matcher/matcher.pyx b/spacy/matcher/matcher.pyx index e1dba01a2..c4a057ca0 100644 --- a/spacy/matcher/matcher.pyx +++ b/spacy/matcher/matcher.pyx @@ -1,4 +1,4 @@ -# cython: infer_types=True, cython: profile=True +# cython: infer_types=True, profile=True from typing import List, Iterable from libcpp.vector cimport vector From 30d31fd335306921aa7e8be081ecb396880aa14b Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Fri, 25 Nov 2022 11:12:46 +0100 Subject: [PATCH 36/55] Update Russian and Ukrainian lemmatizers (#11811) * pymorph2 issues #11620, #11626, #11625: - #11620: pymorphy2_lookup - #11626: handle multiple forms pointing to the same normal form + handling empty POS tag - #11625: matching DET that are labelled as PRON by pymorhp2 * Move lemmatizer algorithm changes back into RussianLemmatizer * Fix uk pymorphy3_lookup mode init * Move and update tests for ru/uk lookup lemmatizer modes * Fix typo * Remove traces of previous behavior for uninflected POS * Refactor to private generic-looking pymorphy methods * Remove xfailed uk lemmatizer cases * Update spacy/lang/ru/lemmatizer.py Co-authored-by: Richard Hudson Co-authored-by: Dmytro S Lituiev Co-authored-by: Richard Hudson --- spacy/lang/ru/lemmatizer.py | 51 ++++++++++++++++++-------- spacy/lang/uk/lemmatizer.py | 2 +- spacy/tests/conftest.py | 18 ++++----- spacy/tests/lang/ru/test_lemmatizer.py | 15 ++++++++ spacy/tests/lang/uk/test_lemmatizer.py | 18 ++++++--- 5 files changed, 73 insertions(+), 31 deletions(-) diff --git a/spacy/lang/ru/lemmatizer.py b/spacy/lang/ru/lemmatizer.py index c37a3a91a..f4a35de38 100644 --- a/spacy/lang/ru/lemmatizer.py +++ b/spacy/lang/ru/lemmatizer.py @@ -28,34 +28,39 @@ class RussianLemmatizer(Lemmatizer): from pymorphy2 import MorphAnalyzer except ImportError: raise ImportError( - "The Russian lemmatizer mode 'pymorphy2' requires the " - "pymorphy2 library. Install it with: pip install pymorphy2" + "The lemmatizer mode 'pymorphy2' requires the " + "pymorphy2 library and dictionaries. Install them with: " + "pip install pymorphy2" + "# for Ukrainian dictionaries:" + "pip install pymorphy2-dicts-uk" ) from None if getattr(self, "_morph", None) is None: - self._morph = MorphAnalyzer() - elif mode == "pymorphy3": + self._morph = MorphAnalyzer(lang="ru") + elif mode in {"pymorphy3", "pymorphy3_lookup"}: try: from pymorphy3 import MorphAnalyzer except ImportError: raise ImportError( - "The Russian lemmatizer mode 'pymorphy3' requires the " - "pymorphy3 library. Install it with: pip install pymorphy3" + "The lemmatizer mode 'pymorphy3' requires the " + "pymorphy3 library and dictionaries. 
Install them with: " + "pip install pymorphy3" + "# for Ukrainian dictionaries:" + "pip install pymorphy3-dicts-uk" ) from None if getattr(self, "_morph", None) is None: - self._morph = MorphAnalyzer() + self._morph = MorphAnalyzer(lang="ru") super().__init__( vocab, model, name, mode=mode, overwrite=overwrite, scorer=scorer ) - def pymorphy2_lemmatize(self, token: Token) -> List[str]: + def _pymorphy_lemmatize(self, token: Token) -> List[str]: string = token.text univ_pos = token.pos_ morphology = token.morph.to_dict() if univ_pos == "PUNCT": return [PUNCT_RULES.get(string, string)] if univ_pos not in ("ADJ", "DET", "NOUN", "NUM", "PRON", "PROPN", "VERB"): - # Skip unchangeable pos - return [string.lower()] + return self._pymorphy_lookup_lemmatize(token) analyses = self._morph.parse(string) filtered_analyses = [] for analysis in analyses: @@ -63,8 +68,10 @@ class RussianLemmatizer(Lemmatizer): # Skip suggested parse variant for unknown word for pymorphy continue analysis_pos, _ = oc2ud(str(analysis.tag)) - if analysis_pos == univ_pos or ( - analysis_pos in ("NOUN", "PROPN") and univ_pos in ("NOUN", "PROPN") + if ( + analysis_pos == univ_pos + or (analysis_pos in ("NOUN", "PROPN") and univ_pos in ("NOUN", "PROPN")) + or ((analysis_pos == "PRON") and (univ_pos == "DET")) ): filtered_analyses.append(analysis) if not len(filtered_analyses): @@ -107,15 +114,27 @@ class RussianLemmatizer(Lemmatizer): dict.fromkeys([analysis.normal_form for analysis in filtered_analyses]) ) - def pymorphy2_lookup_lemmatize(self, token: Token) -> List[str]: + def _pymorphy_lookup_lemmatize(self, token: Token) -> List[str]: string = token.text analyses = self._morph.parse(string) - if len(analyses) == 1: - return [analyses[0].normal_form] + # often multiple forms would derive from the same normal form + # thus check _unique_ normal forms + normal_forms = set([an.normal_form for an in analyses]) + if len(normal_forms) == 1: + return [next(iter(normal_forms))] return [string] + def pymorphy2_lemmatize(self, token: Token) -> List[str]: + return self._pymorphy_lemmatize(token) + + def pymorphy2_lookup_lemmatize(self, token: Token) -> List[str]: + return self._pymorphy_lookup_lemmatize(token) + def pymorphy3_lemmatize(self, token: Token) -> List[str]: - return self.pymorphy2_lemmatize(token) + return self._pymorphy_lemmatize(token) + + def pymorphy3_lookup_lemmatize(self, token: Token) -> List[str]: + return self._pymorphy_lookup_lemmatize(token) def oc2ud(oc_tag: str) -> Tuple[str, Dict[str, str]]: diff --git a/spacy/lang/uk/lemmatizer.py b/spacy/lang/uk/lemmatizer.py index 8337e7328..37015cc2a 100644 --- a/spacy/lang/uk/lemmatizer.py +++ b/spacy/lang/uk/lemmatizer.py @@ -29,7 +29,7 @@ class UkrainianLemmatizer(RussianLemmatizer): ) from None if getattr(self, "_morph", None) is None: self._morph = MorphAnalyzer(lang="uk") - elif mode == "pymorphy3": + elif mode in {"pymorphy3", "pymorphy3_lookup"}: try: from pymorphy3 import MorphAnalyzer except ImportError: diff --git a/spacy/tests/conftest.py b/spacy/tests/conftest.py index 0fc74243d..3a5c8e451 100644 --- a/spacy/tests/conftest.py +++ b/spacy/tests/conftest.py @@ -337,17 +337,17 @@ def ru_tokenizer(): return get_lang_class("ru")().tokenizer -@pytest.fixture +@pytest.fixture(scope="session") def ru_lemmatizer(): pytest.importorskip("pymorphy3") return get_lang_class("ru")().add_pipe("lemmatizer") -@pytest.fixture +@pytest.fixture(scope="session") def ru_lookup_lemmatizer(): - pytest.importorskip("pymorphy2") + pytest.importorskip("pymorphy3") return 
get_lang_class("ru")().add_pipe( - "lemmatizer", config={"mode": "pymorphy2_lookup"} + "lemmatizer", config={"mode": "pymorphy3_lookup"} ) @@ -423,19 +423,19 @@ def uk_tokenizer(): return get_lang_class("uk")().tokenizer -@pytest.fixture +@pytest.fixture(scope="session") def uk_lemmatizer(): pytest.importorskip("pymorphy3") pytest.importorskip("pymorphy3_dicts_uk") return get_lang_class("uk")().add_pipe("lemmatizer") -@pytest.fixture +@pytest.fixture(scope="session") def uk_lookup_lemmatizer(): - pytest.importorskip("pymorphy2") - pytest.importorskip("pymorphy2_dicts_uk") + pytest.importorskip("pymorphy3") + pytest.importorskip("pymorphy3_dicts_uk") return get_lang_class("uk")().add_pipe( - "lemmatizer", config={"mode": "pymorphy2_lookup"} + "lemmatizer", config={"mode": "pymorphy3_lookup"} ) diff --git a/spacy/tests/lang/ru/test_lemmatizer.py b/spacy/tests/lang/ru/test_lemmatizer.py index e82fd4f8c..9a5a9ad68 100644 --- a/spacy/tests/lang/ru/test_lemmatizer.py +++ b/spacy/tests/lang/ru/test_lemmatizer.py @@ -81,6 +81,7 @@ def test_ru_lemmatizer_punct(ru_lemmatizer): def test_ru_doc_lookup_lemmatization(ru_lookup_lemmatizer): + assert ru_lookup_lemmatizer.mode == "pymorphy3_lookup" words = ["мама", "мыла", "раму"] pos = ["NOUN", "VERB", "NOUN"] morphs = [ @@ -92,3 +93,17 @@ def test_ru_doc_lookup_lemmatization(ru_lookup_lemmatizer): doc = ru_lookup_lemmatizer(doc) lemmas = [token.lemma_ for token in doc] assert lemmas == ["мама", "мыла", "раму"] + + +@pytest.mark.parametrize( + "word,lemma", + ( + ("бременем", "бремя"), + ("будешь", "быть"), + ("какая-то", "какой-то"), + ), +) +def test_ru_lookup_lemmatizer(ru_lookup_lemmatizer, word, lemma): + assert ru_lookup_lemmatizer.mode == "pymorphy3_lookup" + doc = Doc(ru_lookup_lemmatizer.vocab, words=[word]) + assert ru_lookup_lemmatizer(doc)[0].lemma_ == lemma diff --git a/spacy/tests/lang/uk/test_lemmatizer.py b/spacy/tests/lang/uk/test_lemmatizer.py index 788744aa1..a65bb25e5 100644 --- a/spacy/tests/lang/uk/test_lemmatizer.py +++ b/spacy/tests/lang/uk/test_lemmatizer.py @@ -8,12 +8,20 @@ pytestmark = pytest.mark.filterwarnings("ignore::DeprecationWarning") def test_uk_lemmatizer(uk_lemmatizer): """Check that the default uk lemmatizer runs.""" doc = Doc(uk_lemmatizer.vocab, words=["a", "b", "c"]) + assert uk_lemmatizer.mode == "pymorphy3" uk_lemmatizer(doc) assert [token.lemma for token in doc] -def test_uk_lookup_lemmatizer(uk_lookup_lemmatizer): - """Check that the lookup uk lemmatizer runs.""" - doc = Doc(uk_lookup_lemmatizer.vocab, words=["a", "b", "c"]) - uk_lookup_lemmatizer(doc) - assert [token.lemma for token in doc] +@pytest.mark.parametrize( + "word,lemma", + ( + ("якийсь", "якийсь"), + ("розповідають", "розповідати"), + ("розповіси", "розповісти"), + ), +) +def test_uk_lookup_lemmatizer(uk_lookup_lemmatizer, word, lemma): + assert uk_lookup_lemmatizer.mode == "pymorphy3_lookup" + doc = Doc(uk_lookup_lemmatizer.vocab, words=[word]) + assert uk_lookup_lemmatizer(doc)[0].lemma_ == lemma From dece775279955e4aa84f718675a72ff34174a7ee Mon Sep 17 00:00:00 2001 From: kadarakos Date: Fri, 25 Nov 2022 11:31:28 +0100 Subject: [PATCH 37/55] correct ndim in docs (#11869) --- website/docs/api/vectors.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/api/vectors.md b/website/docs/api/vectors.md index 9636ea04c..d4702b592 100644 --- a/website/docs/api/vectors.md +++ b/website/docs/api/vectors.md @@ -50,7 +50,7 @@ modified later. | _keyword-only_ | | | `strings` | The string store. 
A new string store is created if one is not provided. Defaults to `None`. ~~Optional[StringStore]~~ | | `shape` | Size of the table as `(n_entries, n_columns)`, the number of entries and number of columns. Not required if you're initializing the object with `data` and `keys`. ~~Tuple[int, int]~~ | -| `data` | The vector data. ~~numpy.ndarray[ndim=1, dtype=float32]~~ | +| `data` | The vector data. ~~numpy.ndarray[ndim=2, dtype=float32]~~ | | `keys` | A sequence of keys aligned with the data. ~~Iterable[Union[str, int]]~~ | | `name` | A name to identify the vectors table. ~~str~~ | | `mode` 3.2 | Vectors mode: `"default"` or [`"floret"`](https://github.com/explosion/floret) (default: `"default"`). ~~str~~ | From c0fd8a2e71ce5eaad07e0b555fab8a152373fdc6 Mon Sep 17 00:00:00 2001 From: Raphael Mitsch Date: Fri, 25 Nov 2022 11:44:55 +0100 Subject: [PATCH 38/55] find-threshold: CLI command for multi-label classifier threshold tuning (#11280) * Add foundation for find-threshold CLI functionality. * Finish first draft for find-threshold. * Add tests. * Revert adjusted import statements. * Fix mypy errors. * Fix imports. * Harmonize arguments with spacy evaluate command. * Generalize component and threshold handling. Harmonize arguments with 'spacy evaluate' CLI. * Fix Spancat test. * Add beta parameter to Scorer and PRFScore. * Make beta a component scorer setting. * Remove beta. * Update nlp.config (workaround). * Reload pipeline on threshold change. Adjust tests. Remove confection reference. * Remove assumption of component being a Pipe object or having a .cfg attribute. * Adjust test output and reference values. * Remove beta references. Delete universe.json. * Reverting unnecessary changes. Removing unused default values. Renaming variables in find-cli tests. * Update spacy/cli/find_threshold.py Co-authored-by: Adriane Boyd * Remove adding labels in tests. * Remove unused error * Undo changes to PRFScorer * Change default value for n_trials. Log table iteratively. * Add warnings for pointless applications of find_threshold(). * Fix imports. * Adjust type check of TextCategorizer to exclude subclasses. * Change check of if there's only one unique value in scores. * Update spacy/cli/find_threshold.py Co-authored-by: Sofie Van Landeghem * Incorporate feedback. * Fix test issue. Update docstring. * Update docs & docstring. * Update spacy/tests/test_cli.py Co-authored-by: Adriane Boyd * Add examples to docs. Rename _nlp to nlp in tests. 
* Update spacy/cli/find_threshold.py

Co-authored-by: Sofie Van Landeghem

* Update spacy/cli/find_threshold.py

Co-authored-by: Sofie Van Landeghem

Co-authored-by: Adriane Boyd
Co-authored-by: Sofie Van Landeghem
---
 spacy/cli/__init__.py       |   1 +
 spacy/cli/find_threshold.py | 233 ++++++++++++++++++++++++++++++++++++
 spacy/errors.py             |   1 +
 spacy/pipeline/spancat.py   |   4 +-
 spacy/tests/test_cli.py     | 124 ++++++++++++++++++-
 website/docs/api/cli.md     |  41 +++++++
 6 files changed, 399 insertions(+), 5 deletions(-)
 create mode 100644 spacy/cli/find_threshold.py

diff --git a/spacy/cli/__init__.py b/spacy/cli/__init__.py
index ce76ef9a9..aab2c8d12 100644
--- a/spacy/cli/__init__.py
+++ b/spacy/cli/__init__.py
@@ -27,6 +27,7 @@ from .project.dvc import project_update_dvc  # noqa: F401
 from .project.push import project_push  # noqa: F401
 from .project.pull import project_pull  # noqa: F401
 from .project.document import project_document  # noqa: F401
+from .find_threshold import find_threshold  # noqa: F401
 
 
 @app.command("link", no_args_is_help=True, deprecated=True, hidden=True)
diff --git a/spacy/cli/find_threshold.py b/spacy/cli/find_threshold.py
new file mode 100644
index 000000000..efa664832
--- /dev/null
+++ b/spacy/cli/find_threshold.py
@@ -0,0 +1,233 @@
+import functools
+import operator
+from pathlib import Path
+import logging
+from typing import Optional, Tuple, Any, Dict, List
+
+import numpy
+import wasabi.tables
+
+from ..pipeline import TextCategorizer, MultiLabel_TextCategorizer
+from ..errors import Errors
+from ..training import Corpus
+from ._util import app, Arg, Opt, import_code, setup_gpu
+from .. import util
+
+_DEFAULTS = {
+    "n_trials": 11,
+    "use_gpu": -1,
+    "gold_preproc": False,
+}
+
+
+@app.command(
+    "find-threshold",
+    context_settings={"allow_extra_args": False, "ignore_unknown_options": True},
+)
+def find_threshold_cli(
+    # fmt: off
+    model: str = Arg(..., help="Model name or path"),
+    data_path: Path = Arg(..., help="Location of binary evaluation data in .spacy format", exists=True),
+    pipe_name: str = Arg(..., help="Name of pipe to examine thresholds for"),
+    threshold_key: str = Arg(..., help="Key of threshold attribute in component's configuration"),
+    scores_key: str = Arg(..., help="Metric to optimize"),
+    n_trials: int = Opt(_DEFAULTS["n_trials"], "--n_trials", "-n", help="Number of trials to determine optimal thresholds"),
+    code_path: Optional[Path] = Opt(None, "--code", "-c", help="Path to Python file with additional code (registered functions) to be imported"),
+    use_gpu: int = Opt(_DEFAULTS["use_gpu"], "--gpu-id", "-g", help="GPU ID or -1 for CPU"),
+    gold_preproc: bool = Opt(_DEFAULTS["gold_preproc"], "--gold-preproc", "-G", help="Use gold preprocessing"),
+    verbose: bool = Opt(False, "--silent", "-V", "-VV", help="Display more information for debugging purposes"),
+    # fmt: on
+):
+    """
+    Runs prediction trials for a trained model with varying thresholds to maximize
+    the specified metric. The search space for the threshold is traversed linearly
+    from 0 to 1 in `n_trials` steps. Results are displayed in a table on `stdout`
+    (the corresponding API call to `spacy.cli.find_threshold.find_threshold()`
+    returns all results).
+
+    This is applicable only for components whose predictions are influenced by
+    thresholds - e.g. `textcat_multilabel` and `spancat`, but not `textcat`. Note
+    that the full path to the corresponding threshold attribute in the config has to
+    be provided.
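+
+    For illustration, an invocation for a hypothetical pipeline saved to `my_nlp`,
+    with evaluation data in `data.spacy`, might look like:
+
+        python -m spacy find-threshold my_nlp data.spacy textcat_multilabel \
+            threshold cats_macro_f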
+
+    DOCS: https://spacy.io/api/cli#find-threshold
+    """
+
+    util.logger.setLevel(logging.DEBUG if verbose else logging.INFO)
+    import_code(code_path)
+    find_threshold(
+        model=model,
+        data_path=data_path,
+        pipe_name=pipe_name,
+        threshold_key=threshold_key,
+        scores_key=scores_key,
+        n_trials=n_trials,
+        use_gpu=use_gpu,
+        gold_preproc=gold_preproc,
+        silent=False,
+    )
+
+
+def find_threshold(
+    model: str,
+    data_path: Path,
+    pipe_name: str,
+    threshold_key: str,
+    scores_key: str,
+    *,
+    n_trials: int = _DEFAULTS["n_trials"],  # type: ignore
+    use_gpu: int = _DEFAULTS["use_gpu"],  # type: ignore
+    gold_preproc: bool = _DEFAULTS["gold_preproc"],  # type: ignore
+    silent: bool = True,
+) -> Tuple[float, float, Dict[float, float]]:
+    """
+    Runs prediction trials for models with varying thresholds to maximize the specified metric.
+    model (Union[str, Path]): Pipeline to evaluate. Can be a package or a path to a data directory.
+    data_path (Path): Path to file with DocBin with docs to use for threshold search.
+    pipe_name (str): Name of pipe to examine thresholds for.
+    threshold_key (str): Key of threshold attribute in component's configuration.
+    scores_key (str): Name of the score metric to optimize.
+    n_trials (int): Number of trials to determine optimal thresholds.
+    use_gpu (int): GPU ID or -1 for CPU.
+    gold_preproc (bool): Whether to use gold preprocessing. Gold preprocessing helps the annotations align to the
+        tokenization, and may result in sequences of more consistent length. However, it may reduce runtime accuracy due
+        to train/test skew.
+    silent (bool): Whether to print non-error-related output to stdout.
+    RETURNS (Tuple[float, float, Dict[float, float]]): Best found threshold, the corresponding score, scores for all
+        evaluated thresholds.
+    """
+
+    setup_gpu(use_gpu, silent=silent)
+    data_path = util.ensure_path(data_path)
+    if not data_path.exists():
+        wasabi.msg.fail("Evaluation data not found", data_path, exits=1)
+    nlp = util.load_model(model)
+
+    if pipe_name not in nlp.component_names:
+        raise AttributeError(
+            Errors.E001.format(name=pipe_name, opts=nlp.component_names)
+        )
+    pipe = nlp.get_pipe(pipe_name)
+    if not hasattr(pipe, "scorer"):
+        raise AttributeError(Errors.E1045)
+
+    if type(pipe) == TextCategorizer:
+        wasabi.msg.warn(
+            "The `textcat` component doesn't use a threshold as it's not applicable to the concept of "
+            "exclusive classes. All thresholds will yield the same results."
+        )
+
+    if not silent:
+        wasabi.msg.info(
+            title=f"Optimizing for {scores_key} for component '{pipe_name}' with {n_trials} "
+            f"trials."
+        )
+
+    # Load evaluation corpus.
+    corpus = Corpus(data_path, gold_preproc=gold_preproc)
+    dev_dataset = list(corpus(nlp))
+    config_keys = threshold_key.split(".")
+
+    def set_nested_item(
+        config: Dict[str, Any], keys: List[str], value: float
+    ) -> Dict[str, Any]:
+        """Set item in nested dictionary. Adapted from https://stackoverflow.com/a/54138200.
+        config (Dict[str, Any]): Configuration dictionary.
+        keys (List[Any]): Path to value to set.
+        value (float): Value to set.
+        RETURNS (Dict[str, Any]): Updated dictionary.
+        """
+        functools.reduce(operator.getitem, keys[:-1], config)[keys[-1]] = value
+        return config
+
+    def filter_config(
+        config: Dict[str, Any], keys: List[str], full_key: str
+    ) -> Dict[str, Any]:
+        """Filters provided config dictionary so that only the specified keys path remains.
+        config (Dict[str, Any]): Configuration dictionary.
+        keys (List[Any]): Path to value to set.
+        full_key (str): Full user-specified key.
+ RETURNS (Dict[str, Any]): Filtered dictionary. + """ + if keys[0] not in config: + wasabi.msg.fail( + title=f"Failed to look up `{full_key}` in config: sub-key {[keys[0]]} not found.", + text=f"Make sure you specified {[keys[0]]} correctly. The following sub-keys are available instead: " + f"{list(config.keys())}", + exits=1, + ) + return { + keys[0]: filter_config(config[keys[0]], keys[1:], full_key) + if len(keys) > 1 + else config[keys[0]] + } + + # Evaluate with varying threshold values. + scores: Dict[float, float] = {} + config_keys_full = ["components", pipe_name, *config_keys] + table_col_widths = (10, 10) + thresholds = numpy.linspace(0, 1, n_trials) + print(wasabi.tables.row(["Threshold", f"{scores_key}"], widths=table_col_widths)) + for threshold in thresholds: + # Reload pipeline with overrides specifying the new threshold. + nlp = util.load_model( + model, + config=set_nested_item( + filter_config( + nlp.config, config_keys_full, ".".join(config_keys_full) + ).copy(), + config_keys_full, + threshold, + ), + ) + if hasattr(pipe, "cfg"): + setattr( + nlp.get_pipe(pipe_name), + "cfg", + set_nested_item(getattr(pipe, "cfg"), config_keys, threshold), + ) + + eval_scores = nlp.evaluate(dev_dataset) + if scores_key not in eval_scores: + wasabi.msg.fail( + title=f"Failed to look up score `{scores_key}` in evaluation results.", + text=f"Make sure you specified the correct value for `scores_key`. The following scores are " + f"available: {list(eval_scores.keys())}", + exits=1, + ) + scores[threshold] = eval_scores[scores_key] + + if not isinstance(scores[threshold], (float, int)): + wasabi.msg.fail( + f"Returned score for key '{scores_key}' is not numeric. Threshold optimization only works for numeric " + f"scores.", + exits=1, + ) + print( + wasabi.row( + [round(threshold, 3), round(scores[threshold], 3)], + widths=table_col_widths, + ) + ) + + best_threshold = max(scores.keys(), key=(lambda key: scores[key])) + + # If all scores are identical, emit warning. + if len(set(scores.values())) == 1: + wasabi.msg.warn( + title="All scores are identical. Verify that all settings are correct.", + text="" + if ( + not isinstance(pipe, MultiLabel_TextCategorizer) + or scores_key in ("cats_macro_f", "cats_micro_f") + ) + else "Use `cats_macro_f` or `cats_micro_f` when optimizing the threshold for `textcat_multilabel`.", + ) + + else: + if not silent: + print( + f"\nBest threshold: {round(best_threshold, ndigits=4)} with {scores_key} value of {scores[best_threshold]}." + ) + + return best_threshold, scores[best_threshold], scores diff --git a/spacy/errors.py b/spacy/errors.py index 1d29f0e17..a8de5fb90 100644 --- a/spacy/errors.py +++ b/spacy/errors.py @@ -956,6 +956,7 @@ class Errors(metaclass=ErrorsWithCodes): "sure it's overwritten on the subclass.") E1046 = ("{cls_name} is an abstract class and cannot be instantiated. 
If you are looking for spaCy's default " "knowledge base, use `InMemoryLookupKB`.") + E1047 = ("`find_threshold()` only supports components with a `scorer` attribute.") # Deprecated model shortcuts, only used in errors and warnings diff --git a/spacy/pipeline/spancat.py b/spacy/pipeline/spancat.py index 956bbb72c..0a84c72fd 100644 --- a/spacy/pipeline/spancat.py +++ b/spacy/pipeline/spancat.py @@ -1,7 +1,7 @@ -from typing import List, Dict, Callable, Tuple, Optional, Iterable, Any, cast +from typing import List, Dict, Callable, Tuple, Optional, Iterable, Any from thinc.api import Config, Model, get_current_ops, set_dropout_rate, Ops from thinc.api import Optimizer -from thinc.types import Ragged, Ints2d, Floats2d, Ints1d +from thinc.types import Ragged, Ints2d, Floats2d import numpy diff --git a/spacy/tests/test_cli.py b/spacy/tests/test_cli.py index 8225e14f1..1c4d0c98f 100644 --- a/spacy/tests/test_cli.py +++ b/spacy/tests/test_cli.py @@ -1,9 +1,10 @@ import os import math +from collections import Counter +from typing import Tuple, List, Dict, Any import pkg_resources -from random import sample -from typing import Counter +import numpy import pytest import srsly from click import NoSuchOption @@ -28,11 +29,12 @@ from spacy.cli.package import get_third_party_dependencies from spacy.cli.package import _is_permitted_package_name from spacy.cli.project.run import _check_requirements from spacy.cli.validate import get_model_pkgs +from spacy.cli.find_threshold import find_threshold from spacy.lang.en import English from spacy.lang.nl import Dutch from spacy.language import Language from spacy.schemas import ProjectConfigSchema, RecommendationSchema, validate -from spacy.tokens import Doc +from spacy.tokens import Doc, DocBin from spacy.tokens.span import Span from spacy.training import Example, docs_to_json, offsets_to_biluo_tags from spacy.training.converters import conll_ner_to_docs, conllu_to_docs @@ -859,6 +861,122 @@ def test_span_length_freq_dist_output_must_be_correct(): assert list(span_freqs.keys()) == [3, 1, 4, 5, 2] +def test_cli_find_threshold(capsys): + thresholds = numpy.linspace(0, 1, 10) + + def make_examples(nlp: Language) -> List[Example]: + docs: List[Example] = [] + + for t in [ + ( + "I am angry and confused in the Bank of America.", + { + "cats": {"ANGRY": 1.0, "CONFUSED": 1.0, "HAPPY": 0.0}, + "spans": {"sc": [(31, 46, "ORG")]}, + }, + ), + ( + "I am confused but happy in New York.", + { + "cats": {"ANGRY": 0.0, "CONFUSED": 1.0, "HAPPY": 1.0}, + "spans": {"sc": [(27, 35, "GPE")]}, + }, + ), + ]: + doc = nlp.make_doc(t[0]) + docs.append(Example.from_dict(doc, t[1])) + + return docs + + def init_nlp( + components: Tuple[Tuple[str, Dict[str, Any]], ...] = () + ) -> Tuple[Language, List[Example]]: + new_nlp = English() + new_nlp.add_pipe( # type: ignore + factory_name="textcat_multilabel", + name="tc_multi", + config={"threshold": 0.9}, + ) + + # Append additional components to pipeline. + for cfn, comp_config in components: + new_nlp.add_pipe(cfn, config=comp_config) + + new_examples = make_examples(new_nlp) + new_nlp.initialize(get_examples=lambda: new_examples) + for i in range(5): + new_nlp.update(new_examples) + + return new_nlp, new_examples + + with make_tempdir() as docs_dir: + # Check whether find_threshold() identifies lowest threshold above 0 as (first) ideal threshold, as this matches + # the current model behavior with the examples above. This can break once the model behavior changes and serves + # mostly as a smoke test. 
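+        # `init_nlp()` above builds a blank English pipeline with a `textcat_multilabel`
+        # component named "tc_multi" (plus any extra components passed in) and briefly
+        # trains it on the toy examples defined in `make_examples()`.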
+        nlp, examples = init_nlp()
+        DocBin(docs=[example.reference for example in examples]).to_disk(
+            docs_dir / "docs.spacy"
+        )
+        with make_tempdir() as nlp_dir:
+            nlp.to_disk(nlp_dir)
+            res = find_threshold(
+                model=nlp_dir,
+                data_path=docs_dir / "docs.spacy",
+                pipe_name="tc_multi",
+                threshold_key="threshold",
+                scores_key="cats_macro_f",
+                silent=True,
+            )
+            assert res[0] != thresholds[0]
+            assert thresholds[0] < res[0] < thresholds[9]
+            assert res[1] == 1.0
+            assert res[2][1.0] == 0.0
+
+        # Test with spancat.
+        nlp, _ = init_nlp((("spancat", {}),))
+        with make_tempdir() as nlp_dir:
+            nlp.to_disk(nlp_dir)
+            res = find_threshold(
+                model=nlp_dir,
+                data_path=docs_dir / "docs.spacy",
+                pipe_name="spancat",
+                threshold_key="threshold",
+                scores_key="spans_sc_f",
+                silent=True,
+            )
+            assert res[0] != thresholds[0]
+            assert thresholds[0] < res[0] < thresholds[8]
+            assert res[1] >= 0.6
+            assert res[2][1.0] == 0.0
+
+        # Having multiple textcat_multilabel components should work, since the name has to be specified.
+        nlp, _ = init_nlp((("textcat_multilabel", {}),))
+        with make_tempdir() as nlp_dir:
+            nlp.to_disk(nlp_dir)
+            assert find_threshold(
+                model=nlp_dir,
+                data_path=docs_dir / "docs.spacy",
+                pipe_name="tc_multi",
+                threshold_key="threshold",
+                scores_key="cats_macro_f",
+                silent=True,
+            )
+
+        # Specifying the name of a non-existing pipe should fail.
+        nlp, _ = init_nlp()
+        with make_tempdir() as nlp_dir:
+            nlp.to_disk(nlp_dir)
+            with pytest.raises(AttributeError):
+                find_threshold(
+                    model=nlp_dir,
+                    data_path=docs_dir / "docs.spacy",
+                    pipe_name="_",
+                    threshold_key="threshold",
+                    scores_key="cats_macro_f",
+                    silent=True,
+                )
+
+
 @pytest.mark.parametrize(
     "reqs,output",
     [
diff --git a/website/docs/api/cli.md b/website/docs/api/cli.md
index 6e581b903..b42ba8a4f 100644
--- a/website/docs/api/cli.md
+++ b/website/docs/api/cli.md
@@ -12,6 +12,7 @@ menu:
   - ['train', 'train']
   - ['pretrain', 'pretrain']
   - ['evaluate', 'evaluate']
+  - ['find-threshold', 'find-threshold']
   - ['assemble', 'assemble']
   - ['package', 'package']
   - ['project', 'project']
@@ -1161,6 +1162,46 @@ $ python -m spacy evaluate [model] [data_path] [--output] [--code] [--gold-prepr
 | `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
 | **CREATES**    | Training results and optional metrics and visualizations.  |
 
+## find-threshold {#find-threshold new="3.5" tag="command"}
+
+Runs prediction trials for a trained model with varying thresholds to maximize
+the specified metric. The search space for the threshold is traversed linearly
+from 0 to 1 in `n_trials` steps. Results are displayed in a table on `stdout`
+(the corresponding API call to `spacy.cli.find_threshold.find_threshold()`
+returns all results).
+
+This is applicable only for components whose predictions are influenced by
+thresholds - e.g. `textcat_multilabel` and `spancat`, but not `textcat`. Note
+that the full path to the corresponding threshold attribute in the config has to
+be provided.
+
+> #### Examples
+>
+> ```cli
+> # For textcat_multilabel:
+> $ python -m spacy find-threshold my_nlp data.spacy textcat_multilabel threshold cats_macro_f
+> ```
+>
+> ```cli
+> # For spancat:
+> $ python -m spacy find-threshold my_nlp data.spacy spancat threshold spans_sc_f
+> ```
+
+
+| Name                    | Description                                                                                                                                                                            |
+| ----------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `model`                 | Pipeline to evaluate. Can be a package or a path to a data directory. ~~str (positional)~~                                                                                             |
+| `data_path`             | Path to file with DocBin with docs to use for threshold search. ~~Path (positional)~~                                                                                                  |
+| `pipe_name`             | Name of pipe to examine thresholds for. ~~str (positional)~~                                                                                                                           |
+| `threshold_key`         | Key of threshold attribute in component's configuration. ~~str (positional)~~                                                                                                          |
+| `scores_key`            | Name of the score metric to optimize. ~~str (positional)~~                                                                                                                             |
+| `--n_trials`, `-n`      | Number of trials to determine optimal thresholds. ~~int (option)~~                                                                                                                     |
+| `--code`, `-c`          | Path to Python file with additional code to be imported. Allows [registering custom functions](/usage/training#custom-functions) for new architectures. ~~Optional[Path] \(option)~~  |
+| `--gpu-id`, `-g`        | GPU to use, if any. Defaults to `-1` for CPU. ~~int (option)~~                                                                                                                         |
+| `--gold-preproc`, `-G`  | Use gold preprocessing. ~~bool (flag)~~                                                                                                                                                |
+| `--silent`, `-V`, `-VV` | Display more information for debugging purposes. ~~bool (flag)~~                                                                                                                      |
+| `--help`, `-h`          | Show help message and available arguments. ~~bool (flag)~~                                                                                                                            |
+
 ## assemble {#assemble tag="command"}
 
 Assemble a pipeline from a config file without additional training. Expects a

From 378db0eb1e9231c565faf72078bcfb012f439e9b Mon Sep 17 00:00:00 2001
From: Adriane Boyd
Date: Fri, 1 Apr 2022 10:42:25 +0200
Subject: [PATCH 39/55] Temporarily skip tests that require models/compat

---
 .github/azure-steps.yml | 44 ++++++++++++++++++++---------------------
 spacy/tests/test_cli.py |  2 ++
 2 files changed, 24 insertions(+), 22 deletions(-)

diff --git a/.github/azure-steps.yml b/.github/azure-steps.yml
index e8bd0d212..2f77706b8 100644
--- a/.github/azure-steps.yml
+++ b/.github/azure-steps.yml
@@ -52,17 +52,17 @@ steps:
     python -W error -c "import spacy"
     displayName: "Test import"
 
-  - script: |
-    python -m spacy download ca_core_news_sm
-    python -m spacy download ca_core_news_md
-    python -c "import spacy; nlp=spacy.load('ca_core_news_sm'); doc=nlp('test')"
-    displayName: 'Test download CLI'
-    condition: eq(variables['python_version'], '3.8')
-
-  - script: |
-    python -W error -c "import ca_core_news_sm; nlp = ca_core_news_sm.load(); doc=nlp('test')"
-    displayName: 'Test no warnings on load (#11713)'
-    condition: eq(variables['python_version'], '3.8')
+#  - script: |
+#    python -m spacy download ca_core_news_sm
+#    python -m spacy download ca_core_news_md
+#    python -c "import spacy; nlp=spacy.load('ca_core_news_sm'); doc=nlp('test')"
+#    displayName: 'Test download CLI'
+#    condition: eq(variables['python_version'], '3.8')
+#
+#  - script: |
+#    python -W error -c "import ca_core_news_sm; nlp = ca_core_news_sm.load(); doc=nlp('test')"
+#    displayName: 'Test no warnings on load (#11713)'
+#    condition: eq(variables['python_version'], '3.8')
 
   - script: |
     python -m spacy convert extra/example_data/ner_example_data/ner-token-per-line-conll2003.json .
@@ -86,17 +86,17 @@ steps: displayName: 'Test train CLI' condition: eq(variables['python_version'], '3.8') - - script: | - python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_sm'}; config.to_disk('ner_source_sm.cfg')" - PYTHONWARNINGS="error,ignore::DeprecationWarning" python -m spacy assemble ner_source_sm.cfg output_dir - displayName: 'Test assemble CLI' - condition: eq(variables['python_version'], '3.8') - - - script: | - python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_md'}; config.to_disk('ner_source_md.cfg')" - python -m spacy assemble ner_source_md.cfg output_dir 2>&1 | grep -q W113 - displayName: 'Test assemble CLI vectors warning' - condition: eq(variables['python_version'], '3.8') +# - script: | +# python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_sm'}; config.to_disk('ner_source_sm.cfg')" +# PYTHONWARNINGS="error,ignore::DeprecationWarning" python -m spacy assemble ner_source_sm.cfg output_dir +# displayName: 'Test assemble CLI' +# condition: eq(variables['python_version'], '3.8') +# +# - script: | +# python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_md'}; config.to_disk('ner_source_md.cfg')" +# python -m spacy assemble ner_source_md.cfg output_dir 2>&1 | grep -q W113 +# displayName: 'Test assemble CLI vectors warning' +# condition: eq(variables['python_version'], '3.8') - script: | python -m pip install -U -r requirements.txt diff --git a/spacy/tests/test_cli.py b/spacy/tests/test_cli.py index 8225e14f1..563559cb4 100644 --- a/spacy/tests/test_cli.py +++ b/spacy/tests/test_cli.py @@ -591,6 +591,7 @@ def test_string_to_list_intify(value): assert string_to_list(value, intify=True) == [1, 2, 3] +@pytest.mark.skip(reason="Temporarily skip for dev version") def test_download_compatibility(): spec = SpecifierSet("==" + about.__version__) spec.prereleases = False @@ -601,6 +602,7 @@ def test_download_compatibility(): assert get_minor_version(about.__version__) == get_minor_version(version) +@pytest.mark.skip(reason="Temporarily skip for dev version") def test_validate_compatibility_table(): spec = SpecifierSet("==" + about.__version__) spec.prereleases = False From 32396e0bda3aceda74f2d7d050180032cd381d32 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Fri, 25 Nov 2022 11:18:56 +0100 Subject: [PATCH 40/55] Set version to v3.5.0 --- spacy/about.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spacy/about.py b/spacy/about.py index ce86e6294..640e9e93b 100644 --- a/spacy/about.py +++ b/spacy/about.py @@ -1,6 +1,6 @@ # fmt: off __title__ = "spacy" -__version__ = "3.4.2" +__version__ = "3.5.0" __download_url__ = "https://github.com/explosion/spacy-models/releases/download" __compatibility__ = "https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json" __projects__ = "https://github.com/explosion/projects" From 681ec209147ba476a4062e5fec2248c7e0c50d68 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Fri, 25 Nov 2022 13:00:57 +0100 Subject: [PATCH 41/55] Add smart_open requirement, update deprecated options (#11864) * Switch from deprecated `ignore_ext` to `compression` * Add upload/download test for local files --- requirements.txt | 1 + setup.cfg | 1 + spacy/cli/_util.py | 2 +- spacy/tests/test_cli.py | 16 ++++++++++++++++ 4 files changed, 19 
insertions(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 23bfa6f14..dd2eff0c2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -11,6 +11,7 @@ srsly>=2.4.3,<3.0.0 catalogue>=2.0.6,<2.1.0 typer>=0.3.0,<0.8.0 pathy>=0.3.5 +smart-open>=5.2.1,<7.0.0 # Third party dependencies numpy>=1.15.0 requests>=2.13.0,<3.0.0 diff --git a/setup.cfg b/setup.cfg index 82d4d2758..330dc8205 100644 --- a/setup.cfg +++ b/setup.cfg @@ -53,6 +53,7 @@ install_requires = # Third-party dependencies typer>=0.3.0,<0.8.0 pathy>=0.3.5 + smart-open>=5.2.1,<7.0.0 tqdm>=4.38.0,<5.0.0 numpy>=1.15.0 requests>=2.13.0,<3.0.0 diff --git a/spacy/cli/_util.py b/spacy/cli/_util.py index 897964a88..872f69c88 100644 --- a/spacy/cli/_util.py +++ b/spacy/cli/_util.py @@ -358,7 +358,7 @@ def download_file(src: Union[str, "Pathy"], dest: Path, *, force: bool = False) if dest.exists() and not force: return None src = str(src) - with smart_open.open(src, mode="rb", ignore_ext=True) as input_file: + with smart_open.open(src, mode="rb", compression="disable") as input_file: with dest.open(mode="wb") as output_file: shutil.copyfileobj(input_file, output_file) diff --git a/spacy/tests/test_cli.py b/spacy/tests/test_cli.py index 1c4d0c98f..525c6d255 100644 --- a/spacy/tests/test_cli.py +++ b/spacy/tests/test_cli.py @@ -17,6 +17,7 @@ from spacy.cli._util import is_subpath_of, load_project_config from spacy.cli._util import parse_config_overrides, string_to_list from spacy.cli._util import substitute_project_variables from spacy.cli._util import validate_project_commands +from spacy.cli._util import upload_file, download_file from spacy.cli.debug_data import _compile_gold, _get_labels_from_model from spacy.cli.debug_data import _get_labels_from_spancat from spacy.cli.debug_data import _get_distribution, _get_kl_divergence @@ -1014,3 +1015,18 @@ def test_project_check_requirements(reqs, output): pkg_resources.require("spacyunknowndoesnotexist12345") except pkg_resources.DistributionNotFound: assert output == _check_requirements([req.strip() for req in reqs.split("\n")]) + + +def test_upload_download_local_file(): + with make_tempdir() as d1, make_tempdir() as d2: + filename = "f.txt" + content = "content" + local_file = d1 / filename + remote_file = d2 / filename + with local_file.open(mode="w") as file_: + file_.write(content) + upload_file(local_file, remote_file) + local_file.unlink() + download_file(remote_file, local_file) + with local_file.open(mode="r") as file_: + assert file_.read() == content From c23d54fd261b34ff947a18170a303a305179e7bd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marcus=20Bl=C3=A4ttermann?= Date: Wed, 23 Nov 2022 01:33:20 +0100 Subject: [PATCH 42/55] Remove MDX tags from `README.md` --- website/README.md | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/website/README.md b/website/README.md index 743e61acd..e0c0ac450 100644 --- a/website/README.md +++ b/website/README.md @@ -11,7 +11,7 @@ rendered version is available at https://spacy.io/styleguide._ -## Setup and installation {#setup} +## Setup and installation Before running the setup, make sure your versions of [Node](https://nodejs.org/en/) and [npm](https://www.npmjs.com/) are up to date. @@ -40,7 +40,7 @@ extensions for your code editor. The [`.prettierrc`](https://github.com/explosion/spaCy/tree/master/website/.prettierrc) file in the root defines the settings used in this codebase. 
-## Building & developing the site with Docker {#docker} +## Building & developing the site with Docker Sometimes it's hard to get a local environment working due to rapid updates to node dependencies, so it may be easier to use docker for building the docs. @@ -66,7 +66,7 @@ segfault errors from `qemu` if you use the default image. To fix this use the `arm64` tagged image in the `docker run` command (ghcr.io/explosion/spacy-io:arm64). -### Building the Docker image {#docker-build} +### Building the Docker image If you'd like to build the image locally, you can do so like this: @@ -77,10 +77,9 @@ docker build -t spacy-io . This will take some time, so if you want to use the prebuilt image you'll save a bit of time. -## Project structure {#structure} +## Project structure ```yaml -### Directory structure ├── docs # the actual markdown content ├── meta # JSON-formatted site metadata | ├── languages.json # supported languages and statistical models From 7f2ea20fee67c24eb3c7a10b76f6d554c0df5c88 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marcus=20Bl=C3=A4ttermann?= Date: Tue, 22 Nov 2022 16:16:11 +0100 Subject: [PATCH 43/55] Update `README.md` --- website/README.md | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/website/README.md b/website/README.md index e0c0ac450..890a48ef9 100644 --- a/website/README.md +++ b/website/README.md @@ -1,15 +1,9 @@ - - # spacy.io website and docs ![Netlify Status](https://api.netlify.com/api/v1/badges/d65fe97d-99ab-47f8-a339-1d8987251da0/deploy-status) -_This page contains the documentation and styleguide for the spaCy website. Its -rendered version is available at https://spacy.io/styleguide._ - ---- - - +The styleguide for the spaCy website is available at +[spacy.io/styleguide](https://spacy.io/styleguide). 
## Setup and installation From 5c9faf6eea34eade36e465c5493bfbbb039bcbc5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marcus=20Bl=C3=A4ttermann?= Date: Thu, 24 Nov 2022 21:02:14 +0100 Subject: [PATCH 44/55] Update menu for styleguide This reflects the removed parts from ecbf052abde2ab9373be1d7652e20d50b096e49d --- website/docs/styleguide.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/website/docs/styleguide.md b/website/docs/styleguide.md index 6ae23ff3f..47bca1ed4 100644 --- a/website/docs/styleguide.md +++ b/website/docs/styleguide.md @@ -8,9 +8,7 @@ menu: - ['Typography', 'typography'] - ['Elements', 'elements'] - ['Components', 'components'] - - ['Setup & Installation', 'setup'] - ['Markdown Reference', 'markdown'] - - ['Project Structure', 'structure'] - ['Editorial', 'editorial'] sidebar: - label: Styleguide From 9f986af120717680a1b97b15670dbc427b56ad89 Mon Sep 17 00:00:00 2001 From: Zhangrp Date: Mon, 28 Nov 2022 13:50:30 +0800 Subject: [PATCH 45/55] Add example sentence for Chinese in website meta (#11879) --- website/meta/languages.json | 1 + 1 file changed, 1 insertion(+) diff --git a/website/meta/languages.json b/website/meta/languages.json index bd1535c90..15158df79 100644 --- a/website/meta/languages.json +++ b/website/meta/languages.json @@ -562,6 +562,7 @@ "url": "https://github.com/explosion/spacy-pkuseg" } ], + "example": "这是一个用于示例的句子。", "has_examples": true } ], From f54bfb56c923b94ffeff5f9b774a24479c6dea61 Mon Sep 17 00:00:00 2001 From: Paul O'Leary McCann Date: Mon, 28 Nov 2022 18:01:09 +0900 Subject: [PATCH 46/55] Don't throw an error if using displacy on an unset span key (#11845) * Don't throw an error if using displacy on an unset span key * List available keys in W117 --- spacy/displacy/__init__.py | 5 +++-- spacy/errors.py | 2 +- spacy/tests/test_displacy.py | 10 ++++++++++ 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/spacy/displacy/__init__.py b/spacy/displacy/__init__.py index 7bb300afa..bc32001d7 100644 --- a/spacy/displacy/__init__.py +++ b/spacy/displacy/__init__.py @@ -228,12 +228,13 @@ def parse_spans(doc: Doc, options: Dict[str, Any] = {}) -> Dict[str, Any]: "kb_id": span.kb_id_ if span.kb_id_ else "", "kb_url": kb_url_template.format(span.kb_id_) if kb_url_template else "#", } - for span in doc.spans[spans_key] + for span in doc.spans.get(spans_key, []) ] tokens = [token.text for token in doc] if not spans: - warnings.warn(Warnings.W117.format(spans_key=spans_key)) + keys = list(doc.spans.keys()) + warnings.warn(Warnings.W117.format(spans_key=spans_key, keys=keys)) title = doc.user_data.get("title", None) if hasattr(doc, "user_data") else None settings = get_doc_settings(doc) return { diff --git a/spacy/errors.py b/spacy/errors.py index a8de5fb90..e34614b0f 100644 --- a/spacy/errors.py +++ b/spacy/errors.py @@ -199,7 +199,7 @@ class Warnings(metaclass=ErrorsWithCodes): W117 = ("No spans to visualize found in Doc object with spans_key: '{spans_key}'. If this is " "surprising to you, make sure the Doc was processed using a model " "that supports span categorization, and check the `doc.spans[spans_key]` " - "property manually if necessary.") + "property manually if necessary.\n\nAvailable keys: {keys}") W118 = ("Term '{term}' not found in glossary. It may however be explained in documentation " "for the corpora used to train the language. 
Please check " "`nlp.meta[\"sources\"]` for any relevant links.") diff --git a/spacy/tests/test_displacy.py b/spacy/tests/test_displacy.py index ccc145b44..f298b38e0 100644 --- a/spacy/tests/test_displacy.py +++ b/spacy/tests/test_displacy.py @@ -203,6 +203,16 @@ def test_displacy_parse_spans_different_spans_key(en_vocab): ] +def test_displacy_parse_empty_spans_key(en_vocab): + """Test that having an unset spans key doesn't raise an error""" + doc = Doc(en_vocab, words=["Welcome", "to", "the", "Bank", "of", "China"]) + doc.spans["custom"] = [Span(doc, 3, 6, "BANK")] + with pytest.warns(UserWarning, match="W117"): + spans = displacy.parse_spans(doc) + + assert isinstance(spans, dict) + + def test_displacy_parse_ents(en_vocab): """Test that named entities on a Doc are converted into displaCy's format.""" doc = Doc(en_vocab, words=["But", "Google", "is", "starting", "from", "behind"]) From 1ebe7db07c8dbb1a55dafb09131b1d08242b79c5 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Tue, 29 Nov 2022 11:40:58 +0100 Subject: [PATCH 47/55] Support local filesystem remotes for projects (#11762) * Support local filesystem remotes for projects * Fix support for local filesystem remotes for projects * Use `FluidPath` instead of `Pathy` to support both filesystem and remote paths * Create missing parent directories if required for local filesystem * Add a more general `_file_exists` method to support both `Pathy`, `Path`, and `smart_open`-compatible URLs * Add explicit `smart_open` dependency starting with support for `compression` flag * Update `pathy` dependency to exclude older versions that aren't compatible with required `smart_open` version * Update docs to refer to `Pathy` instead of `smart_open` for project remotes (technically you can still push to any `smart_open`-compatible path but you can't pull from them) * Add tests for local filesystem remotes * Update pathy for general BlobStat sorting * Add import * Remove _file_exists since only Pathy remotes are supported * Format CLI docs * Clean up merge --- requirements.txt | 2 +- setup.cfg | 2 +- spacy/cli/_util.py | 15 +++++--- spacy/cli/project/remote_storage.py | 42 ++++++++++++++-------- spacy/tests/test_cli.py | 56 +++++++++++++++++++++++++++++ website/docs/api/cli.md | 26 +++++++------- website/docs/usage/projects.md | 20 +++++------ 7 files changed, 120 insertions(+), 43 deletions(-) diff --git a/requirements.txt b/requirements.txt index dd2eff0c2..778c05e21 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,7 +10,7 @@ wasabi>=0.9.1,<1.1.0 srsly>=2.4.3,<3.0.0 catalogue>=2.0.6,<2.1.0 typer>=0.3.0,<0.8.0 -pathy>=0.3.5 +pathy>=0.10.0 smart-open>=5.2.1,<7.0.0 # Third party dependencies numpy>=1.15.0 diff --git a/setup.cfg b/setup.cfg index 330dc8205..5768c9d3e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -52,7 +52,7 @@ install_requires = catalogue>=2.0.6,<2.1.0 # Third-party dependencies typer>=0.3.0,<0.8.0 - pathy>=0.3.5 + pathy>=0.10.0 smart-open>=5.2.1,<7.0.0 tqdm>=4.38.0,<5.0.0 numpy>=1.15.0 diff --git a/spacy/cli/_util.py b/spacy/cli/_util.py index 872f69c88..7ce006108 100644 --- a/spacy/cli/_util.py +++ b/spacy/cli/_util.py @@ -23,7 +23,7 @@ from ..util import is_compatible_version, SimpleFrozenDict, ENV_VARS from .. 
import about if TYPE_CHECKING: - from pathy import Pathy # noqa: F401 + from pathy import FluidPath # noqa: F401 SDIST_SUFFIX = ".tar.gz" @@ -331,7 +331,7 @@ def import_code(code_path: Optional[Union[Path, str]]) -> None: msg.fail(f"Couldn't load Python code: {code_path}", e, exits=1) -def upload_file(src: Path, dest: Union[str, "Pathy"]) -> None: +def upload_file(src: Path, dest: Union[str, "FluidPath"]) -> None: """Upload a file. src (Path): The source path. @@ -339,13 +339,20 @@ def upload_file(src: Path, dest: Union[str, "Pathy"]) -> None: """ import smart_open + # Create parent directories for local paths + if isinstance(dest, Path): + if not dest.parent.exists(): + dest.parent.mkdir(parents=True) + dest = str(dest) with smart_open.open(dest, mode="wb") as output_file: with src.open(mode="rb") as input_file: output_file.write(input_file.read()) -def download_file(src: Union[str, "Pathy"], dest: Path, *, force: bool = False) -> None: +def download_file( + src: Union[str, "FluidPath"], dest: Path, *, force: bool = False +) -> None: """Download a file using smart_open. url (str): The URL of the file. @@ -368,7 +375,7 @@ def ensure_pathy(path): slow and annoying Google Cloud warning).""" from pathy import Pathy # noqa: F811 - return Pathy(path) + return Pathy.fluid(path) def git_checkout( diff --git a/spacy/cli/project/remote_storage.py b/spacy/cli/project/remote_storage.py index 12e252b3c..076541580 100644 --- a/spacy/cli/project/remote_storage.py +++ b/spacy/cli/project/remote_storage.py @@ -5,15 +5,17 @@ import hashlib import urllib.parse import tarfile from pathlib import Path +from wasabi import msg -from .._util import get_hash, get_checksum, download_file, ensure_pathy -from ...util import make_tempdir, get_minor_version, ENV_VARS, check_bool_env_var +from .._util import get_hash, get_checksum, upload_file, download_file +from .._util import ensure_pathy, make_tempdir +from ...util import get_minor_version, ENV_VARS, check_bool_env_var from ...git_info import GIT_VERSION from ... import about from ...errors import Errors if TYPE_CHECKING: - from pathy import Pathy # noqa: F401 + from pathy import FluidPath # noqa: F401 class RemoteStorage: @@ -28,7 +30,7 @@ class RemoteStorage: self.url = ensure_pathy(url) self.compression = compression - def push(self, path: Path, command_hash: str, content_hash: str) -> "Pathy": + def push(self, path: Path, command_hash: str, content_hash: str) -> "FluidPath": """Compress a file or directory within a project and upload it to a remote storage. If an object exists at the full URL, nothing is done. @@ -49,9 +51,7 @@ class RemoteStorage: mode_string = f"w:{self.compression}" if self.compression else "w" with tarfile.open(tar_loc, mode=mode_string) as tar_file: tar_file.add(str(loc), arcname=str(path)) - with tar_loc.open(mode="rb") as input_file: - with url.open(mode="wb") as output_file: - output_file.write(input_file.read()) + upload_file(tar_loc, url) return url def pull( @@ -60,7 +60,7 @@ class RemoteStorage: *, command_hash: Optional[str] = None, content_hash: Optional[str] = None, - ) -> Optional["Pathy"]: + ) -> Optional["FluidPath"]: """Retrieve a file from the remote cache. If the file already exists, nothing is done. @@ -110,25 +110,37 @@ class RemoteStorage: *, command_hash: Optional[str] = None, content_hash: Optional[str] = None, - ) -> Optional["Pathy"]: + ) -> Optional["FluidPath"]: """Find the best matching version of a file within the storage, or `None` if no match can be found. 
If both the creation and content hash are specified, only exact matches will be returned. Otherwise, the most recent matching file is preferred. """ name = self.encode_name(str(path)) + urls = [] if command_hash is not None and content_hash is not None: - url = self.make_url(path, command_hash, content_hash) + url = self.url / name / command_hash / content_hash urls = [url] if url.exists() else [] elif command_hash is not None: - urls = list((self.url / name / command_hash).iterdir()) + if (self.url / name / command_hash).exists(): + urls = list((self.url / name / command_hash).iterdir()) else: - urls = list((self.url / name).iterdir()) - if content_hash is not None: - urls = [url for url in urls if url.parts[-1] == content_hash] + if (self.url / name).exists(): + for sub_dir in (self.url / name).iterdir(): + urls.extend(sub_dir.iterdir()) + if content_hash is not None: + urls = [url for url in urls if url.parts[-1] == content_hash] + if len(urls) >= 2: + try: + urls.sort(key=lambda x: x.stat().last_modified) # type: ignore + except Exception: + msg.warn( + "Unable to sort remote files by last modified. The file(s) " + "pulled from the cache may not be the most recent." + ) return urls[-1] if urls else None - def make_url(self, path: Path, command_hash: str, content_hash: str) -> "Pathy": + def make_url(self, path: Path, command_hash: str, content_hash: str) -> "FluidPath": """Construct a URL from a subpath, a creation hash and a content hash.""" return self.url / self.encode_name(str(path)) / command_hash / content_hash diff --git a/spacy/tests/test_cli.py b/spacy/tests/test_cli.py index 525c6d255..ee3081283 100644 --- a/spacy/tests/test_cli.py +++ b/spacy/tests/test_cli.py @@ -3,6 +3,7 @@ import math from collections import Counter from typing import Tuple, List, Dict, Any import pkg_resources +import time import numpy import pytest @@ -28,6 +29,7 @@ from spacy.cli.download import get_compatibility, get_version from spacy.cli.init_config import RECOMMENDATIONS, init_config, fill_config from spacy.cli.package import get_third_party_dependencies from spacy.cli.package import _is_permitted_package_name +from spacy.cli.project.remote_storage import RemoteStorage from spacy.cli.project.run import _check_requirements from spacy.cli.validate import get_model_pkgs from spacy.cli.find_threshold import find_threshold @@ -862,6 +864,60 @@ def test_span_length_freq_dist_output_must_be_correct(): assert list(span_freqs.keys()) == [3, 1, 4, 5, 2] +def test_local_remote_storage(): + with make_tempdir() as d: + filename = "a.txt" + + content_hashes = ("aaaa", "cccc", "bbbb") + for i, content_hash in enumerate(content_hashes): + # make sure that each subsequent file has a later timestamp + if i > 0: + time.sleep(1) + content = f"{content_hash} content" + loc_file = d / "root" / filename + if not loc_file.parent.exists(): + loc_file.parent.mkdir(parents=True) + with loc_file.open(mode="w") as file_: + file_.write(content) + + # push first version to remote storage + remote = RemoteStorage(d / "root", str(d / "remote")) + remote.push(filename, "aaaa", content_hash) + + # retrieve with full hashes + loc_file.unlink() + remote.pull(filename, command_hash="aaaa", content_hash=content_hash) + with loc_file.open(mode="r") as file_: + assert file_.read() == content + + # retrieve with command hash + loc_file.unlink() + remote.pull(filename, command_hash="aaaa") + with loc_file.open(mode="r") as file_: + assert file_.read() == content + + # retrieve with content hash + loc_file.unlink() + remote.pull(filename, 
content_hash=content_hash) + with loc_file.open(mode="r") as file_: + assert file_.read() == content + + # retrieve with no hashes + loc_file.unlink() + remote.pull(filename) + with loc_file.open(mode="r") as file_: + assert file_.read() == content + + +def test_local_remote_storage_pull_missing(): + # pulling from a non-existent remote pulls nothing gracefully + with make_tempdir() as d: + filename = "a.txt" + remote = RemoteStorage(d / "root", str(d / "remote")) + assert remote.pull(filename, command_hash="aaaa") is None + assert remote.pull(filename) is None + + def test_cli_find_threshold(capsys): thresholds = numpy.linspace(0, 1, 10) diff --git a/website/docs/api/cli.md b/website/docs/api/cli.md index b42ba8a4f..8823a3bd8 100644 --- a/website/docs/api/cli.md +++ b/website/docs/api/cli.md @@ -1391,12 +1391,13 @@ If the contents are different, the new version of the file is uploaded. Deleting obsolete files is left up to you. Remotes can be defined in the `remotes` section of the -[`project.yml`](/usage/projects#project-yml). Under the hood, spaCy uses the -[`smart-open`](https://github.com/RaRe-Technologies/smart_open) library to -communicate with the remote storages, so you can use any protocol that -`smart-open` supports, including [S3](https://aws.amazon.com/s3/), -[Google Cloud Storage](https://cloud.google.com/storage), SSH and more, although -you may need to install extra dependencies to use certain protocols. +[`project.yml`](/usage/projects#project-yml). Under the hood, spaCy uses +[`Pathy`](https://github.com/justindujardin/pathy) to communicate with the +remote storages, so you can use any protocol that `Pathy` supports, including +[S3](https://aws.amazon.com/s3/), +[Google Cloud Storage](https://cloud.google.com/storage), and the local +filesystem, although you may need to install extra dependencies to use certain +protocols. ```cli $ python -m spacy project push [remote] [project_dir] @@ -1435,12 +1436,13 @@ outputs, so if you change the config back, you'll be able to fetch back the result. Remotes can be defined in the `remotes` section of the -[`project.yml`](/usage/projects#project-yml). Under the hood, spaCy uses the -[`smart-open`](https://github.com/RaRe-Technologies/smart_open) library to -communicate with the remote storages, so you can use any protocol that -`smart-open` supports, including [S3](https://aws.amazon.com/s3/), -[Google Cloud Storage](https://cloud.google.com/storage), SSH and more, although -you may need to install extra dependencies to use certain protocols. +[`project.yml`](/usage/projects#project-yml). Under the hood, spaCy uses +[`Pathy`](https://github.com/justindujardin/pathy) to communicate with the +remote storages, so you can use any protocol that `Pathy` supports, including +[S3](https://aws.amazon.com/s3/), +[Google Cloud Storage](https://cloud.google.com/storage), and the local +filesystem, although you may need to install extra dependencies to use certain +protocols. ```cli $ python -m spacy project pull [remote] [project_dir] diff --git a/website/docs/usage/projects.md b/website/docs/usage/projects.md index 34315e4e7..f57578049 100644 --- a/website/docs/usage/projects.md +++ b/website/docs/usage/projects.md @@ -259,9 +259,9 @@ pipelines. > This can be used in a project command like so: > > ```yaml -> - name: "echo-path" -> script: -> - "echo ${env.ENV_PATH}" +> - name: 'echo-path' +> script: +> - 'echo ${env.ENV_PATH}' > ``` | Section | Description | @@ -643,12 +643,13 @@ locally. 
You can list one or more remotes in the `remotes` section of your [`project.yml`](#project-yml) by mapping a string name to the URL of the -storage. Under the hood, spaCy uses the -[`smart-open`](https://github.com/RaRe-Technologies/smart_open) library to -communicate with the remote storages, so you can use any protocol that -`smart-open` supports, including [S3](https://aws.amazon.com/s3/), -[Google Cloud Storage](https://cloud.google.com/storage), SSH and more, although -you may need to install extra dependencies to use certain protocols. +storage. Under the hood, spaCy uses +[`Pathy`](https://github.com/justindujardin/pathy) to communicate with the +remote storages, so you can use any protocol that `Pathy` supports, including +[S3](https://aws.amazon.com/s3/), +[Google Cloud Storage](https://cloud.google.com/storage), and the local +filesystem, although you may need to install extra dependencies to use certain +protocols. > #### Example > @@ -661,7 +662,6 @@ you may need to install extra dependencies to use certain protocols. remotes: default: 's3://my-spacy-bucket' local: '/mnt/scratch/cache' - stuff: 'ssh://myserver.example.com/whatever' ``` From f1e024345043cdc986e70308a09a7ca383b60dd0 Mon Sep 17 00:00:00 2001 From: Paul O'Leary McCann Date: Tue, 29 Nov 2022 19:50:23 +0900 Subject: [PATCH 48/55] Remove macro auc per type from textcat defaults (#11887) This appears to have been added by mistake and never used. Removing it does not break validation. --- spacy/pipeline/textcat.py | 1 - spacy/pipeline/textcat_multilabel.py | 1 - 2 files changed, 2 deletions(-) diff --git a/spacy/pipeline/textcat.py b/spacy/pipeline/textcat.py index 9490e3cb1..65121114d 100644 --- a/spacy/pipeline/textcat.py +++ b/spacy/pipeline/textcat.py @@ -87,7 +87,6 @@ subword_features = true "cats_macro_f": None, "cats_macro_auc": None, "cats_f_per_type": None, - "cats_macro_auc_per_type": None, }, ) def make_textcat( diff --git a/spacy/pipeline/textcat_multilabel.py b/spacy/pipeline/textcat_multilabel.py index ef9bd6557..328cee723 100644 --- a/spacy/pipeline/textcat_multilabel.py +++ b/spacy/pipeline/textcat_multilabel.py @@ -87,7 +87,6 @@ subword_features = true "cats_macro_f": None, "cats_macro_auc": None, "cats_f_per_type": None, - "cats_macro_auc_per_type": None, }, ) def make_multilabel_textcat( From 6f9d630f7e9c11d8d5f7ba37e3764ef3630c172d Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Tue, 29 Nov 2022 13:20:08 +0100 Subject: [PATCH 49/55] Replace Pipe type with Callable in Language (#11803) * Replace Pipe type with Callable in Language * Use Callable[[Doc], Doc] in the docstrings --- spacy/cli/debug_data.py | 2 + spacy/language.py | 55 ++++++++++++++-------------- spacy/tests/pipeline/test_textcat.py | 2 +- spacy/util.py | 7 ++-- 4 files changed, 33 insertions(+), 33 deletions(-) diff --git a/spacy/cli/debug_data.py b/spacy/cli/debug_data.py index 963d5b926..a85324e87 100644 --- a/spacy/cli/debug_data.py +++ b/spacy/cli/debug_data.py @@ -13,6 +13,7 @@ from ._util import import_code, debug_cli, _format_number from ..training import Example, remove_bilu_prefix from ..training.initialize import get_sourced_components from ..schemas import ConfigSchemaTraining +from ..pipeline import TrainablePipe from ..pipeline._parser_internals import nonproj from ..pipeline._parser_internals.nonproj import DELIMITER from ..pipeline import Morphologizer, SpanCategorizer @@ -934,6 +935,7 @@ def _get_labels_from_model(nlp: Language, factory_name: str) -> Set[str]: labels: Set[str] = set() for pipe_name in pipe_names: pipe = 
nlp.get_pipe(pipe_name) + assert isinstance(pipe, TrainablePipe) labels.update(pipe.labels) return labels diff --git a/spacy/language.py b/spacy/language.py index 2789b6690..e0abfd5e7 100644 --- a/spacy/language.py +++ b/spacy/language.py @@ -43,8 +43,7 @@ from .lookups import load_lookups from .compat import Literal -if TYPE_CHECKING: - from .pipeline import Pipe # noqa: F401 +PipeCallable = Callable[[Doc], Doc] # This is the base config will all settings (training etc.) @@ -181,7 +180,7 @@ class Language: self.vocab: Vocab = vocab if self.lang is None: self.lang = self.vocab.lang - self._components: List[Tuple[str, "Pipe"]] = [] + self._components: List[Tuple[str, PipeCallable]] = [] self._disabled: Set[str] = set() self.max_length = max_length # Create the default tokenizer from the default config @@ -303,7 +302,7 @@ class Language: return SimpleFrozenList(names) @property - def components(self) -> List[Tuple[str, "Pipe"]]: + def components(self) -> List[Tuple[str, PipeCallable]]: """Get all (name, component) tuples in the pipeline, including the currently disabled components. """ @@ -322,12 +321,12 @@ class Language: return SimpleFrozenList(names, error=Errors.E926.format(attr="component_names")) @property - def pipeline(self) -> List[Tuple[str, "Pipe"]]: + def pipeline(self) -> List[Tuple[str, PipeCallable]]: """The processing pipeline consisting of (name, component) tuples. The components are called on the Doc in order as it passes through the pipeline. - RETURNS (List[Tuple[str, Pipe]]): The pipeline. + RETURNS (List[Tuple[str, Callable[[Doc], Doc]]]): The pipeline. """ pipes = [(n, p) for n, p in self._components if n not in self._disabled] return SimpleFrozenList(pipes, error=Errors.E926.format(attr="pipeline")) @@ -527,7 +526,7 @@ class Language: assigns: Iterable[str] = SimpleFrozenList(), requires: Iterable[str] = SimpleFrozenList(), retokenizes: bool = False, - func: Optional["Pipe"] = None, + func: Optional[PipeCallable] = None, ) -> Callable[..., Any]: """Register a new pipeline component. Can be used for stateless function components that don't require a separate factory. Can be used as a @@ -542,7 +541,7 @@ class Language: e.g. "token.ent_id". Used for pipeline analysis. retokenizes (bool): Whether the component changes the tokenization. Used for pipeline analysis. - func (Optional[Callable]): Factory function if not used as a decorator. + func (Optional[Callable[[Doc], Doc]): Factory function if not used as a decorator. DOCS: https://spacy.io/api/language#component """ @@ -553,11 +552,11 @@ class Language: raise ValueError(Errors.E853.format(name=name)) component_name = name if name is not None else util.get_object_name(func) - def add_component(component_func: "Pipe") -> Callable: + def add_component(component_func: PipeCallable) -> Callable: if isinstance(func, type): # function is a class raise ValueError(Errors.E965.format(name=component_name)) - def factory_func(nlp, name: str) -> "Pipe": + def factory_func(nlp, name: str) -> PipeCallable: return component_func internal_name = cls.get_factory_name(name) @@ -607,7 +606,7 @@ class Language: print_pipe_analysis(analysis, keys=keys) return analysis - def get_pipe(self, name: str) -> "Pipe": + def get_pipe(self, name: str) -> PipeCallable: """Get a pipeline component for a given component name. name (str): Name of pipeline component to get. 
@@ -628,7 +627,7 @@ class Language: config: Dict[str, Any] = SimpleFrozenDict(), raw_config: Optional[Config] = None, validate: bool = True, - ) -> "Pipe": + ) -> PipeCallable: """Create a pipeline component. Mostly used internally. To create and add a component to the pipeline, you can use nlp.add_pipe. @@ -640,7 +639,7 @@ class Language: raw_config (Optional[Config]): Internals: the non-interpolated config. validate (bool): Whether to validate the component config against the arguments and types expected by the factory. - RETURNS (Pipe): The pipeline component. + RETURNS (Callable[[Doc], Doc]): The pipeline component. DOCS: https://spacy.io/api/language#create_pipe """ @@ -695,13 +694,13 @@ class Language: def create_pipe_from_source( self, source_name: str, source: "Language", *, name: str - ) -> Tuple["Pipe", str]: + ) -> Tuple[PipeCallable, str]: """Create a pipeline component by copying it from an existing model. source_name (str): Name of the component in the source pipeline. source (Language): The source nlp object to copy from. name (str): Optional alternative name to use in current pipeline. - RETURNS (Tuple[Callable, str]): The component and its factory name. + RETURNS (Tuple[Callable[[Doc], Doc], str]): The component and its factory name. """ # Check source type if not isinstance(source, Language): @@ -740,7 +739,7 @@ class Language: config: Dict[str, Any] = SimpleFrozenDict(), raw_config: Optional[Config] = None, validate: bool = True, - ) -> "Pipe": + ) -> PipeCallable: """Add a component to the processing pipeline. Valid components are callables that take a `Doc` object, modify it and return it. Only one of before/after/first/last can be set. Default behaviour is "last". @@ -763,7 +762,7 @@ class Language: raw_config (Optional[Config]): Internals: the non-interpolated config. validate (bool): Whether to validate the component config against the arguments and types expected by the factory. - RETURNS (Pipe): The pipeline component. + RETURNS (Callable[[Doc], Doc]): The pipeline component. DOCS: https://spacy.io/api/language#add_pipe """ @@ -869,7 +868,7 @@ class Language: *, config: Dict[str, Any] = SimpleFrozenDict(), validate: bool = True, - ) -> "Pipe": + ) -> PipeCallable: """Replace a component in the pipeline. name (str): Name of the component to replace. @@ -878,7 +877,7 @@ class Language: component. Will be merged with default config, if available. validate (bool): Whether to validate the component config against the arguments and types expected by the factory. - RETURNS (Pipe): The new pipeline component. + RETURNS (Callable[[Doc], Doc]): The new pipeline component. DOCS: https://spacy.io/api/language#replace_pipe """ @@ -930,11 +929,11 @@ class Language: init_cfg = self._config["initialize"]["components"].pop(old_name) self._config["initialize"]["components"][new_name] = init_cfg - def remove_pipe(self, name: str) -> Tuple[str, "Pipe"]: + def remove_pipe(self, name: str) -> Tuple[str, PipeCallable]: """Remove a component from the pipeline. name (str): Name of the component to remove. - RETURNS (tuple): A `(name, component)` tuple of the removed component. + RETURNS (Tuple[str, Callable[[Doc], Doc]]): A `(name, component)` tuple of the removed component. 
DOCS: https://spacy.io/api/language#remove_pipe """ @@ -1349,15 +1348,15 @@ class Language: def set_error_handler( self, - error_handler: Callable[[str, "Pipe", List[Doc], Exception], NoReturn], + error_handler: Callable[[str, PipeCallable, List[Doc], Exception], NoReturn], ): - """Set an error handler object for all the components in the pipeline that implement - a set_error_handler function. + """Set an error handler object for all the components in the pipeline + that implement a set_error_handler function. - error_handler (Callable[[str, Pipe, List[Doc], Exception], NoReturn]): - Function that deals with a failing batch of documents. This callable function should take in - the component's name, the component itself, the offending batch of documents, and the exception - that was thrown. + error_handler (Callable[[str, Callable[[Doc], Doc], List[Doc], Exception], NoReturn]): + Function that deals with a failing batch of documents. This callable + function should take in the component's name, the component itself, + the offending batch of documents, and the exception that was thrown. DOCS: https://spacy.io/api/language#set_error_handler """ self.default_error_handler = error_handler diff --git a/spacy/tests/pipeline/test_textcat.py b/spacy/tests/pipeline/test_textcat.py index 2eda9deaf..155ce99a2 100644 --- a/spacy/tests/pipeline/test_textcat.py +++ b/spacy/tests/pipeline/test_textcat.py @@ -838,8 +838,8 @@ def test_textcat_loss(multi_label: bool, expected_loss: float): textcat = nlp.add_pipe("textcat_multilabel") else: textcat = nlp.add_pipe("textcat") - textcat.initialize(lambda: train_examples) assert isinstance(textcat, TextCategorizer) + textcat.initialize(lambda: train_examples) scores = textcat.model.ops.asarray( [[0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 1.0, 1.0]], dtype="f" # type: ignore ) diff --git a/spacy/util.py b/spacy/util.py index 76a1e0bfa..cba403361 100644 --- a/spacy/util.py +++ b/spacy/util.py @@ -51,8 +51,7 @@ from . import about if TYPE_CHECKING: # This lets us add type hints for mypy etc. without causing circular imports - from .language import Language # noqa: F401 - from .pipeline import Pipe # noqa: F401 + from .language import Language, PipeCallable # noqa: F401 from .tokens import Doc, Span # noqa: F401 from .vocab import Vocab # noqa: F401 @@ -1642,9 +1641,9 @@ def check_bool_env_var(env_var: str) -> bool: def _pipe( docs: Iterable["Doc"], - proc: "Pipe", + proc: "PipeCallable", name: str, - default_error_handler: Callable[[str, "Pipe", List["Doc"], Exception], NoReturn], + default_error_handler: Callable[[str, "PipeCallable", List["Doc"], Exception], NoReturn], kwargs: Mapping[str, Any], ) -> Iterator["Doc"]: if hasattr(proc, "pipe"): From afd7a2476d2491af864d0723bff96191ea61b429 Mon Sep 17 00:00:00 2001 From: Damian Romero <12145757+damian-romero@users.noreply.github.com> Date: Thu, 1 Dec 2022 07:06:28 -0500 Subject: [PATCH 50/55] Fix typo in vocab.md table (#11908) * Fix typo in vocab.md table Fixes explosion/spaCy/#11907 * Reformat vocab.md with Prettier --- website/docs/api/vocab.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/docs/api/vocab.md b/website/docs/api/vocab.md index afbd1301d..5e4de219a 100644 --- a/website/docs/api/vocab.md +++ b/website/docs/api/vocab.md @@ -308,14 +308,14 @@ Load state from a binary string. 
> assert type(PERSON) == int > ``` -| Name | Description | -| ---------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `strings` | A table managing the string-to-int mapping. ~~StringStore~~ | -| `vectors` | A table associating word IDs to word vectors. ~~Vectors~~ | -| `vectors_length` | Number of dimensions for each word vector. ~~int~~ | -| `lookups` | The available lookup tables in this vocab. ~~Lookups~~ | -| `writing_system` | A dict with information about the language's writing system. ~~Dict[str, Any]~~ | -| `get_noun_chunks` 3.0 | A function that yields base noun phrases used for [`Doc.noun_chunks`](/ap/doc#noun_chunks). ~~Optional[Callable[[Union[Doc, Span], Iterator[Tuple[int, int, int]]]]]~~ | +| Name | Description | +| ---------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `strings` | A table managing the string-to-int mapping. ~~StringStore~~ | +| `vectors` | A table associating word IDs to word vectors. ~~Vectors~~ | +| `vectors_length` | Number of dimensions for each word vector. ~~int~~ | +| `lookups` | The available lookup tables in this vocab. ~~Lookups~~ | +| `writing_system` | A dict with information about the language's writing system. ~~Dict[str, Any]~~ | +| `get_noun_chunks` 3.0 | A function that yields base noun phrases used for [`Doc.noun_chunks`](/api/doc#noun_chunks). ~~Optional[Callable[[Union[Doc, Span], Iterator[Tuple[int, int, int]]]]]~~ | ## Serialization fields {#serialization-fields} From 9cf3fa9711dfff94e88d6e137a52ebabdcceaad8 Mon Sep 17 00:00:00 2001 From: Zhangrp Date: Thu, 1 Dec 2022 20:30:27 +0800 Subject: [PATCH 51/55] Add docs for biluo_to_iob and iob_to_biluo. (#11901) * Add docs for biluo_to_iob and iob_to_biluo. * Fix typos. * Remove redundant links. --- website/docs/api/top-level.md | 48 +++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/website/docs/api/top-level.md b/website/docs/api/top-level.md index 211affa4a..26a5d42f4 100644 --- a/website/docs/api/top-level.md +++ b/website/docs/api/top-level.md @@ -1004,6 +1004,54 @@ This method was previously available as `spacy.gold.spans_from_biluo_tags`. | `tags` | A sequence of [BILUO](/usage/linguistic-features#accessing-ner) tags with each tag describing one token. Each tag string will be of the form of either `""`, `"O"` or `"{action}-{label}"`, where action is one of `"B"`, `"I"`, `"L"`, `"U"`. ~~List[str]~~ | | **RETURNS** | A sequence of `Span` objects with added entity labels. ~~List[Span]~~ | +### training.biluo_to_iob {#biluo_to_iob tag="function"} + +Convert a sequence of [BILUO](/usage/linguistic-features#accessing-ner) tags to +[IOB](/usage/linguistic-features#accessing-ner) tags. This is useful if you want +use the BILUO tags with a model that only supports IOB tags. + +> #### Example +> +> ```python +> from spacy.training import biluo_to_iob +> +> tags = ["O", "O", "B-LOC", "I-LOC", "L-LOC", "O"] +> iob_tags = biluo_to_iob(tags) +> assert iob_tags == ["O", "O", "B-LOC", "I-LOC", "I-LOC", "O"] +> ``` + +| Name | Description | +| ----------- | --------------------------------------------------------------------------------------- | +| `tags` | A sequence of [BILUO](/usage/linguistic-features#accessing-ner) tags. 
~~Iterable[str]~~ | +| **RETURNS** | A list of [IOB](/usage/linguistic-features#accessing-ner) tags. ~~List[str]~~ | + +### training.iob_to_biluo {#iob_to_biluo tag="function"} + +Convert a sequence of [IOB](/usage/linguistic-features#accessing-ner) tags to +[BILUO](/usage/linguistic-features#accessing-ner) tags. This is useful if you +want use the IOB tags with a model that only supports BILUO tags. + + + +This method was previously available as `spacy.gold.iob_to_biluo`. + + + +> #### Example +> +> ```python +> from spacy.training import iob_to_biluo +> +> tags = ["O", "O", "B-LOC", "I-LOC", "O"] +> biluo_tags = iob_to_biluo(tags) +> assert biluo_tags == ["O", "O", "B-LOC", "L-LOC", "O"] +> ``` + +| Name | Description | +| ----------- | ------------------------------------------------------------------------------------- | +| `tags` | A sequence of [IOB](/usage/linguistic-features#accessing-ner) tags. ~~Iterable[str]~~ | +| **RETURNS** | A list of [BILUO](/usage/linguistic-features#accessing-ner) tags. ~~List[str]~~ | + ## Utility functions {#util source="spacy/util.py"} spaCy comes with a small collection of utility functions located in From 445c670a2d537598b3d562fb7f444050164a260b Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Fri, 2 Dec 2022 09:33:52 +0100 Subject: [PATCH 52/55] Fix spancat for zero suggestions (#11860) * Add test for spancat predict with zero suggestions * Fix spancat for zero suggestions * Undo changes to extract_spans * Use .sum() as in update --- spacy/pipeline/spancat.py | 5 +++- spacy/tests/pipeline/test_spancat.py | 43 ++++++++++++++++++++++------ 2 files changed, 38 insertions(+), 10 deletions(-) diff --git a/spacy/pipeline/spancat.py b/spacy/pipeline/spancat.py index 0a84c72fd..a3388e81a 100644 --- a/spacy/pipeline/spancat.py +++ b/spacy/pipeline/spancat.py @@ -272,7 +272,10 @@ class SpanCategorizer(TrainablePipe): DOCS: https://spacy.io/api/spancategorizer#predict """ indices = self.suggester(docs, ops=self.model.ops) - scores = self.model.predict((docs, indices)) # type: ignore + if indices.lengths.sum() == 0: + scores = self.model.ops.alloc2f(0, 0) + else: + scores = self.model.predict((docs, indices)) # type: ignore return indices, scores def set_candidates( diff --git a/spacy/tests/pipeline/test_spancat.py b/spacy/tests/pipeline/test_spancat.py index 15256a763..e9db983d3 100644 --- a/spacy/tests/pipeline/test_spancat.py +++ b/spacy/tests/pipeline/test_spancat.py @@ -372,24 +372,39 @@ def test_overfitting_IO_overlapping(): def test_zero_suggestions(): - # Test with a suggester that returns 0 suggestions + # Test with a suggester that can return 0 suggestions - @registry.misc("test_zero_suggester") - def make_zero_suggester(): - def zero_suggester(docs, *, ops=None): + @registry.misc("test_mixed_zero_suggester") + def make_mixed_zero_suggester(): + def mixed_zero_suggester(docs, *, ops=None): if ops is None: ops = get_current_ops() - return Ragged( - ops.xp.zeros((0, 0), dtype="i"), ops.xp.zeros((len(docs),), dtype="i") - ) + spans = [] + lengths = [] + for doc in docs: + if len(doc) > 0 and len(doc) % 2 == 0: + spans.append((0, 1)) + lengths.append(1) + else: + lengths.append(0) + spans = ops.asarray2i(spans) + lengths_array = ops.asarray1i(lengths) + if len(spans) > 0: + output = Ragged(ops.xp.vstack(spans), lengths_array) + else: + output = Ragged(ops.xp.zeros((0, 0), dtype="i"), lengths_array) + return output - return zero_suggester + return mixed_zero_suggester fix_random_seed(0) nlp = English() spancat = nlp.add_pipe( "spancat", - 
config={"suggester": {"@misc": "test_zero_suggester"}, "spans_key": SPAN_KEY}, + config={ + "suggester": {"@misc": "test_mixed_zero_suggester"}, + "spans_key": SPAN_KEY, + }, ) train_examples = make_examples(nlp) optimizer = nlp.initialize(get_examples=lambda: train_examples) @@ -397,6 +412,16 @@ def test_zero_suggestions(): assert set(spancat.labels) == {"LOC", "PERSON"} nlp.update(train_examples, sgd=optimizer) + # empty doc + nlp("") + # single doc with zero suggestions + nlp("one") + # single doc with one suggestion + nlp("two two") + # batch with mixed zero/one suggestions + list(nlp.pipe(["one", "two two", "three three three", "", "four four four four"])) + # batch with no suggestions + list(nlp.pipe(["", "one", "three three three"])) def test_set_candidates(): From f9d17a644b3d037924f715c03672ada6d12e4d86 Mon Sep 17 00:00:00 2001 From: Paul O'Leary McCann Date: Fri, 2 Dec 2022 18:17:11 +0900 Subject: [PATCH 53/55] Config generation fails for GPU without transformers (#11899) If you don't have spacy-transformers installed, but try to use `init config` with the GPU flag, you'll get an error. The issue is that the `use_transformers` flag in the config is conflated with the GPU flag, and then there's an attempt to access transformers config info that may not exist. There may be a better way to do this, but this stops the error. --- spacy/cli/templates/quickstart_training.jinja | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spacy/cli/templates/quickstart_training.jinja b/spacy/cli/templates/quickstart_training.jinja index 58864883a..b961ac892 100644 --- a/spacy/cli/templates/quickstart_training.jinja +++ b/spacy/cli/templates/quickstart_training.jinja @@ -1,7 +1,7 @@ {# This is a template for training configs used for the quickstart widget in the docs and the init config command. It encodes various best practices and can help generate the best possible configuration, given a user's requirements. 
#} -{%- set use_transformer = hardware != "cpu" -%} +{%- set use_transformer = hardware != "cpu" and transformer_data -%} {%- set transformer = transformer_data[optimize] if use_transformer else {} -%} {%- set listener_components = ["tagger", "morphologizer", "parser", "ner", "textcat", "textcat_multilabel", "entity_linker", "spancat", "trainable_lemmatizer"] -%} [paths] From df0cb4b77be6e20a62143f5f65c3e165a4a45bcc Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 2 Dec 2022 14:49:12 +0100 Subject: [PATCH 54/55] Auto-format code with black (#11913) Co-authored-by: explosion-bot --- spacy/util.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/spacy/util.py b/spacy/util.py index cba403361..8d211a9a5 100644 --- a/spacy/util.py +++ b/spacy/util.py @@ -1643,7 +1643,9 @@ def _pipe( docs: Iterable["Doc"], proc: "PipeCallable", name: str, - default_error_handler: Callable[[str, "PipeCallable", List["Doc"], Exception], NoReturn], + default_error_handler: Callable[ + [str, "PipeCallable", List["Doc"], Exception], NoReturn + ], kwargs: Mapping[str, Any], ) -> Iterator["Doc"]: if hasattr(proc, "pipe"): From 4b2097a2713b548cba1c841fa5cb8f6f42e3e30f Mon Sep 17 00:00:00 2001 From: Sofie Van Landeghem Date: Mon, 5 Dec 2022 08:29:13 +0100 Subject: [PATCH 55/55] fix links (#11927) --- website/docs/usage/v3-4.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/docs/usage/v3-4.md b/website/docs/usage/v3-4.md index 597fc3cc8..e10110b71 100644 --- a/website/docs/usage/v3-4.md +++ b/website/docs/usage/v3-4.md @@ -66,8 +66,8 @@ The English CNN pipelines have new word vectors: | Package | Model Version | TAG | Parser LAS | NER F | | ----------------------------------------------- | ------------- | ---: | ---------: | ----: | | [`en_core_web_md`](/models/en#en_core_web_md) | v3.3.0 | 97.3 | 90.1 | 84.6 | -| [`en_core_web_md`](/models/en#en_core_web_lg) | v3.4.0 | 97.2 | 90.3 | 85.5 | -| [`en_core_web_lg`](/models/en#en_core_web_md) | v3.3.0 | 97.4 | 90.1 | 85.3 | +| [`en_core_web_md`](/models/en#en_core_web_md) | v3.4.0 | 97.2 | 90.3 | 85.5 | +| [`en_core_web_lg`](/models/en#en_core_web_lg) | v3.3.0 | 97.4 | 90.1 | 85.3 | | [`en_core_web_lg`](/models/en#en_core_web_lg) | v3.4.0 | 97.3 | 90.2 | 85.6 | ## Notes about upgrading from v3.3 {#upgrading}