Merge branch 'feature/coref' into fix/coref-alignment

Had to renumber an error message (the coref misalignment error moved from E1038 to E1043).
Paul O'Leary McCann 2022-07-01 19:09:29 +09:00
commit 79720886fa
89 changed files with 3407 additions and 540 deletions

View File

@ -64,12 +64,12 @@ steps:
displayName: "Run GPU tests" displayName: "Run GPU tests"
condition: eq(${{ parameters.gpu }}, true) condition: eq(${{ parameters.gpu }}, true)
- script: | # - script: |
python -m spacy download ca_core_news_sm # python -m spacy download ca_core_news_sm
python -m spacy download ca_core_news_md # python -m spacy download ca_core_news_md
python -c "import spacy; nlp=spacy.load('ca_core_news_sm'); doc=nlp('test')" # python -c "import spacy; nlp=spacy.load('ca_core_news_sm'); doc=nlp('test')"
displayName: 'Test download CLI' # displayName: 'Test download CLI'
condition: eq(variables['python_version'], '3.8') # condition: eq(variables['python_version'], '3.8')
- script: | - script: |
python -m spacy convert extra/example_data/ner_example_data/ner-token-per-line-conll2003.json . python -m spacy convert extra/example_data/ner_example_data/ner-token-per-line-conll2003.json .
@ -93,17 +93,17 @@ steps:
displayName: 'Test train CLI' displayName: 'Test train CLI'
condition: eq(variables['python_version'], '3.8') condition: eq(variables['python_version'], '3.8')
- script: | # - script: |
python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_sm'}; config.to_disk('ner_source_sm.cfg')" # python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_sm'}; config.to_disk('ner_source_sm.cfg')"
PYTHONWARNINGS="error,ignore::DeprecationWarning" python -m spacy assemble ner_source_sm.cfg output_dir # PYTHONWARNINGS="error,ignore::DeprecationWarning" python -m spacy assemble ner_source_sm.cfg output_dir
displayName: 'Test assemble CLI' # displayName: 'Test assemble CLI'
condition: eq(variables['python_version'], '3.8') # condition: eq(variables['python_version'], '3.8')
#
- script: | # - script: |
python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_md'}; config.to_disk('ner_source_md.cfg')" # python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_md'}; config.to_disk('ner_source_md.cfg')"
python -m spacy assemble ner_source_md.cfg output_dir 2>&1 | grep -q W113 # python -m spacy assemble ner_source_md.cfg output_dir 2>&1 | grep -q W113
displayName: 'Test assemble CLI vectors warning' # displayName: 'Test assemble CLI vectors warning'
condition: eq(variables['python_version'], '3.8') # condition: eq(variables['python_version'], '3.8')
- script: | - script: |
python .github/validate_universe_json.py website/meta/universe.json python .github/validate_universe_json.py website/meta/universe.json
@ -111,7 +111,7 @@ steps:
condition: eq(variables['python_version'], '3.8') condition: eq(variables['python_version'], '3.8')
- script: | - script: |
${{ parameters.prefix }} python -m pip install thinc-apple-ops ${{ parameters.prefix }} python -m pip install --pre thinc-apple-ops
${{ parameters.prefix }} python -m pytest --pyargs spacy ${{ parameters.prefix }} python -m pytest --pyargs spacy
displayName: "Run CPU tests with thinc-apple-ops" displayName: "Run CPU tests with thinc-apple-ops"
condition: and(startsWith(variables['imageName'], 'macos'), eq(variables['python.version'], '3.9')) condition: and(startsWith(variables['imageName'], 'macos'), eq(variables['python.version'], '3.10'))

.github/contributors/Lucaterre.md (vendored, new file, 106 lines added)
View File

@ -0,0 +1,106 @@
# spaCy contributor agreement
This spaCy Contributor Agreement (**"SCA"**) is based on the
[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf).
The SCA applies to any contribution that you make to any product or project
managed by us (the **"project"**), and sets out the intellectual property rights
you grant to us in the contributed materials. The term **"us"** shall mean
[ExplosionAI GmbH](https://explosion.ai/legal). The term
**"you"** shall mean the person or entity identified below.
If you agree to be bound by these terms, fill in the information requested
below and include the filled-in version with your first pull request, under the
folder [`.github/contributors/`](/.github/contributors/). The name of the file
should be your GitHub username, with the extension `.md`. For example, the user
example_user would create the file `.github/contributors/example_user.md`.
Read this agreement carefully before signing. These terms and conditions
constitute a binding legal agreement.
## Contributor Agreement
1. The term "contribution" or "contributed materials" means any source code,
object code, patch, tool, sample, graphic, specification, manual,
documentation, or any other material posted or submitted by you to the project.
2. With respect to any worldwide copyrights, or copyright applications and
registrations, in your contribution:
* you hereby assign to us joint ownership, and to the extent that such
assignment is or becomes invalid, ineffective or unenforceable, you hereby
grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge,
royalty-free, unrestricted license to exercise all rights under those
copyrights. This includes, at our option, the right to sublicense these same
rights to third parties through multiple levels of sublicensees or other
licensing arrangements;
* you agree that each of us can do all things in relation to your
contribution as if each of us were the sole owners, and if one of us makes
a derivative work of your contribution, the one who makes the derivative
work (or has it made) will be the sole owner of that derivative work;
* you agree that you will not assert any moral rights in your contribution
against us, our licensees or transferees;
* you agree that we may register a copyright in your contribution and
exercise all ownership rights associated with it; and
* you agree that neither of us has any duty to consult with, obtain the
consent of, pay or render an accounting to the other for any use or
distribution of your contribution.
3. With respect to any patents you own, or that you can license without payment
to any third party, you hereby grant to us a perpetual, irrevocable,
non-exclusive, worldwide, no-charge, royalty-free license to:
* make, have made, use, sell, offer to sell, import, and otherwise transfer
your contribution in whole or in part, alone or in combination with or
included in any product, work or materials arising out of the project to
which your contribution was submitted, and
* at our option, to sublicense these same rights to third parties through
multiple levels of sublicensees or other licensing arrangements.
4. Except as set out above, you keep all right, title, and interest in your
contribution. The rights that you grant to us under these terms are effective
on the date you first submitted a contribution to us, even if your submission
took place before the date you sign these terms.
5. You covenant, represent, warrant and agree that:
* Each contribution that you submit is and shall be an original work of
authorship and you can legally grant the rights set out in this SCA;
* to the best of your knowledge, each contribution will not violate any
third party's copyrights, trademarks, patents, or other intellectual
property rights; and
* each contribution shall be in compliance with U.S. export control laws and
other applicable export and import laws. You agree to notify us if you
become aware of any circumstance which would make any of the foregoing
representations inaccurate in any respect. We may publicly disclose your
participation in the project, including the fact that you have signed the SCA.
6. This SCA is governed by the laws of the State of California and applicable
U.S. Federal law. Any choice of law rules will not apply.
7. Please place an “x” on one of the applicable statements below. Please do NOT
mark both statements:
* [x] I am signing on behalf of myself as an individual and no other person
or entity, including my employer, has or will have rights with respect to my
contributions.
* [ ] I am signing on behalf of my employer or a legal entity and I have the
actual authority to contractually bind that entity.
## Contributor Details
| Field | Entry |
|------------------------------- |---------------|
| Name | Lucas Terriel |
| Company name (if applicable) | |
| Title or role (if applicable) | |
| Date | 2022-06-20 |
| GitHub username | Lucaterre |
| Website (optional) | |

View File

@ -23,5 +23,5 @@ jobs:
env: env:
INPUT_TOKEN: ${{ secrets.EXPLOSIONBOT_TOKEN }} INPUT_TOKEN: ${{ secrets.EXPLOSIONBOT_TOKEN }}
INPUT_BK_TOKEN: ${{ secrets.BUILDKITE_SECRET }} INPUT_BK_TOKEN: ${{ secrets.BUILDKITE_SECRET }}
ENABLED_COMMANDS: "test_gpu,test_slow" ENABLED_COMMANDS: "test_gpu,test_slow,test_slow_gpu"
ALLOWED_TEAMS: "spaCy" ALLOWED_TEAMS: "spaCy"

View File

@ -1,4 +1,4 @@
recursive-include spacy *.pyi *.pyx *.pxd *.txt *.cfg *.jinja *.toml recursive-include spacy *.pyi *.pyx *.pxd *.txt *.cfg *.jinja *.toml *.hh
include LICENSE include LICENSE
include README.md include README.md
include pyproject.toml include pyproject.toml

View File

@ -16,7 +16,7 @@ production-ready [**training system**](https://spacy.io/usage/training) and easy
model packaging, deployment and workflow management. spaCy is commercial model packaging, deployment and workflow management. spaCy is commercial
open-source software, released under the MIT license. open-source software, released under the MIT license.
💫 **Version 3.2 out now!** 💫 **Version 3.3.1 out now!**
[Check out the release notes here.](https://github.com/explosion/spaCy/releases) [Check out the release notes here.](https://github.com/explosion/spaCy/releases)
[![Azure Pipelines](https://img.shields.io/azure-devops/build/explosion-ai/public/8/master.svg?logo=azure-pipelines&style=flat-square&label=build)](https://dev.azure.com/explosion-ai/public/_build?definitionId=8) [![Azure Pipelines](https://img.shields.io/azure-devops/build/explosion-ai/public/8/master.svg?logo=azure-pipelines&style=flat-square&label=build)](https://dev.azure.com/explosion-ai/public/_build?definitionId=8)

View File

@ -455,6 +455,10 @@ Regression tests are tests that refer to bugs reported in specific issues. They
The test suite also provides [fixtures](https://github.com/explosion/spaCy/blob/master/spacy/tests/conftest.py) for different language tokenizers that can be used as function arguments of the same name and will be passed in automatically. Those should only be used for tests related to those specific languages. We also have [test utility functions](https://github.com/explosion/spaCy/blob/master/spacy/tests/util.py) for common operations, like creating a temporary file. The test suite also provides [fixtures](https://github.com/explosion/spaCy/blob/master/spacy/tests/conftest.py) for different language tokenizers that can be used as function arguments of the same name and will be passed in automatically. Those should only be used for tests related to those specific languages. We also have [test utility functions](https://github.com/explosion/spaCy/blob/master/spacy/tests/util.py) for common operations, like creating a temporary file.
### Testing Cython Code
If you're developing Cython code (`.pyx` files), those extensions will need to be built before the test runner can test that code - otherwise it's going to run the tests with stale code from the last time the extension was built. You can build the extensions locally with `python setup.py build_ext -i`.
### Constructing objects and state ### Constructing objects and state
Test functions usually follow the same simple structure: they set up some state, perform the operation you want to test and `assert` conditions that you expect to be true, usually before and after the operation. Test functions usually follow the same simple structure: they set up some state, perform the operation you want to test and `assert` conditions that you expect to be true, usually before and after the operation.

View File

@ -5,8 +5,7 @@ requires = [
"cymem>=2.0.2,<2.1.0", "cymem>=2.0.2,<2.1.0",
"preshed>=3.0.2,<3.1.0", "preshed>=3.0.2,<3.1.0",
"murmurhash>=0.28.0,<1.1.0", "murmurhash>=0.28.0,<1.1.0",
"thinc>=8.1.0.dev0,<8.2.0", "thinc>=8.1.0.dev3,<8.2.0",
"blis>=0.9.0,<0.10.0",
"pathy", "pathy",
"numpy>=1.15.0", "numpy>=1.15.0",
] ]

View File

@ -3,8 +3,7 @@ spacy-legacy>=3.0.9,<3.1.0
spacy-loggers>=1.0.0,<2.0.0 spacy-loggers>=1.0.0,<2.0.0
cymem>=2.0.2,<2.1.0 cymem>=2.0.2,<2.1.0
preshed>=3.0.2,<3.1.0 preshed>=3.0.2,<3.1.0
thinc>=8.1.0.dev0,<8.2.0 thinc>=8.1.0.dev3,<8.2.0
blis>=0.9.0,<0.10.0
ml_datasets>=0.2.0,<0.3.0 ml_datasets>=0.2.0,<0.3.0
murmurhash>=0.28.0,<1.1.0 murmurhash>=0.28.0,<1.1.0
wasabi>=0.9.1,<1.1.0 wasabi>=0.9.1,<1.1.0
@ -22,7 +21,7 @@ langcodes>=3.2.0,<4.0.0
# Official Python utilities # Official Python utilities
setuptools setuptools
packaging>=20.0 packaging>=20.0
typing_extensions>=3.7.4.1,<4.0.0.0; python_version < "3.8" typing_extensions>=3.7.4.1,<4.2.0; python_version < "3.8"
# Development dependencies # Development dependencies
pre-commit>=2.13.0 pre-commit>=2.13.0
cython>=0.25,<3.0 cython>=0.25,<3.0

View File

@ -38,7 +38,7 @@ setup_requires =
cymem>=2.0.2,<2.1.0 cymem>=2.0.2,<2.1.0
preshed>=3.0.2,<3.1.0 preshed>=3.0.2,<3.1.0
murmurhash>=0.28.0,<1.1.0 murmurhash>=0.28.0,<1.1.0
thinc>=8.1.0.dev0,<8.2.0 thinc>=8.1.0.dev3,<8.2.0
install_requires = install_requires =
# Our libraries # Our libraries
spacy-legacy>=3.0.9,<3.1.0 spacy-legacy>=3.0.9,<3.1.0
@ -46,8 +46,7 @@ install_requires =
murmurhash>=0.28.0,<1.1.0 murmurhash>=0.28.0,<1.1.0
cymem>=2.0.2,<2.1.0 cymem>=2.0.2,<2.1.0
preshed>=3.0.2,<3.1.0 preshed>=3.0.2,<3.1.0
thinc>=8.1.0.dev0,<8.2.0 thinc>=8.1.0.dev3,<8.2.0
blis>=0.9.0,<0.10.0
wasabi>=0.9.1,<1.1.0 wasabi>=0.9.1,<1.1.0
srsly>=2.4.3,<3.0.0 srsly>=2.4.3,<3.0.0
catalogue>=2.0.6,<2.1.0 catalogue>=2.0.6,<2.1.0
@ -62,7 +61,7 @@ install_requires =
# Official Python utilities # Official Python utilities
setuptools setuptools
packaging>=20.0 packaging>=20.0
typing_extensions>=3.7.4,<4.0.0.0; python_version < "3.8" typing_extensions>=3.7.4,<4.2.0; python_version < "3.8"
langcodes>=3.2.0,<4.0.0 langcodes>=3.2.0,<4.0.0
[options.entry_points] [options.entry_points]
@ -105,7 +104,7 @@ cuda114 =
cuda115 = cuda115 =
cupy-cuda115>=5.0.0b4,<11.0.0 cupy-cuda115>=5.0.0b4,<11.0.0
apple = apple =
thinc-apple-ops>=0.0.4,<1.0.0 thinc-apple-ops>=0.1.0.dev0,<1.0.0
# Language tokenizers with external dependencies # Language tokenizers with external dependencies
ja = ja =
sudachipy>=0.5.2,!=0.6.1 sudachipy>=0.5.2,!=0.6.1

View File

@ -32,6 +32,7 @@ def load(
*, *,
vocab: Union[Vocab, bool] = True, vocab: Union[Vocab, bool] = True,
disable: Iterable[str] = util.SimpleFrozenList(), disable: Iterable[str] = util.SimpleFrozenList(),
enable: Iterable[str] = util.SimpleFrozenList(),
exclude: Iterable[str] = util.SimpleFrozenList(), exclude: Iterable[str] = util.SimpleFrozenList(),
config: Union[Dict[str, Any], Config] = util.SimpleFrozenDict(), config: Union[Dict[str, Any], Config] = util.SimpleFrozenDict(),
) -> Language: ) -> Language:
@ -42,6 +43,8 @@ def load(
disable (Iterable[str]): Names of pipeline components to disable. Disabled disable (Iterable[str]): Names of pipeline components to disable. Disabled
pipes will be loaded but they won't be run unless you explicitly pipes will be loaded but they won't be run unless you explicitly
enable them by calling nlp.enable_pipe. enable them by calling nlp.enable_pipe.
enable (Iterable[str]): Names of pipeline components to enable. All other
pipes will be disabled (but can be enabled later using nlp.enable_pipe).
exclude (Iterable[str]): Names of pipeline components to exclude. Excluded exclude (Iterable[str]): Names of pipeline components to exclude. Excluded
components won't be loaded. components won't be loaded.
config (Dict[str, Any] / Config): Config overrides as nested dict or dict config (Dict[str, Any] / Config): Config overrides as nested dict or dict
@ -49,7 +52,12 @@ def load(
RETURNS (Language): The loaded nlp object. RETURNS (Language): The loaded nlp object.
""" """
return util.load_model( return util.load_model(
name, vocab=vocab, disable=disable, exclude=exclude, config=config name,
vocab=vocab,
disable=disable,
enable=enable,
exclude=exclude,
config=config,
) )
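
A minimal usage sketch for the new `enable` argument (the pipeline and component names below are placeholders):

```python
import spacy

# Keep only the listed component enabled; all other pipes are loaded but
# disabled, and can be switched back on later with nlp.enable_pipe().
nlp = spacy.load("en_core_web_sm", enable=["senter"])
nlp.enable_pipe("parser")

# Combining `enable` with a conflicting `disable` raises E1042, because
# `enable` already implies disabling every other component.
# spacy.load("en_core_web_sm", enable=["senter"], disable=["ner"])
```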

View File

@ -1,6 +1,6 @@
# fmt: off # fmt: off
__title__ = "spacy" __title__ = "spacy"
__version__ = "3.3.0" __version__ = "3.4.0"
__download_url__ = "https://github.com/explosion/spacy-models/releases/download" __download_url__ = "https://github.com/explosion/spacy-models/releases/download"
__compatibility__ = "https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json" __compatibility__ = "https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json"
__projects__ = "https://github.com/explosion/projects" __projects__ = "https://github.com/explosion/projects"

View File

@ -12,7 +12,7 @@ from click.parser import split_arg_string
from typer.main import get_command from typer.main import get_command
from contextlib import contextmanager from contextlib import contextmanager
from thinc.api import Config, ConfigValidationError, require_gpu from thinc.api import Config, ConfigValidationError, require_gpu
from thinc.util import has_cupy, gpu_is_available from thinc.util import gpu_is_available
from configparser import InterpolationError from configparser import InterpolationError
import os import os
@ -554,5 +554,5 @@ def setup_gpu(use_gpu: int, silent=None) -> None:
require_gpu(use_gpu) require_gpu(use_gpu)
else: else:
local_msg.info("Using CPU") local_msg.info("Using CPU")
if has_cupy and gpu_is_available(): if gpu_is_available():
local_msg.info("To switch to GPU 0, use the option: --gpu-id 0") local_msg.info("To switch to GPU 0, use the option: --gpu-id 0")

View File

@ -10,7 +10,7 @@ import math
from ._util import app, Arg, Opt, show_validation_error, parse_config_overrides from ._util import app, Arg, Opt, show_validation_error, parse_config_overrides
from ._util import import_code, debug_cli from ._util import import_code, debug_cli
from ..training import Example from ..training import Example, remove_bilu_prefix
from ..training.initialize import get_sourced_components from ..training.initialize import get_sourced_components
from ..schemas import ConfigSchemaTraining from ..schemas import ConfigSchemaTraining
from ..pipeline._parser_internals import nonproj from ..pipeline._parser_internals import nonproj
@ -361,7 +361,7 @@ def debug_data(
if label != "-" if label != "-"
] ]
labels_with_counts = _format_labels(labels_with_counts, counts=True) labels_with_counts = _format_labels(labels_with_counts, counts=True)
msg.text(f"Labels in train data: {_format_labels(labels)}", show=verbose) msg.text(f"Labels in train data: {labels_with_counts}", show=verbose)
missing_labels = model_labels - labels missing_labels = model_labels - labels
if missing_labels: if missing_labels:
msg.warn( msg.warn(
@ -758,9 +758,9 @@ def _compile_gold(
# "Illegal" whitespace entity # "Illegal" whitespace entity
data["ws_ents"] += 1 data["ws_ents"] += 1
if label.startswith(("B-", "U-")): if label.startswith(("B-", "U-")):
combined_label = label.split("-")[1] combined_label = remove_bilu_prefix(label)
data["ner"][combined_label] += 1 data["ner"][combined_label] += 1
if sent_starts[i] == True and label.startswith(("I-", "L-")): if sent_starts[i] and label.startswith(("I-", "L-")):
data["boundary_cross_ents"] += 1 data["boundary_cross_ents"] += 1
elif label == "-": elif label == "-":
data["ner"]["-"] += 1 data["ner"]["-"] += 1
@ -908,7 +908,7 @@ def _get_examples_without_label(
for eg in data: for eg in data:
if component == "ner": if component == "ner":
labels = [ labels = [
label.split("-")[1] remove_bilu_prefix(label)
for label in eg.get_aligned_ner() for label in eg.get_aligned_ner()
if label not in ("O", "-", None) if label not in ("O", "-", None)
] ]
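
For reference, a small sketch of the helper that replaces the `label.split("-")[1]` calls above (assuming it strips just the leading BILU marker, so the rest of the label stays intact):

```python
from spacy.training import remove_bilu_prefix

assert remove_bilu_prefix("B-PERSON") == "PERSON"
assert remove_bilu_prefix("U-GPE") == "GPE"
# Assumed: only the first "-" is treated as the prefix separator, so a
# hypothetical hyphenated label like "B-PRODUCT-NAME" keeps "PRODUCT-NAME",
# where the old label.split("-")[1] would have returned just "PRODUCT".
```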

View File

@ -10,6 +10,7 @@ from jinja2 import Template
from .. import util from .. import util
from ..language import DEFAULT_CONFIG_PRETRAIN_PATH from ..language import DEFAULT_CONFIG_PRETRAIN_PATH
from ..schemas import RecommendationSchema from ..schemas import RecommendationSchema
from ..util import SimpleFrozenList
from ._util import init_cli, Arg, Opt, show_validation_error, COMMAND from ._util import init_cli, Arg, Opt, show_validation_error, COMMAND
from ._util import string_to_list, import_code from ._util import string_to_list, import_code
@ -24,16 +25,30 @@ class Optimizations(str, Enum):
accuracy = "accuracy" accuracy = "accuracy"
class InitValues:
"""
Default values for initialization. Dedicated class to keep the defaults for init_config_cli() and
init_config() in sync, i.e. for initialization calls via the CLI and via Python, respectively.
"""
lang = "en"
pipeline = SimpleFrozenList(["tagger", "parser", "ner"])
optimize = Optimizations.efficiency
gpu = False
pretraining = False
force_overwrite = False
@init_cli.command("config") @init_cli.command("config")
def init_config_cli( def init_config_cli(
# fmt: off # fmt: off
output_file: Path = Arg(..., help="File to save the config to or - for stdout (will only output config and no additional logging info)", allow_dash=True), output_file: Path = Arg(..., help="File to save the config to or - for stdout (will only output config and no additional logging info)", allow_dash=True),
lang: str = Opt("en", "--lang", "-l", help="Two-letter code of the language to use"), lang: str = Opt(InitValues.lang, "--lang", "-l", help="Two-letter code of the language to use"),
pipeline: str = Opt("tagger,parser,ner", "--pipeline", "-p", help="Comma-separated names of trainable pipeline components to include (without 'tok2vec' or 'transformer')"), pipeline: str = Opt(",".join(InitValues.pipeline), "--pipeline", "-p", help="Comma-separated names of trainable pipeline components to include (without 'tok2vec' or 'transformer')"),
optimize: Optimizations = Opt(Optimizations.efficiency.value, "--optimize", "-o", help="Whether to optimize for efficiency (faster inference, smaller model, lower memory consumption) or higher accuracy (potentially larger and slower model). This will impact the choice of architecture, pretrained weights and related hyperparameters."), optimize: Optimizations = Opt(InitValues.optimize, "--optimize", "-o", help="Whether to optimize for efficiency (faster inference, smaller model, lower memory consumption) or higher accuracy (potentially larger and slower model). This will impact the choice of architecture, pretrained weights and related hyperparameters."),
gpu: bool = Opt(False, "--gpu", "-G", help="Whether the model can run on GPU. This will impact the choice of architecture, pretrained weights and related hyperparameters."), gpu: bool = Opt(InitValues.gpu, "--gpu", "-G", help="Whether the model can run on GPU. This will impact the choice of architecture, pretrained weights and related hyperparameters."),
pretraining: bool = Opt(False, "--pretraining", "-pt", help="Include config for pretraining (with 'spacy pretrain')"), pretraining: bool = Opt(InitValues.pretraining, "--pretraining", "-pt", help="Include config for pretraining (with 'spacy pretrain')"),
force_overwrite: bool = Opt(False, "--force", "-F", help="Force overwriting the output file"), force_overwrite: bool = Opt(InitValues.force_overwrite, "--force", "-F", help="Force overwriting the output file"),
# fmt: on # fmt: on
): ):
""" """
@ -133,11 +148,11 @@ def fill_config(
def init_config( def init_config(
*, *,
lang: str, lang: str = InitValues.lang,
pipeline: List[str], pipeline: List[str] = InitValues.pipeline,
optimize: str, optimize: str = InitValues.optimize,
gpu: bool, gpu: bool = InitValues.gpu,
pretraining: bool = False, pretraining: bool = InitValues.pretraining,
silent: bool = True, silent: bool = True,
) -> Config: ) -> Config:
msg = Printer(no_print=silent) msg = Printer(no_print=silent)
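
With `InitValues` in place, `init_config()` can be called from Python without arguments and picks up the same defaults as the CLI; a minimal sketch (the generated config depends on the installed spaCy version):

```python
from spacy.cli.init_config import init_config

# Uses the InitValues defaults: lang="en", pipeline=["tagger", "parser", "ner"],
# optimize="efficiency", gpu=False, pretraining=False.
config = init_config()
config.to_disk("config.cfg")

# Roughly equivalent CLI call, which fills in the same defaults:
#   python -m spacy init config config.cfg
```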

View File

@ -204,6 +204,11 @@ class Warnings(metaclass=ErrorsWithCodes):
"for the corpora used to train the language. Please check " "for the corpora used to train the language. Please check "
"`nlp.meta[\"sources\"]` for any relevant links.") "`nlp.meta[\"sources\"]` for any relevant links.")
W119 = ("Overriding pipe name in `config` is not supported. Ignoring override '{name_in_config}'.") W119 = ("Overriding pipe name in `config` is not supported. Ignoring override '{name_in_config}'.")
W120 = ("Unable to load all spans in Doc.spans: more than one span group "
"with the name '{group_name}' was found in the saved spans data. "
"Only the last span group will be loaded under "
"Doc.spans['{group_name}']. Skipping span group with values: "
"{group_values}")
class Errors(metaclass=ErrorsWithCodes): class Errors(metaclass=ErrorsWithCodes):
@ -532,6 +537,8 @@ class Errors(metaclass=ErrorsWithCodes):
E202 = ("Unsupported {name} mode '{mode}'. Supported modes: {modes}.") E202 = ("Unsupported {name} mode '{mode}'. Supported modes: {modes}.")
# New errors added in v3.x # New errors added in v3.x
E854 = ("Unable to set doc.ents. Check that the 'ents_filter' does not "
"permit overlapping spans.")
E855 = ("Invalid {obj}: {obj} is not from the same doc.") E855 = ("Invalid {obj}: {obj} is not from the same doc.")
E856 = ("Error accessing span at position {i}: out of bounds in span group " E856 = ("Error accessing span at position {i}: out of bounds in span group "
"of length {length}.") "of length {length}.")
@ -903,8 +910,8 @@ class Errors(metaclass=ErrorsWithCodes):
E1022 = ("Words must be of type str or int, but input is of type '{wtype}'") E1022 = ("Words must be of type str or int, but input is of type '{wtype}'")
E1023 = ("Couldn't read EntityRuler from the {path}. This file doesn't " E1023 = ("Couldn't read EntityRuler from the {path}. This file doesn't "
"exist.") "exist.")
E1024 = ("A pattern with ID \"{ent_id}\" is not present in EntityRuler " E1024 = ("A pattern with {attr_type} '{label}' is not present in "
"patterns.") "'{component}' patterns.")
E1025 = ("Cannot intify the value '{value}' as an IOB string. The only " E1025 = ("Cannot intify the value '{value}' as an IOB string. The only "
"supported values are: 'I', 'O', 'B' and ''") "supported values are: 'I', 'O', 'B' and ''")
E1026 = ("Edit tree has an invalid format:\n{errors}") E1026 = ("Edit tree has an invalid format:\n{errors}")
@ -919,7 +926,15 @@ class Errors(metaclass=ErrorsWithCodes):
E1035 = ("Token index {i} out of bounds ({length})") E1035 = ("Token index {i} out of bounds ({length})")
E1036 = ("Cannot index into NoneNode") E1036 = ("Cannot index into NoneNode")
E1037 = ("Invalid attribute value '{attr}'.") E1037 = ("Invalid attribute value '{attr}'.")
E1038 = ("Misalignment in coref. Head token has no match in training doc.") E1038 = ("Invalid JSON input: {message}")
E1039 = ("The {obj} start or end annotations (start: {start}, end: {end}) "
"could not be aligned to token boundaries.")
E1040 = ("Doc.from_json requires all tokens to have the same attributes. "
"Some tokens do not contain annotation for: {partial_attrs}")
E1041 = ("Expected a string, Doc, or bytes as input, but got: {type}")
E1042 = ("Function was called with `{arg1}`={arg1_values} and "
"`{arg2}`={arg2_values} but these arguments are conflicting.")
E1043 = ("Misalignment in coref. Head token has no match in training doc.")
# Deprecated model shortcuts, only used in errors and warnings # Deprecated model shortcuts, only used in errors and warnings
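
The new E1038-E1040 messages accompany `Doc.from_json`; a quick round-trip sketch, assuming a blank English pipeline:

```python
import spacy
from spacy.tokens import Doc

nlp = spacy.blank("en")
doc = nlp("Round-tripping a Doc through JSON.")

# to_json()/from_json() round-trip; malformed input triggers E1038,
# misaligned span offsets E1039, and inconsistent token attributes E1040.
data = doc.to_json()
doc2 = Doc(nlp.vocab).from_json(data)
assert [t.text for t in doc2] == [t.text for t in doc]
```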

View File

@ -93,14 +93,14 @@ cdef class KnowledgeBase:
self.vocab = vocab self.vocab = vocab
self._create_empty_vectors(dummy_hash=self.vocab.strings[""]) self._create_empty_vectors(dummy_hash=self.vocab.strings[""])
def initialize_entities(self, int64_t nr_entities): def _initialize_entities(self, int64_t nr_entities):
self._entry_index = PreshMap(nr_entities + 1) self._entry_index = PreshMap(nr_entities + 1)
self._entries = entry_vec(nr_entities + 1) self._entries = entry_vec(nr_entities + 1)
def initialize_vectors(self, int64_t nr_entities): def _initialize_vectors(self, int64_t nr_entities):
self._vectors_table = float_matrix(nr_entities + 1) self._vectors_table = float_matrix(nr_entities + 1)
def initialize_aliases(self, int64_t nr_aliases): def _initialize_aliases(self, int64_t nr_aliases):
self._alias_index = PreshMap(nr_aliases + 1) self._alias_index = PreshMap(nr_aliases + 1)
self._aliases_table = alias_vec(nr_aliases + 1) self._aliases_table = alias_vec(nr_aliases + 1)
@ -155,8 +155,8 @@ cdef class KnowledgeBase:
raise ValueError(Errors.E140) raise ValueError(Errors.E140)
nr_entities = len(set(entity_list)) nr_entities = len(set(entity_list))
self.initialize_entities(nr_entities) self._initialize_entities(nr_entities)
self.initialize_vectors(nr_entities) self._initialize_vectors(nr_entities)
i = 0 i = 0
cdef KBEntryC entry cdef KBEntryC entry
@ -388,9 +388,9 @@ cdef class KnowledgeBase:
nr_entities = header[0] nr_entities = header[0]
nr_aliases = header[1] nr_aliases = header[1]
entity_vector_length = header[2] entity_vector_length = header[2]
self.initialize_entities(nr_entities) self._initialize_entities(nr_entities)
self.initialize_vectors(nr_entities) self._initialize_vectors(nr_entities)
self.initialize_aliases(nr_aliases) self._initialize_aliases(nr_aliases)
self.entity_vector_length = entity_vector_length self.entity_vector_length = entity_vector_length
def deserialize_vectors(b): def deserialize_vectors(b):
@ -512,8 +512,8 @@ cdef class KnowledgeBase:
cdef int64_t entity_vector_length cdef int64_t entity_vector_length
reader.read_header(&nr_entities, &entity_vector_length) reader.read_header(&nr_entities, &entity_vector_length)
self.initialize_entities(nr_entities) self._initialize_entities(nr_entities)
self.initialize_vectors(nr_entities) self._initialize_vectors(nr_entities)
self.entity_vector_length = entity_vector_length self.entity_vector_length = entity_vector_length
# STEP 1: load entity vectors # STEP 1: load entity vectors
@ -552,7 +552,7 @@ cdef class KnowledgeBase:
# STEP 3: load aliases # STEP 3: load aliases
cdef int64_t nr_aliases cdef int64_t nr_aliases
reader.read_alias_length(&nr_aliases) reader.read_alias_length(&nr_aliases)
self.initialize_aliases(nr_aliases) self._initialize_aliases(nr_aliases)
cdef int64_t nr_candidates cdef int64_t nr_candidates
cdef vector[int64_t] entry_indices cdef vector[int64_t] entry_indices

View File

@ -35,7 +35,7 @@ for pron in ["i"]:
_exc[orth + "m"] = [ _exc[orth + "m"] = [
{ORTH: orth, NORM: pron}, {ORTH: orth, NORM: pron},
{ORTH: "m", "tenspect": 1, "number": 1}, {ORTH: "m"},
] ]
_exc[orth + "'ma"] = [ _exc[orth + "'ma"] = [
@ -139,26 +139,27 @@ for pron in ["he", "she", "it"]:
# W-words, relative pronouns, prepositions etc. # W-words, relative pronouns, prepositions etc.
for word in [ for word, morph in [
"who", ("who", None),
"what", ("what", None),
"when", ("when", None),
"where", ("where", None),
"why", ("why", None),
"how", ("how", None),
"there", ("there", None),
"that", ("that", "Number=Sing|Person=3"),
"this", ("this", "Number=Sing|Person=3"),
"these", ("these", "Number=Plur|Person=3"),
"those", ("those", "Number=Plur|Person=3"),
]: ]:
for orth in [word, word.title()]: for orth in [word, word.title()]:
_exc[orth + "'s"] = [ if morph != "Number=Plur|Person=3":
{ORTH: orth, NORM: word}, _exc[orth + "'s"] = [
{ORTH: "'s", NORM: "'s"}, {ORTH: orth, NORM: word},
] {ORTH: "'s", NORM: "'s"},
]
_exc[orth + "s"] = [{ORTH: orth, NORM: word}, {ORTH: "s"}] _exc[orth + "s"] = [{ORTH: orth, NORM: word}, {ORTH: "s"}]
_exc[orth + "'ll"] = [ _exc[orth + "'ll"] = [
{ORTH: orth, NORM: word}, {ORTH: orth, NORM: word},
@ -182,25 +183,26 @@ for word in [
{ORTH: "ve", NORM: "have"}, {ORTH: "ve", NORM: "have"},
] ]
_exc[orth + "'re"] = [ if morph != "Number=Sing|Person=3":
{ORTH: orth, NORM: word}, _exc[orth + "'re"] = [
{ORTH: "'re", NORM: "are"}, {ORTH: orth, NORM: word},
] {ORTH: "'re", NORM: "are"},
]
_exc[orth + "re"] = [ _exc[orth + "re"] = [
{ORTH: orth, NORM: word}, {ORTH: orth, NORM: word},
{ORTH: "re", NORM: "are"}, {ORTH: "re", NORM: "are"},
] ]
_exc[orth + "'ve"] = [ _exc[orth + "'ve"] = [
{ORTH: orth, NORM: word}, {ORTH: orth, NORM: word},
{ORTH: "'ve"}, {ORTH: "'ve"},
] ]
_exc[orth + "ve"] = [ _exc[orth + "ve"] = [
{ORTH: orth}, {ORTH: orth},
{ORTH: "ve", NORM: "have"}, {ORTH: "ve", NORM: "have"},
] ]
_exc[orth + "'d"] = [ _exc[orth + "'d"] = [
{ORTH: orth, NORM: word}, {ORTH: orth, NORM: word},

View File

@ -1,4 +1,4 @@
from typing import Iterator, Optional, Any, Dict, Callable, Iterable from typing import Iterator, Optional, Any, Dict, Callable, Iterable, Collection
from typing import Union, Tuple, List, Set, Pattern, Sequence from typing import Union, Tuple, List, Set, Pattern, Sequence
from typing import NoReturn, TYPE_CHECKING, TypeVar, cast, overload from typing import NoReturn, TYPE_CHECKING, TypeVar, cast, overload
@ -1090,16 +1090,21 @@ class Language:
) )
return self.tokenizer(text) return self.tokenizer(text)
def _ensure_doc(self, doc_like: Union[str, Doc]) -> Doc: def _ensure_doc(self, doc_like: Union[str, Doc, bytes]) -> Doc:
"""Create a Doc if need be, or raise an error if the input is not a Doc or a string.""" """Create a Doc if need be, or raise an error if the input is not
a Doc, string, or a byte array (generated by Doc.to_bytes())."""
if isinstance(doc_like, Doc): if isinstance(doc_like, Doc):
return doc_like return doc_like
if isinstance(doc_like, str): if isinstance(doc_like, str):
return self.make_doc(doc_like) return self.make_doc(doc_like)
raise ValueError(Errors.E866.format(type=type(doc_like))) if isinstance(doc_like, bytes):
return Doc(self.vocab).from_bytes(doc_like)
raise ValueError(Errors.E1041.format(type=type(doc_like)))
def _ensure_doc_with_context(self, doc_like: Union[str, Doc], context: Any) -> Doc: def _ensure_doc_with_context(
"""Create a Doc if need be and add as_tuples context, or raise an error if the input is not a Doc or a string.""" self, doc_like: Union[str, Doc, bytes], context: _AnyContext
) -> Doc:
"""Call _ensure_doc to generate a Doc and set its context object."""
doc = self._ensure_doc(doc_like) doc = self._ensure_doc(doc_like)
doc._context = context doc._context = context
return doc return doc
@ -1519,7 +1524,6 @@ class Language:
DOCS: https://spacy.io/api/language#pipe DOCS: https://spacy.io/api/language#pipe
""" """
# Handle texts with context as tuples
if as_tuples: if as_tuples:
texts = cast(Iterable[Tuple[Union[str, Doc], _AnyContext]], texts) texts = cast(Iterable[Tuple[Union[str, Doc], _AnyContext]], texts)
docs_with_contexts = ( docs_with_contexts = (
@ -1597,8 +1601,21 @@ class Language:
n_process: int, n_process: int,
batch_size: int, batch_size: int,
) -> Iterator[Doc]: ) -> Iterator[Doc]:
def prepare_input(
texts: Iterable[Union[str, Doc]]
) -> Iterable[Tuple[Union[str, bytes], _AnyContext]]:
# Serialize Doc inputs to bytes to avoid incurring pickling
# overhead when they are passed to child processes. Also yield
# any context objects they might have separately (as they are not serialized).
for doc_like in texts:
if isinstance(doc_like, Doc):
yield (doc_like.to_bytes(), cast(_AnyContext, doc_like._context))
else:
yield (doc_like, cast(_AnyContext, None))
serialized_texts_with_ctx = prepare_input(texts) # type: ignore
# raw_texts is used later to stop iteration. # raw_texts is used later to stop iteration.
texts, raw_texts = itertools.tee(texts) texts, raw_texts = itertools.tee(serialized_texts_with_ctx) # type: ignore
# for sending texts to worker # for sending texts to worker
texts_q: List[mp.Queue] = [mp.Queue() for _ in range(n_process)] texts_q: List[mp.Queue] = [mp.Queue() for _ in range(n_process)]
# for receiving byte-encoded docs from worker # for receiving byte-encoded docs from worker
@ -1618,7 +1635,13 @@ class Language:
procs = [ procs = [
mp.Process( mp.Process(
target=_apply_pipes, target=_apply_pipes,
args=(self._ensure_doc, pipes, rch, sch, Underscore.get_state()), args=(
self._ensure_doc_with_context,
pipes,
rch,
sch,
Underscore.get_state(),
),
) )
for rch, sch in zip(texts_q, bytedocs_send_ch) for rch, sch in zip(texts_q, bytedocs_send_ch)
] ]
@ -1631,12 +1654,12 @@ class Language:
recv.recv() for recv in cycle(bytedocs_recv_ch) recv.recv() for recv in cycle(bytedocs_recv_ch)
) )
try: try:
for i, (_, (byte_doc, byte_context, byte_error)) in enumerate( for i, (_, (byte_doc, context, byte_error)) in enumerate(
zip(raw_texts, byte_tuples), 1 zip(raw_texts, byte_tuples), 1
): ):
if byte_doc is not None: if byte_doc is not None:
doc = Doc(self.vocab).from_bytes(byte_doc) doc = Doc(self.vocab).from_bytes(byte_doc)
doc._context = byte_context doc._context = context
yield doc yield doc
elif byte_error is not None: elif byte_error is not None:
error = srsly.msgpack_loads(byte_error) error = srsly.msgpack_loads(byte_error)
@ -1671,6 +1694,7 @@ class Language:
*, *,
vocab: Union[Vocab, bool] = True, vocab: Union[Vocab, bool] = True,
disable: Iterable[str] = SimpleFrozenList(), disable: Iterable[str] = SimpleFrozenList(),
enable: Iterable[str] = SimpleFrozenList(),
exclude: Iterable[str] = SimpleFrozenList(), exclude: Iterable[str] = SimpleFrozenList(),
meta: Dict[str, Any] = SimpleFrozenDict(), meta: Dict[str, Any] = SimpleFrozenDict(),
auto_fill: bool = True, auto_fill: bool = True,
@ -1685,6 +1709,8 @@ class Language:
disable (Iterable[str]): Names of pipeline components to disable. disable (Iterable[str]): Names of pipeline components to disable.
Disabled pipes will be loaded but they won't be run unless you Disabled pipes will be loaded but they won't be run unless you
explicitly enable them by calling nlp.enable_pipe. explicitly enable them by calling nlp.enable_pipe.
enable (Iterable[str]): Names of pipeline components to enable. All other
pipes will be disabled (and can be enabled using `nlp.enable_pipe`).
exclude (Iterable[str]): Names of pipeline components to exclude. exclude (Iterable[str]): Names of pipeline components to exclude.
Excluded components won't be loaded. Excluded components won't be loaded.
meta (Dict[str, Any]): Meta overrides for nlp.meta. meta (Dict[str, Any]): Meta overrides for nlp.meta.
@ -1838,8 +1864,15 @@ class Language:
# Restore the original vocab after sourcing if necessary # Restore the original vocab after sourcing if necessary
if vocab_b is not None: if vocab_b is not None:
nlp.vocab.from_bytes(vocab_b) nlp.vocab.from_bytes(vocab_b)
disabled_pipes = [*config["nlp"]["disabled"], *disable]
# Resolve disabled/enabled settings.
disabled_pipes = cls._resolve_component_status(
[*config["nlp"]["disabled"], *disable],
[*config["nlp"].get("enabled", []), *enable],
config["nlp"]["pipeline"],
)
nlp._disabled = set(p for p in disabled_pipes if p not in exclude) nlp._disabled = set(p for p in disabled_pipes if p not in exclude)
nlp.batch_size = config["nlp"]["batch_size"] nlp.batch_size = config["nlp"]["batch_size"]
nlp.config = filled if auto_fill else config nlp.config = filled if auto_fill else config
if after_pipeline_creation is not None: if after_pipeline_creation is not None:
@ -1991,6 +2024,42 @@ class Language:
serializers["vocab"] = lambda p: self.vocab.to_disk(p, exclude=exclude) serializers["vocab"] = lambda p: self.vocab.to_disk(p, exclude=exclude)
util.to_disk(path, serializers, exclude) util.to_disk(path, serializers, exclude)
@staticmethod
def _resolve_component_status(
disable: Iterable[str], enable: Iterable[str], pipe_names: Collection[str]
) -> Tuple[str, ...]:
"""Derives whether (1) `disable` and `enable` values are consistent and (2)
resolves those to a single set of disabled components. Raises an error in
case of inconsistency.
disable (Iterable[str]): Names of pipeline components to disable.
enable (Iterable[str]): Names of pipeline components to enable.
pipe_names (Iterable[str]): Names of all pipeline components.
RETURNS (Tuple[str, ...]): Names of components to disable, resolved from the
specified `enable` and `disable` settings.
"""
if disable is not None and isinstance(disable, str):
disable = [disable]
to_disable = disable
if enable:
to_disable = [
pipe_name for pipe_name in pipe_names if pipe_name not in enable
]
if disable and disable != to_disable:
raise ValueError(
Errors.E1042.format(
arg1="enable",
arg2="disable",
arg1_values=enable,
arg2_values=disable,
)
)
return tuple(to_disable)
def from_disk( def from_disk(
self, self,
path: Union[str, Path], path: Union[str, Path],
@ -2163,7 +2232,7 @@ def _copy_examples(examples: Iterable[Example]) -> List[Example]:
def _apply_pipes( def _apply_pipes(
ensure_doc: Callable[[Union[str, Doc]], Doc], ensure_doc: Callable[[Union[str, Doc, bytes], _AnyContext], Doc],
pipes: Iterable[Callable[..., Iterator[Doc]]], pipes: Iterable[Callable[..., Iterator[Doc]]],
receiver, receiver,
sender, sender,
@ -2184,17 +2253,19 @@ def _apply_pipes(
Underscore.load_state(underscore_state) Underscore.load_state(underscore_state)
while True: while True:
try: try:
texts = receiver.get() texts_with_ctx = receiver.get()
docs = (ensure_doc(text) for text in texts) docs = (
ensure_doc(doc_like, context) for doc_like, context in texts_with_ctx
)
for pipe in pipes: for pipe in pipes:
docs = pipe(docs) # type: ignore[arg-type, assignment] docs = pipe(docs) # type: ignore[arg-type, assignment]
# Connection does not accept unpickable objects, so send list. # Connection does not accept unpickable objects, so send list.
byte_docs = [(doc.to_bytes(), doc._context, None) for doc in docs] byte_docs = [(doc.to_bytes(), doc._context, None) for doc in docs]
padding = [(None, None, None)] * (len(texts) - len(byte_docs)) padding = [(None, None, None)] * (len(texts_with_ctx) - len(byte_docs))
sender.send(byte_docs + padding) # type: ignore[operator] sender.send(byte_docs + padding) # type: ignore[operator]
except Exception: except Exception:
error_msg = [(None, None, srsly.msgpack_dumps(traceback.format_exc()))] error_msg = [(None, None, srsly.msgpack_dumps(traceback.format_exc()))]
padding = [(None, None, None)] * (len(texts) - 1) padding = [(None, None, None)] * (len(texts_with_ctx) - 1)
sender.send(error_msg + padding) sender.send(error_msg + padding)
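
To illustrate the `_ensure_doc` change: strings, `Doc` objects, and the bytes produced by `Doc.to_bytes()` are all accepted now, which is what the multiprocessing path above relies on. A sketch (the pipeline name is a placeholder):

```python
import spacy

nlp = spacy.load("en_core_web_sm")  # placeholder pipeline
doc = nlp.make_doc("This is a test.")

# nlp() and nlp.pipe() accept a string, a Doc, or Doc bytes.
processed = nlp(doc.to_bytes())

# With n_process > 1, Doc inputs are serialized to bytes before being sent
# to the worker processes, and their _context objects travel separately.
for out in nlp.pipe(["Plain text input.", doc], n_process=2):
    print(out.text)
```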

View File

@ -22,9 +22,11 @@ def forward(model, X, is_train):
nP = model.get_dim("nP") nP = model.get_dim("nP")
nI = model.get_dim("nI") nI = model.get_dim("nI")
W = model.get_param("W") W = model.get_param("W")
Yf = model.ops.gemm(X, W.reshape((nF * nO * nP, nI)), trans2=True) # Preallocate array for layer output, including padding.
Yf = model.ops.alloc2f(X.shape[0] + 1, nF * nO * nP, zeros=False)
model.ops.gemm(X, W.reshape((nF * nO * nP, nI)), trans2=True, out=Yf[1:])
Yf = Yf.reshape((Yf.shape[0], nF, nO, nP)) Yf = Yf.reshape((Yf.shape[0], nF, nO, nP))
Yf = model.ops.xp.vstack((model.get_param("pad"), Yf)) Yf[0] = model.get_param("pad")
def backward(dY_ids): def backward(dY_ids):
# This backprop is particularly tricky, because we get back a different # This backprop is particularly tricky, because we get back a different
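
The change above replaces an `xp.vstack` of the padding row with a preallocated output that the GEMM writes into directly; roughly the same pattern in plain NumPy (shapes follow the variables above, values are illustrative):

```python
import numpy as np

nF, nO, nP, nI = 3, 4, 2, 5
X = np.random.rand(10, nI).astype("float32")
W = np.random.rand(nF * nO * nP, nI).astype("float32")
pad = np.random.rand(1, nF, nO, nP).astype("float32")

# Old approach: compute, reshape, then vstack the padding row (extra copy).
Yf_old = (X @ W.T).reshape((X.shape[0], nF, nO, nP))
Yf_old = np.vstack((pad, Yf_old))

# New approach: allocate one extra row up front, let the GEMM write into
# rows 1..n, then fill row 0 with the padding -- no second allocation.
Yf_new = np.empty((X.shape[0] + 1, nF * nO * nP), dtype="float32")
np.matmul(X, W.T, out=Yf_new[1:])
Yf_new = Yf_new.reshape((Yf_new.shape[0], nF, nO, nP))
Yf_new[0] = pad

assert np.allclose(Yf_old, Yf_new)
```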

View File

@ -2,13 +2,16 @@ from typing import List, Tuple
from thinc.api import Model, chain from thinc.api import Model, chain
from thinc.api import PyTorchWrapper, ArgsKwargs from thinc.api import PyTorchWrapper, ArgsKwargs
from thinc.types import Floats2d, Ints2d, Ints1d from thinc.types import Floats2d
from thinc.util import torch, xp2torch, torch2xp from thinc.util import torch, xp2torch, torch2xp
from ...tokens import Doc from ...tokens import Doc
from ...util import registry from ...util import registry
EPSILON = 1e-7
@registry.architectures("spacy.Coref.v1") @registry.architectures("spacy.Coref.v1")
def build_wl_coref_model( def build_wl_coref_model(
tok2vec: Model[List[Doc], List[Floats2d]], tok2vec: Model[List[Doc], List[Floats2d]],
@ -42,7 +45,9 @@ def build_wl_coref_model(
return coref_model return coref_model
def convert_coref_clusterer_inputs(model: Model, X: List[Floats2d], is_train: bool): def convert_coref_clusterer_inputs(
model: Model, X: List[Floats2d], is_train: bool
):
# The input here is List[Floats2d], one for each doc # The input here is List[Floats2d], one for each doc
# just use the first # just use the first
# TODO real batching # TODO real batching
@ -50,7 +55,7 @@ def convert_coref_clusterer_inputs(model: Model, X: List[Floats2d], is_train: bo
word_features = xp2torch(X, requires_grad=is_train) word_features = xp2torch(X, requires_grad=is_train)
# TODO fix or remove type annotations # TODO fix or remove type annotations
def backprop(args: ArgsKwargs): #-> List[Floats2d]: def backprop(args: ArgsKwargs): #-> List[Floats2d]:
# convert to xp and wrap in list # convert to xp and wrap in list
gradients = torch2xp(args.args[0]) gradients = torch2xp(args.args[0])
return [gradients] return [gradients]
@ -58,7 +63,9 @@ def convert_coref_clusterer_inputs(model: Model, X: List[Floats2d], is_train: bo
return ArgsKwargs(args=(word_features,), kwargs={}), backprop return ArgsKwargs(args=(word_features,), kwargs={}), backprop
def convert_coref_clusterer_outputs(model: Model, inputs_outputs, is_train: bool): def convert_coref_clusterer_outputs(
model: Model, inputs_outputs, is_train: bool
):
_, outputs = inputs_outputs _, outputs = inputs_outputs
scores, indices = outputs scores, indices = outputs
@ -149,10 +156,10 @@ class CorefClusterer(torch.nn.Module):
a_scores_lst: List[torch.Tensor] = [] a_scores_lst: List[torch.Tensor] = []
for i in range(0, len(words), batch_size): for i in range(0, len(words), batch_size):
pw_batch = pw[i : i + batch_size] pw_batch = pw[i:i + batch_size]
words_batch = words[i : i + batch_size] words_batch = words[i:i + batch_size]
top_indices_batch = top_indices[i : i + batch_size] top_indices_batch = top_indices[i:i + batch_size]
top_rough_scores_batch = top_rough_scores[i : i + batch_size] top_rough_scores_batch = top_rough_scores[i:i + batch_size]
# a_scores_batch [batch_size, n_ants] # a_scores_batch [batch_size, n_ants]
a_scores_batch = self.a_scorer( a_scores_batch = self.a_scorer(
@ -168,7 +175,6 @@ class CorefClusterer(torch.nn.Module):
return coref_scores, top_indices return coref_scores, top_indices
EPSILON = 1e-7
# Note this function is kept here to keep a torch dep out of coref_util. # Note this function is kept here to keep a torch dep out of coref_util.
def add_dummy(tensor: torch.Tensor, eps: bool = False): def add_dummy(tensor: torch.Tensor, eps: bool = False):
"""Prepends zeros (or a very small value if eps is True) """Prepends zeros (or a very small value if eps is True)
@ -294,7 +300,7 @@ class RoughScorer(torch.nn.Module):
self.k = antecedent_limit self.k = antecedent_limit
def forward( def forward(
self, # type: ignore # pylint: disable=arguments-differ #35566 in pytorch self, # type: ignore
mentions: torch.Tensor, mentions: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]: ) -> Tuple[torch.Tensor, torch.Tensor]:
""" """
@ -305,6 +311,7 @@ class RoughScorer(torch.nn.Module):
pair_mask = torch.arange(mentions.shape[0]) pair_mask = torch.arange(mentions.shape[0])
pair_mask = pair_mask.unsqueeze(1) - pair_mask.unsqueeze(0) pair_mask = pair_mask.unsqueeze(1) - pair_mask.unsqueeze(0)
pair_mask = torch.log((pair_mask > 0).to(torch.float)) pair_mask = torch.log((pair_mask > 0).to(torch.float))
pair_mask = pair_mask.to(mentions.device)
bilinear_scores = self.dropout(self.bilinear(mentions)).mm(mentions.T) bilinear_scores = self.dropout(self.bilinear(mentions)).mm(mentions.T)
rough_scores = pair_mask + bilinear_scores rough_scores = pair_mask + bilinear_scores
top_scores, indices = torch.topk( top_scores, indices = torch.topk(
@ -340,5 +347,6 @@ class DistancePairwiseEncoder(torch.nn.Module):
log_distance = distance.to(torch.float).log2().floor_() log_distance = distance.to(torch.float).log2().floor_()
log_distance = log_distance.clamp_max_(max=6).to(torch.long) log_distance = log_distance.clamp_max_(max=6).to(torch.long)
distance = torch.where(distance < 5, distance - 1, log_distance + 2) distance = torch.where(distance < 5, distance - 1, log_distance + 2)
distance = distance.to(top_indices.device)
distance = self.distance_emb(distance) distance = self.distance_emb(distance)
return self.dropout(distance) return self.dropout(distance)

View File

@ -23,7 +23,7 @@ def build_nel_encoder(
((tok2vec >> list2ragged()) & build_span_maker()) ((tok2vec >> list2ragged()) & build_span_maker())
>> extract_spans() >> extract_spans()
>> reduce_mean() >> reduce_mean()
>> residual(Maxout(nO=token_width, nI=token_width, nP=2, dropout=0.0)) >> residual(Maxout(nO=token_width, nI=token_width, nP=2, dropout=0.0)) # type: ignore
>> output_layer >> output_layer
) )
model.set_ref("output_layer", output_layer) model.set_ref("output_layer", output_layer)

View File

@ -182,11 +182,12 @@ class SpanPredictor(torch.nn.Module):
torch.Tensor: span start/end scores, (n_heads x n_words x 2) torch.Tensor: span start/end scores, (n_heads x n_words x 2)
""" """
# If we don't receive heads, return empty # If we don't receive heads, return empty
device = heads_ids.device
if heads_ids.nelement() == 0: if heads_ids.nelement() == 0:
return torch.empty(size=(0,)) return torch.empty(size=(0,))
# Obtain distance embedding indices, [n_heads, n_words] # Obtain distance embedding indices, [n_heads, n_words]
relative_positions = heads_ids.unsqueeze(1) - torch.arange( relative_positions = heads_ids.unsqueeze(1) - torch.arange(
words.shape[0] words.shape[0], device=device
).unsqueeze(0) ).unsqueeze(0)
md = self.max_distance md = self.max_distance
# make all valid distances positive # make all valid distances positive
@ -210,20 +211,26 @@ class SpanPredictor(torch.nn.Module):
dim=1, dim=1,
) )
lengths = same_sent.sum(dim=1) lengths = same_sent.sum(dim=1)
padding_mask = torch.arange(0, lengths.max().item()).unsqueeze(0) padding_mask = torch.arange(
0, lengths.max().item(), device=device
).unsqueeze(0)
# (n_heads x max_sent_len) # (n_heads x max_sent_len)
padding_mask = padding_mask < lengths.unsqueeze(1) padding_mask = padding_mask < lengths.unsqueeze(1)
# (n_heads x max_sent_len x input_size * 2 + distance_emb_size) # (n_heads x max_sent_len x input_size * 2 + distance_emb_size)
# This is necessary to allow the convolution layer to look at several # This is necessary to allow the convolution layer to look at several
# word scores # word scores
padded_pairs = torch.zeros(*padding_mask.shape, pair_matrix.shape[-1]) padded_pairs = torch.zeros(
*padding_mask.shape, pair_matrix.shape[-1], device=device
)
padded_pairs[padding_mask] = pair_matrix padded_pairs[padding_mask] = pair_matrix
res = self.ffnn(padded_pairs) # (n_heads x n_candidates x last_layer_output) res = self.ffnn(padded_pairs) # (n_heads x n_candidates x last_layer_output)
res = self.conv(res.permute(0, 2, 1)).permute( res = self.conv(res.permute(0, 2, 1)).permute(
0, 2, 1 0, 2, 1
) # (n_heads x n_candidates, 2) ) # (n_heads x n_candidates, 2)
scores = torch.full((heads_ids.shape[0], words.shape[0], 2), float("-inf")) scores = torch.full(
(heads_ids.shape[0], words.shape[0], 2), float("-inf"), device=device
)
scores[rows, cols] = res[padding_mask] scores[rows, cols] = res[padding_mask]
# Make sure that start <= head <= end during inference # Make sure that start <= head <= end during inference
if not self.training: if not self.training:

View File

@ -1,4 +1,5 @@
from libc.string cimport memset, memcpy from libc.string cimport memset, memcpy
from thinc.backends.cblas cimport CBlas
from ..typedefs cimport weight_t, hash_t from ..typedefs cimport weight_t, hash_t
from ..pipeline._parser_internals._state cimport StateC from ..pipeline._parser_internals._state cimport StateC
@ -38,7 +39,7 @@ cdef ActivationsC alloc_activations(SizesC n) nogil
cdef void free_activations(const ActivationsC* A) nogil cdef void free_activations(const ActivationsC* A) nogil
cdef void predict_states(ActivationsC* A, StateC** states, cdef void predict_states(CBlas cblas, ActivationsC* A, StateC** states,
const WeightsC* W, SizesC n) nogil const WeightsC* W, SizesC n) nogil
cdef int arg_max_if_valid(const weight_t* scores, const int* is_valid, int n) nogil cdef int arg_max_if_valid(const weight_t* scores, const int* is_valid, int n) nogil

View File

@ -4,11 +4,11 @@ from libc.math cimport exp
from libc.string cimport memset, memcpy from libc.string cimport memset, memcpy
from libc.stdlib cimport calloc, free, realloc from libc.stdlib cimport calloc, free, realloc
from thinc.backends.linalg cimport Vec, VecVec from thinc.backends.linalg cimport Vec, VecVec
cimport blis.cy from thinc.backends.cblas cimport saxpy, sgemm
import numpy import numpy
import numpy.random import numpy.random
from thinc.api import Model, CupyOps, NumpyOps from thinc.api import Model, CupyOps, NumpyOps, get_ops
from .. import util from .. import util
from ..errors import Errors from ..errors import Errors
@ -91,7 +91,7 @@ cdef void resize_activations(ActivationsC* A, SizesC n) nogil:
A._curr_size = n.states A._curr_size = n.states
cdef void predict_states(ActivationsC* A, StateC** states, cdef void predict_states(CBlas cblas, ActivationsC* A, StateC** states,
const WeightsC* W, SizesC n) nogil: const WeightsC* W, SizesC n) nogil:
cdef double one = 1.0 cdef double one = 1.0
resize_activations(A, n) resize_activations(A, n)
@ -99,7 +99,7 @@ cdef void predict_states(ActivationsC* A, StateC** states,
states[i].set_context_tokens(&A.token_ids[i*n.feats], n.feats) states[i].set_context_tokens(&A.token_ids[i*n.feats], n.feats)
memset(A.unmaxed, 0, n.states * n.hiddens * n.pieces * sizeof(float)) memset(A.unmaxed, 0, n.states * n.hiddens * n.pieces * sizeof(float))
memset(A.hiddens, 0, n.states * n.hiddens * sizeof(float)) memset(A.hiddens, 0, n.states * n.hiddens * sizeof(float))
sum_state_features(A.unmaxed, sum_state_features(cblas, A.unmaxed,
W.feat_weights, A.token_ids, n.states, n.feats, n.hiddens * n.pieces) W.feat_weights, A.token_ids, n.states, n.feats, n.hiddens * n.pieces)
for i in range(n.states): for i in range(n.states):
VecVec.add_i(&A.unmaxed[i*n.hiddens*n.pieces], VecVec.add_i(&A.unmaxed[i*n.hiddens*n.pieces],
@ -113,12 +113,10 @@ cdef void predict_states(ActivationsC* A, StateC** states,
memcpy(A.scores, A.hiddens, n.states * n.classes * sizeof(float)) memcpy(A.scores, A.hiddens, n.states * n.classes * sizeof(float))
else: else:
# Compute hidden-to-output # Compute hidden-to-output
blis.cy.gemm(blis.cy.NO_TRANSPOSE, blis.cy.TRANSPOSE, sgemm(cblas)(False, True, n.states, n.classes, n.hiddens,
n.states, n.classes, n.hiddens, one, 1.0, <const float *>A.hiddens, n.hiddens,
<float*>A.hiddens, n.hiddens, 1, <const float *>W.hidden_weights, n.hiddens,
<float*>W.hidden_weights, n.hiddens, 1, 0.0, A.scores, n.classes)
one,
<float*>A.scores, n.classes, 1)
# Add bias # Add bias
for i in range(n.states): for i in range(n.states):
VecVec.add_i(&A.scores[i*n.classes], VecVec.add_i(&A.scores[i*n.classes],
@ -135,7 +133,7 @@ cdef void predict_states(ActivationsC* A, StateC** states,
A.scores[i*n.classes+j] = min_ A.scores[i*n.classes+j] = min_
cdef void sum_state_features(float* output, cdef void sum_state_features(CBlas cblas, float* output,
const float* cached, const int* token_ids, int B, int F, int O) nogil: const float* cached, const int* token_ids, int B, int F, int O) nogil:
cdef int idx, b, f, i cdef int idx, b, f, i
cdef const float* feature cdef const float* feature
@ -150,9 +148,7 @@ cdef void sum_state_features(float* output,
else: else:
idx = token_ids[f] * id_stride + f*O idx = token_ids[f] * id_stride + f*O
feature = &cached[idx] feature = &cached[idx]
blis.cy.axpyv(blis.cy.NO_CONJUGATE, O, one, saxpy(cblas)(O, one, <const float*>feature, 1, &output[b*O], 1)
<float*>feature, 1,
&output[b*O], 1)
token_ids += F token_ids += F
@ -443,9 +439,15 @@ cdef class precompute_hiddens:
# - Output from backward on GPU # - Output from backward on GPU
bp_hiddens = self._bp_hiddens bp_hiddens = self._bp_hiddens
cdef CBlas cblas
if isinstance(self.ops, CupyOps):
cblas = get_ops("cpu").cblas()
else:
cblas = self.ops.cblas()
feat_weights = self.get_feat_weights() feat_weights = self.get_feat_weights()
cdef int[:, ::1] ids = token_ids cdef int[:, ::1] ids = token_ids
sum_state_features(<float*>state_vector.data, sum_state_features(cblas, <float*>state_vector.data,
feat_weights, &ids[0,0], feat_weights, &ids[0,0],
token_ids.shape[0], self.nF, self.nO*self.nP) token_ids.shape[0], self.nF, self.nO*self.nP)
state_vector += self.bias state_vector += self.bias

View File

@ -15,6 +15,7 @@ from .sentencizer import Sentencizer
from .tagger import Tagger from .tagger import Tagger
from .textcat import TextCategorizer from .textcat import TextCategorizer
from .spancat import SpanCategorizer from .spancat import SpanCategorizer
from .span_ruler import SpanRuler
from .textcat_multilabel import MultiLabel_TextCategorizer from .textcat_multilabel import MultiLabel_TextCategorizer
from .tok2vec import Tok2Vec from .tok2vec import Tok2Vec
from .functions import merge_entities, merge_noun_chunks, merge_subtokens from .functions import merge_entities, merge_noun_chunks, merge_subtokens
@ -32,6 +33,7 @@ __all__ = [
"SentenceRecognizer", "SentenceRecognizer",
"Sentencizer", "Sentencizer",
"SpanCategorizer", "SpanCategorizer",
"SpanRuler",
"Tagger", "Tagger",
"TextCategorizer", "TextCategorizer",
"Tok2Vec", "Tok2Vec",

View File

@ -10,6 +10,7 @@ from ...strings cimport hash_string
from ...structs cimport TokenC from ...structs cimport TokenC
from ...tokens.doc cimport Doc, set_children_from_heads from ...tokens.doc cimport Doc, set_children_from_heads
from ...tokens.token cimport MISSING_DEP from ...tokens.token cimport MISSING_DEP
from ...training import split_bilu_label
from ...training.example cimport Example from ...training.example cimport Example
from .stateclass cimport StateClass from .stateclass cimport StateClass
from ._state cimport StateC, ArcC from ._state cimport StateC, ArcC
@ -687,7 +688,7 @@ cdef class ArcEager(TransitionSystem):
return self.c[name_or_id] return self.c[name_or_id]
name = name_or_id name = name_or_id
if '-' in name: if '-' in name:
move_str, label_str = name.split('-', 1) move_str, label_str = split_bilu_label(name)
label = self.strings[label_str] label = self.strings[label_str]
else: else:
move_str = name move_str = name

View File

@ -13,6 +13,7 @@ from ...typedefs cimport weight_t, attr_t
from ...lexeme cimport Lexeme from ...lexeme cimport Lexeme
from ...attrs cimport IS_SPACE from ...attrs cimport IS_SPACE
from ...structs cimport TokenC, SpanC from ...structs cimport TokenC, SpanC
from ...training import split_bilu_label
from ...training.example cimport Example from ...training.example cimport Example
from .stateclass cimport StateClass from .stateclass cimport StateClass
from ._state cimport StateC from ._state cimport StateC
@ -182,7 +183,7 @@ cdef class BiluoPushDown(TransitionSystem):
if name == '-' or name == '' or name is None: if name == '-' or name == '' or name is None:
return Transition(clas=0, move=MISSING, label=0, score=0) return Transition(clas=0, move=MISSING, label=0, score=0)
elif '-' in name: elif '-' in name:
move_str, label_str = name.split('-', 1) move_str, label_str = split_bilu_label(name)
# Deprecated, hacky way to denote 'not this entity' # Deprecated, hacky way to denote 'not this entity'
if label_str.startswith('!'): if label_str.startswith('!'):
raise ValueError(Errors.E869.format(label=name)) raise ValueError(Errors.E869.format(label=name))

View File

@ -0,0 +1,11 @@
#ifndef NONPROJ_HH
#define NONPROJ_HH
#include <stdexcept>
#include <string>
void raise_domain_error(std::string const &msg) {
throw std::domain_error(msg);
}
#endif // NONPROJ_HH

View File

@ -0,0 +1,4 @@
from libcpp.string cimport string
cdef extern from "nonproj.hh":
cdef void raise_domain_error(const string& msg) nogil except +

View File

@ -4,10 +4,13 @@ for doing pseudo-projective parsing implementation uses the HEAD decoration
scheme. scheme.
""" """
from copy import copy from copy import copy
from cython.operator cimport preincrement as incr, dereference as deref
from libc.limits cimport INT_MAX from libc.limits cimport INT_MAX
from libc.stdlib cimport abs from libc.stdlib cimport abs
from libcpp cimport bool from libcpp cimport bool
from libcpp.string cimport string, to_string
from libcpp.vector cimport vector from libcpp.vector cimport vector
from libcpp.unordered_set cimport unordered_set
from ...tokens.doc cimport Doc, set_children_from_heads from ...tokens.doc cimport Doc, set_children_from_heads
@ -49,7 +52,7 @@ def is_nonproj_arc(tokenid, heads):
return _is_nonproj_arc(tokenid, c_heads) return _is_nonproj_arc(tokenid, c_heads)
cdef bool _is_nonproj_arc(int tokenid, const vector[int]& heads) nogil: cdef bool _is_nonproj_arc(int tokenid, const vector[int]& heads) nogil except *:
# definition (e.g. Havelka 2007): an arc h -> d, h < d is non-projective # definition (e.g. Havelka 2007): an arc h -> d, h < d is non-projective
# if there is a token k, h < k < d such that h is not # if there is a token k, h < k < d such that h is not
# an ancestor of k. Same for h -> d, h > d # an ancestor of k. Same for h -> d, h > d
@ -58,32 +61,56 @@ cdef bool _is_nonproj_arc(int tokenid, const vector[int]& heads) nogil:
return False return False
elif head < 0: # unattached tokens cannot be non-projective elif head < 0: # unattached tokens cannot be non-projective
return False return False
cdef int start, end cdef int start, end
if head < tokenid: if head < tokenid:
start, end = (head+1, tokenid) start, end = (head+1, tokenid)
else: else:
start, end = (tokenid+1, head) start, end = (tokenid+1, head)
for k in range(start, end): for k in range(start, end):
if _has_head_as_ancestor(k, head, heads): if not _has_head_as_ancestor(k, head, heads):
continue
else: # head not in ancestors: d -> h is non-projective
return True return True
return False return False
cdef bool _has_head_as_ancestor(int tokenid, int head, const vector[int]& heads) nogil: cdef bool _has_head_as_ancestor(int tokenid, int head, const vector[int]& heads) nogil except *:
ancestor = tokenid ancestor = tokenid
cnt = 0 cdef unordered_set[int] seen_tokens
while cnt < heads.size(): seen_tokens.insert(ancestor)
while True:
# Reached the head or a disconnected node
if heads[ancestor] == head or heads[ancestor] < 0: if heads[ancestor] == head or heads[ancestor] < 0:
return True return True
# Reached the root
if heads[ancestor] == ancestor:
return False
ancestor = heads[ancestor] ancestor = heads[ancestor]
cnt += 1 result = seen_tokens.insert(ancestor)
# Found cycle
if not result.second:
raise_domain_error(heads_to_string(heads))
return False return False
cdef string heads_to_string(const vector[int]& heads) nogil:
cdef vector[int].const_iterator citer
cdef string cycle_str
cycle_str.append("Found cycle in dependency graph: [")
# FIXME: Rewrite using ostringstream when available in Cython.
citer = heads.const_begin()
while citer != heads.const_end():
if citer != heads.const_begin():
cycle_str.append(", ")
cycle_str.append(to_string(deref(citer)))
incr(citer)
cycle_str.append("]")
return cycle_str
def is_nonproj_tree(heads): def is_nonproj_tree(heads):
cdef vector[int] c_heads = _heads_to_c(heads) cdef vector[int] c_heads = _heads_to_c(heads)
# a tree is non-projective if at least one arc is non-projective # a tree is non-projective if at least one arc is non-projective
@ -176,11 +203,12 @@ def get_smallest_nonproj_arc_slow(heads):
return _get_smallest_nonproj_arc(c_heads) return _get_smallest_nonproj_arc(c_heads)
cdef int _get_smallest_nonproj_arc(const vector[int]& heads) nogil: cdef int _get_smallest_nonproj_arc(const vector[int]& heads) nogil except -2:
# return the smallest non-proj arc or None # return the smallest non-proj arc or None
# where size is defined as the distance between dep and head # where size is defined as the distance between dep and head
# and ties are broken left to right # and ties are broken left to right
cdef int smallest_size = INT_MAX cdef int smallest_size = INT_MAX
# -1 means it's already projective.
cdef int smallest_np_arc = -1 cdef int smallest_np_arc = -1
cdef int size cdef int size
cdef int tokenid cdef int tokenid
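A plain-Python sketch of the cycle check that the Cython helpers above implement, assuming the same heads-array convention (heads[i] is the index of token i's head, the root points to itself, negative means unattached); the function name is illustrative:

def has_head_as_ancestor(tokenid, head, heads):
    ancestor = tokenid
    seen = {ancestor}
    while True:
        if heads[ancestor] == head or heads[ancestor] < 0:
            return True   # reached the head or a disconnected node
        if heads[ancestor] == ancestor:
            return False  # reached the root
        ancestor = heads[ancestor]
        if ancestor in seen:
            # revisiting a token means the heads array contains a cycle
            raise ValueError(f"Found cycle in dependency graph: {list(heads)}")
        seen.add(ancestor)

# e.g. has_head_as_ancestor(3, 2, [1, 2, 2, 4, 5, 3, 2]) raises, because the
# chain 3 -> 4 -> 5 -> 3 revisits token 3 -- matching the tests added below.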

View File

@ -276,7 +276,7 @@ class CoreferenceResolver(TrainablePipe):
span = example.predicted.char_span(start_char, end_char) span = example.predicted.char_span(start_char, end_char)
if span is None: if span is None:
# TODO log more details # TODO log more details
raise IndexError(Errors.E1038) raise IndexError(Errors.E1043)
cc.append( (span.start, span.end) ) cc.append( (span.start, span.end) )
clusters.append(cc) clusters.append(cc)

View File

@ -12,6 +12,7 @@ from ..language import Language
from ._parser_internals import nonproj from ._parser_internals import nonproj
from ._parser_internals.nonproj import DELIMITER from ._parser_internals.nonproj import DELIMITER
from ..scorer import Scorer from ..scorer import Scorer
from ..training import remove_bilu_prefix
from ..util import registry from ..util import registry
@ -314,7 +315,7 @@ cdef class DependencyParser(Parser):
# Get the labels from the model by looking at the available moves # Get the labels from the model by looking at the available moves
for move in self.move_names: for move in self.move_names:
if "-" in move: if "-" in move:
label = move.split("-")[1] label = remove_bilu_prefix(move)
if DELIMITER in label: if DELIMITER in label:
label = label.split(DELIMITER)[1] label = label.split(DELIMITER)[1]
labels.add(label) labels.add(label)
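A short sketch of what the two helpers imported from spacy.training in the hunks above are assumed to do: split only on the first hyphen, so entity labels that themselves contain hyphens survive, which the old move.split("-")[1] did not guarantee:

from spacy.training import split_bilu_label, remove_bilu_prefix

# Only the leading BILU move is separated; the rest of the label is kept intact.
assert split_bilu_label("B-LARGE-ANIMAL") == ("B", "LARGE-ANIMAL")
assert remove_bilu_prefix("U-MULTI-PERSON") == "MULTI-PERSON"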

View File

@ -355,7 +355,7 @@ class EntityLinker(TrainablePipe):
keep_ents.append(eidx) keep_ents.append(eidx)
eidx += 1 eidx += 1
entity_encodings = self.model.ops.asarray(entity_encodings, dtype="float32") entity_encodings = self.model.ops.asarray2f(entity_encodings, dtype="float32")
selected_encodings = sentence_encodings[keep_ents] selected_encodings = sentence_encodings[keep_ents]
# if there are no matches, short circuit # if there are no matches, short circuit
@ -368,13 +368,12 @@ class EntityLinker(TrainablePipe):
method="get_loss", msg="gold entities do not match up" method="get_loss", msg="gold entities do not match up"
) )
raise RuntimeError(err) raise RuntimeError(err)
# TODO: fix typing issue here gradients = self.distance.get_grad(selected_encodings, entity_encodings)
gradients = self.distance.get_grad(selected_encodings, entity_encodings) # type: ignore
# to match the input size, we need to give a zero gradient for items not in the kb # to match the input size, we need to give a zero gradient for items not in the kb
out = self.model.ops.alloc2f(*sentence_encodings.shape) out = self.model.ops.alloc2f(*sentence_encodings.shape)
out[keep_ents] = gradients out[keep_ents] = gradients
loss = self.distance.get_loss(selected_encodings, entity_encodings) # type: ignore loss = self.distance.get_loss(selected_encodings, entity_encodings)
loss = loss / len(entity_encodings) loss = loss / len(entity_encodings)
return float(loss), out return float(loss), out
@ -391,18 +390,21 @@ class EntityLinker(TrainablePipe):
self.validate_kb() self.validate_kb()
entity_count = 0 entity_count = 0
final_kb_ids: List[str] = [] final_kb_ids: List[str] = []
xp = self.model.ops.xp
if not docs: if not docs:
return final_kb_ids return final_kb_ids
if isinstance(docs, Doc): if isinstance(docs, Doc):
docs = [docs] docs = [docs]
for i, doc in enumerate(docs): for i, doc in enumerate(docs):
if len(doc) == 0:
continue
sentences = [s for s in doc.sents] sentences = [s for s in doc.sents]
if len(doc) > 0: # Looping through each entity (TODO: rewrite)
# Looping through each entity (TODO: rewrite) for ent in doc.ents:
for ent in doc.ents: sent_index = sentences.index(ent.sent)
sent = ent.sent assert sent_index >= 0
sent_index = sentences.index(sent)
assert sent_index >= 0 if self.incl_context:
# get n_neighbour sentences, clipped to the length of the document # get n_neighbour sentences, clipped to the length of the document
start_sentence = max(0, sent_index - self.n_sents) start_sentence = max(0, sent_index - self.n_sents)
end_sentence = min(len(sentences) - 1, sent_index + self.n_sents) end_sentence = min(len(sentences) - 1, sent_index + self.n_sents)
@ -410,55 +412,53 @@ class EntityLinker(TrainablePipe):
end_token = sentences[end_sentence].end end_token = sentences[end_sentence].end
sent_doc = doc[start_token:end_token].as_doc() sent_doc = doc[start_token:end_token].as_doc()
# currently, the context is the same for each entity in a sentence (should be refined) # currently, the context is the same for each entity in a sentence (should be refined)
xp = self.model.ops.xp sentence_encoding = self.model.predict([sent_doc])[0]
if self.incl_context: sentence_encoding_t = sentence_encoding.T
sentence_encoding = self.model.predict([sent_doc])[0] sentence_norm = xp.linalg.norm(sentence_encoding_t)
sentence_encoding_t = sentence_encoding.T entity_count += 1
sentence_norm = xp.linalg.norm(sentence_encoding_t) if ent.label_ in self.labels_discard:
entity_count += 1 # ignoring this entity - setting to NIL
if ent.label_ in self.labels_discard: final_kb_ids.append(self.NIL)
# ignoring this entity - setting to NIL else:
candidates = list(self.get_candidates(self.kb, ent))
if not candidates:
# no prediction possible for this entity - setting to NIL
final_kb_ids.append(self.NIL) final_kb_ids.append(self.NIL)
elif len(candidates) == 1:
# shortcut for efficiency reasons: take the 1 candidate
# TODO: thresholding
final_kb_ids.append(candidates[0].entity_)
else: else:
candidates = list(self.get_candidates(self.kb, ent)) random.shuffle(candidates)
if not candidates: # set all prior probabilities to 0 if incl_prior=False
# no prediction possible for this entity - setting to NIL prior_probs = xp.asarray([c.prior_prob for c in candidates])
final_kb_ids.append(self.NIL) if not self.incl_prior:
elif len(candidates) == 1: prior_probs = xp.asarray([0.0 for _ in candidates])
# shortcut for efficiency reasons: take the 1 candidate scores = prior_probs
# TODO: thresholding # add in similarity from the context
final_kb_ids.append(candidates[0].entity_) if self.incl_context:
else: entity_encodings = xp.asarray(
random.shuffle(candidates) [c.entity_vector for c in candidates]
# set all prior probabilities to 0 if incl_prior=False )
prior_probs = xp.asarray([c.prior_prob for c in candidates]) entity_norm = xp.linalg.norm(entity_encodings, axis=1)
if not self.incl_prior: if len(entity_encodings) != len(prior_probs):
prior_probs = xp.asarray([0.0 for _ in candidates]) raise RuntimeError(
scores = prior_probs Errors.E147.format(
# add in similarity from the context method="predict",
if self.incl_context: msg="vectors not of equal length",
entity_encodings = xp.asarray(
[c.entity_vector for c in candidates]
)
entity_norm = xp.linalg.norm(entity_encodings, axis=1)
if len(entity_encodings) != len(prior_probs):
raise RuntimeError(
Errors.E147.format(
method="predict",
msg="vectors not of equal length",
)
) )
# cosine similarity
sims = xp.dot(entity_encodings, sentence_encoding_t) / (
sentence_norm * entity_norm
) )
if sims.shape != prior_probs.shape: # cosine similarity
raise ValueError(Errors.E161) sims = xp.dot(entity_encodings, sentence_encoding_t) / (
scores = prior_probs + sims - (prior_probs * sims) sentence_norm * entity_norm
# TODO: thresholding )
best_index = scores.argmax().item() if sims.shape != prior_probs.shape:
best_candidate = candidates[best_index] raise ValueError(Errors.E161)
final_kb_ids.append(best_candidate.entity_) scores = prior_probs + sims - (prior_probs * sims)
# TODO: thresholding
best_index = scores.argmax().item()
best_candidate = candidates[best_index]
final_kb_ids.append(best_candidate.entity_)
if not (len(final_kb_ids) == entity_count): if not (len(final_kb_ids) == entity_count):
err = Errors.E147.format( err = Errors.E147.format(
method="predict", msg="result variables not of equal length" method="predict", msg="result variables not of equal length"

View File

@ -179,10 +179,7 @@ class EntityRuler(Pipe):
if start not in seen_tokens and end - 1 not in seen_tokens: if start not in seen_tokens and end - 1 not in seen_tokens:
if match_id in self._ent_ids: if match_id in self._ent_ids:
label, ent_id = self._ent_ids[match_id] label, ent_id = self._ent_ids[match_id]
span = Span(doc, start, end, label=label) span = Span(doc, start, end, label=label, span_id=ent_id)
if ent_id:
for token in span:
token.ent_id_ = ent_id
else: else:
span = Span(doc, start, end, label=match_id) span = Span(doc, start, end, label=match_id)
new_entities.append(span) new_entities.append(span)
@ -356,7 +353,9 @@ class EntityRuler(Pipe):
(label, eid) for (label, eid) in self._ent_ids.values() if eid == ent_id (label, eid) for (label, eid) in self._ent_ids.values() if eid == ent_id
] ]
if not label_id_pairs: if not label_id_pairs:
raise ValueError(Errors.E1024.format(ent_id=ent_id)) raise ValueError(
Errors.E1024.format(attr_type="ID", label=ent_id, component=self.name)
)
created_labels = [ created_labels = [
self._create_label(label, eid) for (label, eid) in label_id_pairs self._create_label(label, eid) for (label, eid) in label_id_pairs
] ]
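A tiny sketch of the span_id behaviour the EntityRuler now relies on instead of writing token.ent_id_ for each token; the sentence and ID string are illustrative:

import spacy
from spacy.tokens import Span

doc = spacy.blank("en")("Apple is huge")
span = Span(doc, 0, 1, label="ORG", span_id="apple-pattern")
print(span.label_, span.id_)  # -> ORG apple-pattern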

View File

@ -6,10 +6,10 @@ from thinc.api import Model, Config
from ._parser_internals.transition_system import TransitionSystem from ._parser_internals.transition_system import TransitionSystem
from .transition_parser cimport Parser from .transition_parser cimport Parser
from ._parser_internals.ner cimport BiluoPushDown from ._parser_internals.ner cimport BiluoPushDown
from ..language import Language from ..language import Language
from ..scorer import get_ner_prf, PRFScore from ..scorer import get_ner_prf, PRFScore
from ..util import registry from ..util import registry
from ..training import remove_bilu_prefix
default_model_config = """ default_model_config = """
@ -242,7 +242,7 @@ cdef class EntityRecognizer(Parser):
def labels(self): def labels(self):
# Get the labels from the model by looking at the available moves, e.g. # Get the labels from the model by looking at the available moves, e.g.
# B-PERSON, I-PERSON, L-PERSON, U-PERSON # B-PERSON, I-PERSON, L-PERSON, U-PERSON
labels = set(move.split("-")[1] for move in self.move_names labels = set(remove_bilu_prefix(move) for move in self.move_names
if move[0] in ("B", "I", "L", "U")) if move[0] in ("B", "I", "L", "U"))
return tuple(sorted(labels)) return tuple(sorted(labels))

View File

@ -31,7 +31,7 @@ cdef class Pipe:
and returned. This usually happens under the hood when the nlp object and returned. This usually happens under the hood when the nlp object
is called on a text and all components are applied to the Doc. is called on a text and all components are applied to the Doc.
docs (Doc): The Doc to process. doc (Doc): The Doc to process.
RETURNS (Doc): The processed Doc. RETURNS (Doc): The processed Doc.
DOCS: https://spacy.io/api/pipe#call DOCS: https://spacy.io/api/pipe#call

View File

@ -0,0 +1,569 @@
from typing import Optional, Union, List, Dict, Tuple, Iterable, Any, Callable
from typing import Sequence, Set, cast
import warnings
from functools import partial
from pathlib import Path
import srsly
from .pipe import Pipe
from ..training import Example
from ..language import Language
from ..errors import Errors, Warnings
from ..util import ensure_path, SimpleFrozenList, registry
from ..tokens import Doc, Span
from ..scorer import Scorer
from ..matcher import Matcher, PhraseMatcher
from .. import util
PatternType = Dict[str, Union[str, List[Dict[str, Any]]]]
DEFAULT_SPANS_KEY = "ruler"
@Language.factory(
"future_entity_ruler",
assigns=["doc.ents"],
default_config={
"phrase_matcher_attr": None,
"validate": False,
"overwrite_ents": False,
"scorer": {"@scorers": "spacy.entity_ruler_scorer.v1"},
"ent_id_sep": "__unused__",
},
default_score_weights={
"ents_f": 1.0,
"ents_p": 0.0,
"ents_r": 0.0,
"ents_per_type": None,
},
)
def make_entity_ruler(
nlp: Language,
name: str,
phrase_matcher_attr: Optional[Union[int, str]],
validate: bool,
overwrite_ents: bool,
scorer: Optional[Callable],
ent_id_sep: str,
):
if overwrite_ents:
ents_filter = prioritize_new_ents_filter
else:
ents_filter = prioritize_existing_ents_filter
return SpanRuler(
nlp,
name,
spans_key=None,
spans_filter=None,
annotate_ents=True,
ents_filter=ents_filter,
phrase_matcher_attr=phrase_matcher_attr,
validate=validate,
overwrite=False,
scorer=scorer,
)
@Language.factory(
"span_ruler",
assigns=["doc.spans"],
default_config={
"spans_key": DEFAULT_SPANS_KEY,
"spans_filter": None,
"annotate_ents": False,
"ents_filter": {"@misc": "spacy.first_longest_spans_filter.v1"},
"phrase_matcher_attr": None,
"validate": False,
"overwrite": True,
"scorer": {
"@scorers": "spacy.overlapping_labeled_spans_scorer.v1",
"spans_key": DEFAULT_SPANS_KEY,
},
},
default_score_weights={
f"spans_{DEFAULT_SPANS_KEY}_f": 1.0,
f"spans_{DEFAULT_SPANS_KEY}_p": 0.0,
f"spans_{DEFAULT_SPANS_KEY}_r": 0.0,
f"spans_{DEFAULT_SPANS_KEY}_per_type": None,
},
)
def make_span_ruler(
nlp: Language,
name: str,
spans_key: Optional[str],
spans_filter: Optional[Callable[[Iterable[Span], Iterable[Span]], Iterable[Span]]],
annotate_ents: bool,
ents_filter: Callable[[Iterable[Span], Iterable[Span]], Iterable[Span]],
phrase_matcher_attr: Optional[Union[int, str]],
validate: bool,
overwrite: bool,
scorer: Optional[Callable],
):
return SpanRuler(
nlp,
name,
spans_key=spans_key,
spans_filter=spans_filter,
annotate_ents=annotate_ents,
ents_filter=ents_filter,
phrase_matcher_attr=phrase_matcher_attr,
validate=validate,
overwrite=overwrite,
scorer=scorer,
)
def prioritize_new_ents_filter(
entities: Iterable[Span], spans: Iterable[Span]
) -> List[Span]:
"""Merge entities and spans into one list without overlaps by allowing
spans to overwrite any entities that they overlap with. Intended to
replicate the overwrite_ents=True behavior from the EntityRuler.
entities (Iterable[Span]): The entities, already filtered for overlaps.
spans (Iterable[Span]): The spans to merge, may contain overlaps.
RETURNS (List[Span]): Filtered list of non-overlapping spans.
"""
get_sort_key = lambda span: (span.end - span.start, -span.start)
spans = sorted(spans, key=get_sort_key, reverse=True)
entities = list(entities)
new_entities = []
seen_tokens: Set[int] = set()
for span in spans:
start = span.start
end = span.end
if all(token.i not in seen_tokens for token in span):
new_entities.append(span)
entities = [e for e in entities if not (e.start < end and e.end > start)]
seen_tokens.update(range(start, end))
return entities + new_entities
@registry.misc("spacy.prioritize_new_ents_filter.v1")
def make_prioritize_new_ents_filter():
return prioritize_new_ents_filter
def prioritize_existing_ents_filter(
entities: Iterable[Span], spans: Iterable[Span]
) -> List[Span]:
"""Merge entities and spans into one list without overlaps by prioritizing
existing entities. Intended to replicate the overwrite_ents=False behavior
from the EntityRuler.
entities (Iterable[Span]): The entities, already filtered for overlaps.
spans (Iterable[Span]): The spans to merge, may contain overlaps.
RETURNS (List[Span]): Filtered list of non-overlapping spans.
"""
get_sort_key = lambda span: (span.end - span.start, -span.start)
spans = sorted(spans, key=get_sort_key, reverse=True)
entities = list(entities)
new_entities = []
seen_tokens: Set[int] = set()
seen_tokens.update(*(range(ent.start, ent.end) for ent in entities))
for span in spans:
start = span.start
end = span.end
if all(token.i not in seen_tokens for token in span):
new_entities.append(span)
seen_tokens.update(range(start, end))
return entities + new_entities
@registry.misc("spacy.prioritize_existing_ents_filter.v1")
def make_preverse_existing_ents_filter():
return prioritize_existing_ents_filter
def overlapping_labeled_spans_score(
examples: Iterable[Example], *, spans_key=DEFAULT_SPANS_KEY, **kwargs
) -> Dict[str, Any]:
kwargs = dict(kwargs)
attr_prefix = f"spans_"
kwargs.setdefault("attr", f"{attr_prefix}{spans_key}")
kwargs.setdefault("allow_overlap", True)
kwargs.setdefault("labeled", True)
kwargs.setdefault(
"getter", lambda doc, key: doc.spans.get(key[len(attr_prefix) :], [])
)
kwargs.setdefault("has_annotation", lambda doc: spans_key in doc.spans)
return Scorer.score_spans(examples, **kwargs)
@registry.scorers("spacy.overlapping_labeled_spans_scorer.v1")
def make_overlapping_labeled_spans_scorer(spans_key: str = DEFAULT_SPANS_KEY):
return partial(overlapping_labeled_spans_score, spans_key=spans_key)
class SpanRuler(Pipe):
"""The SpanRuler lets you add spans to the `Doc.spans` using token-based
rules or exact phrase matches.
DOCS: https://spacy.io/api/spanruler
USAGE: https://spacy.io/usage/rule-based-matching#spanruler
"""
def __init__(
self,
nlp: Language,
name: str = "span_ruler",
*,
spans_key: Optional[str] = DEFAULT_SPANS_KEY,
spans_filter: Optional[
Callable[[Iterable[Span], Iterable[Span]], Iterable[Span]]
] = None,
annotate_ents: bool = False,
ents_filter: Callable[
[Iterable[Span], Iterable[Span]], Iterable[Span]
] = util.filter_chain_spans,
phrase_matcher_attr: Optional[Union[int, str]] = None,
validate: bool = False,
overwrite: bool = False,
scorer: Optional[Callable] = partial(
overlapping_labeled_spans_score, spans_key=DEFAULT_SPANS_KEY
),
) -> None:
"""Initialize the span ruler. If patterns are supplied here, they
need to be a list of dictionaries with a `"label"` and `"pattern"`
key. A pattern can either be a token pattern (list) or a phrase pattern
(string). For example: `{'label': 'ORG', 'pattern': 'Apple'}`.
nlp (Language): The shared nlp object to pass the vocab to the matchers
and process phrase patterns.
name (str): Instance name of the current pipeline component. Typically
passed in automatically from the factory when the component is
added. Used to disable the current span ruler while creating
phrase patterns with the nlp object.
spans_key (Optional[str]): The spans key to save the spans under. If
`None`, no spans are saved. Defaults to "ruler".
spans_filter (Optional[Callable[[Iterable[Span], Iterable[Span]], List[Span]]):
The optional method to filter spans before they are assigned to
doc.spans. Defaults to `None`.
annotate_ents (bool): Whether to save spans to doc.ents. Defaults to
`False`.
ents_filter (Callable[[Iterable[Span], Iterable[Span]], List[Span]]):
The method to filter spans before they are assigned to doc.ents.
Defaults to `util.filter_chain_spans`.
phrase_matcher_attr (Optional[Union[int, str]]): Token attribute to
match on, passed to the internal PhraseMatcher as `attr`. Defaults
to `None`.
validate (bool): Whether patterns should be validated, passed to
Matcher and PhraseMatcher as `validate`.
overwrite (bool): Whether to remove any existing spans under this spans
key if `spans_key` is set, and/or to remove any ents under `doc.ents` if
`annotate_ents` is set. Defaults to `True`.
scorer (Optional[Callable]): The scoring method. Defaults to
spacy.pipeline.span_ruler.overlapping_labeled_spans_score.
DOCS: https://spacy.io/api/spanruler#init
"""
self.nlp = nlp
self.name = name
self.spans_key = spans_key
self.annotate_ents = annotate_ents
self.phrase_matcher_attr = phrase_matcher_attr
self.validate = validate
self.overwrite = overwrite
self.spans_filter = spans_filter
self.ents_filter = ents_filter
self.scorer = scorer
self._match_label_id_map: Dict[int, Dict[str, str]] = {}
self.clear()
def __len__(self) -> int:
"""The number of all labels added to the span ruler."""
return len(self._patterns)
def __contains__(self, label: str) -> bool:
"""Whether a label is present in the patterns."""
for label_id in self._match_label_id_map.values():
if label_id["label"] == label:
return True
return False
@property
def key(self) -> Optional[str]:
"""Key of the doc.spans dict to save the spans under."""
return self.spans_key
def __call__(self, doc: Doc) -> Doc:
"""Find matches in document and add them as entities.
doc (Doc): The Doc object in the pipeline.
RETURNS (Doc): The Doc with added entities, if available.
DOCS: https://spacy.io/api/spanruler#call
"""
error_handler = self.get_error_handler()
try:
matches = self.match(doc)
self.set_annotations(doc, matches)
return doc
except Exception as e:
return error_handler(self.name, self, [doc], e)
def match(self, doc: Doc):
self._require_patterns()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message="\\[W036")
matches = cast(
List[Tuple[int, int, int]],
list(self.matcher(doc)) + list(self.phrase_matcher(doc)),
)
deduplicated_matches = set(
Span(
doc,
start,
end,
label=self._match_label_id_map[m_id]["label"],
span_id=self._match_label_id_map[m_id]["id"],
)
for m_id, start, end in matches
if start != end
)
return sorted(list(deduplicated_matches))
def set_annotations(self, doc, matches):
"""Modify the document in place"""
# set doc.spans if spans_key is set
if self.key:
spans = []
if self.key in doc.spans and not self.overwrite:
spans = doc.spans[self.key]
spans.extend(
self.spans_filter(spans, matches) if self.spans_filter else matches
)
doc.spans[self.key] = spans
# set doc.ents if annotate_ents is set
if self.annotate_ents:
spans = []
if not self.overwrite:
spans = list(doc.ents)
spans = self.ents_filter(spans, matches)
try:
doc.ents = sorted(spans)
except ValueError:
raise ValueError(Errors.E854)
@property
def labels(self) -> Tuple[str, ...]:
"""All labels present in the match patterns.
RETURNS (set): The string labels.
DOCS: https://spacy.io/api/spanruler#labels
"""
return tuple(sorted(set([cast(str, p["label"]) for p in self._patterns])))
@property
def ids(self) -> Tuple[str, ...]:
"""All IDs present in the match patterns.
RETURNS (set): The string IDs.
DOCS: https://spacy.io/api/spanruler#ids
"""
return tuple(
sorted(set([cast(str, p.get("id")) for p in self._patterns]) - set([None]))
)
def initialize(
self,
get_examples: Callable[[], Iterable[Example]],
*,
nlp: Optional[Language] = None,
patterns: Optional[Sequence[PatternType]] = None,
):
"""Initialize the pipe for training.
get_examples (Callable[[], Iterable[Example]]): Function that
returns a representative sample of gold-standard Example objects.
nlp (Language): The current nlp object the component is part of.
patterns (Optional[Iterable[PatternType]]): The list of patterns.
DOCS: https://spacy.io/api/spanruler#initialize
"""
self.clear()
if patterns:
self.add_patterns(patterns) # type: ignore[arg-type]
@property
def patterns(self) -> List[PatternType]:
"""Get all patterns that were added to the span ruler.
RETURNS (list): The original patterns, one dictionary per pattern.
DOCS: https://spacy.io/api/spanruler#patterns
"""
return self._patterns
def add_patterns(self, patterns: List[PatternType]) -> None:
"""Add patterns to the span ruler. A pattern can either be a token
pattern (list of dicts) or a phrase pattern (string). For example:
{'label': 'ORG', 'pattern': 'Apple'}
{'label': 'ORG', 'pattern': 'Apple', 'id': 'apple'}
{'label': 'GPE', 'pattern': [{'lower': 'san'}, {'lower': 'francisco'}]}
patterns (list): The patterns to add.
DOCS: https://spacy.io/api/spanruler#add_patterns
"""
# disable the nlp components after this one in case they haven't been
# initialized / deserialized yet
try:
current_index = -1
for i, (name, pipe) in enumerate(self.nlp.pipeline):
if self == pipe:
current_index = i
break
subsequent_pipes = [pipe for pipe in self.nlp.pipe_names[current_index:]]
except ValueError:
subsequent_pipes = []
with self.nlp.select_pipes(disable=subsequent_pipes):
phrase_pattern_labels = []
phrase_pattern_texts = []
for entry in patterns:
p_label = cast(str, entry["label"])
p_id = cast(str, entry.get("id", ""))
label = repr((p_label, p_id))
self._match_label_id_map[self.nlp.vocab.strings.as_int(label)] = {
"label": p_label,
"id": p_id,
}
if isinstance(entry["pattern"], str):
phrase_pattern_labels.append(label)
phrase_pattern_texts.append(entry["pattern"])
elif isinstance(entry["pattern"], list):
self.matcher.add(label, [entry["pattern"]])
else:
raise ValueError(Errors.E097.format(pattern=entry["pattern"]))
self._patterns.append(entry)
for label, pattern in zip(
phrase_pattern_labels,
self.nlp.pipe(phrase_pattern_texts),
):
self.phrase_matcher.add(label, [pattern])
def clear(self) -> None:
"""Reset all patterns.
RETURNS: None
DOCS: https://spacy.io/api/spanruler#clear
"""
self._patterns: List[PatternType] = []
self.matcher: Matcher = Matcher(self.nlp.vocab, validate=self.validate)
self.phrase_matcher: PhraseMatcher = PhraseMatcher(
self.nlp.vocab,
attr=self.phrase_matcher_attr,
validate=self.validate,
)
def remove(self, label: str) -> None:
"""Remove a pattern by its label.
label (str): Label of the pattern to be removed.
RETURNS: None
DOCS: https://spacy.io/api/spanruler#remove
"""
if label not in self:
raise ValueError(
Errors.E1024.format(attr_type="label", label=label, component=self.name)
)
self._patterns = [p for p in self._patterns if p["label"] != label]
for m_label in self._match_label_id_map:
if self._match_label_id_map[m_label]["label"] == label:
m_label_str = self.nlp.vocab.strings.as_string(m_label)
if m_label_str in self.phrase_matcher:
self.phrase_matcher.remove(m_label_str)
if m_label_str in self.matcher:
self.matcher.remove(m_label_str)
def remove_by_id(self, pattern_id: str) -> None:
"""Remove a pattern by its pattern ID.
pattern_id (str): ID of the pattern to be removed.
RETURNS: None
DOCS: https://spacy.io/api/spanruler#remove_by_id
"""
orig_len = len(self)
self._patterns = [p for p in self._patterns if p.get("id") != pattern_id]
if orig_len == len(self):
raise ValueError(
Errors.E1024.format(
attr_type="ID", label=pattern_id, component=self.name
)
)
for m_label in self._match_label_id_map:
if self._match_label_id_map[m_label]["id"] == pattern_id:
m_label_str = self.nlp.vocab.strings.as_string(m_label)
if m_label_str in self.phrase_matcher:
self.phrase_matcher.remove(m_label_str)
if m_label_str in self.matcher:
self.matcher.remove(m_label_str)
def _require_patterns(self) -> None:
"""Raise a warning if this component has no patterns defined."""
if len(self) == 0:
warnings.warn(Warnings.W036.format(name=self.name))
def from_bytes(
self, bytes_data: bytes, *, exclude: Iterable[str] = SimpleFrozenList()
) -> "SpanRuler":
"""Load the span ruler from a bytestring.
bytes_data (bytes): The bytestring to load.
RETURNS (SpanRuler): The loaded span ruler.
DOCS: https://spacy.io/api/spanruler#from_bytes
"""
self.clear()
deserializers = {
"patterns": lambda b: self.add_patterns(srsly.json_loads(b)),
}
util.from_bytes(bytes_data, deserializers, exclude)
return self
def to_bytes(self, *, exclude: Iterable[str] = SimpleFrozenList()) -> bytes:
"""Serialize the span ruler to a bytestring.
RETURNS (bytes): The serialized patterns.
DOCS: https://spacy.io/api/spanruler#to_bytes
"""
serializers = {
"patterns": lambda: srsly.json_dumps(self.patterns),
}
return util.to_bytes(serializers, exclude)
def from_disk(
self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList()
) -> "SpanRuler":
"""Load the span ruler from a directory.
path (Union[str, Path]): A path to a directory.
RETURNS (SpanRuler): The loaded span ruler.
DOCS: https://spacy.io/api/spanruler#from_disk
"""
self.clear()
path = ensure_path(path)
deserializers = {
"patterns": lambda p: self.add_patterns(srsly.read_jsonl(p)),
}
util.from_disk(path, deserializers, {})
return self
def to_disk(
self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList()
) -> None:
"""Save the span ruler patterns to a directory.
path (Union[str, Path]): A path to a directory.
DOCS: https://spacy.io/api/spanruler#to_disk
"""
path = ensure_path(path)
serializers = {
"patterns": lambda p: srsly.write_jsonl(p, self.patterns),
}
util.to_disk(path, serializers, {})
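A hedged usage sketch for the new SpanRuler component, assuming the "span_ruler" factory registered above on a blank English pipeline; the example sentence and expected output are illustrative:

import spacy

nlp = spacy.blank("en")
ruler = nlp.add_pipe("span_ruler")
ruler.add_patterns([
    {"label": "ORG", "pattern": "Apple"},  # phrase pattern
    {"label": "GPE", "pattern": [{"LOWER": "san"}, {"LOWER": "francisco"}]},  # token pattern
])
doc = nlp("Apple is opening an office in San Francisco")
print([(span.text, span.label_) for span in doc.spans["ruler"]])
# -> [('Apple', 'ORG'), ('San Francisco', 'GPE')]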

View File

@ -1,4 +1,5 @@
from cymem.cymem cimport Pool from cymem.cymem cimport Pool
from thinc.backends.cblas cimport CBlas
from ..vocab cimport Vocab from ..vocab cimport Vocab
from .trainable_pipe cimport TrainablePipe from .trainable_pipe cimport TrainablePipe
@ -12,7 +13,7 @@ cdef class Parser(TrainablePipe):
cdef readonly TransitionSystem moves cdef readonly TransitionSystem moves
cdef public object _multitasks cdef public object _multitasks
cdef void _parseC(self, StateC** states, cdef void _parseC(self, CBlas cblas, StateC** states,
WeightsC weights, SizesC sizes) nogil WeightsC weights, SizesC sizes) nogil
cdef void c_transition_batch(self, StateC** states, const float* scores, cdef void c_transition_batch(self, StateC** states, const float* scores,

View File

@ -9,7 +9,7 @@ from libc.stdlib cimport calloc, free
import random import random
import srsly import srsly
from thinc.api import set_dropout_rate, CupyOps from thinc.api import get_ops, set_dropout_rate, CupyOps
from thinc.extra.search cimport Beam from thinc.extra.search cimport Beam
import numpy.random import numpy.random
import numpy import numpy
@ -259,6 +259,12 @@ cdef class Parser(TrainablePipe):
def greedy_parse(self, docs, drop=0.): def greedy_parse(self, docs, drop=0.):
cdef vector[StateC*] states cdef vector[StateC*] states
cdef StateClass state cdef StateClass state
ops = self.model.ops
cdef CBlas cblas
if isinstance(ops, CupyOps):
cblas = get_ops("cpu").cblas()
else:
cblas = ops.cblas()
self._ensure_labels_are_added(docs) self._ensure_labels_are_added(docs)
set_dropout_rate(self.model, drop) set_dropout_rate(self.model, drop)
batch = self.moves.init_batch(docs) batch = self.moves.init_batch(docs)
@ -269,8 +275,7 @@ cdef class Parser(TrainablePipe):
states.push_back(state.c) states.push_back(state.c)
sizes = get_c_sizes(model, states.size()) sizes = get_c_sizes(model, states.size())
with nogil: with nogil:
self._parseC(&states[0], self._parseC(cblas, &states[0], weights, sizes)
weights, sizes)
model.clear_memory() model.clear_memory()
del model del model
return batch return batch
@ -297,14 +302,13 @@ cdef class Parser(TrainablePipe):
del model del model
return list(batch) return list(batch)
cdef void _parseC(self, StateC** states, cdef void _parseC(self, CBlas cblas, StateC** states,
WeightsC weights, SizesC sizes) nogil: WeightsC weights, SizesC sizes) nogil:
cdef int i, j cdef int i, j
cdef vector[StateC*] unfinished cdef vector[StateC*] unfinished
cdef ActivationsC activations = alloc_activations(sizes) cdef ActivationsC activations = alloc_activations(sizes)
while sizes.states >= 1: while sizes.states >= 1:
predict_states(&activations, predict_states(cblas, &activations, states, &weights, sizes)
states, &weights, sizes)
# Validate actions, argmax, take action. # Validate actions, argmax, take action.
self.c_transition_batch(states, self.c_transition_batch(states,
activations.scores, sizes.classes, sizes.states) activations.scores, sizes.classes, sizes.states)

View File

@ -485,3 +485,29 @@ class RecommendationSchema(BaseModel):
word_vectors: Optional[str] = None word_vectors: Optional[str] = None
transformer: Optional[RecommendationTrf] = None transformer: Optional[RecommendationTrf] = None
has_letters: bool = True has_letters: bool = True
class DocJSONSchema(BaseModel):
"""
JSON/dict format for JSON representation of Doc objects.
"""
cats: Optional[Dict[StrictStr, StrictFloat]] = Field(
None, title="Categories with corresponding probabilities"
)
ents: Optional[List[Dict[StrictStr, Union[StrictInt, StrictStr]]]] = Field(
None, title="Information on entities"
)
sents: Optional[List[Dict[StrictStr, StrictInt]]] = Field(
None, title="Indices of sentences' start and end indices"
)
text: StrictStr = Field(..., title="Document text")
spans: Dict[StrictStr, List[Dict[StrictStr, Union[StrictStr, StrictInt]]]] = Field(
None, title="Span information - end/start indices, label, KB ID"
)
tokens: List[Dict[StrictStr, Union[StrictStr, StrictInt]]] = Field(
..., title="Token information - ID, start, annotations"
)
_: Optional[Dict[StrictStr, Any]] = Field(
None, title="Any custom data stored in the document's _ attribute"
)
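A minimal sketch of how DocJSONSchema is meant to be used, mirroring the validation calls in the new tests further below; assumes a blank English pipeline:

import spacy
from spacy import schemas

doc = spacy.blank("en")("San Francisco is foggy")
errors = schemas.validate(schemas.DocJSONSchema, doc.to_json())
assert not errors  # an empty error list means the JSON output matched the schema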

View File

@ -916,6 +916,10 @@ class Scorer:
ref = eg.reference ref = eg.reference
pred = eg.predicted pred = eg.predicted
for key, gold_sg in ref.spans.items(): for key, gold_sg in ref.spans.items():
# TODO: it might be better to do something like pred.spans.get(key, [])
if len(gold_sg) == 0:
# if there are no spans there's nothing to predict
continue
if key.startswith(output_prefix): if key.startswith(output_prefix):
pred_sg = pred.spans[key] pred_sg = pred.spans[key]
for gold_mention, pred_mention in zip(gold_sg, pred_sg): for gold_mention, pred_mention in zip(gold_sg, pred_sg):
@ -924,6 +928,9 @@ class Scorer:
pred_starts.append(pred_mention.start) pred_starts.append(pred_mention.start)
pred_ends.append(pred_mention.end) pred_ends.append(pred_mention.end)
# it's possible there are no heads to predict from, in which case, skip
if len(starts) == 0:
continue
# see how many are perfect # see how many are perfect
cs = [a == b for a, b in zip(starts, pred_starts)] cs = [a == b for a, b in zip(starts, pred_starts)]
@ -933,7 +940,13 @@ class Scorer:
scores.append(float(accuracy)) scores.append(float(accuracy))
out_key = f"span_{output_prefix}_accuracy" out_key = f"span_{output_prefix}_accuracy"
return {out_key: mean(scores)}
# it is possible there was nothing to score
final = 0.0
if len(scores) > 0:
final = mean(scores)
return {out_key: final}
def get_ner_prf(examples: Iterable[Example], **kwargs) -> Dict[str, Any]: def get_ner_prf(examples: Iterable[Example], **kwargs) -> Dict[str, Any]:

View File

@ -0,0 +1,191 @@
import pytest
import spacy
from spacy import schemas
from spacy.tokens import Doc, Span
@pytest.fixture()
def doc(en_vocab):
words = ["c", "d", "e"]
pos = ["VERB", "NOUN", "NOUN"]
tags = ["VBP", "NN", "NN"]
heads = [0, 0, 1]
deps = ["ROOT", "dobj", "dobj"]
ents = ["O", "B-ORG", "O"]
morphs = ["Feat1=A", "Feat1=B", "Feat1=A|Feat2=D"]
return Doc(
en_vocab,
words=words,
pos=pos,
tags=tags,
heads=heads,
deps=deps,
ents=ents,
morphs=morphs,
)
@pytest.fixture()
def doc_without_deps(en_vocab):
words = ["c", "d", "e"]
pos = ["VERB", "NOUN", "NOUN"]
tags = ["VBP", "NN", "NN"]
ents = ["O", "B-ORG", "O"]
morphs = ["Feat1=A", "Feat1=B", "Feat1=A|Feat2=D"]
return Doc(
en_vocab,
words=words,
pos=pos,
tags=tags,
ents=ents,
morphs=morphs,
sent_starts=[True, False, True],
)
def test_doc_to_json(doc):
json_doc = doc.to_json()
assert json_doc["text"] == "c d e "
assert len(json_doc["tokens"]) == 3
assert json_doc["tokens"][0]["pos"] == "VERB"
assert json_doc["tokens"][0]["tag"] == "VBP"
assert json_doc["tokens"][0]["dep"] == "ROOT"
assert len(json_doc["ents"]) == 1
assert json_doc["ents"][0]["start"] == 2 # character offset!
assert json_doc["ents"][0]["end"] == 3 # character offset!
assert json_doc["ents"][0]["label"] == "ORG"
assert not schemas.validate(schemas.DocJSONSchema, json_doc)
def test_doc_to_json_underscore(doc):
Doc.set_extension("json_test1", default=False)
Doc.set_extension("json_test2", default=False)
doc._.json_test1 = "hello world"
doc._.json_test2 = [1, 2, 3]
json_doc = doc.to_json(underscore=["json_test1", "json_test2"])
assert "_" in json_doc
assert json_doc["_"]["json_test1"] == "hello world"
assert json_doc["_"]["json_test2"] == [1, 2, 3]
assert not schemas.validate(schemas.DocJSONSchema, json_doc)
def test_doc_to_json_underscore_error_attr(doc):
"""Test that Doc.to_json() raises an error if a custom attribute doesn't
exist in the ._ space."""
with pytest.raises(ValueError):
doc.to_json(underscore=["json_test3"])
def test_doc_to_json_underscore_error_serialize(doc):
"""Test that Doc.to_json() raises an error if a custom attribute value
isn't JSON-serializable."""
Doc.set_extension("json_test4", method=lambda doc: doc.text)
with pytest.raises(ValueError):
doc.to_json(underscore=["json_test4"])
def test_doc_to_json_span(doc):
"""Test that Doc.to_json() includes spans"""
doc.spans["test"] = [Span(doc, 0, 2, "test"), Span(doc, 0, 1, "test")]
json_doc = doc.to_json()
assert "spans" in json_doc
assert len(json_doc["spans"]) == 1
assert len(json_doc["spans"]["test"]) == 2
assert json_doc["spans"]["test"][0]["start"] == 0
assert not schemas.validate(schemas.DocJSONSchema, json_doc)
def test_json_to_doc(doc):
new_doc = Doc(doc.vocab).from_json(doc.to_json(), validate=True)
new_tokens = [token for token in new_doc]
assert new_doc.text == doc.text == "c d e "
assert len(new_tokens) == len([token for token in doc]) == 3
assert new_tokens[0].pos == doc[0].pos
assert new_tokens[0].tag == doc[0].tag
assert new_tokens[0].dep == doc[0].dep
assert new_tokens[0].head.idx == doc[0].head.idx
assert new_tokens[0].lemma == doc[0].lemma
assert len(new_doc.ents) == 1
assert new_doc.ents[0].start == 1
assert new_doc.ents[0].end == 2
assert new_doc.ents[0].label_ == "ORG"
def test_json_to_doc_underscore(doc):
if not Doc.has_extension("json_test1"):
Doc.set_extension("json_test1", default=False)
if not Doc.has_extension("json_test2"):
Doc.set_extension("json_test2", default=False)
doc._.json_test1 = "hello world"
doc._.json_test2 = [1, 2, 3]
json_doc = doc.to_json(underscore=["json_test1", "json_test2"])
new_doc = Doc(doc.vocab).from_json(json_doc, validate=True)
assert all([new_doc.has_extension(f"json_test{i}") for i in range(1, 3)])
assert new_doc._.json_test1 == "hello world"
assert new_doc._.json_test2 == [1, 2, 3]
def test_json_to_doc_spans(doc):
"""Test that Doc.from_json() includes correct.spans."""
doc.spans["test"] = [
Span(doc, 0, 2, label="test"),
Span(doc, 0, 1, label="test", kb_id=7),
]
json_doc = doc.to_json()
new_doc = Doc(doc.vocab).from_json(json_doc, validate=True)
assert len(new_doc.spans) == 1
assert len(new_doc.spans["test"]) == 2
for i in range(2):
assert new_doc.spans["test"][i].start == doc.spans["test"][i].start
assert new_doc.spans["test"][i].end == doc.spans["test"][i].end
assert new_doc.spans["test"][i].label == doc.spans["test"][i].label
assert new_doc.spans["test"][i].kb_id == doc.spans["test"][i].kb_id
def test_json_to_doc_sents(doc, doc_without_deps):
"""Test that Doc.from_json() includes correct.sents."""
for test_doc in (doc, doc_without_deps):
json_doc = test_doc.to_json()
new_doc = Doc(doc.vocab).from_json(json_doc, validate=True)
assert [sent.text for sent in test_doc.sents] == [
sent.text for sent in new_doc.sents
]
assert [token.is_sent_start for token in test_doc] == [
token.is_sent_start for token in new_doc
]
def test_json_to_doc_cats(doc):
"""Test that Doc.from_json() includes correct .cats."""
cats = {"A": 0.3, "B": 0.7}
doc.cats = cats
json_doc = doc.to_json()
new_doc = Doc(doc.vocab).from_json(json_doc, validate=True)
assert new_doc.cats == cats
def test_json_to_doc_spaces():
"""Test that Doc.from_json() preserves spaces correctly."""
doc = spacy.blank("en")("This is just brilliant.")
json_doc = doc.to_json()
new_doc = Doc(doc.vocab).from_json(json_doc, validate=True)
assert doc.text == new_doc.text
def test_json_to_doc_attribute_consistency(doc):
"""Test that Doc.from_json() raises an exception if tokens don't all have the same set of properties."""
doc_json = doc.to_json()
doc_json["tokens"][1].pop("morph")
with pytest.raises(ValueError):
Doc(doc.vocab).from_json(doc_json)
def test_json_to_doc_validation_error(doc):
"""Test that Doc.from_json() raises an exception when validating invalid input."""
doc_json = doc.to_json()
doc_json.pop("tokens")
with pytest.raises(ValueError):
Doc(doc.vocab).from_json(doc_json, validate=True)

View File

@ -5,11 +5,9 @@ from spacy.compat import pickle
def test_pickle_single_doc(): def test_pickle_single_doc():
nlp = Language() nlp = Language()
doc = nlp("pickle roundtrip") doc = nlp("pickle roundtrip")
doc._context = 3
data = pickle.dumps(doc, 1) data = pickle.dumps(doc, 1)
doc2 = pickle.loads(data) doc2 = pickle.loads(data)
assert doc2.text == "pickle roundtrip" assert doc2.text == "pickle roundtrip"
assert doc2._context == 3
def test_list_of_docs_pickles_efficiently(): def test_list_of_docs_pickles_efficiently():

View File

@ -428,10 +428,19 @@ def test_span_string_label_kb_id(doc):
assert span.kb_id == doc.vocab.strings["Q342"] assert span.kb_id == doc.vocab.strings["Q342"]
def test_span_string_label_id(doc):
span = Span(doc, 0, 1, label="hello", span_id="Q342")
assert span.label_ == "hello"
assert span.label == doc.vocab.strings["hello"]
assert span.id_ == "Q342"
assert span.id == doc.vocab.strings["Q342"]
def test_span_attrs_writable(doc): def test_span_attrs_writable(doc):
span = Span(doc, 0, 1) span = Span(doc, 0, 1)
span.label_ = "label" span.label_ = "label"
span.kb_id_ = "kb_id" span.kb_id_ = "kb_id"
span.id_ = "id"
def test_span_ents_property(doc): def test_span_ents_property(doc):
@ -619,6 +628,9 @@ def test_span_comparison(doc):
assert Span(doc, 0, 4, "LABEL", kb_id="KB_ID") <= Span(doc, 1, 3) assert Span(doc, 0, 4, "LABEL", kb_id="KB_ID") <= Span(doc, 1, 3)
assert Span(doc, 1, 3) > Span(doc, 0, 4, "LABEL", kb_id="KB_ID") assert Span(doc, 1, 3) > Span(doc, 0, 4, "LABEL", kb_id="KB_ID")
assert Span(doc, 1, 3) >= Span(doc, 0, 4, "LABEL", kb_id="KB_ID") assert Span(doc, 1, 3) >= Span(doc, 0, 4, "LABEL", kb_id="KB_ID")
# Different id
assert Span(doc, 1, 3, span_id="AAA") < Span(doc, 1, 3, span_id="BBB")
# fmt: on # fmt: on

View File

@ -1,72 +0,0 @@
import pytest
from spacy.tokens import Doc, Span
@pytest.fixture()
def doc(en_vocab):
words = ["c", "d", "e"]
pos = ["VERB", "NOUN", "NOUN"]
tags = ["VBP", "NN", "NN"]
heads = [0, 0, 0]
deps = ["ROOT", "dobj", "dobj"]
ents = ["O", "B-ORG", "O"]
morphs = ["Feat1=A", "Feat1=B", "Feat1=A|Feat2=D"]
return Doc(
en_vocab,
words=words,
pos=pos,
tags=tags,
heads=heads,
deps=deps,
ents=ents,
morphs=morphs,
)
def test_doc_to_json(doc):
json_doc = doc.to_json()
assert json_doc["text"] == "c d e "
assert len(json_doc["tokens"]) == 3
assert json_doc["tokens"][0]["pos"] == "VERB"
assert json_doc["tokens"][0]["tag"] == "VBP"
assert json_doc["tokens"][0]["dep"] == "ROOT"
assert len(json_doc["ents"]) == 1
assert json_doc["ents"][0]["start"] == 2 # character offset!
assert json_doc["ents"][0]["end"] == 3 # character offset!
assert json_doc["ents"][0]["label"] == "ORG"
def test_doc_to_json_underscore(doc):
Doc.set_extension("json_test1", default=False)
Doc.set_extension("json_test2", default=False)
doc._.json_test1 = "hello world"
doc._.json_test2 = [1, 2, 3]
json_doc = doc.to_json(underscore=["json_test1", "json_test2"])
assert "_" in json_doc
assert json_doc["_"]["json_test1"] == "hello world"
assert json_doc["_"]["json_test2"] == [1, 2, 3]
def test_doc_to_json_underscore_error_attr(doc):
"""Test that Doc.to_json() raises an error if a custom attribute doesn't
exist in the ._ space."""
with pytest.raises(ValueError):
doc.to_json(underscore=["json_test3"])
def test_doc_to_json_underscore_error_serialize(doc):
"""Test that Doc.to_json() raises an error if a custom attribute value
isn't JSON-serializable."""
Doc.set_extension("json_test4", method=lambda doc: doc.text)
with pytest.raises(ValueError):
doc.to_json(underscore=["json_test4"])
def test_doc_to_json_span(doc):
"""Test that Doc.to_json() includes spans"""
doc.spans["test"] = [Span(doc, 0, 2, "test"), Span(doc, 0, 1, "test")]
json_doc = doc.to_json()
assert "spans" in json_doc
assert len(json_doc["spans"]) == 1
assert len(json_doc["spans"]["test"]) == 2
assert json_doc["spans"]["test"][0]["start"] == 0

View File

@ -167,3 +167,12 @@ def test_issue3521(en_tokenizer, word):
tok = en_tokenizer(word)[1] tok = en_tokenizer(word)[1]
# 'not' and 'would' should be stopwords, also in their abbreviated forms # 'not' and 'would' should be stopwords, also in their abbreviated forms
assert tok.is_stop assert tok.is_stop
@pytest.mark.issue(10699)
@pytest.mark.parametrize("text", ["theses", "thisre"])
def test_issue10699(en_tokenizer, text):
"""Test that 'theses' and 'thisre' are excluded from the contractions
generated by the English tokenizer exceptions."""
tokens = en_tokenizer(text)
assert len(tokens) == 1

View File

@ -476,6 +476,17 @@ def test_matcher_extension_set_membership(en_vocab):
assert len(matches) == 0 assert len(matches) == 0
@pytest.mark.xfail(reason="IN predicate must handle sequence values in extensions")
def test_matcher_extension_in_set_predicate(en_vocab):
matcher = Matcher(en_vocab)
Token.set_extension("ext", default=[])
pattern = [{"_": {"ext": {"IN": ["A", "C"]}}}]
matcher.add("M", [pattern])
doc = Doc(en_vocab, words=["a", "b", "c"])
doc[0]._.ext = ["A", "B"]
assert len(matcher(doc)) == 1
def test_matcher_basic_check(en_vocab): def test_matcher_basic_check(en_vocab):
matcher = Matcher(en_vocab) matcher = Matcher(en_vocab)
# Potential mistake: pass in pattern instead of list of patterns # Potential mistake: pass in pattern instead of list of patterns

View File

@ -10,7 +10,7 @@ from spacy.lang.it import Italian
from spacy.language import Language from spacy.language import Language
from spacy.lookups import Lookups from spacy.lookups import Lookups
from spacy.pipeline._parser_internals.ner import BiluoPushDown from spacy.pipeline._parser_internals.ner import BiluoPushDown
from spacy.training import Example, iob_to_biluo from spacy.training import Example, iob_to_biluo, split_bilu_label
from spacy.tokens import Doc, Span from spacy.tokens import Doc, Span
from spacy.vocab import Vocab from spacy.vocab import Vocab
import logging import logging
@ -110,6 +110,9 @@ def test_issue2385():
# maintain support for iob2 format # maintain support for iob2 format
tags3 = ("B-PERSON", "I-PERSON", "B-PERSON") tags3 = ("B-PERSON", "I-PERSON", "B-PERSON")
assert iob_to_biluo(tags3) == ["B-PERSON", "L-PERSON", "U-PERSON"] assert iob_to_biluo(tags3) == ["B-PERSON", "L-PERSON", "U-PERSON"]
# ensure it works with hyphens in the name
tags4 = ("B-MULTI-PERSON", "I-MULTI-PERSON", "B-MULTI-PERSON")
assert iob_to_biluo(tags4) == ["B-MULTI-PERSON", "L-MULTI-PERSON", "U-MULTI-PERSON"]
@pytest.mark.issue(2800) @pytest.mark.issue(2800)
@ -154,6 +157,24 @@ def test_issue3209():
assert ner2.move_names == move_names assert ner2.move_names == move_names
def test_labels_from_BILUO():
"""Test that labels are inferred correctly when there's a - in label."""
nlp = English()
ner = nlp.add_pipe("ner")
ner.add_label("LARGE-ANIMAL")
nlp.initialize()
move_names = [
"O",
"B-LARGE-ANIMAL",
"I-LARGE-ANIMAL",
"L-LARGE-ANIMAL",
"U-LARGE-ANIMAL",
]
labels = {"LARGE-ANIMAL"}
assert ner.move_names == move_names
assert set(ner.labels) == labels
@pytest.mark.issue(4267) @pytest.mark.issue(4267)
def test_issue4267(): def test_issue4267():
"""Test that running an entity_ruler after ner gives consistent results""" """Test that running an entity_ruler after ner gives consistent results"""
@ -298,7 +319,7 @@ def test_oracle_moves_missing_B(en_vocab):
elif tag == "O": elif tag == "O":
moves.add_action(move_types.index("O"), "") moves.add_action(move_types.index("O"), "")
else: else:
action, label = tag.split("-") action, label = split_bilu_label(tag)
moves.add_action(move_types.index("B"), label) moves.add_action(move_types.index("B"), label)
moves.add_action(move_types.index("I"), label) moves.add_action(move_types.index("I"), label)
moves.add_action(move_types.index("L"), label) moves.add_action(move_types.index("L"), label)
@ -324,7 +345,7 @@ def test_oracle_moves_whitespace(en_vocab):
elif tag == "O": elif tag == "O":
moves.add_action(move_types.index("O"), "") moves.add_action(move_types.index("O"), "")
else: else:
action, label = tag.split("-") action, label = split_bilu_label(tag)
moves.add_action(move_types.index(action), label) moves.add_action(move_types.index(action), label)
moves.get_oracle_sequence(example) moves.get_oracle_sequence(example)

View File

@ -49,7 +49,9 @@ def test_parser_contains_cycle(tree, cyclic_tree, partial_tree, multirooted_tree
assert contains_cycle(multirooted_tree) is None assert contains_cycle(multirooted_tree) is None
def test_parser_is_nonproj_arc(nonproj_tree, partial_tree, multirooted_tree): def test_parser_is_nonproj_arc(
cyclic_tree, nonproj_tree, partial_tree, multirooted_tree
):
assert is_nonproj_arc(0, nonproj_tree) is False assert is_nonproj_arc(0, nonproj_tree) is False
assert is_nonproj_arc(1, nonproj_tree) is False assert is_nonproj_arc(1, nonproj_tree) is False
assert is_nonproj_arc(2, nonproj_tree) is False assert is_nonproj_arc(2, nonproj_tree) is False
@ -62,15 +64,23 @@ def test_parser_is_nonproj_arc(nonproj_tree, partial_tree, multirooted_tree):
assert is_nonproj_arc(7, partial_tree) is False assert is_nonproj_arc(7, partial_tree) is False
assert is_nonproj_arc(17, multirooted_tree) is False assert is_nonproj_arc(17, multirooted_tree) is False
assert is_nonproj_arc(16, multirooted_tree) is True assert is_nonproj_arc(16, multirooted_tree) is True
with pytest.raises(
ValueError, match=r"Found cycle in dependency graph: \[1, 2, 2, 4, 5, 3, 2\]"
):
is_nonproj_arc(6, cyclic_tree)
def test_parser_is_nonproj_tree( def test_parser_is_nonproj_tree(
proj_tree, nonproj_tree, partial_tree, multirooted_tree proj_tree, cyclic_tree, nonproj_tree, partial_tree, multirooted_tree
): ):
assert is_nonproj_tree(proj_tree) is False assert is_nonproj_tree(proj_tree) is False
assert is_nonproj_tree(nonproj_tree) is True assert is_nonproj_tree(nonproj_tree) is True
assert is_nonproj_tree(partial_tree) is False assert is_nonproj_tree(partial_tree) is False
assert is_nonproj_tree(multirooted_tree) is True assert is_nonproj_tree(multirooted_tree) is True
with pytest.raises(
ValueError, match=r"Found cycle in dependency graph: \[1, 2, 2, 4, 5, 3, 2\]"
):
is_nonproj_tree(cyclic_tree)
def test_parser_pseudoprojectivity(en_vocab): def test_parser_pseudoprojectivity(en_vocab):
@ -84,8 +94,10 @@ def test_parser_pseudoprojectivity(en_vocab):
tree = [1, 2, 2] tree = [1, 2, 2]
nonproj_tree = [1, 2, 2, 4, 5, 2, 7, 4, 2] nonproj_tree = [1, 2, 2, 4, 5, 2, 7, 4, 2]
nonproj_tree2 = [9, 1, 3, 1, 5, 6, 9, 8, 6, 1, 6, 12, 13, 10, 1] nonproj_tree2 = [9, 1, 3, 1, 5, 6, 9, 8, 6, 1, 6, 12, 13, 10, 1]
cyclic_tree = [1, 2, 2, 4, 5, 3, 2]
labels = ["det", "nsubj", "root", "det", "dobj", "aux", "nsubj", "acl", "punct"] labels = ["det", "nsubj", "root", "det", "dobj", "aux", "nsubj", "acl", "punct"]
labels2 = ["advmod", "root", "det", "nsubj", "advmod", "det", "dobj", "det", "nmod", "aux", "nmod", "advmod", "det", "amod", "punct"] labels2 = ["advmod", "root", "det", "nsubj", "advmod", "det", "dobj", "det", "nmod", "aux", "nmod", "advmod", "det", "amod", "punct"]
cyclic_labels = ["det", "nsubj", "root", "det", "dobj", "aux", "punct"]
# fmt: on # fmt: on
assert nonproj.decompose("X||Y") == ("X", "Y") assert nonproj.decompose("X||Y") == ("X", "Y")
assert nonproj.decompose("X") == ("X", "") assert nonproj.decompose("X") == ("X", "")
@ -97,6 +109,8 @@ def test_parser_pseudoprojectivity(en_vocab):
assert nonproj.get_smallest_nonproj_arc_slow(nonproj_tree2) == 10 assert nonproj.get_smallest_nonproj_arc_slow(nonproj_tree2) == 10
# fmt: off # fmt: off
proj_heads, deco_labels = nonproj.projectivize(nonproj_tree, labels) proj_heads, deco_labels = nonproj.projectivize(nonproj_tree, labels)
with pytest.raises(ValueError, match=r'Found cycle in dependency graph: \[1, 2, 2, 4, 5, 3, 2\]'):
nonproj.projectivize(cyclic_tree, cyclic_labels)
assert proj_heads == [1, 2, 2, 4, 5, 2, 7, 5, 2] assert proj_heads == [1, 2, 2, 4, 5, 2, 7, 5, 2]
assert deco_labels == ["det", "nsubj", "root", "det", "dobj", "aux", assert deco_labels == ["det", "nsubj", "root", "det", "dobj", "aux",
"nsubj", "acl||dobj", "punct"] "nsubj", "acl||dobj", "punct"]
View File
@ -1076,12 +1076,23 @@ def test_no_gold_ents(patterns):
# this will run the pipeline on the examples and shouldn't crash # this will run the pipeline on the examples and shouldn't crash
results = nlp.evaluate(train_examples) results = nlp.evaluate(train_examples)
@pytest.mark.issue(9575) @pytest.mark.issue(9575)
def test_tokenization_mismatch(): def test_tokenization_mismatch():
nlp = English() nlp = English()
# include a matching entity so that update isn't skipped # include a matching entity so that update isn't skipped
doc1 = Doc(nlp.vocab, words=["Kirby", "123456"], spaces=[True, False], ents=["B-CHARACTER", "B-CARDINAL"]) doc1 = Doc(
doc2 = Doc(nlp.vocab, words=["Kirby", "123", "456"], spaces=[True, False, False], ents=["B-CHARACTER", "B-CARDINAL", "B-CARDINAL"]) nlp.vocab,
words=["Kirby", "123456"],
spaces=[True, False],
ents=["B-CHARACTER", "B-CARDINAL"],
)
doc2 = Doc(
nlp.vocab,
words=["Kirby", "123", "456"],
spaces=[True, False, False],
ents=["B-CHARACTER", "B-CARDINAL", "B-CARDINAL"],
)
eg = Example(doc1, doc2) eg = Example(doc1, doc2)
train_examples = [eg] train_examples = [eg]
View File
@ -5,12 +5,15 @@ from spacy.tokens import Doc, Span
from spacy.language import Language from spacy.language import Language
from spacy.lang.en import English from spacy.lang.en import English
from spacy.pipeline import EntityRuler, EntityRecognizer, merge_entities from spacy.pipeline import EntityRuler, EntityRecognizer, merge_entities
from spacy.pipeline import SpanRuler
from spacy.pipeline.ner import DEFAULT_NER_MODEL from spacy.pipeline.ner import DEFAULT_NER_MODEL
from spacy.errors import MatchPatternError from spacy.errors import MatchPatternError
from spacy.tests.util import make_tempdir from spacy.tests.util import make_tempdir
from thinc.api import NumpyOps, get_current_ops from thinc.api import NumpyOps, get_current_ops
ENTITY_RULERS = ["entity_ruler", "future_entity_ruler"]
@pytest.fixture @pytest.fixture
def nlp(): def nlp():
@ -37,12 +40,14 @@ def add_ent_component(doc):
@pytest.mark.issue(3345) @pytest.mark.issue(3345)
def test_issue3345(): @pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_issue3345(entity_ruler_factory):
"""Test case where preset entity crosses sentence boundary.""" """Test case where preset entity crosses sentence boundary."""
nlp = English() nlp = English()
doc = Doc(nlp.vocab, words=["I", "live", "in", "New", "York"]) doc = Doc(nlp.vocab, words=["I", "live", "in", "New", "York"])
doc[4].is_sent_start = True doc[4].is_sent_start = True
ruler = EntityRuler(nlp, patterns=[{"label": "GPE", "pattern": "New York"}]) ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
ruler.add_patterns([{"label": "GPE", "pattern": "New York"}])
cfg = {"model": DEFAULT_NER_MODEL} cfg = {"model": DEFAULT_NER_MODEL}
model = registry.resolve(cfg, validate=True)["model"] model = registry.resolve(cfg, validate=True)["model"]
ner = EntityRecognizer(doc.vocab, model) ner = EntityRecognizer(doc.vocab, model)
@ -60,13 +65,18 @@ def test_issue3345():
@pytest.mark.issue(4849) @pytest.mark.issue(4849)
def test_issue4849(): @pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_issue4849(entity_ruler_factory):
nlp = English() nlp = English()
patterns = [ patterns = [
{"label": "PERSON", "pattern": "joe biden", "id": "joe-biden"}, {"label": "PERSON", "pattern": "joe biden", "id": "joe-biden"},
{"label": "PERSON", "pattern": "bernie sanders", "id": "bernie-sanders"}, {"label": "PERSON", "pattern": "bernie sanders", "id": "bernie-sanders"},
] ]
ruler = nlp.add_pipe("entity_ruler", config={"phrase_matcher_attr": "LOWER"}) ruler = nlp.add_pipe(
entity_ruler_factory,
name="entity_ruler",
config={"phrase_matcher_attr": "LOWER"},
)
ruler.add_patterns(patterns) ruler.add_patterns(patterns)
text = """ text = """
The left is starting to take aim at Democratic front-runner Joe Biden. The left is starting to take aim at Democratic front-runner Joe Biden.
@ -86,10 +96,11 @@ def test_issue4849():
@pytest.mark.issue(5918) @pytest.mark.issue(5918)
def test_issue5918(): @pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_issue5918(entity_ruler_factory):
# Test edge case when merging entities. # Test edge case when merging entities.
nlp = English() nlp = English()
ruler = nlp.add_pipe("entity_ruler") ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
patterns = [ patterns = [
{"label": "ORG", "pattern": "Digicon Inc"}, {"label": "ORG", "pattern": "Digicon Inc"},
{"label": "ORG", "pattern": "Rotan Mosle Inc's"}, {"label": "ORG", "pattern": "Rotan Mosle Inc's"},
@ -114,9 +125,10 @@ def test_issue5918():
@pytest.mark.issue(8168) @pytest.mark.issue(8168)
def test_issue8168(): @pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_issue8168(entity_ruler_factory):
nlp = English() nlp = English()
ruler = nlp.add_pipe("entity_ruler") ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
patterns = [ patterns = [
{"label": "ORG", "pattern": "Apple"}, {"label": "ORG", "pattern": "Apple"},
{ {
@ -131,14 +143,17 @@ def test_issue8168():
}, },
] ]
ruler.add_patterns(patterns) ruler.add_patterns(patterns)
doc = nlp("San Francisco San Fran")
assert ruler._ent_ids == {8043148519967183733: ("GPE", "san-francisco")} assert all(t.ent_id_ == "san-francisco" for t in doc)
@pytest.mark.issue(8216) @pytest.mark.issue(8216)
def test_entity_ruler_fix8216(nlp, patterns): @pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_fix8216(nlp, patterns, entity_ruler_factory):
"""Test that patterns don't get added excessively.""" """Test that patterns don't get added excessively."""
ruler = nlp.add_pipe("entity_ruler", config={"validate": True}) ruler = nlp.add_pipe(
entity_ruler_factory, name="entity_ruler", config={"validate": True}
)
ruler.add_patterns(patterns) ruler.add_patterns(patterns)
pattern_count = sum(len(mm) for mm in ruler.matcher._patterns.values()) pattern_count = sum(len(mm) for mm in ruler.matcher._patterns.values())
assert pattern_count > 0 assert pattern_count > 0
@ -147,13 +162,16 @@ def test_entity_ruler_fix8216(nlp, patterns):
assert after_count == pattern_count assert after_count == pattern_count
def test_entity_ruler_init(nlp, patterns): @pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
ruler = EntityRuler(nlp, patterns=patterns) def test_entity_ruler_init(nlp, patterns, entity_ruler_factory):
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
ruler.add_patterns(patterns)
assert len(ruler) == len(patterns) assert len(ruler) == len(patterns)
assert len(ruler.labels) == 4 assert len(ruler.labels) == 4
assert "HELLO" in ruler assert "HELLO" in ruler
assert "BYE" in ruler assert "BYE" in ruler
ruler = nlp.add_pipe("entity_ruler") nlp.remove_pipe("entity_ruler")
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
ruler.add_patterns(patterns) ruler.add_patterns(patterns)
doc = nlp("hello world bye bye") doc = nlp("hello world bye bye")
assert len(doc.ents) == 2 assert len(doc.ents) == 2
@ -161,20 +179,23 @@ def test_entity_ruler_init(nlp, patterns):
assert doc.ents[1].label_ == "BYE" assert doc.ents[1].label_ == "BYE"
def test_entity_ruler_no_patterns_warns(nlp): @pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
ruler = EntityRuler(nlp) def test_entity_ruler_no_patterns_warns(nlp, entity_ruler_factory):
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
assert len(ruler) == 0 assert len(ruler) == 0
assert len(ruler.labels) == 0 assert len(ruler.labels) == 0
nlp.add_pipe("entity_ruler") nlp.remove_pipe("entity_ruler")
nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
assert nlp.pipe_names == ["entity_ruler"] assert nlp.pipe_names == ["entity_ruler"]
with pytest.warns(UserWarning): with pytest.warns(UserWarning):
doc = nlp("hello world bye bye") doc = nlp("hello world bye bye")
assert len(doc.ents) == 0 assert len(doc.ents) == 0
def test_entity_ruler_init_patterns(nlp, patterns): @pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_init_patterns(nlp, patterns, entity_ruler_factory):
# initialize with patterns # initialize with patterns
ruler = nlp.add_pipe("entity_ruler") ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
assert len(ruler.labels) == 0 assert len(ruler.labels) == 0
ruler.initialize(lambda: [], patterns=patterns) ruler.initialize(lambda: [], patterns=patterns)
assert len(ruler.labels) == 4 assert len(ruler.labels) == 4
@ -186,7 +207,7 @@ def test_entity_ruler_init_patterns(nlp, patterns):
nlp.config["initialize"]["components"]["entity_ruler"] = { nlp.config["initialize"]["components"]["entity_ruler"] = {
"patterns": {"@misc": "entity_ruler_patterns"} "patterns": {"@misc": "entity_ruler_patterns"}
} }
ruler = nlp.add_pipe("entity_ruler") ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
assert len(ruler.labels) == 0 assert len(ruler.labels) == 0
nlp.initialize() nlp.initialize()
assert len(ruler.labels) == 4 assert len(ruler.labels) == 4
@ -195,18 +216,20 @@ def test_entity_ruler_init_patterns(nlp, patterns):
assert doc.ents[1].label_ == "BYE" assert doc.ents[1].label_ == "BYE"
def test_entity_ruler_init_clear(nlp, patterns): @pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_init_clear(nlp, patterns, entity_ruler_factory):
"""Test that initialization clears patterns.""" """Test that initialization clears patterns."""
ruler = nlp.add_pipe("entity_ruler") ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
ruler.add_patterns(patterns) ruler.add_patterns(patterns)
assert len(ruler.labels) == 4 assert len(ruler.labels) == 4
ruler.initialize(lambda: []) ruler.initialize(lambda: [])
assert len(ruler.labels) == 0 assert len(ruler.labels) == 0
def test_entity_ruler_clear(nlp, patterns): @pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_clear(nlp, patterns, entity_ruler_factory):
"""Test that initialization clears patterns.""" """Test that initialization clears patterns."""
ruler = nlp.add_pipe("entity_ruler") ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
ruler.add_patterns(patterns) ruler.add_patterns(patterns)
assert len(ruler.labels) == 4 assert len(ruler.labels) == 4
doc = nlp("hello world") doc = nlp("hello world")
@ -218,8 +241,9 @@ def test_entity_ruler_clear(nlp, patterns):
assert len(doc.ents) == 0 assert len(doc.ents) == 0
def test_entity_ruler_existing(nlp, patterns): @pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
ruler = nlp.add_pipe("entity_ruler") def test_entity_ruler_existing(nlp, patterns, entity_ruler_factory):
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
ruler.add_patterns(patterns) ruler.add_patterns(patterns)
nlp.add_pipe("add_ent", before="entity_ruler") nlp.add_pipe("add_ent", before="entity_ruler")
doc = nlp("OH HELLO WORLD bye bye") doc = nlp("OH HELLO WORLD bye bye")
@ -228,8 +252,11 @@ def test_entity_ruler_existing(nlp, patterns):
assert doc.ents[1].label_ == "BYE" assert doc.ents[1].label_ == "BYE"
def test_entity_ruler_existing_overwrite(nlp, patterns): @pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
ruler = nlp.add_pipe("entity_ruler", config={"overwrite_ents": True}) def test_entity_ruler_existing_overwrite(nlp, patterns, entity_ruler_factory):
ruler = nlp.add_pipe(
entity_ruler_factory, name="entity_ruler", config={"overwrite_ents": True}
)
ruler.add_patterns(patterns) ruler.add_patterns(patterns)
nlp.add_pipe("add_ent", before="entity_ruler") nlp.add_pipe("add_ent", before="entity_ruler")
doc = nlp("OH HELLO WORLD bye bye") doc = nlp("OH HELLO WORLD bye bye")
@ -239,8 +266,11 @@ def test_entity_ruler_existing_overwrite(nlp, patterns):
assert doc.ents[1].label_ == "BYE" assert doc.ents[1].label_ == "BYE"
def test_entity_ruler_existing_complex(nlp, patterns): @pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
ruler = nlp.add_pipe("entity_ruler", config={"overwrite_ents": True}) def test_entity_ruler_existing_complex(nlp, patterns, entity_ruler_factory):
ruler = nlp.add_pipe(
entity_ruler_factory, name="entity_ruler", config={"overwrite_ents": True}
)
ruler.add_patterns(patterns) ruler.add_patterns(patterns)
nlp.add_pipe("add_ent", before="entity_ruler") nlp.add_pipe("add_ent", before="entity_ruler")
doc = nlp("foo foo bye bye") doc = nlp("foo foo bye bye")
@ -251,8 +281,11 @@ def test_entity_ruler_existing_complex(nlp, patterns):
assert len(doc.ents[1]) == 2 assert len(doc.ents[1]) == 2
def test_entity_ruler_entity_id(nlp, patterns): @pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
ruler = nlp.add_pipe("entity_ruler", config={"overwrite_ents": True}) def test_entity_ruler_entity_id(nlp, patterns, entity_ruler_factory):
ruler = nlp.add_pipe(
entity_ruler_factory, name="entity_ruler", config={"overwrite_ents": True}
)
ruler.add_patterns(patterns) ruler.add_patterns(patterns)
doc = nlp("Apple is a technology company") doc = nlp("Apple is a technology company")
assert len(doc.ents) == 1 assert len(doc.ents) == 1
@ -260,18 +293,21 @@ def test_entity_ruler_entity_id(nlp, patterns):
assert doc.ents[0].ent_id_ == "a1" assert doc.ents[0].ent_id_ == "a1"
def test_entity_ruler_cfg_ent_id_sep(nlp, patterns): @pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_cfg_ent_id_sep(nlp, patterns, entity_ruler_factory):
config = {"overwrite_ents": True, "ent_id_sep": "**"} config = {"overwrite_ents": True, "ent_id_sep": "**"}
ruler = nlp.add_pipe("entity_ruler", config=config) ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler", config=config)
ruler.add_patterns(patterns) ruler.add_patterns(patterns)
assert "TECH_ORG**a1" in ruler.phrase_patterns
doc = nlp("Apple is a technology company") doc = nlp("Apple is a technology company")
if isinstance(ruler, EntityRuler):
assert "TECH_ORG**a1" in ruler.phrase_patterns
assert len(doc.ents) == 1 assert len(doc.ents) == 1
assert doc.ents[0].label_ == "TECH_ORG" assert doc.ents[0].label_ == "TECH_ORG"
assert doc.ents[0].ent_id_ == "a1" assert doc.ents[0].ent_id_ == "a1"
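For readers puzzled by the "TECH_ORG**a1" string: the legacy EntityRuler keys its phrase patterns (and phrase matcher entries) as label + ent_id_sep + pattern id, so the configured separator "**" shows up verbatim; with the default separator the same kind of key looks like "PERSON||dina", as in the removal tests further down. A tiny illustration:

label, ent_id, ent_id_sep = "TECH_ORG", "a1", "**"
assert f"{label}{ent_id_sep}{ent_id}" == "TECH_ORG**a1"
assert "PERSON" + "||" + "dina" == "PERSON||dina"  # default separator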
def test_entity_ruler_serialize_bytes(nlp, patterns): @pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_serialize_bytes(nlp, patterns, entity_ruler_factory):
ruler = EntityRuler(nlp, patterns=patterns) ruler = EntityRuler(nlp, patterns=patterns)
assert len(ruler) == len(patterns) assert len(ruler) == len(patterns)
assert len(ruler.labels) == 4 assert len(ruler.labels) == 4
@ -288,7 +324,10 @@ def test_entity_ruler_serialize_bytes(nlp, patterns):
assert sorted(new_ruler.labels) == sorted(ruler.labels) assert sorted(new_ruler.labels) == sorted(ruler.labels)
def test_entity_ruler_serialize_phrase_matcher_attr_bytes(nlp, patterns): @pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_serialize_phrase_matcher_attr_bytes(
nlp, patterns, entity_ruler_factory
):
ruler = EntityRuler(nlp, phrase_matcher_attr="LOWER", patterns=patterns) ruler = EntityRuler(nlp, phrase_matcher_attr="LOWER", patterns=patterns)
assert len(ruler) == len(patterns) assert len(ruler) == len(patterns)
assert len(ruler.labels) == 4 assert len(ruler.labels) == 4
@ -303,8 +342,9 @@ def test_entity_ruler_serialize_phrase_matcher_attr_bytes(nlp, patterns):
assert new_ruler.phrase_matcher_attr == "LOWER" assert new_ruler.phrase_matcher_attr == "LOWER"
def test_entity_ruler_validate(nlp): @pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
ruler = EntityRuler(nlp) def test_entity_ruler_validate(nlp, entity_ruler_factory):
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
validated_ruler = EntityRuler(nlp, validate=True) validated_ruler = EntityRuler(nlp, validate=True)
valid_pattern = {"label": "HELLO", "pattern": [{"LOWER": "HELLO"}]} valid_pattern = {"label": "HELLO", "pattern": [{"LOWER": "HELLO"}]}
@ -322,32 +362,35 @@ def test_entity_ruler_validate(nlp):
validated_ruler.add_patterns([invalid_pattern]) validated_ruler.add_patterns([invalid_pattern])
def test_entity_ruler_properties(nlp, patterns): @pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_properties(nlp, patterns, entity_ruler_factory):
ruler = EntityRuler(nlp, patterns=patterns, overwrite_ents=True) ruler = EntityRuler(nlp, patterns=patterns, overwrite_ents=True)
assert sorted(ruler.labels) == sorted(["HELLO", "BYE", "COMPLEX", "TECH_ORG"]) assert sorted(ruler.labels) == sorted(["HELLO", "BYE", "COMPLEX", "TECH_ORG"])
assert sorted(ruler.ent_ids) == ["a1", "a2"] assert sorted(ruler.ent_ids) == ["a1", "a2"]
def test_entity_ruler_overlapping_spans(nlp): @pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
ruler = EntityRuler(nlp) def test_entity_ruler_overlapping_spans(nlp, entity_ruler_factory):
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
patterns = [ patterns = [
{"label": "FOOBAR", "pattern": "foo bar"}, {"label": "FOOBAR", "pattern": "foo bar"},
{"label": "BARBAZ", "pattern": "bar baz"}, {"label": "BARBAZ", "pattern": "bar baz"},
] ]
ruler.add_patterns(patterns) ruler.add_patterns(patterns)
doc = ruler(nlp.make_doc("foo bar baz")) doc = nlp("foo bar baz")
assert len(doc.ents) == 1 assert len(doc.ents) == 1
assert doc.ents[0].label_ == "FOOBAR" assert doc.ents[0].label_ == "FOOBAR"
@pytest.mark.parametrize("n_process", [1, 2]) @pytest.mark.parametrize("n_process", [1, 2])
def test_entity_ruler_multiprocessing(nlp, n_process): @pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
def test_entity_ruler_multiprocessing(nlp, n_process, entity_ruler_factory):
if isinstance(get_current_ops(), NumpyOps) or n_process < 2: if isinstance(get_current_ops(), NumpyOps) or n_process < 2:
texts = ["I enjoy eating Pizza Hut pizza."] texts = ["I enjoy eating Pizza Hut pizza."]
patterns = [{"label": "FASTFOOD", "pattern": "Pizza Hut", "id": "1234"}] patterns = [{"label": "FASTFOOD", "pattern": "Pizza Hut", "id": "1234"}]
ruler = nlp.add_pipe("entity_ruler") ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
ruler.add_patterns(patterns) ruler.add_patterns(patterns)
for doc in nlp.pipe(texts, n_process=2): for doc in nlp.pipe(texts, n_process=2):
@ -355,8 +398,9 @@ def test_entity_ruler_multiprocessing(nlp, n_process):
assert ent.ent_id_ == "1234" assert ent.ent_id_ == "1234"
def test_entity_ruler_serialize_jsonl(nlp, patterns): @pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
ruler = nlp.add_pipe("entity_ruler") def test_entity_ruler_serialize_jsonl(nlp, patterns, entity_ruler_factory):
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
ruler.add_patterns(patterns) ruler.add_patterns(patterns)
with make_tempdir() as d: with make_tempdir() as d:
ruler.to_disk(d / "test_ruler.jsonl") ruler.to_disk(d / "test_ruler.jsonl")
@ -365,8 +409,9 @@ def test_entity_ruler_serialize_jsonl(nlp, patterns):
ruler.from_disk(d / "non_existing.jsonl") # read from a bad jsonl file ruler.from_disk(d / "non_existing.jsonl") # read from a bad jsonl file
def test_entity_ruler_serialize_dir(nlp, patterns): @pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
ruler = nlp.add_pipe("entity_ruler") def test_entity_ruler_serialize_dir(nlp, patterns, entity_ruler_factory):
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
ruler.add_patterns(patterns) ruler.add_patterns(patterns)
with make_tempdir() as d: with make_tempdir() as d:
ruler.to_disk(d / "test_ruler") ruler.to_disk(d / "test_ruler")
@ -375,52 +420,65 @@ def test_entity_ruler_serialize_dir(nlp, patterns):
ruler.from_disk(d / "non_existing_dir") # read from a bad directory ruler.from_disk(d / "non_existing_dir") # read from a bad directory
def test_entity_ruler_remove_basic(nlp): @pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
ruler = EntityRuler(nlp) def test_entity_ruler_remove_basic(nlp, entity_ruler_factory):
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
patterns = [ patterns = [
{"label": "PERSON", "pattern": "Duygu", "id": "duygu"}, {"label": "PERSON", "pattern": "Dina", "id": "dina"},
{"label": "ORG", "pattern": "ACME", "id": "acme"}, {"label": "ORG", "pattern": "ACME", "id": "acme"},
{"label": "ORG", "pattern": "ACM"}, {"label": "ORG", "pattern": "ACM"},
] ]
ruler.add_patterns(patterns) ruler.add_patterns(patterns)
doc = ruler(nlp.make_doc("Duygu went to school")) doc = nlp("Dina went to school")
assert len(ruler.patterns) == 3 assert len(ruler.patterns) == 3
assert len(doc.ents) == 1 assert len(doc.ents) == 1
if isinstance(ruler, EntityRuler):
assert "PERSON||dina" in ruler.phrase_matcher
assert doc.ents[0].label_ == "PERSON" assert doc.ents[0].label_ == "PERSON"
assert doc.ents[0].text == "Duygu" assert doc.ents[0].text == "Dina"
assert "PERSON||duygu" in ruler.phrase_matcher if isinstance(ruler, EntityRuler):
ruler.remove("duygu") ruler.remove("dina")
doc = ruler(nlp.make_doc("Duygu went to school")) else:
ruler.remove_by_id("dina")
doc = nlp("Dina went to school")
assert len(doc.ents) == 0 assert len(doc.ents) == 0
assert "PERSON||duygu" not in ruler.phrase_matcher if isinstance(ruler, EntityRuler):
assert "PERSON||dina" not in ruler.phrase_matcher
assert len(ruler.patterns) == 2 assert len(ruler.patterns) == 2
def test_entity_ruler_remove_same_id_multiple_patterns(nlp): @pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
ruler = EntityRuler(nlp) def test_entity_ruler_remove_same_id_multiple_patterns(nlp, entity_ruler_factory):
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
patterns = [ patterns = [
{"label": "PERSON", "pattern": "Duygu", "id": "duygu"}, {"label": "PERSON", "pattern": "Dina", "id": "dina"},
{"label": "ORG", "pattern": "DuyguCorp", "id": "duygu"}, {"label": "ORG", "pattern": "DinaCorp", "id": "dina"},
{"label": "ORG", "pattern": "ACME", "id": "acme"}, {"label": "ORG", "pattern": "ACME", "id": "acme"},
] ]
ruler.add_patterns(patterns) ruler.add_patterns(patterns)
doc = ruler(nlp.make_doc("Duygu founded DuyguCorp and ACME.")) doc = nlp("Dina founded DinaCorp and ACME.")
assert len(ruler.patterns) == 3 assert len(ruler.patterns) == 3
assert "PERSON||duygu" in ruler.phrase_matcher if isinstance(ruler, EntityRuler):
assert "ORG||duygu" in ruler.phrase_matcher assert "PERSON||dina" in ruler.phrase_matcher
assert "ORG||dina" in ruler.phrase_matcher
assert len(doc.ents) == 3 assert len(doc.ents) == 3
ruler.remove("duygu") if isinstance(ruler, EntityRuler):
doc = ruler(nlp.make_doc("Duygu founded DuyguCorp and ACME.")) ruler.remove("dina")
else:
ruler.remove_by_id("dina")
doc = nlp("Dina founded DinaCorp and ACME.")
assert len(ruler.patterns) == 1 assert len(ruler.patterns) == 1
assert "PERSON||duygu" not in ruler.phrase_matcher if isinstance(ruler, EntityRuler):
assert "ORG||duygu" not in ruler.phrase_matcher assert "PERSON||dina" not in ruler.phrase_matcher
assert "ORG||dina" not in ruler.phrase_matcher
assert len(doc.ents) == 1 assert len(doc.ents) == 1
def test_entity_ruler_remove_nonexisting_pattern(nlp): @pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
ruler = EntityRuler(nlp) def test_entity_ruler_remove_nonexisting_pattern(nlp, entity_ruler_factory):
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
patterns = [ patterns = [
{"label": "PERSON", "pattern": "Duygu", "id": "duygu"}, {"label": "PERSON", "pattern": "Dina", "id": "dina"},
{"label": "ORG", "pattern": "ACME", "id": "acme"}, {"label": "ORG", "pattern": "ACME", "id": "acme"},
{"label": "ORG", "pattern": "ACM"}, {"label": "ORG", "pattern": "ACM"},
] ]
@ -428,82 +486,108 @@ def test_entity_ruler_remove_nonexisting_pattern(nlp):
assert len(ruler.patterns) == 3 assert len(ruler.patterns) == 3
with pytest.raises(ValueError): with pytest.raises(ValueError):
ruler.remove("nepattern") ruler.remove("nepattern")
assert len(ruler.patterns) == 3 if isinstance(ruler, SpanRuler):
with pytest.raises(ValueError):
ruler.remove_by_id("nepattern")
def test_entity_ruler_remove_several_patterns(nlp): @pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
ruler = EntityRuler(nlp) def test_entity_ruler_remove_several_patterns(nlp, entity_ruler_factory):
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
patterns = [ patterns = [
{"label": "PERSON", "pattern": "Duygu", "id": "duygu"}, {"label": "PERSON", "pattern": "Dina", "id": "dina"},
{"label": "ORG", "pattern": "ACME", "id": "acme"}, {"label": "ORG", "pattern": "ACME", "id": "acme"},
{"label": "ORG", "pattern": "ACM"}, {"label": "ORG", "pattern": "ACM"},
] ]
ruler.add_patterns(patterns) ruler.add_patterns(patterns)
doc = ruler(nlp.make_doc("Duygu founded her company ACME.")) doc = nlp("Dina founded her company ACME.")
assert len(ruler.patterns) == 3 assert len(ruler.patterns) == 3
assert len(doc.ents) == 2 assert len(doc.ents) == 2
assert doc.ents[0].label_ == "PERSON" assert doc.ents[0].label_ == "PERSON"
assert doc.ents[0].text == "Duygu" assert doc.ents[0].text == "Dina"
assert doc.ents[1].label_ == "ORG" assert doc.ents[1].label_ == "ORG"
assert doc.ents[1].text == "ACME" assert doc.ents[1].text == "ACME"
ruler.remove("duygu") if isinstance(ruler, EntityRuler):
doc = ruler(nlp.make_doc("Duygu founded her company ACME")) ruler.remove("dina")
else:
ruler.remove_by_id("dina")
doc = nlp("Dina founded her company ACME")
assert len(ruler.patterns) == 2 assert len(ruler.patterns) == 2
assert len(doc.ents) == 1 assert len(doc.ents) == 1
assert doc.ents[0].label_ == "ORG" assert doc.ents[0].label_ == "ORG"
assert doc.ents[0].text == "ACME" assert doc.ents[0].text == "ACME"
ruler.remove("acme") if isinstance(ruler, EntityRuler):
doc = ruler(nlp.make_doc("Duygu founded her company ACME")) ruler.remove("acme")
else:
ruler.remove_by_id("acme")
doc = nlp("Dina founded her company ACME")
assert len(ruler.patterns) == 1 assert len(ruler.patterns) == 1
assert len(doc.ents) == 0 assert len(doc.ents) == 0
def test_entity_ruler_remove_patterns_in_a_row(nlp): @pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
ruler = EntityRuler(nlp) def test_entity_ruler_remove_patterns_in_a_row(nlp, entity_ruler_factory):
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
patterns = [ patterns = [
{"label": "PERSON", "pattern": "Duygu", "id": "duygu"}, {"label": "PERSON", "pattern": "Dina", "id": "dina"},
{"label": "ORG", "pattern": "ACME", "id": "acme"}, {"label": "ORG", "pattern": "ACME", "id": "acme"},
{"label": "DATE", "pattern": "her birthday", "id": "bday"}, {"label": "DATE", "pattern": "her birthday", "id": "bday"},
{"label": "ORG", "pattern": "ACM"}, {"label": "ORG", "pattern": "ACM"},
] ]
ruler.add_patterns(patterns) ruler.add_patterns(patterns)
doc = ruler(nlp.make_doc("Duygu founded her company ACME on her birthday")) doc = nlp("Dina founded her company ACME on her birthday")
assert len(doc.ents) == 3 assert len(doc.ents) == 3
assert doc.ents[0].label_ == "PERSON" assert doc.ents[0].label_ == "PERSON"
assert doc.ents[0].text == "Duygu" assert doc.ents[0].text == "Dina"
assert doc.ents[1].label_ == "ORG" assert doc.ents[1].label_ == "ORG"
assert doc.ents[1].text == "ACME" assert doc.ents[1].text == "ACME"
assert doc.ents[2].label_ == "DATE" assert doc.ents[2].label_ == "DATE"
assert doc.ents[2].text == "her birthday" assert doc.ents[2].text == "her birthday"
ruler.remove("duygu") if isinstance(ruler, EntityRuler):
ruler.remove("acme") ruler.remove("dina")
ruler.remove("bday") ruler.remove("acme")
doc = ruler(nlp.make_doc("Duygu went to school")) ruler.remove("bday")
else:
ruler.remove_by_id("dina")
ruler.remove_by_id("acme")
ruler.remove_by_id("bday")
doc = nlp("Dina went to school")
assert len(doc.ents) == 0 assert len(doc.ents) == 0
def test_entity_ruler_remove_all_patterns(nlp): @pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
ruler = EntityRuler(nlp) def test_entity_ruler_remove_all_patterns(nlp, entity_ruler_factory):
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
patterns = [ patterns = [
{"label": "PERSON", "pattern": "Duygu", "id": "duygu"}, {"label": "PERSON", "pattern": "Dina", "id": "dina"},
{"label": "ORG", "pattern": "ACME", "id": "acme"}, {"label": "ORG", "pattern": "ACME", "id": "acme"},
{"label": "DATE", "pattern": "her birthday", "id": "bday"}, {"label": "DATE", "pattern": "her birthday", "id": "bday"},
] ]
ruler.add_patterns(patterns) ruler.add_patterns(patterns)
assert len(ruler.patterns) == 3 assert len(ruler.patterns) == 3
ruler.remove("duygu") if isinstance(ruler, EntityRuler):
ruler.remove("dina")
else:
ruler.remove_by_id("dina")
assert len(ruler.patterns) == 2 assert len(ruler.patterns) == 2
ruler.remove("acme") if isinstance(ruler, EntityRuler):
ruler.remove("acme")
else:
ruler.remove_by_id("acme")
assert len(ruler.patterns) == 1 assert len(ruler.patterns) == 1
ruler.remove("bday") if isinstance(ruler, EntityRuler):
ruler.remove("bday")
else:
ruler.remove_by_id("bday")
assert len(ruler.patterns) == 0 assert len(ruler.patterns) == 0
with pytest.warns(UserWarning): with pytest.warns(UserWarning):
doc = ruler(nlp.make_doc("Duygu founded her company ACME on her birthday")) doc = nlp("Dina founded her company ACME on her birthday")
assert len(doc.ents) == 0 assert len(doc.ents) == 0
def test_entity_ruler_remove_and_add(nlp): @pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
ruler = EntityRuler(nlp) def test_entity_ruler_remove_and_add(nlp, entity_ruler_factory):
ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
patterns = [{"label": "DATE", "pattern": "last time"}] patterns = [{"label": "DATE", "pattern": "last time"}]
ruler.add_patterns(patterns) ruler.add_patterns(patterns)
doc = ruler( doc = ruler(
@ -524,7 +608,10 @@ def test_entity_ruler_remove_and_add(nlp):
assert doc.ents[0].text == "last time" assert doc.ents[0].text == "last time"
assert doc.ents[1].label_ == "DATE" assert doc.ents[1].label_ == "DATE"
assert doc.ents[1].text == "this time" assert doc.ents[1].text == "this time"
ruler.remove("ttime") if isinstance(ruler, EntityRuler):
ruler.remove("ttime")
else:
ruler.remove_by_id("ttime")
doc = ruler( doc = ruler(
nlp.make_doc("I saw him last time we met, this time he brought some flowers") nlp.make_doc("I saw him last time we met, this time he brought some flowers")
) )
@ -547,7 +634,10 @@ def test_entity_ruler_remove_and_add(nlp):
) )
assert len(ruler.patterns) == 3 assert len(ruler.patterns) == 3
assert len(doc.ents) == 3 assert len(doc.ents) == 3
ruler.remove("ttime") if isinstance(ruler, EntityRuler):
ruler.remove("ttime")
else:
ruler.remove_by_id("ttime")
doc = ruler( doc = ruler(
nlp.make_doc( nlp.make_doc(
"I saw him last time we met, this time he brought some flowers, another time some chocolate." "I saw him last time we met, this time he brought some flowers, another time some chocolate."
View File
@ -4,13 +4,14 @@ import numpy
import pytest import pytest
from thinc.api import get_current_ops from thinc.api import get_current_ops
import spacy
from spacy.lang.en import English from spacy.lang.en import English
from spacy.lang.en.syntax_iterators import noun_chunks from spacy.lang.en.syntax_iterators import noun_chunks
from spacy.language import Language from spacy.language import Language
from spacy.pipeline import TrainablePipe from spacy.pipeline import TrainablePipe
from spacy.tokens import Doc from spacy.tokens import Doc
from spacy.training import Example from spacy.training import Example
from spacy.util import SimpleFrozenList, get_arg_names from spacy.util import SimpleFrozenList, get_arg_names, make_tempdir
from spacy.vocab import Vocab from spacy.vocab import Vocab
@ -602,3 +603,52 @@ def test_update_with_annotates():
assert results[component] == "".join(eg.predicted.text for eg in examples) assert results[component] == "".join(eg.predicted.text for eg in examples)
for component in components - set(components_to_annotate): for component in components - set(components_to_annotate):
assert results[component] == "" assert results[component] == ""
def test_load_disable_enable() -> None:
"""
Tests spacy.load() with dis-/enabling components.
"""
base_nlp = English()
for pipe in ("sentencizer", "tagger", "parser"):
base_nlp.add_pipe(pipe)
with make_tempdir() as tmp_dir:
base_nlp.to_disk(tmp_dir)
to_disable = ["parser", "tagger"]
to_enable = ["tagger", "parser"]
# Setting only `disable`.
nlp = spacy.load(tmp_dir, disable=to_disable)
assert all([comp_name in nlp.disabled for comp_name in to_disable])
# Setting only `enable`.
nlp = spacy.load(tmp_dir, enable=to_enable)
assert all(
[
(comp_name in nlp.disabled) is (comp_name not in to_enable)
for comp_name in nlp.component_names
]
)
# Testing consistent enable/disable combination.
nlp = spacy.load(
tmp_dir,
enable=to_enable,
disable=[
comp_name
for comp_name in nlp.component_names
if comp_name not in to_enable
],
)
assert all(
[
(comp_name in nlp.disabled) is (comp_name not in to_enable)
for comp_name in nlp.component_names
]
)
# Inconsistent enable/disable combination.
with pytest.raises(ValueError):
spacy.load(tmp_dir, enable=to_enable, disable=["parser"])
View File
@ -0,0 +1,465 @@
import pytest
import spacy
from spacy import registry
from spacy.errors import MatchPatternError
from spacy.tokens import Span
from spacy.training import Example
from spacy.tests.util import make_tempdir
from thinc.api import NumpyOps, get_current_ops
@pytest.fixture
@registry.misc("span_ruler_patterns")
def patterns():
return [
{"label": "HELLO", "pattern": "hello world", "id": "hello1"},
{"label": "BYE", "pattern": [{"LOWER": "bye"}, {"LOWER": "bye"}]},
{"label": "HELLO", "pattern": [{"ORTH": "HELLO"}], "id": "hello2"},
{"label": "COMPLEX", "pattern": [{"ORTH": "foo", "OP": "*"}]},
{"label": "TECH_ORG", "pattern": "Apple"},
{"label": "TECH_ORG", "pattern": "Microsoft"},
]
@pytest.fixture
def overlapping_patterns():
return [
{"label": "FOOBAR", "pattern": "foo bar"},
{"label": "BARBAZ", "pattern": "bar baz"},
]
@pytest.fixture
def person_org_patterns():
return [
{"label": "PERSON", "pattern": "Dina"},
{"label": "ORG", "pattern": "ACME"},
{"label": "ORG", "pattern": "ACM"},
]
@pytest.fixture
def person_org_date_patterns(person_org_patterns):
return person_org_patterns + [{"label": "DATE", "pattern": "June 14th"}]
def test_span_ruler_add_empty(patterns):
"""Test that patterns don't get added excessively."""
nlp = spacy.blank("xx")
ruler = nlp.add_pipe("span_ruler", config={"validate": True})
ruler.add_patterns(patterns)
pattern_count = sum(len(mm) for mm in ruler.matcher._patterns.values())
assert pattern_count > 0
ruler.add_patterns([])
after_count = sum(len(mm) for mm in ruler.matcher._patterns.values())
assert after_count == pattern_count
def test_span_ruler_init(patterns):
nlp = spacy.blank("xx")
ruler = nlp.add_pipe("span_ruler")
ruler.add_patterns(patterns)
assert len(ruler) == len(patterns)
assert len(ruler.labels) == 4
assert "HELLO" in ruler
assert "BYE" in ruler
doc = nlp("hello world bye bye")
assert len(doc.spans["ruler"]) == 2
assert doc.spans["ruler"][0].label_ == "HELLO"
assert doc.spans["ruler"][0].id_ == "hello1"
assert doc.spans["ruler"][1].label_ == "BYE"
assert doc.spans["ruler"][1].id_ == ""
def test_span_ruler_no_patterns_warns():
nlp = spacy.blank("xx")
ruler = nlp.add_pipe("span_ruler")
assert len(ruler) == 0
assert len(ruler.labels) == 0
assert nlp.pipe_names == ["span_ruler"]
with pytest.warns(UserWarning):
doc = nlp("hello world bye bye")
assert len(doc.spans["ruler"]) == 0
def test_span_ruler_init_patterns(patterns):
# initialize with patterns
nlp = spacy.blank("xx")
ruler = nlp.add_pipe("span_ruler")
assert len(ruler.labels) == 0
ruler.initialize(lambda: [], patterns=patterns)
assert len(ruler.labels) == 4
doc = nlp("hello world bye bye")
assert doc.spans["ruler"][0].label_ == "HELLO"
assert doc.spans["ruler"][1].label_ == "BYE"
nlp.remove_pipe("span_ruler")
# initialize with patterns from misc registry
nlp.config["initialize"]["components"]["span_ruler"] = {
"patterns": {"@misc": "span_ruler_patterns"}
}
ruler = nlp.add_pipe("span_ruler")
assert len(ruler.labels) == 0
nlp.initialize()
assert len(ruler.labels) == 4
doc = nlp("hello world bye bye")
assert doc.spans["ruler"][0].label_ == "HELLO"
assert doc.spans["ruler"][1].label_ == "BYE"
def test_span_ruler_init_clear(patterns):
"""Test that initialization clears patterns."""
nlp = spacy.blank("xx")
ruler = nlp.add_pipe("span_ruler")
ruler.add_patterns(patterns)
assert len(ruler.labels) == 4
ruler.initialize(lambda: [])
assert len(ruler.labels) == 0
def test_span_ruler_clear(patterns):
nlp = spacy.blank("xx")
ruler = nlp.add_pipe("span_ruler")
ruler.add_patterns(patterns)
assert len(ruler.labels) == 4
doc = nlp("hello world")
assert len(doc.spans["ruler"]) == 1
ruler.clear()
assert len(ruler.labels) == 0
with pytest.warns(UserWarning):
doc = nlp("hello world")
assert len(doc.spans["ruler"]) == 0
def test_span_ruler_existing(patterns):
nlp = spacy.blank("xx")
ruler = nlp.add_pipe("span_ruler", config={"overwrite": False})
ruler.add_patterns(patterns)
doc = nlp.make_doc("OH HELLO WORLD bye bye")
doc.spans["ruler"] = [doc[0:2]]
doc = nlp(doc)
assert len(doc.spans["ruler"]) == 3
assert doc.spans["ruler"][0] == doc[0:2]
assert doc.spans["ruler"][1].label_ == "HELLO"
assert doc.spans["ruler"][1].id_ == "hello2"
assert doc.spans["ruler"][2].label_ == "BYE"
assert doc.spans["ruler"][2].id_ == ""
def test_span_ruler_existing_overwrite(patterns):
nlp = spacy.blank("xx")
ruler = nlp.add_pipe("span_ruler", config={"overwrite": True})
ruler.add_patterns(patterns)
doc = nlp.make_doc("OH HELLO WORLD bye bye")
doc.spans["ruler"] = [doc[0:2]]
doc = nlp(doc)
assert len(doc.spans["ruler"]) == 2
assert doc.spans["ruler"][0].label_ == "HELLO"
assert doc.spans["ruler"][0].text == "HELLO"
assert doc.spans["ruler"][1].label_ == "BYE"
def test_span_ruler_serialize_bytes(patterns):
nlp = spacy.blank("xx")
ruler = nlp.add_pipe("span_ruler")
ruler.add_patterns(patterns)
assert len(ruler) == len(patterns)
assert len(ruler.labels) == 4
ruler_bytes = ruler.to_bytes()
new_nlp = spacy.blank("xx")
new_ruler = new_nlp.add_pipe("span_ruler")
assert len(new_ruler) == 0
assert len(new_ruler.labels) == 0
new_ruler = new_ruler.from_bytes(ruler_bytes)
assert len(new_ruler) == len(patterns)
assert len(new_ruler.labels) == 4
assert len(new_ruler.patterns) == len(ruler.patterns)
for pattern in ruler.patterns:
assert pattern in new_ruler.patterns
assert sorted(new_ruler.labels) == sorted(ruler.labels)
def test_span_ruler_validate():
nlp = spacy.blank("xx")
ruler = nlp.add_pipe("span_ruler")
validated_ruler = nlp.add_pipe(
"span_ruler", name="validated_span_ruler", config={"validate": True}
)
valid_pattern = {"label": "HELLO", "pattern": [{"LOWER": "HELLO"}]}
invalid_pattern = {"label": "HELLO", "pattern": [{"ASDF": "HELLO"}]}
# invalid pattern raises error without validate
with pytest.raises(ValueError):
ruler.add_patterns([invalid_pattern])
# valid pattern is added without errors with validate
validated_ruler.add_patterns([valid_pattern])
# invalid pattern raises error with validate
with pytest.raises(MatchPatternError):
validated_ruler.add_patterns([invalid_pattern])
def test_span_ruler_properties(patterns):
nlp = spacy.blank("xx")
ruler = nlp.add_pipe("span_ruler", config={"overwrite": True})
ruler.add_patterns(patterns)
assert sorted(ruler.labels) == sorted(set([p["label"] for p in patterns]))
def test_span_ruler_overlapping_spans(overlapping_patterns):
nlp = spacy.blank("xx")
ruler = nlp.add_pipe("span_ruler")
ruler.add_patterns(overlapping_patterns)
doc = ruler(nlp.make_doc("foo bar baz"))
assert len(doc.spans["ruler"]) == 2
assert doc.spans["ruler"][0].label_ == "FOOBAR"
assert doc.spans["ruler"][1].label_ == "BARBAZ"
def test_span_ruler_scorer(overlapping_patterns):
nlp = spacy.blank("xx")
ruler = nlp.add_pipe("span_ruler")
ruler.add_patterns(overlapping_patterns)
text = "foo bar baz"
pred_doc = ruler(nlp.make_doc(text))
assert len(pred_doc.spans["ruler"]) == 2
assert pred_doc.spans["ruler"][0].label_ == "FOOBAR"
assert pred_doc.spans["ruler"][1].label_ == "BARBAZ"
ref_doc = nlp.make_doc(text)
ref_doc.spans["ruler"] = [Span(ref_doc, 0, 2, label="FOOBAR")]
scores = nlp.evaluate([Example(pred_doc, ref_doc)])
assert scores["spans_ruler_p"] == 0.5
assert scores["spans_ruler_r"] == 1.0
@pytest.mark.parametrize("n_process", [1, 2])
def test_span_ruler_multiprocessing(n_process):
if isinstance(get_current_ops(), NumpyOps) or n_process < 2:
texts = ["I enjoy eating Pizza Hut pizza."]
patterns = [{"label": "FASTFOOD", "pattern": "Pizza Hut"}]
nlp = spacy.blank("xx")
ruler = nlp.add_pipe("span_ruler")
ruler.add_patterns(patterns)
for doc in nlp.pipe(texts, n_process=2):
for ent in doc.spans["ruler"]:
assert ent.label_ == "FASTFOOD"
def test_span_ruler_serialize_dir(patterns):
nlp = spacy.blank("xx")
ruler = nlp.add_pipe("span_ruler")
ruler.add_patterns(patterns)
with make_tempdir() as d:
ruler.to_disk(d / "test_ruler")
ruler.from_disk(d / "test_ruler") # read from an existing directory
with pytest.raises(ValueError):
ruler.from_disk(d / "non_existing_dir") # read from a bad directory
def test_span_ruler_remove_basic(person_org_patterns):
nlp = spacy.blank("xx")
ruler = nlp.add_pipe("span_ruler")
ruler.add_patterns(person_org_patterns)
doc = ruler(nlp.make_doc("Dina went to school"))
assert len(ruler.patterns) == 3
assert len(doc.spans["ruler"]) == 1
assert doc.spans["ruler"][0].label_ == "PERSON"
assert doc.spans["ruler"][0].text == "Dina"
ruler.remove("PERSON")
doc = ruler(nlp.make_doc("Dina went to school"))
assert len(doc.spans["ruler"]) == 0
assert len(ruler.patterns) == 2
def test_span_ruler_remove_nonexisting_pattern(person_org_patterns):
nlp = spacy.blank("xx")
ruler = nlp.add_pipe("span_ruler")
ruler.add_patterns(person_org_patterns)
assert len(ruler.patterns) == 3
with pytest.raises(ValueError):
ruler.remove("NE")
with pytest.raises(ValueError):
ruler.remove_by_id("NE")
def test_span_ruler_remove_several_patterns(person_org_patterns):
nlp = spacy.blank("xx")
ruler = nlp.add_pipe("span_ruler")
ruler.add_patterns(person_org_patterns)
doc = ruler(nlp.make_doc("Dina founded the company ACME."))
assert len(ruler.patterns) == 3
assert len(doc.spans["ruler"]) == 2
assert doc.spans["ruler"][0].label_ == "PERSON"
assert doc.spans["ruler"][0].text == "Dina"
assert doc.spans["ruler"][1].label_ == "ORG"
assert doc.spans["ruler"][1].text == "ACME"
ruler.remove("PERSON")
doc = ruler(nlp.make_doc("Dina founded the company ACME"))
assert len(ruler.patterns) == 2
assert len(doc.spans["ruler"]) == 1
assert doc.spans["ruler"][0].label_ == "ORG"
assert doc.spans["ruler"][0].text == "ACME"
ruler.remove("ORG")
with pytest.warns(UserWarning):
doc = ruler(nlp.make_doc("Dina founded the company ACME"))
assert len(ruler.patterns) == 0
assert len(doc.spans["ruler"]) == 0
def test_span_ruler_remove_patterns_in_a_row(person_org_date_patterns):
nlp = spacy.blank("xx")
ruler = nlp.add_pipe("span_ruler")
ruler.add_patterns(person_org_date_patterns)
doc = ruler(nlp.make_doc("Dina founded the company ACME on June 14th"))
assert len(doc.spans["ruler"]) == 3
assert doc.spans["ruler"][0].label_ == "PERSON"
assert doc.spans["ruler"][0].text == "Dina"
assert doc.spans["ruler"][1].label_ == "ORG"
assert doc.spans["ruler"][1].text == "ACME"
assert doc.spans["ruler"][2].label_ == "DATE"
assert doc.spans["ruler"][2].text == "June 14th"
ruler.remove("ORG")
ruler.remove("DATE")
doc = ruler(nlp.make_doc("Dina went to school"))
assert len(doc.spans["ruler"]) == 1
def test_span_ruler_remove_all_patterns(person_org_date_patterns):
nlp = spacy.blank("xx")
ruler = nlp.add_pipe("span_ruler")
ruler.add_patterns(person_org_date_patterns)
assert len(ruler.patterns) == 4
ruler.remove("PERSON")
assert len(ruler.patterns) == 3
ruler.remove("ORG")
assert len(ruler.patterns) == 1
ruler.remove("DATE")
assert len(ruler.patterns) == 0
with pytest.warns(UserWarning):
doc = ruler(nlp.make_doc("Dina founded the company ACME on June 14th"))
assert len(doc.spans["ruler"]) == 0
def test_span_ruler_remove_and_add():
nlp = spacy.blank("xx")
ruler = nlp.add_pipe("span_ruler")
patterns1 = [{"label": "DATE1", "pattern": "last time"}]
ruler.add_patterns(patterns1)
doc = ruler(
nlp.make_doc("I saw him last time we met, this time he brought some flowers")
)
assert len(ruler.patterns) == 1
assert len(doc.spans["ruler"]) == 1
assert doc.spans["ruler"][0].label_ == "DATE1"
assert doc.spans["ruler"][0].text == "last time"
patterns2 = [{"label": "DATE2", "pattern": "this time"}]
ruler.add_patterns(patterns2)
doc = ruler(
nlp.make_doc("I saw him last time we met, this time he brought some flowers")
)
assert len(ruler.patterns) == 2
assert len(doc.spans["ruler"]) == 2
assert doc.spans["ruler"][0].label_ == "DATE1"
assert doc.spans["ruler"][0].text == "last time"
assert doc.spans["ruler"][1].label_ == "DATE2"
assert doc.spans["ruler"][1].text == "this time"
ruler.remove("DATE1")
doc = ruler(
nlp.make_doc("I saw him last time we met, this time he brought some flowers")
)
assert len(ruler.patterns) == 1
assert len(doc.spans["ruler"]) == 1
assert doc.spans["ruler"][0].label_ == "DATE2"
assert doc.spans["ruler"][0].text == "this time"
ruler.add_patterns(patterns1)
doc = ruler(
nlp.make_doc("I saw him last time we met, this time he brought some flowers")
)
assert len(ruler.patterns) == 2
assert len(doc.spans["ruler"]) == 2
patterns3 = [{"label": "DATE3", "pattern": "another time"}]
ruler.add_patterns(patterns3)
doc = ruler(
nlp.make_doc(
"I saw him last time we met, this time he brought some flowers, another time some chocolate."
)
)
assert len(ruler.patterns) == 3
assert len(doc.spans["ruler"]) == 3
ruler.remove("DATE3")
doc = ruler(
nlp.make_doc(
"I saw him last time we met, this time he brought some flowers, another time some chocolate."
)
)
assert len(ruler.patterns) == 2
assert len(doc.spans["ruler"]) == 2
def test_span_ruler_spans_filter(overlapping_patterns):
nlp = spacy.blank("xx")
ruler = nlp.add_pipe(
"span_ruler",
config={"spans_filter": {"@misc": "spacy.first_longest_spans_filter.v1"}},
)
ruler.add_patterns(overlapping_patterns)
doc = ruler(nlp.make_doc("foo bar baz"))
assert len(doc.spans["ruler"]) == 1
assert doc.spans["ruler"][0].label_ == "FOOBAR"
def test_span_ruler_ents_default_filter(overlapping_patterns):
nlp = spacy.blank("xx")
ruler = nlp.add_pipe("span_ruler", config={"annotate_ents": True})
ruler.add_patterns(overlapping_patterns)
doc = ruler(nlp.make_doc("foo bar baz"))
assert len(doc.ents) == 1
assert doc.ents[0].label_ == "FOOBAR"
def test_span_ruler_ents_overwrite_filter(overlapping_patterns):
nlp = spacy.blank("xx")
ruler = nlp.add_pipe(
"span_ruler",
config={
"annotate_ents": True,
"overwrite": False,
"ents_filter": {"@misc": "spacy.prioritize_new_ents_filter.v1"},
},
)
ruler.add_patterns(overlapping_patterns)
# overlapping ents are clobbered, non-overlapping ents are preserved
doc = nlp.make_doc("foo bar baz a b c")
doc.ents = [Span(doc, 1, 3, label="BARBAZ"), Span(doc, 3, 6, label="ABC")]
doc = ruler(doc)
assert len(doc.ents) == 2
assert doc.ents[0].label_ == "FOOBAR"
assert doc.ents[1].label_ == "ABC"
def test_span_ruler_ents_bad_filter(overlapping_patterns):
@registry.misc("test_pass_through_filter")
def make_pass_through_filter():
def pass_through_filter(spans1, spans2):
return spans1 + spans2
return pass_through_filter
nlp = spacy.blank("xx")
ruler = nlp.add_pipe(
"span_ruler",
config={
"annotate_ents": True,
"ents_filter": {"@misc": "test_pass_through_filter"},
},
)
ruler.add_patterns(overlapping_patterns)
with pytest.raises(ValueError):
ruler(nlp.make_doc("foo bar baz"))
View File
@ -0,0 +1,161 @@
import pytest
from spacy.tokens import Span, SpanGroup
from spacy.tokens._dict_proxies import SpanGroups
@pytest.mark.issue(10685)
def test_issue10685(en_tokenizer):
"""Test `SpanGroups` de/serialization"""
# Start with a Doc with no SpanGroups
doc = en_tokenizer("Will it blend?")
# Test empty `SpanGroups` de/serialization:
assert len(doc.spans) == 0
doc.spans.from_bytes(doc.spans.to_bytes())
assert len(doc.spans) == 0
# Test non-empty `SpanGroups` de/serialization:
doc.spans["test"] = SpanGroup(doc, name="test", spans=[doc[0:1]])
doc.spans["test2"] = SpanGroup(doc, name="test", spans=[doc[1:2]])
def assert_spangroups():
assert len(doc.spans) == 2
assert doc.spans["test"].name == "test"
assert doc.spans["test2"].name == "test"
assert list(doc.spans["test"]) == [doc[0:1]]
assert list(doc.spans["test2"]) == [doc[1:2]]
# Sanity check the currently-expected behavior
assert_spangroups()
# Now test serialization/deserialization:
doc.spans.from_bytes(doc.spans.to_bytes())
assert_spangroups()
def test_span_groups_serialization_mismatches(en_tokenizer):
"""Test the serialization of multiple mismatching `SpanGroups` keys and `SpanGroup.name`s"""
doc = en_tokenizer("How now, brown cow?")
# Some variety:
# 1 SpanGroup where its name matches its key
# 2 SpanGroups that have the same name--which is not a key
# 2 SpanGroups that have the same name--which is a key
# 1 SpanGroup that is a value for 2 different keys (where its name is a key)
# 1 SpanGroup that is a value for 2 different keys (where its name is not a key)
groups = doc.spans
groups["key1"] = SpanGroup(doc, name="key1", spans=[doc[0:1], doc[1:2]])
groups["key2"] = SpanGroup(doc, name="too", spans=[doc[3:4], doc[4:5]])
groups["key3"] = SpanGroup(doc, name="too", spans=[doc[1:2], doc[0:1]])
groups["key4"] = SpanGroup(doc, name="key4", spans=[doc[0:1]])
groups["key5"] = SpanGroup(doc, name="key4", spans=[doc[0:1]])
sg6 = SpanGroup(doc, name="key6", spans=[doc[0:1]])
groups["key6"] = sg6
groups["key7"] = sg6
sg8 = SpanGroup(doc, name="also", spans=[doc[1:2]])
groups["key8"] = sg8
groups["key9"] = sg8
regroups = SpanGroups(doc).from_bytes(groups.to_bytes())
# Assert regroups == groups
assert regroups.keys() == groups.keys()
for key, regroup in regroups.items():
# Assert regroup == groups[key]
assert regroup.name == groups[key].name
assert list(regroup) == list(groups[key])
@pytest.mark.parametrize(
"spans_bytes,doc_text,expected_spangroups,expected_warning",
# The bytestrings below were generated from an earlier version of spaCy
# that serialized `SpanGroups` as a list of SpanGroup bytes (via SpanGroups.to_bytes).
# Comments preceding the bytestrings indicate from what Doc they were created.
[
# Empty SpanGroups:
(b"\x90", "", {}, False),
# doc = nlp("Will it blend?")
# doc.spans['test'] = SpanGroup(doc, name='test', spans=[doc[0:1]])
(
b"\x91\xc4C\x83\xa4name\xa4test\xa5attrs\x80\xa5spans\x91\xc4(\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x04",
"Will it blend?",
{"test": {"name": "test", "spans": [(0, 1)]}},
False,
),
# doc = nlp("Will it blend?")
# doc.spans['test'] = SpanGroup(doc, name='test', spans=[doc[0:1]])
# doc.spans['test2'] = SpanGroup(doc, name='test', spans=[doc[1:2]])
(
b"\x92\xc4C\x83\xa4name\xa4test\xa5attrs\x80\xa5spans\x91\xc4(\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x04\xc4C\x83\xa4name\xa4test\xa5attrs\x80\xa5spans\x91\xc4(\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x05\x00\x00\x00\x07",
"Will it blend?",
# We expect only 1 SpanGroup to be in doc.spans in this example
# because there are 2 `SpanGroup`s that have the same .name. See #10685.
{"test": {"name": "test", "spans": [(1, 2)]}},
True,
),
# doc = nlp('How now, brown cow?')
# doc.spans['key1'] = SpanGroup(doc, name='key1', spans=[doc[0:1], doc[1:2]])
# doc.spans['key2'] = SpanGroup(doc, name='too', spans=[doc[3:4], doc[4:5]])
# doc.spans['key3'] = SpanGroup(doc, name='too', spans=[doc[1:2], doc[0:1]])
# doc.spans['key4'] = SpanGroup(doc, name='key4', spans=[doc[0:1]])
# doc.spans['key5'] = SpanGroup(doc, name='key4', spans=[doc[0:1]])
(
b"\x95\xc4m\x83\xa4name\xa4key1\xa5attrs\x80\xa5spans\x92\xc4(\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x03\xc4(\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x07\xc4l\x83\xa4name\xa3too\xa5attrs\x80\xa5spans\x92\xc4(\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00\t\x00\x00\x00\x0e\xc4(\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\x0f\x00\x00\x00\x12\xc4l\x83\xa4name\xa3too\xa5attrs\x80\xa5spans\x92\xc4(\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x07\xc4(\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x03\xc4C\x83\xa4name\xa4key4\xa5attrs\x80\xa5spans\x91\xc4(\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x03\xc4C\x83\xa4name\xa4key4\xa5attrs\x80\xa5spans\x91\xc4(\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x03",
"How now, brown cow?",
{
"key1": {"name": "key1", "spans": [(0, 1), (1, 2)]},
"too": {"name": "too", "spans": [(1, 2), (0, 1)]},
"key4": {"name": "key4", "spans": [(0, 1)]},
},
True,
),
],
)
def test_deserialize_span_groups_compat(
en_tokenizer, spans_bytes, doc_text, expected_spangroups, expected_warning
):
"""Test backwards-compatibility of `SpanGroups` deserialization.
This uses serializations (bytes) from a prior version of spaCy (before 3.3.1).
spans_bytes (bytes): Serialized `SpanGroups` object.
doc_text (str): Doc text.
expected_spangroups (dict):
Dict mapping every expected (after deserialization) `SpanGroups` key
to a SpanGroup's "args", where a SpanGroup's args are given as a dict:
{"name": span_group.name,
"spans": [(span0.start, span0.end), ...]}
expected_warning (bool): Whether a warning is to be expected from .from_bytes()
--i.e. if more than 1 SpanGroup has the same .name within the `SpanGroups`.
"""
doc = en_tokenizer(doc_text)
if expected_warning:
with pytest.warns(UserWarning):
doc.spans.from_bytes(spans_bytes)
else:
# TODO: explicitly check for lack of a warning
doc.spans.from_bytes(spans_bytes)
assert doc.spans.keys() == expected_spangroups.keys()
for name, spangroup_args in expected_spangroups.items():
assert doc.spans[name].name == spangroup_args["name"]
spans = [Span(doc, start, end) for start, end in spangroup_args["spans"]]
assert list(doc.spans[name]) == spans
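Why the legacy format can silently drop a group, as the expected_warning=True cases above anticipate: it stored only a list of SpanGroup payloads, and each payload carries the group's .name but not its dict key, so groups that share a name collapse on load. A toy model of that collapse, with plain dicts standing in for the real payloads:

legacy_payloads = [{"name": "test", "spans": [(0, 1)]},
                   {"name": "test", "spans": [(1, 2)]}]
restored = {payload["name"]: payload for payload in legacy_payloads}
assert list(restored) == ["test"]             # only one key survives
assert restored["test"]["spans"] == [(1, 2)]  # the later group wins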
def test_span_groups_serialization(en_tokenizer):
doc = en_tokenizer("0 1 2 3 4 5 6")
span_groups = SpanGroups(doc)
spans = [doc[0:2], doc[1:3]]
sg1 = SpanGroup(doc, spans=spans)
span_groups["key1"] = sg1
span_groups["key2"] = sg1
span_groups["key3"] = []
reloaded_span_groups = SpanGroups(doc).from_bytes(span_groups.to_bytes())
assert span_groups.keys() == reloaded_span_groups.keys()
for key, value in span_groups.items():
assert all(
span == reloaded_span
for span, reloaded_span in zip(span_groups[key], reloaded_span_groups[key])
)

View File

@ -589,6 +589,7 @@ def test_string_to_list_intify(value):
assert string_to_list(value, intify=True) == [1, 2, 3] assert string_to_list(value, intify=True) == [1, 2, 3]
@pytest.mark.skip(reason="Temporarily skip for dev version")
def test_download_compatibility(): def test_download_compatibility():
spec = SpecifierSet("==" + about.__version__) spec = SpecifierSet("==" + about.__version__)
spec.prereleases = False spec.prereleases = False
@ -599,6 +600,7 @@ def test_download_compatibility():
assert get_minor_version(about.__version__) == get_minor_version(version) assert get_minor_version(about.__version__) == get_minor_version(version)
@pytest.mark.skip(reason="Temporarily skip for dev version")
def test_validate_compatibility_table(): def test_validate_compatibility_table():
spec = SpecifierSet("==" + about.__version__) spec = SpecifierSet("==" + about.__version__)
spec.prereleases = False spec.prereleases = False

View File

@ -60,11 +60,12 @@ def test_readers():
assert isinstance(extra_corpus, Callable) assert isinstance(extra_corpus, Callable)
# TODO: enable IMDB test once Stanford servers are back up and running
@pytest.mark.slow @pytest.mark.slow
@pytest.mark.parametrize( @pytest.mark.parametrize(
"reader,additional_config", "reader,additional_config",
[ [
("ml_datasets.imdb_sentiment.v1", {"train_limit": 10, "dev_limit": 10}), # ("ml_datasets.imdb_sentiment.v1", {"train_limit": 10, "dev_limit": 10}),
("ml_datasets.dbpedia.v1", {"train_limit": 10, "dev_limit": 10}), ("ml_datasets.dbpedia.v1", {"train_limit": 10, "dev_limit": 10}),
("ml_datasets.cmu_movies.v1", {"limit": 10, "freq_cutoff": 200, "split": 0.8}), ("ml_datasets.cmu_movies.v1", {"limit": 10, "freq_cutoff": 200, "split": 0.8}),
], ],

View File

@ -671,13 +671,13 @@ def test_gold_ner_missing_tags(en_tokenizer):
def test_projectivize(en_tokenizer): def test_projectivize(en_tokenizer):
doc = en_tokenizer("He pretty quickly walks away") doc = en_tokenizer("He pretty quickly walks away")
heads = [3, 2, 3, 0, 2] heads = [3, 2, 3, 3, 2]
deps = ["dep"] * len(heads) deps = ["dep"] * len(heads)
example = Example.from_dict(doc, {"heads": heads, "deps": deps}) example = Example.from_dict(doc, {"heads": heads, "deps": deps})
proj_heads, proj_labels = example.get_aligned_parse(projectivize=True) proj_heads, proj_labels = example.get_aligned_parse(projectivize=True)
nonproj_heads, nonproj_labels = example.get_aligned_parse(projectivize=False) nonproj_heads, nonproj_labels = example.get_aligned_parse(projectivize=False)
assert proj_heads == [3, 2, 3, 0, 3] assert proj_heads == [3, 2, 3, 3, 3]
assert nonproj_heads == [3, 2, 3, 0, 2] assert nonproj_heads == [3, 2, 3, 3, 2]
def test_iob_to_biluo(): def test_iob_to_biluo():

View File

@ -5,6 +5,7 @@ import srsly
from spacy.tokens import Doc from spacy.tokens import Doc
from spacy.vocab import Vocab from spacy.vocab import Vocab
from spacy.util import make_tempdir # noqa: F401 from spacy.util import make_tempdir # noqa: F401
from spacy.training import split_bilu_label
from thinc.api import get_current_ops from thinc.api import get_current_ops
@ -40,7 +41,7 @@ def apply_transition_sequence(parser, doc, sequence):
desired state.""" desired state."""
for action_name in sequence: for action_name in sequence:
if "-" in action_name: if "-" in action_name:
move, label = action_name.split("-") move, label = split_bilu_label(action_name)
parser.add_label(label) parser.add_label(label)
with parser.step_through(doc) as stepwise: with parser.step_through(doc) as stepwise:
for transition in sequence: for transition in sequence:

View File

@ -1,10 +1,11 @@
from typing import Iterable, Tuple, Union, Optional, TYPE_CHECKING from typing import Dict, Iterable, List, Tuple, Union, Optional, TYPE_CHECKING
import warnings
import weakref import weakref
from collections import UserDict from collections import UserDict
import srsly import srsly
from .span_group import SpanGroup from .span_group import SpanGroup
from ..errors import Errors from ..errors import Errors, Warnings
if TYPE_CHECKING: if TYPE_CHECKING:
@ -16,7 +17,7 @@ if TYPE_CHECKING:
# Why inherit from UserDict instead of dict here? # Why inherit from UserDict instead of dict here?
# Well, the 'dict' class doesn't necessarily delegate everything nicely, # Well, the 'dict' class doesn't necessarily delegate everything nicely,
# for performance reasons. The UserDict is slower but better behaved. # for performance reasons. The UserDict is slower but better behaved.
# See https://treyhunner.com/2019/04/why-you-shouldnt-inherit-from-list-and-dict-in-python/0ww # See https://treyhunner.com/2019/04/why-you-shouldnt-inherit-from-list-and-dict-in-python/
class SpanGroups(UserDict): class SpanGroups(UserDict):
"""A dict-like proxy held by the Doc, to control access to span groups.""" """A dict-like proxy held by the Doc, to control access to span groups."""
@ -53,20 +54,52 @@ class SpanGroups(UserDict):
return super().setdefault(key, default=default) return super().setdefault(key, default=default)
def to_bytes(self) -> bytes: def to_bytes(self) -> bytes:
# We don't need to serialize this as a dict, because the groups # We serialize this as a dict in order to track the key(s) a SpanGroup
# know their names. # is a value of (in a backward- and forward-compatible way), since
# a SpanGroup can have a key that doesn't match its `.name` (See #10685)
if len(self) == 0: if len(self) == 0:
return self._EMPTY_BYTES return self._EMPTY_BYTES
msg = [value.to_bytes() for value in self.values()] msg: Dict[bytes, List[str]] = {}
for key, value in self.items():
msg.setdefault(value.to_bytes(), []).append(key)
return srsly.msgpack_dumps(msg) return srsly.msgpack_dumps(msg)
def from_bytes(self, bytes_data: bytes) -> "SpanGroups": def from_bytes(self, bytes_data: bytes) -> "SpanGroups":
msg = [] if bytes_data == self._EMPTY_BYTES else srsly.msgpack_loads(bytes_data) # backwards-compatibility: bytes_data may be one of:
# b'', a serialized empty list, a serialized list of SpanGroup bytes
# or a serialized dict of SpanGroup bytes -> keys
msg = (
[]
if not bytes_data or bytes_data == self._EMPTY_BYTES
else srsly.msgpack_loads(bytes_data)
)
self.clear() self.clear()
doc = self._ensure_doc() doc = self._ensure_doc()
for value_bytes in msg: if isinstance(msg, list):
group = SpanGroup(doc).from_bytes(value_bytes) # This is either the 1st version of `SpanGroups` serialization
self[group.name] = group # or there were no SpanGroups serialized
for value_bytes in msg:
group = SpanGroup(doc).from_bytes(value_bytes)
if group.name in self:
# Display a warning if `msg` contains `SpanGroup`s
# that have the same .name (attribute).
# Because, for `SpanGroups` serialized as lists,
# only 1 SpanGroup per .name is loaded. (See #10685)
warnings.warn(
Warnings.W120.format(
group_name=group.name, group_values=self[group.name]
)
)
self[group.name] = group
else:
for value_bytes, keys in msg.items():
group = SpanGroup(doc).from_bytes(value_bytes)
# Deserialize `SpanGroup`s as copies because it's possible for two
# different `SpanGroup`s (pre-serialization) to have the same bytes
# (since they can have the same `.name`).
self[keys[0]] = group
for key in keys[1:]:
self[key] = group.copy()
return self return self
def _ensure_doc(self) -> "Doc": def _ensure_doc(self) -> "Doc":
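The dict-based format above exists so that a `SpanGroup` stored under several keys, or under a key that differs from its `.name`, survives a round trip. A minimal sketch of the intended behaviour (not part of the commit, assuming the 3.3.1 `SpanGroups` API shown here):

```python
import spacy
from spacy.tokens import SpanGroup

nlp = spacy.blank("en")
doc = nlp("How now, brown cow?")
# One group whose .name matches neither of the keys it is stored under
group = SpanGroup(doc, name="shared", spans=[doc[0:1], doc[1:2]])
doc.spans["key_a"] = group
doc.spans["key_b"] = group

# Round-trip through the new dict serialization: both keys come back,
# with the second key receiving a copy of the group
reloaded = nlp("How now, brown cow?")
reloaded.spans.from_bytes(doc.spans.to_bytes())
assert set(reloaded.spans.keys()) == {"key_a", "key_b"}
assert reloaded.spans["key_a"].name == "shared"
```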

View File

@ -170,6 +170,9 @@ class Doc:
def extend_tensor(self, tensor: Floats2d) -> None: ... def extend_tensor(self, tensor: Floats2d) -> None: ...
def retokenize(self) -> Retokenizer: ... def retokenize(self) -> Retokenizer: ...
def to_json(self, underscore: Optional[List[str]] = ...) -> Dict[str, Any]: ... def to_json(self, underscore: Optional[List[str]] = ...) -> Dict[str, Any]: ...
def from_json(
self, doc_json: Dict[str, Any] = ..., validate: bool = False
) -> Doc: ...
def to_utf8_array(self, nr_char: int = ...) -> Ints2d: ... def to_utf8_array(self, nr_char: int = ...) -> Ints2d: ...
@staticmethod @staticmethod
def _get_array_attrs() -> Tuple[Any]: ... def _get_array_attrs() -> Tuple[Any]: ...

View File

@ -1,4 +1,6 @@
# cython: infer_types=True, bounds_check=False, profile=True # cython: infer_types=True, bounds_check=False, profile=True
from typing import Set
cimport cython cimport cython
cimport numpy as np cimport numpy as np
from libc.string cimport memcpy from libc.string cimport memcpy
@ -31,10 +33,11 @@ from ..errors import Errors, Warnings
from ..morphology import Morphology from ..morphology import Morphology
from .. import util from .. import util
from .. import parts_of_speech from .. import parts_of_speech
from .. import schemas
from .underscore import Underscore, get_ext_args from .underscore import Underscore, get_ext_args
from ._retokenize import Retokenizer from ._retokenize import Retokenizer
from ._serialize import ALL_ATTRS as DOCBIN_ALL_ATTRS from ._serialize import ALL_ATTRS as DOCBIN_ALL_ATTRS
from ..util import get_words_and_spaces
DEF PADDING = 5 DEF PADDING = 5
@ -516,7 +519,7 @@ cdef class Doc:
def doc(self): def doc(self):
return self return self
def char_span(self, int start_idx, int end_idx, label=0, kb_id=0, vector=None, alignment_mode="strict"): def char_span(self, int start_idx, int end_idx, label=0, kb_id=0, vector=None, alignment_mode="strict", span_id=0):
"""Create a `Span` object from the slice """Create a `Span` object from the slice
`doc.text[start_idx : end_idx]`. Returns None if no valid `Span` can be `doc.text[start_idx : end_idx]`. Returns None if no valid `Span` can be
created. created.
@ -575,7 +578,7 @@ cdef class Doc:
start += 1 start += 1
# Currently we have the token index, we want the range-end index # Currently we have the token index, we want the range-end index
end += 1 end += 1
cdef Span span = Span(self, start, end, label=label, kb_id=kb_id, vector=vector) cdef Span span = Span(self, start, end, label=label, kb_id=kb_id, span_id=span_id, vector=vector)
return span return span
def similarity(self, other): def similarity(self, other):
@ -713,6 +716,7 @@ cdef class Doc:
cdef int start = -1 cdef int start = -1
cdef attr_t label = 0 cdef attr_t label = 0
cdef attr_t kb_id = 0 cdef attr_t kb_id = 0
cdef attr_t ent_id = 0
output = [] output = []
for i in range(self.length): for i in range(self.length):
token = &self.c[i] token = &self.c[i]
@ -723,18 +727,20 @@ cdef class Doc:
elif token.ent_iob == 2 or token.ent_iob == 0 or \ elif token.ent_iob == 2 or token.ent_iob == 0 or \
(token.ent_iob == 3 and token.ent_type == 0): (token.ent_iob == 3 and token.ent_type == 0):
if start != -1: if start != -1:
output.append(Span(self, start, i, label=label, kb_id=kb_id)) output.append(Span(self, start, i, label=label, kb_id=kb_id, span_id=ent_id))
start = -1 start = -1
label = 0 label = 0
kb_id = 0 kb_id = 0
ent_id = 0
elif token.ent_iob == 3: elif token.ent_iob == 3:
if start != -1: if start != -1:
output.append(Span(self, start, i, label=label, kb_id=kb_id)) output.append(Span(self, start, i, label=label, kb_id=kb_id, span_id=ent_id))
start = i start = i
label = token.ent_type label = token.ent_type
kb_id = token.ent_kb_id kb_id = token.ent_kb_id
ent_id = token.ent_id
if start != -1: if start != -1:
output.append(Span(self, start, self.length, label=label, kb_id=kb_id)) output.append(Span(self, start, self.length, label=label, kb_id=kb_id, span_id=ent_id))
# remove empty-label spans # remove empty-label spans
output = [o for o in output if o.label_ != ""] output = [o for o in output if o.label_ != ""]
return tuple(output) return tuple(output)
@ -743,14 +749,14 @@ cdef class Doc:
# TODO: # TODO:
# 1. Test basic data-driven ORTH gazetteer # 1. Test basic data-driven ORTH gazetteer
# 2. Test more nuanced date and currency regex # 2. Test more nuanced date and currency regex
cdef attr_t entity_type, kb_id cdef attr_t entity_type, kb_id, ent_id
cdef int ent_start, ent_end cdef int ent_start, ent_end
ent_spans = [] ent_spans = []
for ent_info in ents: for ent_info in ents:
entity_type_, kb_id, ent_start, ent_end = get_entity_info(ent_info) entity_type_, kb_id, ent_start, ent_end, ent_id = get_entity_info(ent_info)
if isinstance(entity_type_, str): if isinstance(entity_type_, str):
self.vocab.strings.add(entity_type_) self.vocab.strings.add(entity_type_)
span = Span(self, ent_start, ent_end, label=entity_type_, kb_id=kb_id) span = Span(self, ent_start, ent_end, label=entity_type_, kb_id=kb_id, span_id=ent_id)
ent_spans.append(span) ent_spans.append(span)
self.set_ents(ent_spans, default=SetEntsDefault.outside) self.set_ents(ent_spans, default=SetEntsDefault.outside)
@ -801,6 +807,9 @@ cdef class Doc:
self.c[i].ent_iob = 1 self.c[i].ent_iob = 1
self.c[i].ent_type = span.label self.c[i].ent_type = span.label
self.c[i].ent_kb_id = span.kb_id self.c[i].ent_kb_id = span.kb_id
# for backwards compatibility in v3, only set ent_id from
# span.id if it's set, otherwise don't override
self.c[i].ent_id = span.id if span.id else self.c[i].ent_id
for span in blocked: for span in blocked:
for i in range(span.start, span.end): for i in range(span.start, span.end):
self.c[i].ent_iob = 3 self.c[i].ent_iob = 3
@ -1180,6 +1189,7 @@ cdef class Doc:
span.end_char + char_offset, span.end_char + char_offset,
span.label, span.label,
span.kb_id, span.kb_id,
span.id,
span.text, # included as a check span.text, # included as a check
)) ))
char_offset += len(doc.text) char_offset += len(doc.text)
@ -1215,8 +1225,9 @@ cdef class Doc:
span_tuple[1], span_tuple[1],
label=span_tuple[2], label=span_tuple[2],
kb_id=span_tuple[3], kb_id=span_tuple[3],
span_id=span_tuple[4],
) )
text = span_tuple[4] text = span_tuple[5]
if span is not None and span.text == text: if span is not None and span.text == text:
concat_doc.spans[key].append(span) concat_doc.spans[key].append(span)
else: else:
@ -1467,6 +1478,138 @@ cdef class Doc:
remove_label_if_necessary(attributes[i]) remove_label_if_necessary(attributes[i])
retokenizer.merge(span, attributes[i]) retokenizer.merge(span, attributes[i])
def from_json(self, doc_json, *, validate=False):
"""Convert a JSON document generated by Doc.to_json() to a Doc.
doc_json (Dict): JSON representation of doc object to load.
validate (bool): Whether to validate `doc_json` against the expected schema.
Defaults to False.
RETURNS (Doc): A doc instance corresponding to the specified JSON representation.
"""
if validate:
schema_validation_message = schemas.validate(schemas.DocJSONSchema, doc_json)
if schema_validation_message:
raise ValueError(Errors.E1038.format(message=schema_validation_message))
### Token-level properties ###
words = []
token_attrs_ids = (POS, HEAD, DEP, LEMMA, TAG, MORPH)
# Map annotation type IDs to their string equivalents.
token_attrs = {t: self.vocab.strings[t].lower() for t in token_attrs_ids}
token_annotations = {}
# Gather token-level properties.
for token_json in doc_json["tokens"]:
words.append(doc_json["text"][token_json["start"]:token_json["end"]])
for attr, attr_json in token_attrs.items():
if attr_json in token_json:
if token_json["id"] == 0 and attr not in token_annotations:
token_annotations[attr] = []
elif attr not in token_annotations:
raise ValueError(Errors.E1040.format(partial_attrs=attr))
token_annotations[attr].append(token_json[attr_json])
# Initialize doc instance.
start = 0
cdef const LexemeC* lex
cdef bint has_space
reconstructed_words, spaces = get_words_and_spaces(words, doc_json["text"])
assert words == reconstructed_words
for word, has_space in zip(words, spaces):
lex = self.vocab.get(self.mem, word)
self.push_back(lex, has_space)
# Set remaining token-level attributes via Doc.from_array().
if HEAD in token_annotations:
token_annotations[HEAD] = [
head - i for i, head in enumerate(token_annotations[HEAD])
]
if DEP in token_annotations and HEAD not in token_annotations:
token_annotations[HEAD] = [0] * len(token_annotations[DEP])
if HEAD in token_annotations and DEP not in token_annotations:
raise ValueError(Errors.E1017)
if POS in token_annotations:
for pp in set(token_annotations[POS]):
if pp not in parts_of_speech.IDS:
raise ValueError(Errors.E1021.format(pp=pp))
# Collect token attributes, assert all tokens have exactly the same set of attributes.
attrs = []
partial_attrs: Set[str] = set()
for attr in token_attrs.keys():
if attr in token_annotations:
if len(token_annotations[attr]) != len(words):
partial_attrs.add(token_attrs[attr])
attrs.append(attr)
if len(partial_attrs):
raise ValueError(Errors.E1040.format(partial_attrs=partial_attrs))
# If there are any other annotations, set them.
if attrs:
array = self.to_array(attrs)
if array.ndim == 1:
array = numpy.reshape(array, (array.size, 1))
j = 0
for j, (attr, annot) in enumerate(token_annotations.items()):
if attr is HEAD:
for i in range(len(words)):
array[i, j] = annot[i]
elif attr is MORPH:
for i in range(len(words)):
array[i, j] = self.vocab.morphology.add(annot[i])
else:
for i in range(len(words)):
array[i, j] = self.vocab.strings.add(annot[i])
self.from_array(attrs, array)
### Span/document properties ###
# Complement other document-level properties (cats, spans, ents).
self.cats = doc_json.get("cats", {})
# Set sentence boundaries, if dependency parser not available but sentences are specified in JSON.
if not self.has_annotation("DEP"):
for sent in doc_json.get("sents", {}):
char_span = self.char_span(sent["start"], sent["end"])
if char_span is None:
raise ValueError(Errors.E1039.format(obj="sentence", start=sent["start"], end=sent["end"]))
char_span[0].is_sent_start = True
for token in char_span[1:]:
token.is_sent_start = False
for span_group in doc_json.get("spans", {}):
spans = []
for span in doc_json["spans"][span_group]:
char_span = self.char_span(span["start"], span["end"], span["label"], span["kb_id"])
if char_span is None:
raise ValueError(Errors.E1039.format(obj="span", start=span["start"], end=span["end"]))
spans.append(char_span)
self.spans[span_group] = spans
if "ents" in doc_json:
ents = []
for ent in doc_json["ents"]:
char_span = self.char_span(ent["start"], ent["end"], ent["label"])
if char_span is None:
raise ValueError(Errors.E1039.format(obj="entity", start=ent["start"], end=ent["end"]))
ents.append(char_span)
self.ents = ents
# Add custom attributes. Note that only Doc extensions are currently considered, Token and Span extensions are
# not yet supported.
for attr in doc_json.get("_", {}):
if not Doc.has_extension(attr):
Doc.set_extension(attr)
self._.set(attr, doc_json["_"][attr])
return self
def to_json(self, underscore=None): def to_json(self, underscore=None):
"""Convert a Doc to JSON. """Convert a Doc to JSON.
@ -1477,12 +1620,10 @@ cdef class Doc:
""" """
data = {"text": self.text} data = {"text": self.text}
if self.has_annotation("ENT_IOB"): if self.has_annotation("ENT_IOB"):
data["ents"] = [{"start": ent.start_char, "end": ent.end_char, data["ents"] = [{"start": ent.start_char, "end": ent.end_char, "label": ent.label_} for ent in self.ents]
"label": ent.label_} for ent in self.ents]
if self.has_annotation("SENT_START"): if self.has_annotation("SENT_START"):
sents = list(self.sents) sents = list(self.sents)
data["sents"] = [{"start": sent.start_char, "end": sent.end_char} data["sents"] = [{"start": sent.start_char, "end": sent.end_char} for sent in sents]
for sent in sents]
if self.cats: if self.cats:
data["cats"] = self.cats data["cats"] = self.cats
data["tokens"] = [] data["tokens"] = []
@ -1508,7 +1649,9 @@ cdef class Doc:
for span_group in self.spans: for span_group in self.spans:
data["spans"][span_group] = [] data["spans"][span_group] = []
for span in self.spans[span_group]: for span in self.spans[span_group]:
span_data = {"start": span.start_char, "end": span.end_char, "label": span.label_, "kb_id": span.kb_id_} span_data = {
"start": span.start_char, "end": span.end_char, "label": span.label_, "kb_id": span.kb_id_
}
data["spans"][span_group].append(span_data) data["spans"][span_group].append(span_data)
if underscore: if underscore:
@ -1737,18 +1880,17 @@ cdef int [:,:] _get_lca_matrix(Doc doc, int start, int end):
def pickle_doc(doc): def pickle_doc(doc):
bytes_data = doc.to_bytes(exclude=["vocab", "user_data", "user_hooks"]) bytes_data = doc.to_bytes(exclude=["vocab", "user_data", "user_hooks"])
hooks_and_data = (doc.user_data, doc.user_hooks, doc.user_span_hooks, hooks_and_data = (doc.user_data, doc.user_hooks, doc.user_span_hooks,
doc.user_token_hooks, doc._context) doc.user_token_hooks)
return (unpickle_doc, (doc.vocab, srsly.pickle_dumps(hooks_and_data), bytes_data)) return (unpickle_doc, (doc.vocab, srsly.pickle_dumps(hooks_and_data), bytes_data))
def unpickle_doc(vocab, hooks_and_data, bytes_data): def unpickle_doc(vocab, hooks_and_data, bytes_data):
user_data, doc_hooks, span_hooks, token_hooks, _context = srsly.pickle_loads(hooks_and_data) user_data, doc_hooks, span_hooks, token_hooks = srsly.pickle_loads(hooks_and_data)
doc = Doc(vocab, user_data=user_data).from_bytes(bytes_data, exclude=["user_data"]) doc = Doc(vocab, user_data=user_data).from_bytes(bytes_data, exclude=["user_data"])
doc.user_hooks.update(doc_hooks) doc.user_hooks.update(doc_hooks)
doc.user_span_hooks.update(span_hooks) doc.user_span_hooks.update(span_hooks)
doc.user_token_hooks.update(token_hooks) doc.user_token_hooks.update(token_hooks)
doc._context = _context
return doc return doc
@ -1772,16 +1914,18 @@ def fix_attributes(doc, attributes):
def get_entity_info(ent_info): def get_entity_info(ent_info):
ent_kb_id = 0
ent_id = 0
if isinstance(ent_info, Span): if isinstance(ent_info, Span):
ent_type = ent_info.label ent_type = ent_info.label
ent_kb_id = ent_info.kb_id ent_kb_id = ent_info.kb_id
start = ent_info.start start = ent_info.start
end = ent_info.end end = ent_info.end
ent_id = ent_info.id
elif len(ent_info) == 3: elif len(ent_info) == 3:
ent_type, start, end = ent_info ent_type, start, end = ent_info
ent_kb_id = 0
elif len(ent_info) == 4: elif len(ent_info) == 4:
ent_type, ent_kb_id, start, end = ent_info ent_type, ent_kb_id, start, end = ent_info
else: else:
ent_id, ent_kb_id, ent_type, start, end = ent_info ent_id, ent_kb_id, ent_type, start, end = ent_info
return ent_type, ent_kb_id, start, end return ent_type, ent_kb_id, start, end, ent_id
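Taken together, the `span_id` argument and the `ent_id` plumbing above let entity spans carry an identifier through `doc.ents`. A rough usage sketch (illustrative only, assuming a build with these changes):

```python
import spacy

nlp = spacy.blank("en")
doc = nlp("San Francisco considers banning sidewalk delivery robots")
# Doc.char_span now accepts span_id, exposed as Span.id / Span.id_
span = doc.char_span(0, 13, label="GPE", span_id="loc-sf")
assert span.id_ == "loc-sf"
# set_ents writes span.id into token.ent_id, and doc.ents reads it back
doc.set_ents([span])
assert doc.ents[0].id_ == "loc-sf"
```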

View File

@ -48,7 +48,8 @@ class Span:
label: Union[str, int] = ..., label: Union[str, int] = ...,
vector: Optional[Floats1d] = ..., vector: Optional[Floats1d] = ...,
vector_norm: Optional[float] = ..., vector_norm: Optional[float] = ...,
kb_id: Optional[int] = ..., kb_id: Union[str, int] = ...,
span_id: Union[str, int] = ...,
) -> None: ... ) -> None: ...
def __richcmp__(self, other: Span, op: int) -> bool: ... def __richcmp__(self, other: Span, op: int) -> bool: ...
def __hash__(self) -> int: ... def __hash__(self) -> int: ...

View File

@ -80,17 +80,20 @@ cdef class Span:
return Underscore.span_extensions.pop(name) return Underscore.span_extensions.pop(name)
def __cinit__(self, Doc doc, int start, int end, label=0, vector=None, def __cinit__(self, Doc doc, int start, int end, label=0, vector=None,
vector_norm=None, kb_id=0): vector_norm=None, kb_id=0, span_id=0):
"""Create a `Span` object from the slice `doc[start : end]`. """Create a `Span` object from the slice `doc[start : end]`.
doc (Doc): The parent document. doc (Doc): The parent document.
start (int): The index of the first token of the span. start (int): The index of the first token of the span.
end (int): The index of the first token after the span. end (int): The index of the first token after the span.
label (int or str): A label to attach to the Span, e.g. for named entities. label (Union[int, str]): A label to attach to the Span, e.g. for named
entities.
vector (ndarray[ndim=1, dtype='float32']): A meaning representation vector (ndarray[ndim=1, dtype='float32']): A meaning representation
of the span. of the span.
vector_norm (float): The L2 norm of the span's vector representation. vector_norm (float): The L2 norm of the span's vector representation.
kb_id (uint64): An identifier from a Knowledge Base to capture the meaning of a named entity. kb_id (Union[int, str]): An identifier from a Knowledge Base to capture
the meaning of a named entity.
span_id (Union[int, str]): An identifier to associate with the span.
DOCS: https://spacy.io/api/span#init DOCS: https://spacy.io/api/span#init
""" """
@ -101,6 +104,8 @@ cdef class Span:
label = doc.vocab.strings.add(label) label = doc.vocab.strings.add(label)
if isinstance(kb_id, str): if isinstance(kb_id, str):
kb_id = doc.vocab.strings.add(kb_id) kb_id = doc.vocab.strings.add(kb_id)
if isinstance(span_id, str):
span_id = doc.vocab.strings.add(span_id)
if label not in doc.vocab.strings: if label not in doc.vocab.strings:
raise ValueError(Errors.E084.format(label=label)) raise ValueError(Errors.E084.format(label=label))
@ -112,6 +117,7 @@ cdef class Span:
self.c = SpanC( self.c = SpanC(
label=label, label=label,
kb_id=kb_id, kb_id=kb_id,
id=span_id,
start=start, start=start,
end=end, end=end,
start_char=start_char, start_char=start_char,
@ -126,8 +132,8 @@ cdef class Span:
return False return False
else: else:
return True return True
self_tuple = (self.c.start_char, self.c.end_char, self.c.label, self.c.kb_id, self.doc) self_tuple = (self.c.start_char, self.c.end_char, self.c.label, self.c.kb_id, self.id, self.doc)
other_tuple = (other.c.start_char, other.c.end_char, other.c.label, other.c.kb_id, other.doc) other_tuple = (other.c.start_char, other.c.end_char, other.c.label, other.c.kb_id, other.id, other.doc)
# < # <
if op == 0: if op == 0:
return self_tuple < other_tuple return self_tuple < other_tuple
@ -148,7 +154,7 @@ cdef class Span:
return self_tuple >= other_tuple return self_tuple >= other_tuple
def __hash__(self): def __hash__(self):
return hash((self.doc, self.c.start_char, self.c.end_char, self.c.label, self.c.kb_id)) return hash((self.doc, self.c.start_char, self.c.end_char, self.c.label, self.c.kb_id, self.c.id))
def __len__(self): def __len__(self):
"""Get the number of tokens in the span. """Get the number of tokens in the span.
@ -632,7 +638,7 @@ cdef class Span:
else: else:
return self.doc[root] return self.doc[root]
def char_span(self, int start_idx, int end_idx, label=0, kb_id=0, vector=None): def char_span(self, int start_idx, int end_idx, label=0, kb_id=0, vector=None, id=0):
"""Create a `Span` object from the slice `span.text[start : end]`. """Create a `Span` object from the slice `span.text[start : end]`.
start (int): The index of the first character of the span. start (int): The index of the first character of the span.
@ -774,6 +780,13 @@ cdef class Span:
def __set__(self, attr_t kb_id): def __set__(self, attr_t kb_id):
self.c.kb_id = kb_id self.c.kb_id = kb_id
property id:
def __get__(self):
return self.c.id
def __set__(self, attr_t id):
self.c.id = id
property ent_id: property ent_id:
"""RETURNS (uint64): The entity ID.""" """RETURNS (uint64): The entity ID."""
def __get__(self): def __get__(self):
@ -812,13 +825,21 @@ cdef class Span:
self.label = self.doc.vocab.strings.add(label_) self.label = self.doc.vocab.strings.add(label_)
property kb_id_: property kb_id_:
"""RETURNS (str): The named entity's KB ID.""" """RETURNS (str): The span's KB ID."""
def __get__(self): def __get__(self):
return self.doc.vocab.strings[self.kb_id] return self.doc.vocab.strings[self.kb_id]
def __set__(self, str kb_id_): def __set__(self, str kb_id_):
self.kb_id = self.doc.vocab.strings.add(kb_id_) self.kb_id = self.doc.vocab.strings.add(kb_id_)
property id_:
"""RETURNS (str): The span's ID."""
def __get__(self):
return self.doc.vocab.strings[self.id]
def __set__(self, str id_):
self.id = self.doc.vocab.strings.add(id_)
cdef int _count_words_to_root(const TokenC* token, int sent_length) except -1: cdef int _count_words_to_root(const TokenC* token, int sent_length) except -1:
# Don't allow spaces to be the root, if there are # Don't allow spaces to be the root, if there are
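On the `Span` side, `span_id` is stored on the underlying struct and now takes part in comparison and hashing. A small illustration (a sketch, not taken from the commit):

```python
import spacy
from spacy.tokens import Span

nlp = spacy.blank("en")
doc = nlp("Welcome to the bank of the river.")
s1 = Span(doc, 3, 4, label="LOC", span_id="river-bank")
s2 = Span(doc, 3, 4, label="LOC", span_id="money-bank")
# id holds the hash, id_ the string resolved through the StringStore
assert s1.id == doc.vocab.strings["river-bank"]
assert s1.id_ == "river-bank"
# Same slice and label, but different span_id: the spans no longer compare equal
assert s1 != s2
```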

View File

@ -24,3 +24,4 @@ class SpanGroup:
def __getitem__(self, i: int) -> Span: ... def __getitem__(self, i: int) -> Span: ...
def to_bytes(self) -> bytes: ... def to_bytes(self) -> bytes: ...
def from_bytes(self, bytes_data: bytes) -> SpanGroup: ... def from_bytes(self, bytes_data: bytes) -> SpanGroup: ...
def copy(self) -> SpanGroup: ...
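The stub mirrors the `SpanGroup.copy()` method relied on by `SpanGroups.from_bytes` above; hypothetical usage:

```python
import spacy
from spacy.tokens import SpanGroup

nlp = spacy.blank("en")
doc = nlp("a b c d")
group = SpanGroup(doc, name="g", spans=[doc[0:2]])
clone = group.copy()
assert clone is not group
assert clone.name == group.name and list(clone) == list(group)
```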

View File

@ -5,6 +5,7 @@ from .augment import dont_augment, orth_variants_augmenter # noqa: F401
from .iob_utils import iob_to_biluo, biluo_to_iob # noqa: F401 from .iob_utils import iob_to_biluo, biluo_to_iob # noqa: F401
from .iob_utils import offsets_to_biluo_tags, biluo_tags_to_offsets # noqa: F401 from .iob_utils import offsets_to_biluo_tags, biluo_tags_to_offsets # noqa: F401
from .iob_utils import biluo_tags_to_spans, tags_to_entities # noqa: F401 from .iob_utils import biluo_tags_to_spans, tags_to_entities # noqa: F401
from .iob_utils import split_bilu_label, remove_bilu_prefix # noqa: F401
from .gold_io import docs_to_json, read_json_file # noqa: F401 from .gold_io import docs_to_json, read_json_file # noqa: F401
from .batchers import minibatch_by_padded_size, minibatch_by_words # noqa: F401 from .batchers import minibatch_by_padded_size, minibatch_by_words # noqa: F401
from .loggers import console_logger # noqa: F401 from .loggers import console_logger # noqa: F401

View File

@ -1,33 +1,39 @@
from typing import List from typing import List
from ..errors import Errors from ..errors import Errors
import numpy import numpy
from libc.stdint cimport int32_t
cdef class AlignmentArray: cdef class AlignmentArray:
"""AlignmentArray is similar to Thinc's Ragged with two simplfications: """AlignmentArray is similar to Thinc's Ragged with two simplfications:
indexing returns numpy arrays and this type can only be used for CPU arrays. indexing returns numpy arrays and this type can only be used for CPU arrays.
However, these changes make AlginmentArray more efficient for indexing in a However, these changes make AlignmentArray more efficient for indexing in a
tight loop.""" tight loop."""
__slots__ = [] __slots__ = []
def __init__(self, alignment: List[List[int]]): def __init__(self, alignment: List[List[int]]):
self._lengths = None
self._starts_ends = numpy.zeros(len(alignment) + 1, dtype="i")
cdef int data_len = 0 cdef int data_len = 0
cdef int outer_len cdef int outer_len
cdef int idx cdef int idx
self._starts_ends = numpy.zeros(len(alignment) + 1, dtype='int32')
cdef int32_t* starts_ends_ptr = <int32_t*>self._starts_ends.data
for idx, outer in enumerate(alignment): for idx, outer in enumerate(alignment):
outer_len = len(outer) outer_len = len(outer)
self._starts_ends[idx + 1] = self._starts_ends[idx] + outer_len starts_ends_ptr[idx + 1] = starts_ends_ptr[idx] + outer_len
data_len += outer_len data_len += outer_len
self._data = numpy.empty(data_len, dtype="i") self._lengths = None
self._data = numpy.empty(data_len, dtype="int32")
idx = 0 idx = 0
cdef int32_t* data_ptr = <int32_t*>self._data.data
for outer in alignment: for outer in alignment:
for inner in outer: for inner in outer:
self._data[idx] = inner data_ptr[idx] = inner
idx += 1 idx += 1
def __getitem__(self, idx): def __getitem__(self, idx):
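The rewritten constructor keeps `AlignmentArray` indexable like a ragged array while filling int32 buffers directly. A quick way to see the type in action is through the public `Alignment.from_strings` helper rather than the internal constructor (a sketch, not part of the commit):

```python
from spacy.training import Alignment

other_tokens = ["i", "listened", "to", "obama", "'", "s", "podcasts", "."]
spacy_tokens = ["i", "listened", "to", "obama", "'s", "podcasts", "."]
align = Alignment.from_strings(other_tokens, spacy_tokens)
# x2y is an AlignmentArray: indexing returns a plain numpy array of indices
print(align.x2y[3])       # -> [3]: "obama" aligns to a single token
print(align.x2y.lengths)  # number of aligned tokens per source token
```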

View File

@ -3,10 +3,10 @@ from typing import Optional
import random import random
import itertools import itertools
from functools import partial from functools import partial
from pydantic import BaseModel, StrictStr
from ..util import registry from ..util import registry
from .example import Example from .example import Example
from .iob_utils import split_bilu_label
if TYPE_CHECKING: if TYPE_CHECKING:
from ..language import Language # noqa: F401 from ..language import Language # noqa: F401
@ -278,10 +278,8 @@ def make_whitespace_variant(
ent_prev = doc_dict["entities"][position - 1] ent_prev = doc_dict["entities"][position - 1]
ent_next = doc_dict["entities"][position] ent_next = doc_dict["entities"][position]
if "-" in ent_prev and "-" in ent_next: if "-" in ent_prev and "-" in ent_next:
ent_iob_prev = ent_prev.split("-")[0] ent_iob_prev, ent_type_prev = split_bilu_label(ent_prev)
ent_type_prev = ent_prev.split("-", 1)[1] ent_iob_next, ent_type_next = split_bilu_label(ent_next)
ent_iob_next = ent_next.split("-")[0]
ent_type_next = ent_next.split("-", 1)[1]
if ( if (
ent_iob_prev in ("B", "I") ent_iob_prev in ("B", "I")
and ent_iob_next in ("I", "L") and ent_iob_next in ("I", "L")

View File

@ -9,11 +9,11 @@ from ..tokens.span import Span
from ..attrs import IDS from ..attrs import IDS
from .alignment import Alignment from .alignment import Alignment
from .iob_utils import biluo_to_iob, offsets_to_biluo_tags, doc_to_biluo_tags from .iob_utils import biluo_to_iob, offsets_to_biluo_tags, doc_to_biluo_tags
from .iob_utils import biluo_tags_to_spans from .iob_utils import biluo_tags_to_spans, remove_bilu_prefix
from ..errors import Errors, Warnings from ..errors import Errors, Warnings
from ..pipeline._parser_internals import nonproj from ..pipeline._parser_internals import nonproj
from ..tokens.token cimport MISSING_DEP from ..tokens.token cimport MISSING_DEP
from ..util import logger, to_ternary_int from ..util import logger, to_ternary_int, all_equal
cpdef Doc annotations_to_doc(vocab, tok_annot, doc_annot): cpdef Doc annotations_to_doc(vocab, tok_annot, doc_annot):
@ -151,54 +151,131 @@ cdef class Example:
self._y_sig = y_sig self._y_sig = y_sig
return self._cached_alignment return self._cached_alignment
def _get_aligned_vectorized(self, align, gold_values):
# Fast path for Doc attributes/fields that are predominantly a single value,
# i.e., TAG, POS, MORPH.
x2y_single_toks = []
x2y_single_toks_i = []
x2y_multiple_toks = []
x2y_multiple_toks_i = []
# Gather indices of gold tokens aligned to the candidate tokens into two buckets.
# Bucket 1: All tokens that have a one-to-one alignment.
# Bucket 2: All tokens that have a one-to-many alignment.
for idx, token in enumerate(self.predicted):
aligned_gold_i = align[token.i]
aligned_gold_len = len(aligned_gold_i)
if aligned_gold_len == 1:
x2y_single_toks.append(aligned_gold_i.item())
x2y_single_toks_i.append(idx)
elif aligned_gold_len > 1:
x2y_multiple_toks.append(aligned_gold_i)
x2y_multiple_toks_i.append(idx)
# Map elements of the first bucket directly to the output array.
output = numpy.full(len(self.predicted), None)
output[x2y_single_toks_i] = gold_values[x2y_single_toks].squeeze()
# Collapse many-to-one alignments into one-to-one alignments if they
# share the same value. Map to None in all other cases.
for i in range(len(x2y_multiple_toks)):
aligned_gold_values = gold_values[x2y_multiple_toks[i]]
# If all aligned tokens have the same value, use it.
if all_equal(aligned_gold_values):
x2y_multiple_toks[i] = aligned_gold_values[0].item()
else:
x2y_multiple_toks[i] = None
output[x2y_multiple_toks_i] = x2y_multiple_toks
return output.tolist()
def _get_aligned_non_vectorized(self, align, gold_values):
# Slower path for fields that return multiple values (resulting
# in ragged arrays that cannot be vectorized trivially).
output = [None] * len(self.predicted)
for token in self.predicted:
aligned_gold_i = align[token.i]
values = gold_values[aligned_gold_i].ravel()
if len(values) == 1:
output[token.i] = values.item()
elif all_equal(values):
# If all aligned tokens have the same value, use it.
output[token.i] = values[0].item()
return output
def get_aligned(self, field, as_string=False): def get_aligned(self, field, as_string=False):
"""Return an aligned array for a token attribute.""" """Return an aligned array for a token attribute."""
align = self.alignment.x2y align = self.alignment.x2y
gold_values = self.reference.to_array([field])
if len(gold_values.shape) == 1:
output = self._get_aligned_vectorized(align, gold_values)
else:
output = self._get_aligned_non_vectorized(align, gold_values)
vocab = self.reference.vocab vocab = self.reference.vocab
gold_values = self.reference.to_array([field])
output = [None] * len(self.predicted)
for token in self.predicted:
values = gold_values[align[token.i]]
values = values.ravel()
if len(values) == 0:
output[token.i] = None
elif len(values) == 1:
output[token.i] = values[0]
elif len(set(list(values))) == 1:
# If all aligned tokens have the same value, use it.
output[token.i] = values[0]
else:
output[token.i] = None
if as_string and field not in ["ENT_IOB", "SENT_START"]: if as_string and field not in ["ENT_IOB", "SENT_START"]:
output = [vocab.strings[o] if o is not None else o for o in output] output = [vocab.strings[o] if o is not None else o for o in output]
return output return output
def get_aligned_parse(self, projectivize=True): def get_aligned_parse(self, projectivize=True):
cand_to_gold = self.alignment.x2y cand_to_gold = self.alignment.x2y
gold_to_cand = self.alignment.y2x gold_to_cand = self.alignment.y2x
aligned_heads = [None] * self.x.length
aligned_deps = [None] * self.x.length
has_deps = [token.has_dep() for token in self.y]
has_heads = [token.has_head() for token in self.y]
heads = [token.head.i for token in self.y] heads = [token.head.i for token in self.y]
deps = [token.dep_ for token in self.y] deps = [token.dep_ for token in self.y]
if projectivize: if projectivize:
proj_heads, proj_deps = nonproj.projectivize(heads, deps) proj_heads, proj_deps = nonproj.projectivize(heads, deps)
has_deps = [token.has_dep() for token in self.y]
has_heads = [token.has_head() for token in self.y]
# ensure that missing data remains missing # ensure that missing data remains missing
heads = [h if has_heads[i] else heads[i] for i, h in enumerate(proj_heads)] heads = [h if has_heads[i] else heads[i] for i, h in enumerate(proj_heads)]
deps = [d if has_deps[i] else deps[i] for i, d in enumerate(proj_deps)] deps = [d if has_deps[i] else deps[i] for i, d in enumerate(proj_deps)]
for cand_i in range(self.x.length):
if cand_to_gold.lengths[cand_i] == 1: # Select all candidate tokens that are aligned to a single gold token.
gold_i = cand_to_gold[cand_i][0] c2g_single_toks = numpy.where(cand_to_gold.lengths == 1)[0]
if gold_to_cand.lengths[heads[gold_i]] == 1:
aligned_heads[cand_i] = int(gold_to_cand[heads[gold_i]][0]) # Fetch all aligned gold token indices.
aligned_deps[cand_i] = deps[gold_i] if c2g_single_toks.shape == cand_to_gold.lengths.shape:
return aligned_heads, aligned_deps # This is the most likely case.
gold_i = cand_to_gold[:].squeeze()
else:
gold_i = numpy.vectorize(lambda x: cand_to_gold[int(x)][0])(c2g_single_toks).squeeze()
# Fetch indices of all gold heads for the aligned gold tokens.
heads = numpy.asarray(heads, dtype='i')
gold_head_i = heads[gold_i]
# Select all gold tokens that are heads of the previously selected
# gold tokens (and are aligned to a single candidate token).
g2c_len_heads = gold_to_cand.lengths[gold_head_i]
g2c_len_heads = numpy.where(g2c_len_heads == 1)[0]
g2c_i = numpy.vectorize(lambda x: gold_to_cand[int(x)][0])(gold_head_i[g2c_len_heads]).squeeze()
# Update head/dep alignments with the above.
aligned_heads = numpy.full((self.x.length), None)
aligned_heads[c2g_single_toks[g2c_len_heads]] = g2c_i
deps = numpy.asarray(deps)
aligned_deps = numpy.full((self.x.length), None)
aligned_deps[c2g_single_toks] = deps[gold_i]
return aligned_heads.tolist(), aligned_deps.tolist()
def get_aligned_sent_starts(self): def get_aligned_sent_starts(self):
"""Get list of SENT_START attributes aligned to the predicted tokenization. """Get list of SENT_START attributes aligned to the predicted tokenization.
If the reference has not sentence starts, return a list of None values. If the reference does not have sentence starts, return a list of None values.
""" """
if self.y.has_annotation("SENT_START"): if self.y.has_annotation("SENT_START"):
align = self.alignment.y2x align = self.alignment.y2x
@ -519,7 +596,7 @@ def _parse_ner_tags(biluo_or_offsets, vocab, words, spaces):
else: else:
ent_iobs.append(iob_tag.split("-")[0]) ent_iobs.append(iob_tag.split("-")[0])
if iob_tag.startswith("I") or iob_tag.startswith("B"): if iob_tag.startswith("I") or iob_tag.startswith("B"):
ent_types.append(iob_tag.split("-", 1)[1]) ent_types.append(remove_bilu_prefix(iob_tag))
else: else:
ent_types.append("") ent_types.append("")
return ent_iobs, ent_types return ent_iobs, ent_types
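The vectorized refactor above is internal; from the outside `get_aligned` and `get_aligned_parse` behave as before. A small sketch with matching tokenization (illustrative, not from the commit):

```python
import spacy
from spacy.training import Example

nlp = spacy.blank("en")
predicted = nlp("She walks home")
reference = {
    "words": ["She", "walks", "home"],
    "tags": ["PRON", "VERB", "NOUN"],
    "heads": [1, 1, 1],
    "deps": ["nsubj", "ROOT", "advmod"],
}
example = Example.from_dict(predicted, reference)
# Reference annotations projected onto the predicted tokens
assert example.get_aligned("TAG", as_string=True) == ["PRON", "VERB", "NOUN"]
heads, deps = example.get_aligned_parse(projectivize=True)
assert heads == [1, 1, 1]
assert deps == ["nsubj", "ROOT", "advmod"]
```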

View File

@ -1,4 +1,4 @@
from typing import List, Dict, Tuple, Iterable, Union, Iterator from typing import List, Dict, Tuple, Iterable, Union, Iterator, cast
import warnings import warnings
from ..errors import Errors, Warnings from ..errors import Errors, Warnings
@ -218,6 +218,14 @@ def tags_to_entities(tags: Iterable[str]) -> List[Tuple[str, int, int]]:
return entities return entities
def split_bilu_label(label: str) -> Tuple[str, str]:
return cast(Tuple[str, str], label.split("-", 1))
def remove_bilu_prefix(label: str) -> str:
return label.split("-", 1)[1]
# Fallbacks to make backwards-compat easier # Fallbacks to make backwards-compat easier
offsets_from_biluo_tags = biluo_tags_to_offsets offsets_from_biluo_tags = biluo_tags_to_offsets
spans_from_biluo_tags = biluo_tags_to_spans spans_from_biluo_tags = biluo_tags_to_spans
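In short, the two new helpers split or strip the BILU prefix (illustrative check, not from the commit):

```python
from spacy.training import split_bilu_label, remove_bilu_prefix

iob, ent_type = split_bilu_label("B-ORG")
assert (iob, ent_type) == ("B", "ORG")
assert remove_bilu_prefix("U-PERSON") == "PERSON"
```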

View File

@ -1,6 +1,6 @@
from typing import List, Mapping, NoReturn, Union, Dict, Any, Set, cast from typing import List, Mapping, NoReturn, Union, Dict, Any, Set, cast
from typing import Optional, Iterable, Callable, Tuple, Type from typing import Optional, Iterable, Callable, Tuple, Type
from typing import Iterator, Type, Pattern, Generator, TYPE_CHECKING from typing import Iterator, Pattern, Generator, TYPE_CHECKING
from types import ModuleType from types import ModuleType
import os import os
import importlib import importlib
@ -12,7 +12,6 @@ from thinc.api import NumpyOps, get_current_ops, Adam, Config, Optimizer
from thinc.api import ConfigValidationError, Model from thinc.api import ConfigValidationError, Model
import functools import functools
import itertools import itertools
import numpy.random
import numpy import numpy
import srsly import srsly
import catalogue import catalogue
@ -400,6 +399,7 @@ def load_model(
*, *,
vocab: Union["Vocab", bool] = True, vocab: Union["Vocab", bool] = True,
disable: Iterable[str] = SimpleFrozenList(), disable: Iterable[str] = SimpleFrozenList(),
enable: Iterable[str] = SimpleFrozenList(),
exclude: Iterable[str] = SimpleFrozenList(), exclude: Iterable[str] = SimpleFrozenList(),
config: Union[Dict[str, Any], Config] = SimpleFrozenDict(), config: Union[Dict[str, Any], Config] = SimpleFrozenDict(),
) -> "Language": ) -> "Language":
@ -409,11 +409,19 @@ def load_model(
vocab (Vocab / True): Optional vocab to pass in on initialization. If True, vocab (Vocab / True): Optional vocab to pass in on initialization. If True,
a new Vocab object will be created. a new Vocab object will be created.
disable (Iterable[str]): Names of pipeline components to disable. disable (Iterable[str]): Names of pipeline components to disable.
enable (Iterable[str]): Names of pipeline components to enable. All others will be disabled.
exclude (Iterable[str]): Names of pipeline components to exclude.
config (Dict[str, Any] / Config): Config overrides as nested dict or dict config (Dict[str, Any] / Config): Config overrides as nested dict or dict
keyed by section values in dot notation. keyed by section values in dot notation.
RETURNS (Language): The loaded nlp object. RETURNS (Language): The loaded nlp object.
""" """
kwargs = {"vocab": vocab, "disable": disable, "exclude": exclude, "config": config} kwargs = {
"vocab": vocab,
"disable": disable,
"enable": enable,
"exclude": exclude,
"config": config,
}
if isinstance(name, str): # name or string path if isinstance(name, str): # name or string path
if name.startswith("blank:"): # shortcut for blank model if name.startswith("blank:"): # shortcut for blank model
return get_lang_class(name.replace("blank:", ""))() return get_lang_class(name.replace("blank:", ""))()
@ -433,6 +441,7 @@ def load_model_from_package(
*, *,
vocab: Union["Vocab", bool] = True, vocab: Union["Vocab", bool] = True,
disable: Iterable[str] = SimpleFrozenList(), disable: Iterable[str] = SimpleFrozenList(),
enable: Iterable[str] = SimpleFrozenList(),
exclude: Iterable[str] = SimpleFrozenList(), exclude: Iterable[str] = SimpleFrozenList(),
config: Union[Dict[str, Any], Config] = SimpleFrozenDict(), config: Union[Dict[str, Any], Config] = SimpleFrozenDict(),
) -> "Language": ) -> "Language":
@ -444,6 +453,8 @@ def load_model_from_package(
disable (Iterable[str]): Names of pipeline components to disable. Disabled disable (Iterable[str]): Names of pipeline components to disable. Disabled
pipes will be loaded but they won't be run unless you explicitly pipes will be loaded but they won't be run unless you explicitly
enable them by calling nlp.enable_pipe. enable them by calling nlp.enable_pipe.
enable (Iterable[str]): Names of pipeline components to enable. All other
pipes will be disabled (and can be enabled using `nlp.enable_pipe`).
exclude (Iterable[str]): Names of pipeline components to exclude. Excluded exclude (Iterable[str]): Names of pipeline components to exclude. Excluded
components won't be loaded. components won't be loaded.
config (Dict[str, Any] / Config): Config overrides as nested dict or dict config (Dict[str, Any] / Config): Config overrides as nested dict or dict
@ -451,7 +462,7 @@ def load_model_from_package(
RETURNS (Language): The loaded nlp object. RETURNS (Language): The loaded nlp object.
""" """
cls = importlib.import_module(name) cls = importlib.import_module(name)
return cls.load(vocab=vocab, disable=disable, exclude=exclude, config=config) # type: ignore[attr-defined] return cls.load(vocab=vocab, disable=disable, enable=enable, exclude=exclude, config=config) # type: ignore[attr-defined]
def load_model_from_path( def load_model_from_path(
@ -460,6 +471,7 @@ def load_model_from_path(
meta: Optional[Dict[str, Any]] = None, meta: Optional[Dict[str, Any]] = None,
vocab: Union["Vocab", bool] = True, vocab: Union["Vocab", bool] = True,
disable: Iterable[str] = SimpleFrozenList(), disable: Iterable[str] = SimpleFrozenList(),
enable: Iterable[str] = SimpleFrozenList(),
exclude: Iterable[str] = SimpleFrozenList(), exclude: Iterable[str] = SimpleFrozenList(),
config: Union[Dict[str, Any], Config] = SimpleFrozenDict(), config: Union[Dict[str, Any], Config] = SimpleFrozenDict(),
) -> "Language": ) -> "Language":
@ -473,6 +485,8 @@ def load_model_from_path(
disable (Iterable[str]): Names of pipeline components to disable. Disabled disable (Iterable[str]): Names of pipeline components to disable. Disabled
pipes will be loaded but they won't be run unless you explicitly pipes will be loaded but they won't be run unless you explicitly
enable them by calling nlp.enable_pipe. enable them by calling nlp.enable_pipe.
enable (Iterable[str]): Names of pipeline components to enable. All other
pipes will be disabled (and can be enabled using `nlp.enable_pipe`).
exclude (Iterable[str]): Names of pipeline components to exclude. Excluded exclude (Iterable[str]): Names of pipeline components to exclude. Excluded
components won't be loaded. components won't be loaded.
config (Dict[str, Any] / Config): Config overrides as nested dict or dict config (Dict[str, Any] / Config): Config overrides as nested dict or dict
@ -487,7 +501,12 @@ def load_model_from_path(
overrides = dict_to_dot(config) overrides = dict_to_dot(config)
config = load_config(config_path, overrides=overrides) config = load_config(config_path, overrides=overrides)
nlp = load_model_from_config( nlp = load_model_from_config(
config, vocab=vocab, disable=disable, exclude=exclude, meta=meta config,
vocab=vocab,
disable=disable,
enable=enable,
exclude=exclude,
meta=meta,
) )
return nlp.from_disk(model_path, exclude=exclude, overrides=overrides) return nlp.from_disk(model_path, exclude=exclude, overrides=overrides)
@ -498,6 +517,7 @@ def load_model_from_config(
meta: Dict[str, Any] = SimpleFrozenDict(), meta: Dict[str, Any] = SimpleFrozenDict(),
vocab: Union["Vocab", bool] = True, vocab: Union["Vocab", bool] = True,
disable: Iterable[str] = SimpleFrozenList(), disable: Iterable[str] = SimpleFrozenList(),
enable: Iterable[str] = SimpleFrozenList(),
exclude: Iterable[str] = SimpleFrozenList(), exclude: Iterable[str] = SimpleFrozenList(),
auto_fill: bool = False, auto_fill: bool = False,
validate: bool = True, validate: bool = True,
@ -512,6 +532,8 @@ def load_model_from_config(
disable (Iterable[str]): Names of pipeline components to disable. Disabled disable (Iterable[str]): Names of pipeline components to disable. Disabled
pipes will be loaded but they won't be run unless you explicitly pipes will be loaded but they won't be run unless you explicitly
enable them by calling nlp.enable_pipe. enable them by calling nlp.enable_pipe.
enable (Iterable[str]): Names of pipeline components to enable. All other
pipes will be disabled (and can be enabled using `nlp.enable_pipe`).
exclude (Iterable[str]): Names of pipeline components to exclude. Excluded exclude (Iterable[str]): Names of pipeline components to exclude. Excluded
components won't be loaded. components won't be loaded.
auto_fill (bool): Whether to auto-fill config with missing defaults. auto_fill (bool): Whether to auto-fill config with missing defaults.
@ -530,6 +552,7 @@ def load_model_from_config(
config, config,
vocab=vocab, vocab=vocab,
disable=disable, disable=disable,
enable=enable,
exclude=exclude, exclude=exclude,
auto_fill=auto_fill, auto_fill=auto_fill,
validate=validate, validate=validate,
@ -594,6 +617,7 @@ def load_model_from_init_py(
*, *,
vocab: Union["Vocab", bool] = True, vocab: Union["Vocab", bool] = True,
disable: Iterable[str] = SimpleFrozenList(), disable: Iterable[str] = SimpleFrozenList(),
enable: Iterable[str] = SimpleFrozenList(),
exclude: Iterable[str] = SimpleFrozenList(), exclude: Iterable[str] = SimpleFrozenList(),
config: Union[Dict[str, Any], Config] = SimpleFrozenDict(), config: Union[Dict[str, Any], Config] = SimpleFrozenDict(),
) -> "Language": ) -> "Language":
@ -605,6 +629,8 @@ def load_model_from_init_py(
disable (Iterable[str]): Names of pipeline components to disable. Disabled disable (Iterable[str]): Names of pipeline components to disable. Disabled
pipes will be loaded but they won't be run unless you explicitly pipes will be loaded but they won't be run unless you explicitly
enable them by calling nlp.enable_pipe. enable them by calling nlp.enable_pipe.
enable (Iterable[str]): Names of pipeline components to enable. All other
pipes will be disabled (and can be enabled using `nlp.enable_pipe`).
exclude (Iterable[str]): Names of pipeline components to exclude. Excluded exclude (Iterable[str]): Names of pipeline components to exclude. Excluded
components won't be loaded. components won't be loaded.
config (Dict[str, Any] / Config): Config overrides as nested dict or dict config (Dict[str, Any] / Config): Config overrides as nested dict or dict
@ -622,6 +648,7 @@ def load_model_from_init_py(
vocab=vocab, vocab=vocab,
meta=meta, meta=meta,
disable=disable, disable=disable,
enable=enable,
exclude=exclude, exclude=exclude,
config=config, config=config,
) )
@ -1242,6 +1269,15 @@ def filter_spans(spans: Iterable["Span"]) -> List["Span"]:
return result return result
def filter_chain_spans(*spans: Iterable["Span"]) -> List["Span"]:
return filter_spans(itertools.chain(*spans))
@registry.misc("spacy.first_longest_spans_filter.v1")
def make_first_longest_spans_filter():
return filter_chain_spans
def to_bytes(getters: Dict[str, Callable[[], bytes]], exclude: Iterable[str]) -> bytes: def to_bytes(getters: Dict[str, Callable[[], bytes]], exclude: Iterable[str]) -> bytes:
return srsly.msgpack_dumps(to_dict(getters, exclude)) return srsly.msgpack_dumps(to_dict(getters, exclude))
@ -1680,3 +1716,10 @@ def packages_distributions() -> Dict[str, List[str]]:
for pkg in (dist.read_text("top_level.txt") or "").split(): for pkg in (dist.read_text("top_level.txt") or "").split():
pkg_to_dist[pkg].append(dist.metadata["Name"]) pkg_to_dist[pkg].append(dist.metadata["Name"])
return dict(pkg_to_dist) return dict(pkg_to_dist)
def all_equal(iterable):
"""Return True if all the elements are equal to each other
(or if the input is an empty sequence), False otherwise."""
g = itertools.groupby(iterable)
return next(g, True) and not next(g, False)
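Two quick illustrations of the util changes above: `all_equal`, and the new `enable` argument to `load_model` (a sketch; the pipeline name is only an example and has to be installed):

```python
from spacy.util import all_equal, load_model

assert all_equal(["B", "B", "B"])
assert all_equal([])        # an empty sequence counts as "all equal"
assert not all_equal([1, 2])

# `enable` is the mirror image of `disable`: every other component is
# disabled and can be switched back on later with nlp.enable_pipe().
nlp = load_model("en_core_web_sm", enable=["tok2vec", "tagger"])
assert nlp.pipe_names == ["tok2vec", "tagger"]
```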

View File

@ -339,7 +339,7 @@ cdef class Vectors:
return self.key2row.get(key, -1) return self.key2row.get(key, -1)
elif keys is not None: elif keys is not None:
keys = [get_string_id(key) for key in keys] keys = [get_string_id(key) for key in keys]
rows = [self.key2row.get(key, -1.) for key in keys] rows = [self.key2row.get(key, -1) for key in keys]
return xp.asarray(rows, dtype="i") return xp.asarray(rows, dtype="i")
else: else:
row2key = {row: key for key, row in self.key2row.items()} row2key = {row: key for key, row in self.key2row.items()}

View File

@ -587,7 +587,7 @@ consists of either two or three subnetworks:
run once for each batch. run once for each batch.
- **lower**: Construct a feature-specific vector for each `(token, feature)` - **lower**: Construct a feature-specific vector for each `(token, feature)`
pair. This is also run once for each batch. Constructing the state pair. This is also run once for each batch. Constructing the state
representation is then simply a matter of summing the component features and representation is then a matter of summing the component features and
applying the non-linearity. applying the non-linearity.
- **upper** (optional): A feed-forward network that predicts scores from the - **upper** (optional): A feed-forward network that predicts scores from the
state representation. If not present, the output from the lower model is used state representation. If not present, the output from the lower model is used
@ -628,7 +628,7 @@ same signature, but the `use_upper` argument was `True` by default.
> ``` > ```
Build a tagger model, using a provided token-to-vector component. The tagger Build a tagger model, using a provided token-to-vector component. The tagger
model simply adds a linear layer with softmax activation to predict scores given model adds a linear layer with softmax activation to predict scores given
the token vectors. the token vectors.
| Name | Description | | Name | Description |
@ -920,7 +920,7 @@ A function that reads an existing `KnowledgeBase` from file.
A function that takes as input a [`KnowledgeBase`](/api/kb) and a A function that takes as input a [`KnowledgeBase`](/api/kb) and a
[`Span`](/api/span) object denoting a named entity, and returns a list of [`Span`](/api/span) object denoting a named entity, and returns a list of
plausible [`Candidate`](/api/kb/#candidate) objects. The default plausible [`Candidate`](/api/kb/#candidate) objects. The default
`CandidateGenerator` simply uses the text of a mention to find its potential `CandidateGenerator` uses the text of a mention to find its potential
aliases in the `KnowledgeBase`. Note that this function is case-dependent. aliases in the `KnowledgeBase`. Note that this function is case-dependent.
## Coreference Architectures ## Coreference Architectures

View File

@ -0,0 +1,78 @@
---
title: Attributes
teaser: Token attributes
source: spacy/attrs.pyx
---
[Token](/api/token) attributes are specified using internal IDs in many places
including:
- [`Matcher` patterns](/api/matcher#patterns),
- [`Doc.to_array`](/api/doc#to_array) and
[`Doc.from_array`](/api/doc#from_array)
- [`Doc.has_annotation`](/api/doc#has_annotation)
- [`MultiHashEmbed`](/api/architectures#MultiHashEmbed) Tok2Vec architecture
`attrs`
> ```python
> import spacy
> from spacy.attrs import DEP
>
> nlp = spacy.blank("en")
> doc = nlp("There are many attributes.")
>
> # DEP always has the same internal value
> assert DEP == 76
>
> # "DEP" is automatically converted to DEP
> assert DEP == nlp.vocab.strings["DEP"]
> assert doc.has_annotation(DEP) == doc.has_annotation("DEP")
>
> # look up IDs in spacy.attrs.IDS
> from spacy.attrs import IDS
> assert IDS["DEP"] == DEP
> ```
All methods automatically convert between the string version of an ID (`"DEP"`)
and the internal integer symbols (`DEP`). The internal IDs can be imported from
`spacy.attrs` or retrieved from the [`StringStore`](/api/stringstore). A map
from string attribute names to internal attribute IDs is stored in
`spacy.attrs.IDS`.
The corresponding [`Token` object attributes](/api/token#attributes) can be
accessed using the same names in lowercase, e.g. `token.orth` or `token.length`.
For attributes that represent string values, the internal integer ID is
accessed as `Token.attr`, e.g. `token.dep`, while the string value can be
retrieved by appending `_` as in `token.dep_`.
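For example, both forms stay in sync through the [`StringStore`](/api/stringstore): the integer attribute is the hash of the string value.

```python
import spacy

nlp = spacy.blank("en")
doc = nlp("Apples and oranges")
token = doc[0]
assert isinstance(token.lower, int)                # integer ID (hash)
assert token.lower_ == "apples"                    # string value
assert token.lower == nlp.vocab.strings["apples"]
```
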
| Attribute | Description |
| ------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `DEP` | The token's dependency label. ~~str~~ |
| `ENT_ID` | The token's entity ID (`ent_id`). ~~str~~ |
| `ENT_IOB`    | The IOB part of the token's entity tag. Uses custom integer values rather than the string store: unset is `0`, `I` is `1`, `O` is `2`, and `B` is `3`. ~~str~~ |
| `ENT_KB_ID` | The token's entity knowledge base ID. ~~str~~ |
| `ENT_TYPE` | The token's entity label. ~~str~~ |
| `IS_ALPHA` | Token text consists of alphabetic characters. ~~bool~~ |
| `IS_ASCII` | Token text consists of ASCII characters. ~~bool~~ |
| `IS_DIGIT` | Token text consists of digits. ~~bool~~ |
| `IS_LOWER` | Token text is in lowercase. ~~bool~~ |
| `IS_PUNCT` | Token is punctuation. ~~bool~~ |
| `IS_SPACE` | Token is whitespace. ~~bool~~ |
| `IS_STOP` | Token is a stop word. ~~bool~~ |
| `IS_TITLE` | Token text is in titlecase. ~~bool~~ |
| `IS_UPPER` | Token text is in uppercase. ~~bool~~ |
| `LEMMA` | The token's lemma. ~~str~~ |
| `LENGTH` | The length of the token text. ~~int~~ |
| `LIKE_EMAIL` | Token text resembles an email address. ~~bool~~ |
| `LIKE_NUM` | Token text resembles a number. ~~bool~~ |
| `LIKE_URL` | Token text resembles a URL. ~~bool~~ |
| `LOWER` | The lowercase form of the token text. ~~str~~ |
| `MORPH` | The token's morphological analysis. ~~MorphAnalysis~~ |
| `NORM` | The normalized form of the token text. ~~str~~ |
| `ORTH` | The exact verbatim text of a token. ~~str~~ |
| `POS` | The token's universal part of speech (UPOS). ~~str~~ |
| `SENT_START` | Token is start of sentence. ~~bool~~ |
| `SHAPE` | The token's shape. ~~str~~ |
| `SPACY` | Token has a trailing space. ~~bool~~ |
| `TAG` | The token's fine-grained part of speech. ~~str~~ |
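These IDs can be used wherever a token attribute is expected, for example with [`Doc.to_array`](/api/doc#to_array). A small sketch (one row per token, one column per requested attribute):
```python
import spacy
from spacy.attrs import LOWER, IS_ALPHA

nlp = spacy.blank("en")
doc = nlp("Give it back!")

# Export the requested attributes to a numpy array
array = doc.to_array([LOWER, IS_ALPHA])
assert array.shape == (len(doc), 2)
```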

View File

@ -481,6 +481,45 @@ Deserialize, i.e. import the document contents from a binary string.
| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ | | `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ |
| **RETURNS** | The `Doc` object. ~~Doc~~ | | **RETURNS** | The `Doc` object. ~~Doc~~ |
## Doc.to_json {#to_json tag="method"}
Serializes a document to JSON. Note that this format differs from the
deprecated [`JSON training format`](/api/data-formats#json-input).
> #### Example
>
> ```python
> doc = nlp("All we have to decide is what to do with the time that is given us.")
> assert doc.to_json()["text"] == doc.text
> ```
| Name | Description |
| ------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `underscore` | Optional list of string names of custom `Doc` attributes. Attribute values need to be JSON-serializable. Values will be added to an `"_"` key in the data, e.g. `"_": {"foo": "bar"}`. ~~Optional[List[str]]~~ |
| **RETURNS** | The data in JSON format. ~~Dict[str, Any]~~ |
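As a rough sketch of the `underscore` argument, assuming a hypothetical custom extension `my_attr` registered only for this example:
```python
import spacy
from spacy.tokens import Doc

# Hypothetical extension, registered here purely for illustration
Doc.set_extension("my_attr", default=None)

nlp = spacy.blank("en")
doc = nlp("All we have to decide is what to do with the time that is given us.")
doc._.my_attr = "bar"

data = doc.to_json(underscore=["my_attr"])
assert data["_"]["my_attr"] == "bar"
```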
## Doc.from_json {#from_json tag="method" new="3.3.1"}
Deserializes a document from JSON, i.e. generates a document from the provided
JSON data as generated by [`Doc.to_json()`](/api/doc#to_json).
> #### Example
>
> ```python
> from spacy.tokens import Doc
> doc = nlp("All we have to decide is what to do with the time that is given us.")
> doc_json = doc.to_json()
> deserialized_doc = Doc(nlp.vocab).from_json(doc_json)
> assert deserialized_doc.text == doc.text == doc_json["text"]
> ```
| Name | Description |
| -------------- | -------------------------------------------------------------------------------------------------------------------- |
| `doc_json` | The Doc data in JSON format from [`Doc.to_json`](#to_json). ~~Dict[str, Any]~~ |
| _keyword-only_ | |
| `validate` | Whether to validate the JSON input against the expected schema for detailed debugging. Defaults to `False`. ~~bool~~ |
| **RETURNS** | A `Doc` corresponding to the provided JSON. ~~Doc~~ |
## Doc.retokenize {#retokenize tag="contextmanager" new="2.1"} ## Doc.retokenize {#retokenize tag="contextmanager" new="2.1"}
Context manager to handle retokenization of the `Doc`. Modifications to the Context manager to handle retokenization of the `Doc`. Modifications to the

View File

@ -290,7 +290,7 @@ Load the pipe from a bytestring. Modifies the object in place and returns it.
> >
> ```python > ```python
> ruler_bytes = ruler.to_bytes() > ruler_bytes = ruler.to_bytes()
> ruler = nlp.add_pipe("enity_ruler") > ruler = nlp.add_pipe("entity_ruler")
> ruler.from_bytes(ruler_bytes) > ruler.from_bytes(ruler_bytes)
> ``` > ```

View File

@ -113,6 +113,10 @@ string where an integer is expected) or unexpected property names.
Find all token sequences matching the supplied patterns on the `Doc` or `Span`. Find all token sequences matching the supplied patterns on the `Doc` or `Span`.
Note that if a single label has multiple patterns associated with it, the
returned matches don't provide a way to tell which pattern was responsible for
the match.
> #### Example > #### Example
> >
> ```python > ```python
@ -131,7 +135,7 @@ Find all token sequences matching the supplied patterns on the `Doc` or `Span`.
| _keyword-only_ | | | _keyword-only_ | |
| `as_spans` <Tag variant="new">3</Tag> | Instead of tuples, return a list of [`Span`](/api/span) objects of the matches, with the `match_id` assigned as the span label. Defaults to `False`. ~~bool~~ | | `as_spans` <Tag variant="new">3</Tag> | Instead of tuples, return a list of [`Span`](/api/span) objects of the matches, with the `match_id` assigned as the span label. Defaults to `False`. ~~bool~~ |
| `allow_missing` <Tag variant="new">3</Tag> | Whether to skip checks for missing annotation for attributes included in patterns. Defaults to `False`. ~~bool~~ | | `allow_missing` <Tag variant="new">3</Tag> | Whether to skip checks for missing annotation for attributes included in patterns. Defaults to `False`. ~~bool~~ |
| `with_alignments` <Tag variant="new">3.0.6</Tag> | Return match alignment information as part of the match tuple as `List[int]` with the same length as the matched span. Each entry denotes the corresponding index of the token pattern. If `as_spans` is set to `True`, this setting is ignored. Defaults to `False`. ~~bool~~ | | `with_alignments` <Tag variant="new">3.0.6</Tag> | Return match alignment information as part of the match tuple as `List[int]` with the same length as the matched span. Each entry denotes the corresponding index of the token in the pattern. If `as_spans` is set to `True`, this setting is ignored. Defaults to `False`. ~~bool~~ |
| **RETURNS** | A list of `(match_id, start, end)` tuples, describing the matches. A match tuple describes a span `doc[start:end`]. The `match_id` is the ID of the added match pattern. If `as_spans` is set to `True`, a list of `Span` objects is returned instead. ~~Union[List[Tuple[int, int, int]], List[Span]]~~ | | **RETURNS** | A list of `(match_id, start, end)` tuples, describing the matches. A match tuple describes a span `doc[start:end`]. The `match_id` is the ID of the added match pattern. If `as_spans` is set to `True`, a list of `Span` objects is returned instead. ~~Union[List[Tuple[int, int, int]], List[Span]]~~ |
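For instance, a minimal sketch of the `as_spans` option (the pattern name and text are made up):
```python
import spacy
from spacy.matcher import Matcher

nlp = spacy.blank("en")
matcher = Matcher(nlp.vocab)
matcher.add("HELLO_WORLD", [[{"LOWER": "hello"}, {"LOWER": "world"}]])

doc = nlp("Hello world! Hello world again.")
# Default: a list of (match_id, start, end) tuples
matches = matcher(doc)
# With as_spans=True: Span objects with the match ID as the span label
spans = matcher(doc, as_spans=True)
print([(span.text, span.label_) for span in spans])
```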
## Matcher.\_\_len\_\_ {#len tag="method" new="2"} ## Matcher.\_\_len\_\_ {#len tag="method" new="2"}

View File

@ -7,6 +7,7 @@ menu:
- ['merge_entities', 'merge_entities'] - ['merge_entities', 'merge_entities']
- ['merge_subtokens', 'merge_subtokens'] - ['merge_subtokens', 'merge_subtokens']
- ['token_splitter', 'token_splitter'] - ['token_splitter', 'token_splitter']
- ['doc_cleaner', 'doc_cleaner']
--- ---
## merge_noun_chunks {#merge_noun_chunks tag="function"} ## merge_noun_chunks {#merge_noun_chunks tag="function"}

View File

@ -27,6 +27,7 @@ Create a `Span` object from the slice `doc[start : end]`.
| `vector` | A meaning representation of the span. ~~numpy.ndarray[ndim=1, dtype=float32]~~ | | `vector` | A meaning representation of the span. ~~numpy.ndarray[ndim=1, dtype=float32]~~ |
| `vector_norm` | The L2 norm of the document's vector representation. ~~float~~ | | `vector_norm` | The L2 norm of the document's vector representation. ~~float~~ |
| `kb_id` | A knowledge base ID to attach to the span, e.g. for named entities. ~~Union[str, int]~~ | | `kb_id` | A knowledge base ID to attach to the span, e.g. for named entities. ~~Union[str, int]~~ |
| `span_id` | An ID to associate with the span. ~~Union[str, int]~~ |
## Span.\_\_getitem\_\_ {#getitem tag="method"} ## Span.\_\_getitem\_\_ {#getitem tag="method"}
@ -560,7 +561,9 @@ overlaps with will be returned.
| `lemma_` | The span's lemma. Equivalent to `"".join(token.text_with_ws for token in span)`. ~~str~~ | | `lemma_` | The span's lemma. Equivalent to `"".join(token.text_with_ws for token in span)`. ~~str~~ |
| `kb_id` | The hash value of the knowledge base ID referred to by the span. ~~int~~ | | `kb_id` | The hash value of the knowledge base ID referred to by the span. ~~int~~ |
| `kb_id_` | The knowledge base ID referred to by the span. ~~str~~ | | `kb_id_` | The knowledge base ID referred to by the span. ~~str~~ |
| `ent_id` | The hash value of the named entity the token is an instance of. ~~int~~ | | `ent_id` | The hash value of the named entity the root token is an instance of. ~~int~~ |
| `ent_id_` | The string ID of the named entity the token is an instance of. ~~str~~ | | `ent_id_` | The string ID of the named entity the root token is an instance of. ~~str~~ |
| `id` | The hash value of the span's ID. ~~int~~ |
| `id_` | The span's ID. ~~str~~ |
| `sentiment` | A scalar value indicating the positivity or negativity of the span. ~~float~~ | | `sentiment` | A scalar value indicating the positivity or negativity of the span. ~~float~~ |
| `_` | User space for adding custom [attribute extensions](/usage/processing-pipelines#custom-components-attributes). ~~Underscore~~ | | `_` | User space for adding custom [attribute extensions](/usage/processing-pipelines#custom-components-attributes). ~~Underscore~~ |

View File

@ -0,0 +1,351 @@
---
title: SpanRuler
tag: class
source: spacy/pipeline/span_ruler.py
new: 3.3.1
teaser: 'Pipeline component for rule-based span and named entity recognition'
api_string_name: span_ruler
api_trainable: false
---
The span ruler lets you add spans to [`Doc.spans`](/api/doc#spans) and/or
[`Doc.ents`](/api/doc#ents) using token-based rules or exact phrase matches. For
usage examples, see the docs on
[rule-based span matching](/usage/rule-based-matching#spanruler).
## Assigned Attributes {#assigned-attributes}
Matches will be saved to `Doc.spans[spans_key]` as a
[`SpanGroup`](/api/spangroup) and/or to `Doc.ents`, where the annotation is
saved in the `Token.ent_type` and `Token.ent_iob` fields.
| Location | Value |
| ---------------------- | ----------------------------------------------------------------- |
| `Doc.spans[spans_key]` | The annotated spans. ~~SpanGroup~~ |
| `Doc.ents` | The annotated spans. ~~Tuple[Span]~~ |
| `Token.ent_iob` | An enum encoding of the IOB part of the named entity tag. ~~int~~ |
| `Token.ent_iob_` | The IOB part of the named entity tag. ~~str~~ |
| `Token.ent_type` | The label part of the named entity tag (hash). ~~int~~ |
| `Token.ent_type_` | The label part of the named entity tag. ~~str~~ |
## Config and implementation {#config}
The default config is defined by the pipeline component factory and describes
how the component should be configured. You can override its settings via the
`config` argument on [`nlp.add_pipe`](/api/language#add_pipe) or in your
[`config.cfg`](/usage/training#config).
> #### Example
>
> ```python
> config = {
> "spans_key": "my_spans",
> "validate": True,
> "overwrite": False,
> }
> nlp.add_pipe("span_ruler", config=config)
> ```
| Setting | Description |
| --------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `spans_key` | The spans key to save the spans under. If `None`, no spans are saved. Defaults to `"ruler"`. ~~Optional[str]~~ |
| `spans_filter` | The optional method to filter spans before they are assigned to doc.spans. Defaults to `None`. ~~Optional[Callable[[Iterable[Span], Iterable[Span]], List[Span]]]~~ |
| `annotate_ents` | Whether to save spans to doc.ents. Defaults to `False`. ~~bool~~ |
| `ents_filter` | The method to filter spans before they are assigned to doc.ents. Defaults to `util.filter_chain_spans`. ~~Callable[[Iterable[Span], Iterable[Span]], List[Span]]~~ |
| `phrase_matcher_attr` | Token attribute to match on, passed to the internal PhraseMatcher as `attr`. Defaults to `None`. ~~Optional[Union[int, str]]~~ |
| `validate` | Whether patterns should be validated, passed to Matcher and PhraseMatcher as `validate`. Defaults to `False`. ~~bool~~ |
| `overwrite` | Whether to remove any existing spans under `Doc.spans[spans_key]` if `spans_key` is set, or to remove any ents under `Doc.ents` if `annotate_ents` is set. Defaults to `True`. ~~bool~~ |
| `scorer` | The scoring method. Defaults to [`Scorer.score_spans`](/api/scorer#score_spans) for `Doc.spans[spans_key]` with overlapping spans allowed. ~~Optional[Callable]~~ |
```python
%%GITHUB_SPACY/spacy/pipeline/span_ruler.py
```
## SpanRuler.\_\_init\_\_ {#init tag="method"}
Initialize the span ruler. If patterns are supplied here, they need to be a list
of dictionaries with a `"label"` and `"pattern"` key. A pattern can either be a
token pattern (list) or a phrase pattern (string). For example:
`{"label": "ORG", "pattern": "Apple"}`.
> #### Example
>
> ```python
> # Construction via add_pipe
> ruler = nlp.add_pipe("span_ruler")
>
> # Construction from class
> from spacy.pipeline import SpanRuler
> ruler = SpanRuler(nlp, overwrite=True)
> ```
| Name | Description |
| --------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `nlp` | The shared nlp object to pass the vocab to the matchers and process phrase patterns. ~~Language~~ |
| `name` | Instance name of the current pipeline component. Typically passed in automatically from the factory when the component is added. Used to disable the current span ruler while creating phrase patterns with the nlp object. ~~str~~ |
| _keyword-only_ | |
| `spans_key` | The spans key to save the spans under. If `None`, no spans are saved. Defaults to `"ruler"`. ~~Optional[str]~~ |
| `spans_filter` | The optional method to filter spans before they are assigned to doc.spans. Defaults to `None`. ~~Optional[Callable[[Iterable[Span], Iterable[Span]], List[Span]]]~~ |
| `annotate_ents` | Whether to save spans to doc.ents. Defaults to `False`. ~~bool~~ |
| `ents_filter` | The method to filter spans before they are assigned to doc.ents. Defaults to `util.filter_chain_spans`. ~~Callable[[Iterable[Span], Iterable[Span]], List[Span]]~~ |
| `phrase_matcher_attr` | Token attribute to match on, passed to the internal PhraseMatcher as `attr`. Defaults to `None`. ~~Optional[Union[int, str]]~~ |
| `validate` | Whether patterns should be validated, passed to Matcher and PhraseMatcher as `validate`. Defaults to `False`. ~~bool~~ |
| `overwrite` | Whether to remove any existing spans under `Doc.spans[spans_key]` if `spans_key` is set, or to remove any ents under `Doc.ents` if `annotate_ents` is set. Defaults to `True`. ~~bool~~ |
| `scorer` | The scoring method. Defaults to [`Scorer.score_spans`](/api/scorer#score_spans) for `Doc.spans[spans_key]` with overlapping spans allowed. ~~Optional[Callable]~~ |
## SpanRuler.initialize {#initialize tag="method"}
Initialize the component with data. This method is used before training to load in rules
from a [pattern file](/usage/rule-based-matching/#spanruler-files). It
is typically called by [`Language.initialize`](/api/language#initialize) and
lets you customize arguments it receives via the
[`[initialize.components]`](/api/data-formats#config-initialize) block in the
config. Any existing patterns are removed on initialization.
> #### Example
>
> ```python
> span_ruler = nlp.add_pipe("span_ruler")
> span_ruler.initialize(lambda: [], nlp=nlp, patterns=patterns)
> ```
>
> ```ini
> ### config.cfg
> [initialize.components.span_ruler]
>
> [initialize.components.span_ruler.patterns]
> @readers = "srsly.read_jsonl.v1"
> path = "corpus/span_ruler_patterns.jsonl
> ```
| Name | Description |
| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `get_examples` | Function that returns gold-standard annotations in the form of [`Example`](/api/example) objects. Not used by the `SpanRuler`. ~~Callable[[], Iterable[Example]]~~ |
| _keyword-only_ | |
| `nlp` | The current `nlp` object. Defaults to `None`. ~~Optional[Language]~~ |
| `patterns` | The list of patterns. Defaults to `None`. ~~Optional[Sequence[Dict[str, Union[str, List[Dict[str, Any]]]]]]~~ |
## SpanRuler.\_\_len\_\_ {#len tag="method"}
The number of all patterns added to the span ruler.
> #### Example
>
> ```python
> ruler = nlp.add_pipe("span_ruler")
> assert len(ruler) == 0
> ruler.add_patterns([{"label": "ORG", "pattern": "Apple"}])
> assert len(ruler) == 1
> ```
| Name | Description |
| ----------- | ------------------------------- |
| **RETURNS** | The number of patterns. ~~int~~ |
## SpanRuler.\_\_contains\_\_ {#contains tag="method"}
Whether a label is present in the patterns.
> #### Example
>
> ```python
> ruler = nlp.add_pipe("span_ruler")
> ruler.add_patterns([{"label": "ORG", "pattern": "Apple"}])
> assert "ORG" in ruler
> assert "PERSON" not in ruler
> ```
| Name | Description |
| ----------- | --------------------------------------------------- |
| `label` | The label to check. ~~str~~ |
| **RETURNS** | Whether the span ruler contains the label. ~~bool~~ |
## SpanRuler.\_\_call\_\_ {#call tag="method"}
Find matches in the `Doc` and add them to `doc.spans[spans_key]` and/or
`doc.ents`. Typically, this happens automatically after the component has been
added to the pipeline using [`nlp.add_pipe`](/api/language#add_pipe). If the
span ruler was initialized with `overwrite=True`, existing spans and entities
will be removed.
> #### Example
>
> ```python
> ruler = nlp.add_pipe("span_ruler")
> ruler.add_patterns([{"label": "ORG", "pattern": "Apple"}])
>
> doc = nlp("A text about Apple.")
> spans = [(span.text, span.label_) for span in doc.spans["ruler"]]
> assert spans == [("Apple", "ORG")]
> ```
| Name | Description |
| ----------- | -------------------------------------------------------------------- |
| `doc` | The `Doc` object to process, e.g. the `Doc` in the pipeline. ~~Doc~~ |
| **RETURNS** | The modified `Doc` with added spans/entities. ~~Doc~~ |
## SpanRuler.add_patterns {#add_patterns tag="method"}
Add patterns to the span ruler. A pattern can either be a token pattern (list of
dicts) or a phrase pattern (string). For more details, see the usage guide on
[rule-based matching](/usage/rule-based-matching).
> #### Example
>
> ```python
> patterns = [
> {"label": "ORG", "pattern": "Apple"},
> {"label": "GPE", "pattern": [{"lower": "san"}, {"lower": "francisco"}]}
> ]
> ruler = nlp.add_pipe("span_ruler")
> ruler.add_patterns(patterns)
> ```
| Name | Description |
| ---------- | ---------------------------------------------------------------- |
| `patterns` | The patterns to add. ~~List[Dict[str, Union[str, List[dict]]]]~~ |
## SpanRuler.remove {#remove tag="method"}
Remove patterns by label from the span ruler. A `ValueError` is raised if the
label does not exist in any patterns.
> #### Example
>
> ```python
> patterns = [{"label": "ORG", "pattern": "Apple", "id": "apple"}]
> ruler = nlp.add_pipe("span_ruler")
> ruler.add_patterns(patterns)
> ruler.remove("ORG")
> ```
| Name | Description |
| ------- | -------------------------------------- |
| `label` | The label of the pattern rule. ~~str~~ |
## SpanRuler.remove_by_id {#remove_by_id tag="method"}
Remove patterns by ID from the span ruler. A `ValueError` is raised if the ID
does not exist in any patterns.
> #### Example
>
> ```python
> patterns = [{"label": "ORG", "pattern": "Apple", "id": "apple"}]
> ruler = nlp.add_pipe("span_ruler")
> ruler.add_patterns(patterns)
> ruler.remove_by_id("apple")
> ```
| Name | Description |
| ------------ | ----------------------------------- |
| `pattern_id` | The ID of the pattern rule. ~~str~~ |
## SpanRuler.clear {#clear tag="method"}
Remove all patterns from the span ruler.
> #### Example
>
> ```python
> patterns = [{"label": "ORG", "pattern": "Apple", "id": "apple"}]
> ruler = nlp.add_pipe("span_ruler")
> ruler.add_patterns(patterns)
> ruler.clear()
> ```
## SpanRuler.to_disk {#to_disk tag="method"}
Save the span ruler patterns to a directory. The patterns will be saved as
newline-delimited JSON (JSONL).
> #### Example
>
> ```python
> ruler = nlp.add_pipe("span_ruler")
> ruler.to_disk("/path/to/span_ruler")
> ```
| Name | Description |
| ------ | ------------------------------------------------------------------------------------------------------------------------------------------ |
| `path` | A path to a directory, which will be created if it doesn't exist. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ |
## SpanRuler.from_disk {#from_disk tag="method"}
Load the span ruler from a path.
> #### Example
>
> ```python
> ruler = nlp.add_pipe("span_ruler")
> ruler.from_disk("/path/to/span_ruler")
> ```
| Name | Description |
| ----------- | ----------------------------------------------------------------------------------------------- |
| `path` | A path to a directory. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ |
| **RETURNS** | The modified `SpanRuler` object. ~~SpanRuler~~ |
## SpanRuler.to_bytes {#to_bytes tag="method"}
Serialize the span ruler to a bytestring.
> #### Example
>
> ```python
> ruler = nlp.add_pipe("span_ruler")
> ruler_bytes = ruler.to_bytes()
> ```
| Name | Description |
| ----------- | ---------------------------------- |
| **RETURNS** | The serialized patterns. ~~bytes~~ |
## SpanRuler.from_bytes {#from_bytes tag="method"}
Load the pipe from a bytestring. Modifies the object in place and returns it.
> #### Example
>
> ```python
> ruler_bytes = ruler.to_bytes()
> ruler = nlp.add_pipe("span_ruler")
> ruler.from_bytes(ruler_bytes)
> ```
| Name | Description |
| ------------ | ---------------------------------------------- |
| `bytes_data` | The bytestring to load. ~~bytes~~ |
| **RETURNS** | The modified `SpanRuler` object. ~~SpanRuler~~ |
## SpanRuler.labels {#labels tag="property"}
All labels present in the match patterns.
| Name | Description |
| ----------- | -------------------------------------- |
| **RETURNS** | The string labels. ~~Tuple[str, ...]~~ |
## SpanRuler.ids {#ids tag="property"}
All IDs present in the `id` property of the match patterns.
| Name | Description |
| ----------- | ----------------------------------- |
| **RETURNS** | The string IDs. ~~Tuple[str, ...]~~ |
## SpanRuler.patterns {#patterns tag="property"}
All patterns that were added to the span ruler.
| Name | Description |
| ----------- | ---------------------------------------------------------------------------------------- |
| **RETURNS** | The original patterns, one dictionary per pattern. ~~List[Dict[str, Union[str, dict]]]~~ |
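A small sketch illustrating the `labels`, `ids` and `patterns` properties above (the values assume only this one pattern has been added):
```python
import spacy

nlp = spacy.blank("en")
ruler = nlp.add_pipe("span_ruler")
ruler.add_patterns([{"label": "ORG", "pattern": "Apple", "id": "apple"}])

assert ruler.labels == ("ORG",)
assert ruler.ids == ("apple",)
# The original pattern dictionaries, one per pattern
print(ruler.patterns)
```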
## Attributes {#attributes}
| Name | Description |
| ---------------- | -------------------------------------------------------------------------------- |
| `key` | The spans key that spans are saved under. ~~Optional[str]~~ |
| `matcher` | The underlying matcher used to process token patterns. ~~Matcher~~ |
| `phrase_matcher` | The underlying phrase matcher used to process phrase patterns. ~~PhraseMatcher~~ |

View File

@ -161,7 +161,7 @@ Load state from a binary string.
> #### Example > #### Example
> >
> ```python > ```python
> fron spacy.strings import StringStore > from spacy.strings import StringStore
> store_bytes = stringstore.to_bytes() > store_bytes = stringstore.to_bytes()
> new_store = StringStore().from_bytes(store_bytes) > new_store = StringStore().from_bytes(store_bytes)
> ``` > ```

View File

@ -221,7 +221,7 @@ dependency tree.
## Token.ancestors {#ancestors tag="property" model="parser"} ## Token.ancestors {#ancestors tag="property" model="parser"}
The rightmost token of this token's syntactic descendants. A sequence of the token's syntactic ancestors (parents, grandparents, etc.).
> #### Example > #### Example
> >

View File

@ -51,6 +51,7 @@ specified separately using the new `exclude` keyword argument.
| _keyword-only_ | | | _keyword-only_ | |
| `vocab` | Optional shared vocab to pass in on initialization. If `True` (default), a new `Vocab` object will be created. ~~Union[Vocab, bool]~~ | | `vocab` | Optional shared vocab to pass in on initialization. If `True` (default), a new `Vocab` object will be created. ~~Union[Vocab, bool]~~ |
| `disable` | Names of pipeline components to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [nlp.enable_pipe](/api/language#enable_pipe). ~~List[str]~~ | | `disable` | Names of pipeline components to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [nlp.enable_pipe](/api/language#enable_pipe). ~~List[str]~~ |
| `enable` | Names of pipeline components to [enable](/usage/processing-pipelines#disabling). All other pipes will be disabled. ~~List[str]~~ |
| `exclude` <Tag variant="new">3</Tag> | Names of pipeline components to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~List[str]~~ | | `exclude` <Tag variant="new">3</Tag> | Names of pipeline components to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~List[str]~~ |
| `config` <Tag variant="new">3</Tag> | Optional config overrides, either as nested dict or dict keyed by section value in dot notation, e.g. `"components.name.value"`. ~~Union[Dict[str, Any], Config]~~ | | `config` <Tag variant="new">3</Tag> | Optional config overrides, either as nested dict or dict keyed by section value in dot notation, e.g. `"components.name.value"`. ~~Union[Dict[str, Any], Config]~~ |
| **RETURNS** | A `Language` object with the loaded pipeline. ~~Language~~ | | **RETURNS** | A `Language` object with the loaded pipeline. ~~Language~~ |
@ -239,7 +240,7 @@ browser. Will run a simple web server.
| Name | Description | | Name | Description |
| --------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | | --------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `docs` | Document(s) or span(s) to visualize. ~~Union[Iterable[Union[Doc, Span]], Doc, Span]~~ | | `docs` | Document(s) or span(s) to visualize. ~~Union[Iterable[Union[Doc, Span]], Doc, Span]~~ |
| `style` | Visualization style, `"dep"` or `"ent"`. Defaults to `"dep"`. ~~str~~ | | `style` | Visualization style, `"dep"`, `"ent"` or `"span"` <Tag variant="new">3.3</Tag>. Defaults to `"dep"`. ~~str~~ |
| `page` | Render markup as full HTML page. Defaults to `True`. ~~bool~~ | | `page` | Render markup as full HTML page. Defaults to `True`. ~~bool~~ |
| `minify` | Minify HTML markup. Defaults to `False`. ~~bool~~ | | `minify` | Minify HTML markup. Defaults to `False`. ~~bool~~ |
| `options` | [Visualizer-specific options](#displacy_options), e.g. colors. ~~Dict[str, Any]~~ | | `options` | [Visualizer-specific options](#displacy_options), e.g. colors. ~~Dict[str, Any]~~ |
@ -264,7 +265,7 @@ Render a dependency parse tree or named entity visualization.
| Name | Description | | Name | Description |
| ----------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | ----------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `docs` | Document(s) or span(s) to visualize. ~~Union[Iterable[Union[Doc, Span, dict]], Doc, Span, dict]~~ | | `docs` | Document(s) or span(s) to visualize. ~~Union[Iterable[Union[Doc, Span, dict]], Doc, Span, dict]~~ |
| `style` | Visualization style, `"dep"` or `"ent"`. Defaults to `"dep"`. ~~str~~ | | `style` | Visualization style, `"dep"`, `"ent"` or `"span"` <Tag variant="new">3.3</Tag>. Defaults to `"dep"`. ~~str~~ |
| `page` | Render markup as full HTML page. Defaults to `True`. ~~bool~~ | | `page` | Render markup as full HTML page. Defaults to `True`. ~~bool~~ |
| `minify` | Minify HTML markup. Defaults to `False`. ~~bool~~ | | `minify` | Minify HTML markup. Defaults to `False`. ~~bool~~ |
| `options` | [Visualizer-specific options](#displacy_options), e.g. colors. ~~Dict[str, Any]~~ | | `options` | [Visualizer-specific options](#displacy_options), e.g. colors. ~~Dict[str, Any]~~ |
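The `"span"` style reads labeled spans from `doc.spans` (by default under the `"sc"` key); a minimal sketch with made-up span labels:
```python
import spacy
from spacy import displacy
from spacy.tokens import Span

nlp = spacy.blank("en")
doc = nlp("Welcome to the Bank of China.")
# Spans to visualize, stored under the default "sc" key
doc.spans["sc"] = [Span(doc, 3, 6, "ORG"), Span(doc, 5, 6, "GPE")]
html = displacy.render(doc, style="span")
```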

View File

@ -1899,7 +1899,7 @@ access to some nice Latin vectors. You can then pass the directory path to
> ``` > ```
```cli ```cli
$ wget https://s3-us-west-1.amazonaws.com/fasttext-vectors/word-vectors-v2/cc.la.300.vec.gz $ wget https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.la.300.vec.gz
$ python -m spacy init vectors en cc.la.300.vec.gz /tmp/la_vectors_wiki_lg $ python -m spacy init vectors en cc.la.300.vec.gz /tmp/la_vectors_wiki_lg
``` ```

View File

@ -362,6 +362,18 @@ nlp = spacy.load("en_core_web_sm", disable=["tagger", "parser"])
nlp.enable_pipe("tagger") nlp.enable_pipe("tagger")
``` ```
In addition to `disable`, `spacy.load()` also accepts `enable`. If `enable` is
set, all components except for those in `enable` are disabled.
```python
# Load the complete pipeline, but disable all components except for tok2vec and tagger
nlp = spacy.load("en_core_web_sm", enable=["tok2vec", "tagger"])
# Has the same effect, as NER is already not part of the enabled set of components
nlp = spacy.load("en_core_web_sm", enable=["tok2vec", "tagger"], disable=["ner"])
# Will raise an error, as the sets of enabled and disabled components are conflicting
nlp = spacy.load("en_core_web_sm", enable=["ner"], disable=["ner"])
```
<Infobox variant="warning" title="Changed in v3.0"> <Infobox variant="warning" title="Changed in v3.0">
As of v3.0, the `disable` keyword argument specifies components to load but As of v3.0, the `disable` keyword argument specifies components to load but

View File

@ -6,6 +6,7 @@ menu:
- ['Phrase Matcher', 'phrasematcher'] - ['Phrase Matcher', 'phrasematcher']
- ['Dependency Matcher', 'dependencymatcher'] - ['Dependency Matcher', 'dependencymatcher']
- ['Entity Ruler', 'entityruler'] - ['Entity Ruler', 'entityruler']
- ['Span Ruler', 'spanruler']
- ['Models & Rules', 'models-rules'] - ['Models & Rules', 'models-rules']
--- ---
@ -1446,6 +1447,108 @@ with nlp.select_pipes(enable="tagger"):
ruler.add_patterns(patterns) ruler.add_patterns(patterns)
``` ```
## Rule-based span matching {#spanruler new="3.3.1"}
The [`SpanRuler`](/api/spanruler) is a generalized version of the entity ruler
that lets you add spans to `doc.spans` or `doc.ents` based on pattern
dictionaries, which makes it easy to combine rule-based and statistical pipeline
components.
### Span patterns {#spanruler-patterns}
The [pattern format](#entityruler-patterns) is the same as for the entity ruler:
1. **Phrase patterns** for exact string matches (string).
```python
{"label": "ORG", "pattern": "Apple"}
```
2. **Token patterns** with one dictionary describing one token (list).
```python
{"label": "GPE", "pattern": [{"LOWER": "san"}, {"LOWER": "francisco"}]}
```
### Using the span ruler {#spanruler-usage}
The [`SpanRuler`](/api/spanruler) is a pipeline component that's typically added
via [`nlp.add_pipe`](/api/language#add_pipe). When the `nlp` object is called on
a text, it will find matches in the `doc` and add them as spans to
`doc.spans["ruler"]`, using the specified pattern label as the entity label.
Unlike in `doc.ents`, overlapping matches are allowed in `doc.spans`, so no
filtering is required, but optional filtering and sorting can be applied to the
spans before they're saved.
```python
### {executable="true"}
import spacy
nlp = spacy.blank("en")
ruler = nlp.add_pipe("span_ruler")
patterns = [{"label": "ORG", "pattern": "Apple"},
{"label": "GPE", "pattern": [{"LOWER": "san"}, {"LOWER": "francisco"}]}]
ruler.add_patterns(patterns)
doc = nlp("Apple is opening its first big office in San Francisco.")
print([(span.text, span.label_) for span in doc.spans["ruler"]])
```
The span ruler is designed to integrate with spaCy's existing pipeline
components and enhance the [SpanCategorizer](/api/spancat) and
[EntityRecognizer](/api/entityrecognizer). The `overwrite` setting determines
whether the existing annotation in `doc.spans` or `doc.ents` is preserved.
Because overlapping entities are not allowed for `doc.ents`, the entities are
always filtered, using [`util.filter_spans`](/api/top-level#util.filter_spans)
by default. See the [`SpanRuler` API docs](/api/spanruler) for more information
about how to customize the sorting and filtering of matched spans.
```python
### {executable="true"}
import spacy
nlp = spacy.load("en_core_web_sm")
# only annotate doc.ents, not doc.spans
config = {"spans_key": None, "annotate_ents": True, "overwrite": False}
ruler = nlp.add_pipe("span_ruler", config=config)
patterns = [{"label": "ORG", "pattern": "MyCorp Inc."}]
ruler.add_patterns(patterns)
doc = nlp("MyCorp Inc. is a company in the U.S.")
print([(ent.text, ent.label_) for ent in doc.ents])
```
### Using pattern files {#spanruler-files}
You can save patterns in a JSONL file (newline-delimited JSON) to load with
[`SpanRuler.initialize`](/api/spanruler#initialize) or
[`SpanRuler.add_patterns`](/api/spanruler#add_patterns).
```json
### patterns.jsonl
{"label": "ORG", "pattern": "Apple"}
{"label": "GPE", "pattern": [{"LOWER": "san"}, {"LOWER": "francisco"}]}
```
```python
import srsly
patterns = srsly.read_jsonl("patterns.jsonl")
ruler = nlp.add_pipe("span_ruler")
ruler.add_patterns(patterns)
```
<Infobox title="Important note" variant="warning">
Unlike the entity ruler, the span ruler cannot load patterns on initialization
with `SpanRuler(patterns=patterns)` or directly from a JSONL file path with
`SpanRuler.from_disk(jsonl_path)`. Patterns should be loaded from the JSONL file
separately and then added through
[`SpanRuler.initialize`](/api/spanruler#initialize) or
[`SpanRuler.add_patterns`](/api/spanruler#add_patterns) as shown above.
</Infobox>
## Combining models and rules {#models-rules} ## Combining models and rules {#models-rules}
You can combine statistical and rule-based components in a variety of ways. You can combine statistical and rule-based components in a variety of ways.

View File

@ -203,11 +203,14 @@ the data to and from a JSON file.
```python ```python
### {highlight="16-23,25-30"} ### {highlight="16-23,25-30"}
import json
from spacy import Language
from spacy.util import ensure_path from spacy.util import ensure_path
@Language.factory("my_component") @Language.factory("my_component")
class CustomComponent: class CustomComponent:
def __init__(self): def __init__(self, nlp: Language, name: str = "my_component"):
self.name = name
self.data = [] self.data = []
def __call__(self, doc): def __call__(self, doc):
@ -231,7 +234,7 @@ class CustomComponent:
# This will receive the directory path + /my_component # This will receive the directory path + /my_component
data_path = path / "data.json" data_path = path / "data.json"
with data_path.open("r", encoding="utf8") as f: with data_path.open("r", encoding="utf8") as f:
self.data = json.loads(f) self.data = json.load(f)
return self return self
``` ```

View File

@ -103,6 +103,7 @@
{ "text": "SentenceRecognizer", "url": "/api/sentencerecognizer" }, { "text": "SentenceRecognizer", "url": "/api/sentencerecognizer" },
{ "text": "Sentencizer", "url": "/api/sentencizer" }, { "text": "Sentencizer", "url": "/api/sentencizer" },
{ "text": "SpanCategorizer", "url": "/api/spancategorizer" }, { "text": "SpanCategorizer", "url": "/api/spancategorizer" },
{ "text": "SpanRuler", "url": "/api/spanruler" },
{ "text": "Tagger", "url": "/api/tagger" }, { "text": "Tagger", "url": "/api/tagger" },
{ "text": "TextCategorizer", "url": "/api/textcategorizer" }, { "text": "TextCategorizer", "url": "/api/textcategorizer" },
{ "text": "Tok2Vec", "url": "/api/tok2vec" }, { "text": "Tok2Vec", "url": "/api/tok2vec" },
@ -123,6 +124,7 @@
{ {
"label": "Other", "label": "Other",
"items": [ "items": [
{ "text": "Attributes", "url": "/api/attributes" },
{ "text": "Corpus", "url": "/api/corpus" }, { "text": "Corpus", "url": "/api/corpus" },
{ "text": "KnowledgeBase", "url": "/api/kb" }, { "text": "KnowledgeBase", "url": "/api/kb" },
{ "text": "Lookups", "url": "/api/lookups" }, { "text": "Lookups", "url": "/api/lookups" },

View File

@ -1,5 +1,77 @@
{ {
"resources": [ "resources": [
{
"id": "spacyfishing",
"title": "spaCy fishing",
"slogan": "Named entity disambiguation and linking on Wikidata in spaCy with Entity-Fishing.",
"description": "A spaCy wrapper of Entity-Fishing for named entity disambiguation and linking against a Wikidata knowledge base.",
"github": "Lucaterre/spacyfishing",
"pip": "spacyfishing",
"code_example": [
"import spacy",
"text = 'Victor Hugo and Honoré de Balzac are French writers who lived in Paris.'",
"nlp = spacy.load('en_core_web_sm')",
"nlp.add_pipe('entityfishing')",
"doc = nlp(text)",
"for span in doc.ents:",
" print((ent.text, ent.label_, ent._.kb_qid, ent._.url_wikidata, ent._.nerd_score))",
"# ('Victor Hugo', 'PERSON', 'Q535', 'https://www.wikidata.org/wiki/Q535', 0.972)",
"# ('Honoré de Balzac', 'PERSON', 'Q9711', 'https://www.wikidata.org/wiki/Q9711', 0.9724)",
"# ('French', 'NORP', 'Q121842', 'https://www.wikidata.org/wiki/Q121842', 0.3739)",
"# ('Paris', 'GPE', 'Q90', 'https://www.wikidata.org/wiki/Q90', 0.5652)",
"## Set parameter `extra_info` to `True` and check also span._.description, span._.src_description, span._.normal_term, span._.other_ids"
],
"category": ["models", "pipeline"],
"tags": ["NER", "NEL"],
"author": "Lucas Terriel",
"author_links": {
"twitter": "TerreLuca",
"github": "Lucaterre"
}
},
{
"id": "aim-spacy",
"title": "Aim-spaCy",
"slogan": "Aim-spaCy is an Aim-based spaCy experiment tracker.",
"description": "Aim-spaCy helps to easily collect, store and explore training logs for spaCy, including: hyper-parameters, metrics and displaCy visualizations",
"github": "aimhubio/aim-spacy",
"pip": "aim-spacy",
"code_example": [
"https://github.com/aimhubio/aim-spacy/tree/master/examples"
],
"code_language": "python",
"url": "https://aimstack.io/spacy",
"thumb": "https://user-images.githubusercontent.com/13848158/172912427-ee9327ea-3cd8-47fa-8427-6c0d36cd831f.png",
"image": "https://user-images.githubusercontent.com/13848158/136364717-0939222c-55b6-44f0-ad32-d9ab749546e4.png",
"author": "AimStack",
"author_links": {
"twitter": "aimstackio",
"github": "aimhubio",
"website": "https://aimstack.io"
},
"category": ["visualizers"],
"tags": ["experiment-tracking", "visualization"]
},
{
"id": "spacy-report",
"title": "spacy-report",
"slogan": "Generates interactive reports for spaCy models.",
"description": "The goal of spacy-report is to offer static reports for spaCy models that help users make better decisions on how the models can be used.",
"github": "koaning/spacy-report",
"pip": "spacy-report",
"thumb": "https://github.com/koaning/spacy-report/raw/main/icon.png",
"image": "https://raw.githubusercontent.com/koaning/spacy-report/main/gif.gif",
"code_example": [
"python -m spacy report textcat training/model-best/ corpus/train.spacy corpus/dev.spacy"
],
"category": ["visualizers", "research"],
"author": "Vincent D. Warmerdam",
"author_links": {
"twitter": "fishnets88",
"github": "koaning",
"website": "https://koaning.io"
}
},
{ {
"id": "scrubadub_spacy", "id": "scrubadub_spacy",
"title": "scrubadub_spacy", "title": "scrubadub_spacy",
@ -12,7 +84,7 @@
"code_language": "python", "code_language": "python",
"author": "Leap Beyond", "author": "Leap Beyond",
"author_links": { "author_links": {
"github": "https://github.com/LeapBeyond", "github": "LeapBeyond",
"website": "https://leapbeyond.ai" "website": "https://leapbeyond.ai"
}, },
"code_example": [ "code_example": [
@ -35,8 +107,8 @@
"code_language": "python", "code_language": "python",
"author": "Peter Baumgartner", "author": "Peter Baumgartner",
"author_links": { "author_links": {
"twitter" : "https://twitter.com/pmbaumgartner", "twitter" : "pmbaumgartner",
"github": "https://github.com/pmbaumgartner", "github": "pmbaumgartner",
"website": "https://www.peterbaumgartner.com/" "website": "https://www.peterbaumgartner.com/"
}, },
"code_example": [ "code_example": [
@ -55,8 +127,8 @@
"code_language": "python", "code_language": "python",
"author": "Explosion", "author": "Explosion",
"author_links": { "author_links": {
"twitter" : "https://twitter.com/explosion_ai", "twitter" : "explosion_ai",
"github": "https://github.com/explosion", "github": "explosion",
"website": "https://explosion.ai/" "website": "https://explosion.ai/"
}, },
"code_example": [ "code_example": [
@ -528,8 +600,8 @@
"code_language": "python", "code_language": "python",
"author": "Keith Rozario", "author": "Keith Rozario",
"author_links": { "author_links": {
"twitter" : "https://twitter.com/keithrozario", "twitter" : "keithrozario",
"github": "https://github.com/keithrozario", "github": "keithrozario",
"website": "https://www.keithrozario.com" "website": "https://www.keithrozario.com"
}, },
"code_example": [ "code_example": [
@ -2252,7 +2324,7 @@
"author": "Daniel Whitenack & Chris Benson", "author": "Daniel Whitenack & Chris Benson",
"author_links": { "author_links": {
"website": "https://changelog.com/practicalai", "website": "https://changelog.com/practicalai",
"twitter": "https://twitter.com/PracticalAIFM" "twitter": "PracticalAIFM"
}, },
"category": ["podcasts"] "category": ["podcasts"]
}, },
@ -2799,13 +2871,13 @@
"id": "holmes", "id": "holmes",
"title": "Holmes", "title": "Holmes",
"slogan": "Information extraction from English and German texts based on predicate logic", "slogan": "Information extraction from English and German texts based on predicate logic",
"github": "msg-systems/holmes-extractor", "github": "explosion/holmes-extractor",
"url": "https://github.com/msg-systems/holmes-extractor", "url": "https://github.com/explosion/holmes-extractor",
"description": "Holmes is a Python 3 library that supports a number of use cases involving information extraction from English and German texts, including chatbot, structural extraction, topic matching and supervised document classification. There is a [website demonstrating intelligent search based on topic matching](https://holmes-demo.xt.msg.team).", "description": "Holmes is a Python 3 library that supports a number of use cases involving information extraction from English and German texts, including chatbot, structural extraction, topic matching and supervised document classification. There is a [website demonstrating intelligent search based on topic matching](https://demo.holmes.prod.demos.explosion.services).",
"pip": "holmes-extractor", "pip": "holmes-extractor",
"category": ["conversational", "standalone"], "category": ["pipeline", "standalone"],
"tags": ["chatbots", "text-processing"], "tags": ["chatbots", "text-processing"],
"thumb": "https://raw.githubusercontent.com/msg-systems/holmes-extractor/master/docs/holmes_thumbnail.png", "thumb": "https://raw.githubusercontent.com/explosion/holmes-extractor/master/docs/holmes_thumbnail.png",
"code_example": [ "code_example": [
"import holmes_extractor as holmes", "import holmes_extractor as holmes",
"holmes_manager = holmes.Manager(model='en_core_web_lg')", "holmes_manager = holmes.Manager(model='en_core_web_lg')",

View File

@ -24,7 +24,6 @@ const CUDA = {
'11.3': 'cuda113', '11.3': 'cuda113',
'11.4': 'cuda114', '11.4': 'cuda114',
'11.5': 'cuda115', '11.5': 'cuda115',
'11.6': 'cuda116',
} }
const LANG_EXTRAS = ['ja'] // only for languages with models const LANG_EXTRAS = ['ja'] // only for languages with models