Merge pull request #11035 from danieldk/merge-master-v4-20220627-2

Merge `master` into `v4`
Daniël de Kok 2022-06-27 19:20:21 +02:00 committed by GitHub
commit 851a7ca4fa
43 changed files with 685 additions and 136 deletions

View File

@@ -64,12 +64,12 @@ steps:
     displayName: "Run GPU tests"
     condition: eq(${{ parameters.gpu }}, true)
-  - script: |
-      python -m spacy download ca_core_news_sm
-      python -m spacy download ca_core_news_md
-      python -c "import spacy; nlp=spacy.load('ca_core_news_sm'); doc=nlp('test')"
-    displayName: 'Test download CLI'
-    condition: eq(variables['python_version'], '3.8')
+  # - script: |
+  #     python -m spacy download ca_core_news_sm
+  #     python -m spacy download ca_core_news_md
+  #     python -c "import spacy; nlp=spacy.load('ca_core_news_sm'); doc=nlp('test')"
+  #   displayName: 'Test download CLI'
+  #   condition: eq(variables['python_version'], '3.8')
   - script: |
       python -m spacy convert extra/example_data/ner_example_data/ner-token-per-line-conll2003.json .
@@ -93,17 +93,17 @@ steps:
     displayName: 'Test train CLI'
     condition: eq(variables['python_version'], '3.8')
-  - script: |
-      python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_sm'}; config.to_disk('ner_source_sm.cfg')"
-      PYTHONWARNINGS="error,ignore::DeprecationWarning" python -m spacy assemble ner_source_sm.cfg output_dir
-    displayName: 'Test assemble CLI'
-    condition: eq(variables['python_version'], '3.8')
+  # - script: |
+  #     python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_sm'}; config.to_disk('ner_source_sm.cfg')"
+  #     PYTHONWARNINGS="error,ignore::DeprecationWarning" python -m spacy assemble ner_source_sm.cfg output_dir
+  #   displayName: 'Test assemble CLI'
+  #   condition: eq(variables['python_version'], '3.8')
+  #
-  - script: |
-      python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_md'}; config.to_disk('ner_source_md.cfg')"
-      python -m spacy assemble ner_source_md.cfg output_dir 2>&1 | grep -q W113
-    displayName: 'Test assemble CLI vectors warning'
-    condition: eq(variables['python_version'], '3.8')
+  # - script: |
+  #     python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_md'}; config.to_disk('ner_source_md.cfg')"
+  #     python -m spacy assemble ner_source_md.cfg output_dir 2>&1 | grep -q W113
+  #   displayName: 'Test assemble CLI vectors warning'
+  #   condition: eq(variables['python_version'], '3.8')
   - script: |
       python .github/validate_universe_json.py website/meta/universe.json
@@ -111,7 +111,7 @@ steps:
     condition: eq(variables['python_version'], '3.8')
   - script: |
-      ${{ parameters.prefix }} python -m pip install thinc-apple-ops
+      ${{ parameters.prefix }} python -m pip install --pre thinc-apple-ops
       ${{ parameters.prefix }} python -m pytest --pyargs spacy
     displayName: "Run CPU tests with thinc-apple-ops"
-    condition: and(startsWith(variables['imageName'], 'macos'), eq(variables['python.version'], '3.9'))
+    condition: and(startsWith(variables['imageName'], 'macos'), eq(variables['python.version'], '3.10'))

.github/contributors/Lucaterre.md (new file)
View File

@@ -0,0 +1,106 @@
# spaCy contributor agreement
This spaCy Contributor Agreement (**"SCA"**) is based on the
[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf).
The SCA applies to any contribution that you make to any product or project
managed by us (the **"project"**), and sets out the intellectual property rights
you grant to us in the contributed materials. The term **"us"** shall mean
[ExplosionAI GmbH](https://explosion.ai/legal). The term
**"you"** shall mean the person or entity identified below.
If you agree to be bound by these terms, fill in the information requested
below and include the filled-in version with your first pull request, under the
folder [`.github/contributors/`](/.github/contributors/). The name of the file
should be your GitHub username, with the extension `.md`. For example, the user
example_user would create the file `.github/contributors/example_user.md`.
Read this agreement carefully before signing. These terms and conditions
constitute a binding legal agreement.
## Contributor Agreement
1. The term "contribution" or "contributed materials" means any source code,
object code, patch, tool, sample, graphic, specification, manual,
documentation, or any other material posted or submitted by you to the project.
2. With respect to any worldwide copyrights, or copyright applications and
registrations, in your contribution:
* you hereby assign to us joint ownership, and to the extent that such
assignment is or becomes invalid, ineffective or unenforceable, you hereby
grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge,
royalty-free, unrestricted license to exercise all rights under those
copyrights. This includes, at our option, the right to sublicense these same
rights to third parties through multiple levels of sublicensees or other
licensing arrangements;
* you agree that each of us can do all things in relation to your
contribution as if each of us were the sole owners, and if one of us makes
a derivative work of your contribution, the one who makes the derivative
work (or has it made) will be the sole owner of that derivative work;
* you agree that you will not assert any moral rights in your contribution
against us, our licensees or transferees;
* you agree that we may register a copyright in your contribution and
exercise all ownership rights associated with it; and
* you agree that neither of us has any duty to consult with, obtain the
consent of, pay or render an accounting to the other for any use or
distribution of your contribution.
3. With respect to any patents you own, or that you can license without payment
to any third party, you hereby grant to us a perpetual, irrevocable,
non-exclusive, worldwide, no-charge, royalty-free license to:
* make, have made, use, sell, offer to sell, import, and otherwise transfer
your contribution in whole or in part, alone or in combination with or
included in any product, work or materials arising out of the project to
which your contribution was submitted, and
* at our option, to sublicense these same rights to third parties through
multiple levels of sublicensees or other licensing arrangements.
4. Except as set out above, you keep all right, title, and interest in your
contribution. The rights that you grant to us under these terms are effective
on the date you first submitted a contribution to us, even if your submission
took place before the date you sign these terms.
5. You covenant, represent, warrant and agree that:
* Each contribution that you submit is and shall be an original work of
authorship and you can legally grant the rights set out in this SCA;
* to the best of your knowledge, each contribution will not violate any
third party's copyrights, trademarks, patents, or other intellectual
property rights; and
* each contribution shall be in compliance with U.S. export control laws and
other applicable export and import laws. You agree to notify us if you
become aware of any circumstance which would make any of the foregoing
representations inaccurate in any respect. We may publicly disclose your
participation in the project, including the fact that you have signed the SCA.
6. This SCA is governed by the laws of the State of California and applicable
U.S. Federal law. Any choice of law rules will not apply.
7. Please place an “x” on one of the applicable statements below. Please do NOT
mark both statements:
* [x] I am signing on behalf of myself as an individual and no other person
or entity, including my employer, has or will have rights with respect to my
contributions.
* [ ] I am signing on behalf of my employer or a legal entity and I have the
actual authority to contractually bind that entity.
## Contributor Details
| Field | Entry |
|------------------------------- |---------------|
| Name | Lucas Terriel |
| Company name (if applicable) | |
| Title or role (if applicable) | |
| Date | 2022-06-20 |
| GitHub username | Lucaterre |
| Website (optional) | |

View File

@@ -455,6 +455,10 @@ Regression tests are tests that refer to bugs reported in specific issues. They
 The test suite also provides [fixtures](https://github.com/explosion/spaCy/blob/master/spacy/tests/conftest.py) for different language tokenizers that can be used as function arguments of the same name and will be passed in automatically. Those should only be used for tests related to those specific languages. We also have [test utility functions](https://github.com/explosion/spaCy/blob/master/spacy/tests/util.py) for common operations, like creating a temporary file.
+### Testing Cython Code
+
+If you're developing Cython code (`.pyx` files), those extensions will need to be built before the test runner can test that code - otherwise it's going to run the tests with stale code from the last time the extension was built. You can build the extensions locally with `python setup.py build_ext -i`.
+
 ### Constructing objects and state
 Test functions usually follow the same simple structure: they set up some state, perform the operation you want to test and `assert` conditions that you expect to be true, usually before and after the operation.

View File

@@ -5,7 +5,7 @@ requires = [
     "cymem>=2.0.2,<2.1.0",
     "preshed>=3.0.2,<3.1.0",
     "murmurhash>=0.28.0,<1.1.0",
-    "thinc>=8.1.0.dev0,<8.2.0",
+    "thinc>=8.1.0.dev3,<8.2.0",
     "pathy",
     "numpy>=1.15.0",
 ]

View File

@@ -3,7 +3,7 @@ spacy-legacy>=3.0.9,<3.1.0
 spacy-loggers>=1.0.0,<2.0.0
 cymem>=2.0.2,<2.1.0
 preshed>=3.0.2,<3.1.0
-thinc>=8.1.0.dev0,<8.2.0
+thinc>=8.1.0.dev3,<8.2.0
 ml_datasets>=0.2.0,<0.3.0
 murmurhash>=0.28.0,<1.1.0
 wasabi>=0.9.1,<1.1.0

View File

@@ -38,7 +38,7 @@ setup_requires =
     cymem>=2.0.2,<2.1.0
     preshed>=3.0.2,<3.1.0
     murmurhash>=0.28.0,<1.1.0
-    thinc>=8.1.0.dev0,<8.2.0
+    thinc>=8.1.0.dev3,<8.2.0
 install_requires =
     # Our libraries
     spacy-legacy>=3.0.9,<3.1.0
@@ -46,7 +46,7 @@ install_requires =
     murmurhash>=0.28.0,<1.1.0
     cymem>=2.0.2,<2.1.0
     preshed>=3.0.2,<3.1.0
-    thinc>=8.1.0.dev0,<8.2.0
+    thinc>=8.1.0.dev3,<8.2.0
     wasabi>=0.9.1,<1.1.0
     srsly>=2.4.3,<3.0.0
     catalogue>=2.0.6,<2.1.0
@@ -104,7 +104,7 @@ cuda114 =
 cuda115 =
     cupy-cuda115>=5.0.0b4,<11.0.0
 apple =
-    thinc-apple-ops>=0.0.4,<1.0.0
+    thinc-apple-ops>=0.1.0.dev0,<1.0.0
 # Language tokenizers with external dependencies
 ja =
     sudachipy>=0.5.2,!=0.6.1

View File

@@ -32,6 +32,7 @@ def load(
     *,
     vocab: Union[Vocab, bool] = True,
     disable: Iterable[str] = util.SimpleFrozenList(),
+    enable: Iterable[str] = util.SimpleFrozenList(),
     exclude: Iterable[str] = util.SimpleFrozenList(),
     config: Union[Dict[str, Any], Config] = util.SimpleFrozenDict(),
 ) -> Language:
@@ -42,6 +43,8 @@ def load(
     disable (Iterable[str]): Names of pipeline components to disable. Disabled
         pipes will be loaded but they won't be run unless you explicitly
         enable them by calling nlp.enable_pipe.
+    enable (Iterable[str]): Names of pipeline components to enable. All other
+        pipes will be disabled (but can be enabled later using nlp.enable_pipe).
     exclude (Iterable[str]): Names of pipeline components to exclude. Excluded
         components won't be loaded.
     config (Dict[str, Any] / Config): Config overrides as nested dict or dict
@@ -49,7 +52,12 @@ def load(
     RETURNS (Language): The loaded nlp object.
     """
     return util.load_model(
-        name, vocab=vocab, disable=disable, exclude=exclude, config=config
+        name,
+        vocab=vocab,
+        disable=disable,
+        enable=enable,
+        exclude=exclude,
+        config=config,
     )
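The new `enable` argument mirrors `disable` but inverts the logic: everything not listed is disabled on load. A minimal sketch of the intended usage, assuming the `en_core_web_sm` package is installed (the pipe names are illustrative):

import spacy

# Load the pipeline but run only the tagger; every other component is
# loaded in a disabled state and can be switched back on later.
nlp = spacy.load("en_core_web_sm", enable=["tagger"])
assert "parser" in nlp.disabled

# Disabled components can still be re-enabled after loading.
nlp.enable_pipe("parser")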

View File

@@ -1,6 +1,6 @@
 # fmt: off
 __title__ = "spacy"
-__version__ = "3.3.0"
+__version__ = "3.4.0"
 __download_url__ = "https://github.com/explosion/spacy-models/releases/download"
 __compatibility__ = "https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json"
 __projects__ = "https://github.com/explosion/projects"

View File

@@ -10,7 +10,7 @@ import math
 from ._util import app, Arg, Opt, show_validation_error, parse_config_overrides
 from ._util import import_code, debug_cli
-from ..training import Example
+from ..training import Example, remove_bilu_prefix
 from ..training.initialize import get_sourced_components
 from ..schemas import ConfigSchemaTraining
 from ..pipeline._parser_internals import nonproj
@@ -361,7 +361,7 @@ def debug_data(
             if label != "-"
         ]
         labels_with_counts = _format_labels(labels_with_counts, counts=True)
-        msg.text(f"Labels in train data: {_format_labels(labels)}", show=verbose)
+        msg.text(f"Labels in train data: {labels_with_counts}", show=verbose)
         missing_labels = model_labels - labels
         if missing_labels:
             msg.warn(
@@ -758,9 +758,9 @@ def _compile_gold(
                 # "Illegal" whitespace entity
                 data["ws_ents"] += 1
             if label.startswith(("B-", "U-")):
-                combined_label = label.split("-")[1]
+                combined_label = remove_bilu_prefix(label)
                 data["ner"][combined_label] += 1
-            if sent_starts[i] == True and label.startswith(("I-", "L-")):
+            if sent_starts[i] and label.startswith(("I-", "L-")):
                 data["boundary_cross_ents"] += 1
             elif label == "-":
                 data["ner"]["-"] += 1
@@ -908,7 +908,7 @@ def _get_examples_without_label(
     for eg in data:
         if component == "ner":
             labels = [
-                label.split("-")[1]
+                remove_bilu_prefix(label)
                 for label in eg.get_aligned_ner()
                 if label not in ("O", "-", None)
             ]

View File

@@ -10,6 +10,7 @@ from jinja2 import Template
 from .. import util
 from ..language import DEFAULT_CONFIG_PRETRAIN_PATH
 from ..schemas import RecommendationSchema
+from ..util import SimpleFrozenList
 from ._util import init_cli, Arg, Opt, show_validation_error, COMMAND
 from ._util import string_to_list, import_code
@@ -24,16 +25,30 @@ class Optimizations(str, Enum):
     accuracy = "accuracy"

+class InitValues:
+    """
+    Default values for initialization. Dedicated class to allow synchronized
+    default values for init_config_cli() and init_config(), i.e. for
+    initialization calls via the CLI and Python, respectively.
+    """
+
+    lang = "en"
+    pipeline = SimpleFrozenList(["tagger", "parser", "ner"])
+    optimize = Optimizations.efficiency
+    gpu = False
+    pretraining = False
+    force_overwrite = False

 @init_cli.command("config")
 def init_config_cli(
     # fmt: off
     output_file: Path = Arg(..., help="File to save the config to or - for stdout (will only output config and no additional logging info)", allow_dash=True),
-    lang: str = Opt("en", "--lang", "-l", help="Two-letter code of the language to use"),
-    pipeline: str = Opt("tagger,parser,ner", "--pipeline", "-p", help="Comma-separated names of trainable pipeline components to include (without 'tok2vec' or 'transformer')"),
-    optimize: Optimizations = Opt(Optimizations.efficiency.value, "--optimize", "-o", help="Whether to optimize for efficiency (faster inference, smaller model, lower memory consumption) or higher accuracy (potentially larger and slower model). This will impact the choice of architecture, pretrained weights and related hyperparameters."),
-    gpu: bool = Opt(False, "--gpu", "-G", help="Whether the model can run on GPU. This will impact the choice of architecture, pretrained weights and related hyperparameters."),
-    pretraining: bool = Opt(False, "--pretraining", "-pt", help="Include config for pretraining (with 'spacy pretrain')"),
-    force_overwrite: bool = Opt(False, "--force", "-F", help="Force overwriting the output file"),
+    lang: str = Opt(InitValues.lang, "--lang", "-l", help="Two-letter code of the language to use"),
+    pipeline: str = Opt(",".join(InitValues.pipeline), "--pipeline", "-p", help="Comma-separated names of trainable pipeline components to include (without 'tok2vec' or 'transformer')"),
+    optimize: Optimizations = Opt(InitValues.optimize, "--optimize", "-o", help="Whether to optimize for efficiency (faster inference, smaller model, lower memory consumption) or higher accuracy (potentially larger and slower model). This will impact the choice of architecture, pretrained weights and related hyperparameters."),
+    gpu: bool = Opt(InitValues.gpu, "--gpu", "-G", help="Whether the model can run on GPU. This will impact the choice of architecture, pretrained weights and related hyperparameters."),
+    pretraining: bool = Opt(InitValues.pretraining, "--pretraining", "-pt", help="Include config for pretraining (with 'spacy pretrain')"),
+    force_overwrite: bool = Opt(InitValues.force_overwrite, "--force", "-F", help="Force overwriting the output file"),
     # fmt: on
 ):
     """
@@ -133,11 +148,11 @@ def fill_config(
 def init_config(
     *,
-    lang: str,
-    pipeline: List[str],
-    optimize: str,
-    gpu: bool,
-    pretraining: bool = False,
+    lang: str = InitValues.lang,
+    pipeline: List[str] = InitValues.pipeline,
+    optimize: str = InitValues.optimize,
+    gpu: bool = InitValues.gpu,
+    pretraining: bool = InitValues.pretraining,
     silent: bool = True,
 ) -> Config:
     msg = Printer(no_print=silent)
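With the `InitValues` defaults in place, `init_config()` can be called from Python without arguments and stays in sync with the CLI defaults; a quick sketch (the output path is illustrative):

from spacy.cli.init_config import init_config

# Uses the InitValues defaults: lang="en", pipeline=["tagger", "parser", "ner"],
# optimize="efficiency", gpu=False, pretraining=False.
config = init_config()
config.to_disk("config.cfg")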

View File

@@ -932,6 +932,8 @@ class Errors(metaclass=ErrorsWithCodes):
     E1040 = ("Doc.from_json requires all tokens to have the same attributes. "
              "Some tokens do not contain annotation for: {partial_attrs}")
     E1041 = ("Expected a string, Doc, or bytes as input, but got: {type}")
+    E1042 = ("Function was called with `{arg1}`={arg1_values} and "
+             "`{arg2}`={arg2_values} but these arguments are conflicting.")

 # Deprecated model shortcuts, only used in errors and warnings

View File

@@ -93,14 +93,14 @@ cdef class KnowledgeBase:
         self.vocab = vocab
         self._create_empty_vectors(dummy_hash=self.vocab.strings[""])

-    def initialize_entities(self, int64_t nr_entities):
+    def _initialize_entities(self, int64_t nr_entities):
         self._entry_index = PreshMap(nr_entities + 1)
         self._entries = entry_vec(nr_entities + 1)

-    def initialize_vectors(self, int64_t nr_entities):
+    def _initialize_vectors(self, int64_t nr_entities):
         self._vectors_table = float_matrix(nr_entities + 1)

-    def initialize_aliases(self, int64_t nr_aliases):
+    def _initialize_aliases(self, int64_t nr_aliases):
         self._alias_index = PreshMap(nr_aliases + 1)
         self._aliases_table = alias_vec(nr_aliases + 1)
@@ -155,8 +155,8 @@ cdef class KnowledgeBase:
             raise ValueError(Errors.E140)
         nr_entities = len(set(entity_list))
-        self.initialize_entities(nr_entities)
-        self.initialize_vectors(nr_entities)
+        self._initialize_entities(nr_entities)
+        self._initialize_vectors(nr_entities)
         i = 0
         cdef KBEntryC entry
@@ -388,9 +388,9 @@ cdef class KnowledgeBase:
             nr_entities = header[0]
             nr_aliases = header[1]
             entity_vector_length = header[2]
-            self.initialize_entities(nr_entities)
-            self.initialize_vectors(nr_entities)
-            self.initialize_aliases(nr_aliases)
+            self._initialize_entities(nr_entities)
+            self._initialize_vectors(nr_entities)
+            self._initialize_aliases(nr_aliases)
             self.entity_vector_length = entity_vector_length

         def deserialize_vectors(b):
@@ -512,8 +512,8 @@ cdef class KnowledgeBase:
         cdef int64_t entity_vector_length
         reader.read_header(&nr_entities, &entity_vector_length)
-        self.initialize_entities(nr_entities)
-        self.initialize_vectors(nr_entities)
+        self._initialize_entities(nr_entities)
+        self._initialize_vectors(nr_entities)
         self.entity_vector_length = entity_vector_length

         # STEP 1: load entity vectors
@@ -552,7 +552,7 @@ cdef class KnowledgeBase:
         # STEP 3: load aliases
         cdef int64_t nr_aliases
         reader.read_alias_length(&nr_aliases)
-        self.initialize_aliases(nr_aliases)
+        self._initialize_aliases(nr_aliases)

         cdef int64_t nr_candidates
         cdef vector[int64_t] entry_indices

View File

@@ -1,4 +1,4 @@
-from typing import Iterator, Optional, Any, Dict, Callable, Iterable
+from typing import Iterator, Optional, Any, Dict, Callable, Iterable, Collection
 from typing import Union, Tuple, List, Set, Pattern, Sequence
 from typing import NoReturn, TYPE_CHECKING, TypeVar, cast, overload
@@ -1694,6 +1694,7 @@ class Language:
         *,
         vocab: Union[Vocab, bool] = True,
         disable: Iterable[str] = SimpleFrozenList(),
+        enable: Iterable[str] = SimpleFrozenList(),
         exclude: Iterable[str] = SimpleFrozenList(),
         meta: Dict[str, Any] = SimpleFrozenDict(),
         auto_fill: bool = True,
@@ -1708,6 +1709,8 @@ class Language:
         disable (Iterable[str]): Names of pipeline components to disable.
             Disabled pipes will be loaded but they won't be run unless you
             explicitly enable them by calling nlp.enable_pipe.
+        enable (Iterable[str]): Names of pipeline components to enable. All other
+            pipes will be disabled (and can be enabled using `nlp.enable_pipe`).
         exclude (Iterable[str]): Names of pipeline components to exclude.
             Excluded components won't be loaded.
         meta (Dict[str, Any]): Meta overrides for nlp.meta.
@@ -1861,8 +1864,15 @@ class Language:
         # Restore the original vocab after sourcing if necessary
         if vocab_b is not None:
             nlp.vocab.from_bytes(vocab_b)
-        disabled_pipes = [*config["nlp"]["disabled"], *disable]
+
+        # Resolve disabled/enabled settings.
+        disabled_pipes = cls._resolve_component_status(
+            [*config["nlp"]["disabled"], *disable],
+            [*config["nlp"].get("enabled", []), *enable],
+            config["nlp"]["pipeline"],
+        )
         nlp._disabled = set(p for p in disabled_pipes if p not in exclude)
+
         nlp.batch_size = config["nlp"]["batch_size"]
         nlp.config = filled if auto_fill else config
         if after_pipeline_creation is not None:
@@ -2014,6 +2024,42 @@ class Language:
             serializers["vocab"] = lambda p: self.vocab.to_disk(p, exclude=exclude)
         util.to_disk(path, serializers, exclude)

+    @staticmethod
+    def _resolve_component_status(
+        disable: Iterable[str], enable: Iterable[str], pipe_names: Collection[str]
+    ) -> Tuple[str, ...]:
+        """Checks that the `disable` and `enable` values are consistent and
+        resolves them to a single set of disabled components. Raises an error
+        in case of inconsistency.
+
+        disable (Iterable[str]): Names of components or serialization fields to disable.
+        enable (Iterable[str]): Names of pipeline components to enable.
+        pipe_names (Iterable[str]): Names of all pipeline components.
+        RETURNS (Tuple[str, ...]): Names of components to disable, resolved from
+            the specified `enable` and `disable` values.
+        """
+        if disable is not None and isinstance(disable, str):
+            disable = [disable]
+        to_disable = disable
+
+        if enable:
+            to_disable = [
+                pipe_name for pipe_name in pipe_names if pipe_name not in enable
+            ]
+            if disable and disable != to_disable:
+                raise ValueError(
+                    Errors.E1042.format(
+                        arg1="enable",
+                        arg2="disable",
+                        arg1_values=enable,
+                        arg2_values=disable,
+                    )
+                )
+
+        return tuple(to_disable)
+
     def from_disk(
         self,
         path: Union[str, Path],
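The resolution semantics can be illustrated in isolation. `_resolve_component_status` is an internal static method, so the direct calls below are only a sketch to document the behavior:

from spacy.language import Language

pipes = ["tok2vec", "tagger", "parser", "ner"]

# `enable` implies disabling everything not listed.
assert Language._resolve_component_status([], ["tagger"], pipes) == ("tok2vec", "parser", "ner")

# A `disable` list matching the complement of `enable` is accepted as consistent.
assert Language._resolve_component_status(["tok2vec", "parser", "ner"], ["tagger"], pipes) == ("tok2vec", "parser", "ner")

# Conflicting values raise E1042:
# Language._resolve_component_status(["tagger"], ["tagger"], pipes)  # ValueError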

View File

@@ -22,9 +22,11 @@ def forward(model, X, is_train):
     nP = model.get_dim("nP")
     nI = model.get_dim("nI")
     W = model.get_param("W")
-    Yf = model.ops.gemm(X, W.reshape((nF * nO * nP, nI)), trans2=True)
+    # Preallocate array for layer output, including padding.
+    Yf = model.ops.alloc2f(X.shape[0] + 1, nF * nO * nP, zeros=False)
+    model.ops.gemm(X, W.reshape((nF * nO * nP, nI)), trans2=True, out=Yf[1:])
     Yf = Yf.reshape((Yf.shape[0], nF, nO, nP))
-    Yf = model.ops.xp.vstack((model.get_param("pad"), Yf))
+    Yf[0] = model.get_param("pad")

     def backward(dY_ids):
         # This backprop is particularly tricky, because we get back a different
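The rewrite avoids `vstack`, which allocates a fresh array and copies the whole GEMM output just to prepend the padding row; instead the output buffer is allocated once with an extra leading row. The same pattern in plain numpy terms, with illustrative shapes:

import numpy

X = numpy.random.rand(8, 4).astype("float32")  # (n_tokens, nI)
W = numpy.random.rand(6, 4).astype("float32")  # (nF * nO * nP, nI)
pad = numpy.zeros(6, dtype="float32")          # padding vector

# Before: compute, then copy everything again to prepend the padding row.
Yf_old = numpy.vstack((pad, X @ W.T))

# After: allocate once and write the GEMM result into rows 1..n in place.
Yf_new = numpy.empty((X.shape[0] + 1, 6), dtype="float32")
numpy.matmul(X, W.T, out=Yf_new[1:])
Yf_new[0] = pad

assert numpy.allclose(Yf_old, Yf_new)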

View File

@@ -4,6 +4,7 @@ from libc.math cimport exp
 from libc.string cimport memset, memcpy
 from libc.stdlib cimport calloc, free, realloc
 from thinc.backends.linalg cimport Vec, VecVec
+from thinc.backends.cblas cimport saxpy, sgemm

 import numpy
 import numpy.random
@@ -112,7 +113,7 @@ cdef void predict_states(CBlas cblas, ActivationsC* A, StateC** states,
         memcpy(A.scores, A.hiddens, n.states * n.classes * sizeof(float))
     else:
         # Compute hidden-to-output
-        cblas.sgemm()(False, True, n.states, n.classes, n.hiddens,
+        sgemm(cblas)(False, True, n.states, n.classes, n.hiddens,
                       1.0, <const float *>A.hiddens, n.hiddens,
                       <const float *>W.hidden_weights, n.hiddens,
                       0.0, A.scores, n.classes)
@@ -147,7 +148,7 @@ cdef void sum_state_features(CBlas cblas, float* output,
         else:
             idx = token_ids[f] * id_stride + f*O
             feature = &cached[idx]
-            cblas.saxpy()(O, one, <const float*>feature, 1, &output[b*O], 1)
+            saxpy(cblas)(O, one, <const float*>feature, 1, &output[b*O], 1)
         token_ids += F

View File

@@ -10,6 +10,7 @@ from ...strings cimport hash_string
 from ...structs cimport TokenC
 from ...tokens.doc cimport Doc, set_children_from_heads
 from ...tokens.token cimport MISSING_DEP
+from ...training import split_bilu_label
 from ...training.example cimport Example
 from .stateclass cimport StateClass
 from ._state cimport StateC, ArcC
@@ -687,7 +688,7 @@ cdef class ArcEager(TransitionSystem):
             return self.c[name_or_id]
         name = name_or_id
         if '-' in name:
-            move_str, label_str = name.split('-', 1)
+            move_str, label_str = split_bilu_label(name)
             label = self.strings[label_str]
         else:
             move_str = name

View File

@@ -15,6 +15,7 @@ from ...typedefs cimport weight_t, attr_t
 from ...lexeme cimport Lexeme
 from ...attrs cimport IS_SPACE
 from ...structs cimport TokenC, SpanC
+from ...training import split_bilu_label
 from ...training.example cimport Example
 from .stateclass cimport StateClass
 from ._state cimport StateC
@@ -180,7 +181,7 @@ cdef class BiluoPushDown(TransitionSystem):
         if name == '-' or name == '' or name is None:
             return Transition(clas=0, move=MISSING, label=0, score=0)
         elif '-' in name:
-            move_str, label_str = name.split('-', 1)
+            move_str, label_str = split_bilu_label(name)
             # Deprecated, hacky way to denote 'not this entity'
             if label_str.startswith('!'):
                 raise ValueError(Errors.E869.format(label=name))

View File

@@ -12,6 +12,7 @@ from ..language import Language
 from ._parser_internals import nonproj
 from ._parser_internals.nonproj import DELIMITER
 from ..scorer import Scorer
+from ..training import remove_bilu_prefix
 from ..util import registry
@@ -314,7 +315,7 @@ cdef class DependencyParser(Parser):
         # Get the labels from the model by looking at the available moves
         for move in self.move_names:
             if "-" in move:
-                label = move.split("-")[1]
+                label = remove_bilu_prefix(move)
                 if DELIMITER in label:
                     label = label.split(DELIMITER)[1]
                 labels.add(label)

View File

@@ -6,10 +6,10 @@ from thinc.api import Model, Config

 from ._parser_internals.transition_system import TransitionSystem
 from .transition_parser cimport Parser
 from ._parser_internals.ner cimport BiluoPushDown
 from ..language import Language
 from ..scorer import get_ner_prf, PRFScore
 from ..util import registry
+from ..training import remove_bilu_prefix

 default_model_config = """
@@ -242,7 +242,7 @@ cdef class EntityRecognizer(Parser):
     def labels(self):
         # Get the labels from the model by looking at the available moves, e.g.
         # B-PERSON, I-PERSON, L-PERSON, U-PERSON
-        labels = set(move.split("-")[1] for move in self.move_names
+        labels = set(remove_bilu_prefix(move) for move in self.move_names
                      if move[0] in ("B", "I", "L", "U"))
         return tuple(sorted(labels))

View File

@@ -476,6 +476,17 @@ def test_matcher_extension_set_membership(en_vocab):
     assert len(matches) == 0

+@pytest.mark.xfail(reason="IN predicate must handle sequence values in extensions")
+def test_matcher_extension_in_set_predicate(en_vocab):
+    matcher = Matcher(en_vocab)
+    Token.set_extension("ext", default=[])
+    pattern = [{"_": {"ext": {"IN": ["A", "C"]}}}]
+    matcher.add("M", [pattern])
+    doc = Doc(en_vocab, words=["a", "b", "c"])
+    doc[0]._.ext = ["A", "B"]
+    assert len(matcher(doc)) == 1
+
 def test_matcher_basic_check(en_vocab):
     matcher = Matcher(en_vocab)
     # Potential mistake: pass in pattern instead of list of patterns

View File

@@ -10,7 +10,7 @@ from spacy.lang.it import Italian
 from spacy.language import Language
 from spacy.lookups import Lookups
 from spacy.pipeline._parser_internals.ner import BiluoPushDown
-from spacy.training import Example, iob_to_biluo
+from spacy.training import Example, iob_to_biluo, split_bilu_label
 from spacy.tokens import Doc, Span
 from spacy.vocab import Vocab
 import logging
@@ -110,6 +110,9 @@ def test_issue2385():
     # maintain support for iob2 format
     tags3 = ("B-PERSON", "I-PERSON", "B-PERSON")
     assert iob_to_biluo(tags3) == ["B-PERSON", "L-PERSON", "U-PERSON"]
+    # ensure it works with hyphens in the name
+    tags4 = ("B-MULTI-PERSON", "I-MULTI-PERSON", "B-MULTI-PERSON")
+    assert iob_to_biluo(tags4) == ["B-MULTI-PERSON", "L-MULTI-PERSON", "U-MULTI-PERSON"]

 @pytest.mark.issue(2800)
@@ -154,6 +157,24 @@ def test_issue3209():
     assert ner2.move_names == move_names

+def test_labels_from_BILUO():
+    """Test that labels are inferred correctly when there's a - in label."""
+    nlp = English()
+    ner = nlp.add_pipe("ner")
+    ner.add_label("LARGE-ANIMAL")
+    nlp.initialize()
+    move_names = [
+        "O",
+        "B-LARGE-ANIMAL",
+        "I-LARGE-ANIMAL",
+        "L-LARGE-ANIMAL",
+        "U-LARGE-ANIMAL",
+    ]
+    labels = {"LARGE-ANIMAL"}
+    assert ner.move_names == move_names
+    assert set(ner.labels) == labels
+
 @pytest.mark.issue(4267)
 def test_issue4267():
     """Test that running an entity_ruler after ner gives consistent results"""
@@ -298,7 +319,7 @@ def test_oracle_moves_missing_B(en_vocab):
         elif tag == "O":
             moves.add_action(move_types.index("O"), "")
         else:
-            action, label = tag.split("-")
+            action, label = split_bilu_label(tag)
             moves.add_action(move_types.index("B"), label)
             moves.add_action(move_types.index("I"), label)
             moves.add_action(move_types.index("L"), label)
@@ -324,7 +345,7 @@ def test_oracle_moves_whitespace(en_vocab):
         elif tag == "O":
             moves.add_action(move_types.index("O"), "")
         else:
-            action, label = tag.split("-")
+            action, label = split_bilu_label(tag)
             moves.add_action(move_types.index(action), label)
     moves.get_oracle_sequence(example)

View File

@@ -49,7 +49,9 @@ def test_parser_contains_cycle(tree, cyclic_tree, partial_tree, multirooted_tree
     assert contains_cycle(multirooted_tree) is None

-def test_parser_is_nonproj_arc(cyclic_tree, nonproj_tree, partial_tree, multirooted_tree):
+def test_parser_is_nonproj_arc(
+    cyclic_tree, nonproj_tree, partial_tree, multirooted_tree
+):
     assert is_nonproj_arc(0, nonproj_tree) is False
     assert is_nonproj_arc(1, nonproj_tree) is False
     assert is_nonproj_arc(2, nonproj_tree) is False
@@ -62,7 +64,9 @@ def test_parser_is_nonproj_arc(cyclic_tree, nonproj_tree, partial_tree, multiroo
     assert is_nonproj_arc(7, partial_tree) is False
     assert is_nonproj_arc(17, multirooted_tree) is False
     assert is_nonproj_arc(16, multirooted_tree) is True
-    with pytest.raises(ValueError, match=r'Found cycle in dependency graph: \[1, 2, 2, 4, 5, 3, 2\]'):
+    with pytest.raises(
+        ValueError, match=r"Found cycle in dependency graph: \[1, 2, 2, 4, 5, 3, 2\]"
+    ):
         is_nonproj_arc(6, cyclic_tree)
@@ -73,7 +77,9 @@ def test_parser_is_nonproj_tree(
     assert is_nonproj_tree(nonproj_tree) is True
     assert is_nonproj_tree(partial_tree) is False
     assert is_nonproj_tree(multirooted_tree) is True
-    with pytest.raises(ValueError, match=r'Found cycle in dependency graph: \[1, 2, 2, 4, 5, 3, 2\]'):
+    with pytest.raises(
+        ValueError, match=r"Found cycle in dependency graph: \[1, 2, 2, 4, 5, 3, 2\]"
+    ):
         is_nonproj_tree(cyclic_tree)

View File

@@ -4,13 +4,14 @@ import numpy
 import pytest
 from thinc.api import get_current_ops

+import spacy
 from spacy.lang.en import English
 from spacy.lang.en.syntax_iterators import noun_chunks
 from spacy.language import Language
 from spacy.pipeline import TrainablePipe
 from spacy.tokens import Doc
 from spacy.training import Example
-from spacy.util import SimpleFrozenList, get_arg_names
+from spacy.util import SimpleFrozenList, get_arg_names, make_tempdir
 from spacy.vocab import Vocab
@@ -602,3 +603,52 @@ def test_update_with_annotates():
             assert results[component] == "".join(eg.predicted.text for eg in examples)
         for component in components - set(components_to_annotate):
             assert results[component] == ""

+def test_load_disable_enable() -> None:
+    """
+    Tests spacy.load() with dis-/enabling components.
+    """
+    base_nlp = English()
+    for pipe in ("sentencizer", "tagger", "parser"):
+        base_nlp.add_pipe(pipe)
+
+    with make_tempdir() as tmp_dir:
+        base_nlp.to_disk(tmp_dir)
+        to_disable = ["parser", "tagger"]
+        to_enable = ["tagger", "parser"]
+
+        # Setting only `disable`.
+        nlp = spacy.load(tmp_dir, disable=to_disable)
+        assert all([comp_name in nlp.disabled for comp_name in to_disable])
+
+        # Setting only `enable`.
+        nlp = spacy.load(tmp_dir, enable=to_enable)
+        assert all(
+            [
+                (comp_name in nlp.disabled) is (comp_name not in to_enable)
+                for comp_name in nlp.component_names
+            ]
+        )
+
+        # Testing consistent enable/disable combination.
+        nlp = spacy.load(
+            tmp_dir,
+            enable=to_enable,
+            disable=[
+                comp_name
+                for comp_name in nlp.component_names
+                if comp_name not in to_enable
+            ],
+        )
+        assert all(
+            [
+                (comp_name in nlp.disabled) is (comp_name not in to_enable)
+                for comp_name in nlp.component_names
+            ]
+        )
+
+        # Inconsistent enable/disable combination.
+        with pytest.raises(ValueError):
+            spacy.load(tmp_dir, enable=to_enable, disable=["parser"])

View File

@@ -589,6 +589,7 @@ def test_string_to_list_intify(value):
     assert string_to_list(value, intify=True) == [1, 2, 3]

+@pytest.mark.skip(reason="Temporarily skip for dev version")
 def test_download_compatibility():
     spec = SpecifierSet("==" + about.__version__)
     spec.prereleases = False
@@ -599,6 +600,7 @@ def test_download_compatibility():
     assert get_minor_version(about.__version__) == get_minor_version(version)

+@pytest.mark.skip(reason="Temporarily skip for dev version")
 def test_validate_compatibility_table():
     spec = SpecifierSet("==" + about.__version__)
     spec.prereleases = False

View File

@@ -60,11 +60,12 @@ def test_readers():
     assert isinstance(extra_corpus, Callable)

+# TODO: enable IMDB test once Stanford servers are back up and running
 @pytest.mark.slow
 @pytest.mark.parametrize(
     "reader,additional_config",
     [
-        ("ml_datasets.imdb_sentiment.v1", {"train_limit": 10, "dev_limit": 10}),
+        # ("ml_datasets.imdb_sentiment.v1", {"train_limit": 10, "dev_limit": 10}),
         ("ml_datasets.dbpedia.v1", {"train_limit": 10, "dev_limit": 10}),
         ("ml_datasets.cmu_movies.v1", {"limit": 10, "freq_cutoff": 200, "split": 0.8}),
     ],

View File

@@ -5,6 +5,7 @@ import srsly

 from spacy.tokens import Doc
 from spacy.vocab import Vocab
 from spacy.util import make_tempdir  # noqa: F401
+from spacy.training import split_bilu_label
 from thinc.api import get_current_ops
@@ -40,7 +41,7 @@ def apply_transition_sequence(parser, doc, sequence):
     desired state."""
     for action_name in sequence:
         if "-" in action_name:
-            move, label = action_name.split("-")
+            move, label = split_bilu_label(action_name)
             parser.add_label(label)
     with parser.step_through(doc) as stepwise:
         for transition in sequence:

View File

@@ -5,6 +5,7 @@ from .augment import dont_augment, orth_variants_augmenter  # noqa: F401
 from .iob_utils import iob_to_biluo, biluo_to_iob  # noqa: F401
 from .iob_utils import offsets_to_biluo_tags, biluo_tags_to_offsets  # noqa: F401
 from .iob_utils import biluo_tags_to_spans, tags_to_entities  # noqa: F401
+from .iob_utils import split_bilu_label, remove_bilu_prefix  # noqa: F401
 from .gold_io import docs_to_json, read_json_file  # noqa: F401
 from .batchers import minibatch_by_padded_size, minibatch_by_words  # noqa: F401
 from .loggers import console_logger  # noqa: F401

View File

@@ -1,33 +1,39 @@
 from typing import List
 from ..errors import Errors
 import numpy
+from libc.stdint cimport int32_t

 cdef class AlignmentArray:
     """AlignmentArray is similar to Thinc's Ragged with two simplifications:
     indexing returns numpy arrays and this type can only be used for CPU arrays.
-    However, these changes make AlginmentArray more efficient for indexing in a
+    However, these changes make AlignmentArray more efficient for indexing in a
     tight loop."""

     __slots__ = []

     def __init__(self, alignment: List[List[int]]):
-        self._lengths = None
-        self._starts_ends = numpy.zeros(len(alignment) + 1, dtype="i")
         cdef int data_len = 0
         cdef int outer_len
         cdef int idx
+        self._starts_ends = numpy.zeros(len(alignment) + 1, dtype='int32')
+        cdef int32_t* starts_ends_ptr = <int32_t*>self._starts_ends.data
+
         for idx, outer in enumerate(alignment):
             outer_len = len(outer)
-            self._starts_ends[idx + 1] = self._starts_ends[idx] + outer_len
+            starts_ends_ptr[idx + 1] = starts_ends_ptr[idx] + outer_len
             data_len += outer_len
-        self._data = numpy.empty(data_len, dtype="i")
+
+        self._lengths = None
+        self._data = numpy.empty(data_len, dtype="int32")
+
         idx = 0
+        cdef int32_t* data_ptr = <int32_t*>self._data.data
         for outer in alignment:
             for inner in outer:
-                self._data[idx] = inner
+                data_ptr[idx] = inner
                 idx += 1

     def __getitem__(self, idx):

View File

@@ -3,10 +3,10 @@ from typing import Optional
 import random
 import itertools
 from functools import partial
-from pydantic import BaseModel, StrictStr

 from ..util import registry
 from .example import Example
+from .iob_utils import split_bilu_label

 if TYPE_CHECKING:
     from ..language import Language  # noqa: F401
@@ -278,10 +278,8 @@ def make_whitespace_variant(
         ent_prev = doc_dict["entities"][position - 1]
         ent_next = doc_dict["entities"][position]
         if "-" in ent_prev and "-" in ent_next:
-            ent_iob_prev = ent_prev.split("-")[0]
-            ent_type_prev = ent_prev.split("-", 1)[1]
-            ent_iob_next = ent_next.split("-")[0]
-            ent_type_next = ent_next.split("-", 1)[1]
+            ent_iob_prev, ent_type_prev = split_bilu_label(ent_prev)
+            ent_iob_next, ent_type_next = split_bilu_label(ent_next)
             if (
                 ent_iob_prev in ("B", "I")
                 and ent_iob_next in ("I", "L")

View File

@@ -9,11 +9,11 @@ from ..tokens.span import Span
 from ..attrs import IDS
 from .alignment import Alignment
 from .iob_utils import biluo_to_iob, offsets_to_biluo_tags, doc_to_biluo_tags
-from .iob_utils import biluo_tags_to_spans
+from .iob_utils import biluo_tags_to_spans, remove_bilu_prefix
 from ..errors import Errors, Warnings
 from ..pipeline._parser_internals import nonproj
 from ..tokens.token cimport MISSING_DEP
-from ..util import logger, to_ternary_int
+from ..util import logger, to_ternary_int, all_equal

 cpdef Doc annotations_to_doc(vocab, tok_annot, doc_annot):
@@ -151,50 +151,127 @@ cdef class Example:
             self._y_sig = y_sig
             return self._cached_alignment

+    def _get_aligned_vectorized(self, align, gold_values):
+        # Fast path for Doc attributes/fields that are predominantly a single value,
+        # i.e., TAG, POS, MORPH.
+        x2y_single_toks = []
+        x2y_single_toks_i = []
+        x2y_multiple_toks = []
+        x2y_multiple_toks_i = []
+
+        # Gather indices of gold tokens aligned to the candidate tokens into two buckets.
+        # Bucket 1: All tokens that have a one-to-one alignment.
+        # Bucket 2: All tokens that have a one-to-many alignment.
+        for idx, token in enumerate(self.predicted):
+            aligned_gold_i = align[token.i]
+            aligned_gold_len = len(aligned_gold_i)
+
+            if aligned_gold_len == 1:
+                x2y_single_toks.append(aligned_gold_i.item())
+                x2y_single_toks_i.append(idx)
+            elif aligned_gold_len > 1:
+                x2y_multiple_toks.append(aligned_gold_i)
+                x2y_multiple_toks_i.append(idx)
+
+        # Map elements of the first bucket directly to the output array.
+        output = numpy.full(len(self.predicted), None)
+        output[x2y_single_toks_i] = gold_values[x2y_single_toks].squeeze()
+
+        # Collapse many-to-one alignments into one-to-one alignments if they
+        # share the same value. Map to None in all other cases.
+        for i in range(len(x2y_multiple_toks)):
+            aligned_gold_values = gold_values[x2y_multiple_toks[i]]
+
+            # If all aligned tokens have the same value, use it.
+            if all_equal(aligned_gold_values):
+                x2y_multiple_toks[i] = aligned_gold_values[0].item()
+            else:
+                x2y_multiple_toks[i] = None
+
+        output[x2y_multiple_toks_i] = x2y_multiple_toks
+
+        return output.tolist()
+
+    def _get_aligned_non_vectorized(self, align, gold_values):
+        # Slower path for fields that return multiple values (resulting
+        # in ragged arrays that cannot be vectorized trivially).
+        output = [None] * len(self.predicted)
+
+        for token in self.predicted:
+            aligned_gold_i = align[token.i]
+            values = gold_values[aligned_gold_i].ravel()
+            if len(values) == 1:
+                output[token.i] = values.item()
+            elif all_equal(values):
+                # If all aligned tokens have the same value, use it.
+                output[token.i] = values[0].item()
+
+        return output
+
     def get_aligned(self, field, as_string=False):
         """Return an aligned array for a token attribute."""
         align = self.alignment.x2y
+        gold_values = self.reference.to_array([field])
+
+        if len(gold_values.shape) == 1:
+            output = self._get_aligned_vectorized(align, gold_values)
+        else:
+            output = self._get_aligned_non_vectorized(align, gold_values)
+
         vocab = self.reference.vocab
-        gold_values = self.reference.to_array([field])
-        output = [None] * len(self.predicted)
-        for token in self.predicted:
-            values = gold_values[align[token.i]]
-            values = values.ravel()
-            if len(values) == 0:
-                output[token.i] = None
-            elif len(values) == 1:
-                output[token.i] = values[0]
-            elif len(set(list(values))) == 1:
-                # If all aligned tokens have the same value, use it.
-                output[token.i] = values[0]
-            else:
-                output[token.i] = None
         if as_string and field not in ["ENT_IOB", "SENT_START"]:
             output = [vocab.strings[o] if o is not None else o for o in output]
         return output

     def get_aligned_parse(self, projectivize=True):
         cand_to_gold = self.alignment.x2y
         gold_to_cand = self.alignment.y2x
-        aligned_heads = [None] * self.x.length
-        aligned_deps = [None] * self.x.length
-        has_deps = [token.has_dep() for token in self.y]
-        has_heads = [token.has_head() for token in self.y]
         heads = [token.head.i for token in self.y]
         deps = [token.dep_ for token in self.y]
+
         if projectivize:
             proj_heads, proj_deps = nonproj.projectivize(heads, deps)
+            has_deps = [token.has_dep() for token in self.y]
+            has_heads = [token.has_head() for token in self.y]
             # ensure that missing data remains missing
             heads = [h if has_heads[i] else heads[i] for i, h in enumerate(proj_heads)]
             deps = [d if has_deps[i] else deps[i] for i, d in enumerate(proj_deps)]
-        for cand_i in range(self.x.length):
-            if cand_to_gold.lengths[cand_i] == 1:
-                gold_i = cand_to_gold[cand_i][0]
-                if gold_to_cand.lengths[heads[gold_i]] == 1:
-                    aligned_heads[cand_i] = int(gold_to_cand[heads[gold_i]][0])
-                    aligned_deps[cand_i] = deps[gold_i]
-        return aligned_heads, aligned_deps
+
+        # Select all candidate tokens that are aligned to a single gold token.
+        c2g_single_toks = numpy.where(cand_to_gold.lengths == 1)[0]
+
+        # Fetch all aligned gold token indices.
+        if c2g_single_toks.shape == cand_to_gold.lengths.shape:
+            # This is the most likely case.
+            gold_i = cand_to_gold[:].squeeze()
+        else:
+            gold_i = numpy.vectorize(lambda x: cand_to_gold[int(x)][0])(c2g_single_toks).squeeze()
+
+        # Fetch indices of all gold heads for the aligned gold tokens.
+        heads = numpy.asarray(heads, dtype='i')
+        gold_head_i = heads[gold_i]
+
+        # Select all gold tokens that are heads of the previously selected
+        # gold tokens (and are aligned to a single candidate token).
+        g2c_len_heads = gold_to_cand.lengths[gold_head_i]
+        g2c_len_heads = numpy.where(g2c_len_heads == 1)[0]
+        g2c_i = numpy.vectorize(lambda x: gold_to_cand[int(x)][0])(gold_head_i[g2c_len_heads]).squeeze()
+
+        # Update head/dep alignments with the above.
+        aligned_heads = numpy.full((self.x.length), None)
+        aligned_heads[c2g_single_toks[g2c_len_heads]] = g2c_i
+
+        deps = numpy.asarray(deps)
+        aligned_deps = numpy.full((self.x.length), None)
+        aligned_deps[c2g_single_toks] = deps[gold_i]
+
+        return aligned_heads.tolist(), aligned_deps.tolist()

     def get_aligned_sent_starts(self):
         """Get list of SENT_START attributes aligned to the predicted tokenization.
@@ -519,7 +596,7 @@ def _parse_ner_tags(biluo_or_offsets, vocab, words, spaces):
             else:
                 ent_iobs.append(iob_tag.split("-")[0])
                 if iob_tag.startswith("I") or iob_tag.startswith("B"):
-                    ent_types.append(iob_tag.split("-", 1)[1])
+                    ent_types.append(remove_bilu_prefix(iob_tag))
                 else:
                     ent_types.append("")
     return ent_iobs, ent_types

View File

@@ -1,4 +1,4 @@
-from typing import List, Dict, Tuple, Iterable, Union, Iterator
from typing import List, Dict, Tuple, Iterable, Union, Iterator, cast
import warnings
from ..errors import Errors, Warnings
@@ -218,6 +218,14 @@ def tags_to_entities(tags: Iterable[str]) -> List[Tuple[str, int, int]]:
    return entities
def split_bilu_label(label: str) -> Tuple[str, str]:
    return cast(Tuple[str, str], label.split("-", 1))


def remove_bilu_prefix(label: str) -> str:
    return label.split("-", 1)[1]
# Fallbacks to make backwards-compat easier
offsets_from_biluo_tags = biluo_tags_to_offsets
spans_from_biluo_tags = biluo_tags_to_spans

View File

@@ -1,6 +1,6 @@
from typing import List, Mapping, NoReturn, Union, Dict, Any, Set, cast
from typing import Optional, Iterable, Callable, Tuple, Type
-from typing import Iterator, Type, Pattern, Generator, TYPE_CHECKING
from typing import Iterator, Pattern, Generator, TYPE_CHECKING
from types import ModuleType
import os
import importlib
@@ -12,7 +12,6 @@ from thinc.api import NumpyOps, get_current_ops, Adam, Config, Optimizer
from thinc.api import ConfigValidationError, Model
import functools
import itertools
-import numpy.random
import numpy
import srsly
import catalogue
@@ -400,6 +399,7 @@ def load_model(
    *,
    vocab: Union["Vocab", bool] = True,
    disable: Iterable[str] = SimpleFrozenList(),
    enable: Iterable[str] = SimpleFrozenList(),
    exclude: Iterable[str] = SimpleFrozenList(),
    config: Union[Dict[str, Any], Config] = SimpleFrozenDict(),
) -> "Language":
@@ -409,11 +409,19 @@ def load_model(
    vocab (Vocab / True): Optional vocab to pass in on initialization. If True,
        a new Vocab object will be created.
    disable (Iterable[str]): Names of pipeline components to disable.
    enable (Iterable[str]): Names of pipeline components to enable. All others will be disabled.
    exclude (Iterable[str]): Names of pipeline components to exclude.
    config (Dict[str, Any] / Config): Config overrides as nested dict or dict
        keyed by section values in dot notation.
    RETURNS (Language): The loaded nlp object.
    """
-   kwargs = {"vocab": vocab, "disable": disable, "exclude": exclude, "config": config}
    kwargs = {
        "vocab": vocab,
        "disable": disable,
        "enable": enable,
        "exclude": exclude,
        "config": config,
    }
    if isinstance(name, str):  # name or string path
        if name.startswith("blank:"):  # shortcut for blank model
            return get_lang_class(name.replace("blank:", ""))()
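As an aside, the `blank:` shortcut handled here means these two calls construct the same empty pipeline (sketch, mine):

```python
from spacy.util import get_lang_class, load_model

nlp1 = load_model("blank:en")
nlp2 = get_lang_class("en")()  # what the shortcut expands to
assert type(nlp1) is type(nlp2)
```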
@@ -433,6 +441,7 @@ def load_model_from_package(
    *,
    vocab: Union["Vocab", bool] = True,
    disable: Iterable[str] = SimpleFrozenList(),
    enable: Iterable[str] = SimpleFrozenList(),
    exclude: Iterable[str] = SimpleFrozenList(),
    config: Union[Dict[str, Any], Config] = SimpleFrozenDict(),
) -> "Language":
@@ -444,6 +453,8 @@ def load_model_from_package(
    disable (Iterable[str]): Names of pipeline components to disable. Disabled
        pipes will be loaded but they won't be run unless you explicitly
        enable them by calling nlp.enable_pipe.
    enable (Iterable[str]): Names of pipeline components to enable. All other
        pipes will be disabled (and can be enabled using `nlp.enable_pipe`).
    exclude (Iterable[str]): Names of pipeline components to exclude. Excluded
        components won't be loaded.
    config (Dict[str, Any] / Config): Config overrides as nested dict or dict
@@ -451,7 +462,7 @@ def load_model_from_package(
    RETURNS (Language): The loaded nlp object.
    """
    cls = importlib.import_module(name)
-   return cls.load(vocab=vocab, disable=disable, exclude=exclude, config=config)  # type: ignore[attr-defined]
    return cls.load(vocab=vocab, disable=disable, enable=enable, exclude=exclude, config=config)  # type: ignore[attr-defined]
def load_model_from_path(
@@ -460,6 +471,7 @@ def load_model_from_path(
    meta: Optional[Dict[str, Any]] = None,
    vocab: Union["Vocab", bool] = True,
    disable: Iterable[str] = SimpleFrozenList(),
    enable: Iterable[str] = SimpleFrozenList(),
    exclude: Iterable[str] = SimpleFrozenList(),
    config: Union[Dict[str, Any], Config] = SimpleFrozenDict(),
) -> "Language":
@@ -473,6 +485,8 @@ def load_model_from_path(
    disable (Iterable[str]): Names of pipeline components to disable. Disabled
        pipes will be loaded but they won't be run unless you explicitly
        enable them by calling nlp.enable_pipe.
    enable (Iterable[str]): Names of pipeline components to enable. All other
        pipes will be disabled (and can be enabled using `nlp.enable_pipe`).
    exclude (Iterable[str]): Names of pipeline components to exclude. Excluded
        components won't be loaded.
    config (Dict[str, Any] / Config): Config overrides as nested dict or dict
@@ -487,7 +501,12 @@ def load_model_from_path(
    overrides = dict_to_dot(config)
    config = load_config(config_path, overrides=overrides)
    nlp = load_model_from_config(
-       config, vocab=vocab, disable=disable, exclude=exclude, meta=meta
        config,
        vocab=vocab,
        disable=disable,
        enable=enable,
        exclude=exclude,
        meta=meta,
    )
    return nlp.from_disk(model_path, exclude=exclude, overrides=overrides)
@@ -498,6 +517,7 @@ def load_model_from_config(
    meta: Dict[str, Any] = SimpleFrozenDict(),
    vocab: Union["Vocab", bool] = True,
    disable: Iterable[str] = SimpleFrozenList(),
    enable: Iterable[str] = SimpleFrozenList(),
    exclude: Iterable[str] = SimpleFrozenList(),
    auto_fill: bool = False,
    validate: bool = True,
@@ -512,6 +532,8 @@ def load_model_from_config(
    disable (Iterable[str]): Names of pipeline components to disable. Disabled
        pipes will be loaded but they won't be run unless you explicitly
        enable them by calling nlp.enable_pipe.
    enable (Iterable[str]): Names of pipeline components to enable. All other
        pipes will be disabled (and can be enabled using `nlp.enable_pipe`).
    exclude (Iterable[str]): Names of pipeline components to exclude. Excluded
        components won't be loaded.
    auto_fill (bool): Whether to auto-fill config with missing defaults.
@@ -530,6 +552,7 @@ def load_model_from_config(
        config,
        vocab=vocab,
        disable=disable,
        enable=enable,
        exclude=exclude,
        auto_fill=auto_fill,
        validate=validate,
@@ -594,6 +617,7 @@ def load_model_from_init_py(
    *,
    vocab: Union["Vocab", bool] = True,
    disable: Iterable[str] = SimpleFrozenList(),
    enable: Iterable[str] = SimpleFrozenList(),
    exclude: Iterable[str] = SimpleFrozenList(),
    config: Union[Dict[str, Any], Config] = SimpleFrozenDict(),
) -> "Language":
@@ -605,6 +629,8 @@ def load_model_from_init_py(
    disable (Iterable[str]): Names of pipeline components to disable. Disabled
        pipes will be loaded but they won't be run unless you explicitly
        enable them by calling nlp.enable_pipe.
    enable (Iterable[str]): Names of pipeline components to enable. All other
        pipes will be disabled (and can be enabled using `nlp.enable_pipe`).
    exclude (Iterable[str]): Names of pipeline components to exclude. Excluded
        components won't be loaded.
    config (Dict[str, Any] / Config): Config overrides as nested dict or dict
@@ -622,6 +648,7 @@ def load_model_from_init_py(
        vocab=vocab,
        meta=meta,
        disable=disable,
        enable=enable,
        exclude=exclude,
        config=config,
    )
@@ -1689,3 +1716,10 @@ def packages_distributions() -> Dict[str, List[str]]:
        for pkg in (dist.read_text("top_level.txt") or "").split():
            pkg_to_dist[pkg].append(dist.metadata["Name"])
    return dict(pkg_to_dist)
def all_equal(iterable):
    """Return True if all the elements are equal to each other
    (or if the input is an empty sequence), False otherwise."""
    g = itertools.groupby(iterable)
    return next(g, True) and not next(g, False)
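A quick sanity check of the `groupby` trick (examples mine): `groupby` collapses runs of equal values, so a uniform iterable yields at most one group.

```python
import itertools

def all_equal(iterable):
    g = itertools.groupby(iterable)
    return next(g, True) and not next(g, False)

print(all_equal("aaaa"))     # True
print(all_equal([1, 1, 2]))  # False
print(all_equal([]))         # True (vacuously)
```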

View File

@@ -339,7 +339,7 @@ cdef class Vectors:
            return self.key2row.get(key, -1)
        elif keys is not None:
            keys = [get_string_id(key) for key in keys]
-           rows = [self.key2row.get(key, -1.) for key in keys]
            rows = [self.key2row.get(key, -1) for key in keys]
            return xp.asarray(rows, dtype="i")
        else:
            row2key = {row: key for key, row in self.key2row.items()}
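The one-character fix above matters because the fallback should be an integer row index, not a float. A standalone sketch of the lookup (toy `key2row`, mine):

```python
from spacy.strings import get_string_id

key2row = {get_string_id("cat"): 0}  # toy vector table with one row
keys = [get_string_id(k) for k in ("cat", "dog")]
rows = [key2row.get(key, -1) for key in keys]
print(rows)  # [0, -1] -- "dog" is out-of-vocabulary, so it maps to row -1
```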

View File

@@ -587,7 +587,7 @@ consists of either two or three subnetworks:
  run once for each batch.
- **lower**: Construct a feature-specific vector for each `(token, feature)`
  pair. This is also run once for each batch. Constructing the state
  representation is then a matter of summing the component features and
  applying the non-linearity.
- **upper** (optional): A feed-forward network that predicts scores from the
  state representation. If not present, the output from the lower model is used
@@ -628,7 +628,7 @@ same signature, but the `use_upper` argument was `True` by default.
> ```

Build a tagger model, using a provided token-to-vector component. The tagger
model adds a linear layer with softmax activation to predict scores given
the token vectors.
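A rough Thinc sketch of that idea (mine, not spaCy's actual `build_tagger_model`): the output layer is just a linear-plus-softmax applied to each token vector.

```python
from thinc.api import Model, Softmax, chain, with_array

def sketch_tagger(tok2vec: Model, n_tags: int, token_width: int) -> Model:
    # Softmax(nO, nI) is a linear layer followed by a softmax activation;
    # with_array maps it over the per-doc arrays that tok2vec produces.
    return chain(tok2vec, with_array(Softmax(n_tags, token_width)))
```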
| Name | Description |
@@ -920,5 +920,5 @@ A function that reads an existing `KnowledgeBase` from file.
A function that takes as input a [`KnowledgeBase`](/api/kb) and a
[`Span`](/api/span) object denoting a named entity, and returns a list of
plausible [`Candidate`](/api/kb/#candidate) objects. The default
`CandidateGenerator` uses the text of a mention to find its potential
aliases in the `KnowledgeBase`. Note that this function is case-dependent.
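A rough illustration of that case-dependence (sketch, mine, using spaCy's standard `KnowledgeBase` API):

```python
from spacy.kb import KnowledgeBase
from spacy.vocab import Vocab

vocab = Vocab()
kb = KnowledgeBase(vocab=vocab, entity_vector_length=3)
kb.add_entity(entity="Q90", freq=12, entity_vector=[1.0, 0.0, 0.0])
kb.add_alias(alias="Paris", entities=["Q90"], probabilities=[1.0])

print(len(kb.get_alias_candidates("Paris")))  # 1
print(len(kb.get_alias_candidates("paris")))  # 0 -- lookup is case-dependent
```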

View File

@@ -0,0 +1,78 @@
---
title: Attributes
teaser: Token attributes
source: spacy/attrs.pyx
---
[Token](/api/token) attributes are specified using internal IDs in many places
including:
- [`Matcher` patterns](/api/matcher#patterns)
- [`Doc.to_array`](/api/doc#to_array) and
[`Doc.from_array`](/api/doc#from_array)
- [`Doc.has_annotation`](/api/doc#has_annotation)
- [`MultiHashEmbed`](/api/architectures#MultiHashEmbed) Tok2Vec architecture
`attrs`
> ```python
> import spacy
> from spacy.attrs import DEP
>
> nlp = spacy.blank("en")
> doc = nlp("There are many attributes.")
>
> # DEP always has the same internal value
> assert DEP == 76
>
> # "DEP" is automatically converted to DEP
> assert DEP == nlp.vocab.strings["DEP"]
> assert doc.has_annotation(DEP) == doc.has_annotation("DEP")
>
> # look up IDs in spacy.attrs.IDS
> from spacy.attrs import IDS
> assert IDS["DEP"] == DEP
> ```
All methods automatically convert between the string version of an ID (`"DEP"`)
and the internal integer symbols (`DEP`). The internal IDs can be imported from
`spacy.attrs` or retrieved from the [`StringStore`](/api/stringstore). A map
from string attribute names to internal attribute IDs is stored in
`spacy.attrs.IDS`.
The corresponding [`Token` object attributes](/api/token#attributes) can be
accessed using the same names in lowercase, e.g. `token.orth` or `token.length`.
For attributes that represent string values, the internal integer ID is
accessed as `Token.attr`, e.g. `token.dep`, while the string value can be
retrieved by appending `_` as in `token.dep_`.
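A minimal illustration of the int/string pairing (example mine):

```python
import spacy

nlp = spacy.blank("en")
doc = nlp("spaCy is fast.")
token = doc[0]
token.dep_ = "nsubj"  # set via the string...
assert token.dep == nlp.vocab.strings["nsubj"]  # ...stored as an integer ID
assert token.dep_ == "nsubj"
```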
| Attribute | Description |
| ------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `DEP` | The token's dependency label. ~~str~~ |
| `ENT_ID` | The token's entity ID (`ent_id`). ~~str~~ |
| `ENT_IOB` | The IOB part of the token's entity tag. Uses custom integer values rather than the string store: unset is `0`, `I` is `1`, `O` is `2`, and `B` is `3`. ~~str~~ |
| `ENT_KB_ID` | The token's entity knowledge base ID. ~~str~~ |
| `ENT_TYPE` | The token's entity label. ~~str~~ |
| `IS_ALPHA` | Token text consists of alphabetic characters. ~~bool~~ |
| `IS_ASCII` | Token text consists of ASCII characters. ~~bool~~ |
| `IS_DIGIT` | Token text consists of digits. ~~bool~~ |
| `IS_LOWER` | Token text is in lowercase. ~~bool~~ |
| `IS_PUNCT` | Token is punctuation. ~~bool~~ |
| `IS_SPACE` | Token is whitespace. ~~bool~~ |
| `IS_STOP` | Token is a stop word. ~~bool~~ |
| `IS_TITLE` | Token text is in titlecase. ~~bool~~ |
| `IS_UPPER` | Token text is in uppercase. ~~bool~~ |
| `LEMMA` | The token's lemma. ~~str~~ |
| `LENGTH` | The length of the token text. ~~int~~ |
| `LIKE_EMAIL` | Token text resembles an email address. ~~bool~~ |
| `LIKE_NUM` | Token text resembles a number. ~~bool~~ |
| `LIKE_URL` | Token text resembles a URL. ~~bool~~ |
| `LOWER` | The lowercase form of the token text. ~~str~~ |
| `MORPH` | The token's morphological analysis. ~~MorphAnalysis~~ |
| `NORM` | The normalized form of the token text. ~~str~~ |
| `ORTH` | The exact verbatim text of a token. ~~str~~ |
| `POS` | The token's universal part of speech (UPOS). ~~str~~ |
| `SENT_START` | Token is start of sentence. ~~bool~~ |
| `SHAPE` | The token's shape. ~~str~~ |
| `SPACY` | Token has a trailing space. ~~bool~~ |
| `TAG` | The token's fine-grained part of speech. ~~str~~ |

View File

@@ -2,7 +2,7 @@
title: SpanRuler
tag: class
source: spacy/pipeline/span_ruler.py
-new: 3.3.1
new: 3.3
teaser: 'Pipeline component for rule-based span and named entity recognition'
api_string_name: span_ruler
api_trainable: false

View File

@@ -51,6 +51,7 @@ specified separately using the new `exclude` keyword argument.
| _keyword-only_ | |
| `vocab` | Optional shared vocab to pass in on initialization. If `True` (default), a new `Vocab` object will be created. ~~Union[Vocab, bool]~~ |
| `disable` | Names of pipeline components to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [nlp.enable_pipe](/api/language#enable_pipe). ~~List[str]~~ |
| `enable` | Names of pipeline components to [enable](/usage/processing-pipelines#disabling). All other pipes will be disabled. ~~List[str]~~ |
| `exclude` <Tag variant="new">3</Tag> | Names of pipeline components to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~List[str]~~ |
| `config` <Tag variant="new">3</Tag> | Optional config overrides, either as nested dict or dict keyed by section value in dot notation, e.g. `"components.name.value"`. ~~Union[Dict[str, Any], Config]~~ |
| **RETURNS** | A `Language` object with the loaded pipeline. ~~Language~~ |

View File

@@ -1899,7 +1899,7 @@ access to some nice Latin vectors. You can then pass the directory path to
> ```

```cli
-$ wget https://s3-us-west-1.amazonaws.com/fasttext-vectors/word-vectors-v2/cc.la.300.vec.gz
$ wget https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.la.300.vec.gz
$ python -m spacy init vectors en cc.la.300.vec.gz /tmp/la_vectors_wiki_lg
```

View File

@@ -362,6 +362,18 @@ nlp = spacy.load("en_core_web_sm", disable=["tagger", "parser"])
nlp.enable_pipe("tagger")
```
In addition to `disable`, `spacy.load()` also accepts `enable`. If `enable` is
set, all components except for those in `enable` are disabled.
```python
# Load the complete pipeline, but disable all components except for tok2vec and tagger
nlp = spacy.load("en_core_web_sm", enable=["tok2vec", "tagger"])
# Has the same effect, as NER is already not part of the enabled set of components
nlp = spacy.load("en_core_web_sm", enable=["tok2vec", "tagger"], disable=["ner"])
# Will raise an error, as the sets of enabled and disabled components are conflicting
nlp = spacy.load("en_core_web_sm", enable=["ner"], disable=["ner"])
```
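A component that was disabled this way stays loaded and can be switched back on later (sketch, assuming `en_core_web_sm` is installed):

```python
import spacy

nlp = spacy.load("en_core_web_sm", enable=["tok2vec", "tagger"])
assert "ner" in nlp.component_names  # still loaded...
assert "ner" in nlp.disabled         # ...but not run
nlp.enable_pipe("ner")               # opt back in
```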
<Infobox variant="warning" title="Changed in v3.0">

As of v3.0, the `disable` keyword argument specifies components to load but

View File

@@ -203,11 +203,14 @@ the data to and from a JSON file.
```python
### {highlight="16-23,25-30"}
import json
from spacy import Language
from spacy.util import ensure_path

@Language.factory("my_component")
class CustomComponent:
-   def __init__(self):
    def __init__(self, nlp: Language, name: str = "my_component"):
        self.name = name
        self.data = []

    def __call__(self, doc):
@@ -231,7 +234,7 @@ class CustomComponent:
        # This will receive the directory path + /my_component
        data_path = path / "data.json"
        with data_path.open("r", encoding="utf8") as f:
-           self.data = json.loads(f)
            self.data = json.load(f)
        return self
```
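With both methods in place, the pipeline round-trips through `nlp.to_disk` / `spacy.load` (sketch, mine; assumes the `my_component` factory above is importable when loading):

```python
import spacy

nlp = spacy.blank("en")
nlp.add_pipe("my_component")
nlp.to_disk("/tmp/my_pipeline")        # calls CustomComponent.to_disk under the hood

nlp2 = spacy.load("/tmp/my_pipeline")  # calls CustomComponent.from_disk
assert nlp2.get_pipe("my_component").data == nlp.get_pipe("my_component").data
```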

View File

@@ -124,6 +124,7 @@
{
"label": "Other",
"items": [
{ "text": "Attributes", "url": "/api/attributes" },
{ "text": "Corpus", "url": "/api/corpus" },
{ "text": "KnowledgeBase", "url": "/api/kb" },
{ "text": "Lookups", "url": "/api/lookups" },

View File

@@ -1,5 +1,57 @@
{
"resources": [
{
"id": "spacyfishing",
"title": "spaCy fishing",
"slogan": "Named entity disambiguation and linking on Wikidata in spaCy with Entity-Fishing.",
"description": "A spaCy wrapper of Entity-Fishing for named entity disambiguation and linking against a Wikidata knowledge base.",
"github": "Lucaterre/spacyfishing",
"pip": "spacyfishing",
"code_example": [
"import spacy",
"text = 'Victor Hugo and Honoré de Balzac are French writers who lived in Paris.'",
"nlp = spacy.load('en_core_web_sm')",
"nlp.add_pipe('entityfishing')",
"doc = nlp(text)",
"for span in doc.ents:",
" print((ent.text, ent.label_, ent._.kb_qid, ent._.url_wikidata, ent._.nerd_score))",
"# ('Victor Hugo', 'PERSON', 'Q535', 'https://www.wikidata.org/wiki/Q535', 0.972)",
"# ('Honoré de Balzac', 'PERSON', 'Q9711', 'https://www.wikidata.org/wiki/Q9711', 0.9724)",
"# ('French', 'NORP', 'Q121842', 'https://www.wikidata.org/wiki/Q121842', 0.3739)",
"# ('Paris', 'GPE', 'Q90', 'https://www.wikidata.org/wiki/Q90', 0.5652)",
"## Set parameter `extra_info` to `True` and check also span._.description, span._.src_description, span._.normal_term, span._.other_ids"
],
"category": ["models", "pipeline"],
"tags": ["NER", "NEL"],
"author": "Lucas Terriel",
"author_links": {
"twitter": "TerreLuca",
"github": "Lucaterre"
}
},
{
"id": "aim-spacy",
"title": "Aim-spaCy",
"slogan": "Aim-spaCy is an Aim-based spaCy experiment tracker.",
"description": "Aim-spaCy helps to easily collect, store and explore training logs for spaCy, including: hyper-parameters, metrics and displaCy visualizations",
"github": "aimhubio/aim-spacy",
"pip": "aim-spacy",
"code_example": [
"https://github.com/aimhubio/aim-spacy/tree/master/examples"
],
"code_language": "python",
"url": "https://aimstack.io/spacy",
"thumb": "https://user-images.githubusercontent.com/13848158/172912427-ee9327ea-3cd8-47fa-8427-6c0d36cd831f.png",
"image": "https://user-images.githubusercontent.com/13848158/136364717-0939222c-55b6-44f0-ad32-d9ab749546e4.png",
"author": "AimStack",
"author_links": {
"twitter": "aimstackio",
"github": "aimhubio",
"website": "https://aimstack.io"
},
"category": ["visualizers"],
"tags": ["experiment-tracking", "visualization"]
},
{
"id": "spacy-report",
"title": "spacy-report",
@@ -32,7 +84,7 @@
"code_language": "python",
"author": "Leap Beyond",
"author_links": {
-"github": "https://github.com/LeapBeyond",
"github": "LeapBeyond",
"website": "https://leapbeyond.ai"
},
"code_example": [
@@ -55,8 +107,8 @@
"code_language": "python",
"author": "Peter Baumgartner",
"author_links": {
-"twitter" : "https://twitter.com/pmbaumgartner",
-"github": "https://github.com/pmbaumgartner",
"twitter" : "pmbaumgartner",
"github": "pmbaumgartner",
"website": "https://www.peterbaumgartner.com/"
},
"code_example": [
@@ -75,8 +127,8 @@
"code_language": "python",
"author": "Explosion",
"author_links": {
-"twitter" : "https://twitter.com/explosion_ai",
-"github": "https://github.com/explosion",
"twitter" : "explosion_ai",
"github": "explosion",
"website": "https://explosion.ai/"
},
"code_example": [
@@ -548,8 +600,8 @@
"code_language": "python",
"author": "Keith Rozario",
"author_links": {
-"twitter" : "https://twitter.com/keithrozario",
-"github": "https://github.com/keithrozario",
"twitter" : "keithrozario",
"github": "keithrozario",
"website": "https://www.keithrozario.com"
},
"code_example": [
@@ -2272,7 +2324,7 @@
"author": "Daniel Whitenack & Chris Benson",
"author_links": {
"website": "https://changelog.com/practicalai",
-"twitter": "https://twitter.com/PracticalAIFM"
"twitter": "PracticalAIFM"
},
"category": ["podcasts"]
},

View File

@@ -24,7 +24,6 @@ const CUDA = {
'11.3': 'cuda113',
'11.4': 'cuda114',
'11.5': 'cuda115',
-'11.6': 'cuda116',
}
const LANG_EXTRAS = ['ja'] // only for languages with models