Merge branch 'master' into fix/windows-quoting

Paul O'Leary McCann 2022-11-14 17:59:27 +09:00
commit d85cad0922
93 changed files with 2482 additions and 1255 deletions


@ -1,31 +1,30 @@
parameters:
python_version: ''
architecture: ''
prefix: ''
gpu: false
num_build_jobs: 1
architecture: 'x64'
num_build_jobs: 2
steps:
- task: UsePythonVersion@0
inputs:
versionSpec: ${{ parameters.python_version }}
architecture: ${{ parameters.architecture }}
allowUnstable: true
- bash: |
echo "##vso[task.setvariable variable=python_version]${{ parameters.python_version }}"
displayName: 'Set variables'
- script: |
${{ parameters.prefix }} python -m pip install -U pip setuptools
${{ parameters.prefix }} python -m pip install -U -r requirements.txt
python -m pip install -U build pip setuptools
python -m pip install -U -r requirements.txt
displayName: "Install dependencies"
- script: |
${{ parameters.prefix }} python setup.py build_ext --inplace -j ${{ parameters.num_build_jobs }}
${{ parameters.prefix }} python setup.py sdist --formats=gztar
displayName: "Compile and build sdist"
python -m build --sdist
displayName: "Build sdist"
- script: python -m mypy spacy
- script: |
python -m mypy spacy
displayName: 'Run mypy'
condition: ne(variables['python_version'], '3.6')
@ -34,35 +33,24 @@ steps:
contents: "spacy"
displayName: "Delete source directory"
- task: DeleteFiles@1
inputs:
contents: "*.egg-info"
displayName: "Delete egg-info directory"
- script: |
${{ parameters.prefix }} python -m pip freeze --exclude torch --exclude cupy-cuda110 > installed.txt
${{ parameters.prefix }} python -m pip uninstall -y -r installed.txt
python -m pip freeze > installed.txt
python -m pip uninstall -y -r installed.txt
displayName: "Uninstall all packages"
- bash: |
${{ parameters.prefix }} SDIST=$(python -c "import os;print(os.listdir('./dist')[-1])" 2>&1)
${{ parameters.prefix }} SPACY_NUM_BUILD_JOBS=2 python -m pip install dist/$SDIST
SDIST=$(python -c "import os;print(os.listdir('./dist')[-1])" 2>&1)
SPACY_NUM_BUILD_JOBS=${{ parameters.num_build_jobs }} python -m pip install dist/$SDIST
displayName: "Install from sdist"
- script: |
${{ parameters.prefix }} python -m pip install -U -r requirements.txt
displayName: "Install test requirements"
- script: |
${{ parameters.prefix }} python -m pip install -U cupy-cuda110 -f https://github.com/cupy/cupy/releases/v9.0.0
${{ parameters.prefix }} python -m pip install "torch==1.7.1+cu110" -f https://download.pytorch.org/whl/torch_stable.html
displayName: "Install GPU requirements"
condition: eq(${{ parameters.gpu }}, true)
- script: |
${{ parameters.prefix }} python -m pytest --pyargs spacy -W error
displayName: "Run CPU tests"
condition: eq(${{ parameters.gpu }}, false)
- script: |
${{ parameters.prefix }} python -m pytest --pyargs spacy -W error -p spacy.tests.enable_gpu
displayName: "Run GPU tests"
condition: eq(${{ parameters.gpu }}, true)
python -W error -c "import spacy"
displayName: "Test import"
- script: |
python -m spacy download ca_core_news_sm
@ -71,6 +59,11 @@ steps:
displayName: 'Test download CLI'
condition: eq(variables['python_version'], '3.8')
- script: |
python -W error -c "import ca_core_news_sm; nlp = ca_core_news_sm.load(); doc=nlp('test')"
displayName: 'Test no warnings on load (#11713)'
condition: eq(variables['python_version'], '3.8')
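In plain Python, the new check above amounts to the following sketch (ca_core_news_sm is the package installed by the "Test download CLI" step; everything else is standard library):

import warnings
import ca_core_news_sm  # installed by the download step above

with warnings.catch_warnings():
    warnings.simplefilter("error")  # mirrors `python -W error`
    nlp = ca_core_news_sm.load()
    doc = nlp("test")  # should load and run without emitting any warning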
- script: |
python -m spacy convert extra/example_data/ner_example_data/ner-token-per-line-conll2003.json .
displayName: 'Test convert CLI'
@ -105,13 +98,22 @@ steps:
displayName: 'Test assemble CLI vectors warning'
condition: eq(variables['python_version'], '3.8')
- script: |
python -m pip install -U -r requirements.txt
displayName: "Install test requirements"
- script: |
python -m pytest --pyargs spacy -W error
displayName: "Run CPU tests"
- script: |
python -m pip install --pre thinc-apple-ops
python -m pytest --pyargs spacy
displayName: "Run CPU tests with thinc-apple-ops"
condition: and(startsWith(variables['imageName'], 'macos'), eq(variables['python.version'], '3.11'))
- script: |
python .github/validate_universe_json.py website/meta/universe.json
displayName: 'Test website/meta/universe.json'
condition: eq(variables['python_version'], '3.8')
- script: |
${{ parameters.prefix }} python -m pip install --pre thinc-apple-ops
${{ parameters.prefix }} python -m pytest --pyargs spacy
displayName: "Run CPU tests with thinc-apple-ops"
condition: and(startsWith(variables['imageName'], 'macos'), eq(variables['python.version'], '3.10'))


@ -12,10 +12,10 @@ jobs:
if: github.repository_owner == 'explosion'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
ref: ${{ github.head_ref }}
- uses: actions/setup-python@v2
- uses: actions/setup-python@v4
- run: pip install black
- name: Auto-format code if needed
run: black spacy
@ -23,10 +23,11 @@ jobs:
# code and makes GitHub think the action failed
- name: Check for modified files
id: git-check
run: echo ::set-output name=modified::$(if git diff-index --quiet HEAD --; then echo "false"; else echo "true"; fi)
run: echo modified=$(if git diff-index --quiet HEAD --; then echo "false"; else echo "true"; fi) >> $GITHUB_OUTPUT
- name: Create Pull Request
if: steps.git-check.outputs.modified == 'true'
uses: peter-evans/create-pull-request@v3
uses: peter-evans/create-pull-request@v4
with:
title: Auto-format code with black
labels: meta


@ -8,14 +8,14 @@ on:
jobs:
explosion-bot:
runs-on: ubuntu-18.04
runs-on: ubuntu-latest
steps:
- name: Dump GitHub context
env:
GITHUB_CONTEXT: ${{ toJson(github) }}
run: echo "$GITHUB_CONTEXT"
- uses: actions/checkout@v1
- uses: actions/setup-python@v1
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
- name: Install and run explosion-bot
run: |
pip install git+https://${{ secrets.EXPLOSIONBOT_TOKEN }}@github.com/explosion/explosion-bot


@ -14,7 +14,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v1
uses: actions/checkout@v3
with:
ref: ${{ matrix.branch }}
- name: Get commits from past 24 hours
@ -23,9 +23,9 @@ jobs:
today=$(date '+%Y-%m-%d %H:%M:%S')
yesterday=$(date -d "yesterday" '+%Y-%m-%d %H:%M:%S')
if git log --after="$yesterday" --before="$today" | grep commit ; then
echo "::set-output name=run_tests::true"
echo run_tests=true >> $GITHUB_OUTPUT
else
echo "::set-output name=run_tests::false"
echo run_tests=false >> $GITHUB_OUTPUT
fi
- name: Trigger buildkite build


@ -17,8 +17,10 @@ jobs:
run: |
echo "$GITHUB_CONTEXT"
- uses: actions/checkout@v1
- uses: actions/setup-python@v1
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: '3.10'
- name: Install Bernadette app dependency and send an alert
env:
SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}


@ -8,7 +8,7 @@ be used in real products.
spaCy comes with
[pretrained pipelines](https://spacy.io/models) and
currently supports tokenization and training for **60+ languages**. It features
currently supports tokenization and training for **70+ languages**. It features
state-of-the-art speed and **neural network models** for tagging,
parsing, **named entity recognition**, **text classification** and more,
multi-task learning with pretrained **transformers** like BERT, as well as a
@ -16,7 +16,7 @@ production-ready [**training system**](https://spacy.io/usage/training) and easy
model packaging, deployment and workflow management. spaCy is commercial
open-source software, released under the MIT license.
💫 **Version 3.4.0 out now!**
💫 **Version 3.4 out now!**
[Check out the release notes here.](https://github.com/explosion/spaCy/releases)
[![Azure Pipelines](https://img.shields.io/azure-devops/build/explosion-ai/public/8/master.svg?logo=azure-pipelines&style=flat-square&label=build)](https://dev.azure.com/explosion-ai/public/_build?definitionId=8)
@ -79,7 +79,7 @@ more people can benefit from it.
## Features
- Support for **60+ languages**
- Support for **70+ languages**
- **Trained pipelines** for different languages and tasks
- Multi-task learning with pretrained **transformers** like BERT
- Support for pretrained **word vectors** and embeddings


@ -76,15 +76,24 @@ jobs:
# Python39Mac:
# imageName: "macos-latest"
# python.version: "3.9"
Python310Linux:
imageName: "ubuntu-latest"
python.version: "3.10"
# Python310Linux:
# imageName: "ubuntu-latest"
# python.version: "3.10"
Python310Windows:
imageName: "windows-latest"
python.version: "3.10"
Python310Mac:
imageName: "macos-latest"
python.version: "3.10"
# Python310Mac:
# imageName: "macos-latest"
# python.version: "3.10"
Python311Linux:
imageName: 'ubuntu-latest'
python.version: '3.11'
Python311Windows:
imageName: 'windows-latest'
python.version: '3.11'
Python311Mac:
imageName: 'macos-latest'
python.version: '3.11'
maxParallel: 4
pool:
vmImage: $(imageName)
@ -92,20 +101,3 @@ jobs:
- template: .github/azure-steps.yml
parameters:
python_version: '$(python.version)'
architecture: 'x64'
# - job: "TestGPU"
# dependsOn: "Validate"
# strategy:
# matrix:
# Python38LinuxX64_GPU:
# python.version: '3.8'
# pool:
# name: "LinuxX64_GPU"
# steps:
# - template: .github/azure-steps.yml
# parameters:
# python_version: '$(python.version)'
# architecture: 'x64'
# gpu: true
# num_build_jobs: 24


@ -9,7 +9,7 @@ murmurhash>=0.28.0,<1.1.0
wasabi>=0.9.1,<1.1.0
srsly>=2.4.3,<3.0.0
catalogue>=2.0.6,<2.1.0
typer>=0.3.0,<0.5.0
typer>=0.3.0,<0.8.0
pathy>=0.3.5
# Third party dependencies
numpy>=1.15.0


@ -51,7 +51,7 @@ install_requires =
srsly>=2.4.3,<3.0.0
catalogue>=2.0.6,<2.1.0
# Third-party dependencies
typer>=0.3.0,<0.5.0
typer>=0.3.0,<0.8.0
pathy>=0.3.5
tqdm>=4.38.0,<5.0.0
numpy>=1.15.0


@ -30,7 +30,9 @@ MOD_NAMES = [
"spacy.lexeme",
"spacy.vocab",
"spacy.attrs",
"spacy.kb",
"spacy.kb.candidate",
"spacy.kb.kb",
"spacy.kb.kb_in_memory",
"spacy.ml.parser_model",
"spacy.morphology",
"spacy.pipeline.dep_parser",


@ -1,6 +1,6 @@
# fmt: off
__title__ = "spacy"
__version__ = "3.4.1"
__version__ = "3.4.2"
__download_url__ = "https://github.com/explosion/spacy-models/releases/download"
__compatibility__ = "https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json"
__projects__ = "https://github.com/explosion/projects"


@ -10,6 +10,7 @@ from .._util import get_hash, get_checksum, download_file, ensure_pathy
from ...util import make_tempdir, get_minor_version, ENV_VARS, check_bool_env_var
from ...git_info import GIT_VERSION
from ... import about
from ...errors import Errors
if TYPE_CHECKING:
from pathy import Pathy # noqa: F401
@ -84,7 +85,23 @@ class RemoteStorage:
with tarfile.open(tar_loc, mode=mode_string) as tar_file:
# This requires that the path is added correctly, relative
# to root. This is how we set things up in push()
tar_file.extractall(self.root)
# Disallow paths outside the current directory for the tar
# file (CVE-2007-4559, directory traversal vulnerability)
def is_within_directory(directory, target):
abs_directory = os.path.abspath(directory)
abs_target = os.path.abspath(target)
prefix = os.path.commonprefix([abs_directory, abs_target])
return prefix == abs_directory
def safe_extract(tar, path):
for member in tar.getmembers():
member_path = os.path.join(path, member.name)
if not is_within_directory(path, member_path):
raise ValueError(Errors.E852)
tar.extractall(path)
safe_extract(tar_file, self.root)
return url
def find(
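The prefix check behind safe_extract can be exercised on its own; a minimal sketch with made-up paths (standard library only):

import os

def is_within_directory(directory, target):
    # Same check as above: the resolved member path must stay under the extraction root.
    abs_directory = os.path.abspath(directory)
    abs_target = os.path.abspath(target)
    return os.path.commonprefix([abs_directory, abs_target]) == abs_directory

# Hypothetical member paths as they could appear inside a pulled tar file.
print(is_within_directory("/tmp/cache", "/tmp/cache/model/config.cfg"))  # True: extracted
print(is_within_directory("/tmp/cache", "/tmp/cache/../../etc/passwd"))  # False: safe_extract raises E852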


@ -55,6 +55,7 @@ def project_run(
force: bool = False,
dry: bool = False,
capture: bool = False,
skip_requirements_check: bool = False,
) -> None:
"""Run a named script defined in the project.yml. If the script is a
command rather than a workflow, it is checked against the project lock file
@ -70,6 +71,7 @@ def project_run(
sys.exit will be called with the return code. You should use capture=False
when you want to turn over execution to the command, and capture=True
when you want to run the command more like a function.
skip_requirements_check (bool): Whether to skip the requirements check.
"""
config = load_project_config(project_dir, overrides=overrides)
commands = {cmd["name"]: cmd for cmd in config.get("commands", [])}
@ -77,9 +79,10 @@ def project_run(
validate_subcommand(list(commands.keys()), list(workflows.keys()), subcommand)
req_path = project_dir / "requirements.txt"
if config.get("check_requirements", True) and os.path.exists(req_path):
with req_path.open() as requirements_file:
_check_requirements([req.replace("\n", "") for req in requirements_file])
if not skip_requirements_check:
if config.get("check_requirements", True) and os.path.exists(req_path):
with req_path.open() as requirements_file:
_check_requirements([req.strip() for req in requirements_file])
if subcommand in workflows:
msg.info(f"Running workflow '{subcommand}'")
@ -91,6 +94,7 @@ def project_run(
force=force,
dry=dry,
capture=capture,
skip_requirements_check=True,
)
else:
cmd = commands[subcommand]
@ -364,6 +368,12 @@ def _check_requirements(requirements: List[str]) -> Tuple[bool, bool]:
failed_pkgs_msgs.append(dnf.report())
except pkg_resources.VersionConflict as vc:
conflicting_pkgs_msgs.append(vc.report())
except Exception:
msg.warn(
f"Unable to check requirement: {req} "
"Checks are currently limited to requirement specifiers "
"(PEP 508)"
)
if len(failed_pkgs_msgs) or len(conflicting_pkgs_msgs):
msg.warn(
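For illustration, a rough sketch of calling the runner directly; the project path ("./my_project") and workflow name ("all") are made up, and only the new keyword comes from the change above:

from pathlib import Path
from spacy.cli.project.run import project_run

# The top-level call checks requirements.txt once (unless skip_requirements_check=True);
# the sub-commands it dispatches are now invoked with skip_requirements_check=True,
# so the check is not repeated for every command in the workflow.
project_run(Path("./my_project"), "all", capture=False)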


@ -212,8 +212,8 @@ class Warnings(metaclass=ErrorsWithCodes):
W121 = ("Attempting to trace non-existent method '{method}' in pipe '{pipe}'")
W122 = ("Couldn't trace method '{method}' in pipe '{pipe}'. This can happen if the pipe class "
"is a Cython extension type.")
W123 = ("Argument {arg} with value {arg_value} is used instead of {config_value} as specified in the config. Be "
"aware that this might affect other components in your pipeline.")
W123 = ("Argument `enable` with value {enable} does not contain all values specified in the config option "
"`enabled` ({enabled}). Be aware that this might affect other components in your pipeline.")
class Errors(metaclass=ErrorsWithCodes):
@ -540,8 +540,12 @@ class Errors(metaclass=ErrorsWithCodes):
E199 = ("Unable to merge 0-length span at `doc[{start}:{end}]`.")
E200 = ("Can't set {attr} from Span.")
E202 = ("Unsupported {name} mode '{mode}'. Supported modes: {modes}.")
E203 = ("If the {name} embedding layer is not updated "
"during training, make sure to include it in 'annotating components'")
# New errors added in v3.x
E852 = ("The tar file pulled from the remote attempted an unsafe path "
"traversal.")
E853 = ("Unsupported component factory name '{name}'. The character '.' is "
"not permitted in factory names.")
E854 = ("Unable to set doc.ents. Check that the 'ents_filter' does not "
@ -711,9 +715,9 @@ class Errors(metaclass=ErrorsWithCodes):
"`nlp.enable_pipe` instead.")
E927 = ("Can't write to frozen list. Maybe you're trying to modify a computed "
"property or default function argument?")
E928 = ("A KnowledgeBase can only be serialized to/from from a directory, "
E928 = ("An InMemoryLookupKB can only be serialized to/from from a directory, "
"but the provided argument {loc} points to a file.")
E929 = ("Couldn't read KnowledgeBase from {loc}. The path does not seem to exist.")
E929 = ("Couldn't read InMemoryLookupKB from {loc}. The path does not seem to exist.")
E930 = ("Received invalid get_examples callback in `{method}`. "
"Expected function that returns an iterable of Example objects but "
"got: {obj}")
@ -944,6 +948,12 @@ class Errors(metaclass=ErrorsWithCodes):
"case pass an empty list for the previously not specified argument to avoid this error.")
E1043 = ("Expected None or a value in range [{range_start}, {range_end}] for entity linker threshold, but got "
"{value}.")
E1044 = ("Expected `candidates_batch_size` to be >= 1, but got: {value}")
E1045 = ("Encountered {parent} subclass without `{parent}.{method}` "
"method in '{name}'. If you want to use this method, make "
"sure it's overwritten on the subclass.")
E1046 = ("{cls_name} is an abstract class and cannot be instantiated. If you are looking for spaCy's default "
"knowledge base, use `InMemoryLookupKB`.")
# Deprecated model shortcuts, only used in errors and warnings
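Two of the new codes above can be illustrated with short, hypothetical snippets; the model name and vector length are assumptions, not part of the change:

import spacy
from spacy.kb import InMemoryLookupKB

# W123: the `enable` argument passed at load time lists fewer components than the
# pipeline's `enabled` config option (the warning only fires if the config sets `enabled`).
nlp = spacy.load("en_core_web_sm", enable=["tagger"])

# E1046: KnowledgeBase is now abstract; instantiate the concrete default KB instead.
kb = InMemoryLookupKB(spacy.blank("en").vocab, entity_vector_length=64)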

spacy/kb/__init__.py (new file, 3 lines)

@ -0,0 +1,3 @@
from .kb import KnowledgeBase
from .kb_in_memory import InMemoryLookupKB
from .candidate import Candidate, get_candidates, get_candidates_batch

spacy/kb/candidate.pxd (new file, 12 lines)

@ -0,0 +1,12 @@
from .kb cimport KnowledgeBase
from libcpp.vector cimport vector
from ..typedefs cimport hash_t
# Object used by the Entity Linker that summarizes one entity-alias candidate combination.
cdef class Candidate:
cdef readonly KnowledgeBase kb
cdef hash_t entity_hash
cdef float entity_freq
cdef vector[float] entity_vector
cdef hash_t alias_hash
cdef float prior_prob

spacy/kb/candidate.pyx (new file, 74 lines)

@ -0,0 +1,74 @@
# cython: infer_types=True, profile=True
from typing import Iterable
from .kb cimport KnowledgeBase
from ..tokens import Span
cdef class Candidate:
"""A `Candidate` object refers to a textual mention (`alias`) that may or may not be resolved
to a specific `entity` from a Knowledge Base. This will be used as input for the entity linking
algorithm which will disambiguate the various candidates to the correct one.
Each candidate (alias, entity) pair is assigned a certain prior probability.
DOCS: https://spacy.io/api/kb/#candidate-init
"""
def __init__(self, KnowledgeBase kb, entity_hash, entity_freq, entity_vector, alias_hash, prior_prob):
self.kb = kb
self.entity_hash = entity_hash
self.entity_freq = entity_freq
self.entity_vector = entity_vector
self.alias_hash = alias_hash
self.prior_prob = prior_prob
@property
def entity(self) -> int:
"""RETURNS (uint64): hash of the entity's KB ID/name"""
return self.entity_hash
@property
def entity_(self) -> str:
"""RETURNS (str): ID/name of this entity in the KB"""
return self.kb.vocab.strings[self.entity_hash]
@property
def alias(self) -> int:
"""RETURNS (uint64): hash of the alias"""
return self.alias_hash
@property
def alias_(self) -> str:
"""RETURNS (str): ID of the original alias"""
return self.kb.vocab.strings[self.alias_hash]
@property
def entity_freq(self) -> float:
return self.entity_freq
@property
def entity_vector(self) -> Iterable[float]:
return self.entity_vector
@property
def prior_prob(self) -> float:
return self.prior_prob
def get_candidates(kb: KnowledgeBase, mention: Span) -> Iterable[Candidate]:
"""
Return candidate entities for a given mention by fetching appropriate entries from the index.
kb (KnowledgeBase): Knowledge base to query.
mention (Span): Entity mention for which to identify candidates.
RETURNS (Iterable[Candidate]): Identified candidates.
"""
return kb.get_candidates(mention)
def get_candidates_batch(kb: KnowledgeBase, mentions: Iterable[Span]) -> Iterable[Iterable[Candidate]]:
"""
Return candidate entities for the given mentions by fetching appropriate entries from the index.
kb (KnowledgeBase): Knowledge base to query.
mentions (Iterable[Span]): Entity mentions for which to identify candidates.
RETURNS (Iterable[Iterable[Candidate]]): Identified candidates.
"""
return kb.get_candidates_batch(mentions)
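A usage sketch for the new module-level helpers, backed by the in-memory KB; the entity ID and alias below are made-up examples:

import spacy
from spacy.kb import InMemoryLookupKB, get_candidates

nlp = spacy.blank("en")
kb = InMemoryLookupKB(nlp.vocab, entity_vector_length=3)
kb.add_entity("Q42", freq=10, entity_vector=[1.0, 0.0, 0.0])   # hypothetical entity
kb.add_alias("Douglas Adams", ["Q42"], probabilities=[1.0])    # hypothetical alias

doc = nlp("Douglas Adams wrote books.")
candidates = get_candidates(kb, doc[0:2])   # delegates to kb.get_candidates(mention)
print([c.entity_ for c in candidates])      # expected: ['Q42']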

spacy/kb/kb.pxd (new file, 10 lines)

@ -0,0 +1,10 @@
"""Knowledge-base for entity or concept linking."""
from cymem.cymem cimport Pool
from libc.stdint cimport int64_t
from ..vocab cimport Vocab
cdef class KnowledgeBase:
cdef Pool mem
cdef readonly Vocab vocab
cdef readonly int64_t entity_vector_length

spacy/kb/kb.pyx (new file, 108 lines)

@ -0,0 +1,108 @@
# cython: infer_types=True, profile=True
from pathlib import Path
from typing import Iterable, Tuple, Union
from cymem.cymem cimport Pool
from .candidate import Candidate
from ..tokens import Span
from ..util import SimpleFrozenList
from ..errors import Errors
cdef class KnowledgeBase:
"""A `KnowledgeBase` instance stores unique identifiers for entities and their textual aliases,
to support entity linking of named entities to real-world concepts.
This is an abstract class and requires its operations to be implemented.
DOCS: https://spacy.io/api/kb
"""
def __init__(self, vocab: Vocab, entity_vector_length: int):
"""Create a KnowledgeBase."""
# Make sure abstract KB is not instantiated.
if self.__class__ == KnowledgeBase:
raise TypeError(
Errors.E1046.format(cls_name=self.__class__.__name__)
)
self.vocab = vocab
self.entity_vector_length = entity_vector_length
self.mem = Pool()
def get_candidates_batch(self, mentions: Iterable[Span]) -> Iterable[Iterable[Candidate]]:
"""
Return candidate entities for specified texts. Each candidate defines the entity, the original alias,
and the prior probability of that alias resolving to that entity.
If no candidate is found for a given text, an empty list is returned.
mentions (Iterable[Span]): Mentions for which to get candidates.
RETURNS (Iterable[Iterable[Candidate]]): Identified candidates.
"""
return [self.get_candidates(span) for span in mentions]
def get_candidates(self, mention: Span) -> Iterable[Candidate]:
"""
Return candidate entities for specified text. Each candidate defines the entity, the original alias,
and the prior probability of that alias resolving to that entity.
If no candidate is found for a given text, an empty list is returned.
mention (Span): Mention for which to get candidates.
RETURNS (Iterable[Candidate]): Identified candidates.
"""
raise NotImplementedError(
Errors.E1045.format(parent="KnowledgeBase", method="get_candidates", name=self.__name__)
)
def get_vectors(self, entities: Iterable[str]) -> Iterable[Iterable[float]]:
"""
Return vectors for entities.
entities (Iterable[str]): Entity names/IDs.
RETURNS (Iterable[Iterable[float]]): Vectors for specified entities.
"""
return [self.get_vector(entity) for entity in entities]
def get_vector(self, str entity) -> Iterable[float]:
"""
Return vector for entity.
entity (str): Entity name/ID.
RETURNS (Iterable[float]): Vector for specified entity.
"""
raise NotImplementedError(
Errors.E1045.format(parent="KnowledgeBase", method="get_vector", name=self.__name__)
)
def to_bytes(self, **kwargs) -> bytes:
"""Serialize the current state to a binary string.
RETURNS (bytes): Current state as binary string.
"""
raise NotImplementedError(
Errors.E1045.format(parent="KnowledgeBase", method="to_bytes", name=self.__name__)
)
def from_bytes(self, bytes_data: bytes, *, exclude: Tuple[str] = tuple()):
"""Load state from a binary string.
bytes_data (bytes): KB state.
exclude (Tuple[str]): Properties to exclude when restoring KB.
"""
raise NotImplementedError(
Errors.E1045.format(parent="KnowledgeBase", method="from_bytes", name=self.__name__)
)
def to_disk(self, path: Union[str, Path], exclude: Iterable[str] = SimpleFrozenList()) -> None:
"""
Write KnowledgeBase content to disk.
path (Union[str, Path]): Target file path.
exclude (Iterable[str]): List of components to exclude.
"""
raise NotImplementedError(
Errors.E1045.format(parent="KnowledgeBase", method="to_disk", name=self.__name__)
)
def from_disk(self, path: Union[str, Path], exclude: Iterable[str] = SimpleFrozenList()) -> None:
"""
Load KnowledgeBase content from disk.
path (Union[str, Path]): Target file path.
exclude (Iterable[str]): List of components to exclude.
"""
raise NotImplementedError(
Errors.E1045.format(parent="KnowledgeBase", method="from_disk", name=self.__name__)
)
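Because every unimplemented operation raises E1045, a third-party KB only needs to subclass the base and override what it actually uses; a minimal, hypothetical sketch:

from typing import Iterable

import spacy
from spacy.kb import Candidate, KnowledgeBase
from spacy.tokens import Span

class EmptyKB(KnowledgeBase):
    # Hypothetical subclass that never returns candidates; serialization methods
    # are left unimplemented and keep raising E1045 if called.
    def get_candidates(self, mention: Span) -> Iterable[Candidate]:
        return []

kb = EmptyKB(spacy.blank("en").vocab, entity_vector_length=64)
print(list(kb.get_candidates_batch([])))  # []; the default get_candidates_batch maps over get_candidates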


@ -1,14 +1,12 @@
"""Knowledge-base for entity or concept linking."""
from cymem.cymem cimport Pool
from preshed.maps cimport PreshMap
from libcpp.vector cimport vector
from libc.stdint cimport int32_t, int64_t
from libc.stdio cimport FILE
from .vocab cimport Vocab
from .typedefs cimport hash_t
from .structs cimport KBEntryC, AliasC
from ..typedefs cimport hash_t
from ..structs cimport KBEntryC, AliasC
from .kb cimport KnowledgeBase
ctypedef vector[KBEntryC] entry_vec
ctypedef vector[AliasC] alias_vec
@ -16,21 +14,7 @@ ctypedef vector[float] float_vec
ctypedef vector[float_vec] float_matrix
# Object used by the Entity Linker that summarizes one entity-alias candidate combination.
cdef class Candidate:
cdef readonly KnowledgeBase kb
cdef hash_t entity_hash
cdef float entity_freq
cdef vector[float] entity_vector
cdef hash_t alias_hash
cdef float prior_prob
cdef class KnowledgeBase:
cdef Pool mem
cdef readonly Vocab vocab
cdef int64_t entity_vector_length
cdef class InMemoryLookupKB(KnowledgeBase):
# This maps 64bit keys (hash of unique entity string)
# to 64bit values (position of the _KBEntryC struct in the _entries vector).
# The PreshMap is pretty space efficient, as it uses open addressing. So


@ -1,8 +1,7 @@
# cython: infer_types=True, profile=True
from typing import Iterator, Iterable, Callable, Dict, Any
from typing import Iterable, Callable, Dict, Any, Union
import srsly
from cymem.cymem cimport Pool
from preshed.maps cimport PreshMap
from cpython.exc cimport PyErr_SetFromErrno
from libc.stdio cimport fopen, fclose, fread, fwrite, feof, fseek
@ -12,85 +11,28 @@ from libcpp.vector cimport vector
from pathlib import Path
import warnings
from .typedefs cimport hash_t
from .errors import Errors, Warnings
from . import util
from .util import SimpleFrozenList, ensure_path
cdef class Candidate:
"""A `Candidate` object refers to a textual mention (`alias`) that may or may not be resolved
to a specific `entity` from a Knowledge Base. This will be used as input for the entity linking
algorithm which will disambiguate the various candidates to the correct one.
Each candidate (alias, entity) pair is assigned to a certain prior probability.
DOCS: https://spacy.io/api/kb/#candidate_init
"""
def __init__(self, KnowledgeBase kb, entity_hash, entity_freq, entity_vector, alias_hash, prior_prob):
self.kb = kb
self.entity_hash = entity_hash
self.entity_freq = entity_freq
self.entity_vector = entity_vector
self.alias_hash = alias_hash
self.prior_prob = prior_prob
@property
def entity(self):
"""RETURNS (uint64): hash of the entity's KB ID/name"""
return self.entity_hash
@property
def entity_(self):
"""RETURNS (str): ID/name of this entity in the KB"""
return self.kb.vocab.strings[self.entity_hash]
@property
def alias(self):
"""RETURNS (uint64): hash of the alias"""
return self.alias_hash
@property
def alias_(self):
"""RETURNS (str): ID of the original alias"""
return self.kb.vocab.strings[self.alias_hash]
@property
def entity_freq(self):
return self.entity_freq
@property
def entity_vector(self):
return self.entity_vector
@property
def prior_prob(self):
return self.prior_prob
from ..tokens import Span
from ..typedefs cimport hash_t
from ..errors import Errors, Warnings
from .. import util
from ..util import SimpleFrozenList, ensure_path
from ..vocab cimport Vocab
from .kb cimport KnowledgeBase
from .candidate import Candidate as Candidate
def get_candidates(KnowledgeBase kb, span) -> Iterator[Candidate]:
"""
Return candidate entities for a given span by using the text of the span as the alias
and fetching appropriate entries from the index.
This particular function is optimized to work with the built-in KB functionality,
but any other custom candidate generation method can be used in combination with the KB as well.
"""
return kb.get_alias_candidates(span.text)
cdef class KnowledgeBase:
"""A `KnowledgeBase` instance stores unique identifiers for entities and their textual aliases,
cdef class InMemoryLookupKB(KnowledgeBase):
"""An `InMemoryLookupKB` instance stores unique identifiers for entities and their textual aliases,
to support entity linking of named entities to real-world concepts.
DOCS: https://spacy.io/api/kb
DOCS: https://spacy.io/api/kb_in_memory
"""
def __init__(self, Vocab vocab, entity_vector_length):
"""Create a KnowledgeBase."""
self.mem = Pool()
self.entity_vector_length = entity_vector_length
"""Create an InMemoryLookupKB."""
super().__init__(vocab, entity_vector_length)
self._entry_index = PreshMap()
self._alias_index = PreshMap()
self.vocab = vocab
self._create_empty_vectors(dummy_hash=self.vocab.strings[""])
def _initialize_entities(self, int64_t nr_entities):
@ -104,11 +46,6 @@ cdef class KnowledgeBase:
self._alias_index = PreshMap(nr_aliases + 1)
self._aliases_table = alias_vec(nr_aliases + 1)
@property
def entity_vector_length(self):
"""RETURNS (uint64): length of the entity vectors"""
return self.entity_vector_length
def __len__(self):
return self.get_size_entities()
@ -286,7 +223,10 @@ cdef class KnowledgeBase:
alias_entry.probs = probs
self._aliases_table[alias_index] = alias_entry
def get_alias_candidates(self, str alias) -> Iterator[Candidate]:
def get_candidates(self, mention: Span) -> Iterable[Candidate]:
return self.get_alias_candidates(mention.text) # type: ignore
def get_alias_candidates(self, str alias) -> Iterable[Candidate]:
"""
Return candidate entities for an alias. Each candidate defines the entity, the original alias,
and the prior probability of that alias resolving to that entity.


@ -72,10 +72,10 @@ class CatalanLemmatizer(Lemmatizer):
oov_forms.append(form)
if not forms:
forms.extend(oov_forms)
if not forms and string in lookup_table.keys():
forms.append(self.lookup_lemmatize(token)[0])
# use lookups, and fall back to the token itself
if not forms:
forms.append(string)
forms.append(lookup_table.get(string, [string])[0])
forms = list(dict.fromkeys(forms))
self.cache[cache_key] = forms
return forms


@ -280,7 +280,7 @@ _currency = (
_punct = (
r"… …… , : ; \! \? ¿ ؟ ¡ \( \) \[ \] \{ \} < > _ # \* & 。 · । ، ۔ ؛ ٪"
)
_quotes = r'\' " ” “ ` ´ , „ » « 「 」 『 』 【 】 《 》 〈 〉'
_quotes = r'\' " ” “ ` ´ , „ » « 「 」 『 』 【 】 《 》 〈 〉 〈 〉 ⟦ ⟧'
_hyphens = "- — -- --- —— ~"
# Various symbols like dingbats, but also emoji


@ -53,11 +53,16 @@ class FrenchLemmatizer(Lemmatizer):
rules = rules_table.get(univ_pos, [])
string = string.lower()
forms = []
# first try lookup in table based on upos
if string in index:
forms.append(string)
self.cache[cache_key] = forms
return forms
# then add anything in the exceptions table
forms.extend(exceptions.get(string, []))
# if nothing found yet, use the rules
oov_forms = []
if not forms:
for old, new in rules:
@ -69,12 +74,14 @@ class FrenchLemmatizer(Lemmatizer):
forms.append(form)
else:
oov_forms.append(form)
# if still nothing, add the oov forms from rules
if not forms:
forms.extend(oov_forms)
if not forms and string in lookup_table.keys():
forms.append(self.lookup_lemmatize(token)[0])
# use lookups, which fall back to the token itself
if not forms:
forms.append(string)
forms.append(lookup_table.get(string, [string])[0])
forms = list(dict.fromkeys(forms))
self.cache[cache_key] = forms
return forms


@ -1,11 +1,15 @@
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .stop_words import STOP_WORDS
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES, TOKENIZER_INFIXES
from ...language import Language, BaseDefaults
class AncientGreekDefaults(BaseDefaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
prefixes = TOKENIZER_PREFIXES
suffixes = TOKENIZER_SUFFIXES
infixes = TOKENIZER_INFIXES
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS


@ -0,0 +1,46 @@
from ..char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES, LIST_CURRENCY
from ..char_classes import LIST_ICONS, ALPHA_LOWER, ALPHA_UPPER, ALPHA, HYPHENS
from ..char_classes import CONCAT_QUOTES
_prefixes = (
[
"",
"",
]
+ LIST_PUNCT
+ LIST_ELLIPSES
+ LIST_QUOTES
+ LIST_CURRENCY
+ LIST_ICONS
)
_suffixes = (
LIST_PUNCT
+ LIST_ELLIPSES
+ LIST_QUOTES
+ LIST_ICONS
+ [
"",
"",
r"(?<=[\u1F00-\u1FFF\u0370-\u03FF])[\-\.⸏]",
]
)
_infixes = (
LIST_ELLIPSES
+ LIST_ICONS
+ [
r"(?<=[0-9])[+\-\*^](?=[0-9-])",
r"(?<=[{al}{q}])\.(?=[{au}{q}])".format(
al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES
),
r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
r"(?<=[{a}0-9])(?:{h})(?=[{a}])".format(a=ALPHA, h=HYPHENS),
r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA),
r"(?<=[\u1F00-\u1FFF\u0370-\u03FF])—",
]
)
TOKENIZER_PREFIXES = _prefixes
TOKENIZER_SUFFIXES = _suffixes
TOKENIZER_INFIXES = _infixes


@ -28,7 +28,7 @@ class Russian(Language):
assigns=["token.lemma"],
default_config={
"model": None,
"mode": "pymorphy2",
"mode": "pymorphy3",
"overwrite": False,
"scorer": {"@scorers": "spacy.lemmatizer_scorer.v1"},
},
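The mode can still be selected per pipeline when adding the component; a minimal sketch (assumes `pip install pymorphy3` for the new default, or pymorphy2 for the old behaviour):

import spacy

nlp = spacy.blank("ru")
# "pymorphy3" is now the default; pass "pymorphy2" to keep the previous backend.
nlp.add_pipe("lemmatizer", config={"mode": "pymorphy3"})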


@ -19,7 +19,7 @@ class RussianLemmatizer(Lemmatizer):
model: Optional[Model],
name: str = "lemmatizer",
*,
mode: str = "pymorphy2",
mode: str = "pymorphy3",
overwrite: bool = False,
scorer: Optional[Callable] = lemmatizer_score,
) -> None:
@ -33,6 +33,16 @@ class RussianLemmatizer(Lemmatizer):
) from None
if getattr(self, "_morph", None) is None:
self._morph = MorphAnalyzer()
elif mode == "pymorphy3":
try:
from pymorphy3 import MorphAnalyzer
except ImportError:
raise ImportError(
"The Russian lemmatizer mode 'pymorphy3' requires the "
"pymorphy3 library. Install it with: pip install pymorphy3"
) from None
if getattr(self, "_morph", None) is None:
self._morph = MorphAnalyzer()
super().__init__(
vocab, model, name, mode=mode, overwrite=overwrite, scorer=scorer
)
@ -104,6 +114,9 @@ class RussianLemmatizer(Lemmatizer):
return [analyses[0].normal_form]
return [string]
def pymorphy3_lemmatize(self, token: Token) -> List[str]:
return self.pymorphy2_lemmatize(token)
def oc2ud(oc_tag: str) -> Tuple[str, Dict[str, str]]:
gram_map = {


@ -1,9 +1,17 @@
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES, TOKENIZER_SUFFIXES, TOKENIZER_PREFIXES
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from ...language import Language, BaseDefaults
class SlovenianDefaults(BaseDefaults):
stop_words = STOP_WORDS
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
prefixes = TOKENIZER_PREFIXES
infixes = TOKENIZER_INFIXES
suffixes = TOKENIZER_SUFFIXES
lex_attr_getters = LEX_ATTRS
class Slovenian(Language):

spacy/lang/sl/lex_attrs.py (new file, 145 lines)

@ -0,0 +1,145 @@
from ...attrs import LIKE_NUM
from ...attrs import IS_CURRENCY
import unicodedata
_num_words = set(
"""
nula ničla nič ena dva tri štiri pet šest sedem osem
devet deset enajst dvanajst trinajst štirinajst petnajst
šestnajst sedemnajst osemnajst devetnajst dvajset trideset štirideset
petdeset šestdest sedemdeset osemdeset devedeset sto tisoč
milijon bilijon trilijon kvadrilijon nešteto
en eden enega enemu ennem enim enih enima enimi ene eni eno
dveh dvema dvem dvoje trije treh trem tremi troje štirje štirih štirim štirimi
petih petim petimi šestih šestim šestimi sedmih sedmim sedmimi osmih osmim osmimi
devetih devetim devetimi desetih desetim desetimi enajstih enajstim enajstimi
dvanajstih dvanajstim dvanajstimi trinajstih trinajstim trinajstimi
šestnajstih šestnajstim šestnajstimi petnajstih petnajstim petnajstimi
sedemnajstih sedemnajstim sedemnajstimi osemnajstih osemnajstim osemnajstimi
devetnajstih devetnajstim devetnajstimi dvajsetih dvajsetim dvajsetimi
""".split()
)
_ordinal_words = set(
"""
prvi drugi tretji četrti peti šesti sedmi osmi
deveti deseti enajsti dvanajsti trinajsti štirinajsti
petnajsti šestnajsti sedemnajsti osemnajsti devetnajsti
dvajseti trideseti štirideseti petdeseti šestdeseti sedemdeseti
osemdeseti devetdeseti stoti tisoči milijonti bilijonti
trilijonti kvadrilijonti nešteti
prva druga tretja četrta peta šesta sedma osma
deveta deseta enajsta dvanajsta trinajsta štirnajsta
petnajsta šestnajsta sedemnajsta osemnajsta devetnajsta
dvajseta trideseta štirideseta petdeseta šestdeseta sedemdeseta
osemdeseta devetdeseta stota tisoča milijonta bilijonta
trilijonta kvadrilijonta nešteta
prvo drugo tretje četrto peto šestro sedmo osmo
deveto deseto enajsto dvanajsto trinajsto štirnajsto
petnajsto šestnajsto sedemnajsto osemnajsto devetnajsto
dvajseto trideseto štirideseto petdeseto šestdeseto sedemdeseto
osemdeseto devetdeseto stoto tisočo milijonto bilijonto
trilijonto kvadrilijonto nešteto
prvega drugega tretjega četrtega petega šestega sedmega osmega
devega desetega enajstega dvanajstega trinajstega štirnajstega
petnajstega šestnajstega sedemnajstega osemnajstega devetnajstega
dvajsetega tridesetega štiridesetega petdesetega šestdesetega sedemdesetega
osemdesetega devetdesetega stotega tisočega milijontega bilijontega
trilijontega kvadrilijontega neštetega
prvemu drugemu tretjemu četrtemu petemu šestemu sedmemu osmemu devetemu desetemu
enajstemu dvanajstemu trinajstemu štirnajstemu petnajstemu šestnajstemu sedemnajstemu
osemnajstemu devetnajstemu dvajsetemu tridesetemu štiridesetemu petdesetemu šestdesetemu
sedemdesetemu osemdesetemu devetdesetemu stotemu tisočemu milijontemu bilijontemu
trilijontemu kvadrilijontemu neštetemu
prvem drugem tretjem četrtem petem šestem sedmem osmem devetem desetem
enajstem dvanajstem trinajstem štirnajstem petnajstem šestnajstem sedemnajstem
osemnajstem devetnajstem dvajsetem tridesetem štiridesetem petdesetem šestdesetem
sedemdesetem osemdesetem devetdesetem stotem tisočem milijontem bilijontem
trilijontem kvadrilijontem neštetem
prvim drugim tretjim četrtim petim šestim sedtim osmim devetim desetim
enajstim dvanajstim trinajstim štirnajstim petnajstim šestnajstim sedemnajstim
osemnajstim devetnajstim dvajsetim tridesetim štiridesetim petdesetim šestdesetim
sedemdesetim osemdesetim devetdesetim stotim tisočim milijontim bilijontim
trilijontim kvadrilijontim neštetim
prvih drugih tretjih četrthih petih šestih sedmih osmih deveth desetih
enajstih dvanajstih trinajstih štirnajstih petnajstih šestnajstih sedemnajstih
osemnajstih devetnajstih dvajsetih tridesetih štiridesetih petdesetih šestdesetih
sedemdesetih osemdesetih devetdesetih stotih tisočih milijontih bilijontih
trilijontih kvadrilijontih nešteth
prvima drugima tretjima četrtima petima šestima sedmima osmima devetima desetima
enajstima dvanajstima trinajstima štirnajstima petnajstima šestnajstima sedemnajstima
osemnajstima devetnajstima dvajsetima tridesetima štiridesetima petdesetima šestdesetima
sedemdesetima osemdesetima devetdesetima stotima tisočima milijontima bilijontima
trilijontima kvadrilijontima neštetima
prve druge četrte pete šeste sedme osme devete desete
enajste dvanajste trinajste štirnajste petnajste šestnajste sedemnajste
osemnajste devetnajste dvajsete tridesete štiridesete petdesete šestdesete
sedemdesete osemdesete devetdesete stote tisoče milijonte bilijonte
trilijonte kvadrilijonte neštete
prvimi drugimi tretjimi četrtimi petimi šestimi sedtimi osmimi devetimi desetimi
enajstimi dvanajstimi trinajstimi štirnajstimi petnajstimi šestnajstimi sedemnajstimi
osemnajstimi devetnajstimi dvajsetimi tridesetimi štiridesetimi petdesetimi šestdesetimi
sedemdesetimi osemdesetimi devetdesetimi stotimi tisočimi milijontimi bilijontimi
trilijontimi kvadrilijontimi neštetimi
""".split()
)
_currency_words = set(
"""
evro evra evru evrom evrov evroma evrih evrom evre evri evr eur
cent centa centu cenom centov centoma centih centom cente centi
dolar dolarja dolarji dolarju dolarjem dolarjev dolarjema dolarjih dolarje usd
tolar tolarja tolarji tolarju tolarjem tolarjev tolarjema tolarjih tolarje tol
dinar dinarja dinarji dinarju dinarjem dinarjev dinarjema dinarjih dinarje din
funt funta funti funtu funtom funtov funtoma funtih funte gpb
forint forinta forinti forintu forintom forintov forintoma forintih forinte
zlot zlota zloti zlotu zlotom zlotov zlotoma zlotih zlote
rupij rupija rupiji rupiju rupijem rupijev rupijema rupijih rupije
jen jena jeni jenu jenom jenov jenoma jenih jene
kuna kuni kune kuno kun kunama kunah kunam kunami
marka marki marke markama markah markami
""".split()
)
def like_num(text):
if text.startswith(("+", "-", "±", "~")):
text = text[1:]
text = text.replace(",", "").replace(".", "")
if text.isdigit():
return True
if text.count("/") == 1:
num, denom = text.split("/")
if num.isdigit() and denom.isdigit():
return True
text_lower = text.lower()
if text_lower in _num_words:
return True
if text_lower in _ordinal_words:
return True
return False
def is_currency(text):
text_lower = text.lower()
if text_lower in _currency_words:
return True
for char in text:
if unicodedata.category(char) != "Sc":
return False
return True
LEX_ATTRS = {LIKE_NUM: like_num, IS_CURRENCY: is_currency}


@ -0,0 +1,84 @@
from ..char_classes import (
LIST_ELLIPSES,
LIST_ICONS,
HYPHENS,
LIST_PUNCT,
LIST_QUOTES,
CURRENCY,
UNITS,
PUNCT,
LIST_CURRENCY,
CONCAT_QUOTES,
)
from ..char_classes import CONCAT_QUOTES, ALPHA_LOWER, ALPHA_UPPER, ALPHA
from ..char_classes import merge_chars
from ..punctuation import TOKENIZER_PREFIXES as BASE_TOKENIZER_PREFIXES
INCLUDE_SPECIAL = ["\\+", "\\/", "\\", "\\¯", "\\=", "\\×"] + HYPHENS.split("|")
_prefixes = INCLUDE_SPECIAL + BASE_TOKENIZER_PREFIXES
_suffixes = (
INCLUDE_SPECIAL
+ LIST_PUNCT
+ LIST_ELLIPSES
+ LIST_QUOTES
+ LIST_ICONS
+ [
r"(?<=°[FfCcKk])\.",
r"(?<=[0-9])(?:{c})".format(c=CURRENCY),
r"(?<=[0-9])(?:{u})".format(u=UNITS),
r"(?<=[{al}{e}{p}(?:{q})])\.".format(
al=ALPHA_LOWER, e=r"%²\-\+", q=CONCAT_QUOTES, p=PUNCT
),
r"(?<=[{au}][{au}])\.".format(au=ALPHA_UPPER),
# split initials like J.K. Rowling
r"(?<=[A-Z]\.)(?:[A-Z].)",
]
)
# a list of all suffixes following a hyphen that shouldn't be split (e.g. BTC-jev)
# source: Obeliks tokenizer - https://github.com/clarinsi/obeliks/blob/master/obeliks/res/TokRulesPart1.txt
CONCAT_QUOTES = CONCAT_QUOTES.replace("'", "")
HYPHENS_PERMITTED = (
"((a)|(evemu)|(evskega)|(i)|(jevega)|(jevska)|(jevskimi)|(jinemu)|(oma)|(ovim)|"
"(ovski)|(e)|(evi)|(evskem)|(ih)|(jevem)|(jevske)|(jevsko)|(jini)|(ov)|(ovima)|"
"(ovskih)|(em)|(evih)|(evskemu)|(ja)|(jevemu)|(jevskega)|(ji)|(jinih)|(ova)|"
"(ovimi)|(ovskim)|(ema)|(evim)|(evski)|(je)|(jevi)|(jevskem)|(jih)|(jinim)|"
"(ove)|(ovo)|(ovskima)|(ev)|(evima)|(evskih)|(jem)|(jevih)|(jevskemu)|(jin)|"
"(jinima)|(ovega)|(ovska)|(ovskimi)|(eva)|(evimi)|(evskim)|(jema)|(jevim)|"
"(jevski)|(jina)|(jinimi)|(ovem)|(ovske)|(ovsko)|(eve)|(evo)|(evskima)|(jev)|"
"(jevima)|(jevskih)|(jine)|(jino)|(ovemu)|(ovskega)|(u)|(evega)|(evska)|"
"(evskimi)|(jeva)|(jevimi)|(jevskim)|(jinega)|(ju)|(ovi)|(ovskem)|(evem)|"
"(evske)|(evsko)|(jeve)|(jevo)|(jevskima)|(jinem)|(om)|(ovih)|(ovskemu)|"
"(ovec)|(ovca)|(ovcu)|(ovcem)|(ovcev)|(ovcema)|(ovcih)|(ovci)|(ovce)|(ovcimi)|"
"(evec)|(evca)|(evcu)|(evcem)|(evcev)|(evcema)|(evcih)|(evci)|(evce)|(evcimi)|"
"(jevec)|(jevca)|(jevcu)|(jevcem)|(jevcev)|(jevcema)|(jevcih)|(jevci)|(jevce)|"
"(jevcimi)|(ovka)|(ovke)|(ovki)|(ovko)|(ovk)|(ovkama)|(ovkah)|(ovkam)|(ovkami)|"
"(evka)|(evke)|(evki)|(evko)|(evk)|(evkama)|(evkah)|(evkam)|(evkami)|(jevka)|"
"(jevke)|(jevki)|(jevko)|(jevk)|(jevkama)|(jevkah)|(jevkam)|(jevkami)|(timi)|"
"(im)|(ima)|(a)|(imi)|(e)|(o)|(ega)|(ti)|(em)|(tih)|(emu)|(tim)|(i)|(tima)|"
"(ih)|(ta)|(te)|(to)|(tega)|(tem)|(temu))"
)
_infixes = (
LIST_ELLIPSES
+ LIST_ICONS
+ [
r"(?<=[0-9])[+\-\*^](?=[0-9-])",
r"(?<=[{al}{q}])\.(?=[{au}{q}])".format(
al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES
),
r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
r"(?<=[{a}0-9])(?:{h})(?!{hp}$)(?=[{a}])".format(
a=ALPHA, h=HYPHENS, hp=HYPHENS_PERMITTED
),
r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA),
]
)
TOKENIZER_PREFIXES = _prefixes
TOKENIZER_SUFFIXES = _suffixes
TOKENIZER_INFIXES = _infixes
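The effect of the permitted-suffix lookahead can be checked with the blank Slovenian pipeline; a small sketch (expected behaviour, not a recorded test):

import spacy

nlp = spacy.blank("sl")
# "jev" is listed in HYPHENS_PERMITTED, so the borrowed form should stay one token.
print([t.text for t in nlp("BTC-jev")])  # expected: ['BTC-jev']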


@ -1,326 +1,84 @@
# Source: https://github.com/stopwords-iso/stopwords-sl
# Removed various words that are not normally considered stop words, such as months.
STOP_WORDS = set(
"""
a
ali
b
bi
bil
bila
bile
bili
bilo
biti
blizu
bo
bodo
bolj
bom
bomo
boste
bova
boš
brez
c
cel
cela
celi
celo
d
da
daleč
dan
danes
do
dober
dobra
dobri
dobro
dokler
dol
dovolj
e
eden
en
ena
ene
eni
enkrat
eno
etc.
a ali
b bi bil bila bile bili bilo biti blizu bo bodo bojo bolj bom bomo
boste bova boš brez
c cel cela celi celo
č če često četrta četrtek četrti četrto čez čigav
d da daleč dan danes datum deset deseta deseti deseto devet
deveta deveti deveto do dober dobra dobri dobro dokler dol dolg
dolga dolgi dovolj drug druga drugi drugo dva dve
e eden en ena ene eni enkrat eno etc.
f
g
g.
ga
ga.
gor
gospa
gospod
h
halo
i
idr.
ii
iii
in
iv
ix
iz
j
jaz
je
ji
jih
jim
jo
k
kadarkoli
kaj
kajti
kako
kakor
kamor
kamorkoli
kar
karkoli
katerikoli
kdaj
kdo
kdorkoli
ker
ki
kje
kjer
kjerkoli
ko
koderkoli
koga
komu
kot
l
le
lep
lepa
lepe
lepi
lepo
m
manj
me
med
medtem
mene
mi
midva
midve
mnogo
moj
moja
moje
mora
morajo
moram
moramo
morate
moraš
morem
mu
n
na
nad
naj
najina
najino
najmanj
naju
največ
nam
nas
nato
nazaj
naš
naša
naše
ne
nedavno
nek
neka
nekaj
nekatere
nekateri
nekatero
nekdo
neke
nekega
neki
nekje
neko
nekoga
nekoč
ni
nikamor
nikdar
nikjer
nikoli
nič
nje
njega
njegov
njegova
njegovo
njej
njemu
njen
njena
njeno
nji
njih
njihov
njihova
njihovo
njiju
njim
njo
njun
njuna
njuno
no
nocoj
npr.
o
ob
oba
obe
oboje
od
okoli
on
onadva
one
oni
onidve
oz.
p
pa
po
pod
pogosto
poleg
ponavadi
ponovno
potem
povsod
prbl.
precej
pred
prej
preko
pri
pribl.
približno
proti
r
redko
res
s
saj
sam
sama
same
sami
samo
se
sebe
sebi
sedaj
sem
seveda
si
sicer
skoraj
skozi
smo
so
spet
sta
ste
sva
t
ta
tak
taka
take
taki
tako
takoj
tam
te
tebe
tebi
tega
ti
tista
tiste
tisti
tisto
tj.
tja
to
toda
tu
tudi
tukaj
tvoj
tvoja
tvoje
g g. ga ga. gor gospa gospod
h halo
i idr. ii iii in iv ix iz
j jaz je ji jih jim jo jutri
k kadarkoli kaj kajti kako kakor kamor kamorkoli kar karkoli
katerikoli kdaj kdo kdorkoli ker ki kje kjer kjerkoli
ko koder koderkoli koga komu kot kratek kratka kratke kratki
l lahka lahke lahki lahko le lep lepa lepe lepi lepo leto
m majhen majhna majhni malce malo manj me med medtem mene
mesec mi midva midve mnogo moj moja moje mora morajo moram
moramo morate moraš morem mu
n na nad naj najina najino najmanj naju največ nam narobe
nas nato nazaj naš naša naše ne nedavno nedelja nek neka
nekaj nekatere nekateri nekatero nekdo neke nekega neki
nekje neko nekoga nekoč ni nikamor nikdar nikjer nikoli
nič nje njega njegov njegova njegovo njej njemu njen
njena njeno nji njih njihov njihova njihovo njiju njim
njo njun njuna njuno no nocoj npr.
o ob oba obe oboje od odprt odprta odprti okoli on
onadva one oni onidve osem osma osmi osmo oz.
p pa pet peta petek peti peto po pod pogosto poleg poln
polna polni polno ponavadi ponedeljek ponovno potem
povsod pozdravljen pozdravljeni prav prava prave pravi
pravo prazen prazna prazno prbl. precej pred prej preko
pri pribl. približno primer pripravljen pripravljena
pripravljeni proti prva prvi prvo
r ravno redko res reč
s saj sam sama same sami samo se sebe sebi sedaj sedem
sedma sedmi sedmo sem seveda si sicer skoraj skozi slab sm
so sobota spet sreda srednja srednji sta ste stran stvar sva
š šest šesta šesti šesto štiri
t ta tak taka take taki tako takoj tam te tebe tebi tega
težak težka težki težko ti tista tiste tisti tisto tj.
tja to toda torek tretja tretje tretji tri tu tudi tukaj
tvoj tvoja tvoje
u
v
vaju
vam
vas
vaš
vaša
vaše
ve
vedno
vendar
ves
več
vi
vidva
vii
viii
vsa
vsaj
vsak
vsaka
vsakdo
vsake
vsaki
vsakomur
vse
vsega
vsi
vso
včasih
x
z
za
zadaj
zadnji
zakaj
zdaj
zelo
zunaj
č
če
često
čez
čigav
š
ž
že
v vaju vam vas vaš vaša vaše ve vedno velik velika veliki
veliko vendar ves več vi vidva vii viii visok visoka visoke
visoki vsa vsaj vsak vsaka vsakdo vsake vsaki vsakomur vse
vsega vsi vso včasih včeraj
x
z za zadaj zadnji zakaj zaprta zaprti zaprto zdaj zelo zunaj
ž že
""".split()
)


@ -0,0 +1,272 @@
from typing import Dict, List
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ...symbols import ORTH, NORM
from ...util import update_exc
_exc: Dict[str, List[Dict]] = {}
_other_exc = {
"t.i.": [{ORTH: "t.", NORM: "tako"}, {ORTH: "i.", NORM: "imenovano"}],
"t.j.": [{ORTH: "t.", NORM: "to"}, {ORTH: "j.", NORM: "je"}],
"T.j.": [{ORTH: "T.", NORM: "to"}, {ORTH: "j.", NORM: "je"}],
"d.o.o.": [
{ORTH: "d.", NORM: "družba"},
{ORTH: "o.", NORM: "omejeno"},
{ORTH: "o.", NORM: "odgovornostjo"},
],
"D.O.O.": [
{ORTH: "D.", NORM: "družba"},
{ORTH: "O.", NORM: "omejeno"},
{ORTH: "O.", NORM: "odgovornostjo"},
],
"d.n.o.": [
{ORTH: "d.", NORM: "družba"},
{ORTH: "n.", NORM: "neomejeno"},
{ORTH: "o.", NORM: "odgovornostjo"},
],
"D.N.O.": [
{ORTH: "D.", NORM: "družba"},
{ORTH: "N.", NORM: "neomejeno"},
{ORTH: "O.", NORM: "odgovornostjo"},
],
"d.d.": [{ORTH: "d.", NORM: "delniška"}, {ORTH: "d.", NORM: "družba"}],
"D.D.": [{ORTH: "D.", NORM: "delniška"}, {ORTH: "D.", NORM: "družba"}],
"s.p.": [{ORTH: "s.", NORM: "samostojni"}, {ORTH: "p.", NORM: "podjetnik"}],
"S.P.": [{ORTH: "S.", NORM: "samostojni"}, {ORTH: "P.", NORM: "podjetnik"}],
"l.r.": [{ORTH: "l.", NORM: "lastno"}, {ORTH: "r.", NORM: "ročno"}],
"le-te": [{ORTH: "le"}, {ORTH: "-"}, {ORTH: "te"}],
"Le-te": [{ORTH: "Le"}, {ORTH: "-"}, {ORTH: "te"}],
"le-ti": [{ORTH: "le"}, {ORTH: "-"}, {ORTH: "ti"}],
"Le-ti": [{ORTH: "Le"}, {ORTH: "-"}, {ORTH: "ti"}],
"le-to": [{ORTH: "le"}, {ORTH: "-"}, {ORTH: "to"}],
"Le-to": [{ORTH: "Le"}, {ORTH: "-"}, {ORTH: "to"}],
"le-ta": [{ORTH: "le"}, {ORTH: "-"}, {ORTH: "ta"}],
"Le-ta": [{ORTH: "Le"}, {ORTH: "-"}, {ORTH: "ta"}],
"le-tega": [{ORTH: "le"}, {ORTH: "-"}, {ORTH: "tega"}],
"Le-tega": [{ORTH: "Le"}, {ORTH: "-"}, {ORTH: "tega"}],
}
_exc.update(_other_exc)
for exc_data in [
{ORTH: "adm.", NORM: "administracija"},
{ORTH: "aer.", NORM: "aeronavtika"},
{ORTH: "agr.", NORM: "agronomija"},
{ORTH: "amer.", NORM: "ameriško"},
{ORTH: "anat.", NORM: "anatomija"},
{ORTH: "angl.", NORM: "angleški"},
{ORTH: "ant.", NORM: "antonim"},
{ORTH: "antr.", NORM: "antropologija"},
{ORTH: "apr.", NORM: "april"},
{ORTH: "arab.", NORM: "arabsko"},
{ORTH: "arheol.", NORM: "arheologija"},
{ORTH: "arhit.", NORM: "arhitektura"},
{ORTH: "avg.", NORM: "avgust"},
{ORTH: "avstr.", NORM: "avstrijsko"},
{ORTH: "avt.", NORM: "avtomobilizem"},
{ORTH: "bibl.", NORM: "biblijsko"},
{ORTH: "biokem.", NORM: "biokemija"},
{ORTH: "biol.", NORM: "biologija"},
{ORTH: "bolg.", NORM: "bolgarski"},
{ORTH: "bot.", NORM: "botanika"},
{ORTH: "cit.", NORM: "citat"},
{ORTH: "daj.", NORM: "dajalnik"},
{ORTH: "del.", NORM: "deležnik"},
{ORTH: "ed.", NORM: "ednina"},
{ORTH: "etn.", NORM: "etnografija"},
{ORTH: "farm.", NORM: "farmacija"},
{ORTH: "filat.", NORM: "filatelija"},
{ORTH: "filoz.", NORM: "filozofija"},
{ORTH: "fin.", NORM: "finančništvo"},
{ORTH: "fiz.", NORM: "fizika"},
{ORTH: "fot.", NORM: "fotografija"},
{ORTH: "fr.", NORM: "francoski"},
{ORTH: "friz.", NORM: "frizerstvo"},
{ORTH: "gastr.", NORM: "gastronomija"},
{ORTH: "geogr.", NORM: "geografija"},
{ORTH: "geol.", NORM: "geologija"},
{ORTH: "geom.", NORM: "geometrija"},
{ORTH: "germ.", NORM: "germanski"},
{ORTH: "gl.", NORM: "glej"},
{ORTH: "glag.", NORM: "glagolski"},
{ORTH: "glasb.", NORM: "glasba"},
{ORTH: "gled.", NORM: "gledališče"},
{ORTH: "gost.", NORM: "gostinstvo"},
{ORTH: "gozd.", NORM: "gozdarstvo"},
{ORTH: "gr.", NORM: "grški"},
{ORTH: "grad.", NORM: "gradbeništvo"},
{ORTH: "hebr.", NORM: "hebrejsko"},
{ORTH: "hrv.", NORM: "hrvaško"},
{ORTH: "ide.", NORM: "indoevropsko"},
{ORTH: "igr.", NORM: "igre"},
{ORTH: "im.", NORM: "imenovalnik"},
{ORTH: "iron.", NORM: "ironično"},
{ORTH: "it.", NORM: "italijanski"},
{ORTH: "itd.", NORM: "in tako dalje"},
{ORTH: "itn.", NORM: "in tako naprej"},
{ORTH: "ipd.", NORM: "in podobno"},
{ORTH: "jap.", NORM: "japonsko"},
{ORTH: "jul.", NORM: "julij"},
{ORTH: "jun.", NORM: "junij"},
{ORTH: "kit.", NORM: "kitajsko"},
{ORTH: "knj.", NORM: "knjižno"},
{ORTH: "knjiž.", NORM: "knjižno"},
{ORTH: "kor.", NORM: "koreografija"},
{ORTH: "lat.", NORM: "latinski"},
{ORTH: "les.", NORM: "lesna stroka"},
{ORTH: "lingv.", NORM: "lingvistika"},
{ORTH: "lit.", NORM: "literarni"},
{ORTH: "ljubk.", NORM: "ljubkovalno"},
{ORTH: "lov.", NORM: "lovstvo"},
{ORTH: "m.", NORM: "moški"},
{ORTH: "mak.", NORM: "makedonski"},
{ORTH: "mar.", NORM: "marec"},
{ORTH: "mat.", NORM: "matematika"},
{ORTH: "med.", NORM: "medicina"},
{ORTH: "meh.", NORM: "mehiško"},
{ORTH: "mest.", NORM: "mestnik"},
{ORTH: "mdr.", NORM: "med drugim"},
{ORTH: "min.", NORM: "mineralogija"},
{ORTH: "mitol.", NORM: "mitologija"},
{ORTH: "mn.", NORM: "množina"},
{ORTH: "mont.", NORM: "montanistika"},
{ORTH: "muz.", NORM: "muzikologija"},
{ORTH: "nam.", NORM: "namenilnik"},
{ORTH: "nar.", NORM: "narečno"},
{ORTH: "nav.", NORM: "navadno"},
{ORTH: "nedol.", NORM: "nedoločnik"},
{ORTH: "nedov.", NORM: "nedovršni"},
{ORTH: "neprav.", NORM: "nepravilno"},
{ORTH: "nepreh.", NORM: "neprehodno"},
{ORTH: "neskl.", NORM: "nesklonljiv(o)"},
{ORTH: "nestrok.", NORM: "nestrokovno"},
{ORTH: "num.", NORM: "numizmatika"},
{ORTH: "npr.", NORM: "na primer"},
{ORTH: "obrt.", NORM: "obrtništvo"},
{ORTH: "okt.", NORM: "oktober"},
{ORTH: "or.", NORM: "orodnik"},
{ORTH: "os.", NORM: "oseba"},
{ORTH: "otr.", NORM: "otroško"},
{ORTH: "oz.", NORM: "oziroma"},
{ORTH: "pal.", NORM: "paleontologija"},
{ORTH: "papir.", NORM: "papirništvo"},
{ORTH: "ped.", NORM: "pedagogika"},
{ORTH: "pisar.", NORM: "pisarniško"},
{ORTH: "pog.", NORM: "pogovorno"},
{ORTH: "polit.", NORM: "politika"},
{ORTH: "polj.", NORM: "poljsko"},
{ORTH: "poljud.", NORM: "poljudno"},
{ORTH: "preg.", NORM: "pregovor"},
{ORTH: "preh.", NORM: "prehodno"},
{ORTH: "pren.", NORM: "preneseno"},
{ORTH: "prid.", NORM: "pridevnik"},
{ORTH: "prim.", NORM: "primerjaj"},
{ORTH: "prisl.", NORM: "prislov"},
{ORTH: "psih.", NORM: "psihologija"},
{ORTH: "psiht.", NORM: "psihiatrija"},
{ORTH: "rad.", NORM: "radiotehnika"},
{ORTH: "rač.", NORM: "računalništvo"},
{ORTH: "rib.", NORM: "ribištvo"},
{ORTH: "rod.", NORM: "rodilnik"},
{ORTH: "rus.", NORM: "rusko"},
{ORTH: "s.", NORM: "srednji"},
{ORTH: "sam.", NORM: "samostalniški"},
{ORTH: "sed.", NORM: "sedanjik"},
{ORTH: "sep.", NORM: "september"},
{ORTH: "slabš.", NORM: "slabšalno"},
{ORTH: "slovan.", NORM: "slovansko"},
{ORTH: "slovaš.", NORM: "slovaško"},
{ORTH: "srb.", NORM: "srbsko"},
{ORTH: "star.", NORM: "starinsko"},
{ORTH: "stil.", NORM: "stilno"},
{ORTH: "sv.", NORM: "svet(i)"},
{ORTH: "teh.", NORM: "tehnika"},
{ORTH: "tisk.", NORM: "tiskarstvo"},
{ORTH: "tj.", NORM: "to je"},
{ORTH: "tož.", NORM: "tožilnik"},
{ORTH: "trg.", NORM: "trgovina"},
{ORTH: "ukr.", NORM: "ukrajinski"},
{ORTH: "um.", NORM: "umetnost"},
{ORTH: "vel.", NORM: "velelnik"},
{ORTH: "vet.", NORM: "veterina"},
{ORTH: "vez.", NORM: "veznik"},
{ORTH: "vn.", NORM: "visokonemško"},
{ORTH: "voj.", NORM: "vojska"},
{ORTH: "vrtn.", NORM: "vrtnarstvo"},
{ORTH: "vulg.", NORM: "vulgarno"},
{ORTH: "vznes.", NORM: "vzneseno"},
{ORTH: "zal.", NORM: "založništvo"},
{ORTH: "zastar.", NORM: "zastarelo"},
{ORTH: "zgod.", NORM: "zgodovina"},
{ORTH: "zool.", NORM: "zoologija"},
{ORTH: "čeb.", NORM: "čebelarstvo"},
{ORTH: "češ.", NORM: "češki"},
{ORTH: "člov.", NORM: "človeškost"},
{ORTH: "šah.", NORM: "šahovski"},
{ORTH: "šalj.", NORM: "šaljivo"},
{ORTH: "šp.", NORM: "španski"},
{ORTH: "špan.", NORM: "špansko"},
{ORTH: "šport.", NORM: "športni"},
{ORTH: "štev.", NORM: "števnik"},
{ORTH: "šved.", NORM: "švedsko"},
{ORTH: "švic.", NORM: "švicarsko"},
{ORTH: "ž.", NORM: "ženski"},
{ORTH: "žarg.", NORM: "žargonsko"},
{ORTH: "žel.", NORM: "železnica"},
{ORTH: "živ.", NORM: "živost"},
]:
_exc[exc_data[ORTH]] = [exc_data]
abbrv = """
Co. Ch. DIPL. DR. Dr. Ev. Inc. Jr. Kr. Mag. M. MR. Mr. Mt. Murr. Npr. OZ.
Opr. Osn. Prim. Roj. ST. Sim. Sp. Sred. St. Sv. Škofl. Tel. UR. Zb.
a. aa. ab. abc. abit. abl. abs. abt. acc. accel. add. adj. adv. aet. afr. akad. al. alban. all. alleg.
alp. alt. alter. alžir. am. an. andr. ang. anh. anon. ans. antrop. apoc. app. approx. apt. ar. arc. arch.
arh. arr. as. asist. assist. assoc. asst. astr. attn. aug. avstral. az. b. bab. bal. bbl. bd. belg. bioinf.
biomed. bk. bl. bn. borg. bp. br. braz. brit. bros. broš. bt. bu. c. ca. cal. can. cand. cantab. cap. capt.
cat. cath. cc. cca. cd. cdr. cdre. cent. cerkv. cert. cf. cfr. ch. chap. chem. chr. chs. cic. circ. civ. cl.
cm. cmd. cnr. co. cod. col. coll. colo. com. comp. con. conc. cond. conn. cons. cont. coop. corr. cost. cp.
cpl. cr. crd. cres. cresc. ct. cu. d. dan. dat. davč. ddr. dec. ded. def. dem. dent. dept. dia. dip. dipl.
dir. disp. diss. div. do. doc. dok. dol. doo. dop. dott. dr. dram. druž. družb. drž. dt. duh. dur. dvr. dwt. e.
ea. ecc. eccl. eccles. econ. edn. egipt. egr. ekon. eksp. el. em. enc. eng. eo. ep. err. esp. esq. est.
et. etc. etnogr. etnol. ev. evfem. evr. ex. exc. excl. exp. expl. ext. exx. f. fa. facs. fak. faks. fas.
fasc. fco. fcp. feb. febr. fec. fed. fem. ff. fff. fid. fig. fil. film. fiziol. fiziot. flam. fm. fo. fol. folk.
frag. fran. franc. fsc. g. ga. gal. gdč. ge. gen. geod. geog. geotehnol. gg. gimn. glas. glav. gnr. go. gor.
gosp. gp. graf. gram. gren. grš. gs. h. hab. hf. hist. ho. hort. i. ia. ib. ibid. id. idr. idridr. ill. imen.
imp. impf. impr. in. inc. incl. ind. indus. inf. inform. ing. init. ins. int. inv. inšp. inštr. inž. is. islam.
ist. ital. iur. iz. izbr. izd. izg. izgr. izr. izv. j. jak. jam. jan. jav. je. jez. jr. jsl. jud. jug.
jugoslovan. jur. juž. jv. jz. k. kal. kan. kand. kat. kdo. kem. kip. kmet. kol. kom. komp. konf. kont. kost. kov.
kp. kpfw. kr. kraj. krat. kub. kult. kv. kval. l. la. lab. lb. ld. let. lib. lik. litt. lj. ljud. ll. loc. log.
loč. lt. ma. madž. mag. manag. manjš. masc. mass. mater. max. maxmax. mb. md. mech. medic. medij. medn.
mehč. mem. menedž. mes. mess. metal. meteor. meteorol. mex. mi. mikr. mil. minn. mio. misc. miss. mit. mk.
mkt. ml. mlad. mlle. mlr. mm. mme. množ. mo. moj. moš. možn. mr. mrd. mrs. ms. msc. msgr. mt. murr. mus. mut.
n. na. nad. nadalj. nadom. nagl. nakl. namer. nan. naniz. nasl. nat. navt. nač. ned. nem. nik. nizoz. nm. nn.
no. nom. norv. notr. nov. novogr. ns. o. ob. obd. obj. oblač. obl. oblik. obr. obraz. obs. obst. obt. obč. oc.
oct. od. odd. odg. odn. odst. odv. oec. off. ok. okla. okr. ont. oo. op. opis. opp. opr. orch. ord. ore. oreg.
org. orient. orig. ork. ort. oseb. osn. ot. ozir. ošk. p. pag. par. para. parc. parl. part. past. pat. pdk.
pen. perf. pert. perz. pesn. pet. pev. pf. pfc. ph. pharm. phil. pis. pl. po. pod. podr. podaljš. pogl. pogoj. pojm.
pok. pokr. pol. poljed. poljub. polu. pom. pomen. pon. ponov. pop. por. port. pos. posl. posn. pov. pp. ppl. pr.
praet. prav. pravopis. pravosl. preb. pred. predl. predm. predp. preds. pref. pregib. prel. prem. premen. prep.
pres. pret. prev. pribl. prih. pril. primerj. primor. prip. pripor. prir. prist. priv. proc. prof. prog. proiz.
prom. pron. prop. prot. protest. prov. ps. pss. pt. publ. pz. q. qld. qu. quad. que. r. racc. rastl. razgl.
razl. razv. rd. red. ref. reg. rel. relig. rep. repr. rer. resp. rest. ret. rev. revol. rež. rim. rist. rkp. rm.
roj. rom. romun. rp. rr. rt. rud. ruš. ry. sal. samogl. san. sc. scen. sci. scr. sdv. seg. sek. sen. sept. ser.
sev. sg. sgt. sh. sig. sigg. sign. sim. sin. sing. sinh. skand. skl. sklad. sklanj. sklep. skr. sl. slik. slov.
slovak. slovn. sn. so. sob. soc. sociol. sod. sopomen. sopr. sor. sov. sovj. sp. spec. spl. spr. spreg. sq. sr.
sre. sred. sredoz. srh. ss. ssp. st. sta. stan. stanstar. stcsl. ste. stim. stol. stom. str. stroj. strok. stsl.
stud. sup. supl. suppl. svet. sz. t. tab. tech. ted. tehn. tehnol. tek. teks. tekst. tel. temp. ten. teol. ter.
term. test. th. theol. tim. tip. tisočl. tit. tl. tol. tolmač. tom. tor. tov. tr. trad. traj. trans. tren.
trib. tril. trop. trp. trž. ts. tt. tu. tur. turiz. tvor. tvorb. . u. ul. umet. un. univ. up. upr. ur. urad.
us. ust. utr. v. va. val. var. varn. ven. ver. verb. vest. vezal. vic. vis. viv. viz. viš. vod. vok. vol. vpr.
vrst. vrstil. vs. vv. vzd. vzg. vzh. vzor. w. wed. wg. wk. x. y. z. zah. zaim. zak. zap. zasl. zavar. zač. zb.
združ. zg. zn. znan. znanstv. zoot. zun. zv. zvd. á. é. ć. č. čas. čet. čl. člen. čustv. đ. ľ. ł. ş. ŠT. š. šir.
škofl. škot. šol. št. števil. štud. ů. ű. žen. žival.
""".split()
for orth in abbrv:
_exc[orth] = [{ORTH: orth}]
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
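For a quick sanity check of the exception list, a minimal sketch (the sample sentence is invented; "sv." and "tj." appear in the exceptions above, so they stay single tokens instead of splitting at the period):

from spacy.lang.sl import Slovenian

nlp = Slovenian()
doc = nlp("Praznik sv. Petra, tj. konec junija.")
print([t.text for t in doc])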

View File

@ -29,7 +29,7 @@ class Ukrainian(Language):
assigns=["token.lemma"],
default_config={
"model": None,
"mode": "pymorphy2",
"mode": "pymorphy3",
"overwrite": False,
"scorer": {"@scorers": "spacy.lemmatizer_scorer.v1"},
},

View File

@ -14,7 +14,7 @@ class UkrainianLemmatizer(RussianLemmatizer):
model: Optional[Model],
name: str = "lemmatizer",
*,
mode: str = "pymorphy2",
mode: str = "pymorphy3",
overwrite: bool = False,
scorer: Optional[Callable] = lemmatizer_score,
) -> None:
@ -29,6 +29,17 @@ class UkrainianLemmatizer(RussianLemmatizer):
) from None
if getattr(self, "_morph", None) is None:
self._morph = MorphAnalyzer(lang="uk")
elif mode == "pymorphy3":
try:
from pymorphy3 import MorphAnalyzer
except ImportError:
raise ImportError(
"The Ukrainian lemmatizer mode 'pymorphy3' requires the "
"pymorphy3 library and dictionaries. Install them with: "
"pip install pymorphy3 pymorphy3-dicts-uk"
) from None
if getattr(self, "_morph", None) is None:
self._morph = MorphAnalyzer(lang="uk")
super().__init__(
vocab, model, name, mode=mode, overwrite=overwrite, scorer=scorer
)
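A minimal sketch of the new default in use, assuming pymorphy3 and pymorphy3-dicts-uk are installed (the mode is spelled out only for clarity, since it is now the default):

import spacy

nlp = spacy.blank("uk")
nlp.add_pipe("lemmatizer", config={"mode": "pymorphy3"})
nlp.initialize()
doc = nlp("книги")
print([(t.text, t.lemma_) for t in doc])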

View File

@ -1879,31 +1879,22 @@ class Language:
if isinstance(exclude, str):
exclude = [exclude]
def fetch_pipes_status(value: Iterable[str], key: str) -> Iterable[str]:
"""Fetch value for `enable` or `disable` w.r.t. the specified config and passed arguments passed to
.load(). If both arguments and config specified values for this field, the passed arguments take precedence
and a warning is printed.
value (Iterable[str]): Passed value for `enable` or `disable`.
key (str): Key for field in config (either "enabled" or "disabled").
RETURN (Iterable[str]):
"""
# We assume that no argument was passed if the value is the specified default value.
if id(value) == id(_DEFAULT_EMPTY_PIPES):
return config["nlp"].get(key, [])
else:
if len(config["nlp"].get(key, [])):
warnings.warn(
Warnings.W123.format(
arg=key[:-1],
arg_value=value,
config_value=config["nlp"][key],
)
# `enable` should not be merged with `enabled` (the opposite is true for `disable`/`disabled`). If the config
# specifies values for `enabled` not included in `enable`, emit warning.
if id(enable) != id(_DEFAULT_EMPTY_PIPES):
enabled = config["nlp"].get("enabled", [])
if len(enabled) and not set(enabled).issubset(enable):
warnings.warn(
Warnings.W123.format(
enable=enable,
enabled=enabled,
)
return value
)
# Ensure sets of disabled/enabled pipe names are not contradictory.
disabled_pipes = cls._resolve_component_status(
fetch_pipes_status(disable, "disabled"),
fetch_pipes_status(enable, "enabled"),
list({*disable, *config["nlp"].get("disabled", [])}),
enable,
config["nlp"]["pipeline"],
)
nlp._disabled = set(p for p in disabled_pipes if p not in exclude)
@ -2084,10 +2075,12 @@ class Language:
if enable:
if isinstance(enable, str):
enable = [enable]
to_disable = [
pipe_name for pipe_name in pipe_names if pipe_name not in enable
]
if disable and disable != to_disable:
to_disable = {
*[pipe_name for pipe_name in pipe_names if pipe_name not in enable],
*disable,
}
# If any pipe to be enabled is in to_disable, the specification is inconsistent.
if len(set(enable) & to_disable):
raise ValueError(Errors.E1042.format(enable=enable, disable=disable))
return tuple(to_disable)
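Roughly, the new semantics of `enable`/`disable` at load time look like this (the package name is a placeholder and must be installed):

import spacy

# `disable` passed here is merged with the config's "disabled" list.
nlp = spacy.load("en_core_web_sm", disable=["ner"])

# This now raises ValueError (E1042): "tagger" cannot be both enabled and disabled.
# spacy.load("en_core_web_sm", enable=["tagger"], disable=["tagger"])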

View File

@ -1,11 +1,12 @@
from pathlib import Path
from typing import Optional, Callable, Iterable, List, Tuple
from thinc.types import Floats2d
from thinc.api import chain, clone, list2ragged, reduce_mean, residual
from thinc.api import Model, Maxout, Linear, noop, tuplify, Ragged
from thinc.api import chain, list2ragged, reduce_mean, residual
from thinc.api import Model, Maxout, Linear, tuplify, Ragged
from ...util import registry
from ...kb import KnowledgeBase, Candidate, get_candidates
from ...kb import KnowledgeBase, InMemoryLookupKB
from ...kb import Candidate, get_candidates, get_candidates_batch
from ...vocab import Vocab
from ...tokens import Span, Doc
from ..extract_spans import extract_spans
@ -70,17 +71,18 @@ def span_maker_forward(model, docs: List[Doc], is_train) -> Tuple[Ragged, Callab
cands.append((start_token, end_token))
candidates.append(ops.asarray2i(cands))
candlens = ops.asarray1i([len(cands) for cands in candidates])
candidates = ops.xp.concatenate(candidates)
outputs = Ragged(candidates, candlens)
lengths = model.ops.asarray1i([len(cands) for cands in candidates])
out = Ragged(model.ops.flatten(candidates), lengths)
# because this is just rearranging docs, the backprop does nothing
return outputs, lambda x: []
return out, lambda x: []
@registry.misc("spacy.KBFromFile.v1")
def load_kb(kb_path: Path) -> Callable[[Vocab], KnowledgeBase]:
def kb_from_file(vocab):
kb = KnowledgeBase(vocab, entity_vector_length=1)
def load_kb(
kb_path: Path,
) -> Callable[[Vocab], KnowledgeBase]:
def kb_from_file(vocab: Vocab):
kb = InMemoryLookupKB(vocab, entity_vector_length=1)
kb.from_disk(kb_path)
return kb
@ -88,9 +90,11 @@ def load_kb(kb_path: Path) -> Callable[[Vocab], KnowledgeBase]:
@registry.misc("spacy.EmptyKB.v1")
def empty_kb(entity_vector_length: int) -> Callable[[Vocab], KnowledgeBase]:
def empty_kb_factory(vocab):
return KnowledgeBase(vocab=vocab, entity_vector_length=entity_vector_length)
def empty_kb(
entity_vector_length: int,
) -> Callable[[Vocab], KnowledgeBase]:
def empty_kb_factory(vocab: Vocab):
return InMemoryLookupKB(vocab=vocab, entity_vector_length=entity_vector_length)
return empty_kb_factory
@ -98,3 +102,10 @@ def empty_kb(entity_vector_length: int) -> Callable[[Vocab], KnowledgeBase]:
@registry.misc("spacy.CandidateGenerator.v1")
def create_candidates() -> Callable[[KnowledgeBase, Span], Iterable[Candidate]]:
return get_candidates
@registry.misc("spacy.CandidateBatchGenerator.v1")
def create_candidates_batch() -> Callable[
[KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]
]:
return get_candidates_batch
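A small sketch of the factories after this change (entity ID and vector length are made up):

from spacy.ml.models.entity_linker import empty_kb
from spacy.vocab import Vocab

vocab = Vocab()
# Both factories now return the concrete InMemoryLookupKB rather than the abstract KnowledgeBase.
kb = empty_kb(entity_vector_length=64)(vocab)
kb.add_entity(entity="Q42", freq=10, entity_vector=[0.0] * 64)
# load_kb("/path/to/kb") builds a loader that reads a serialized KB from disk (the path is a placeholder).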

View File

@ -53,9 +53,11 @@ DEFAULT_NEL_MODEL = Config().from_str(default_model_config)["model"]
"incl_context": True,
"entity_vector_length": 64,
"get_candidates": {"@misc": "spacy.CandidateGenerator.v1"},
"get_candidates_batch": {"@misc": "spacy.CandidateBatchGenerator.v1"},
"overwrite": True,
"scorer": {"@scorers": "spacy.entity_linker_scorer.v1"},
"use_gold_ents": True,
"candidates_batch_size": 1,
"threshold": None,
},
default_score_weights={
@ -75,9 +77,13 @@ def make_entity_linker(
incl_context: bool,
entity_vector_length: int,
get_candidates: Callable[[KnowledgeBase, Span], Iterable[Candidate]],
get_candidates_batch: Callable[
[KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]
],
overwrite: bool,
scorer: Optional[Callable],
use_gold_ents: bool,
candidates_batch_size: int,
threshold: Optional[float] = None,
):
"""Construct an EntityLinker component.
@ -90,17 +96,21 @@ def make_entity_linker(
incl_prior (bool): Whether or not to include prior probabilities from the KB in the model.
incl_context (bool): Whether or not to include the local context in the model.
entity_vector_length (int): Size of encoding vectors in the KB.
get_candidates (Callable[[KnowledgeBase, "Span"], Iterable[Candidate]]): Function that
get_candidates (Callable[[KnowledgeBase, Span], Iterable[Candidate]]): Function that
produces a list of candidates, given a certain knowledge base and a textual mention.
get_candidates_batch (
Callable[[KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]]
): Function that produces a list of candidates, given a certain knowledge base and several textual mentions.
scorer (Optional[Callable]): The scoring method.
use_gold_ents (bool): Whether to copy entities from gold docs or not. If false, another
component must provide entity annotations.
candidates_batch_size (int): Size of batches for entity candidate generation.
threshold (Optional[float]): Confidence threshold for entity predictions. If confidence is below the threshold,
prediction is discarded. If None, predictions are not filtered by any threshold.
"""
if not model.attrs.get("include_span_maker", False):
# The only difference in arguments here is that use_gold_ents is not available
# The only difference in arguments here is that use_gold_ents and threshold aren't available.
return EntityLinker_v1(
nlp.vocab,
model,
@ -124,9 +134,11 @@ def make_entity_linker(
incl_context=incl_context,
entity_vector_length=entity_vector_length,
get_candidates=get_candidates,
get_candidates_batch=get_candidates_batch,
overwrite=overwrite,
scorer=scorer,
use_gold_ents=use_gold_ents,
candidates_batch_size=candidates_batch_size,
threshold=threshold,
)
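A rough sketch of how the new settings surface on the component; the values are illustrative, and everything left unspecified falls back to the registered defaults, including "spacy.CandidateBatchGenerator.v1" for get_candidates_batch:

import spacy

nlp = spacy.blank("en")
entity_linker = nlp.add_pipe(
    "entity_linker",
    config={"candidates_batch_size": 4, "threshold": 0.4},
)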
@ -160,9 +172,13 @@ class EntityLinker(TrainablePipe):
incl_context: bool,
entity_vector_length: int,
get_candidates: Callable[[KnowledgeBase, Span], Iterable[Candidate]],
get_candidates_batch: Callable[
[KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]
],
overwrite: bool = BACKWARD_OVERWRITE,
scorer: Optional[Callable] = entity_linker_score,
use_gold_ents: bool,
candidates_batch_size: int,
threshold: Optional[float] = None,
) -> None:
"""Initialize an entity linker.
@ -178,10 +194,14 @@ class EntityLinker(TrainablePipe):
entity_vector_length (int): Size of encoding vectors in the KB.
get_candidates (Callable[[KnowledgeBase, Span], Iterable[Candidate]]): Function that
produces a list of candidates, given a certain knowledge base and a textual mention.
scorer (Optional[Callable]): The scoring method. Defaults to
Scorer.score_links.
get_candidates_batch (
Callable[[KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]]
): Function that produces a list of candidates, given a certain knowledge base and several textual mentions.
scorer (Optional[Callable]): The scoring method. Defaults to Scorer.score_links.
use_gold_ents (bool): Whether to copy entities from gold docs or not. If false, another
component must provide entity annotations.
candidates_batch_size (int): Size of batches for entity candidate generation.
threshold (Optional[float]): Confidence threshold for entity predictions. If confidence is below the
threshold, prediction is discarded. If None, predictions are not filtered by any threshold.
DOCS: https://spacy.io/api/entitylinker#init
@ -204,22 +224,27 @@ class EntityLinker(TrainablePipe):
self.incl_prior = incl_prior
self.incl_context = incl_context
self.get_candidates = get_candidates
self.get_candidates_batch = get_candidates_batch
self.cfg: Dict[str, Any] = {"overwrite": overwrite}
self.distance = CosineDistance(normalize=False)
# how many neighbour sentences to take into account
# create an empty KB by default. If you want to load a predefined one, specify it in 'initialize'.
# create an empty KB by default
self.kb = empty_kb(entity_vector_length)(self.vocab)
self.scorer = scorer
self.use_gold_ents = use_gold_ents
self.candidates_batch_size = candidates_batch_size
self.threshold = threshold
if candidates_batch_size < 1:
raise ValueError(Errors.E1044)
def set_kb(self, kb_loader: Callable[[Vocab], KnowledgeBase]):
"""Define the KB of this pipe by providing a function that will
create it using this object's vocab."""
if not callable(kb_loader):
raise ValueError(Errors.E885.format(arg_type=type(kb_loader)))
self.kb = kb_loader(self.vocab)
self.kb = kb_loader(self.vocab) # type: ignore
def validate_kb(self) -> None:
# Raise an error if the knowledge base is not initialized.
@ -241,8 +266,8 @@ class EntityLinker(TrainablePipe):
get_examples (Callable[[], Iterable[Example]]): Function that
returns a representative sample of gold-standard Example objects.
nlp (Language): The current nlp object the component is part of.
kb_loader (Callable[[Vocab], KnowledgeBase]): A function that creates a KnowledgeBase from a Vocab instance.
Note that providing this argument, will overwrite all data accumulated in the current KB.
kb_loader (Callable[[Vocab], KnowledgeBase]): A function that creates a KnowledgeBase from a Vocab
instance. Note that providing this argument will overwrite all data accumulated in the current KB.
Use this only when loading a KB as-such from file.
DOCS: https://spacy.io/api/entitylinker#initialize
@ -419,66 +444,93 @@ class EntityLinker(TrainablePipe):
if len(doc) == 0:
continue
sentences = [s for s in doc.sents]
# Looping through each entity (TODO: rewrite)
for ent in doc.ents:
sent_index = sentences.index(ent.sent)
assert sent_index >= 0
if self.incl_context:
# get n_neighbour sentences, clipped to the length of the document
start_sentence = max(0, sent_index - self.n_sents)
end_sentence = min(len(sentences) - 1, sent_index + self.n_sents)
start_token = sentences[start_sentence].start
end_token = sentences[end_sentence].end
sent_doc = doc[start_token:end_token].as_doc()
# currently, the context is the same for each entity in a sentence (should be refined)
sentence_encoding = self.model.predict([sent_doc])[0]
sentence_encoding_t = sentence_encoding.T
sentence_norm = xp.linalg.norm(sentence_encoding_t)
entity_count += 1
if ent.label_ in self.labels_discard:
# ignoring this entity - setting to NIL
final_kb_ids.append(self.NIL)
else:
candidates = list(self.get_candidates(self.kb, ent))
if not candidates:
# no prediction possible for this entity - setting to NIL
final_kb_ids.append(self.NIL)
elif len(candidates) == 1 and self.threshold is None:
# shortcut for efficiency reasons: take the 1 candidate
final_kb_ids.append(candidates[0].entity_)
else:
random.shuffle(candidates)
# set all prior probabilities to 0 if incl_prior=False
prior_probs = xp.asarray([c.prior_prob for c in candidates])
if not self.incl_prior:
prior_probs = xp.asarray([0.0 for _ in candidates])
scores = prior_probs
# add in similarity from the context
if self.incl_context:
entity_encodings = xp.asarray(
[c.entity_vector for c in candidates]
)
entity_norm = xp.linalg.norm(entity_encodings, axis=1)
if len(entity_encodings) != len(prior_probs):
raise RuntimeError(
Errors.E147.format(
method="predict",
msg="vectors not of equal length",
)
)
# cosine similarity
sims = xp.dot(entity_encodings, sentence_encoding_t) / (
sentence_norm * entity_norm
)
if sims.shape != prior_probs.shape:
raise ValueError(Errors.E161)
scores = prior_probs + sims - (prior_probs * sims)
final_kb_ids.append(
candidates[scores.argmax().item()].entity_
if self.threshold is None or scores.max() >= self.threshold
else EntityLinker.NIL
# Loop over entities in batches.
for ent_idx in range(0, len(doc.ents), self.candidates_batch_size):
ent_batch = doc.ents[ent_idx : ent_idx + self.candidates_batch_size]
# Look up candidate entities.
valid_ent_idx = [
idx
for idx in range(len(ent_batch))
if ent_batch[idx].label_ not in self.labels_discard
]
batch_candidates = list(
self.get_candidates_batch(
self.kb, [ent_batch[idx] for idx in valid_ent_idx]
)
if self.candidates_batch_size > 1
else [
self.get_candidates(self.kb, ent_batch[idx])
for idx in valid_ent_idx
]
)
# Looping through each entity in batch (TODO: rewrite)
for j, ent in enumerate(ent_batch):
sent_index = sentences.index(ent.sent)
assert sent_index >= 0
if self.incl_context:
# get n_neighbour sentences, clipped to the length of the document
start_sentence = max(0, sent_index - self.n_sents)
end_sentence = min(
len(sentences) - 1, sent_index + self.n_sents
)
start_token = sentences[start_sentence].start
end_token = sentences[end_sentence].end
sent_doc = doc[start_token:end_token].as_doc()
# currently, the context is the same for each entity in a sentence (should be refined)
sentence_encoding = self.model.predict([sent_doc])[0]
sentence_encoding_t = sentence_encoding.T
sentence_norm = xp.linalg.norm(sentence_encoding_t)
entity_count += 1
if ent.label_ in self.labels_discard:
# ignoring this entity - setting to NIL
final_kb_ids.append(self.NIL)
else:
candidates = list(batch_candidates[j])
if not candidates:
# no prediction possible for this entity - setting to NIL
final_kb_ids.append(self.NIL)
elif len(candidates) == 1 and self.threshold is None:
# shortcut for efficiency reasons: take the 1 candidate
final_kb_ids.append(candidates[0].entity_)
else:
random.shuffle(candidates)
# set all prior probabilities to 0 if incl_prior=False
prior_probs = xp.asarray([c.prior_prob for c in candidates])
if not self.incl_prior:
prior_probs = xp.asarray([0.0 for _ in candidates])
scores = prior_probs
# add in similarity from the context
if self.incl_context:
entity_encodings = xp.asarray(
[c.entity_vector for c in candidates]
)
entity_norm = xp.linalg.norm(entity_encodings, axis=1)
if len(entity_encodings) != len(prior_probs):
raise RuntimeError(
Errors.E147.format(
method="predict",
msg="vectors not of equal length",
)
)
# cosine similarity
sims = xp.dot(entity_encodings, sentence_encoding_t) / (
sentence_norm * entity_norm
)
if sims.shape != prior_probs.shape:
raise ValueError(Errors.E161)
scores = prior_probs + sims - (prior_probs * sims)
final_kb_ids.append(
candidates[scores.argmax().item()].entity_
if self.threshold is None
or scores.max() >= self.threshold
else EntityLinker.NIL
)
if not (len(final_kb_ids) == entity_count):
err = Errors.E147.format(
method="predict", msg="result variables not of equal length"

View File

@ -68,8 +68,7 @@ class EntityLinker_v1(TrainablePipe):
entity_vector_length (int): Size of encoding vectors in the KB.
get_candidates (Callable[[KnowledgeBase, Span], Iterable[Candidate]]): Function that
produces a list of candidates, given a certain knowledge base and a textual mention.
scorer (Optional[Callable]): The scoring method. Defaults to
Scorer.score_links.
scorer (Optional[Callable]): The scoring method. Defaults to Scorer.score_links.
DOCS: https://spacy.io/api/entitylinker#init
"""
self.vocab = vocab
@ -115,7 +114,7 @@ class EntityLinker_v1(TrainablePipe):
get_examples (Callable[[], Iterable[Example]]): Function that
returns a representative sample of gold-standard Example objects.
nlp (Language): The current nlp object the component is part of.
kb_loader (Callable[[Vocab], KnowledgeBase]): A function that creates a KnowledgeBase from a Vocab instance.
kb_loader (Callable[[Vocab], KnowledgeBase]): A function that creates an InMemoryLookupKB from a Vocab instance.
Note that providing this argument will overwrite all data accumulated in the current KB.
Use this only when loading a KB as-such from file.

View File

@ -26,17 +26,17 @@ scorer = {"@layers": "spacy.LinearLogistic.v1"}
hidden_size = 128
[model.tok2vec]
@architectures = "spacy.Tok2Vec.v1"
@architectures = "spacy.Tok2Vec.v2"
[model.tok2vec.embed]
@architectures = "spacy.MultiHashEmbed.v1"
@architectures = "spacy.MultiHashEmbed.v2"
width = 96
rows = [5000, 2000, 1000, 1000]
attrs = ["ORTH", "PREFIX", "SUFFIX", "SHAPE"]
include_static_vectors = false
[model.tok2vec.encode]
@architectures = "spacy.MaxoutWindowEncoder.v1"
@architectures = "spacy.MaxoutWindowEncoder.v2"
width = ${model.tok2vec.embed.width}
window_size = 1
maxout_pieces = 3

View File

@ -24,8 +24,8 @@ single_label_default_config = """
[model.tok2vec.embed]
@architectures = "spacy.MultiHashEmbed.v2"
width = 64
rows = [2000, 2000, 1000, 1000, 1000, 1000]
attrs = ["ORTH", "LOWER", "PREFIX", "SUFFIX", "SHAPE", "ID"]
rows = [2000, 2000, 500, 1000, 500]
attrs = ["NORM", "LOWER", "PREFIX", "SUFFIX", "SHAPE"]
include_static_vectors = false
[model.tok2vec.encode]
@ -72,7 +72,7 @@ subword_features = true
"textcat",
assigns=["doc.cats"],
default_config={
"threshold": 0.5,
"threshold": 0.0,
"model": DEFAULT_SINGLE_TEXTCAT_MODEL,
"scorer": {"@scorers": "spacy.textcat_scorer.v1"},
},
@ -144,7 +144,8 @@ class TextCategorizer(TrainablePipe):
model (thinc.api.Model): The Thinc Model powering the pipeline component.
name (str): The component instance name, used to add entries to the
losses during training.
threshold (float): Cutoff to consider a prediction "positive".
threshold (float): Unused, not needed for single-label (exclusive
classes) classification.
scorer (Optional[Callable]): The scoring method. Defaults to
Scorer.score_cats for the attribute "cats".
@ -154,7 +155,11 @@ class TextCategorizer(TrainablePipe):
self.model = model
self.name = name
self._rehearsal_model = None
cfg = {"labels": [], "threshold": threshold, "positive_label": None}
cfg: Dict[str, Any] = {
"labels": [],
"threshold": threshold,
"positive_label": None,
}
self.cfg = dict(cfg)
self.scorer = scorer

View File

@ -19,17 +19,17 @@ multi_label_default_config = """
@architectures = "spacy.TextCatEnsemble.v2"
[model.tok2vec]
@architectures = "spacy.Tok2Vec.v1"
@architectures = "spacy.Tok2Vec.v2"
[model.tok2vec.embed]
@architectures = "spacy.MultiHashEmbed.v2"
width = 64
rows = [2000, 2000, 1000, 1000, 1000, 1000]
attrs = ["ORTH", "LOWER", "PREFIX", "SUFFIX", "SHAPE", "ID"]
rows = [2000, 2000, 500, 1000, 500]
attrs = ["NORM", "LOWER", "PREFIX", "SUFFIX", "SHAPE"]
include_static_vectors = false
[model.tok2vec.encode]
@architectures = "spacy.MaxoutWindowEncoder.v1"
@architectures = "spacy.MaxoutWindowEncoder.v2"
width = ${model.tok2vec.embed.width}
window_size = 1
maxout_pieces = 3

View File

@ -123,9 +123,6 @@ class Tok2Vec(TrainablePipe):
width = self.model.get_dim("nO")
return [self.model.ops.alloc((0, width)) for doc in docs]
tokvecs = self.model.predict(docs)
batch_id = Tok2VecListener.get_batch_id(docs)
for listener in self.listeners:
listener.receive(batch_id, tokvecs, _empty_backprop)
return tokvecs
def set_annotations(self, docs: Sequence[Doc], tokvecses) -> None:
@ -286,8 +283,19 @@ class Tok2VecListener(Model):
def forward(model: Tok2VecListener, inputs, is_train: bool):
"""Supply the outputs from the upstream Tok2Vec component."""
if is_train:
model.verify_inputs(inputs)
return model._outputs, model._backprop
# This might occur during training when the tok2vec layer is frozen / hasn't been updated.
# In that case, it should be set to "annotating" so we can retrieve the embeddings from the doc.
if model._batch_id is None:
outputs = []
for doc in inputs:
if doc.tensor.size == 0:
raise ValueError(Errors.E203.format(name="tok2vec"))
else:
outputs.append(doc.tensor)
return outputs, _empty_backprop
else:
model.verify_inputs(inputs)
return model._outputs, model._backprop
else:
# This is pretty grim, but it's hard to do better :(.
# It's hard to avoid relying on the doc.tensor attribute, because the
@ -306,7 +314,7 @@ def forward(model: Tok2VecListener, inputs, is_train: bool):
outputs.append(model.ops.alloc2f(len(doc), width))
else:
outputs.append(doc.tensor)
return outputs, lambda dX: []
return outputs, _empty_backprop
def _empty_backprop(dX): # for pickling
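A sketch of the training call this branch supports, assuming `nlp`, `train_examples` and `optimizer` come from a standard tok2vec + tagger setup: the frozen tok2vec is excluded from the update but still listed as an annotating component, so its output lands on doc.tensor and is served by the listener.

losses = {}
nlp.update(
    train_examples,
    sgd=optimizer,
    losses=losses,
    exclude=["tok2vec"],
    annotates=["tok2vec"],
)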

View File

@ -519,9 +519,9 @@ class DocJSONSchema(BaseModel):
title="Any custom data stored in the document's _ attribute",
alias="_",
)
underscore_token: Optional[Dict[StrictStr, Dict[StrictStr, Any]]] = Field(
underscore_token: Optional[Dict[StrictStr, List[Dict[StrictStr, Any]]]] = Field(
None, title="Any custom data stored in the token's _ attribute"
)
underscore_span: Optional[Dict[StrictStr, Dict[StrictStr, Any]]] = Field(
underscore_span: Optional[Dict[StrictStr, List[Dict[StrictStr, Any]]]] = Field(
None, title="Any custom data stored in the span's _ attribute"
)
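A small sketch of the resulting JSON shape (the extension name is made up): custom token/span attributes are now serialized as a list with one entry per token or span, instead of a single dict.

import spacy
from spacy.tokens import Span

Span.set_extension("span_test", default=None)
nlp = spacy.blank("en")
doc = nlp("one two three")
doc[0:1]._.span_test = "a"
doc[0:2]._.span_test = "b"
data = doc.to_json(underscore=["span_test"])
print(data["underscore_span"]["span_test"])  # one dict per span carrying the attribute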

View File

@ -446,7 +446,7 @@ class Scorer:
labels (Iterable[str]): The set of possible labels. Defaults to [].
multi_label (bool): Whether the attribute allows multiple labels.
Defaults to True. When set to False (exclusive labels), missing
gold labels are interpreted as 0.0.
gold labels are interpreted as 0.0 and the threshold is set to 0.0.
positive_label (str): The positive label for a binary task with
exclusive classes. Defaults to None.
threshold (float): Cutoff to consider a prediction "positive". Defaults
@ -471,6 +471,8 @@ class Scorer:
"""
if threshold is None:
threshold = 0.5 if multi_label else 0.0
if not multi_label:
threshold = 0.0
f_per_type = {label: PRFScore() for label in labels}
auc_per_type = {label: ROCAUCScore() for label in labels}
labels = set(labels)
@ -505,20 +507,18 @@ class Scorer:
# Get the highest-scoring for each.
pred_label, pred_score = max(pred_cats.items(), key=lambda it: it[1])
gold_label, gold_score = max(gold_cats.items(), key=lambda it: it[1])
if pred_label == gold_label and pred_score >= threshold:
if pred_label == gold_label:
f_per_type[pred_label].tp += 1
else:
f_per_type[gold_label].fn += 1
if pred_score >= threshold:
f_per_type[pred_label].fp += 1
f_per_type[pred_label].fp += 1
elif gold_cats:
gold_label, gold_score = max(gold_cats.items(), key=lambda it: it[1])
if gold_score > 0:
f_per_type[gold_label].fn += 1
elif pred_cats:
pred_label, pred_score = max(pred_cats.items(), key=lambda it: it[1])
if pred_score >= threshold:
f_per_type[pred_label].fp += 1
f_per_type[pred_label].fp += 1
micro_prf = PRFScore()
for label_prf in f_per_type.values():
micro_prf.tp += label_prf.tp
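An illustrative sketch of the exclusive-label case, with invented labels and scores: since the threshold is forced to 0.0, the highest-scoring predicted label is always compared against the gold label.

from spacy.scorer import Scorer
from spacy.tokens import Doc
from spacy.training import Example
from spacy.vocab import Vocab

vocab = Vocab()
pred = Doc(vocab, words=["some", "text"])
pred.cats = {"POSITIVE": 0.3, "NEGATIVE": 0.7}
gold = Doc(vocab, words=["some", "text"])
gold.cats = {"POSITIVE": 0.0, "NEGATIVE": 1.0}

scores = Scorer.score_cats(
    [Example(pred, gold)], "cats",
    labels=["POSITIVE", "NEGATIVE"], multi_label=False,
)
# NEGATIVE counts as a true positive even though its score would sit below a 0.5 cutoff.
print(scores["cats_f_per_type"])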

View File

@ -333,13 +333,13 @@ def ro_tokenizer():
@pytest.fixture(scope="session")
def ru_tokenizer():
pytest.importorskip("pymorphy2")
pytest.importorskip("pymorphy3")
return get_lang_class("ru")().tokenizer
@pytest.fixture
def ru_lemmatizer():
pytest.importorskip("pymorphy2")
pytest.importorskip("pymorphy3")
return get_lang_class("ru")().add_pipe("lemmatizer")
@ -419,14 +419,14 @@ def ky_tokenizer():
@pytest.fixture(scope="session")
def uk_tokenizer():
pytest.importorskip("pymorphy2")
pytest.importorskip("pymorphy3")
return get_lang_class("uk")().tokenizer
@pytest.fixture
def uk_lemmatizer():
pytest.importorskip("pymorphy2")
pytest.importorskip("pymorphy2_dicts_uk")
pytest.importorskip("pymorphy3")
pytest.importorskip("pymorphy3_dicts_uk")
return get_lang_class("uk")().add_pipe("lemmatizer")

View File

@ -128,7 +128,9 @@ def test_doc_to_json_with_token_span_attributes(doc):
doc._.json_test1 = "hello world"
doc._.json_test2 = [1, 2, 3]
doc[0:1]._.span_test = "span_attribute"
doc[0:2]._.span_test = "span_attribute_2"
doc[0]._.token_test = 117
doc[1]._.token_test = 118
doc.spans["span_group"] = [doc[0:1]]
json_doc = doc.to_json(
underscore=["json_test1", "json_test2", "token_test", "span_test"]
@ -139,8 +141,10 @@ def test_doc_to_json_with_token_span_attributes(doc):
assert json_doc["_"]["json_test2"] == [1, 2, 3]
assert "underscore_token" in json_doc
assert "underscore_span" in json_doc
assert json_doc["underscore_token"]["token_test"]["value"] == 117
assert json_doc["underscore_span"]["span_test"]["value"] == "span_attribute"
assert json_doc["underscore_token"]["token_test"][0]["value"] == 117
assert json_doc["underscore_token"]["token_test"][1]["value"] == 118
assert json_doc["underscore_span"]["span_test"][0]["value"] == "span_attribute"
assert json_doc["underscore_span"]["span_test"][1]["value"] == "span_attribute_2"
assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0
assert srsly.json_loads(srsly.json_dumps(json_doc)) == json_doc
@ -161,8 +165,8 @@ def test_doc_to_json_with_custom_user_data(doc):
assert json_doc["_"]["json_test"] == "hello world"
assert "underscore_token" in json_doc
assert "underscore_span" in json_doc
assert json_doc["underscore_token"]["token_test"]["value"] == 117
assert json_doc["underscore_span"]["span_test"]["value"] == "span_attribute"
assert json_doc["underscore_token"]["token_test"][0]["value"] == 117
assert json_doc["underscore_span"]["span_test"][0]["value"] == "span_attribute"
assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0
assert srsly.json_loads(srsly.json_dumps(json_doc)) == json_doc
@ -181,8 +185,8 @@ def test_doc_to_json_with_token_span_same_identifier(doc):
assert json_doc["_"]["my_ext"] == "hello world"
assert "underscore_token" in json_doc
assert "underscore_span" in json_doc
assert json_doc["underscore_token"]["my_ext"]["value"] == 117
assert json_doc["underscore_span"]["my_ext"]["value"] == "span_attribute"
assert json_doc["underscore_token"]["my_ext"][0]["value"] == 117
assert json_doc["underscore_span"]["my_ext"][0]["value"] == "span_attribute"
assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0
assert srsly.json_loads(srsly.json_dumps(json_doc)) == json_doc
@ -195,10 +199,9 @@ def test_doc_to_json_with_token_attributes_missing(doc):
doc[0]._.token_test = 117
json_doc = doc.to_json(underscore=["span_test"])
assert "underscore_token" in json_doc
assert "underscore_span" in json_doc
assert json_doc["underscore_span"]["span_test"]["value"] == "span_attribute"
assert "token_test" not in json_doc["underscore_token"]
assert json_doc["underscore_span"]["span_test"][0]["value"] == "span_attribute"
assert "underscore_token" not in json_doc
assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0
@ -283,7 +286,9 @@ def test_json_to_doc_with_token_span_attributes(doc):
doc._.json_test1 = "hello world"
doc._.json_test2 = [1, 2, 3]
doc[0:1]._.span_test = "span_attribute"
doc[0:2]._.span_test = "span_attribute_2"
doc[0]._.token_test = 117
doc[1]._.token_test = 118
json_doc = doc.to_json(
underscore=["json_test1", "json_test2", "token_test", "span_test"]
@ -295,7 +300,9 @@ def test_json_to_doc_with_token_span_attributes(doc):
assert new_doc._.json_test1 == "hello world"
assert new_doc._.json_test2 == [1, 2, 3]
assert new_doc[0]._.token_test == 117
assert new_doc[1]._.token_test == 118
assert new_doc[0:1]._.span_test == "span_attribute"
assert new_doc[0:2]._.span_test == "span_attribute_2"
assert new_doc.user_data == doc.user_data
assert new_doc.to_bytes(exclude=["user_data"]) == doc.to_bytes(
exclude=["user_data"]
@ -363,3 +370,12 @@ def test_json_to_doc_validation_error(doc):
doc_json.pop("tokens")
with pytest.raises(ValueError):
Doc(doc.vocab).from_json(doc_json, validate=True)
def test_to_json_underscore_doc_getters(doc):
def get_text_length(doc):
return len(doc.text)
Doc.set_extension("text_length", getter=get_text_length)
doc_json = doc.to_json(underscore=["text_length"])
assert doc_json["_"]["text_length"] == get_text_length(doc)

View File

@ -0,0 +1,18 @@
import pytest
# fmt: off
GRC_TOKEN_EXCEPTION_TESTS = [
("τὸ 〈τῆς〉 φιλοσοφίας ἔργον ἔνιοί φασιν ἀπὸ ⟦βαρβάρων⟧ ἄρξαι.", ["τὸ", "", "τῆς", "", "φιλοσοφίας", "ἔργον", "ἔνιοί", "φασιν", "ἀπὸ", "", "βαρβάρων", "", "ἄρξαι", "."]),
("τὴν δὲ τῶν Αἰγυπτίων φιλοσοφίαν εἶναι τοιαύτην περί τε †θεῶν† καὶ ὑπὲρ δικαιοσύνης.", ["τὴν", "δὲ", "τῶν", "Αἰγυπτίων", "φιλοσοφίαν", "εἶναι", "τοιαύτην", "περί", "τε", "", "θεῶν", "", "καὶ", "ὑπὲρ", "δικαιοσύνης", "."]),
("⸏πόσις δ' Ἐρεχθεύς ἐστί μοι σεσωσμένος⸏", ["", "πόσις", "δ'", "Ἐρεχθεύς", "ἐστί", "μοι", "σεσωσμένος", ""]),
("⸏ὔπνον ἴδωμεν⸎", ["", "ὔπνον", "ἴδωμεν", ""]),
]
# fmt: on
@pytest.mark.parametrize("text,expected_tokens", GRC_TOKEN_EXCEPTION_TESTS)
def test_grc_tokenizer(grc_tokenizer, text, expected_tokens):
tokens = grc_tokenizer(text)
token_list = [token.text for token in tokens if not token.is_space]
assert expected_tokens == token_list

View File

@ -20,7 +20,6 @@ od katerih so te svoboščine odvisne,
assert len(tokens) == 116
@pytest.mark.xfail
def test_ordinal_number(sl_tokenizer):
text = "10. decembra 1948"
tokens = sl_tokenizer(text)

View File

@ -6,9 +6,10 @@ from numpy.testing import assert_equal
from spacy import registry, util
from spacy.attrs import ENT_KB_ID
from spacy.compat import pickle
from spacy.kb import Candidate, KnowledgeBase, get_candidates
from spacy.kb import Candidate, InMemoryLookupKB, get_candidates, KnowledgeBase
from spacy.lang.en import English
from spacy.ml import load_kb
from spacy.ml.models.entity_linker import build_span_maker
from spacy.pipeline import EntityLinker
from spacy.pipeline.legacy import EntityLinker_v1
from spacy.pipeline.tok2vec import DEFAULT_TOK2VEC_MODEL
@ -34,7 +35,7 @@ def assert_almost_equal(a, b):
def test_issue4674():
"""Test that setting entities with overlapping identifiers does not mess up IO"""
nlp = English()
kb = KnowledgeBase(nlp.vocab, entity_vector_length=3)
kb = InMemoryLookupKB(nlp.vocab, entity_vector_length=3)
vector1 = [0.9, 1.1, 1.01]
vector2 = [1.8, 2.25, 2.01]
with pytest.warns(UserWarning):
@ -51,7 +52,7 @@ def test_issue4674():
dir_path.mkdir()
file_path = dir_path / "kb"
kb.to_disk(str(file_path))
kb2 = KnowledgeBase(nlp.vocab, entity_vector_length=3)
kb2 = InMemoryLookupKB(nlp.vocab, entity_vector_length=3)
kb2.from_disk(str(file_path))
assert kb2.get_size_entities() == 1
@ -59,9 +60,9 @@ def test_issue4674():
@pytest.mark.issue(6730)
def test_issue6730(en_vocab):
"""Ensure that the KB does not accept empty strings, but otherwise IO works fine."""
from spacy.kb import KnowledgeBase
from spacy.kb.kb_in_memory import InMemoryLookupKB
kb = KnowledgeBase(en_vocab, entity_vector_length=3)
kb = InMemoryLookupKB(en_vocab, entity_vector_length=3)
kb.add_entity(entity="1", freq=148, entity_vector=[1, 2, 3])
with pytest.raises(ValueError):
@ -127,7 +128,7 @@ def test_issue7065_b():
def create_kb(vocab):
# create artificial KB
mykb = KnowledgeBase(vocab, entity_vector_length=vector_length)
mykb = InMemoryLookupKB(vocab, entity_vector_length=vector_length)
mykb.add_entity(entity="Q270853", freq=12, entity_vector=[9, 1, -7])
mykb.add_alias(
alias="No. 8",
@ -190,7 +191,7 @@ def test_no_entities():
def create_kb(vocab):
# create artificial KB
mykb = KnowledgeBase(vocab, entity_vector_length=vector_length)
mykb = InMemoryLookupKB(vocab, entity_vector_length=vector_length)
mykb.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3])
mykb.add_alias("Russ Cochran", ["Q2146908"], [0.9])
return mykb
@ -231,7 +232,7 @@ def test_partial_links():
def create_kb(vocab):
# create artificial KB
mykb = KnowledgeBase(vocab, entity_vector_length=vector_length)
mykb = InMemoryLookupKB(vocab, entity_vector_length=vector_length)
mykb.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3])
mykb.add_alias("Russ Cochran", ["Q2146908"], [0.9])
return mykb
@ -263,7 +264,7 @@ def test_partial_links():
def test_kb_valid_entities(nlp):
"""Test the valid construction of a KB with 3 entities and two aliases"""
mykb = KnowledgeBase(nlp.vocab, entity_vector_length=3)
mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=3)
# adding entities
mykb.add_entity(entity="Q1", freq=19, entity_vector=[8, 4, 3])
@ -292,7 +293,7 @@ def test_kb_valid_entities(nlp):
def test_kb_invalid_entities(nlp):
"""Test the invalid construction of a KB with an alias linked to a non-existing entity"""
mykb = KnowledgeBase(nlp.vocab, entity_vector_length=1)
mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=1)
# adding entities
mykb.add_entity(entity="Q1", freq=19, entity_vector=[1])
@ -308,7 +309,7 @@ def test_kb_invalid_entities(nlp):
def test_kb_invalid_probabilities(nlp):
"""Test the invalid construction of a KB with wrong prior probabilities"""
mykb = KnowledgeBase(nlp.vocab, entity_vector_length=1)
mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=1)
# adding entities
mykb.add_entity(entity="Q1", freq=19, entity_vector=[1])
@ -322,7 +323,7 @@ def test_kb_invalid_probabilities(nlp):
def test_kb_invalid_combination(nlp):
"""Test the invalid construction of a KB with non-matching entity and probability lists"""
mykb = KnowledgeBase(nlp.vocab, entity_vector_length=1)
mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=1)
# adding entities
mykb.add_entity(entity="Q1", freq=19, entity_vector=[1])
@ -338,7 +339,7 @@ def test_kb_invalid_combination(nlp):
def test_kb_invalid_entity_vector(nlp):
"""Test the invalid construction of a KB with non-matching entity vector lengths"""
mykb = KnowledgeBase(nlp.vocab, entity_vector_length=3)
mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=3)
# adding entities
mykb.add_entity(entity="Q1", freq=19, entity_vector=[1, 2, 3])
@ -376,7 +377,7 @@ def test_kb_initialize_empty(nlp):
def test_kb_serialize(nlp):
"""Test serialization of the KB"""
mykb = KnowledgeBase(nlp.vocab, entity_vector_length=1)
mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=1)
with make_tempdir() as d:
# normal read-write behaviour
mykb.to_disk(d / "kb")
@ -393,12 +394,12 @@ def test_kb_serialize(nlp):
@pytest.mark.issue(9137)
def test_kb_serialize_2(nlp):
v = [5, 6, 7, 8]
kb1 = KnowledgeBase(vocab=nlp.vocab, entity_vector_length=4)
kb1 = InMemoryLookupKB(vocab=nlp.vocab, entity_vector_length=4)
kb1.set_entities(["E1"], [1], [v])
assert kb1.get_vector("E1") == v
with make_tempdir() as d:
kb1.to_disk(d / "kb")
kb2 = KnowledgeBase(vocab=nlp.vocab, entity_vector_length=4)
kb2 = InMemoryLookupKB(vocab=nlp.vocab, entity_vector_length=4)
kb2.from_disk(d / "kb")
assert kb2.get_vector("E1") == v
@ -408,7 +409,7 @@ def test_kb_set_entities(nlp):
v = [5, 6, 7, 8]
v1 = [1, 1, 1, 0]
v2 = [2, 2, 2, 3]
kb1 = KnowledgeBase(vocab=nlp.vocab, entity_vector_length=4)
kb1 = InMemoryLookupKB(vocab=nlp.vocab, entity_vector_length=4)
kb1.set_entities(["E0"], [1], [v])
assert kb1.get_entity_strings() == ["E0"]
kb1.set_entities(["E1", "E2"], [1, 9], [v1, v2])
@ -417,7 +418,7 @@ def test_kb_set_entities(nlp):
assert kb1.get_vector("E2") == v2
with make_tempdir() as d:
kb1.to_disk(d / "kb")
kb2 = KnowledgeBase(vocab=nlp.vocab, entity_vector_length=4)
kb2 = InMemoryLookupKB(vocab=nlp.vocab, entity_vector_length=4)
kb2.from_disk(d / "kb")
assert set(kb2.get_entity_strings()) == {"E1", "E2"}
assert kb2.get_vector("E1") == v1
@ -428,7 +429,7 @@ def test_kb_serialize_vocab(nlp):
"""Test serialization of the KB and custom strings"""
entity = "MyFunnyID"
assert entity not in nlp.vocab.strings
mykb = KnowledgeBase(nlp.vocab, entity_vector_length=1)
mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=1)
assert not mykb.contains_entity(entity)
mykb.add_entity(entity, freq=342, entity_vector=[3])
assert mykb.contains_entity(entity)
@ -436,14 +437,14 @@ def test_kb_serialize_vocab(nlp):
with make_tempdir() as d:
# normal read-write behaviour
mykb.to_disk(d / "kb")
mykb_new = KnowledgeBase(Vocab(), entity_vector_length=1)
mykb_new = InMemoryLookupKB(Vocab(), entity_vector_length=1)
mykb_new.from_disk(d / "kb")
assert entity in mykb_new.vocab.strings
def test_candidate_generation(nlp):
"""Test correct candidate generation"""
mykb = KnowledgeBase(nlp.vocab, entity_vector_length=1)
mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=1)
doc = nlp("douglas adam Adam shrubbery")
douglas_ent = doc[0:1]
@ -481,7 +482,7 @@ def test_el_pipe_configuration(nlp):
ruler.add_patterns([pattern])
def create_kb(vocab):
kb = KnowledgeBase(vocab, entity_vector_length=1)
kb = InMemoryLookupKB(vocab, entity_vector_length=1)
kb.add_entity(entity="Q2", freq=12, entity_vector=[2])
kb.add_entity(entity="Q3", freq=5, entity_vector=[3])
kb.add_alias(alias="douglas", entities=["Q2", "Q3"], probabilities=[0.8, 0.1])
@ -500,10 +501,21 @@ def test_el_pipe_configuration(nlp):
def get_lowercased_candidates(kb, span):
return kb.get_alias_candidates(span.text.lower())
def get_lowercased_candidates_batch(kb, spans):
return [get_lowercased_candidates(kb, span) for span in spans]
@registry.misc("spacy.LowercaseCandidateGenerator.v1")
def create_candidates() -> Callable[[KnowledgeBase, "Span"], Iterable[Candidate]]:
def create_candidates() -> Callable[
[InMemoryLookupKB, "Span"], Iterable[Candidate]
]:
return get_lowercased_candidates
@registry.misc("spacy.LowercaseCandidateBatchGenerator.v1")
def create_candidates_batch() -> Callable[
[InMemoryLookupKB, Iterable["Span"]], Iterable[Iterable[Candidate]]
]:
return get_lowercased_candidates_batch
# replace the pipe with a new one with with a different candidate generator
entity_linker = nlp.replace_pipe(
"entity_linker",
@ -511,6 +523,9 @@ def test_el_pipe_configuration(nlp):
config={
"incl_context": False,
"get_candidates": {"@misc": "spacy.LowercaseCandidateGenerator.v1"},
"get_candidates_batch": {
"@misc": "spacy.LowercaseCandidateBatchGenerator.v1"
},
},
)
entity_linker.set_kb(create_kb)
@ -532,7 +547,7 @@ def test_nel_nsents(nlp):
def test_vocab_serialization(nlp):
"""Test that string information is retained across storage"""
mykb = KnowledgeBase(nlp.vocab, entity_vector_length=1)
mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=1)
# adding entities
mykb.add_entity(entity="Q1", freq=27, entity_vector=[1])
@ -552,7 +567,7 @@ def test_vocab_serialization(nlp):
with make_tempdir() as d:
mykb.to_disk(d / "kb")
kb_new_vocab = KnowledgeBase(Vocab(), entity_vector_length=1)
kb_new_vocab = InMemoryLookupKB(Vocab(), entity_vector_length=1)
kb_new_vocab.from_disk(d / "kb")
candidates = kb_new_vocab.get_alias_candidates("adam")
@ -568,7 +583,7 @@ def test_vocab_serialization(nlp):
def test_append_alias(nlp):
"""Test that we can append additional alias-entity pairs"""
mykb = KnowledgeBase(nlp.vocab, entity_vector_length=1)
mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=1)
# adding entities
mykb.add_entity(entity="Q1", freq=27, entity_vector=[1])
@ -599,7 +614,7 @@ def test_append_alias(nlp):
@pytest.mark.filterwarnings("ignore:\\[W036")
def test_append_invalid_alias(nlp):
"""Test that append an alias will throw an error if prior probs are exceeding 1"""
mykb = KnowledgeBase(nlp.vocab, entity_vector_length=1)
mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=1)
# adding entities
mykb.add_entity(entity="Q1", freq=27, entity_vector=[1])
@ -621,7 +636,7 @@ def test_preserving_links_asdoc(nlp):
vector_length = 1
def create_kb(vocab):
mykb = KnowledgeBase(vocab, entity_vector_length=vector_length)
mykb = InMemoryLookupKB(vocab, entity_vector_length=vector_length)
# adding entities
mykb.add_entity(entity="Q1", freq=19, entity_vector=[1])
mykb.add_entity(entity="Q2", freq=8, entity_vector=[1])
@ -701,7 +716,11 @@ TRAIN_DATA = [
("Russ Cochran was a member of University of Kentucky's golf team.",
{"links": {(0, 12): {"Q7381115": 0.0, "Q2146908": 1.0}},
"entities": [(0, 12, "PERSON"), (43, 51, "LOC")],
"sent_starts": [1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]})
"sent_starts": [1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}),
# having a blank instance shouldn't break things
("The weather is nice today.",
{"links": {}, "entities": [],
"sent_starts": [1, -1, 0, 0, 0, 0]})
]
GOLD_entities = ["Q2146908", "Q7381115", "Q7381115", "Q2146908"]
# fmt: on
@ -723,7 +742,7 @@ def test_overfitting_IO():
# create artificial KB - assign same prior weight to the two russ cochran's
# Q2146908 (Russ Cochran): American golfer
# Q7381115 (Russ Cochran): publisher
mykb = KnowledgeBase(vocab, entity_vector_length=vector_length)
mykb = InMemoryLookupKB(vocab, entity_vector_length=vector_length)
mykb.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3])
mykb.add_entity(entity="Q7381115", freq=12, entity_vector=[9, 1, -7])
mykb.add_alias(
@ -805,7 +824,7 @@ def test_kb_serialization():
kb_dir = tmp_dir / "kb"
nlp1 = English()
assert "Q2146908" not in nlp1.vocab.strings
mykb = KnowledgeBase(nlp1.vocab, entity_vector_length=vector_length)
mykb = InMemoryLookupKB(nlp1.vocab, entity_vector_length=vector_length)
mykb.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3])
mykb.add_alias(alias="Russ Cochran", entities=["Q2146908"], probabilities=[0.8])
assert "Q2146908" in nlp1.vocab.strings
@ -828,7 +847,7 @@ def test_kb_serialization():
def test_kb_pickle():
# Test that the KB can be pickled
nlp = English()
kb_1 = KnowledgeBase(nlp.vocab, entity_vector_length=3)
kb_1 = InMemoryLookupKB(nlp.vocab, entity_vector_length=3)
kb_1.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3])
assert not kb_1.contains_alias("Russ Cochran")
kb_1.add_alias(alias="Russ Cochran", entities=["Q2146908"], probabilities=[0.8])
@ -842,7 +861,7 @@ def test_kb_pickle():
def test_nel_pickle():
# Test that a pipeline with an EL component can be pickled
def create_kb(vocab):
kb = KnowledgeBase(vocab, entity_vector_length=3)
kb = InMemoryLookupKB(vocab, entity_vector_length=3)
kb.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3])
kb.add_alias(alias="Russ Cochran", entities=["Q2146908"], probabilities=[0.8])
return kb
@ -864,7 +883,7 @@ def test_nel_pickle():
def test_kb_to_bytes():
# Test that the KB's to_bytes method works correctly
nlp = English()
kb_1 = KnowledgeBase(nlp.vocab, entity_vector_length=3)
kb_1 = InMemoryLookupKB(nlp.vocab, entity_vector_length=3)
kb_1.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3])
kb_1.add_entity(entity="Q66", freq=9, entity_vector=[1, 2, 3])
kb_1.add_alias(alias="Russ Cochran", entities=["Q2146908"], probabilities=[0.8])
@ -874,7 +893,7 @@ def test_kb_to_bytes():
)
assert kb_1.contains_alias("Russ Cochran")
kb_bytes = kb_1.to_bytes()
kb_2 = KnowledgeBase(nlp.vocab, entity_vector_length=3)
kb_2 = InMemoryLookupKB(nlp.vocab, entity_vector_length=3)
assert not kb_2.contains_alias("Russ Cochran")
kb_2 = kb_2.from_bytes(kb_bytes)
# check that both KBs are exactly the same
@ -897,7 +916,7 @@ def test_kb_to_bytes():
def test_nel_to_bytes():
# Test that a pipeline with an EL component can be converted to bytes
def create_kb(vocab):
kb = KnowledgeBase(vocab, entity_vector_length=3)
kb = InMemoryLookupKB(vocab, entity_vector_length=3)
kb.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3])
kb.add_alias(alias="Russ Cochran", entities=["Q2146908"], probabilities=[0.8])
return kb
@ -987,7 +1006,7 @@ def test_legacy_architectures(name, config):
train_examples.append(Example.from_dict(doc, annotation))
def create_kb(vocab):
mykb = KnowledgeBase(vocab, entity_vector_length=vector_length)
mykb = InMemoryLookupKB(vocab, entity_vector_length=vector_length)
mykb.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3])
mykb.add_entity(entity="Q7381115", freq=12, entity_vector=[9, 1, -7])
mykb.add_alias(
@ -1054,7 +1073,7 @@ def test_no_gold_ents(patterns):
def create_kb(vocab):
# create artificial KB
mykb = KnowledgeBase(vocab, entity_vector_length=vector_length)
mykb = InMemoryLookupKB(vocab, entity_vector_length=vector_length)
mykb.add_entity(entity="Q613241", freq=12, entity_vector=[6, -4, 3])
mykb.add_alias("Kirby", ["Q613241"], [0.9])
# Placeholder
@ -1104,7 +1123,7 @@ def test_tokenization_mismatch():
def create_kb(vocab):
# create placeholder KB
mykb = KnowledgeBase(vocab, entity_vector_length=vector_length)
mykb = InMemoryLookupKB(vocab, entity_vector_length=vector_length)
mykb.add_entity(entity="Q613241", freq=12, entity_vector=[6, -4, 3])
mykb.add_alias("Kirby", ["Q613241"], [0.9])
return mykb
@ -1121,6 +1140,12 @@ def test_tokenization_mismatch():
nlp.evaluate(train_examples)
def test_abstract_kb_instantiation():
"""Test whether instantiation of abstract KB base class fails."""
with pytest.raises(TypeError):
KnowledgeBase(None, 3)
# fmt: off
@pytest.mark.parametrize(
"meet_threshold,config",
@ -1151,7 +1176,7 @@ def test_threshold(meet_threshold: bool, config: Dict[str, Any]):
def create_kb(vocab):
# create artificial KB
mykb = KnowledgeBase(vocab, entity_vector_length=3)
mykb = InMemoryLookupKB(vocab, entity_vector_length=3)
mykb.add_entity(entity=entity_id, freq=12, entity_vector=[6, -4, 3])
mykb.add_alias(
alias="Mahler",
@ -1176,3 +1201,18 @@ def test_threshold(meet_threshold: bool, config: Dict[str, Any]):
assert len(doc.ents) == 1
assert doc.ents[0].kb_id_ == entity_id if meet_threshold else EntityLinker.NIL
def test_span_maker_forward_with_empty():
"""The forward pass of the span maker may have a doc with no entities."""
nlp = English()
doc1 = nlp("a b c")
ent = doc1[0:1]
ent.label_ = "X"
doc1.ents = [ent]
# no entities
doc2 = nlp("x y z")
# just to get a model
span_maker = build_span_maker()
span_maker([doc1, doc2], False)

View File

@ -615,20 +615,18 @@ def test_enable_disable_conflict_with_config():
with make_tempdir() as tmp_dir:
nlp.to_disk(tmp_dir)
# Expected to fail, as config and arguments conflict.
with pytest.raises(ValueError):
spacy.load(
tmp_dir, enable=["tagger"], config={"nlp": {"disabled": ["senter"]}}
)
# Expected to succeed, as config and arguments do not conflict.
assert spacy.load(
tmp_dir, enable=["tagger"], config={"nlp": {"disabled": ["senter"]}}
).disabled == ["senter", "sentencizer"]
# Expected to succeed without warning due to the lack of a conflicting config option.
spacy.load(tmp_dir, enable=["tagger"])
# Expected to succeed with a warning, as disable=[] should override the config setting.
with pytest.warns(UserWarning):
# Expected to fail due to conflict between enable and disabled.
with pytest.raises(ValueError):
spacy.load(
tmp_dir,
enable=["tagger"],
disable=[],
config={"nlp": {"disabled": ["senter"]}},
enable=["senter"],
config={"nlp": {"disabled": ["senter", "tagger"]}},
)

View File

@ -823,10 +823,10 @@ def test_textcat_loss(multi_label: bool, expected_loss: float):
assert loss == expected_loss
def test_textcat_threshold():
def test_textcat_multilabel_threshold():
# Ensure the scorer can be called with a different threshold
nlp = English()
nlp.add_pipe("textcat")
nlp.add_pipe("textcat_multilabel")
train_examples = []
for text, annotations in TRAIN_DATA_SINGLE_LABEL:
@ -849,7 +849,7 @@ def test_textcat_threshold():
)
pos_f = scores["cats_score"]
assert scores["cats_f_per_type"]["POSITIVE"]["r"] == 1.0
assert pos_f > macro_f
assert pos_f >= macro_f
def test_textcat_multi_threshold():

View File

@ -230,6 +230,97 @@ def test_tok2vec_listener_callback():
assert get_dX(Y) is not None
def test_tok2vec_listener_overfitting():
"""Test that a pipeline with a listener properly overfits, even if 'tok2vec' is in the annotating components"""
orig_config = Config().from_str(cfg_string)
nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True)
train_examples = []
for t in TRAIN_DATA:
train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
optimizer = nlp.initialize(get_examples=lambda: train_examples)
for i in range(50):
losses = {}
nlp.update(train_examples, sgd=optimizer, losses=losses, annotates=["tok2vec"])
assert losses["tagger"] < 0.00001
# test the trained model
test_text = "I like blue eggs"
doc = nlp(test_text)
assert doc[0].tag_ == "N"
assert doc[1].tag_ == "V"
assert doc[2].tag_ == "J"
assert doc[3].tag_ == "N"
# Also test the results are still the same after IO
with make_tempdir() as tmp_dir:
nlp.to_disk(tmp_dir)
nlp2 = util.load_model_from_path(tmp_dir)
doc2 = nlp2(test_text)
assert doc2[0].tag_ == "N"
assert doc2[1].tag_ == "V"
assert doc2[2].tag_ == "J"
assert doc2[3].tag_ == "N"
def test_tok2vec_frozen_not_annotating():
"""Test that a pipeline with a frozen tok2vec raises an error when the tok2vec is not annotating"""
orig_config = Config().from_str(cfg_string)
nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True)
train_examples = []
for t in TRAIN_DATA:
train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
optimizer = nlp.initialize(get_examples=lambda: train_examples)
for i in range(2):
losses = {}
with pytest.raises(
ValueError, match=r"the tok2vec embedding layer is not updated"
):
nlp.update(
train_examples, sgd=optimizer, losses=losses, exclude=["tok2vec"]
)
def test_tok2vec_frozen_overfitting():
"""Test that a pipeline with a frozen & annotating tok2vec can still overfit"""
orig_config = Config().from_str(cfg_string)
nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True)
train_examples = []
for t in TRAIN_DATA:
train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
optimizer = nlp.initialize(get_examples=lambda: train_examples)
for i in range(100):
losses = {}
nlp.update(
train_examples,
sgd=optimizer,
losses=losses,
exclude=["tok2vec"],
annotates=["tok2vec"],
)
assert losses["tagger"] < 0.0001
# test the trained model
test_text = "I like blue eggs"
doc = nlp(test_text)
assert doc[0].tag_ == "N"
assert doc[1].tag_ == "V"
assert doc[2].tag_ == "J"
assert doc[3].tag_ == "N"
# Also test the results are still the same after IO
with make_tempdir() as tmp_dir:
nlp.to_disk(tmp_dir)
nlp2 = util.load_model_from_path(tmp_dir)
doc2 = nlp2(test_text)
assert doc2[0].tag_ == "N"
assert doc2[1].tag_ == "V"
assert doc2[2].tag_ == "J"
assert doc2[3].tag_ == "N"
def test_replace_listeners():
orig_config = Config().from_str(cfg_string)
nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True)

View File

@ -3,7 +3,7 @@ from unittest import TestCase
import pytest
import srsly
from numpy import zeros
from spacy.kb import KnowledgeBase, Writer
from spacy.kb.kb_in_memory import InMemoryLookupKB, Writer
from spacy.vectors import Vectors
from spacy.language import Language
from spacy.pipeline import TrainablePipe
@ -71,7 +71,7 @@ def entity_linker():
nlp = Language()
def create_kb(vocab):
kb = KnowledgeBase(vocab, entity_vector_length=1)
kb = InMemoryLookupKB(vocab, entity_vector_length=1)
kb.add_entity("test", 0.0, zeros((1, 1), dtype="f"))
return kb
@ -120,7 +120,7 @@ def test_writer_with_path_py35():
def test_save_and_load_knowledge_base():
nlp = Language()
kb = KnowledgeBase(nlp.vocab, entity_vector_length=1)
kb = InMemoryLookupKB(nlp.vocab, entity_vector_length=1)
with make_tempdir() as d:
path = d / "kb"
try:
@ -129,7 +129,7 @@ def test_save_and_load_knowledge_base():
pytest.fail(str(e))
try:
kb_loaded = KnowledgeBase(nlp.vocab, entity_vector_length=1)
kb_loaded = InMemoryLookupKB(nlp.vocab, entity_vector_length=1)
kb_loaded.from_disk(path)
except Exception as e:
pytest.fail(str(e))

View File

@ -2,7 +2,7 @@ from typing import Callable
from spacy import util
from spacy.util import ensure_path, registry, load_model_from_config
from spacy.kb import KnowledgeBase
from spacy.kb.kb_in_memory import InMemoryLookupKB
from spacy.vocab import Vocab
from thinc.api import Config
@ -22,7 +22,7 @@ def test_serialize_kb_disk(en_vocab):
dir_path.mkdir()
file_path = dir_path / "kb"
kb1.to_disk(str(file_path))
kb2 = KnowledgeBase(vocab=en_vocab, entity_vector_length=3)
kb2 = InMemoryLookupKB(vocab=en_vocab, entity_vector_length=3)
kb2.from_disk(str(file_path))
# final assertions
@ -30,7 +30,7 @@ def test_serialize_kb_disk(en_vocab):
def _get_dummy_kb(vocab):
kb = KnowledgeBase(vocab, entity_vector_length=3)
kb = InMemoryLookupKB(vocab, entity_vector_length=3)
kb.add_entity(entity="Q53", freq=33, entity_vector=[0, 5, 3])
kb.add_entity(entity="Q17", freq=2, entity_vector=[7, 1, 0])
kb.add_entity(entity="Q007", freq=7, entity_vector=[0, 0, 7])
@ -104,7 +104,7 @@ def test_serialize_subclassed_kb():
custom_field = 666
"""
class SubKnowledgeBase(KnowledgeBase):
class SubInMemoryLookupKB(InMemoryLookupKB):
def __init__(self, vocab, entity_vector_length, custom_field):
super().__init__(vocab, entity_vector_length)
self.custom_field = custom_field
@ -112,9 +112,9 @@ def test_serialize_subclassed_kb():
@registry.misc("spacy.CustomKB.v1")
def custom_kb(
entity_vector_length: int, custom_field: int
) -> Callable[[Vocab], KnowledgeBase]:
) -> Callable[[Vocab], InMemoryLookupKB]:
def custom_kb_factory(vocab):
kb = SubKnowledgeBase(
kb = SubInMemoryLookupKB(
vocab=vocab,
entity_vector_length=entity_vector_length,
custom_field=custom_field,
@ -129,7 +129,7 @@ def test_serialize_subclassed_kb():
nlp.initialize()
entity_linker = nlp.get_pipe("entity_linker")
assert type(entity_linker.kb) == SubKnowledgeBase
assert type(entity_linker.kb) == SubInMemoryLookupKB
assert entity_linker.kb.entity_vector_length == 342
assert entity_linker.kb.custom_field == 666
@ -139,6 +139,6 @@ def test_serialize_subclassed_kb():
nlp2 = util.load_model_from_path(tmp_dir)
entity_linker2 = nlp2.get_pipe("entity_linker")
# After IO, the KB is the standard one
assert type(entity_linker2.kb) == KnowledgeBase
assert type(entity_linker2.kb) == InMemoryLookupKB
assert entity_linker2.kb.entity_vector_length == 342
assert not hasattr(entity_linker2.kb, "custom_field")

View File

@ -404,11 +404,10 @@ def test_serialize_pipeline_disable_enable():
assert nlp3.component_names == ["ner", "tagger"]
with make_tempdir() as d:
nlp3.to_disk(d)
with pytest.warns(UserWarning):
nlp4 = spacy.load(d, disable=["ner"])
assert nlp4.pipe_names == ["tagger"]
nlp4 = spacy.load(d, disable=["ner"])
assert nlp4.pipe_names == []
assert nlp4.component_names == ["ner", "tagger"]
assert nlp4.disabled == ["ner"]
assert nlp4.disabled == ["ner", "tagger"]
with make_tempdir() as d:
nlp.to_disk(d)
nlp5 = spacy.load(d, exclude=["tagger"])

View File

@ -1,5 +1,6 @@
import os
import math
import pkg_resources
from random import sample
from typing import Counter
@ -25,7 +26,7 @@ from spacy.cli.download import get_compatibility, get_version
from spacy.cli.init_config import RECOMMENDATIONS, init_config, fill_config
from spacy.cli.package import get_third_party_dependencies
from spacy.cli.package import _is_permitted_package_name
from spacy.cli.project.run import run_commands
from spacy.cli.project.run import _check_requirements, run_commands
from spacy.cli.validate import get_model_pkgs
from spacy.compat import is_windows
from spacy.lang.en import English
@ -907,3 +908,42 @@ def test_run_commands(tmp_path):
finally:
# restore the original cwd so other tests are unaffected
os.chdir(cwd)
@pytest.mark.parametrize(
"reqs,output",
[
[
"""
spacy
# comment
thinc""",
(False, False),
],
[
"""# comment
--some-flag
spacy""",
(False, False),
],
[
"""# comment
--some-flag
spacy; python_version >= '3.6'""",
(False, False),
],
[
"""# comment
spacyunknowndoesnotexist12345""",
(True, False),
],
],
)
def test_project_check_requirements(reqs, output):
# excessive guard against unlikely package name
try:
pkg_resources.require("spacyunknowndoesnotexist12345")
except pkg_resources.DistributionNotFound:
assert output == _check_requirements([req.strip() for req in reqs.split("\n")])

View File

@ -23,7 +23,7 @@ def get_textcat_bow_kwargs():
def get_textcat_cnn_kwargs():
return {"tok2vec": test_tok2vec(), "exclusive_classes": False, "nO": 13}
return {"tok2vec": make_test_tok2vec(), "exclusive_classes": False, "nO": 13}
def get_all_params(model):
@ -65,7 +65,7 @@ def get_tok2vec_kwargs():
}
def test_tok2vec():
def make_test_tok2vec():
return build_Tok2Vec_model(**get_tok2vec_kwargs())

View File

@ -474,3 +474,50 @@ def test_prf_score():
assert (a.precision, a.recall, a.fscore) == approx(
(c.precision, c.recall, c.fscore)
)
def test_score_cats(en_tokenizer):
text = "some text"
gold_doc = en_tokenizer(text)
gold_doc.cats = {"POSITIVE": 1.0, "NEGATIVE": 0.0}
pred_doc = en_tokenizer(text)
pred_doc.cats = {"POSITIVE": 0.75, "NEGATIVE": 0.25}
example = Example(pred_doc, gold_doc)
# threshold is ignored for multi_label=False
scores1 = Scorer.score_cats(
[example],
"cats",
labels=list(gold_doc.cats.keys()),
multi_label=False,
positive_label="POSITIVE",
threshold=0.1,
)
scores2 = Scorer.score_cats(
[example],
"cats",
labels=list(gold_doc.cats.keys()),
multi_label=False,
positive_label="POSITIVE",
threshold=0.9,
)
assert scores1["cats_score"] == 1.0
assert scores2["cats_score"] == 1.0
assert scores1 == scores2
# threshold is relevant for multi_label=True
scores = Scorer.score_cats(
[example],
"cats",
labels=list(gold_doc.cats.keys()),
multi_label=True,
threshold=0.9,
)
assert scores["cats_macro_f"] == 0.0
# threshold is relevant for multi_label=True
scores = Scorer.score_cats(
[example],
"cats",
labels=list(gold_doc.cats.keys()),
multi_label=True,
threshold=0.1,
)
assert scores["cats_macro_f"] == 0.5

View File

@ -42,7 +42,8 @@ class SpanGroups(UserDict):
def copy(self, doc: Optional["Doc"] = None) -> "SpanGroups":
if doc is None:
doc = self._ensure_doc()
return SpanGroups(doc).from_bytes(self.to_bytes())
data_copy = ((k, v.copy(doc=doc)) for k, v in self.items())
return SpanGroups(doc, items=data_copy)
def setdefault(self, key, default=None):
if not isinstance(default, SpanGroup):

View File

@ -1608,24 +1608,20 @@ cdef class Doc:
Doc.set_extension(attr)
self._.set(attr, doc_json["_"][attr])
if doc_json.get("underscore_token", {}):
for token_attr in doc_json["underscore_token"]:
token_start = doc_json["underscore_token"][token_attr]["token_start"]
value = doc_json["underscore_token"][token_attr]["value"]
if not Token.has_extension(token_attr):
Token.set_extension(token_attr)
self[token_start]._.set(token_attr, value)
for token_attr in doc_json.get("underscore_token", {}):
if not Token.has_extension(token_attr):
Token.set_extension(token_attr)
for token_data in doc_json["underscore_token"][token_attr]:
start = token_by_char(self.c, self.length, token_data["start"])
value = token_data["value"]
self[start]._.set(token_attr, value)
if doc_json.get("underscore_span", {}):
for span_attr in doc_json["underscore_span"]:
token_start = doc_json["underscore_span"][span_attr]["token_start"]
token_end = doc_json["underscore_span"][span_attr]["token_end"]
value = doc_json["underscore_span"][span_attr]["value"]
if not Span.has_extension(span_attr):
Span.set_extension(span_attr)
self[token_start:token_end]._.set(span_attr, value)
for span_attr in doc_json.get("underscore_span", {}):
if not Span.has_extension(span_attr):
Span.set_extension(span_attr)
for span_data in doc_json["underscore_span"][span_attr]:
value = span_data["value"]
self.char_span(span_data["start"], span_data["end"])._.set(span_attr, value)
return self
def to_json(self, underscore=None):
@ -1672,31 +1668,44 @@ cdef class Doc:
if underscore:
user_keys = set()
# Handle doc attributes with .get to include values from getters
# and not only values stored in user_data, for backwards
# compatibility
for attr in underscore:
if self.has_extension(attr):
if "_" not in data:
data["_"] = {}
value = self._.get(attr)
if not srsly.is_json_serializable(value):
raise ValueError(Errors.E107.format(attr=attr, value=repr(value)))
data["_"][attr] = value
user_keys.add(attr)
# Token and span attributes only include values stored in user_data
# and not values generated by getters
if self.user_data:
data["_"] = {}
data["underscore_token"] = {}
data["underscore_span"] = {}
for data_key in self.user_data:
for data_key, value in self.user_data.copy().items():
if type(data_key) == tuple and len(data_key) >= 4 and data_key[0] == "._.":
attr = data_key[1]
start = data_key[2]
end = data_key[3]
if attr in underscore:
user_keys.add(attr)
value = self.user_data[data_key]
if not srsly.is_json_serializable(value):
raise ValueError(Errors.E107.format(attr=attr, value=repr(value)))
# Check if doc attribute
if start is None:
data["_"][attr] = value
# Check if token attribute
elif end is None:
# Token attribute
if start is not None and end is None:
if "underscore_token" not in data:
data["underscore_token"] = {}
if attr not in data["underscore_token"]:
data["underscore_token"][attr] = {"token_start": start, "value": value}
# Else span attribute
else:
data["underscore_token"][attr] = []
data["underscore_token"][attr].append({"start": start, "value": value})
# Span attribute
elif start is not None and end is not None:
if "underscore_span" not in data:
data["underscore_span"] = {}
if attr not in data["underscore_span"]:
data["underscore_span"][attr] = {"token_start": start, "token_end": end, "value": value}
data["underscore_span"][attr] = []
data["underscore_span"][attr].append({"start": start, "end": end, "value": value})
for attr in underscore:
if attr not in user_keys:
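The reworked `to_json()` / `from_json()` above serialize custom token and span extension attributes as lists of records keyed by character offsets, rather than a single `token_start` entry per attribute. A minimal round-trip sketch, not part of the diff (the extension names are made up):

```python
import spacy
from spacy.tokens import Doc, Span, Token

# Hypothetical extensions used only for this illustration.
Token.set_extension("note", default=None)
Span.set_extension("note_span", default=None)

nlp = spacy.blank("en")
doc = nlp("Paris is nice")
doc[0]._.note = "city"
doc[0:2]._.note_span = "statement"

data = doc.to_json(underscore=["note", "note_span"])
# Token/span values are stored with character offsets, e.g.
# data["underscore_token"]["note"] == [{"start": 0, "value": "city"}]
doc2 = Doc(nlp.vocab).from_json(data)
assert doc2[0]._.note == "city"
assert doc2[0:2]._.note_span == "statement"
```

The list-based layout is what lets several tokens or spans carry the same extension attribute, which the old one-record-per-attribute format could not represent.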

View File

@ -117,15 +117,13 @@ class Span:
end_char: int
label: int
kb_id: int
id: int
ent_id: int
ent_id_: str
@property
def id(self) -> int: ...
@property
def id_(self) -> str: ...
@property
def orth_(self) -> str: ...
@property
def lemma_(self) -> str: ...
label_: str
kb_id_: str
id_: str

View File

@ -1,4 +1,4 @@
from typing import Any, Dict, Iterable
from typing import Any, Dict, Iterable, Optional
from .doc import Doc
from .span import Span
@ -24,4 +24,4 @@ class SpanGroup:
def __getitem__(self, i: int) -> Span: ...
def to_bytes(self) -> bytes: ...
def from_bytes(self, bytes_data: bytes) -> SpanGroup: ...
def copy(self) -> SpanGroup: ...
def copy(self, doc: Optional[Doc] = ...) -> SpanGroup: ...

View File

@ -241,15 +241,18 @@ cdef class SpanGroup:
cdef void push_back(self, SpanC span) nogil:
self.c.push_back(span)
def copy(self) -> SpanGroup:
def copy(self, doc: Optional["Doc"] = None) -> SpanGroup:
"""Clones the span group.
doc (Doc): New reference document to which the copy is bound.
RETURNS (SpanGroup): A copy of the span group.
DOCS: https://spacy.io/api/spangroup#copy
"""
if doc is None:
doc = self.doc
return SpanGroup(
self.doc,
doc,
name=self.name,
attrs=deepcopy(self.attrs),
spans=list(self),
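A hedged usage sketch of the new `doc` argument (texts and span group name invented): cloning a span group so it is bound to a second `Doc` over the same tokens.

```python
import spacy

nlp = spacy.blank("en")
doc = nlp("Berlin and Paris")
doc.spans["cities"] = [doc[0:1], doc[2:3]]

doc2 = nlp("Berlin and Paris")        # a second Doc with identical tokenization
cities_copy = doc.spans["cities"].copy(doc=doc2)

assert cities_copy.doc is doc2        # the copy is rebound to the new Doc
assert cities_copy[0].text == "Berlin"
```

This is what the `SpanGroups.copy()` change earlier in this diff relies on to rebind every group when a whole `Doc` is copied.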

View File

@ -443,9 +443,9 @@ def load_model_from_package(
name: str,
*,
vocab: Union["Vocab", bool] = True,
disable: Union[str, Iterable[str]] = SimpleFrozenList(),
enable: Union[str, Iterable[str]] = SimpleFrozenList(),
exclude: Union[str, Iterable[str]] = SimpleFrozenList(),
disable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
enable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
exclude: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
config: Union[Dict[str, Any], Config] = SimpleFrozenDict(),
) -> "Language":
"""Load a model from an installed package.
@ -619,9 +619,9 @@ def load_model_from_init_py(
init_file: Union[Path, str],
*,
vocab: Union["Vocab", bool] = True,
disable: Union[str, Iterable[str]] = SimpleFrozenList(),
enable: Union[str, Iterable[str]] = SimpleFrozenList(),
exclude: Union[str, Iterable[str]] = SimpleFrozenList(),
disable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
enable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
exclude: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
config: Union[Dict[str, Any], Config] = SimpleFrozenDict(),
) -> "Language":
"""Helper function to use in the `load()` method of a model package's

View File

@ -155,7 +155,7 @@ import Tag from 'components/tag'
> ```jsx
> <Tag>method</Tag>
> <Tag variant="new">2.1</Tag>
> <Tag variant="new">4</Tag>
> <Tag variant="model">tagger, parser</Tag>
> ```
@ -170,7 +170,7 @@ installed.
<InlineList>
<Tag>method</Tag> <Tag variant="new">2</Tag> <Tag variant="model">tagger,
<Tag>method</Tag> <Tag variant="new">4</Tag> <Tag variant="model">tagger,
parser</Tag>
</InlineList>

View File

@ -53,7 +53,7 @@ $ python -m spacy download [model] [--direct] [--sdist] [pip_args]
| `--direct`, `-D` | Force direct download of exact package version. ~~bool (flag)~~ |
| `--sdist`, `-S` <Tag variant="new">3</Tag> | Download the source package (`.tar.gz` archive) instead of the default pre-built binary wheel. ~~bool (flag)~~ |
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
| pip args <Tag variant="new">2.1</Tag> | Additional installation options to be passed to `pip install` when installing the pipeline package. For example, `--user` to install to the user home directory or `--no-deps` to not install package dependencies. ~~Any (option/flag)~~ |
| pip args | Additional installation options to be passed to `pip install` when installing the pipeline package. For example, `--user` to install to the user home directory or `--no-deps` to not install package dependencies. ~~Any (option/flag)~~ |
| **CREATES** | The installed pipeline package in your `site-packages` directory. |
## info {#info tag="command"}
@ -77,15 +77,15 @@ $ python -m spacy info [--markdown] [--silent] [--exclude]
$ python -m spacy info [model] [--markdown] [--silent] [--exclude]
```
| Name | Description |
| ------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------- |
| `model` | A trained pipeline, i.e. package name or path (optional). ~~Optional[str] \(option)~~ |
| `--markdown`, `-md` | Print information as Markdown. ~~bool (flag)~~ |
| `--silent`, `-s` <Tag variant="new">2.0.12</Tag> | Don't print anything, just return the values. ~~bool (flag)~~ |
| `--exclude`, `-e` | Comma-separated keys to exclude from the print-out. Defaults to `"labels"`. ~~Optional[str]~~ |
| `--url`, `-u` <Tag variant="new">3.5.0</Tag> | Print the URL to download the most recent compatible version of the pipeline. Requires a pipeline name. ~~bool (flag)~~ |
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
| **PRINTS** | Information about your spaCy installation. |
| Name | Description |
| -------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------- |
| `model` | A trained pipeline, i.e. package name or path (optional). ~~Optional[str] \(option)~~ |
| `--markdown`, `-md` | Print information as Markdown. ~~bool (flag)~~ |
| `--silent`, `-s` | Don't print anything, just return the values. ~~bool (flag)~~ |
| `--exclude`, `-e` | Comma-separated keys to exclude from the print-out. Defaults to `"labels"`. ~~Optional[str]~~ |
| `--url`, `-u` <Tag variant="new">3.5.0</Tag> | Print the URL to download the most recent compatible version of the pipeline. Requires a pipeline name. ~~bool (flag)~~ |
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
| **PRINTS** | Information about your spaCy installation. |
## validate {#validate new="2" tag="command"}
@ -260,22 +260,22 @@ chosen based on the file extension of the input file.
$ python -m spacy convert [input_file] [output_dir] [--converter] [--file-type] [--n-sents] [--seg-sents] [--base] [--morphology] [--merge-subtokens] [--ner-map] [--lang]
```
| Name | Description |
| ------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------- |
| `input_path` | Input file or directory. ~~Path (positional)~~ |
| `output_dir` | Output directory for converted file. Defaults to `"-"`, meaning data will be written to `stdout`. ~~Optional[Path] \(option)~~ |
| `--converter`, `-c` <Tag variant="new">2</Tag> | Name of converter to use (see below). ~~str (option)~~ |
| `--file-type`, `-t` <Tag variant="new">2.1</Tag> | Type of file to create. Either `spacy` (default) for binary [`DocBin`](/api/docbin) data or `json` for v2.x JSON format. ~~str (option)~~ |
| `--n-sents`, `-n` | Number of sentences per document. Supported for: `conll`, `conllu`, `iob`, `ner` ~~int (option)~~ |
| `--seg-sents`, `-s` <Tag variant="new">2.2</Tag> | Segment sentences. Supported for: `conll`, `ner` ~~bool (flag)~~ |
| `--base`, `-b`, `--model` | Trained spaCy pipeline for sentence segmentation to use as base (for `--seg-sents`). ~~Optional[str](option)~~ |
| `--morphology`, `-m` | Enable appending morphology to tags. Supported for: `conllu` ~~bool (flag)~~ |
| `--merge-subtokens`, `-T` | Merge CoNLL-U subtokens ~~bool (flag)~~ |
| `--ner-map`, `-nm` | NER tag mapping (as JSON-encoded dict of entity types). Supported for: `conllu` ~~Optional[Path](option)~~ |
| `--lang`, `-l` <Tag variant="new">2.1</Tag> | Language code (if tokenizer required). ~~Optional[str] \(option)~~ |
| `--concatenate`, `-C` | Concatenate output to a single file ~~bool (flag)~~ |
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
| **CREATES** | Binary [`DocBin`](/api/docbin) training data that can be used with [`spacy train`](/api/cli#train). |
| Name | Description |
| ------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- |
| `input_path` | Input file or directory. ~~Path (positional)~~ |
| `output_dir` | Output directory for converted file. Defaults to `"-"`, meaning data will be written to `stdout`. ~~Optional[Path] \(option)~~ |
| `--converter`, `-c` | Name of converter to use (see below). ~~str (option)~~ |
| `--file-type`, `-t` | Type of file to create. Either `spacy` (default) for binary [`DocBin`](/api/docbin) data or `json` for v2.x JSON format. ~~str (option)~~ |
| `--n-sents`, `-n` | Number of sentences per document. Supported for: `conll`, `conllu`, `iob`, `ner` ~~int (option)~~ |
| `--seg-sents`, `-s` | Segment sentences. Supported for: `conll`, `ner` ~~bool (flag)~~ |
| `--base`, `-b`, `--model` | Trained spaCy pipeline for sentence segmentation to use as base (for `--seg-sents`). ~~Optional[str](option)~~ |
| `--morphology`, `-m` | Enable appending morphology to tags. Supported for: `conllu` ~~bool (flag)~~ |
| `--merge-subtokens`, `-T` | Merge CoNLL-U subtokens ~~bool (flag)~~ |
| `--ner-map`, `-nm` | NER tag mapping (as JSON-encoded dict of entity types). Supported for: `conllu` ~~Optional[Path](option)~~ |
| `--lang`, `-l` | Language code (if tokenizer required). ~~Optional[str] \(option)~~ |
| `--concatenate`, `-C` | Concatenate output to a single file ~~bool (flag)~~ |
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
| **CREATES** | Binary [`DocBin`](/api/docbin) training data that can be used with [`spacy train`](/api/cli#train). |
### Converters {#converters}
@ -474,8 +474,7 @@ report span characteristics such as the average span length and the span (or
span boundary) distinctiveness. The distinctiveness measure shows how different
the tokens are with respect to the rest of the corpus using the KL-divergence of
the token distributions. To learn more, you can check out Papay et al.'s work on
[*Dissecting Span Identification Tasks with Performance Prediction* (EMNLP
2020)](https://aclanthology.org/2020.emnlp-main.396/).
[*Dissecting Span Identification Tasks with Performance Prediction* (EMNLP 2020)](https://aclanthology.org/2020.emnlp-main.396/).
</Infobox>
@ -1229,19 +1228,19 @@ $ python -m spacy package [input_dir] [output_dir] [--code] [--meta-path] [--cre
> $ pip install dist/en_pipeline-0.0.0.tar.gz
> ```
| Name | Description |
| ------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `input_dir` | Path to directory containing pipeline data. ~~Path (positional)~~ |
| `output_dir` | Directory to create package folder in. ~~Path (positional)~~ |
| `--code`, `-c` <Tag variant="new">3</Tag> | Comma-separated paths to Python files to be included in the package and imported in its `__init__.py`. This allows including [registering functions](/usage/training#custom-functions) and [custom components](/usage/processing-pipelines#custom-components). ~~str (option)~~ |
| `--meta-path`, `-m` <Tag variant="new">2</Tag> | Path to [`meta.json`](/api/data-formats#meta) file (optional). ~~Optional[Path] \(option)~~ |
| `--create-meta`, `-C` <Tag variant="new">2</Tag> | Create a `meta.json` file on the command line, even if one already exists in the directory. If an existing file is found, its entries will be shown as the defaults in the command line prompt. ~~bool (flag)~~ |
| `--build`, `-b` <Tag variant="new">3</Tag> | Comma-separated artifact formats to build. Can be `sdist` (for a `.tar.gz` archive) and/or `wheel` (for a binary `.whl` file), or `none` if you want to run this step manually. The generated artifacts can be installed by `pip install`. Defaults to `sdist`. ~~str (option)~~ |
| `--name`, `-n` <Tag variant="new">3</Tag> | Package name to override in meta. ~~Optional[str] \(option)~~ |
| `--version`, `-v` <Tag variant="new">3</Tag> | Package version to override in meta. Useful when training new versions, as it doesn't require editing the meta template. ~~Optional[str] \(option)~~ |
| `--force`, `-f` | Force overwriting of existing folder in output directory. ~~bool (flag)~~ |
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
| **CREATES** | A Python package containing the spaCy pipeline. |
| Name | Description |
| -------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `input_dir` | Path to directory containing pipeline data. ~~Path (positional)~~ |
| `output_dir` | Directory to create package folder in. ~~Path (positional)~~ |
| `--code`, `-c` <Tag variant="new">3</Tag> | Comma-separated paths to Python files to be included in the package and imported in its `__init__.py`. This allows including [registering functions](/usage/training#custom-functions) and [custom components](/usage/processing-pipelines#custom-components). ~~str (option)~~ |
| `--meta-path`, `-m` | Path to [`meta.json`](/api/data-formats#meta) file (optional). ~~Optional[Path] \(option)~~ |
| `--create-meta`, `-C` | Create a `meta.json` file on the command line, even if one already exists in the directory. If an existing file is found, its entries will be shown as the defaults in the command line prompt. ~~bool (flag)~~ |
| `--build`, `-b` <Tag variant="new">3</Tag> | Comma-separated artifact formats to build. Can be `sdist` (for a `.tar.gz` archive) and/or `wheel` (for a binary `.whl` file), or `none` if you want to run this step manually. The generated artifacts can be installed by `pip install`. Defaults to `sdist`. ~~str (option)~~ |
| `--name`, `-n` <Tag variant="new">3</Tag> | Package name to override in meta. ~~Optional[str] \(option)~~ |
| `--version`, `-v` <Tag variant="new">3</Tag> | Package version to override in meta. Useful when training new versions, as it doesn't require editing the meta template. ~~Optional[str] \(option)~~ |
| `--force`, `-f` | Force overwriting of existing folder in output directory. ~~bool (flag)~~ |
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
| **CREATES** | A Python package containing the spaCy pipeline. |
## project {#project new="3"}

View File

@ -209,15 +209,15 @@ alignment mode `"strict".
> assert span.text == "New York"
> ```
| Name | Description |
| ------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `start` | The index of the first character of the span. ~~int~~ |
| `end` | The index of the last character after the span. ~~int~~ |
| `label` | A label to attach to the span, e.g. for named entities. ~~Union[int, str]~~ |
| `kb_id` <Tag variant="new">2.2</Tag> | An ID from a knowledge base to capture the meaning of a named entity. ~~Union[int, str]~~ |
| `vector` | A meaning representation of the span. ~~numpy.ndarray[ndim=1, dtype=float32]~~ |
| `alignment_mode` | How character indices snap to token boundaries. Options: `"strict"` (no snapping), `"contract"` (span of all tokens completely within the character span), `"expand"` (span of all tokens at least partially covered by the character span). Defaults to `"strict"`. ~~str~~ |
| **RETURNS** | The newly constructed object or `None`. ~~Optional[Span]~~ |
| Name | Description |
| ---------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `start` | The index of the first character of the span. ~~int~~ |
| `end` | The index of the last character after the span. ~~int~~ |
| `label` | A label to attach to the span, e.g. for named entities. ~~Union[int, str]~~ |
| `kb_id` | An ID from a knowledge base to capture the meaning of a named entity. ~~Union[int, str]~~ |
| `vector` | A meaning representation of the span. ~~numpy.ndarray[ndim=1, dtype=float32]~~ |
| `alignment_mode` | How character indices snap to token boundaries. Options: `"strict"` (no snapping), `"contract"` (span of all tokens completely within the character span), `"expand"` (span of all tokens at least partially covered by the character span). Defaults to `"strict"`. ~~str~~ |
| **RETURNS** | The newly constructed object or `None`. ~~Optional[Span]~~ |
## Doc.set_ents {#set_ents tag="method" new="3"}
@ -751,22 +751,22 @@ The L2 norm of the document's vector representation.
## Attributes {#attributes}
| Name | Description |
| ------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------- |
| `text` | A string representation of the document text. ~~str~~ |
| `text_with_ws` | An alias of `Doc.text`, provided for duck-type compatibility with `Span` and `Token`. ~~str~~ |
| `mem` | The document's local memory heap, for all C data it owns. ~~cymem.Pool~~ |
| `vocab` | The store of lexical types. ~~Vocab~~ |
| `tensor` <Tag variant="new">2</Tag> | Container for dense vector representations. ~~numpy.ndarray~~ |
| `user_data` | A generic storage area, for user custom data. ~~Dict[str, Any]~~ |
| `lang` <Tag variant="new">2.1</Tag> | Language of the document's vocabulary. ~~int~~ |
| `lang_` <Tag variant="new">2.1</Tag> | Language of the document's vocabulary. ~~str~~ |
| `sentiment` | The document's positivity/negativity score, if available. ~~float~~ |
| `user_hooks` | A dictionary that allows customization of the `Doc`'s properties. ~~Dict[str, Callable]~~ |
| `user_token_hooks` | A dictionary that allows customization of properties of `Token` children. ~~Dict[str, Callable]~~ |
| `user_span_hooks` | A dictionary that allows customization of properties of `Span` children. ~~Dict[str, Callable]~~ |
| `has_unknown_spaces` | Whether the document was constructed without known spacing between tokens (typically when created from gold tokenization). ~~bool~~ |
| `_` | User space for adding custom [attribute extensions](/usage/processing-pipelines#custom-components-attributes). ~~Underscore~~ |
| Name | Description |
| -------------------- | ----------------------------------------------------------------------------------------------------------------------------------- |
| `text` | A string representation of the document text. ~~str~~ |
| `text_with_ws` | An alias of `Doc.text`, provided for duck-type compatibility with `Span` and `Token`. ~~str~~ |
| `mem` | The document's local memory heap, for all C data it owns. ~~cymem.Pool~~ |
| `vocab` | The store of lexical types. ~~Vocab~~ |
| `tensor` | Container for dense vector representations. ~~numpy.ndarray~~ |
| `user_data` | A generic storage area, for user custom data. ~~Dict[str, Any]~~ |
| `lang` | Language of the document's vocabulary. ~~int~~ |
| `lang_` | Language of the document's vocabulary. ~~str~~ |
| `sentiment` | The document's positivity/negativity score, if available. ~~float~~ |
| `user_hooks` | A dictionary that allows customization of the `Doc`'s properties. ~~Dict[str, Callable]~~ |
| `user_token_hooks` | A dictionary that allows customization of properties of `Token` children. ~~Dict[str, Callable]~~ |
| `user_span_hooks` | A dictionary that allows customization of properties of `Span` children. ~~Dict[str, Callable]~~ |
| `has_unknown_spaces` | Whether the document was constructed without known spacing between tokens (typically when created from gold tokenization). ~~bool~~ |
| `_` | User space for adding custom [attribute extensions](/usage/processing-pipelines#custom-components-attributes). ~~Underscore~~ |
## Serialization fields {#serialization-fields}

View File

@ -14,7 +14,8 @@ entities) to unique identifiers, grounding the named entities into the "real
world". It requires a `KnowledgeBase`, as well as a function to generate
plausible candidates from that `KnowledgeBase` given a certain textual mention,
and a machine learning model to pick the right candidate, given the local
context of the mention.
context of the mention. `EntityLinker` defaults to using the
[`InMemoryLookupKB`](/api/kb_in_memory) implementation.
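As a hedged illustration of this requirement (the component setup and vector size below are assumed, not prescribed by the docs), a knowledge base can be supplied to a freshly added `entity_linker` component through `EntityLinker.set_kb`:

```python
import spacy
from spacy.kb import InMemoryLookupKB

nlp = spacy.blank("en")
entity_linker = nlp.add_pipe("entity_linker")

def create_kb(vocab):
    # Minimal in-memory KB with one entity and one alias.
    kb = InMemoryLookupKB(vocab, entity_vector_length=64)
    kb.add_entity(entity="Q42", freq=12, entity_vector=[0.0] * 64)
    kb.add_alias(alias="Douglas", entities=["Q42"], probabilities=[1.0])
    return kb

entity_linker.set_kb(create_kb)
```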
## Assigned Attributes {#assigned-attributes}
@ -170,7 +171,7 @@ with the current vocab.
>
> ```python
> def create_kb(vocab):
> kb = KnowledgeBase(vocab, entity_vector_length=128)
> kb = InMemoryLookupKB(vocab, entity_vector_length=128)
> kb.add_entity(...)
> kb.add_alias(...)
> return kb

View File

@ -4,27 +4,45 @@ teaser:
A storage class for entities and aliases of a specific knowledge base
(ontology)
tag: class
source: spacy/kb.pyx
source: spacy/kb/kb.pyx
new: 2.2
---
The `KnowledgeBase` object provides a method to generate
[`Candidate`](/api/kb/#candidate) objects, which are plausible external
The `KnowledgeBase` object is an abstract class providing a method to generate
[`Candidate`](/api/kb#candidate) objects, which are plausible external
identifiers given a certain textual mention. Each such `Candidate` holds
information from the relevant KB entities, such as its frequency in text and
possible aliases. Each entity in the knowledge base also has a pretrained entity
vector of a fixed size.
Beyond that, `KnowledgeBase` classes have to implement a number of utility
functions called by the [`EntityLinker`](/api/entitylinker) component.
<Infobox variant="warning">
This class was not abstract up to spaCy version 3.5. The `KnowledgeBase`
implementation up to that point is available as `InMemoryLookupKB` from 3.5
onwards.
</Infobox>
## KnowledgeBase.\_\_init\_\_ {#init tag="method"}
Create the knowledge base.
`KnowledgeBase` is an abstract class and cannot be instantiated. Its child
classes should call `__init__()` to set up some necessary attributes.
> #### Example
>
> ```python
> from spacy.kb import KnowledgeBase
> from spacy.vocab import Vocab
>
> class FullyImplementedKB(KnowledgeBase):
> def __init__(self, vocab: Vocab, entity_vector_length: int):
> super().__init__(vocab, entity_vector_length)
> ...
> vocab = nlp.vocab
> kb = KnowledgeBase(vocab=vocab, entity_vector_length=64)
> kb = FullyImplementedKB(vocab=vocab, entity_vector_length=64)
> ```
| Name | Description |
@ -40,133 +58,66 @@ The length of the fixed-size entity vectors in the knowledge base.
| ----------- | ------------------------------------------------ |
| **RETURNS** | Length of the fixed-size entity vectors. ~~int~~ |
## KnowledgeBase.add_entity {#add_entity tag="method"}
## KnowledgeBase.get_candidates {#get_candidates tag="method"}
Add an entity to the knowledge base, specifying its corpus frequency and entity
vector, which should be of length
[`entity_vector_length`](/api/kb#entity_vector_length).
Given a certain textual mention as input, retrieve a list of candidate entities
of type [`Candidate`](/api/kb#candidate).
> #### Example
>
> ```python
> kb.add_entity(entity="Q42", freq=32, entity_vector=vector1)
> kb.add_entity(entity="Q463035", freq=111, entity_vector=vector2)
> from spacy.lang.en import English
> nlp = English()
> doc = nlp("Douglas Adams wrote 'The Hitchhiker's Guide to the Galaxy'.")
> candidates = kb.get_candidates(doc[0:2])
> ```
| Name | Description |
| --------------- | ---------------------------------------------------------- |
| `entity` | The unique entity identifier. ~~str~~ |
| `freq` | The frequency of the entity in a typical corpus. ~~float~~ |
| `entity_vector` | The pretrained vector of the entity. ~~numpy.ndarray~~ |
| Name | Description |
| ----------- | -------------------------------------------------------------------- |
| `mention` | The textual mention or alias. ~~Span~~ |
| **RETURNS** | An iterable of relevant `Candidate` objects. ~~Iterable[Candidate]~~ |
## KnowledgeBase.set_entities {#set_entities tag="method"}
## KnowledgeBase.get_candidates_batch {#get_candidates_batch tag="method"}
Define the full list of entities in the knowledge base, specifying the corpus
frequency and entity vector for each entity.
Same as [`get_candidates()`](/api/kb#get_candidates), but for an arbitrary
number of mentions. The [`EntityLinker`](/api/entitylinker) component will call
`get_candidates_batch()` instead of `get_candidates()`, if the config parameter
`candidates_batch_size` is greater than or equal to 1.
The default implementation of `get_candidates_batch()` executes
`get_candidates()` in a loop. We recommend implementing a more efficient way to
retrieve candidates for multiple mentions at once, if performance is of concern
to you.
> #### Example
>
> ```python
> kb.set_entities(entity_list=["Q42", "Q463035"], freq_list=[32, 111], vector_list=[vector1, vector2])
> from spacy.lang.en import English
> nlp = English()
> doc = nlp("Douglas Adams wrote 'The Hitchhiker's Guide to the Galaxy'.")
> candidates = kb.get_candidates_batch((doc[0:2], doc[3:]))
> ```
| Name | Description |
| ------------- | ---------------------------------------------------------------- |
| `entity_list` | List of unique entity identifiers. ~~Iterable[Union[str, int]]~~ |
| `freq_list` | List of entity frequencies. ~~Iterable[int]~~ |
| `vector_list` | List of entity vectors. ~~Iterable[numpy.ndarray]~~ |
## KnowledgeBase.add_alias {#add_alias tag="method"}
Add an alias or mention to the knowledge base, specifying its potential KB
identifiers and their prior probabilities. The entity identifiers should refer
to entities previously added with [`add_entity`](/api/kb#add_entity) or
[`set_entities`](/api/kb#set_entities). The sum of the prior probabilities
should not exceed 1. Note that an empty string can not be used as alias.
> #### Example
>
> ```python
> kb.add_alias(alias="Douglas", entities=["Q42", "Q463035"], probabilities=[0.6, 0.3])
> ```
| Name | Description |
| --------------- | --------------------------------------------------------------------------------- |
| `alias` | The textual mention or alias. Can not be the empty string. ~~str~~ |
| `entities` | The potential entities that the alias may refer to. ~~Iterable[Union[str, int]]~~ |
| `probabilities` | The prior probabilities of each entity. ~~Iterable[float]~~ |
## KnowledgeBase.\_\_len\_\_ {#len tag="method"}
Get the total number of entities in the knowledge base.
> #### Example
>
> ```python
> total_entities = len(kb)
> ```
| Name | Description |
| ----------- | ----------------------------------------------------- |
| **RETURNS** | The number of entities in the knowledge base. ~~int~~ |
## KnowledgeBase.get_entity_strings {#get_entity_strings tag="method"}
Get a list of all entity IDs in the knowledge base.
> #### Example
>
> ```python
> all_entities = kb.get_entity_strings()
> ```
| Name | Description |
| ----------- | --------------------------------------------------------- |
| **RETURNS** | The list of entities in the knowledge base. ~~List[str]~~ |
## KnowledgeBase.get_size_aliases {#get_size_aliases tag="method"}
Get the total number of aliases in the knowledge base.
> #### Example
>
> ```python
> total_aliases = kb.get_size_aliases()
> ```
| Name | Description |
| ----------- | ---------------------------------------------------- |
| **RETURNS** | The number of aliases in the knowledge base. ~~int~~ |
## KnowledgeBase.get_alias_strings {#get_alias_strings tag="method"}
Get a list of all aliases in the knowledge base.
> #### Example
>
> ```python
> all_aliases = kb.get_alias_strings()
> ```
| Name | Description |
| ----------- | -------------------------------------------------------- |
| **RETURNS** | The list of aliases in the knowledge base. ~~List[str]~~ |
| Name | Description |
| ----------- | -------------------------------------------------------------------------------------------- |
| `mentions`  | The textual mentions or aliases. ~~Iterable[Span]~~                                          |
| **RETURNS** | An iterable of iterables of relevant `Candidate` objects. ~~Iterable[Iterable[Candidate]]~~  |
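A hedged sketch of such an override; `BatchedKB` and its lookup strategy are illustrative only and not part of spaCy:

```python
from typing import Iterable

from spacy.kb import Candidate, InMemoryLookupKB
from spacy.tokens import Span


class BatchedKB(InMemoryLookupKB):
    """Illustrative subclass that resolves all mentions in one pass."""

    def get_candidates_batch(
        self, mentions: Iterable[Span]
    ) -> Iterable[Iterable[Candidate]]:
        # Collect every mention text up front so a real backend could be
        # queried once; here we simply reuse the in-memory alias lookup.
        texts = [mention.text for mention in mentions]
        return [self.get_alias_candidates(text) for text in texts]
```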
## KnowledgeBase.get_alias_candidates {#get_alias_candidates tag="method"}
Given a certain textual mention as input, retrieve a list of candidate entities
of type [`Candidate`](/api/kb/#candidate).
<Infobox variant="warning">
This method is _not_ available from spaCy 3.5 onwards.
</Infobox>
> #### Example
>
> ```python
> candidates = kb.get_alias_candidates("Douglas")
> ```
| Name | Description |
| ----------- | ------------------------------------------------------------- |
| `alias` | The textual mention or alias. ~~str~~ |
| **RETURNS** | The list of relevant `Candidate` objects. ~~List[Candidate]~~ |
From spaCy 3.5 on `KnowledgeBase` is an abstract class (with
[`InMemoryLookupKB`](/api/kb_in_memory) being a drop-in replacement) to allow
more flexibility in customizing knowledge bases. Some of its methods were moved
to [`InMemoryLookupKB`](/api/kb_in_memory) during this refactoring, one of those
being `get_alias_candidates()`. This method is now available as
[`InMemoryLookupKB.get_alias_candidates()`](/api/kb_in_memory#get_alias_candidates).
Note: [`InMemoryLookupKB.get_candidates()`](/api/kb_in_memory#get_candidates)
defaults to
[`InMemoryLookupKB.get_alias_candidates()`](/api/kb_in_memory#get_alias_candidates).
## KnowledgeBase.get_vector {#get_vector tag="method"}
@ -178,27 +129,30 @@ Given a certain entity ID, retrieve its pretrained entity vector.
> vector = kb.get_vector("Q42")
> ```
| Name | Description |
| ----------- | ------------------------------------ |
| `entity` | The entity ID. ~~str~~ |
| **RETURNS** | The entity vector. ~~numpy.ndarray~~ |
| Name | Description |
| ----------- | -------------------------------------- |
| `entity` | The entity ID. ~~str~~ |
| **RETURNS** | The entity vector. ~~Iterable[float]~~ |
## KnowledgeBase.get_prior_prob {#get_prior_prob tag="method"}
## KnowledgeBase.get_vectors {#get_vectors tag="method"}
Given a certain entity ID and a certain textual mention, retrieve the prior
probability of the fact that the mention links to the entity ID.
Same as [`get_vector()`](/api/kb#get_vector), but for an arbitrary number of
entity IDs.
The default implementation of `get_vectors()` executes `get_vector()` in a loop.
We recommend implementing a more efficient way to retrieve vectors for multiple
entities at once, if performance is of concern to you.
> #### Example
>
> ```python
> probability = kb.get_prior_prob("Q42", "Douglas")
> vectors = kb.get_vectors(("Q42", "Q3107329"))
> ```
| Name | Description |
| ----------- | ------------------------------------------------------------------------- |
| `entity` | The entity ID. ~~str~~ |
| `alias` | The textual mention or alias. ~~str~~ |
| **RETURNS** | The prior probability of the `alias` referring to the `entity`. ~~float~~ |
| Name | Description |
| ----------- | --------------------------------------------------------- |
| `entities` | The entity IDs. ~~Iterable[str]~~ |
| **RETURNS** | The entity vectors. ~~Iterable[Iterable[numpy.ndarray]]~~ |
## KnowledgeBase.to_disk {#to_disk tag="method"}
@ -207,12 +161,13 @@ Save the current state of the knowledge base to a directory.
> #### Example
>
> ```python
> kb.to_disk(loc)
> kb.to_disk(path)
> ```
| Name | Description |
| ----- | ------------------------------------------------------------------------------------------------------------------------------------------ |
| `loc` | A path to a directory, which will be created if it doesn't exist. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ |
| Name | Description |
| --------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
| `path` | A path to a directory, which will be created if it doesn't exist. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ |
| `exclude` | List of components to exclude. ~~Iterable[str]~~ |
## KnowledgeBase.from_disk {#from_disk tag="method"}
@ -222,16 +177,16 @@ Restore the state of the knowledge base from a given directory. Note that the
> #### Example
>
> ```python
> from spacy.kb import KnowledgeBase
> from spacy.vocab import Vocab
> vocab = Vocab().from_disk("/path/to/vocab")
> kb = KnowledgeBase(vocab=vocab, entity_vector_length=64)
> kb = FullyImplementedKB(vocab=vocab, entity_vector_length=64)
> kb.from_disk("/path/to/kb")
> ```
| Name | Description |
| ----------- | ----------------------------------------------------------------------------------------------- |
| `loc` | A path to a directory. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ |
| `exclude` | List of components to exclude. ~~Iterable[str]~~ |
| **RETURNS** | The modified `KnowledgeBase` object. ~~KnowledgeBase~~ |
## Candidate {#candidate tag="class"}

View File

@ -0,0 +1,302 @@
---
title: InMemoryLookupKB
teaser:
The default implementation of the KnowledgeBase interface. Stores all
information in-memory.
tag: class
source: spacy/kb/kb_in_memory.pyx
new: 3.5
---
The `InMemoryLookupKB` class inherits from [`KnowledgeBase`](/api/kb) and
implements all of its methods. It stores all KB data in memory and generates
[`Candidate`](/api/kb#candidate) objects by exactly matching mentions with
entity names. It's highly optimized for both a low memory footprint and speed of
retrieval.
## InMemoryLookupKB.\_\_init\_\_ {#init tag="method"}
Create the knowledge base.
> #### Example
>
> ```python
> from spacy.kb import InMemoryLookupKB
> vocab = nlp.vocab
> kb = InMemoryLookupKB(vocab=vocab, entity_vector_length=64)
> ```
| Name | Description |
| ---------------------- | ------------------------------------------------ |
| `vocab` | The shared vocabulary. ~~Vocab~~ |
| `entity_vector_length` | Length of the fixed-size entity vectors. ~~int~~ |
## InMemoryLookupKB.entity_vector_length {#entity_vector_length tag="property"}
The length of the fixed-size entity vectors in the knowledge base.
| Name | Description |
| ----------- | ------------------------------------------------ |
| **RETURNS** | Length of the fixed-size entity vectors. ~~int~~ |
## InMemoryLookupKB.add_entity {#add_entity tag="method"}
Add an entity to the knowledge base, specifying its corpus frequency and entity
vector, which should be of length
[`entity_vector_length`](/api/kb_in_memory#entity_vector_length).
> #### Example
>
> ```python
> kb.add_entity(entity="Q42", freq=32, entity_vector=vector1)
> kb.add_entity(entity="Q463035", freq=111, entity_vector=vector2)
> ```
| Name | Description |
| --------------- | ---------------------------------------------------------- |
| `entity` | The unique entity identifier. ~~str~~ |
| `freq` | The frequency of the entity in a typical corpus. ~~float~~ |
| `entity_vector` | The pretrained vector of the entity. ~~numpy.ndarray~~ |
## InMemoryLookupKB.set_entities {#set_entities tag="method"}
Define the full list of entities in the knowledge base, specifying the corpus
frequency and entity vector for each entity.
> #### Example
>
> ```python
> kb.set_entities(entity_list=["Q42", "Q463035"], freq_list=[32, 111], vector_list=[vector1, vector2])
> ```
| Name | Description |
| ------------- | ---------------------------------------------------------------- |
| `entity_list` | List of unique entity identifiers. ~~Iterable[Union[str, int]]~~ |
| `freq_list` | List of entity frequencies. ~~Iterable[int]~~ |
| `vector_list` | List of entity vectors. ~~Iterable[numpy.ndarray]~~ |
## InMemoryLookupKB.add_alias {#add_alias tag="method"}
Add an alias or mention to the knowledge base, specifying its potential KB
identifiers and their prior probabilities. The entity identifiers should refer
to entities previously added with [`add_entity`](/api/kb_in_memory#add_entity)
or [`set_entities`](/api/kb_in_memory#set_entities). The sum of the prior
probabilities should not exceed 1. Note that an empty string cannot be used as
an alias.
> #### Example
>
> ```python
> kb.add_alias(alias="Douglas", entities=["Q42", "Q463035"], probabilities=[0.6, 0.3])
> ```
| Name | Description |
| --------------- | --------------------------------------------------------------------------------- |
| `alias`         | The textual mention or alias. Cannot be the empty string. ~~str~~                  |
| `entities` | The potential entities that the alias may refer to. ~~Iterable[Union[str, int]]~~ |
| `probabilities` | The prior probabilities of each entity. ~~Iterable[float]~~ |
## InMemoryLookupKB.\_\_len\_\_ {#len tag="method"}
Get the total number of entities in the knowledge base.
> #### Example
>
> ```python
> total_entities = len(kb)
> ```
| Name | Description |
| ----------- | ----------------------------------------------------- |
| **RETURNS** | The number of entities in the knowledge base. ~~int~~ |
## InMemoryLookupKB.get_entity_strings {#get_entity_strings tag="method"}
Get a list of all entity IDs in the knowledge base.
> #### Example
>
> ```python
> all_entities = kb.get_entity_strings()
> ```
| Name | Description |
| ----------- | --------------------------------------------------------- |
| **RETURNS** | The list of entities in the knowledge base. ~~List[str]~~ |
## InMemoryLookupKB.get_size_aliases {#get_size_aliases tag="method"}
Get the total number of aliases in the knowledge base.
> #### Example
>
> ```python
> total_aliases = kb.get_size_aliases()
> ```
| Name | Description |
| ----------- | ---------------------------------------------------- |
| **RETURNS** | The number of aliases in the knowledge base. ~~int~~ |
## InMemoryLookupKB.get_alias_strings {#get_alias_strings tag="method"}
Get a list of all aliases in the knowledge base.
> #### Example
>
> ```python
> all_aliases = kb.get_alias_strings()
> ```
| Name | Description |
| ----------- | -------------------------------------------------------- |
| **RETURNS** | The list of aliases in the knowledge base. ~~List[str]~~ |
## InMemoryLookupKB.get_candidates {#get_candidates tag="method"}
Given a certain textual mention as input, retrieve a list of candidate entities
of type [`Candidate`](/api/kb#candidate). Wraps
[`get_alias_candidates()`](/api/kb_in_memory#get_alias_candidates).
> #### Example
>
> ```python
> from spacy.lang.en import English
> nlp = English()
> doc = nlp("Douglas Adams wrote 'The Hitchhiker's Guide to the Galaxy'.")
> candidates = kb.get_candidates(doc[0:2])
> ```
| Name | Description |
| ----------- | -------------------------------------------------------------------- |
| `mention` | The textual mention or alias. ~~Span~~ |
| **RETURNS** | An iterable of relevant `Candidate` objects. ~~Iterable[Candidate]~~ |
## InMemoryLookupKB.get_candidates_batch {#get_candidates_batch tag="method"}
Same as [`get_candidates()`](/api/kb_in_memory#get_candidates), but for an
arbitrary number of mentions. The [`EntityLinker`](/api/entitylinker) component
will call `get_candidates_batch()` instead of `get_candidates()`, if the config
parameter `candidates_batch_size` is greater than or equal to 1.
The default implementation of `get_candidates_batch()` executes
`get_candidates()` in a loop. We recommend implementing a more efficient way to
retrieve candidates for multiple mentions at once, if performance is of concern
to you.
> #### Example
>
> ```python
> from spacy.lang.en import English
> nlp = English()
> doc = nlp("Douglas Adams wrote 'The Hitchhiker's Guide to the Galaxy'.")
> candidates = kb.get_candidates_batch((doc[0:2], doc[3:]))
> ```
| Name | Description |
| ----------- | -------------------------------------------------------------------------------------------- |
| `mentions`  | The textual mentions or aliases. ~~Iterable[Span]~~                                          |
| **RETURNS** | An iterable of iterables of relevant `Candidate` objects. ~~Iterable[Iterable[Candidate]]~~  |
## InMemoryLookupKB.get_alias_candidates {#get_alias_candidates tag="method"}
Given a certain textual mention as input, retrieve a list of candidate entities
of type [`Candidate`](/api/kb#candidate).
> #### Example
>
> ```python
> candidates = kb.get_alias_candidates("Douglas")
> ```
| Name | Description |
| ----------- | ------------------------------------------------------------- |
| `alias` | The textual mention or alias. ~~str~~ |
| **RETURNS** | The list of relevant `Candidate` objects. ~~List[Candidate]~~ |
## InMemoryLookupKB.get_vector {#get_vector tag="method"}
Given a certain entity ID, retrieve its pretrained entity vector.
> #### Example
>
> ```python
> vector = kb.get_vector("Q42")
> ```
| Name | Description |
| ----------- | ------------------------------------ |
| `entity` | The entity ID. ~~str~~ |
| **RETURNS** | The entity vector. ~~numpy.ndarray~~ |
## InMemoryLookupKB.get_vectors {#get_vectors tag="method"}
Same as [`get_vector()`](/api/kb_in_memory#get_vector), but for an arbitrary
number of entity IDs.
The default implementation of `get_vectors()` executes `get_vector()` in a loop.
We recommend implementing a more efficient way to retrieve vectors for multiple
entities at once, if performance is of concern to you.
> #### Example
>
> ```python
> vectors = kb.get_vectors(("Q42", "Q3107329"))
> ```
| Name | Description |
| ----------- | --------------------------------------------------------- |
| `entities` | The entity IDs. ~~Iterable[str]~~ |
| **RETURNS** | The entity vectors. ~~Iterable[Iterable[numpy.ndarray]]~~ |
## InMemoryLookupKB.get_prior_prob {#get_prior_prob tag="method"}
Given a certain entity ID and a certain textual mention, retrieve the prior
probability of the fact that the mention links to the entity ID.
> #### Example
>
> ```python
> probability = kb.get_prior_prob("Q42", "Douglas")
> ```
| Name | Description |
| ----------- | ------------------------------------------------------------------------- |
| `entity` | The entity ID. ~~str~~ |
| `alias` | The textual mention or alias. ~~str~~ |
| **RETURNS** | The prior probability of the `alias` referring to the `entity`. ~~float~~ |
## InMemoryLookupKB.to_disk {#to_disk tag="method"}
Save the current state of the knowledge base to a directory.
> #### Example
>
> ```python
> kb.to_disk(path)
> ```
| Name | Description |
| --------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
| `path` | A path to a directory, which will be created if it doesn't exist. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ |
| `exclude` | List of components to exclude. ~~Iterable[str]~~ |
## InMemoryLookupKB.from_disk {#from_disk tag="method"}
Restore the state of the knowledge base from a given directory. Note that the
[`Vocab`](/api/vocab) should also be the same as the one used to create the KB.
> #### Example
>
> ```python
> from spacy.vocab import Vocab
> vocab = Vocab().from_disk("/path/to/vocab")
> kb = FullyImplementedKB(vocab=vocab, entity_vector_length=64)
> kb.from_disk("/path/to/kb")
> ```
| Name | Description |
| ----------- | ----------------------------------------------------------------------------------------------- |
| `loc` | A path to a directory. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ |
| `exclude` | List of components to exclude. ~~Iterable[str]~~ |
| **RETURNS** | The modified `KnowledgeBase` object. ~~KnowledgeBase~~ |
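To tie the methods documented on this page together, here is a small hedged sketch (entity IDs, frequencies and vectors are invented) that builds a KB in memory and queries it:

```python
import spacy
from spacy.kb import InMemoryLookupKB

nlp = spacy.blank("en")
kb = InMemoryLookupKB(vocab=nlp.vocab, entity_vector_length=3)

kb.add_entity(entity="Q42", freq=32, entity_vector=[1.0, 2.0, 3.0])
kb.add_entity(entity="Q463035", freq=111, entity_vector=[3.0, 2.0, 1.0])
# Prior probabilities for a single alias should not sum to more than 1.
kb.add_alias(alias="Douglas", entities=["Q42", "Q463035"], probabilities=[0.6, 0.3])

assert len(kb) == 2
for candidate in kb.get_alias_candidates("Douglas"):
    print(candidate.entity_, candidate.prior_prob)
```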

View File

@ -63,18 +63,18 @@ spaCy loads a model under the hood based on its
> nlp = Language.from_config(config)
> ```
| Name | Description |
| ------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `config` | The loaded config. ~~Union[Dict[str, Any], Config]~~ |
| _keyword-only_ | |
| `vocab` | A `Vocab` object. If `True`, a vocab is created using the default language data settings. ~~Vocab~~ |
| `disable` | Name(s) of pipeline component(s) to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [`nlp.enable_pipe`](/api/language#enable_pipe). ~~Union[str, Iterable[str]]~~ |
| `enable` <Tag variant="new">3.4</Tag> | Name(s) of pipeline component(s) to [enable](/usage/processing-pipelines#disabling). All other pipes will be disabled, but can be enabled again using [`nlp.enable_pipe`](/api/language#enable_pipe). ~~Union[str, Iterable[str]]~~ |
| `exclude` | Name(s) of pipeline component(s) to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~Union[str, Iterable[str]]~~ |
| `meta` | [Meta data](/api/data-formats#meta) overrides. ~~Dict[str, Any]~~ |
| `auto_fill` | Whether to automatically fill in missing values in the config, based on defaults and function argument annotations. Defaults to `True`. ~~bool~~ |
| `validate` | Whether to validate the component config and arguments against the types expected by the factory. Defaults to `True`. ~~bool~~ |
| **RETURNS** | The initialized object. ~~Language~~ |
| Name | Description |
| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `config` | The loaded config. ~~Union[Dict[str, Any], Config]~~ |
| _keyword-only_ | |
| `vocab` | A `Vocab` object. If `True`, a vocab is created using the default language data settings. ~~Vocab~~ |
| `disable` | Name(s) of pipeline component(s) to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [nlp.enable_pipe](/api/language#enable_pipe). Is merged with the config entry `nlp.disabled`. ~~Union[str, Iterable[str]]~~ |
| `enable` <Tag variant="new">3.4</Tag> | Name(s) of pipeline component(s) to [enable](/usage/processing-pipelines#disabling). All other pipes will be disabled, but can be enabled again using [nlp.enable_pipe](/api/language#enable_pipe). ~~Union[str, Iterable[str]]~~ |
| `exclude` | Name(s) of pipeline component(s) to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~Union[str, Iterable[str]]~~ |
| `meta` | [Meta data](/api/data-formats#meta) overrides. ~~Dict[str, Any]~~ |
| `auto_fill` | Whether to automatically fill in missing values in the config, based on defaults and function argument annotations. Defaults to `True`. ~~bool~~ |
| `validate` | Whether to validate the component config and arguments against the types expected by the factory. Defaults to `True`. ~~bool~~ |
| **RETURNS** | The initialized object. ~~Language~~ |
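The note that `disable` is merged with the config entry `nlp.disabled` also applies when loading a saved pipeline with `spacy.load`, as exercised by the updated serialization test earlier in this diff. A hedged sketch (the package name and its config contents are assumed):

```python
import spacy

# Assumes a pipeline whose config already lists "tagger" under nlp.disabled.
nlp = spacy.load("my_pipeline", disable=["ner"])

# Both the component disabled in the config and the one passed at load time
# are loaded but not run; re-enable one with nlp.enable_pipe("ner").
assert set(nlp.disabled) == {"ner", "tagger"}
```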
## Language.component {#component tag="classmethod" new="3"}
@ -198,16 +198,16 @@ tokenization is skipped but the rest of the pipeline is run.
> assert doc.has_annotation("DEP")
> ```
| Name | Description |
| ------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `texts` | A sequence of strings (or `Doc` objects). ~~Iterable[Union[str, Doc]]~~ |
| _keyword-only_ | |
| `as_tuples` | If set to `True`, inputs should be a sequence of `(text, context)` tuples. Output will then be a sequence of `(doc, context)` tuples. Defaults to `False`. ~~bool~~ |
| `batch_size` | The number of texts to buffer. ~~Optional[int]~~ |
| `disable` | Names of pipeline components to [disable](/usage/processing-pipelines#disabling). ~~List[str]~~ |
| `component_cfg` | Optional dictionary of keyword arguments for components, keyed by component names. Defaults to `None`. ~~Optional[Dict[str, Dict[str, Any]]]~~ |
| `n_process` <Tag variant="new">2.2.2</Tag> | Number of processors to use. Defaults to `1`. ~~int~~ |
| **YIELDS** | Documents in the order of the original text. ~~Doc~~ |
| Name | Description |
| --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `texts` | A sequence of strings (or `Doc` objects). ~~Iterable[Union[str, Doc]]~~ |
| _keyword-only_ | |
| `as_tuples` | If set to `True`, inputs should be a sequence of `(text, context)` tuples. Output will then be a sequence of `(doc, context)` tuples. Defaults to `False`. ~~bool~~ |
| `batch_size` | The number of texts to buffer. ~~Optional[int]~~ |
| `disable` | Names of pipeline components to [disable](/usage/processing-pipelines#disabling). ~~List[str]~~ |
| `component_cfg` | Optional dictionary of keyword arguments for components, keyed by component names. Defaults to `None`. ~~Optional[Dict[str, Dict[str, Any]]]~~ |
| `n_process` | Number of processors to use. Defaults to `1`. ~~int~~ |
| **YIELDS** | Documents in the order of the original text. ~~Doc~~ |
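
For instance, passing context along with each text is a small sketch away (assuming a trained pipeline such as `en_core_web_sm` is installed):

```python
import spacy

nlp = spacy.load("en_core_web_sm")
data = [("This is a text.", {"id": 1}), ("Another text.", {"id": 2})]

# With as_tuples=True, each input is a (text, context) tuple and each result
# is a (doc, context) tuple, yielded in the order of the original texts.
for doc, context in nlp.pipe(data, as_tuples=True, batch_size=2):
    print(context["id"], doc[0].text)
```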
## Language.set_error_handler {#set_error_handler tag="method" new="3"}
@@ -1030,21 +1030,21 @@ details.
## Attributes {#attributes}
| Name | Description |
| --------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- |
| `vocab` | A container for the lexical types. ~~Vocab~~ |
| `tokenizer` | The tokenizer. ~~Tokenizer~~ |
| `make_doc` | Callable that takes a string and returns a `Doc`. ~~Callable[[str], Doc]~~ |
| `pipeline` | List of `(name, component)` tuples describing the current processing pipeline, in order. ~~List[Tuple[str, Callable[[Doc], Doc]]]~~ |
| `pipe_names` <Tag variant="new">2</Tag> | List of pipeline component names, in order. ~~List[str]~~ |
| `pipe_labels` <Tag variant="new">2.2</Tag> | List of labels set by the pipeline components, if available, keyed by component name. ~~Dict[str, List[str]]~~ |
| `pipe_factories` <Tag variant="new">2.2</Tag> | Dictionary of pipeline component names, mapped to their factory names. ~~Dict[str, str]~~ |
| `factories` | All available factory functions, keyed by name. ~~Dict[str, Callable[[...], Callable[[Doc], Doc]]]~~ |
| `factory_names` <Tag variant="new">3</Tag> | List of all available factory names. ~~List[str]~~ |
| `components` <Tag variant="new">3</Tag> | List of all available `(name, component)` tuples, including components that are currently disabled. ~~List[Tuple[str, Callable[[Doc], Doc]]]~~ |
| `component_names` <Tag variant="new">3</Tag> | List of all available component names, including components that are currently disabled. ~~List[str]~~ |
| `disabled` <Tag variant="new">3</Tag> | Names of components that are currently disabled and don't run as part of the pipeline. ~~List[str]~~ |
| `path` <Tag variant="new">2</Tag> | Path to the pipeline data directory, if a pipeline is loaded from a path or package. Otherwise `None`. ~~Optional[Path]~~ |
| Name | Description |
| -------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- |
| `vocab` | A container for the lexical types. ~~Vocab~~ |
| `tokenizer` | The tokenizer. ~~Tokenizer~~ |
| `make_doc` | Callable that takes a string and returns a `Doc`. ~~Callable[[str], Doc]~~ |
| `pipeline` | List of `(name, component)` tuples describing the current processing pipeline, in order. ~~List[Tuple[str, Callable[[Doc], Doc]]]~~ |
| `pipe_names` | List of pipeline component names, in order. ~~List[str]~~ |
| `pipe_labels` | List of labels set by the pipeline components, if available, keyed by component name. ~~Dict[str, List[str]]~~ |
| `pipe_factories` | Dictionary of pipeline component names, mapped to their factory names. ~~Dict[str, str]~~ |
| `factories` | All available factory functions, keyed by name. ~~Dict[str, Callable[[...], Callable[[Doc], Doc]]]~~ |
| `factory_names` <Tag variant="new">3</Tag> | List of all available factory names. ~~List[str]~~ |
| `components` <Tag variant="new">3</Tag> | List of all available `(name, component)` tuples, including components that are currently disabled. ~~List[Tuple[str, Callable[[Doc], Doc]]]~~ |
| `component_names` <Tag variant="new">3</Tag> | List of all available component names, including components that are currently disabled. ~~List[str]~~ |
| `disabled` <Tag variant="new">3</Tag> | Names of components that are currently disabled and don't run as part of the pipeline. ~~List[str]~~ |
| `path` | Path to the pipeline data directory, if a pipeline is loaded from a path or package. Otherwise `None`. ~~Optional[Path]~~ |
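
To get a feel for these attributes, here is a minimal sketch that inspects a loaded pipeline (assuming `en_core_web_sm` is installed):

```python
import spacy

nlp = spacy.load("en_core_web_sm", disable=["lemmatizer"])

print(nlp.pipe_names)       # components that will run, in order
print(nlp.component_names)  # all components, including disabled ones
print(nlp.disabled)         # ["lemmatizer"]
print(nlp.path)             # where the pipeline package is installed
```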
## Class attributes {#class-attributes}

View File

@@ -70,7 +70,7 @@ lemmatizer is available. The lemmatizer modes `rule` and `pos_lookup` require
[`token.pos`](/api/token) from a previous pipeline component (see example
pipeline configurations in the
[pretrained pipeline design details](/models#design-cnn)) or rely on third-party
libraries (`pymorphy2`).
libraries (`pymorphy3`).
| Language | Default Mode |
| -------- | ------------ |
@@ -86,9 +86,9 @@ libraries (`pymorphy2`).
| `nb` | `rule` |
| `nl` | `rule` |
| `pl` | `pos_lookup` |
| `ru` | `pymorphy2` |
| `ru` | `pymorphy3` |
| `sv` | `rule` |
| `uk` | `pymorphy2` |
| `uk` | `pymorphy3` |
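
As a rough sketch, the mode can also be set explicitly when adding the lemmatizer to a blank pipeline. This assumes the third-party `pymorphy3` package is installed; as noted above, lemmatization quality depends on `token.pos` being set by a preceding component:

```python
import spacy

nlp = spacy.blank("ru")
# Select the pymorphy3 mode explicitly (requires the pymorphy3 package).
nlp.add_pipe("lemmatizer", config={"mode": "pymorphy3"})
doc = nlp("Это небольшой пример текста")
print([token.lemma_ for token in doc])
```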
```python
%%GITHUB_SPACY/spacy/pipeline/lemmatizer.py
```

View File

@@ -121,44 +121,44 @@ The L2 norm of the lexeme's vector representation.
## Attributes {#attributes}
| Name | Description |
| -------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `vocab` | The lexeme's vocabulary. ~~Vocab~~ |
| `text` | Verbatim text content. ~~str~~ |
| `orth` | ID of the verbatim text content. ~~int~~ |
| `orth_` | Verbatim text content (identical to `Lexeme.text`). Exists mostly for consistency with the other attributes. ~~str~~ |
| `rank` | Sequential ID of the lexeme's lexical type, used to index into tables, e.g. for word vectors. ~~int~~ |
| `flags` | Container of the lexeme's binary flags. ~~int~~ |
| `norm` | The lexeme's norm, i.e. a normalized form of the lexeme text. ~~int~~ |
| `norm_` | The lexeme's norm, i.e. a normalized form of the lexeme text. ~~str~~ |
| `lower` | Lowercase form of the word. ~~int~~ |
| `lower_` | Lowercase form of the word. ~~str~~ |
| `shape` | Transform of the word's string, to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example,`"Xxxx"`or`"dd"`. ~~int~~ |
| `shape_` | Transform of the word's string, to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example,`"Xxxx"`or`"dd"`. ~~str~~ |
| `prefix` | Length-N substring from the start of the word. Defaults to `N=1`. ~~int~~ |
| `prefix_` | Length-N substring from the start of the word. Defaults to `N=1`. ~~str~~ |
| `suffix` | Length-N substring from the end of the word. Defaults to `N=3`. ~~int~~ |
| `suffix_` | Length-N substring from the start of the word. Defaults to `N=3`. ~~str~~ |
| `is_alpha` | Does the lexeme consist of alphabetic characters? Equivalent to `lexeme.text.isalpha()`. ~~bool~~ |
| `is_ascii` | Does the lexeme consist of ASCII characters? Equivalent to `[any(ord(c) >= 128 for c in lexeme.text)]`. ~~bool~~ |
| `is_digit` | Does the lexeme consist of digits? Equivalent to `lexeme.text.isdigit()`. ~~bool~~ |
| `is_lower` | Is the lexeme in lowercase? Equivalent to `lexeme.text.islower()`. ~~bool~~ |
| `is_upper` | Is the lexeme in uppercase? Equivalent to `lexeme.text.isupper()`. ~~bool~~ |
| `is_title` | Is the lexeme in titlecase? Equivalent to `lexeme.text.istitle()`. ~~bool~~ |
| `is_punct` | Is the lexeme punctuation? ~~bool~~ |
| `is_left_punct` | Is the lexeme a left punctuation mark, e.g. `(`? ~~bool~~ |
| `is_right_punct` | Is the lexeme a right punctuation mark, e.g. `)`? ~~bool~~ |
| `is_space` | Does the lexeme consist of whitespace characters? Equivalent to `lexeme.text.isspace()`. ~~bool~~ |
| `is_bracket` | Is the lexeme a bracket? ~~bool~~ |
| `is_quote` | Is the lexeme a quotation mark? ~~bool~~ |
| `is_currency` <Tag variant="new">2.0.8</Tag> | Is the lexeme a currency symbol? ~~bool~~ |
| `like_url` | Does the lexeme resemble a URL? ~~bool~~ |
| `like_num` | Does the lexeme represent a number? e.g. "10.9", "10", "ten", etc. ~~bool~~ |
| `like_email` | Does the lexeme resemble an email address? ~~bool~~ |
| `is_oov` | Is the lexeme out-of-vocabulary (i.e. does it not have a word vector)? ~~bool~~ |
| `is_stop` | Is the lexeme part of a "stop list"? ~~bool~~ |
| `lang` | Language of the parent vocabulary. ~~int~~ |
| `lang_` | Language of the parent vocabulary. ~~str~~ |
| `prob` | Smoothed log probability estimate of the lexeme's word type (context-independent entry in the vocabulary). ~~float~~ |
| `cluster` | Brown cluster ID. ~~int~~ |
| `sentiment` | A scalar value indicating the positivity or negativity of the lexeme. ~~float~~ |
| Name | Description |
| ---------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `vocab` | The lexeme's vocabulary. ~~Vocab~~ |
| `text` | Verbatim text content. ~~str~~ |
| `orth` | ID of the verbatim text content. ~~int~~ |
| `orth_` | Verbatim text content (identical to `Lexeme.text`). Exists mostly for consistency with the other attributes. ~~str~~ |
| `rank` | Sequential ID of the lexeme's lexical type, used to index into tables, e.g. for word vectors. ~~int~~ |
| `flags` | Container of the lexeme's binary flags. ~~int~~ |
| `norm` | The lexeme's norm, i.e. a normalized form of the lexeme text. ~~int~~ |
| `norm_` | The lexeme's norm, i.e. a normalized form of the lexeme text. ~~str~~ |
| `lower` | Lowercase form of the word. ~~int~~ |
| `lower_` | Lowercase form of the word. ~~str~~ |
| `shape`          | Transform of the word's string to show orthographic features. Alphabetic characters are replaced by `x` or `X`, numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example, `"Xxxx"` or `"dd"`. ~~int~~ |
| `shape_`         | Transform of the word's string to show orthographic features. Alphabetic characters are replaced by `x` or `X`, numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example, `"Xxxx"` or `"dd"`. ~~str~~ |
| `prefix` | Length-N substring from the start of the word. Defaults to `N=1`. ~~int~~ |
| `prefix_` | Length-N substring from the start of the word. Defaults to `N=1`. ~~str~~ |
| `suffix` | Length-N substring from the end of the word. Defaults to `N=3`. ~~int~~ |
| `suffix_`        | Length-N substring from the end of the word. Defaults to `N=3`. ~~str~~ |
| `is_alpha` | Does the lexeme consist of alphabetic characters? Equivalent to `lexeme.text.isalpha()`. ~~bool~~ |
| `is_ascii`       | Does the lexeme consist of ASCII characters? Equivalent to `all(ord(c) < 128 for c in lexeme.text)`. ~~bool~~ |
| `is_digit` | Does the lexeme consist of digits? Equivalent to `lexeme.text.isdigit()`. ~~bool~~ |
| `is_lower` | Is the lexeme in lowercase? Equivalent to `lexeme.text.islower()`. ~~bool~~ |
| `is_upper` | Is the lexeme in uppercase? Equivalent to `lexeme.text.isupper()`. ~~bool~~ |
| `is_title` | Is the lexeme in titlecase? Equivalent to `lexeme.text.istitle()`. ~~bool~~ |
| `is_punct` | Is the lexeme punctuation? ~~bool~~ |
| `is_left_punct` | Is the lexeme a left punctuation mark, e.g. `(`? ~~bool~~ |
| `is_right_punct` | Is the lexeme a right punctuation mark, e.g. `)`? ~~bool~~ |
| `is_space` | Does the lexeme consist of whitespace characters? Equivalent to `lexeme.text.isspace()`. ~~bool~~ |
| `is_bracket` | Is the lexeme a bracket? ~~bool~~ |
| `is_quote` | Is the lexeme a quotation mark? ~~bool~~ |
| `is_currency` | Is the lexeme a currency symbol? ~~bool~~ |
| `like_url` | Does the lexeme resemble a URL? ~~bool~~ |
| `like_num` | Does the lexeme represent a number? e.g. "10.9", "10", "ten", etc. ~~bool~~ |
| `like_email` | Does the lexeme resemble an email address? ~~bool~~ |
| `is_oov` | Is the lexeme out-of-vocabulary (i.e. does it not have a word vector)? ~~bool~~ |
| `is_stop` | Is the lexeme part of a "stop list"? ~~bool~~ |
| `lang` | Language of the parent vocabulary. ~~int~~ |
| `lang_` | Language of the parent vocabulary. ~~str~~ |
| `prob` | Smoothed log probability estimate of the lexeme's word type (context-independent entry in the vocabulary). ~~float~~ |
| `cluster` | Brown cluster ID. ~~int~~ |
| `sentiment` | A scalar value indicating the positivity or negativity of the lexeme. ~~float~~ |
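
For orientation, here is a minimal sketch that reads a few of these attributes from a vocabulary entry (assuming a blank English pipeline):

```python
import spacy

nlp = spacy.blank("en")
lexeme = nlp.vocab["Apple"]

print(lexeme.text, lexeme.orth)      # verbatim text and its hash ID
print(lexeme.lower_, lexeme.shape_)  # "apple", "Xxxxx"
print(lexeme.is_alpha, lexeme.is_title, lexeme.is_digit)
```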

View File

@@ -33,7 +33,7 @@ rule-based matching are:
| Attribute | Description |
| ---------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- |
| `ORTH` | The exact verbatim text of a token. ~~str~~ |
| `TEXT` <Tag variant="new">2.1</Tag> | The exact verbatim text of a token. ~~str~~ |
| `TEXT` | The exact verbatim text of a token. ~~str~~ |
| `NORM` | The normalized form of the token text. ~~str~~ |
| `LOWER` | The lowercase form of the token text. ~~str~~ |
| `LENGTH` | The length of the token text. ~~int~~ |
@@ -48,7 +48,7 @@ rule-based matching are:
| `ENT_IOB` | The IOB part of the token's entity tag. ~~str~~ |
| `ENT_ID` | The token's entity ID (`ent_id`). ~~str~~ |
| `ENT_KB_ID` | The token's entity knowledge base ID (`ent_kb_id`). ~~str~~ |
| `_` <Tag variant="new">2.1</Tag> | Properties in [custom extension attributes](/usage/processing-pipelines#custom-components-attributes). ~~Dict[str, Any]~~ |
| `_` | Properties in [custom extension attributes](/usage/processing-pipelines#custom-components-attributes). ~~Dict[str, Any]~~ |
| `OP` | Operator or quantifier to determine how often to match a token pattern. ~~str~~ |
Operators and quantifiers define **how often** a token pattern should be
@@ -64,7 +64,7 @@ matched:
> ```
| OP | Description |
|---------|------------------------------------------------------------------------|
| ------- | ---------------------------------------------------------------------- |
| `!` | Negate the pattern, by requiring it to match exactly 0 times. |
| `?` | Make the pattern optional, by allowing it to match 0 or 1 times. |
| `+` | Require the pattern to match 1 or more times. |
@@ -109,10 +109,10 @@ string where an integer is expected) or unexpected property names.
> matcher = Matcher(nlp.vocab)
> ```
| Name | Description |
| --------------------------------------- | ----------------------------------------------------------------------------------------------------- |
| `vocab` | The vocabulary object, which must be shared with the documents the matcher will operate on. ~~Vocab~~ |
| `validate` <Tag variant="new">2.1</Tag> | Validate all patterns added to this matcher. ~~bool~~ |
| Name | Description |
| ---------- | ----------------------------------------------------------------------------------------------------- |
| `vocab` | The vocabulary object, which must be shared with the documents the matcher will operate on. ~~Vocab~~ |
| `validate` | Validate all patterns added to this matcher. ~~bool~~ |
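
As a small sketch of the `validate` flag: with validation enabled, malformed patterns are rejected when they are added, rather than failing silently at match time.

```python
import spacy
from spacy.matcher import Matcher

nlp = spacy.blank("en")
matcher = Matcher(nlp.vocab, validate=True)

# A valid pattern: a form of "hello" followed by a punctuation token.
matcher.add("HelloPunct", [[{"LOWER": "hello"}, {"IS_PUNCT": True}]])
# A pattern with a string where an int is expected, e.g. {"LENGTH": "five"},
# would raise a pattern validation error at add time because validate=True.

doc = nlp("Hello!")
print(matcher(doc))  # [(match_id, start, end)]
```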
## Matcher.\_\_call\_\_ {#call tag="method"}

View File

@@ -36,11 +36,11 @@ be shown.
> matcher = PhraseMatcher(nlp.vocab)
> ```
| Name | Description |
| --------------------------------------- | ------------------------------------------------------------------------------------------------------ |
| `vocab` | The vocabulary object, which must be shared with the documents the matcher will operate on. ~~Vocab~~ |
| `attr` <Tag variant="new">2.1</Tag> | The token attribute to match on. Defaults to `ORTH`, i.e. the verbatim token text. ~~Union[int, str]~~ |
| `validate` <Tag variant="new">2.1</Tag> | Validate patterns added to the matcher. ~~bool~~ |
| Name | Description |
| ---------- | ------------------------------------------------------------------------------------------------------ |
| `vocab` | The vocabulary object, which must be shared with the documents the matcher will operate on. ~~Vocab~~ |
| `attr` | The token attribute to match on. Defaults to `ORTH`, i.e. the verbatim token text. ~~Union[int, str]~~ |
| `validate` | Validate patterns added to the matcher. ~~bool~~ |
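
For example, matching on the `LOWER` attribute makes a terminology list case-insensitive (a minimal sketch on a blank pipeline):

```python
import spacy
from spacy.matcher import PhraseMatcher

nlp = spacy.blank("en")
matcher = PhraseMatcher(nlp.vocab, attr="LOWER")
matcher.add("TECH", [nlp.make_doc("Machine Learning"), nlp.make_doc("spaCy")])

doc = nlp("I love machine learning and SPACY.")
print([doc[start:end].text for match_id, start, end in matcher(doc)])
```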
## PhraseMatcher.\_\_call\_\_ {#call tag="method"}

View File

@@ -229,16 +229,17 @@ The reported `{attr}_score` depends on the classification properties:
> print(scores["cats_macro_auc"])
> ```
| Name | Description |
| ---------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- |
| `examples` | The `Example` objects holding both the predictions and the correct gold-standard annotations. ~~Iterable[Example]~~ |
| `attr` | The attribute to score. ~~str~~ |
| _keyword-only_ | |
| `getter` | Defaults to `getattr`. If provided, `getter(doc, attr)` should return the cats for an individual `Doc`. ~~Callable[[Doc, str], Dict[str, float]]~~ |
| labels | The set of possible labels. Defaults to `[]`. ~~Iterable[str]~~ |
| `multi_label` | Whether the attribute allows multiple labels. Defaults to `True`. ~~bool~~ |
| `positive_label` | The positive label for a binary task with exclusive classes. Defaults to `None`. ~~Optional[str]~~ |
| **RETURNS** | A dictionary containing the scores, with inapplicable scores as `None`. ~~Dict[str, Optional[float]]~~ |
| Name | Description |
| ---------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `examples` | The `Example` objects holding both the predictions and the correct gold-standard annotations. ~~Iterable[Example]~~ |
| `attr` | The attribute to score. ~~str~~ |
| _keyword-only_ | |
| `getter` | Defaults to `getattr`. If provided, `getter(doc, attr)` should return the cats for an individual `Doc`. ~~Callable[[Doc, str], Dict[str, float]]~~ |
| `labels`         | The set of possible labels. Defaults to `[]`. ~~Iterable[str]~~ |
| `multi_label` | Whether the attribute allows multiple labels. Defaults to `True`. When set to `False` (exclusive labels), missing gold labels are interpreted as `0.0` and the threshold is set to `0.0`. ~~bool~~ |
| `positive_label` | The positive label for a binary task with exclusive classes. Defaults to `None`. ~~Optional[str]~~ |
| `threshold` | Cutoff to consider a prediction "positive". Defaults to `0.5` for multi-label, and `0.0` (i.e. whatever's highest scoring) otherwise. ~~float~~ |
| **RETURNS** | A dictionary containing the scores, with inapplicable scores as `None`. ~~Dict[str, Optional[float]]~~ |
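
A minimal sketch with a single toy example shows how the scores are computed from `Example` objects (the label names here are purely illustrative):

```python
import spacy
from spacy.scorer import Scorer
from spacy.training import Example

nlp = spacy.blank("en")

predicted = nlp.make_doc("This is great")
predicted.cats = {"POSITIVE": 0.9, "NEGATIVE": 0.1}
reference = nlp.make_doc("This is great")
reference.cats = {"POSITIVE": 1.0, "NEGATIVE": 0.0}

example = Example(predicted, reference)
scores = Scorer.score_cats(
    [example], "cats", labels=["POSITIVE", "NEGATIVE"], multi_label=False
)
print(scores["cats_macro_f"])
```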
## Scorer.score_links {#score_links tag="staticmethod" new="3"}

View File

@@ -186,14 +186,14 @@ the character indices don't map to a valid span.
> assert span.text == "New York"
> ```
| Name | Description |
| ------------------------------------ | ----------------------------------------------------------------------------------------- |
| `start` | The index of the first character of the span. ~~int~~ |
| `end` | The index of the last character after the span. ~~int~~ |
| `label` | A label to attach to the span, e.g. for named entities. ~~Union[int, str]~~ |
| `kb_id` <Tag variant="new">2.2</Tag> | An ID from a knowledge base to capture the meaning of a named entity. ~~Union[int, str]~~ |
| `vector` | A meaning representation of the span. ~~numpy.ndarray[ndim=1, dtype=float32]~~ |
| **RETURNS** | The newly constructed object or `None`. ~~Optional[Span]~~ |
| Name | Description |
| ----------- | ----------------------------------------------------------------------------------------- |
| `start` | The index of the first character of the span. ~~int~~ |
| `end` | The index of the last character after the span. ~~int~~ |
| `label` | A label to attach to the span, e.g. for named entities. ~~Union[int, str]~~ |
| `kb_id` | An ID from a knowledge base to capture the meaning of a named entity. ~~Union[int, str]~~ |
| `vector` | A meaning representation of the span. ~~numpy.ndarray[ndim=1, dtype=float32]~~ |
| **RETURNS** | The newly constructed object or `None`. ~~Optional[Span]~~ |
## Span.similarity {#similarity tag="method" model="vectors"}
@@ -544,26 +544,26 @@ overlaps with will be returned.
## Attributes {#attributes}
| Name | Description |
| --------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- |
| `doc` | The parent document. ~~Doc~~ |
| `tensor` <Tag variant="new">2.1.7</Tag> | The span's slice of the parent `Doc`'s tensor. ~~numpy.ndarray~~ |
| `start` | The token offset for the start of the span. ~~int~~ |
| `end` | The token offset for the end of the span. ~~int~~ |
| `start_char` | The character offset for the start of the span. ~~int~~ |
| `end_char` | The character offset for the end of the span. ~~int~~ |
| `text` | A string representation of the span text. ~~str~~ |
| `text_with_ws` | The text content of the span with a trailing whitespace character if the last token has one. ~~str~~ |
| `orth` | ID of the verbatim text content. ~~int~~ |
| `orth_` | Verbatim text content (identical to `Span.text`). Exists mostly for consistency with the other attributes. ~~str~~ |
| `label` | The hash value of the span's label. ~~int~~ |
| `label_` | The span's label. ~~str~~ |
| `lemma_` | The span's lemma. Equivalent to `"".join(token.text_with_ws for token in span)`. ~~str~~ |
| `kb_id` | The hash value of the knowledge base ID referred to by the span. ~~int~~ |
| `kb_id_` | The knowledge base ID referred to by the span. ~~str~~ |
| `ent_id` | The hash value of the named entity the root token is an instance of. ~~int~~ |
| `ent_id_` | The string ID of the named entity the root token is an instance of. ~~str~~ |
| `id` | The hash value of the span's ID. ~~int~~ |
| `id_` | The span's ID. ~~str~~ |
| `sentiment` | A scalar value indicating the positivity or negativity of the span. ~~float~~ |
| `_` | User space for adding custom [attribute extensions](/usage/processing-pipelines#custom-components-attributes). ~~Underscore~~ |
| Name | Description |
| -------------- | ----------------------------------------------------------------------------------------------------------------------------- |
| `doc` | The parent document. ~~Doc~~ |
| `tensor` | The span's slice of the parent `Doc`'s tensor. ~~numpy.ndarray~~ |
| `start` | The token offset for the start of the span. ~~int~~ |
| `end` | The token offset for the end of the span. ~~int~~ |
| `start_char` | The character offset for the start of the span. ~~int~~ |
| `end_char` | The character offset for the end of the span. ~~int~~ |
| `text` | A string representation of the span text. ~~str~~ |
| `text_with_ws` | The text content of the span with a trailing whitespace character if the last token has one. ~~str~~ |
| `orth` | ID of the verbatim text content. ~~int~~ |
| `orth_` | Verbatim text content (identical to `Span.text`). Exists mostly for consistency with the other attributes. ~~str~~ |
| `label` | The hash value of the span's label. ~~int~~ |
| `label_` | The span's label. ~~str~~ |
| `lemma_` | The span's lemma. Equivalent to `"".join(token.text_with_ws for token in span)`. ~~str~~ |
| `kb_id` | The hash value of the knowledge base ID referred to by the span. ~~int~~ |
| `kb_id_` | The knowledge base ID referred to by the span. ~~str~~ |
| `ent_id` | The hash value of the named entity the root token is an instance of. ~~int~~ |
| `ent_id_` | The string ID of the named entity the root token is an instance of. ~~str~~ |
| `id` | The hash value of the span's ID. ~~int~~ |
| `id_` | The span's ID. ~~str~~ |
| `sentiment` | A scalar value indicating the positivity or negativity of the span. ~~float~~ |
| `_` | User space for adding custom [attribute extensions](/usage/processing-pipelines#custom-components-attributes). ~~Underscore~~ |

View File

@@ -255,9 +255,10 @@ Return a copy of the span group.
> new_group = doc.spans["errors"].copy()
> ```
| Name | Description |
| ----------- | ----------------------------------------------- |
| **RETURNS** | A copy of the `SpanGroup` object. ~~SpanGroup~~ |
| Name | Description |
| ----------- | -------------------------------------------------------------------------------------------------- |
| `doc` | The document to which the copy is bound. Defaults to `None` for the current doc. ~~Optional[Doc]~~ |
| **RETURNS** | A copy of the `SpanGroup` object. ~~SpanGroup~~ |
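
A minimal sketch of the new `doc` argument, copying a span group onto a second doc with the same tokens:

```python
import spacy
from spacy.tokens import Span

nlp = spacy.blank("en")
doc = nlp("Their goi ng home")
doc.spans["errors"] = [Span(doc, 0, 1, label="ERROR")]

other_doc = nlp("Their goi ng home")
copied = doc.spans["errors"].copy(doc=other_doc)
print(len(copied), copied.doc is other_doc)  # 1 True
```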
## SpanGroup.to_bytes {#to_bytes tag="method"}

View File

@@ -63,7 +63,6 @@ architectures and their arguments and hyperparameters.
> ```python
> from spacy.pipeline.textcat import DEFAULT_SINGLE_TEXTCAT_MODEL
> config = {
> "threshold": 0.5,
> "model": DEFAULT_SINGLE_TEXTCAT_MODEL,
> }
> nlp.add_pipe("textcat", config=config)
@@ -82,7 +81,7 @@ architectures and their arguments and hyperparameters.
| Setting | Description |
| ----------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `threshold` | Cutoff to consider a prediction "positive", relevant when printing accuracy results. ~~float~~ |
| `threshold` | Cutoff to consider a prediction "positive", relevant for `textcat_multilabel` when calculating accuracy scores. ~~float~~ |
| `model` | A model instance that predicts scores for each category. Defaults to [TextCatEnsemble](/api/architectures#TextCatEnsemble). ~~Model[List[Doc], List[Floats2d]]~~ |
| `scorer` | The scoring method. Defaults to [`Scorer.score_cats`](/api/scorer#score_cats) for the attribute `"cats"`. ~~Optional[Callable]~~ |
@@ -123,7 +122,7 @@ shortcut for this and instantiate the component using its string name and
| `model` | The Thinc [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. ~~Model[List[Doc], List[Floats2d]]~~ |
| `name` | String name of the component instance. Used to add entries to the `losses` during training. ~~str~~ |
| _keyword-only_ | |
| `threshold` | Cutoff to consider a prediction "positive", relevant when printing accuracy results. ~~float~~ |
| `threshold` | Cutoff to consider a prediction "positive", relevant for `textcat_multilabel` when calculating accuracy scores. ~~float~~ |
| `scorer` | The scoring method. Defaults to [`Scorer.score_cats`](/api/scorer#score_cats) for the attribute `"cats"`. ~~Optional[Callable]~~ |
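
For instance, a sketch of overriding the threshold when adding the multi-label component to a blank pipeline:

```python
import spacy

nlp = spacy.blank("en")
# Scores above 0.5 count as "positive" when accuracy is calculated.
textcat = nlp.add_pipe("textcat_multilabel", config={"threshold": 0.5})
textcat.add_label("SPORTS")
textcat.add_label("POLITICS")
print(textcat.labels)
```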
## TextCategorizer.\_\_call\_\_ {#call tag="method"}

View File

@@ -403,75 +403,75 @@ The L2 norm of the token's vector representation.
## Attributes {#attributes}
| Name | Description |
| -------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `doc` | The parent document. ~~Doc~~ |
| `lex` <Tag variant="new">3</Tag> | The underlying lexeme. ~~Lexeme~~ |
| `sent` <Tag variant="new">2.0.12</Tag> | The sentence span that this token is a part of. ~~Span~~ |
| `text` | Verbatim text content. ~~str~~ |
| `text_with_ws` | Text content, with trailing space character if present. ~~str~~ |
| `whitespace_` | Trailing space character if present. ~~str~~ |
| `orth` | ID of the verbatim text content. ~~int~~ |
| `orth_` | Verbatim text content (identical to `Token.text`). Exists mostly for consistency with the other attributes. ~~str~~ |
| `vocab` | The vocab object of the parent `Doc`. ~~vocab~~ |
| `tensor` <Tag variant="new">2.1.7</Tag> | The token's slice of the parent `Doc`'s tensor. ~~numpy.ndarray~~ |
| `head` | The syntactic parent, or "governor", of this token. ~~Token~~ |
| `left_edge` | The leftmost token of this token's syntactic descendants. ~~Token~~ |
| `right_edge` | The rightmost token of this token's syntactic descendants. ~~Token~~ |
| `i` | The index of the token within the parent document. ~~int~~ |
| `ent_type` | Named entity type. ~~int~~ |
| `ent_type_` | Named entity type. ~~str~~ |
| `ent_iob` | IOB code of named entity tag. `3` means the token begins an entity, `2` means it is outside an entity, `1` means it is inside an entity, and `0` means no entity tag is set. ~~int~~ |
| `ent_iob_` | IOB code of named entity tag. "B" means the token begins an entity, "I" means it is inside an entity, "O" means it is outside an entity, and "" means no entity tag is set. ~~str~~ |
| `ent_kb_id` <Tag variant="new">2.2</Tag> | Knowledge base ID that refers to the named entity this token is a part of, if any. ~~int~~ |
| `ent_kb_id_` <Tag variant="new">2.2</Tag> | Knowledge base ID that refers to the named entity this token is a part of, if any. ~~str~~ |
| `ent_id` | ID of the entity the token is an instance of, if any. Currently not used, but potentially for coreference resolution. ~~int~~ |
| `ent_id_` | ID of the entity the token is an instance of, if any. Currently not used, but potentially for coreference resolution. ~~str~~ |
| `lemma` | Base form of the token, with no inflectional suffixes. ~~int~~ |
| `lemma_` | Base form of the token, with no inflectional suffixes. ~~str~~ |
| `norm` | The token's norm, i.e. a normalized form of the token text. Can be set in the language's [tokenizer exceptions](/usage/linguistic-features#language-data). ~~int~~ |
| `norm_` | The token's norm, i.e. a normalized form of the token text. Can be set in the language's [tokenizer exceptions](/usage/linguistic-features#language-data). ~~str~~ |
| `lower` | Lowercase form of the token. ~~int~~ |
| `lower_` | Lowercase form of the token text. Equivalent to `Token.text.lower()`. ~~str~~ |
| `shape` | Transform of the token's string to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example,`"Xxxx"`or`"dd"`. ~~int~~ |
| `shape_` | Transform of the token's string to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example,`"Xxxx"`or`"dd"`. ~~str~~ |
| `prefix` | Hash value of a length-N substring from the start of the token. Defaults to `N=1`. ~~int~~ |
| `prefix_` | A length-N substring from the start of the token. Defaults to `N=1`. ~~str~~ |
| `suffix` | Hash value of a length-N substring from the end of the token. Defaults to `N=3`. ~~int~~ |
| `suffix_` | Length-N substring from the end of the token. Defaults to `N=3`. ~~str~~ |
| `is_alpha` | Does the token consist of alphabetic characters? Equivalent to `token.text.isalpha()`. ~~bool~~ |
| `is_ascii` | Does the token consist of ASCII characters? Equivalent to `all(ord(c) < 128 for c in token.text)`. ~~bool~~ |
| `is_digit` | Does the token consist of digits? Equivalent to `token.text.isdigit()`. ~~bool~~ |
| `is_lower` | Is the token in lowercase? Equivalent to `token.text.islower()`. ~~bool~~ |
| `is_upper` | Is the token in uppercase? Equivalent to `token.text.isupper()`. ~~bool~~ |
| `is_title` | Is the token in titlecase? Equivalent to `token.text.istitle()`. ~~bool~~ |
| `is_punct` | Is the token punctuation? ~~bool~~ |
| `is_left_punct` | Is the token a left punctuation mark, e.g. `"("` ? ~~bool~~ |
| `is_right_punct` | Is the token a right punctuation mark, e.g. `")"` ? ~~bool~~ |
| `is_sent_start` | Does the token start a sentence? ~~bool~~ or `None` if unknown. Defaults to `True` for the first token in the `Doc`. |
| `is_sent_end` | Does the token end a sentence? ~~bool~~ or `None` if unknown. |
| `is_space` | Does the token consist of whitespace characters? Equivalent to `token.text.isspace()`. ~~bool~~ |
| `is_bracket` | Is the token a bracket? ~~bool~~ |
| `is_quote` | Is the token a quotation mark? ~~bool~~ |
| `is_currency` <Tag variant="new">2.0.8</Tag> | Is the token a currency symbol? ~~bool~~ |
| `like_url` | Does the token resemble a URL? ~~bool~~ |
| `like_num` | Does the token represent a number? e.g. "10.9", "10", "ten", etc. ~~bool~~ |
| `like_email` | Does the token resemble an email address? ~~bool~~ |
| `is_oov` | Is the token out-of-vocabulary (i.e. does it not have a word vector)? ~~bool~~ |
| `is_stop` | Is the token part of a "stop list"? ~~bool~~ |
| `pos` | Coarse-grained part-of-speech from the [Universal POS tag set](https://universaldependencies.org/u/pos/). ~~int~~ |
| `pos_` | Coarse-grained part-of-speech from the [Universal POS tag set](https://universaldependencies.org/u/pos/). ~~str~~ |
| `tag` | Fine-grained part-of-speech. ~~int~~ |
| `tag_` | Fine-grained part-of-speech. ~~str~~ |
| `morph` <Tag variant="new">3</Tag> | Morphological analysis. ~~MorphAnalysis~~ |
| `dep` | Syntactic dependency relation. ~~int~~ |
| `dep_` | Syntactic dependency relation. ~~str~~ |
| `lang` | Language of the parent document's vocabulary. ~~int~~ |
| `lang_` | Language of the parent document's vocabulary. ~~str~~ |
| `prob` | Smoothed log probability estimate of token's word type (context-independent entry in the vocabulary). ~~float~~ |
| `idx` | The character offset of the token within the parent document. ~~int~~ |
| `sentiment` | A scalar value indicating the positivity or negativity of the token. ~~float~~ |
| `lex_id` | Sequential ID of the token's lexical type, used to index into tables, e.g. for word vectors. ~~int~~ |
| `rank` | Sequential ID of the token's lexical type, used to index into tables, e.g. for word vectors. ~~int~~ |
| `cluster` | Brown cluster ID. ~~int~~ |
| `_` | User space for adding custom [attribute extensions](/usage/processing-pipelines#custom-components-attributes). ~~Underscore~~ |
| Name | Description |
| ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `doc` | The parent document. ~~Doc~~ |
| `lex` <Tag variant="new">3</Tag> | The underlying lexeme. ~~Lexeme~~ |
| `sent` | The sentence span that this token is a part of. ~~Span~~ |
| `text` | Verbatim text content. ~~str~~ |
| `text_with_ws` | Text content, with trailing space character if present. ~~str~~ |
| `whitespace_` | Trailing space character if present. ~~str~~ |
| `orth` | ID of the verbatim text content. ~~int~~ |
| `orth_` | Verbatim text content (identical to `Token.text`). Exists mostly for consistency with the other attributes. ~~str~~ |
| `vocab`                            | The vocab object of the parent `Doc`. ~~Vocab~~ |
| `tensor` | The token's slice of the parent `Doc`'s tensor. ~~numpy.ndarray~~ |
| `head` | The syntactic parent, or "governor", of this token. ~~Token~~ |
| `left_edge` | The leftmost token of this token's syntactic descendants. ~~Token~~ |
| `right_edge` | The rightmost token of this token's syntactic descendants. ~~Token~~ |
| `i` | The index of the token within the parent document. ~~int~~ |
| `ent_type` | Named entity type. ~~int~~ |
| `ent_type_` | Named entity type. ~~str~~ |
| `ent_iob` | IOB code of named entity tag. `3` means the token begins an entity, `2` means it is outside an entity, `1` means it is inside an entity, and `0` means no entity tag is set. ~~int~~ |
| `ent_iob_` | IOB code of named entity tag. "B" means the token begins an entity, "I" means it is inside an entity, "O" means it is outside an entity, and "" means no entity tag is set. ~~str~~ |
| `ent_kb_id` | Knowledge base ID that refers to the named entity this token is a part of, if any. ~~int~~ |
| `ent_kb_id_` | Knowledge base ID that refers to the named entity this token is a part of, if any. ~~str~~ |
| `ent_id` | ID of the entity the token is an instance of, if any. Currently not used, but potentially for coreference resolution. ~~int~~ |
| `ent_id_` | ID of the entity the token is an instance of, if any. Currently not used, but potentially for coreference resolution. ~~str~~ |
| `lemma` | Base form of the token, with no inflectional suffixes. ~~int~~ |
| `lemma_` | Base form of the token, with no inflectional suffixes. ~~str~~ |
| `norm` | The token's norm, i.e. a normalized form of the token text. Can be set in the language's [tokenizer exceptions](/usage/linguistic-features#language-data). ~~int~~ |
| `norm_` | The token's norm, i.e. a normalized form of the token text. Can be set in the language's [tokenizer exceptions](/usage/linguistic-features#language-data). ~~str~~ |
| `lower` | Lowercase form of the token. ~~int~~ |
| `lower_` | Lowercase form of the token text. Equivalent to `Token.text.lower()`. ~~str~~ |
| `shape`                            | Transform of the token's string to show orthographic features. Alphabetic characters are replaced by `x` or `X`, numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example, `"Xxxx"` or `"dd"`. ~~int~~ |
| `shape_`                           | Transform of the token's string to show orthographic features. Alphabetic characters are replaced by `x` or `X`, numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example, `"Xxxx"` or `"dd"`. ~~str~~ |
| `prefix` | Hash value of a length-N substring from the start of the token. Defaults to `N=1`. ~~int~~ |
| `prefix_` | A length-N substring from the start of the token. Defaults to `N=1`. ~~str~~ |
| `suffix` | Hash value of a length-N substring from the end of the token. Defaults to `N=3`. ~~int~~ |
| `suffix_` | Length-N substring from the end of the token. Defaults to `N=3`. ~~str~~ |
| `is_alpha` | Does the token consist of alphabetic characters? Equivalent to `token.text.isalpha()`. ~~bool~~ |
| `is_ascii` | Does the token consist of ASCII characters? Equivalent to `all(ord(c) < 128 for c in token.text)`. ~~bool~~ |
| `is_digit` | Does the token consist of digits? Equivalent to `token.text.isdigit()`. ~~bool~~ |
| `is_lower` | Is the token in lowercase? Equivalent to `token.text.islower()`. ~~bool~~ |
| `is_upper` | Is the token in uppercase? Equivalent to `token.text.isupper()`. ~~bool~~ |
| `is_title` | Is the token in titlecase? Equivalent to `token.text.istitle()`. ~~bool~~ |
| `is_punct` | Is the token punctuation? ~~bool~~ |
| `is_left_punct` | Is the token a left punctuation mark, e.g. `"("` ? ~~bool~~ |
| `is_right_punct` | Is the token a right punctuation mark, e.g. `")"` ? ~~bool~~ |
| `is_sent_start` | Does the token start a sentence? ~~bool~~ or `None` if unknown. Defaults to `True` for the first token in the `Doc`. |
| `is_sent_end` | Does the token end a sentence? ~~bool~~ or `None` if unknown. |
| `is_space` | Does the token consist of whitespace characters? Equivalent to `token.text.isspace()`. ~~bool~~ |
| `is_bracket` | Is the token a bracket? ~~bool~~ |
| `is_quote` | Is the token a quotation mark? ~~bool~~ |
| `is_currency` | Is the token a currency symbol? ~~bool~~ |
| `like_url` | Does the token resemble a URL? ~~bool~~ |
| `like_num` | Does the token represent a number? e.g. "10.9", "10", "ten", etc. ~~bool~~ |
| `like_email` | Does the token resemble an email address? ~~bool~~ |
| `is_oov` | Is the token out-of-vocabulary (i.e. does it not have a word vector)? ~~bool~~ |
| `is_stop` | Is the token part of a "stop list"? ~~bool~~ |
| `pos` | Coarse-grained part-of-speech from the [Universal POS tag set](https://universaldependencies.org/u/pos/). ~~int~~ |
| `pos_` | Coarse-grained part-of-speech from the [Universal POS tag set](https://universaldependencies.org/u/pos/). ~~str~~ |
| `tag` | Fine-grained part-of-speech. ~~int~~ |
| `tag_` | Fine-grained part-of-speech. ~~str~~ |
| `morph` <Tag variant="new">3</Tag> | Morphological analysis. ~~MorphAnalysis~~ |
| `dep` | Syntactic dependency relation. ~~int~~ |
| `dep_` | Syntactic dependency relation. ~~str~~ |
| `lang` | Language of the parent document's vocabulary. ~~int~~ |
| `lang_` | Language of the parent document's vocabulary. ~~str~~ |
| `prob` | Smoothed log probability estimate of token's word type (context-independent entry in the vocabulary). ~~float~~ |
| `idx` | The character offset of the token within the parent document. ~~int~~ |
| `sentiment` | A scalar value indicating the positivity or negativity of the token. ~~float~~ |
| `lex_id` | Sequential ID of the token's lexical type, used to index into tables, e.g. for word vectors. ~~int~~ |
| `rank` | Sequential ID of the token's lexical type, used to index into tables, e.g. for word vectors. ~~int~~ |
| `cluster` | Brown cluster ID. ~~int~~ |
| `_` | User space for adding custom [attribute extensions](/usage/processing-pipelines#custom-components-attributes). ~~Underscore~~ |
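
As a quick orientation, a minimal sketch reading a few of these attributes (assuming a trained pipeline such as `en_core_web_sm` is installed):

```python
import spacy

nlp = spacy.load("en_core_web_sm")
doc = nlp("Apple is looking at buying a U.K. startup.")

token = doc[0]
print(token.text, token.lemma_, token.pos_, token.dep_)
print(token.ent_type_, token.ent_iob_)  # e.g. "ORG", "B"
print(token.is_sent_start, token.shape_, token.is_oov)
```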

View File

@@ -45,16 +45,16 @@ specified separately using the new `exclude` keyword argument.
> nlp = spacy.load("en_core_web_sm", exclude=["parser", "tagger"])
> ```
| Name | Description |
| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `name` | Pipeline to load, i.e. package name or path. ~~Union[str, Path]~~ |
| _keyword-only_ | |
| `vocab` | Optional shared vocab to pass in on initialization. If `True` (default), a new `Vocab` object will be created. ~~Union[Vocab, bool]~~ |
| `disable` | Name(s) of pipeline component(s) to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [nlp.enable_pipe](/api/language#enable_pipe). ~~Union[str, Iterable[str]]~~ |
| `enable` <Tag variant="new">3.4</Tag> | Name(s) of pipeline component(s) to [enable](/usage/processing-pipelines#disabling). All other pipes will be disabled. ~~Union[str, Iterable[str]]~~ |
| `exclude` <Tag variant="new">3</Tag> | Name(s) of pipeline component(s) to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~Union[str, Iterable[str]]~~ |
| `config` <Tag variant="new">3</Tag> | Optional config overrides, either as nested dict or dict keyed by section value in dot notation, e.g. `"components.name.value"`. ~~Union[Dict[str, Any], Config]~~ |
| **RETURNS** | A `Language` object with the loaded pipeline. ~~Language~~ |
| Name | Description |
| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `name` | Pipeline to load, i.e. package name or path. ~~Union[str, Path]~~ |
| _keyword-only_ | |
| `vocab` | Optional shared vocab to pass in on initialization. If `True` (default), a new `Vocab` object will be created. ~~Union[Vocab, bool]~~ |
| `disable` | Name(s) of pipeline component(s) to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [nlp.enable_pipe](/api/language#enable_pipe). Is merged with the config entry `nlp.disabled`. ~~Union[str, Iterable[str]]~~ |
| `enable` <Tag variant="new">3.4</Tag> | Name(s) of pipeline component(s) to [enable](/usage/processing-pipelines#disabling). All other pipes will be disabled. ~~Union[str, Iterable[str]]~~ |
| `exclude` <Tag variant="new">3</Tag> | Name(s) of pipeline component(s) to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~Union[str, Iterable[str]]~~ |
| `config` <Tag variant="new">3</Tag> | Optional config overrides, either as nested dict or dict keyed by section value in dot notation, e.g. `"components.name.value"`. ~~Union[Dict[str, Any], Config]~~ |
| **RETURNS** | A `Language` object with the loaded pipeline. ~~Language~~ |
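
The interplay of `disable`, `enable` and `config` is easiest to see in a short sketch (assuming `en_core_web_sm` is installed; the override values are illustrative):

```python
import spacy

# Load the pipeline, keeping "ner" loaded but disabled.
nlp = spacy.load("en_core_web_sm", disable=["ner"])
print(nlp.pipe_names)  # "ner" is not run...
print(nlp.disabled)    # ...but can be re-enabled later
nlp.enable_pipe("ner")

# Config overrides as a nested dict, e.g. a different default batch size.
nlp2 = spacy.load("en_core_web_sm", config={"nlp": {"batch_size": 64}})
```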
Essentially, `spacy.load()` is a convenience wrapper that reads the pipeline's
[`config.cfg`](/api/data-formats#config), uses the language and pipeline
@@ -354,22 +354,22 @@ If a setting is not present in the options, the default value will be used.
> displacy.serve(doc, style="dep", options=options)
> ```
| Name | Description |
| ------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------- |
| `fine_grained` | Use fine-grained part-of-speech tags (`Token.tag_`) instead of coarse-grained tags (`Token.pos_`). Defaults to `False`. ~~bool~~ |
| `add_lemma` <Tag variant="new">2.2.4</Tag> | Print the lemmas in a separate row below the token texts. Defaults to `False`. ~~bool~~ |
| `collapse_punct` | Attach punctuation to tokens. Can make the parse more readable, as it prevents long arcs to attach punctuation. Defaults to `True`. ~~bool~~ |
| `collapse_phrases` | Merge noun phrases into one token. Defaults to `False`. ~~bool~~ |
| `compact` | "Compact mode" with square arrows that takes up less space. Defaults to `False`. ~~bool~~ |
| `color` | Text color (HEX, RGB or color names). Defaults to `"#000000"`. ~~str~~ |
| `bg` | Background color (HEX, RGB or color names). Defaults to `"#ffffff"`. ~~str~~ |
| `font` | Font name or font family for all text. Defaults to `"Arial"`. ~~str~~ |
| `offset_x` | Spacing on left side of the SVG in px. Defaults to `50`. ~~int~~ |
| `arrow_stroke` | Width of arrow path in px. Defaults to `2`. ~~int~~ |
| `arrow_width` | Width of arrow head in px. Defaults to `10` in regular mode and `8` in compact mode. ~~int~~ |
| `arrow_spacing` | Spacing between arrows in px to avoid overlaps. Defaults to `20` in regular mode and `12` in compact mode. ~~int~~ |
| `word_spacing` | Vertical spacing between words and arcs in px. Defaults to `45`. ~~int~~ |
| `distance` | Distance between words in px. Defaults to `175` in regular mode and `150` in compact mode. ~~int~~ |
| Name | Description |
| ------------------ | -------------------------------------------------------------------------------------------------------------------------------------------- |
| `fine_grained` | Use fine-grained part-of-speech tags (`Token.tag_`) instead of coarse-grained tags (`Token.pos_`). Defaults to `False`. ~~bool~~ |
| `add_lemma` | Print the lemmas in a separate row below the token texts. Defaults to `False`. ~~bool~~ |
| `collapse_punct`   | Attach punctuation to tokens. Can make the parse more readable, as it prevents long arcs that only attach punctuation. Defaults to `True`. ~~bool~~ |
| `collapse_phrases` | Merge noun phrases into one token. Defaults to `False`. ~~bool~~ |
| `compact` | "Compact mode" with square arrows that takes up less space. Defaults to `False`. ~~bool~~ |
| `color` | Text color (HEX, RGB or color names). Defaults to `"#000000"`. ~~str~~ |
| `bg` | Background color (HEX, RGB or color names). Defaults to `"#ffffff"`. ~~str~~ |
| `font` | Font name or font family for all text. Defaults to `"Arial"`. ~~str~~ |
| `offset_x` | Spacing on left side of the SVG in px. Defaults to `50`. ~~int~~ |
| `arrow_stroke` | Width of arrow path in px. Defaults to `2`. ~~int~~ |
| `arrow_width` | Width of arrow head in px. Defaults to `10` in regular mode and `8` in compact mode. ~~int~~ |
| `arrow_spacing` | Spacing between arrows in px to avoid overlaps. Defaults to `20` in regular mode and `12` in compact mode. ~~int~~ |
| `word_spacing` | Vertical spacing between words and arcs in px. Defaults to `45`. ~~int~~ |
| `distance` | Distance between words in px. Defaults to `175` in regular mode and `150` in compact mode. ~~int~~ |
#### Named Entity Visualizer options {#displacy_options-ent}
@@ -385,7 +385,7 @@ If a setting is not present in the options, the default value will be used.
| ------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `ents` | Entity types to highlight or `None` for all types (default). ~~Optional[List[str]]~~ |
| `colors` | Color overrides. Entity types should be mapped to color names or values. ~~Dict[str, str]~~ |
| `template` <Tag variant="new">2.2</Tag> | Optional template to overwrite the HTML used to render entity spans. Should be a format string and can use `{bg}`, `{text}` and `{label}`. See [`templates.py`](%%GITHUB_SPACY/spacy/displacy/templates.py) for examples. ~~Optional[str]~~ |
| `template` | Optional template to overwrite the HTML used to render entity spans. Should be a format string and can use `{bg}`, `{text}` and `{label}`. See [`templates.py`](%%GITHUB_SPACY/spacy/displacy/templates.py) for examples. ~~Optional[str]~~ |
| `kb_url_template` <Tag variant="new">3.2.1</Tag> | Optional template to construct the KB url for the entity to link to. Expects a python f-string format with single field to fill in. ~~Optional[str]~~ |
#### Span Visualizer options {#displacy_options-span}

View File

@@ -21,15 +21,15 @@ Create the vocabulary.
> vocab = Vocab(strings=["hello", "world"])
> ```
| Name | Description |
| ------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `lex_attr_getters` | A dictionary mapping attribute IDs to functions to compute them. Defaults to `None`. ~~Optional[Dict[str, Callable[[str], Any]]]~~ |
| `strings` | A [`StringStore`](/api/stringstore) that maps strings to hash values, and vice versa, or a list of strings. ~~Union[List[str], StringStore]~~ |
| `lookups` | A [`Lookups`](/api/lookups) that stores the `lexeme_norm` and other large lookup tables. Defaults to `None`. ~~Optional[Lookups]~~ |
| `oov_prob` | The default OOV probability. Defaults to `-20.0`. ~~float~~ |
| `vectors_name` <Tag variant="new">2.2</Tag> | A name to identify the vectors table. ~~str~~ |
| `writing_system` | A dictionary describing the language's writing system. Typically provided by [`Language.Defaults`](/api/language#defaults). ~~Dict[str, Any]~~ |
| `get_noun_chunks` | A function that yields base noun phrases used for [`Doc.noun_chunks`](/api/doc#noun_chunks). ~~Optional[Callable[[Union[Doc, Span], Iterator[Tuple[int, int, int]]]]]~~ |
| Name | Description |
| ------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `lex_attr_getters` | A dictionary mapping attribute IDs to functions to compute them. Defaults to `None`. ~~Optional[Dict[str, Callable[[str], Any]]]~~ |
| `strings` | A [`StringStore`](/api/stringstore) that maps strings to hash values, and vice versa, or a list of strings. ~~Union[List[str], StringStore]~~ |
| `lookups` | A [`Lookups`](/api/lookups) that stores the `lexeme_norm` and other large lookup tables. Defaults to `None`. ~~Optional[Lookups]~~ |
| `oov_prob` | The default OOV probability. Defaults to `-20.0`. ~~float~~ |
| `vectors_name` | A name to identify the vectors table. ~~str~~ |
| `writing_system` | A dictionary describing the language's writing system. Typically provided by [`Language.Defaults`](/api/language#defaults). ~~Dict[str, Any]~~ |
| `get_noun_chunks` | A function that yields base noun phrases used for [`Doc.noun_chunks`](/api/doc#noun_chunks). ~~Optional[Callable[[Union[Doc, Span], Iterator[Tuple[int, int, int]]]]]~~ |
## Vocab.\_\_len\_\_ {#len tag="method"}
@@ -311,10 +311,10 @@ Load state from a binary string.
| Name | Description |
| ---------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `strings` | A table managing the string-to-int mapping. ~~StringStore~~ |
| `vectors` <Tag variant="new">2</Tag> | A table associating word IDs to word vectors. ~~Vectors~~ |
| `vectors` | A table associating word IDs to word vectors. ~~Vectors~~ |
| `vectors_length` | Number of dimensions for each word vector. ~~int~~ |
| `lookups` | The available lookup tables in this vocab. ~~Lookups~~ |
| `writing_system` <Tag variant="new">2.1</Tag> | A dict with information about the language's writing system. ~~Dict[str, Any]~~ |
| `writing_system` | A dict with information about the language's writing system. ~~Dict[str, Any]~~ |
| `get_noun_chunks` <Tag variant="new">3.0</Tag> | A function that yields base noun phrases used for [`Doc.noun_chunks`](/api/doc#noun_chunks). ~~Optional[Callable[[Union[Doc, Span], Iterator[Tuple[int, int, int]]]]]~~ |
## Serialization fields {#serialization-fields}

View File

@@ -78,7 +78,9 @@ operates on a `Doc` and gives you access to the matched tokens **in context**.
| Name | Description |
| ------------------------------------------------ | -------------------------------------------------------------------------------------------------- |
| [`Corpus`](/api/corpus) | Class for managing annotated corpora for training and evaluation data. |
| [`KnowledgeBase`](/api/kb) | Storage for entities and aliases of a knowledge base for entity linking. |
| [`KnowledgeBase`](/api/kb) | Abstract base class for storage and retrieval of data for entity linking. |
| [`InMemoryLookupKB`](/api/kb_in_memory) | Implementation of `KnowledgeBase` storing all data in memory. |
| [`Candidate`](/api/kb#candidate) | Object associating a textual mention with a specific entity contained in a `KnowledgeBase`. |
| [`Lookups`](/api/lookups) | Container for convenient access to large lookup tables and dictionaries. |
| [`MorphAnalysis`](/api/morphology#morphanalysis) | A morphological analysis. |
| [`Morphology`](/api/morphology) | Store morphological analyses and map them to and from hash values. |

View File

@@ -363,7 +363,8 @@ nlp.enable_pipe("tagger")
```
In addition to `disable`, `spacy.load()` also accepts `enable`. If `enable` is
set, all components except for those in `enable` are disabled.
set, all components except for those in `enable` are disabled. If `enable` and
`disable` conflict (i.e. the same component is included in both), an error is raised.
```python
# Load the complete pipeline, but disable all components except for tok2vec and tagger
nlp = spacy.load("en_core_web_sm", enable=["tok2vec", "tagger"])
```

View File

@@ -243,6 +243,27 @@ pipelines.
> python -m spacy project run test . --vars.foo bar
> ```
> #### Tip: Environment Variables
>
> Commands in a project file are not executed in a shell, so they don't have
> direct access to environment variables. But you can insert environment
> variables using the `env` dictionary to make values available for
> interpolation, just like values in `vars`. Here's an example `env` dict that
> makes `$PATH` available as `ENV_PATH`:
>
> ```yaml
> env:
> ENV_PATH: PATH
> ```
>
> This can be used in a project command like so:
>
> ```yaml
> - name: "echo-path"
> script:
> - "echo ${env.ENV_PATH}"
> ```
| Section | Description |
| --------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `title` | An optional project title used in `--help` message and [auto-generated docs](#custom-docs). |

View File

@@ -162,7 +162,7 @@ rule-based matching are:
| Attribute | Description |
| ---------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `ORTH` | The exact verbatim text of a token. ~~str~~ |
| `TEXT` <Tag variant="new">2.1</Tag> | The exact verbatim text of a token. ~~str~~ |
| `TEXT` | The exact verbatim text of a token. ~~str~~ |
| `NORM` | The normalized form of the token text. ~~str~~ |
| `LOWER` | The lowercase form of the token text. ~~str~~ |
| `LENGTH` | The length of the token text. ~~int~~ |
@ -174,7 +174,7 @@ rule-based matching are:
| `SPACY` | Token has a trailing space. ~~bool~~ |
| `POS`, `TAG`, `MORPH`, `DEP`, `LEMMA`, `SHAPE` | The token's simple and extended part-of-speech tag, morphological analysis, dependency label, lemma, shape. Note that the values of these attributes are case-sensitive. For a list of available part-of-speech tags and dependency labels, see the [Annotation Specifications](/api/annotation). ~~str~~ |
| `ENT_TYPE` | The token's entity label. ~~str~~ |
| `_` <Tag variant="new">2.1</Tag> | Properties in [custom extension attributes](/usage/processing-pipelines#custom-components-attributes). ~~Dict[str, Any]~~ |
| `_` | Properties in [custom extension attributes](/usage/processing-pipelines#custom-components-attributes). ~~Dict[str, Any]~~ |
| `OP` | [Operator or quantifier](#quantifiers) to determine how often to match a token pattern. ~~str~~ |
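
As a minimal sketch of how these attributes combine into token patterns (using a blank English pipeline, so only attributes that don't require tagging or parsing appear):

```python
import spacy
from spacy.matcher import Matcher

nlp = spacy.blank("en")
matcher = Matcher(nlp.vocab)
# One dict per token: "hello" in any casing, an optional comma, then "world"
pattern = [{"LOWER": "hello"}, {"ORTH": ",", "OP": "?"}, {"LOWER": "world"}]
matcher.add("HELLO_WORLD", [pattern])

doc = nlp("Hello, world! hello world")
for match_id, start, end in matcher(doc):
    print(doc[start:end].text)  # "Hello, world" and "hello world"
```
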
<Accordion title="Does it matter if the attribute names are uppercase or lowercase?">
@ -375,7 +375,7 @@ scoped quantifiers instead, you can build those behaviors with `on_match`
callbacks.
| OP | Description |
|---------|------------------------------------------------------------------------|
| ------- | ---------------------------------------------------------------------- |
| `!` | Negate the pattern, by requiring it to match exactly 0 times. |
| `?` | Make the pattern optional, by allowing it to match 0 or 1 times. |
| `+` | Require the pattern to match 1 or more times. |
@ -1792,7 +1792,7 @@ the entity `Span` for example `._.orgs` or `._.prev_orgs` and
> [`Doc.retokenize`](/api/doc#retokenize) context manager:
>
> ```python
> with doc.retokenize() as retokenize:
> with doc.retokenize() as retokenizer:
> for ent in doc.ents:
> retokenizer.merge(ent)
> ```

View File

@ -306,12 +306,12 @@ pipeline component factories, language classes and other settings. To make spaCy
use your entry points, your package needs to expose them and it needs to be
installed in the same environment, and that's it.
| Entry point | Description |
| ------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| [`spacy_factories`](#entry-points-components) | Group of entry points for pipeline component factories, keyed by component name. Can be used to expose custom components defined by another package. |
| [`spacy_languages`](#entry-points-languages) | Group of entry points for custom [`Language` subclasses](/usage/linguistic-features#language-data), keyed by language shortcut. |
| `spacy_lookups` <Tag variant="new">2.2</Tag> | Group of entry points for custom [`Lookups`](/api/lookups), including lemmatizer data. Used by spaCy's [`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data) package. |
| [`spacy_displacy_colors`](#entry-points-displacy) <Tag variant="new">2.2</Tag> | Group of entry points of custom label colors for the [displaCy visualizer](/usage/visualizers#ent). The key name doesn't matter, but it should point to a dict of labels and color values. Useful for custom models that predict different entity types. |
| Entry point | Description |
| ------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| [`spacy_factories`](#entry-points-components) | Group of entry points for pipeline component factories, keyed by component name. Can be used to expose custom components defined by another package. |
| [`spacy_languages`](#entry-points-languages) | Group of entry points for custom [`Language` subclasses](/usage/linguistic-features#language-data), keyed by language shortcut. |
| `spacy_lookups` | Group of entry points for custom [`Lookups`](/api/lookups), including lemmatizer data. Used by spaCy's [`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data) package. |
| [`spacy_displacy_colors`](#entry-points-displacy) | Group of entry points of custom label colors for the [displaCy visualizer](/usage/visualizers#ent). The key name doesn't matter, but it should point to a dict of labels and color values. Useful for custom models that predict different entity types. |
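
For illustration, a package can declare these groups in its `setup.py`; the package, module and object names below are hypothetical:

```python
# setup.py of a hypothetical plugin package exposing a component factory
# and displaCy colors through spaCy's entry points
from setuptools import setup

setup(
    name="my-spacy-plugin",
    entry_points={
        "spacy_factories": ["my_component = my_plugin.factories:create_my_component"],
        "spacy_displacy_colors": ["colors = my_plugin.colors:DISPLACY_COLORS"],
    },
)
```
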
### Custom components via entry points {#entry-points-components}

View File

@ -480,7 +480,7 @@ as-is. They are also excluded when calling
> parse. So the evaluation results should always reflect what your pipeline will
> produce at runtime. If you want a frozen component to run (without updating)
> during training as well, so that downstream components can use its
> **predictions**, you can add it to the list of
> **predictions**, you should add it to the list of
> [`annotating_components`](/usage/training#annotating-components).
```ini
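# Hypothetical excerpt: keep a frozen tagger annotating during training
[training]
frozen_components = ["tagger"]
annotating_components = ["tagger"]
```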

View File

@ -4,12 +4,22 @@
"code": "af",
"name": "Afrikaans"
},
{
"code": "am",
"name": "Amharic",
"has_examples": true
},
{
"code": "ar",
"name": "Arabic",
"example": "هذه جملة",
"has_examples": true
},
{
"code": "az",
"name": "Azerbaijani",
"has_examples": true
},
{
"code": "bg",
"name": "Bulgarian",
@ -65,7 +75,7 @@
{
"code": "dsb",
"name": "Lower Sorbian",
"has_examples": true
"has_examples": true
},
{
"code": "el",
@ -142,6 +152,11 @@
"code": "ga",
"name": "Irish"
},
{
"code": "grc",
"name": "Ancient Greek",
"has_examples": true
},
{
"code": "gu",
"name": "Gujarati",
@ -172,7 +187,7 @@
{
"code": "hsb",
"name": "Upper Sorbian",
"has_examples": true
"has_examples": true
},
{
"code": "hu",
@ -260,6 +275,10 @@
"example": "Адамга эң кыйыны — күн сайын адам болуу",
"has_examples": true
},
{
"code": "la",
"name": "Latin"
},
{
"code": "lb",
"name": "Luxembourgish",
@ -374,8 +393,8 @@
"has_examples": true,
"dependencies": [
{
"name": "pymorphy2",
"url": "https://github.com/kmike/pymorphy2"
"name": "pymorphy3",
"url": "https://github.com/no-plagiarism/pymorphy3"
}
],
"models": [
@ -448,6 +467,11 @@
"example": "นี่คือประโยค",
"has_examples": true
},
{
"code": "ti",
"name": "Tigrinya",
"has_examples": true
},
{
"code": "tl",
"name": "Tagalog"
@ -480,12 +504,12 @@
],
"dependencies": [
{
"name": "pymorphy2",
"url": "https://github.com/kmike/pymorphy2"
"name": "pymorphy3",
"url": "https://github.com/no-plagiarism/pymorphy3"
},
{
"name": "pymorphy2-dicts-uk",
"url": "https://github.com/kmike/pymorphy2-dicts/"
"name": "pymorphy3-dicts-uk",
"url": "https://github.com/no-plagiarism/pymorphy3-dicts"
}
]
},

View File

@ -1,5 +1,72 @@
{
"resources": [
{
"id": "grecy",
"title": "greCy",
"slogan": "Ancient Greek pipelines for spaCy",
"description": "greCy offers state-of-the-art pipelines for ancient Greek NLP. The repository makes language models available in various sizes, some of them containing floret word vectors and a BERT transformer layer.",
"github": "jmyerston/greCy",
"code_example": [
"import spacy",
"#After installing the grc_ud_proiel_trf wheel package from the greCy repository",
"",
"nlp = spacy.load('grc_ud_proiel_trf')",
"doc = nlp('δοκῶ μοι περὶ ὧν πυνθάνεσθε οὐκ ἀμελέτητος εἶναι.')",
"",
"for token in doc:",
" print(token.text, token.norm_, token.lemma_, token.pos_, token.tag_)"
],
"code_language": "python",
"author": "Jacobo Myerston",
"author_links": {
"twitter": "@jcbmyrstn",
"github": "jmyerston",
"website": "https://huggingface.co/spaces/Jacobo/syntax"
},
"category": ["pipeline", "research"],
"tags": ["ancient Greek"]
},
{
"id": "spacy-cleaner",
"title": "spacy-cleaner",
"slogan": "Easily clean text with spaCy!",
"description": "**spacy-cleaner** utilises spaCy `Language` models to replace, remove, and \n mutate spaCy tokens. Cleaning actions available are:\n\n* Remove/replace stopwords.\n* Remove/replace punctuation.\n* Remove/replace numbers.\n* Remove/replace emails.\n* Remove/replace URLs.\n* Perform lemmatisation.\n\nSee our [docs](https://ce11an.github.io/spacy-cleaner/) for more information.",
"github": "Ce11an/spacy-cleaner",
"pip": "spacy-cleaner",
"code_example": [
"import spacy",
"import spacy_cleaner",
"from spacy_cleaner.processing import removers, replacers, mutators",
"",
"model = spacy.load(\"en_core_web_sm\")",
"pipeline = spacy_cleaner.Pipeline(",
" model,",
" removers.remove_stopword_token,",
" replacers.replace_punctuation_token,",
" mutators.mutate_lemma_token,",
")",
"",
"texts = [\"Hello, my name is Cellan! I love to swim!\"]",
"",
"pipeline.clean(texts)",
"# ['hello _IS_PUNCT_ Cellan _IS_PUNCT_ love swim _IS_PUNCT_']"
],
"code_language": "python",
"url": "https://ce11an.github.io/spacy-cleaner/",
"image": "https://raw.githubusercontent.com/Ce11an/spacy-cleaner/main/docs/assets/images/spacemen.png",
"author": "Cellan Hall",
"author_links": {
"twitter": "Ce11an",
"github": "Ce11an",
"website": "https://www.linkedin.com/in/cellan-hall/"
},
"category": [
"extension"
],
"tags": [
"text-processing"
]
},
{
"id": "Zshot",
"title": "Zshot",

View File

@ -149,6 +149,9 @@
& > span
display: block
a
text-decoration: underline
.small
font-size: var(--font-size-code)
line-height: 1.65

View File

@ -159,6 +159,9 @@ const QuickstartInstall = ({ id, title }) => {
setters={setters}
showDropdown={showDropdown}
>
<QS os="mac" hardware="gpu" platform="arm">
# Note M1 GPU support is experimental, see <a href="https://github.com/explosion/thinc/issues/792">Thinc issue #792</a>
</QS>
<QS package="pip" config="venv">
python -m venv .env
</QS>
@ -198,7 +201,13 @@ const QuickstartInstall = ({ id, title }) => {
{nightly ? ' --pre' : ''}
</QS>
<QS package="conda">conda install -c conda-forge spacy</QS>
<QS package="conda" hardware="gpu">
<QS package="conda" hardware="gpu" os="windows">
conda install -c conda-forge cupy
</QS>
<QS package="conda" hardware="gpu" os="linux">
conda install -c conda-forge cupy
</QS>
<QS package="conda" hardware="gpu" os="mac" platform="x86">
conda install -c conda-forge cupy
</QS>
<QS package="conda" config="train">