Merge remote-tracking branch 'upstream/master' into rapidfuzz

Adriane Boyd 2022-11-28 10:38:18 +01:00
commit 5088949cf8
72 changed files with 1284 additions and 769 deletions

View File

@@ -1,9 +1,7 @@
 parameters:
   python_version: ''
-  architecture: ''
-  prefix: ''
-  gpu: false
-  num_build_jobs: 1
+  architecture: 'x64'
+  num_build_jobs: 2

 steps:
   - task: UsePythonVersion@0
@@ -17,16 +15,16 @@ steps:
     displayName: 'Set variables'

   - script: |
-      ${{ parameters.prefix }} python -m pip install -U pip setuptools
-      ${{ parameters.prefix }} python -m pip install -U -r requirements.txt
+      python -m pip install -U build pip setuptools
+      python -m pip install -U -r requirements.txt
     displayName: "Install dependencies"

   - script: |
-      ${{ parameters.prefix }} python setup.py build_ext --inplace -j ${{ parameters.num_build_jobs }}
-      ${{ parameters.prefix }} python setup.py sdist --formats=gztar
-    displayName: "Compile and build sdist"
+      python -m build --sdist
+    displayName: "Build sdist"

-  - script: python -m mypy spacy
+  - script: |
+      python -m mypy spacy
     displayName: 'Run mypy'
     condition: ne(variables['python_version'], '3.6')
@@ -35,35 +33,24 @@ steps:
       contents: "spacy"
     displayName: "Delete source directory"

+  - task: DeleteFiles@1
+    inputs:
+      contents: "*.egg-info"
+    displayName: "Delete egg-info directory"
+
   - script: |
-      ${{ parameters.prefix }} python -m pip freeze --exclude torch --exclude cupy-cuda110 > installed.txt
-      ${{ parameters.prefix }} python -m pip uninstall -y -r installed.txt
+      python -m pip freeze > installed.txt
+      python -m pip uninstall -y -r installed.txt
     displayName: "Uninstall all packages"

   - bash: |
-      ${{ parameters.prefix }} SDIST=$(python -c "import os;print(os.listdir('./dist')[-1])" 2>&1)
-      ${{ parameters.prefix }} SPACY_NUM_BUILD_JOBS=2 python -m pip install dist/$SDIST
+      SDIST=$(python -c "import os;print(os.listdir('./dist')[-1])" 2>&1)
+      SPACY_NUM_BUILD_JOBS=${{ parameters.num_build_jobs }} python -m pip install dist/$SDIST
     displayName: "Install from sdist"

   - script: |
-      ${{ parameters.prefix }} python -m pip install -U -r requirements.txt
-    displayName: "Install test requirements"
-
-  - script: |
-      ${{ parameters.prefix }} python -m pip install -U cupy-cuda110 -f https://github.com/cupy/cupy/releases/v9.0.0
-      ${{ parameters.prefix }} python -m pip install "torch==1.7.1+cu110" -f https://download.pytorch.org/whl/torch_stable.html
-    displayName: "Install GPU requirements"
-    condition: eq(${{ parameters.gpu }}, true)
-
-  - script: |
-      ${{ parameters.prefix }} python -m pytest --pyargs spacy -W error
-    displayName: "Run CPU tests"
-    condition: eq(${{ parameters.gpu }}, false)
-
-  - script: |
-      ${{ parameters.prefix }} python -m pytest --pyargs spacy -W error -p spacy.tests.enable_gpu
-    displayName: "Run GPU tests"
-    condition: eq(${{ parameters.gpu }}, true)
+      python -W error -c "import spacy"
+    displayName: "Test import"

   - script: |
       python -m spacy download ca_core_news_sm
@@ -72,6 +59,11 @@ steps:
     displayName: 'Test download CLI'
     condition: eq(variables['python_version'], '3.8')

+  - script: |
+      python -W error -c "import ca_core_news_sm; nlp = ca_core_news_sm.load(); doc=nlp('test')"
+    displayName: 'Test no warnings on load (#11713)'
+    condition: eq(variables['python_version'], '3.8')
+
   - script: |
       python -m spacy convert extra/example_data/ner_example_data/ner-token-per-line-conll2003.json .
     displayName: 'Test convert CLI'
@@ -106,13 +98,22 @@ steps:
     displayName: 'Test assemble CLI vectors warning'
     condition: eq(variables['python_version'], '3.8')

+  - script: |
+      python -m pip install -U -r requirements.txt
+    displayName: "Install test requirements"
+
+  - script: |
+      python -m pytest --pyargs spacy -W error
+    displayName: "Run CPU tests"
+
+  - script: |
+      python -m pip install --pre thinc-apple-ops
+      python -m pytest --pyargs spacy
+    displayName: "Run CPU tests with thinc-apple-ops"
+    condition: and(startsWith(variables['imageName'], 'macos'), eq(variables['python.version'], '3.11'))
+
   - script: |
       python .github/validate_universe_json.py website/meta/universe.json
     displayName: 'Test website/meta/universe.json'
     condition: eq(variables['python_version'], '3.8')
-
-  - script: |
-      ${{ parameters.prefix }} python -m pip install --pre thinc-apple-ops
-      ${{ parameters.prefix }} python -m pytest --pyargs spacy
-    displayName: "Run CPU tests with thinc-apple-ops"
-    condition: and(startsWith(variables['imageName'], 'macos'), eq(variables['python.version'], '3.10'))

View File

@@ -15,7 +15,7 @@ jobs:
       - uses: actions/checkout@v3
         with:
          ref: ${{ github.head_ref }}
-      - uses: actions/setup-python@v3
+      - uses: actions/setup-python@v4
       - run: pip install black
       - name: Auto-format code if needed
         run: black spacy

View File

@@ -8,14 +8,14 @@ on:
 jobs:
   explosion-bot:
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-latest
     steps:
       - name: Dump GitHub context
         env:
           GITHUB_CONTEXT: ${{ toJson(github) }}
         run: echo "$GITHUB_CONTEXT"
-      - uses: actions/checkout@v1
-      - uses: actions/setup-python@v1
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
       - name: Install and run explosion-bot
         run: |
           pip install git+https://${{ secrets.EXPLOSIONBOT_TOKEN }}@github.com/explosion/explosion-bot

View File

@@ -14,7 +14,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v1
+        uses: actions/checkout@v3
         with:
           ref: ${{ matrix.branch }}
       - name: Get commits from past 24 hours
@@ -23,9 +23,9 @@ jobs:
           today=$(date '+%Y-%m-%d %H:%M:%S')
           yesterday=$(date -d "yesterday" '+%Y-%m-%d %H:%M:%S')
           if git log --after="$yesterday" --before="$today" | grep commit ; then
-            echo "::set-output name=run_tests::true"
+            echo run_tests=true >> $GITHUB_OUTPUT
           else
-            echo "::set-output name=run_tests::false"
+            echo run_tests=false >> $GITHUB_OUTPUT
           fi
       - name: Trigger buildkite build

View File

@@ -17,8 +17,10 @@ jobs:
         run: |
           echo "$GITHUB_CONTEXT"
-      - uses: actions/checkout@v1
-      - uses: actions/setup-python@v1
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
+        with:
+          python-version: '3.10'
       - name: Install Bernadette app dependency and send an alert
         env:
           SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}

View File

@@ -5,7 +5,7 @@ repos:
       - id: black
         language_version: python3.7
         additional_dependencies: ['click==8.0.4']
-  - repo: https://gitlab.com/pycqa/flake8
+  - repo: https://github.com/pycqa/flake8
     rev: 5.0.4
     hooks:
       - id: flake8

View File

@@ -87,13 +87,13 @@ jobs:
       #   python.version: "3.10"
       Python311Linux:
         imageName: 'ubuntu-latest'
-        python.version: '3.11.0-rc.2'
+        python.version: '3.11'
       Python311Windows:
         imageName: 'windows-latest'
-        python.version: '3.11.0-rc.2'
+        python.version: '3.11'
       Python311Mac:
         imageName: 'macos-latest'
-        python.version: '3.11.0-rc.2'
+        python.version: '3.11'
     maxParallel: 4
     pool:
       vmImage: $(imageName)
@@ -101,20 +101,3 @@ jobs:
     - template: .github/azure-steps.yml
       parameters:
         python_version: '$(python.version)'
-        architecture: 'x64'
-#  - job: "TestGPU"
-#    dependsOn: "Validate"
-#    strategy:
-#      matrix:
-#        Python38LinuxX64_GPU:
-#          python.version: '3.8'
-#    pool:
-#      name: "LinuxX64_GPU"
-#    steps:
-#      - template: .github/azure-steps.yml
-#        parameters:
-#          python_version: '$(python.version)'
-#          architecture: 'x64'
-#          gpu: true
-#          num_build_jobs: 24

View File

@@ -9,8 +9,9 @@ murmurhash>=0.28.0,<1.1.0
 wasabi>=0.9.1,<1.1.0
 srsly>=2.4.3,<3.0.0
 catalogue>=2.0.6,<2.1.0
-typer>=0.3.0,<0.5.0
+typer>=0.3.0,<0.8.0
 pathy>=0.3.5
+smart-open>=5.2.1,<7.0.0
 # Third party dependencies
 numpy>=1.15.0
 requests>=2.13.0,<3.0.0
@@ -30,7 +31,7 @@ pytest-timeout>=1.3.0,<2.0.0
 mock>=2.0.0,<3.0.0
 flake8>=3.8.0,<6.0.0
 hypothesis>=3.27.0,<7.0.0
-mypy>=0.980,<0.990; platform_machine != "aarch64" and python_version >= "3.7"
+mypy>=0.990,<0.1000; platform_machine != "aarch64" and python_version >= "3.7"
 types-dataclasses>=0.1.3; python_version < "3.7"
 types-mock>=0.1.1
 types-setuptools>=57.0.0

View File

@@ -51,8 +51,9 @@ install_requires =
     srsly>=2.4.3,<3.0.0
     catalogue>=2.0.6,<2.1.0
     # Third-party dependencies
-    typer>=0.3.0,<0.5.0
+    typer>=0.3.0,<0.8.0
     pathy>=0.3.5
+    smart-open>=5.2.1,<7.0.0
     tqdm>=4.38.0,<5.0.0
     numpy>=1.15.0
     requests>=2.13.0,<3.0.0

View File

@@ -27,6 +27,7 @@ from .project.dvc import project_update_dvc  # noqa: F401
 from .project.push import project_push  # noqa: F401
 from .project.pull import project_pull  # noqa: F401
 from .project.document import project_document  # noqa: F401
+from .find_threshold import find_threshold  # noqa: F401

 @app.command("link", no_args_is_help=True, deprecated=True, hidden=True)

View File

@@ -358,7 +358,7 @@ def download_file(src: Union[str, "Pathy"], dest: Path, *, force: bool = False)
     if dest.exists() and not force:
         return None
     src = str(src)
-    with smart_open.open(src, mode="rb", ignore_ext=True) as input_file:
+    with smart_open.open(src, mode="rb", compression="disable") as input_file:
         with dest.open(mode="wb") as output_file:
             shutil.copyfileobj(input_file, output_file)

spacy/cli/find_threshold.py (new file, +233 lines)
View File

@@ -0,0 +1,233 @@
import functools
import operator
from pathlib import Path
import logging
from typing import Optional, Tuple, Any, Dict, List
import numpy
import wasabi.tables
from ..pipeline import TextCategorizer, MultiLabel_TextCategorizer
from ..errors import Errors
from ..training import Corpus
from ._util import app, Arg, Opt, import_code, setup_gpu
from .. import util
_DEFAULTS = {
"n_trials": 11,
"use_gpu": -1,
"gold_preproc": False,
}
@app.command(
"find-threshold",
context_settings={"allow_extra_args": False, "ignore_unknown_options": True},
)
def find_threshold_cli(
# fmt: off
model: str = Arg(..., help="Model name or path"),
data_path: Path = Arg(..., help="Location of binary evaluation data in .spacy format", exists=True),
pipe_name: str = Arg(..., help="Name of pipe to examine thresholds for"),
threshold_key: str = Arg(..., help="Key of threshold attribute in component's configuration"),
scores_key: str = Arg(..., help="Metric to optimize"),
n_trials: int = Opt(_DEFAULTS["n_trials"], "--n_trials", "-n", help="Number of trials to determine optimal thresholds"),
code_path: Optional[Path] = Opt(None, "--code", "-c", help="Path to Python file with additional code (registered functions) to be imported"),
use_gpu: int = Opt(_DEFAULTS["use_gpu"], "--gpu-id", "-g", help="GPU ID or -1 for CPU"),
gold_preproc: bool = Opt(_DEFAULTS["gold_preproc"], "--gold-preproc", "-G", help="Use gold preprocessing"),
verbose: bool = Opt(False, "--verbose", "-V", "-VV", help="Display more information for debugging purposes"),
# fmt: on
):
"""
Runs prediction trials for a trained model with varying thresholds to maximize
the specified metric. The search space for the threshold is traversed linearly
from 0 to 1 in `n_trials` steps. Results are displayed in a table on `stdout`
(the corresponding API call to `spacy.cli.find_threshold.find_threshold()`
returns all results).
This is applicable only for components whose predictions are influenced by
thresholds - e.g. `textcat_multilabel` and `spancat`, but not `textcat`. Note
that the full path to the corresponding threshold attribute in the config has to
be provided.
DOCS: https://spacy.io/api/cli#find-threshold
"""
util.logger.setLevel(logging.DEBUG if verbose else logging.INFO)
import_code(code_path)
find_threshold(
model=model,
data_path=data_path,
pipe_name=pipe_name,
threshold_key=threshold_key,
scores_key=scores_key,
n_trials=n_trials,
use_gpu=use_gpu,
gold_preproc=gold_preproc,
silent=False,
)
def find_threshold(
model: str,
data_path: Path,
pipe_name: str,
threshold_key: str,
scores_key: str,
*,
n_trials: int = _DEFAULTS["n_trials"], # type: ignore
use_gpu: int = _DEFAULTS["use_gpu"], # type: ignore
gold_preproc: bool = _DEFAULTS["gold_preproc"], # type: ignore
silent: bool = True,
) -> Tuple[float, float, Dict[float, float]]:
"""
Runs prediction trials for models with varying thresholds to maximize the specified metric.
model (Union[str, Path]): Pipeline to evaluate. Can be a package or a path to a data directory.
data_path (Path): Path to file with DocBin with docs to use for threshold search.
pipe_name (str): Name of pipe to examine thresholds for.
threshold_key (str): Key of threshold attribute in component's configuration.
scores_key (str): Name of the score metric to optimize.
n_trials (int): Number of trials to determine optimal thresholds.
use_gpu (int): GPU ID or -1 for CPU.
gold_preproc (bool): Whether to use gold preprocessing. Gold preprocessing helps the annotations align to the
tokenization, and may result in sequences of more consistent length. However, it may reduce runtime accuracy due
to train/test skew.
silent (bool): Whether to print non-error-related output to stdout.
RETURNS (Tuple[float, float, Dict[float, float]]): Best found threshold, the corresponding score, scores for all
evaluated thresholds.
"""
setup_gpu(use_gpu, silent=silent)
data_path = util.ensure_path(data_path)
if not data_path.exists():
wasabi.msg.fail("Evaluation data not found", data_path, exits=1)
nlp = util.load_model(model)
if pipe_name not in nlp.component_names:
raise AttributeError(
Errors.E001.format(name=pipe_name, opts=nlp.component_names)
)
pipe = nlp.get_pipe(pipe_name)
if not hasattr(pipe, "scorer"):
raise AttributeError(Errors.E1047)
if type(pipe) == TextCategorizer:
wasabi.msg.warn(
"The `textcat` component doesn't use a threshold as it's not applicable to the concept of "
"exclusive classes. All thresholds will yield the same results."
)
if not silent:
wasabi.msg.info(
title=f"Optimizing for {scores_key} for component '{pipe_name}' with {n_trials} "
f"trials."
)
# Load evaluation corpus.
corpus = Corpus(data_path, gold_preproc=gold_preproc)
dev_dataset = list(corpus(nlp))
config_keys = threshold_key.split(".")
def set_nested_item(
config: Dict[str, Any], keys: List[str], value: float
) -> Dict[str, Any]:
"""Set item in nested dictionary. Adapted from https://stackoverflow.com/a/54138200.
config (Dict[str, Any]): Configuration dictionary.
keys (List[Any]): Path to value to set.
value (float): Value to set.
RETURNS (Dict[str, Any]): Updated dictionary.
"""
functools.reduce(operator.getitem, keys[:-1], config)[keys[-1]] = value
return config
def filter_config(
config: Dict[str, Any], keys: List[str], full_key: str
) -> Dict[str, Any]:
"""Filters provided config dictionary so that only the specified keys path remains.
config (Dict[str, Any]): Configuration dictionary.
keys (List[Any]): Path to value to set.
full_key (str): Full user-specified key.
RETURNS (Dict[str, Any]): Filtered dictionary.
"""
if keys[0] not in config:
wasabi.msg.fail(
title=f"Failed to look up `{full_key}` in config: sub-key {[keys[0]]} not found.",
text=f"Make sure you specified {[keys[0]]} correctly. The following sub-keys are available instead: "
f"{list(config.keys())}",
exits=1,
)
return {
keys[0]: filter_config(config[keys[0]], keys[1:], full_key)
if len(keys) > 1
else config[keys[0]]
}
# Evaluate with varying threshold values.
scores: Dict[float, float] = {}
config_keys_full = ["components", pipe_name, *config_keys]
table_col_widths = (10, 10)
thresholds = numpy.linspace(0, 1, n_trials)
print(wasabi.tables.row(["Threshold", f"{scores_key}"], widths=table_col_widths))
for threshold in thresholds:
# Reload pipeline with overrides specifying the new threshold.
nlp = util.load_model(
model,
config=set_nested_item(
filter_config(
nlp.config, config_keys_full, ".".join(config_keys_full)
).copy(),
config_keys_full,
threshold,
),
)
if hasattr(pipe, "cfg"):
setattr(
nlp.get_pipe(pipe_name),
"cfg",
set_nested_item(getattr(pipe, "cfg"), config_keys, threshold),
)
eval_scores = nlp.evaluate(dev_dataset)
if scores_key not in eval_scores:
wasabi.msg.fail(
title=f"Failed to look up score `{scores_key}` in evaluation results.",
text=f"Make sure you specified the correct value for `scores_key`. The following scores are "
f"available: {list(eval_scores.keys())}",
exits=1,
)
scores[threshold] = eval_scores[scores_key]
if not isinstance(scores[threshold], (float, int)):
wasabi.msg.fail(
f"Returned score for key '{scores_key}' is not numeric. Threshold optimization only works for numeric "
f"scores.",
exits=1,
)
print(
wasabi.row(
[round(threshold, 3), round(scores[threshold], 3)],
widths=table_col_widths,
)
)
best_threshold = max(scores.keys(), key=(lambda key: scores[key]))
# If all scores are identical, emit warning.
if len(set(scores.values())) == 1:
wasabi.msg.warn(
title="All scores are identical. Verify that all settings are correct.",
text=""
if (
not isinstance(pipe, MultiLabel_TextCategorizer)
or scores_key in ("cats_macro_f", "cats_micro_f")
)
else "Use `cats_macro_f` or `cats_micro_f` when optimizing the threshold for `textcat_multilabel`.",
)
else:
if not silent:
print(
f"\nBest threshold: {round(best_threshold, ndigits=4)} with {scores_key} value of {scores[best_threshold]}."
)
return best_threshold, scores[best_threshold], scores
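
For reference, a sketch of how the new command can be driven from Python; the pipeline and data paths are placeholders, and the `threshold`/`spans_sc_f` keys assume a `spancat` component as in the CLI documentation:

from pathlib import Path
from spacy.cli.find_threshold import find_threshold

# CLI equivalent: python -m spacy find-threshold my_nlp data.spacy spancat threshold spans_sc_f
best_threshold, best_score, all_scores = find_threshold(
    model="my_nlp",                 # package name or path to a pipeline directory
    data_path=Path("data.spacy"),   # DocBin with the evaluation docs
    pipe_name="spancat",
    threshold_key="threshold",      # path to the attribute inside the component config
    scores_key="spans_sc_f",        # metric to maximize
    silent=False,
)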

View File

@@ -189,7 +189,11 @@ def convert_asset_url(url: str) -> str:
     RETURNS (str): The converted URL.
     """
     # If the asset URL is a regular GitHub URL it's likely a mistake
-    if re.match(r"(http(s?)):\/\/github.com", url) and "releases/download" not in url:
+    if (
+        re.match(r"(http(s?)):\/\/github.com", url)
+        and "releases/download" not in url
+        and "/raw/" not in url
+    ):
         converted = url.replace("github.com", "raw.githubusercontent.com")
         converted = re.sub(r"/(tree|blob)/", "/", converted)
         msg.warn(

View File

@@ -10,6 +10,7 @@ from .._util import get_hash, get_checksum, download_file, ensure_pathy
 from ...util import make_tempdir, get_minor_version, ENV_VARS, check_bool_env_var
 from ...git_info import GIT_VERSION
 from ... import about
+from ...errors import Errors

 if TYPE_CHECKING:
     from pathy import Pathy  # noqa: F401
@@ -84,7 +85,23 @@ class RemoteStorage:
         with tarfile.open(tar_loc, mode=mode_string) as tar_file:
             # This requires that the path is added correctly, relative
             # to root. This is how we set things up in push()
-            tar_file.extractall(self.root)
+
+            # Disallow paths outside the current directory for the tar
+            # file (CVE-2007-4559, directory traversal vulnerability)
+            def is_within_directory(directory, target):
+                abs_directory = os.path.abspath(directory)
+                abs_target = os.path.abspath(target)
+                prefix = os.path.commonprefix([abs_directory, abs_target])
+                return prefix == abs_directory
+
+            def safe_extract(tar, path):
+                for member in tar.getmembers():
+                    member_path = os.path.join(path, member.name)
+                    if not is_within_directory(path, member_path):
+                        raise ValueError(Errors.E852)
+                tar.extractall(path)
+
+            safe_extract(tar_file, self.root)
         return url

     def find(
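
As a standalone illustration of what the new guard rejects (a sketch, not part of the commit): a member name that climbs out of the extraction root fails the `commonprefix` check, while a normal member passes.

import os

def is_within_directory(directory, target):
    abs_directory = os.path.abspath(directory)
    abs_target = os.path.abspath(target)
    return os.path.commonprefix([abs_directory, abs_target]) == abs_directory

# "../../etc/passwd" resolves outside the extraction root and is rejected:
assert not is_within_directory("/tmp/extract", "/tmp/extract/../../etc/passwd")
# A normal member stays inside the root and passes:
assert is_within_directory("/tmp/extract", "/tmp/extract/model/config.cfg")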

View File

@@ -53,6 +53,7 @@ def project_run(
     force: bool = False,
     dry: bool = False,
     capture: bool = False,
+    skip_requirements_check: bool = False,
 ) -> None:
     """Run a named script defined in the project.yml. If the script is part
     of the default pipeline (defined in the "run" section), DVC is used to
@@ -69,6 +70,7 @@ def project_run(
         sys.exit will be called with the return code. You should use capture=False
         when you want to turn over execution to the command, and capture=True
         when you want to run the command more like a function.
+    skip_requirements_check (bool): Whether to skip the requirements check.
     """
     config = load_project_config(project_dir, overrides=overrides)
     commands = {cmd["name"]: cmd for cmd in config.get("commands", [])}
@@ -76,9 +78,10 @@ def project_run(
     validate_subcommand(list(commands.keys()), list(workflows.keys()), subcommand)

     req_path = project_dir / "requirements.txt"
-    if config.get("check_requirements", True) and os.path.exists(req_path):
-        with req_path.open() as requirements_file:
-            _check_requirements([req.replace("\n", "") for req in requirements_file])
+    if not skip_requirements_check:
+        if config.get("check_requirements", True) and os.path.exists(req_path):
+            with req_path.open() as requirements_file:
+                _check_requirements([req.strip() for req in requirements_file])

     if subcommand in workflows:
         msg.info(f"Running workflow '{subcommand}'")
@@ -90,6 +93,7 @@ def project_run(
             force=force,
             dry=dry,
             capture=capture,
+            skip_requirements_check=True,
         )
     else:
         cmd = commands[subcommand]
@@ -338,6 +342,12 @@ def _check_requirements(requirements: List[str]) -> Tuple[bool, bool]:
             failed_pkgs_msgs.append(dnf.report())
         except pkg_resources.VersionConflict as vc:
             conflicting_pkgs_msgs.append(vc.report())
+        except Exception:
+            msg.warn(
+                f"Unable to check requirement: {req} "
+                "Checks are currently limited to requirement specifiers "
+                "(PEP 508)"
+            )

     if len(failed_pkgs_msgs) or len(conflicting_pkgs_msgs):
         msg.warn(
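
The new `except Exception` branch turns requirement lines that aren't plain PEP 508 specifiers (pip options, VCS URLs, and so on) into warnings instead of hard failures. A sketch of the distinction using `pkg_resources` (assumed here; the diff doesn't show `_check_requirements` in full):

import pkg_resources

def parses_as_requirement(line: str) -> bool:
    try:
        pkg_resources.Requirement.parse(line)
        return True
    except Exception:
        return False

assert parses_as_requirement("spacy>=3.4,<4.0")
assert not parses_as_requirement("-r other-requirements.txt")  # pip option, not PEP 508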

View File

@@ -37,6 +37,15 @@ bn:
   accuracy:
     name: sagorsarker/bangla-bert-base
     size_factor: 3
+ca:
+  word_vectors: null
+  transformer:
+    efficiency:
+      name: projecte-aina/roberta-base-ca-v2
+      size_factor: 3
+    accuracy:
+      name: projecte-aina/roberta-base-ca-v2
+      size_factor: 3
 da:
   word_vectors: da_core_news_lg
   transformer:

View File

@@ -90,6 +90,8 @@ dev_corpus = "corpora.dev"
 train_corpus = "corpora.train"
 # Optional callback before nlp object is saved to disk after training
 before_to_disk = null
+# Optional callback that is invoked at the start of each training step
+before_update = null

 [training.logger]
 @loggers = "spacy.ConsoleLogger.v1"
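
The new `before_update` slot takes a registered callback; per the schema added further down, it receives the `nlp` object and a dict of step info. A minimal sketch (the registry name `log_step` is made up for illustration):

from typing import Any, Dict
import spacy
from spacy.language import Language

@spacy.registry.callbacks("log_step")
def make_log_step():
    def before_update(nlp: Language, info: Dict[str, Any]) -> None:
        # Called at the start of each training step with step metadata.
        print("Starting training step:", info)
    return before_update

# In the config:
# [training.before_update]
# @callbacks = "log_step"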

View File

@@ -228,12 +228,13 @@ def parse_spans(doc: Doc, options: Dict[str, Any] = {}) -> Dict[str, Any]:
             "kb_id": span.kb_id_ if span.kb_id_ else "",
             "kb_url": kb_url_template.format(span.kb_id_) if kb_url_template else "#",
         }
-        for span in doc.spans[spans_key]
+        for span in doc.spans.get(spans_key, [])
     ]
     tokens = [token.text for token in doc]

     if not spans:
-        warnings.warn(Warnings.W117.format(spans_key=spans_key))
+        keys = list(doc.spans.keys())
+        warnings.warn(Warnings.W117.format(spans_key=spans_key, keys=keys))
     title = doc.user_data.get("title", None) if hasattr(doc, "user_data") else None
     settings = get_doc_settings(doc)
     return {
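
In practice this means rendering a Doc that lacks the requested span key now warns and lists the available keys (per the W117 change below) instead of raising a KeyError. A sketch:

import spacy
from spacy import displacy

nlp = spacy.blank("en")
doc = nlp("Welcome to the Bank of China.")
doc.spans["custom"] = [doc[3:6]]
# The default spans_key "sc" is absent: this now warns and lists ["custom"]
# rather than failing with a KeyError.
html = displacy.render(doc, style="span")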

View File

@@ -199,7 +199,7 @@ class Warnings(metaclass=ErrorsWithCodes):
     W117 = ("No spans to visualize found in Doc object with spans_key: '{spans_key}'. If this is "
             "surprising to you, make sure the Doc was processed using a model "
             "that supports span categorization, and check the `doc.spans[spans_key]` "
-            "property manually if necessary.")
+            "property manually if necessary.\n\nAvailable keys: {keys}")
     W118 = ("Term '{term}' not found in glossary. It may however be explained in documentation "
             "for the corpora used to train the language. Please check "
             "`nlp.meta[\"sources\"]` for any relevant links.")
@@ -212,8 +212,8 @@ class Warnings(metaclass=ErrorsWithCodes):
     W121 = ("Attempting to trace non-existent method '{method}' in pipe '{pipe}'")
     W122 = ("Couldn't trace method '{method}' in pipe '{pipe}'. This can happen if the pipe class "
             "is a Cython extension type.")
-    W123 = ("Argument {arg} with value {arg_value} is used instead of {config_value} as specified in the config. Be "
-            "aware that this might affect other components in your pipeline.")
+    W123 = ("Argument `enable` with value {enable} does not contain all values specified in the config option "
+            "`enabled` ({enabled}). Be aware that this might affect other components in your pipeline.")

 class Errors(metaclass=ErrorsWithCodes):
@@ -544,6 +544,10 @@ class Errors(metaclass=ErrorsWithCodes):
             "during training, make sure to include it in 'annotating components'")

     # New errors added in v3.x
+    E851 = ("The 'textcat' component labels should only have values of 0 or 1, "
+            "but found value of '{val}'.")
+    E852 = ("The tar file pulled from the remote attempted an unsafe path "
+            "traversal.")
     E853 = ("Unsupported component factory name '{name}'. The character '.' is "
             "not permitted in factory names.")
     E854 = ("Unable to set doc.ents. Check that the 'ents_filter' does not "
@@ -952,6 +956,7 @@ class Errors(metaclass=ErrorsWithCodes):
             "sure it's overwritten on the subclass.")
     E1046 = ("{cls_name} is an abstract class and cannot be instantiated. If you are looking for spaCy's default "
             "knowledge base, use `InMemoryLookupKB`.")
+    E1047 = ("`find_threshold()` only supports components with a `scorer` attribute.")

     # Deprecated model shortcuts, only used in errors and warnings

View File

@@ -28,34 +28,39 @@ class RussianLemmatizer(Lemmatizer):
                 from pymorphy2 import MorphAnalyzer
             except ImportError:
                 raise ImportError(
-                    "The Russian lemmatizer mode 'pymorphy2' requires the "
-                    "pymorphy2 library. Install it with: pip install pymorphy2"
+                    "The lemmatizer mode 'pymorphy2' requires the "
+                    "pymorphy2 library and dictionaries. Install them with: "
+                    "pip install pymorphy2"
+                    "# for Ukrainian dictionaries:"
+                    "pip install pymorphy2-dicts-uk"
                 ) from None
             if getattr(self, "_morph", None) is None:
-                self._morph = MorphAnalyzer()
-        elif mode == "pymorphy3":
+                self._morph = MorphAnalyzer(lang="ru")
+        elif mode in {"pymorphy3", "pymorphy3_lookup"}:
             try:
                 from pymorphy3 import MorphAnalyzer
             except ImportError:
                 raise ImportError(
-                    "The Russian lemmatizer mode 'pymorphy3' requires the "
-                    "pymorphy3 library. Install it with: pip install pymorphy3"
+                    "The lemmatizer mode 'pymorphy3' requires the "
+                    "pymorphy3 library and dictionaries. Install them with: "
+                    "pip install pymorphy3"
+                    "# for Ukrainian dictionaries:"
+                    "pip install pymorphy3-dicts-uk"
                 ) from None
             if getattr(self, "_morph", None) is None:
-                self._morph = MorphAnalyzer()
+                self._morph = MorphAnalyzer(lang="ru")
         super().__init__(
             vocab, model, name, mode=mode, overwrite=overwrite, scorer=scorer
         )

-    def pymorphy2_lemmatize(self, token: Token) -> List[str]:
+    def _pymorphy_lemmatize(self, token: Token) -> List[str]:
         string = token.text
         univ_pos = token.pos_
         morphology = token.morph.to_dict()
         if univ_pos == "PUNCT":
             return [PUNCT_RULES.get(string, string)]
         if univ_pos not in ("ADJ", "DET", "NOUN", "NUM", "PRON", "PROPN", "VERB"):
-            # Skip unchangeable pos
-            return [string.lower()]
+            return self._pymorphy_lookup_lemmatize(token)
         analyses = self._morph.parse(string)
         filtered_analyses = []
         for analysis in analyses:
@@ -63,8 +68,10 @@ class RussianLemmatizer(Lemmatizer):
                 # Skip suggested parse variant for unknown word for pymorphy
                 continue
             analysis_pos, _ = oc2ud(str(analysis.tag))
-            if analysis_pos == univ_pos or (
-                analysis_pos in ("NOUN", "PROPN") and univ_pos in ("NOUN", "PROPN")
+            if (
+                analysis_pos == univ_pos
+                or (analysis_pos in ("NOUN", "PROPN") and univ_pos in ("NOUN", "PROPN"))
+                or ((analysis_pos == "PRON") and (univ_pos == "DET"))
             ):
                 filtered_analyses.append(analysis)
         if not len(filtered_analyses):
@@ -107,15 +114,27 @@ class RussianLemmatizer(Lemmatizer):
             dict.fromkeys([analysis.normal_form for analysis in filtered_analyses])
         )

-    def pymorphy2_lookup_lemmatize(self, token: Token) -> List[str]:
+    def _pymorphy_lookup_lemmatize(self, token: Token) -> List[str]:
         string = token.text
         analyses = self._morph.parse(string)
-        if len(analyses) == 1:
-            return [analyses[0].normal_form]
+        # often multiple forms would derive from the same normal form
+        # thus check _unique_ normal forms
+        normal_forms = set([an.normal_form for an in analyses])
+        if len(normal_forms) == 1:
+            return [next(iter(normal_forms))]
         return [string]

+    def pymorphy2_lemmatize(self, token: Token) -> List[str]:
+        return self._pymorphy_lemmatize(token)
+
+    def pymorphy2_lookup_lemmatize(self, token: Token) -> List[str]:
+        return self._pymorphy_lookup_lemmatize(token)
+
     def pymorphy3_lemmatize(self, token: Token) -> List[str]:
-        return self.pymorphy2_lemmatize(token)
+        return self._pymorphy_lemmatize(token)
+
+    def pymorphy3_lookup_lemmatize(self, token: Token) -> List[str]:
+        return self._pymorphy_lookup_lemmatize(token)

 def oc2ud(oc_tag: str) -> Tuple[str, Dict[str, str]]:

View File

@@ -61,6 +61,11 @@ for abbr in [
     {ORTH: "2к23", NORM: "2023"},
     {ORTH: "2к24", NORM: "2024"},
     {ORTH: "2к25", NORM: "2025"},
+    {ORTH: "2к26", NORM: "2026"},
+    {ORTH: "2к27", NORM: "2027"},
+    {ORTH: "2к28", NORM: "2028"},
+    {ORTH: "2к29", NORM: "2029"},
+    {ORTH: "2к30", NORM: "2030"},
 ]:
     _exc[abbr[ORTH]] = [abbr]
@@ -268,8 +273,8 @@ for abbr in [
     {ORTH: "з-ка", NORM: "заимка"},
     {ORTH: "п-к", NORM: "починок"},
     {ORTH: "киш.", NORM: "кишлак"},
-    {ORTH: "п. ст. ", NORM: "поселок станция"},
-    {ORTH: "п. ж/д ст. ", NORM: "поселок при железнодорожной станции"},
+    {ORTH: "п. ст.", NORM: "поселок станция"},
+    {ORTH: "п. ж/д ст.", NORM: "поселок при железнодорожной станции"},
     {ORTH: "ж/д бл-ст", NORM: "железнодорожный блокпост"},
     {ORTH: "ж/д б-ка", NORM: "железнодорожная будка"},
     {ORTH: "ж/д в-ка", NORM: "железнодорожная ветка"},
@@ -280,12 +285,12 @@ for abbr in [
     {ORTH: "ж/д п.п.", NORM: "железнодорожный путевой пост"},
     {ORTH: "ж/д о.п.", NORM: "железнодорожный остановочный пункт"},
     {ORTH: "ж/д рзд.", NORM: "железнодорожный разъезд"},
-    {ORTH: "ж/д ст. ", NORM: "железнодорожная станция"},
+    {ORTH: "ж/д ст.", NORM: "железнодорожная станция"},
     {ORTH: "м-ко", NORM: "местечко"},
     {ORTH: "д.", NORM: "деревня"},
     {ORTH: "с.", NORM: "село"},
     {ORTH: "сл.", NORM: "слобода"},
-    {ORTH: "ст. ", NORM: "станция"},
+    {ORTH: "ст.", NORM: "станция"},
     {ORTH: "ст-ца", NORM: "станица"},
     {ORTH: "у.", NORM: "улус"},
     {ORTH: "х.", NORM: "хутор"},
@@ -388,8 +393,9 @@ for abbr in [
     {ORTH: "прим.", NORM: "примечание"},
     {ORTH: "прим.ред.", NORM: "примечание редакции"},
     {ORTH: "см. также", NORM: "смотри также"},
-    {ORTH: "кв.м.", NORM: "квадрантный метр"},
-    {ORTH: "м2", NORM: "квадрантный метр"},
+    {ORTH: "см.", NORM: "смотри"},
+    {ORTH: "кв.м.", NORM: "квадратный метр"},
+    {ORTH: "м2", NORM: "квадратный метр"},
     {ORTH: "б/у", NORM: "бывший в употреблении"},
     {ORTH: "сокр.", NORM: "сокращение"},
     {ORTH: "чел.", NORM: "человек"},

View File

@@ -29,7 +29,7 @@ class UkrainianLemmatizer(RussianLemmatizer):
             ) from None
             if getattr(self, "_morph", None) is None:
                 self._morph = MorphAnalyzer(lang="uk")
-        elif mode == "pymorphy3":
+        elif mode in {"pymorphy3", "pymorphy3_lookup"}:
             try:
                 from pymorphy3 import MorphAnalyzer
             except ImportError:

View File

@@ -706,13 +706,7 @@ class Language:
         # Check source type
         if not isinstance(source, Language):
             raise ValueError(Errors.E945.format(name=source_name, source=type(source)))
-        # Check vectors, with faster checks first
-        if (
-            self.vocab.vectors.shape != source.vocab.vectors.shape
-            or self.vocab.vectors.key2row != source.vocab.vectors.key2row
-            or self.vocab.vectors.to_bytes(exclude=["strings"])
-            != source.vocab.vectors.to_bytes(exclude=["strings"])
-        ):
+        if self.vocab.vectors != source.vocab.vectors:
             warnings.warn(Warnings.W113.format(name=source_name))
         if source_name not in source.component_names:
             raise KeyError(
@@ -790,14 +784,6 @@ class Language:
                     factory_name, source, name=name
                 )
             else:
-                if not self.has_factory(factory_name):
-                    err = Errors.E002.format(
-                        name=factory_name,
-                        opts=", ".join(self.factory_names),
-                        method="add_pipe",
-                        lang=util.get_object_name(self),
-                        lang_code=self.lang,
-                    )
                 pipe_component = self.create_pipe(
                     factory_name,
                     name=name,
@@ -1879,31 +1865,22 @@ class Language:
         if isinstance(exclude, str):
             exclude = [exclude]

-        def fetch_pipes_status(value: Iterable[str], key: str) -> Iterable[str]:
-            """Fetch value for `enable` or `disable` w.r.t. the specified config and passed arguments passed to
-            .load(). If both arguments and config specified values for this field, the passed arguments take precedence
-            and a warning is printed.
-            value (Iterable[str]): Passed value for `enable` or `disable`.
-            key (str): Key for field in config (either "enabled" or "disabled").
-            RETURN (Iterable[str]):
-            """
-            # We assume that no argument was passed if the value is the specified default value.
-            if id(value) == id(_DEFAULT_EMPTY_PIPES):
-                return config["nlp"].get(key, [])
-            else:
-                if len(config["nlp"].get(key, [])):
-                    warnings.warn(
-                        Warnings.W123.format(
-                            arg=key[:-1],
-                            arg_value=value,
-                            config_value=config["nlp"][key],
-                        )
-                    )
-                return value
-
+        # `enable` should not be merged with `enabled` (the opposite is true for `disable`/`disabled`). If the config
+        # specifies values for `enabled` not included in `enable`, emit warning.
+        if id(enable) != id(_DEFAULT_EMPTY_PIPES):
+            enabled = config["nlp"].get("enabled", [])
+            if len(enabled) and not set(enabled).issubset(enable):
+                warnings.warn(
+                    Warnings.W123.format(
+                        enable=enable,
+                        enabled=enabled,
+                    )
+                )
+
+        # Ensure sets of disabled/enabled pipe names are not contradictory.
         disabled_pipes = cls._resolve_component_status(
-            fetch_pipes_status(disable, "disabled"),
-            fetch_pipes_status(enable, "enabled"),
+            list({*disable, *config["nlp"].get("disabled", [])}),
+            enable,
             config["nlp"]["pipeline"],
         )
         nlp._disabled = set(p for p in disabled_pipes if p not in exclude)
@@ -2084,10 +2061,12 @@ class Language:
         if enable:
             if isinstance(enable, str):
                 enable = [enable]
-            to_disable = [
-                pipe_name for pipe_name in pipe_names if pipe_name not in enable
-            ]
-            if disable and disable != to_disable:
+            to_disable = {
+                *[pipe_name for pipe_name in pipe_names if pipe_name not in enable],
+                *disable,
+            }
+            # If any pipe to be enabled is in to_disable, the specification is inconsistent.
+            if len(set(enable) & to_disable):
                 raise ValueError(Errors.E1042.format(enable=enable, disable=disable))
         return tuple(to_disable)
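
The net effect on `spacy.load()`, matching the updated tests further down: `enable` is now merged with the config's `disabled` list rather than overriding it, and only a direct contradiction raises. A sketch, with a hypothetical pipeline directory:

import spacy

# Merged: everything outside `enable` is disabled, plus the config's
# `disabled` entries; no warning or error.
nlp = spacy.load(
    "my_pipeline", enable=["tagger"], config={"nlp": {"disabled": ["senter"]}}
)

# Contradiction: "senter" is both enabled and disabled -> ValueError (E1042).
spacy.load(
    "my_pipeline", enable=["senter"], config={"nlp": {"disabled": ["senter", "tagger"]}}
)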

View File

@@ -1,4 +1,4 @@
-# cython: infer_types=True, cython: profile=True
+# cython: infer_types=True, profile=True
 from typing import List, Iterable

 from libcpp.vector cimport vector

View File

@@ -1,7 +1,7 @@
-from typing import List, Dict, Callable, Tuple, Optional, Iterable, Any, cast
+from typing import List, Dict, Callable, Tuple, Optional, Iterable, Any
 from thinc.api import Config, Model, get_current_ops, set_dropout_rate, Ops
 from thinc.api import Optimizer
-from thinc.types import Ragged, Ints2d, Floats2d, Ints1d
+from thinc.types import Ragged, Ints2d, Floats2d
 import numpy

View File

@@ -72,7 +72,7 @@ subword_features = true
     "textcat",
     assigns=["doc.cats"],
     default_config={
-        "threshold": 0.5,
+        "threshold": 0.0,
         "model": DEFAULT_SINGLE_TEXTCAT_MODEL,
         "scorer": {"@scorers": "spacy.textcat_scorer.v1"},
     },
@@ -144,7 +144,8 @@ class TextCategorizer(TrainablePipe):
         model (thinc.api.Model): The Thinc Model powering the pipeline component.
         name (str): The component instance name, used to add entries to the
             losses during training.
-        threshold (float): Cutoff to consider a prediction "positive".
+        threshold (float): Unused, not needed for single-label (exclusive
+            classes) classification.
         scorer (Optional[Callable]): The scoring method. Defaults to
             Scorer.score_cats for the attribute "cats".
@@ -154,7 +155,11 @@ class TextCategorizer(TrainablePipe):
         self.model = model
         self.name = name
         self._rehearsal_model = None
-        cfg = {"labels": [], "threshold": threshold, "positive_label": None}
+        cfg: Dict[str, Any] = {
+            "labels": [],
+            "threshold": threshold,
+            "positive_label": None,
+        }
         self.cfg = dict(cfg)
         self.scorer = scorer
@@ -396,5 +401,9 @@ class TextCategorizer(TrainablePipe):
     def _validate_categories(self, examples: Iterable[Example]):
         """Check whether the provided examples all have single-label cats annotations."""
         for ex in examples:
-            if list(ex.reference.cats.values()).count(1.0) > 1:
+            vals = list(ex.reference.cats.values())
+            if vals.count(1.0) > 1:
                 raise ValueError(Errors.E895.format(value=ex.reference.cats))
+            for val in vals:
+                if not (val == 1.0 or val == 0.0):
+                    raise ValueError(Errors.E851.format(val=val))

View File

@@ -192,6 +192,8 @@ class MultiLabel_TextCategorizer(TextCategorizer):
         for label in labels:
             self.add_label(label)
         subbatch = list(islice(get_examples(), 10))
+        self._validate_categories(subbatch)
+
         doc_sample = [eg.reference for eg in subbatch]
         label_sample, _ = self._examples_to_truth(subbatch)
         self._require_labels()
@@ -202,4 +204,8 @@ class MultiLabel_TextCategorizer(TextCategorizer):
     def _validate_categories(self, examples: Iterable[Example]):
         """This component allows any type of single- or multi-label annotations.
         This method overwrites the more strict one from 'textcat'."""
-        pass
+        # check that annotation values are valid
+        for ex in examples:
+            for val in ex.reference.cats.values():
+                if not (val == 1.0 or val == 0.0):
+                    raise ValueError(Errors.E851.format(val=val))

View File

@@ -335,6 +335,7 @@ class ConfigSchemaTraining(BaseModel):
     frozen_components: List[str] = Field(..., title="Pipeline components that shouldn't be updated during training")
     annotating_components: List[str] = Field(..., title="Pipeline components that should set annotations during training")
     before_to_disk: Optional[Callable[["Language"], "Language"]] = Field(..., title="Optional callback to modify nlp object after training, before it's saved to disk")
+    before_update: Optional[Callable[["Language", Dict[str, Any]], None]] = Field(..., title="Optional callback that is invoked at the start of each training step")
     # fmt: on

     class Config:

View File

@@ -446,7 +446,7 @@ class Scorer:
         labels (Iterable[str]): The set of possible labels. Defaults to [].
         multi_label (bool): Whether the attribute allows multiple labels.
             Defaults to True. When set to False (exclusive labels), missing
-            gold labels are interpreted as 0.0.
+            gold labels are interpreted as 0.0 and the threshold is set to 0.0.
         positive_label (str): The positive label for a binary task with
             exclusive classes. Defaults to None.
         threshold (float): Cutoff to consider a prediction "positive". Defaults
@@ -471,6 +471,8 @@ class Scorer:
         """
         if threshold is None:
             threshold = 0.5 if multi_label else 0.0
+        if not multi_label:
+            threshold = 0.0
         f_per_type = {label: PRFScore() for label in labels}
         auc_per_type = {label: ROCAUCScore() for label in labels}
         labels = set(labels)
@@ -505,20 +507,18 @@ class Scorer:
                 # Get the highest-scoring for each.
                 pred_label, pred_score = max(pred_cats.items(), key=lambda it: it[1])
                 gold_label, gold_score = max(gold_cats.items(), key=lambda it: it[1])
-                if pred_label == gold_label and pred_score >= threshold:
+                if pred_label == gold_label:
                     f_per_type[pred_label].tp += 1
                 else:
                     f_per_type[gold_label].fn += 1
-                    if pred_score >= threshold:
-                        f_per_type[pred_label].fp += 1
+                    f_per_type[pred_label].fp += 1
             elif gold_cats:
                 gold_label, gold_score = max(gold_cats, key=lambda it: it[1])
                 if gold_score > 0:
                     f_per_type[gold_label].fn += 1
             elif pred_cats:
                 pred_label, pred_score = max(pred_cats.items(), key=lambda it: it[1])
-                if pred_score >= threshold:
-                    f_per_type[pred_label].fp += 1
+                f_per_type[pred_label].fp += 1

         micro_prf = PRFScore()
         for label_prf in f_per_type.values():
             micro_prf.tp += label_prf.tp

View File

@@ -337,17 +337,17 @@ def ru_tokenizer():
     return get_lang_class("ru")().tokenizer

-@pytest.fixture
+@pytest.fixture(scope="session")
 def ru_lemmatizer():
     pytest.importorskip("pymorphy3")
     return get_lang_class("ru")().add_pipe("lemmatizer")

-@pytest.fixture
+@pytest.fixture(scope="session")
 def ru_lookup_lemmatizer():
-    pytest.importorskip("pymorphy2")
+    pytest.importorskip("pymorphy3")
     return get_lang_class("ru")().add_pipe(
-        "lemmatizer", config={"mode": "pymorphy2_lookup"}
+        "lemmatizer", config={"mode": "pymorphy3_lookup"}
     )
@@ -423,19 +423,19 @@ def uk_tokenizer():
     return get_lang_class("uk")().tokenizer

-@pytest.fixture
+@pytest.fixture(scope="session")
 def uk_lemmatizer():
     pytest.importorskip("pymorphy3")
     pytest.importorskip("pymorphy3_dicts_uk")
     return get_lang_class("uk")().add_pipe("lemmatizer")

-@pytest.fixture
+@pytest.fixture(scope="session")
 def uk_lookup_lemmatizer():
-    pytest.importorskip("pymorphy2")
-    pytest.importorskip("pymorphy2_dicts_uk")
+    pytest.importorskip("pymorphy3")
+    pytest.importorskip("pymorphy3_dicts_uk")
     return get_lang_class("uk")().add_pipe(
-        "lemmatizer", config={"mode": "pymorphy2_lookup"}
+        "lemmatizer", config={"mode": "pymorphy3_lookup"}
     )

View File

@@ -370,3 +370,12 @@ def test_json_to_doc_validation_error(doc):
     doc_json.pop("tokens")
     with pytest.raises(ValueError):
         Doc(doc.vocab).from_json(doc_json, validate=True)
+
+
+def test_to_json_underscore_doc_getters(doc):
+    def get_text_length(doc):
+        return len(doc.text)
+
+    Doc.set_extension("text_length", getter=get_text_length)
+    doc_json = doc.to_json(underscore=["text_length"])
+    assert doc_json["_"]["text_length"] == get_text_length(doc)

View File

@@ -81,6 +81,7 @@ def test_ru_lemmatizer_punct(ru_lemmatizer):

 def test_ru_doc_lookup_lemmatization(ru_lookup_lemmatizer):
+    assert ru_lookup_lemmatizer.mode == "pymorphy3_lookup"
     words = ["мама", "мыла", "раму"]
     pos = ["NOUN", "VERB", "NOUN"]
     morphs = [
@@ -92,3 +93,17 @@ def test_ru_doc_lookup_lemmatization(ru_lookup_lemmatizer):
     doc = ru_lookup_lemmatizer(doc)
     lemmas = [token.lemma_ for token in doc]
     assert lemmas == ["мама", "мыла", "раму"]
+
+
+@pytest.mark.parametrize(
+    "word,lemma",
+    (
+        ("бременем", "бремя"),
+        ("будешь", "быть"),
+        ("какая-то", "какой-то"),
+    ),
+)
+def test_ru_lookup_lemmatizer(ru_lookup_lemmatizer, word, lemma):
+    assert ru_lookup_lemmatizer.mode == "pymorphy3_lookup"
+    doc = Doc(ru_lookup_lemmatizer.vocab, words=[word])
+    assert ru_lookup_lemmatizer(doc)[0].lemma_ == lemma

View File

@@ -8,12 +8,20 @@ pytestmark = pytest.mark.filterwarnings("ignore::DeprecationWarning")
 def test_uk_lemmatizer(uk_lemmatizer):
     """Check that the default uk lemmatizer runs."""
     doc = Doc(uk_lemmatizer.vocab, words=["a", "b", "c"])
+    assert uk_lemmatizer.mode == "pymorphy3"
     uk_lemmatizer(doc)
     assert [token.lemma for token in doc]

-def test_uk_lookup_lemmatizer(uk_lookup_lemmatizer):
-    """Check that the lookup uk lemmatizer runs."""
-    doc = Doc(uk_lookup_lemmatizer.vocab, words=["a", "b", "c"])
-    uk_lookup_lemmatizer(doc)
-    assert [token.lemma for token in doc]
+@pytest.mark.parametrize(
+    "word,lemma",
+    (
+        ("якийсь", "якийсь"),
+        ("розповідають", "розповідати"),
+        ("розповіси", "розповісти"),
+    ),
+)
+def test_uk_lookup_lemmatizer(uk_lookup_lemmatizer, word, lemma):
+    assert uk_lookup_lemmatizer.mode == "pymorphy3_lookup"
+    doc = Doc(uk_lookup_lemmatizer.vocab, words=[word])
+    assert uk_lookup_lemmatizer(doc)[0].lemma_ == lemma

View File

@@ -615,20 +615,18 @@ def test_enable_disable_conflict_with_config():
     with make_tempdir() as tmp_dir:
         nlp.to_disk(tmp_dir)
-        # Expected to fail, as config and arguments conflict.
-        with pytest.raises(ValueError):
-            spacy.load(
-                tmp_dir, enable=["tagger"], config={"nlp": {"disabled": ["senter"]}}
-            )
+        # Expected to succeed, as config and arguments do not conflict.
+        assert spacy.load(
+            tmp_dir, enable=["tagger"], config={"nlp": {"disabled": ["senter"]}}
+        ).disabled == ["senter", "sentencizer"]
         # Expected to succeed without warning due to the lack of a conflicting config option.
         spacy.load(tmp_dir, enable=["tagger"])
-        # Expected to succeed with a warning, as disable=[] should override the config setting.
-        with pytest.warns(UserWarning):
+        # Expected to fail due to conflict between enable and disabled.
+        with pytest.raises(ValueError):
             spacy.load(
                 tmp_dir,
-                enable=["tagger"],
-                disable=[],
-                config={"nlp": {"disabled": ["senter"]}},
+                enable=["senter"],
+                config={"nlp": {"disabled": ["senter", "tagger"]}},
             )

View File

@@ -360,6 +360,30 @@ def test_label_types(name):
         nlp.initialize()

+@pytest.mark.parametrize(
+    "name,get_examples",
+    [
+        ("textcat", make_get_examples_single_label),
+        ("textcat_multilabel", make_get_examples_multi_label),
+    ],
+)
+def test_invalid_label_value(name, get_examples):
+    nlp = Language()
+    textcat = nlp.add_pipe(name)
+    example_getter = get_examples(nlp)
+
+    def invalid_examples():
+        # make one example with an invalid score
+        examples = example_getter()
+        ref = examples[0].reference
+        key = list(ref.cats.keys())[0]
+        ref.cats[key] = 2.0
+        return examples
+
+    with pytest.raises(ValueError):
+        nlp.initialize(get_examples=invalid_examples)
+
+
 @pytest.mark.parametrize("name", ["textcat", "textcat_multilabel"])
 def test_no_label(name):
     nlp = Language()
@@ -823,10 +847,10 @@ def test_textcat_loss(multi_label: bool, expected_loss: float):
     assert loss == expected_loss

-def test_textcat_threshold():
+def test_textcat_multilabel_threshold():
     # Ensure the scorer can be called with a different threshold
     nlp = English()
-    nlp.add_pipe("textcat")
+    nlp.add_pipe("textcat_multilabel")

     train_examples = []
     for text, annotations in TRAIN_DATA_SINGLE_LABEL:
@@ -849,7 +873,7 @@ def test_textcat_threshold():
     )
     pos_f = scores["cats_score"]
     assert scores["cats_f_per_type"]["POSITIVE"]["r"] == 1.0
-    assert pos_f > macro_f
+    assert pos_f >= macro_f
def test_textcat_multi_threshold(): def test_textcat_multi_threshold():

View File

@@ -404,11 +404,10 @@ def test_serialize_pipeline_disable_enable():
    assert nlp3.component_names == ["ner", "tagger"]
    with make_tempdir() as d:
        nlp3.to_disk(d)
        nlp4 = spacy.load(d, disable=["ner"])
        assert nlp4.pipe_names == []
        assert nlp4.component_names == ["ner", "tagger"]
        assert nlp4.disabled == ["ner", "tagger"]
    with make_tempdir() as d:
        nlp.to_disk(d)
        nlp5 = spacy.load(d, exclude=["tagger"])

View File

@@ -1,8 +1,10 @@
import os
import math
from collections import Counter
from typing import Tuple, List, Dict, Any
import pkg_resources
import numpy
import pytest
import srsly
from click import NoSuchOption

@@ -15,6 +17,7 @@ from spacy.cli._util import is_subpath_of, load_project_config
from spacy.cli._util import parse_config_overrides, string_to_list
from spacy.cli._util import substitute_project_variables
from spacy.cli._util import validate_project_commands
from spacy.cli._util import upload_file, download_file
from spacy.cli.debug_data import _compile_gold, _get_labels_from_model
from spacy.cli.debug_data import _get_labels_from_spancat
from spacy.cli.debug_data import _get_distribution, _get_kl_divergence

@@ -25,12 +28,14 @@ from spacy.cli.download import get_compatibility, get_version
from spacy.cli.init_config import RECOMMENDATIONS, init_config, fill_config
from spacy.cli.package import get_third_party_dependencies
from spacy.cli.package import _is_permitted_package_name
from spacy.cli.project.run import _check_requirements
from spacy.cli.validate import get_model_pkgs
from spacy.cli.find_threshold import find_threshold
from spacy.lang.en import English
from spacy.lang.nl import Dutch
from spacy.language import Language
from spacy.schemas import ProjectConfigSchema, RecommendationSchema, validate
from spacy.tokens import Doc, DocBin
from spacy.tokens.span import Span
from spacy.training import Example, docs_to_json, offsets_to_biluo_tags
from spacy.training.converters import conll_ner_to_docs, conllu_to_docs

@@ -855,3 +860,173 @@ def test_span_length_freq_dist_output_must_be_correct():
    span_freqs = _get_spans_length_freq_dist(sample_span_lengths, threshold)
    assert sum(span_freqs.values()) >= threshold
    assert list(span_freqs.keys()) == [3, 1, 4, 5, 2]


def test_cli_find_threshold(capsys):
    thresholds = numpy.linspace(0, 1, 10)

    def make_examples(nlp: Language) -> List[Example]:
        docs: List[Example] = []

        for t in [
            (
                "I am angry and confused in the Bank of America.",
                {
                    "cats": {"ANGRY": 1.0, "CONFUSED": 1.0, "HAPPY": 0.0},
                    "spans": {"sc": [(31, 46, "ORG")]},
                },
            ),
            (
                "I am confused but happy in New York.",
                {
                    "cats": {"ANGRY": 0.0, "CONFUSED": 1.0, "HAPPY": 1.0},
                    "spans": {"sc": [(27, 35, "GPE")]},
                },
            ),
        ]:
            doc = nlp.make_doc(t[0])
            docs.append(Example.from_dict(doc, t[1]))

        return docs

    def init_nlp(
        components: Tuple[Tuple[str, Dict[str, Any]], ...] = ()
    ) -> Tuple[Language, List[Example]]:
        new_nlp = English()
        new_nlp.add_pipe(  # type: ignore
            factory_name="textcat_multilabel",
            name="tc_multi",
            config={"threshold": 0.9},
        )

        # Append additional components to pipeline.
        for cfn, comp_config in components:
            new_nlp.add_pipe(cfn, config=comp_config)

        new_examples = make_examples(new_nlp)
        new_nlp.initialize(get_examples=lambda: new_examples)
        for i in range(5):
            new_nlp.update(new_examples)

        return new_nlp, new_examples

    with make_tempdir() as docs_dir:
        # Check whether find_threshold() identifies lowest threshold above 0 as (first) ideal threshold, as this matches
        # the current model behavior with the examples above. This can break once the model behavior changes and serves
        # mostly as a smoke test.
        nlp, examples = init_nlp()
        DocBin(docs=[example.reference for example in examples]).to_disk(
            docs_dir / "docs.spacy"
        )
        with make_tempdir() as nlp_dir:
            nlp.to_disk(nlp_dir)
            res = find_threshold(
                model=nlp_dir,
                data_path=docs_dir / "docs.spacy",
                pipe_name="tc_multi",
                threshold_key="threshold",
                scores_key="cats_macro_f",
                silent=True,
            )
            assert res[0] != thresholds[0]
            assert thresholds[0] < res[0] < thresholds[9]
            assert res[1] == 1.0
            assert res[2][1.0] == 0.0

        # Test with spancat.
        nlp, _ = init_nlp((("spancat", {}),))
        with make_tempdir() as nlp_dir:
            nlp.to_disk(nlp_dir)
            res = find_threshold(
                model=nlp_dir,
                data_path=docs_dir / "docs.spacy",
                pipe_name="spancat",
                threshold_key="threshold",
                scores_key="spans_sc_f",
                silent=True,
            )
            assert res[0] != thresholds[0]
            assert thresholds[0] < res[0] < thresholds[8]
            assert res[1] >= 0.6
            assert res[2][1.0] == 0.0

        # Having multiple textcat_multilabel components should work, since the name has to be specified.
        nlp, _ = init_nlp((("textcat_multilabel", {}),))
        with make_tempdir() as nlp_dir:
            nlp.to_disk(nlp_dir)
            assert find_threshold(
                model=nlp_dir,
                data_path=docs_dir / "docs.spacy",
                pipe_name="tc_multi",
                threshold_key="threshold",
                scores_key="cats_macro_f",
                silent=True,
            )

        # Specifying the name of a non-existing pipe should fail.
        nlp, _ = init_nlp()
        with make_tempdir() as nlp_dir:
            nlp.to_disk(nlp_dir)
            with pytest.raises(AttributeError):
                find_threshold(
                    model=nlp_dir,
                    data_path=docs_dir / "docs.spacy",
                    pipe_name="_",
                    threshold_key="threshold",
                    scores_key="cats_macro_f",
                    silent=True,
                )


@pytest.mark.parametrize(
    "reqs,output",
    [
        [
            """
            spacy
            # comment
            thinc""",
            (False, False),
        ],
        [
            """# comment
            --some-flag
            spacy""",
            (False, False),
        ],
        [
            """# comment
            --some-flag
            spacy; python_version >= '3.6'""",
            (False, False),
        ],
        [
            """# comment
            spacyunknowndoesnotexist12345""",
            (True, False),
        ],
    ],
)
def test_project_check_requirements(reqs, output):
    # excessive guard against unlikely package name
    try:
        pkg_resources.require("spacyunknowndoesnotexist12345")
    except pkg_resources.DistributionNotFound:
        assert output == _check_requirements([req.strip() for req in reqs.split("\n")])


def test_upload_download_local_file():
    with make_tempdir() as d1, make_tempdir() as d2:
        filename = "f.txt"
        content = "content"
        local_file = d1 / filename
        remote_file = d2 / filename
        with local_file.open(mode="w") as file_:
            file_.write(content)
        upload_file(local_file, remote_file)
        local_file.unlink()
        download_file(remote_file, local_file)
        with local_file.open(mode="r") as file_:
            assert file_.read() == content

View File

@@ -203,6 +203,16 @@ def test_displacy_parse_spans_different_spans_key(en_vocab):
    ]


def test_displacy_parse_empty_spans_key(en_vocab):
    """Test that having an unset spans key doesn't raise an error"""
    doc = Doc(en_vocab, words=["Welcome", "to", "the", "Bank", "of", "China"])
    doc.spans["custom"] = [Span(doc, 3, 6, "BANK")]
    with pytest.warns(UserWarning, match="W117"):
        spans = displacy.parse_spans(doc)

    assert isinstance(spans, dict)


def test_displacy_parse_ents(en_vocab):
    """Test that named entities on a Doc are converted into displaCy's format."""
    doc = Doc(en_vocab, words=["But", "Google", "is", "starting", "from", "behind"])

View File

@@ -474,3 +474,50 @@ def test_prf_score():
    assert (a.precision, a.recall, a.fscore) == approx(
        (c.precision, c.recall, c.fscore)
    )


def test_score_cats(en_tokenizer):
    text = "some text"
    gold_doc = en_tokenizer(text)
    gold_doc.cats = {"POSITIVE": 1.0, "NEGATIVE": 0.0}
    pred_doc = en_tokenizer(text)
    pred_doc.cats = {"POSITIVE": 0.75, "NEGATIVE": 0.25}
    example = Example(pred_doc, gold_doc)
    # threshold is ignored for multi_label=False
    scores1 = Scorer.score_cats(
        [example],
        "cats",
        labels=list(gold_doc.cats.keys()),
        multi_label=False,
        positive_label="POSITIVE",
        threshold=0.1,
    )
    scores2 = Scorer.score_cats(
        [example],
        "cats",
        labels=list(gold_doc.cats.keys()),
        multi_label=False,
        positive_label="POSITIVE",
        threshold=0.9,
    )
    assert scores1["cats_score"] == 1.0
    assert scores2["cats_score"] == 1.0
    assert scores1 == scores2
    # threshold is relevant for multi_label=True
    scores = Scorer.score_cats(
        [example],
        "cats",
        labels=list(gold_doc.cats.keys()),
        multi_label=True,
        threshold=0.9,
    )
    assert scores["cats_macro_f"] == 0.0
    # threshold is relevant for multi_label=True
    scores = Scorer.score_cats(
        [example],
        "cats",
        labels=list(gold_doc.cats.keys()),
        multi_label=True,
        threshold=0.1,
    )
    assert scores["cats_macro_f"] == 0.5

View File

@@ -2,6 +2,7 @@ import random
import numpy
import pytest
import spacy
import srsly
from spacy.lang.en import English
from spacy.tokens import Doc, DocBin

@@ -11,9 +12,10 @@ from spacy.training import offsets_to_biluo_tags
from spacy.training.alignment_array import AlignmentArray
from spacy.training.align import get_alignments
from spacy.training.converters import json_to_docs
from spacy.training.loop import train_while_improving
from spacy.util import get_words_and_spaces, load_model_from_path, minibatch
from spacy.util import load_config_from_str
from thinc.api import compounding, Adam

from ..util import make_tempdir

@@ -1112,3 +1114,39 @@ def test_retokenized_docs(doc):
        retokenizer.merge(doc1[0:2])
        retokenizer.merge(doc1[5:7])
    assert example.get_aligned("ORTH", as_string=True) == expected2


def test_training_before_update(doc):
    def before_update(nlp, args):
        assert args["step"] == 0
        assert args["epoch"] == 1

        # Raise an error here as the rest of the loop
        # will not run to completion due to uninitialized
        # models.
        raise ValueError("ran_before_update")

    def generate_batch():
        yield 1, [Example(doc, doc)]

    nlp = spacy.blank("en")
    nlp.add_pipe("tagger")
    optimizer = Adam()
    generator = train_while_improving(
        nlp,
        optimizer,
        generate_batch(),
        lambda: None,
        dropout=0.1,
        eval_frequency=100,
        accumulate_gradient=10,
        patience=10,
        max_steps=100,
        exclude=[],
        annotating_components=[],
        before_update=before_update,
    )

    with pytest.raises(ValueError, match="ran_before_update"):
        for _ in generator:
            pass

View File

@@ -626,3 +626,23 @@ def test_floret_vectors(floret_vectors_vec_str, floret_vectors_hashvec_str):
            OPS.to_numpy(vocab_r[word].vector),
            decimal=6,
        )


def test_equality():
    vectors1 = Vectors(shape=(10, 10))
    vectors2 = Vectors(shape=(10, 8))
    assert vectors1 != vectors2
    vectors2 = Vectors(shape=(10, 10))
    assert vectors1 == vectors2
    vectors1.add("hello", row=2)
    assert vectors1 != vectors2
    vectors2.add("hello", row=2)
    assert vectors1 == vectors2
    vectors1.resize((5, 9))
    vectors2.resize((5, 9))
    assert vectors1 == vectors2

View File

@@ -1,8 +1,13 @@
import os

import pytest

from spacy.attrs import IS_ALPHA, LEMMA, ORTH
from spacy.lang.en import English
from spacy.parts_of_speech import NOUN, VERB
from spacy.vocab import Vocab

from ..util import make_tempdir


@pytest.mark.issue(1868)
def test_issue1868():

@@ -59,3 +64,19 @@ def test_vocab_api_contains(en_vocab, text):
def test_vocab_writing_system(en_vocab):
    assert en_vocab.writing_system["direction"] == "ltr"
    assert en_vocab.writing_system["has_case"] is True


def test_to_disk():
    nlp = English()
    with make_tempdir() as d:
        nlp.vocab.to_disk(d)
        assert "vectors" in os.listdir(d)
        assert "lookups.bin" in os.listdir(d)


def test_to_disk_exclude():
    nlp = English()
    with make_tempdir() as d:
        nlp.vocab.to_disk(d, exclude=("vectors", "lookups"))
        assert "vectors" not in os.listdir(d)
        assert "lookups.bin" not in os.listdir(d)

View File

@@ -1668,6 +1668,20 @@ cdef class Doc:
        if underscore:
            user_keys = set()
            # Handle doc attributes with .get to include values from getters
            # and not only values stored in user_data, for backwards
            # compatibility
            for attr in underscore:
                if self.has_extension(attr):
                    if "_" not in data:
                        data["_"] = {}
                    value = self._.get(attr)
                    if not srsly.is_json_serializable(value):
                        raise ValueError(Errors.E107.format(attr=attr, value=repr(value)))
                    data["_"][attr] = value
                    user_keys.add(attr)
            # Token and span attributes only include values stored in user_data
            # and not values generated by getters
            if self.user_data:
                for data_key, value in self.user_data.copy().items():
                    if type(data_key) == tuple and len(data_key) >= 4 and data_key[0] == "._.":

@@ -1678,20 +1692,15 @@ cdef class Doc:
                        user_keys.add(attr)
                        if not srsly.is_json_serializable(value):
                            raise ValueError(Errors.E107.format(attr=attr, value=repr(value)))
                        # Token attribute
                        if start is not None and end is None:
                            if "underscore_token" not in data:
                                data["underscore_token"] = {}
                            if attr not in data["underscore_token"]:
                                data["underscore_token"][attr] = []
                            data["underscore_token"][attr].append({"start": start, "value": value})
                        # Span attribute
                        elif start is not None and end is not None:
                            if "underscore_span" not in data:
                                data["underscore_span"] = {}
                            if attr not in data["underscore_span"]:

View File

@@ -117,15 +117,13 @@ class Span:
    end_char: int
    label: int
    kb_id: int
    ent_id: int
    ent_id_: str
    @property
    def id(self) -> int: ...
    @property
    def id_(self) -> str: ...
    @property
    def orth_(self) -> str: ...
    @property
    def lemma_(self) -> str: ...
    label_: str
    kb_id_: str
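
A sketch of the `id`/`id_` accessors the stub now declares as properties, assuming a spaCy version whose `Span` constructor accepts `span_id`:

```python
# A sketch; assumes Span(..., span_id=...) is available.
import spacy
from spacy.tokens import Span

nlp = spacy.blank("en")
doc = nlp("Bank of China")
span = Span(doc, 0, 3, label="ORG", span_id="bank-1")
assert span.id == doc.vocab.strings["bank-1"]  # hash of the id string
assert span.id_ == "bank-1"
```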

View File

@@ -59,6 +59,7 @@ def train(
    batcher = T["batcher"]
    train_logger = T["logger"]
    before_to_disk = create_before_to_disk_callback(T["before_to_disk"])
    before_update = T["before_update"]

    # Helper function to save checkpoints. This is a closure for convenience,
    # to avoid passing in all the args all the time.

@@ -89,6 +90,7 @@ def train(
        eval_frequency=T["eval_frequency"],
        exclude=frozen_components,
        annotating_components=annotating_components,
        before_update=before_update,
    )
    clean_output_dir(output_path)
    stdout.write(msg.info(f"Pipeline: {nlp.pipe_names}") + "\n")

@@ -150,6 +152,7 @@ def train_while_improving(
    max_steps: int,
    exclude: List[str],
    annotating_components: List[str],
    before_update: Optional[Callable[["Language", Dict[str, Any]], None]],
):
    """Train until an evaluation stops improving. Works as a generator,
    with each iteration yielding a tuple `(batch, info, is_best_checkpoint)`,

@@ -198,6 +201,9 @@ def train_while_improving(
    words_seen = 0
    start_time = timer()
    for step, (epoch, batch) in enumerate(train_data):
        if before_update:
            before_update_args = {"step": step, "epoch": epoch}
            before_update(nlp, before_update_args)
        dropout = next(dropouts)  # type: ignore
        for subbatch in subdivide_batch(batch, accumulate_gradient):
            nlp.update(

View File

@@ -443,9 +443,9 @@ def load_model_from_package(
    name: str,
    *,
    vocab: Union["Vocab", bool] = True,
    disable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
    enable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
    exclude: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
    config: Union[Dict[str, Any], Config] = SimpleFrozenDict(),
) -> "Language":
    """Load a model from an installed package.

@@ -619,9 +619,9 @@ def load_model_from_init_py(
    init_file: Union[Path, str],
    *,
    vocab: Union["Vocab", bool] = True,
    disable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
    enable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
    exclude: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
    config: Union[Dict[str, Any], Config] = SimpleFrozenDict(),
) -> "Language":
    """Helper function to use in the `load()` method of a model package's

View File

@@ -243,6 +243,15 @@ cdef class Vectors:
        else:
            return key in self.key2row

    def __eq__(self, other):
        # Check for equality, with faster checks first
        return (
            self.shape == other.shape
            and self.key2row == other.key2row
            and self.to_bytes(exclude=["strings"])
            == other.to_bytes(exclude=["strings"])
        )

    def resize(self, shape, inplace=False):
        """Resize the underlying vectors array. If inplace=True, the memory
        is reallocated. This may cause other references to the data to become

View File

@@ -468,9 +468,9 @@ cdef class Vocab:
        setters = ["strings", "vectors"]
        if "strings" not in exclude:
            self.strings.to_disk(path / "strings.json")
        if "vectors" not in exclude:
            self.vectors.to_disk(path, exclude=["strings"])
        if "lookups" not in exclude:
            self.lookups.to_disk(path)

    def from_disk(self, path, *, exclude=tuple()):

View File

@@ -155,7 +155,7 @@ import Tag from 'components/tag'
> ```jsx
> <Tag>method</Tag>
> <Tag variant="new">4</Tag>
> <Tag variant="model">tagger, parser</Tag>
> ```

@@ -170,7 +170,7 @@ installed.
<InlineList>
  <Tag>method</Tag> <Tag variant="new">4</Tag> <Tag variant="model">tagger,
  parser</Tag>
</InlineList>

View File

@@ -12,10 +12,10 @@ menu:
  - ['train', 'train']
  - ['pretrain', 'pretrain']
  - ['evaluate', 'evaluate']
  - ['find-threshold', 'find-threshold']
  - ['assemble', 'assemble']
  - ['package', 'package']
  - ['project', 'project']
  - ['huggingface-hub', 'huggingface-hub']
---

@@ -53,7 +53,7 @@ $ python -m spacy download [model] [--direct] [--sdist] [pip_args]
| `--direct`, `-D` | Force direct download of exact package version. ~~bool (flag)~~ |
| `--sdist`, `-S` <Tag variant="new">3</Tag> | Download the source package (`.tar.gz` archive) instead of the default pre-built binary wheel. ~~bool (flag)~~ |
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
| pip args | Additional installation options to be passed to `pip install` when installing the pipeline package. For example, `--user` to install to the user home directory or `--no-deps` to not install package dependencies. ~~Any (option/flag)~~ |
| **CREATES** | The installed pipeline package in your `site-packages` directory. |

## info {#info tag="command"}

@@ -77,15 +77,15 @@ $ python -m spacy info [--markdown] [--silent] [--exclude]
$ python -m spacy info [model] [--markdown] [--silent] [--exclude]
```

| Name | Description |
| -------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ |
| `model` | A trained pipeline, i.e. package name or path (optional). ~~Optional[str] \(option)~~ |
| `--markdown`, `-md` | Print information as Markdown. ~~bool (flag)~~ |
| `--silent`, `-s` | Don't print anything, just return the values. ~~bool (flag)~~ |
| `--exclude`, `-e` | Comma-separated keys to exclude from the print-out. Defaults to `"labels"`. ~~Optional[str]~~ |
| `--url`, `-u` <Tag variant="new">3.5.0</Tag> | Print the URL to download the most recent compatible version of the pipeline. Requires a pipeline name. ~~bool (flag)~~ |
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
| **PRINTS** | Information about your spaCy installation. |

## validate {#validate new="2" tag="command"}

@@ -260,22 +260,22 @@ chosen based on the file extension of the input file.
$ python -m spacy convert [input_file] [output_dir] [--converter] [--file-type] [--n-sents] [--seg-sents] [--base] [--morphology] [--merge-subtokens] [--ner-map] [--lang]
```

| Name | Description |
| ------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
| `input_path` | Input file or directory. ~~Path (positional)~~ |
| `output_dir` | Output directory for converted file. Defaults to `"-"`, meaning data will be written to `stdout`. ~~Optional[Path] \(option)~~ |
| `--converter`, `-c` | Name of converter to use (see below). ~~str (option)~~ |
| `--file-type`, `-t` | Type of file to create. Either `spacy` (default) for binary [`DocBin`](/api/docbin) data or `json` for v2.x JSON format. ~~str (option)~~ |
| `--n-sents`, `-n` | Number of sentences per document. Supported for: `conll`, `conllu`, `iob`, `ner` ~~int (option)~~ |
| `--seg-sents`, `-s` | Segment sentences. Supported for: `conll`, `ner` ~~bool (flag)~~ |
| `--base`, `-b`, `--model` | Trained spaCy pipeline for sentence segmentation to use as base (for `--seg-sents`). ~~Optional[str] \(option)~~ |
| `--morphology`, `-m` | Enable appending morphology to tags. Supported for: `conllu` ~~bool (flag)~~ |
| `--merge-subtokens`, `-T` | Merge CoNLL-U subtokens ~~bool (flag)~~ |
| `--ner-map`, `-nm` | NER tag mapping (as JSON-encoded dict of entity types). Supported for: `conllu` ~~Optional[Path] \(option)~~ |
| `--lang`, `-l` | Language code (if tokenizer required). ~~Optional[str] \(option)~~ |
| `--concatenate`, `-C` | Concatenate output to a single file ~~bool (flag)~~ |
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
| **CREATES** | Binary [`DocBin`](/api/docbin) training data that can be used with [`spacy train`](/api/cli#train). |

### Converters {#converters}

@@ -474,8 +474,7 @@ report span characteristics such as the average span length and the span (or
span boundary) distinctiveness. The distinctiveness measure shows how different
the tokens are with respect to the rest of the corpus using the KL-divergence of
the token distributions. To learn more, you can check out Papay et al.'s work on
[*Dissecting Span Identification Tasks with Performance Prediction* (EMNLP 2020)](https://aclanthology.org/2020.emnlp-main.396/).

</Infobox>

@@ -1163,6 +1162,46 @@ $ python -m spacy evaluate [model] [data_path] [--output] [--code] [--gold-prepr
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
| **CREATES** | Training results and optional metrics and visualizations. |

## find-threshold {#find-threshold new="3.5" tag="command"}

Runs prediction trials for a trained model with varying thresholds to maximize
the specified metric. The search space for the threshold is traversed linearly
from 0 to 1 in `n_trials` steps. Results are displayed in a table on `stdout`
(the corresponding API call to `spacy.cli.find_threshold.find_threshold()`
returns all results).

This is applicable only for components whose predictions are influenced by
thresholds - e.g. `textcat_multilabel` and `spancat`, but not `textcat`. Note
that the full path to the corresponding threshold attribute in the config has to
be provided.

> #### Examples
>
> ```cli
> # For textcat_multilabel:
> $ python -m spacy find-threshold my_nlp data.spacy textcat_multilabel threshold cats_macro_f
> ```
>
> ```cli
> # For spancat:
> $ python -m spacy find-threshold my_nlp data.spacy spancat threshold spans_sc_f
> ```

| Name | Description |
| ----------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `model` | Pipeline to evaluate. Can be a package or a path to a data directory. ~~str (positional)~~ |
| `data_path` | Path to file with DocBin with docs to use for threshold search. ~~Path (positional)~~ |
| `pipe_name` | Name of pipe to examine thresholds for. ~~str (positional)~~ |
| `threshold_key` | Key of threshold attribute in component's configuration. ~~str (positional)~~ |
| `scores_key` | Name of score metric to optimize. ~~str (positional)~~ |
| `--n_trials`, `-n` | Number of trials to determine optimal thresholds. ~~int (option)~~ |
| `--code`, `-c` | Path to Python file with additional code to be imported. Allows [registering custom functions](/usage/training#custom-functions) for new architectures. ~~Optional[Path] \(option)~~ |
| `--gpu-id`, `-g` | GPU to use, if any. Defaults to `-1` for CPU. ~~int (option)~~ |
| `--gold-preproc`, `-G` | Use gold preprocessing. ~~bool (flag)~~ |
| `--silent`, `-V`, `-VV` | Display more information for debugging purposes. ~~bool (flag)~~ |
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
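
The same search is exposed as a Python API; a sketch based on the call signature exercised in the test suite above (the model and data paths are placeholders; the return value is a `(best threshold, best score, {threshold: score})` tuple):

```python
# A sketch, mirroring the CLI arguments; "my_nlp" and "data.spacy" are placeholders.
from spacy.cli.find_threshold import find_threshold

best_threshold, best_score, all_scores = find_threshold(
    model="my_nlp",          # package name or path to pipeline data
    data_path="data.spacy",  # DocBin with evaluation docs
    pipe_name="textcat_multilabel",
    threshold_key="threshold",
    scores_key="cats_macro_f",
    silent=True,
)
print(best_threshold, best_score)
```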
## assemble {#assemble tag="command"} ## assemble {#assemble tag="command"}
Assemble a pipeline from a config file without additional training. Expects a Assemble a pipeline from a config file without additional training. Expects a
@ -1229,19 +1268,19 @@ $ python -m spacy package [input_dir] [output_dir] [--code] [--meta-path] [--cre
> $ pip install dist/en_pipeline-0.0.0.tar.gz > $ pip install dist/en_pipeline-0.0.0.tar.gz
> ``` > ```
| Name | Description | | Name | Description |
| ------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | -------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `input_dir` | Path to directory containing pipeline data. ~~Path (positional)~~ | | `input_dir` | Path to directory containing pipeline data. ~~Path (positional)~~ |
| `output_dir` | Directory to create package folder in. ~~Path (positional)~~ | | `output_dir` | Directory to create package folder in. ~~Path (positional)~~ |
| `--code`, `-c` <Tag variant="new">3</Tag> | Comma-separated paths to Python files to be included in the package and imported in its `__init__.py`. This allows including [registering functions](/usage/training#custom-functions) and [custom components](/usage/processing-pipelines#custom-components). ~~str (option)~~ | | `--code`, `-c` <Tag variant="new">3</Tag> | Comma-separated paths to Python files to be included in the package and imported in its `__init__.py`. This allows including [registering functions](/usage/training#custom-functions) and [custom components](/usage/processing-pipelines#custom-components). ~~str (option)~~ |
| `--meta-path`, `-m` <Tag variant="new">2</Tag> | Path to [`meta.json`](/api/data-formats#meta) file (optional). ~~Optional[Path] \(option)~~ | | `--meta-path`, `-m` | Path to [`meta.json`](/api/data-formats#meta) file (optional). ~~Optional[Path] \(option)~~ |
| `--create-meta`, `-C` <Tag variant="new">2</Tag> | Create a `meta.json` file on the command line, even if one already exists in the directory. If an existing file is found, its entries will be shown as the defaults in the command line prompt. ~~bool (flag)~~ | | `--create-meta`, `-C` | Create a `meta.json` file on the command line, even if one already exists in the directory. If an existing file is found, its entries will be shown as the defaults in the command line prompt. ~~bool (flag)~~ |
| `--build`, `-b` <Tag variant="new">3</Tag> | Comma-separated artifact formats to build. Can be `sdist` (for a `.tar.gz` archive) and/or `wheel` (for a binary `.whl` file), or `none` if you want to run this step manually. The generated artifacts can be installed by `pip install`. Defaults to `sdist`. ~~str (option)~~ | | `--build`, `-b` <Tag variant="new">3</Tag> | Comma-separated artifact formats to build. Can be `sdist` (for a `.tar.gz` archive) and/or `wheel` (for a binary `.whl` file), or `none` if you want to run this step manually. The generated artifacts can be installed by `pip install`. Defaults to `sdist`. ~~str (option)~~ |
| `--name`, `-n` <Tag variant="new">3</Tag> | Package name to override in meta. ~~Optional[str] \(option)~~ | | `--name`, `-n` <Tag variant="new">3</Tag> | Package name to override in meta. ~~Optional[str] \(option)~~ |
| `--version`, `-v` <Tag variant="new">3</Tag> | Package version to override in meta. Useful when training new versions, as it doesn't require editing the meta template. ~~Optional[str] \(option)~~ | | `--version`, `-v` <Tag variant="new">3</Tag> | Package version to override in meta. Useful when training new versions, as it doesn't require editing the meta template. ~~Optional[str] \(option)~~ |
| `--force`, `-f` | Force overwriting of existing folder in output directory. ~~bool (flag)~~ | | `--force`, `-f` | Force overwriting of existing folder in output directory. ~~bool (flag)~~ |
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | | `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
| **CREATES** | A Python package containing the spaCy pipeline. | | **CREATES** | A Python package containing the spaCy pipeline. |
## project {#project new="3"} ## project {#project new="3"}
@ -1503,50 +1542,6 @@ $ python -m spacy project dvc [project_dir] [workflow] [--force] [--verbose] [--
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | | `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
| **CREATES** | A `dvc.yaml` file in the project directory, based on the steps defined in the given workflow. | | **CREATES** | A `dvc.yaml` file in the project directory, based on the steps defined in the given workflow. |
## ray {#ray new="3"}
The `spacy ray` CLI includes commands for parallel and distributed computing via
[Ray](https://ray.io).
<Infobox variant="warning">
To use this command, you need the
[`spacy-ray`](https://github.com/explosion/spacy-ray) package installed.
Installing the package will automatically add the `ray` command to the spaCy
CLI.
</Infobox>
### ray train {#ray-train tag="command"}
Train a spaCy pipeline using [Ray](https://ray.io) for parallel training. The
command works just like [`spacy train`](/api/cli#train). For more details and
examples, see the usage guide on
[parallel training](/usage/training#parallel-training) and the spaCy project
[integration](/usage/projects#ray).
```cli
$ python -m spacy ray train [config_path] [--code] [--output] [--n-workers] [--address] [--gpu-id] [--verbose] [overrides]
```
> #### Example
>
> ```cli
> $ python -m spacy ray train config.cfg --n-workers 2
> ```
| Name | Description |
| ------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `config_path` | Path to [training config](/api/data-formats#config) file containing all settings and hyperparameters. ~~Path (positional)~~ |
| `--code`, `-c` | Path to Python file with additional code to be imported. Allows [registering custom functions](/usage/training#custom-functions) for new architectures. ~~Optional[Path] \(option)~~ |
| `--output`, `-o` | Directory or remote storage URL for saving trained pipeline. The directory will be created if it doesn't exist. ~~Optional[Path] \(option)~~ |
| `--n-workers`, `-n` | The number of workers. Defaults to `1`. ~~int (option)~~ |
| `--address`, `-a` | Optional address of the Ray cluster. If not set (default), Ray will run locally. ~~Optional[str] \(option)~~ |
| `--gpu-id`, `-g` | GPU ID or `-1` for CPU. Defaults to `-1`. ~~int (option)~~ |
| `--verbose`, `-V` | Display more information for debugging purposes. ~~bool (flag)~~ |
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
| overrides | Config parameters to override. Should be options starting with `--` that correspond to the config section and value to override, e.g. `--paths.train ./train.spacy`. ~~Any (option/flag)~~ |
## huggingface-hub {#huggingface-hub new="3.1"}

The `spacy huggingface-hub` CLI includes commands for uploading your trained

View File

@@ -186,6 +186,7 @@ process that are used when you run [`spacy train`](/api/cli#train).
| `accumulate_gradient` | Whether to divide the batch up into substeps. Defaults to `1`. ~~int~~ |
| `batcher` | Callable that takes an iterator of [`Doc`](/api/doc) objects and yields batches of `Doc`s. Defaults to [`batch_by_words`](/api/top-level#batch_by_words). ~~Callable[[Iterator[Doc], Iterator[List[Doc]]]]~~ |
| `before_to_disk` | Optional callback to modify `nlp` object right before it is saved to disk during and after training. Can be used to remove or reset config values or disable components. Defaults to `null`. ~~Optional[Callable[[Language], Language]]~~ |
| `before_update` | Optional callback that is invoked at the start of each training step with the `nlp` object and a `Dict` containing the following entries: `step`, `epoch`. Can be used to make deferred changes to components. Defaults to `null`. ~~Optional[Callable[[Language, Dict[str, Any]], None]]~~ |
| `dev_corpus` | Dot notation of the config location defining the dev corpus. Defaults to `corpora.dev`. ~~str~~ |
| `dropout` | The dropout rate. Defaults to `0.1`. ~~float~~ |
| `eval_frequency` | How often to evaluate during training (steps). Defaults to `200`. ~~int~~ |
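
The `before_update` callback can be supplied from the config like the other training callbacks; a minimal sketch assuming the usual `@callbacks` registry pattern, with the registry name `"my_before_update"` made up for illustration (referenced from the config as `[training.before_update]` with `@callbacks = "my_before_update"`):

```python
# functions.py, loaded via --code: a minimal before_update sketch.
from typing import Any, Dict

import spacy
from spacy.language import Language


@spacy.registry.callbacks("my_before_update")
def create_before_update():
    def before_update(nlp: Language, args: Dict[str, Any]) -> None:
        # args carries the entries documented above: step and epoch.
        print(f"starting step {args['step']} (epoch {args['epoch']})")

    return before_update
```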

View File

@@ -209,15 +209,15 @@ alignment mode `"strict"`.
> assert span.text == "New York"
> ```

| Name | Description |
| ---------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `start` | The index of the first character of the span. ~~int~~ |
| `end` | The index of the last character after the span. ~~int~~ |
| `label` | A label to attach to the span, e.g. for named entities. ~~Union[int, str]~~ |
| `kb_id` | An ID from a knowledge base to capture the meaning of a named entity. ~~Union[int, str]~~ |
| `vector` | A meaning representation of the span. ~~numpy.ndarray[ndim=1, dtype=float32]~~ |
| `alignment_mode` | How character indices snap to token boundaries. Options: `"strict"` (no snapping), `"contract"` (span of all tokens completely within the character span), `"expand"` (span of all tokens at least partially covered by the character span). Defaults to `"strict"`. ~~str~~ |
| **RETURNS** | The newly constructed object or `None`. ~~Optional[Span]~~ |
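
A short sketch of the `alignment_mode` options, using character offsets that cut into tokens:

```python
import spacy

nlp = spacy.blank("en")
doc = nlp("I like New York")
# chars 8-14 ("ew Yor") don't line up with token boundaries:
assert doc.char_span(8, 14) is None  # "strict": no snapping
# "expand" snaps to all tokens at least partially covered
assert doc.char_span(8, 14, alignment_mode="expand").text == "New York"
# "contract" keeps only tokens completely inside the offsets (none here)
assert doc.char_span(8, 14, alignment_mode="contract") is None
```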
## Doc.set_ents {#set_ents tag="method" new="3"}

@@ -751,22 +751,22 @@ The L2 norm of the document's vector representation.
## Attributes {#attributes}

| Name | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------ |
| `text` | A string representation of the document text. ~~str~~ |
| `text_with_ws` | An alias of `Doc.text`, provided for duck-type compatibility with `Span` and `Token`. ~~str~~ |
| `mem` | The document's local memory heap, for all C data it owns. ~~cymem.Pool~~ |
| `vocab` | The store of lexical types. ~~Vocab~~ |
| `tensor` | Container for dense vector representations. ~~numpy.ndarray~~ |
| `user_data` | A generic storage area, for user custom data. ~~Dict[str, Any]~~ |
| `lang` | Language of the document's vocabulary. ~~int~~ |
| `lang_` | Language of the document's vocabulary. ~~str~~ |
| `sentiment` | The document's positivity/negativity score, if available. ~~float~~ |
| `user_hooks` | A dictionary that allows customization of the `Doc`'s properties. ~~Dict[str, Callable]~~ |
| `user_token_hooks` | A dictionary that allows customization of properties of `Token` children. ~~Dict[str, Callable]~~ |
| `user_span_hooks` | A dictionary that allows customization of properties of `Span` children. ~~Dict[str, Callable]~~ |
| `has_unknown_spaces` | Whether the document was constructed without known spacing between tokens (typically when created from gold tokenization). ~~bool~~ |
| `_` | User space for adding custom [attribute extensions](/usage/processing-pipelines#custom-components-attributes). ~~Underscore~~ |

## Serialization fields {#serialization-fields}

View File

@@ -63,18 +63,18 @@ spaCy loads a model under the hood based on its
> nlp = Language.from_config(config)
> ```

| Name | Description |
| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `config` | The loaded config. ~~Union[Dict[str, Any], Config]~~ |
| _keyword-only_ | |
| `vocab` | A `Vocab` object. If `True`, a vocab is created using the default language data settings. ~~Vocab~~ |
| `disable` | Name(s) of pipeline component(s) to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [nlp.enable_pipe](/api/language#enable_pipe). Is merged with the config entry `nlp.disabled`. ~~Union[str, Iterable[str]]~~ |
| `enable` <Tag variant="new">3.4</Tag> | Name(s) of pipeline component(s) to [enable](/usage/processing-pipelines#disabling). All other pipes will be disabled, but can be enabled again using [nlp.enable_pipe](/api/language#enable_pipe). ~~Union[str, Iterable[str]]~~ |
| `exclude` | Name(s) of pipeline component(s) to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~Union[str, Iterable[str]]~~ |
| `meta` | [Meta data](/api/data-formats#meta) overrides. ~~Dict[str, Any]~~ |
| `auto_fill` | Whether to automatically fill in missing values in the config, based on defaults and function argument annotations. Defaults to `True`. ~~bool~~ |
| `validate` | Whether to validate the component config and arguments against the types expected by the factory. Defaults to `True`. ~~bool~~ |
| **RETURNS** | The initialized object. ~~Language~~ |

## Language.component {#component tag="classmethod" new="3"}

@@ -198,16 +198,16 @@ tokenization is skipped but the rest of the pipeline is run.
> assert doc.has_annotation("DEP")
> ```

| Name | Description |
| --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `texts` | A sequence of strings (or `Doc` objects). ~~Iterable[Union[str, Doc]]~~ |
| _keyword-only_ | |
| `as_tuples` | If set to `True`, inputs should be a sequence of `(text, context)` tuples. Output will then be a sequence of `(doc, context)` tuples. Defaults to `False`. ~~bool~~ |
| `batch_size` | The number of texts to buffer. ~~Optional[int]~~ |
| `disable` | Names of pipeline components to [disable](/usage/processing-pipelines#disabling). ~~List[str]~~ |
| `component_cfg` | Optional dictionary of keyword arguments for components, keyed by component names. Defaults to `None`. ~~Optional[Dict[str, Dict[str, Any]]]~~ |
| `n_process` | Number of processors to use. Defaults to `1`. ~~int~~ |
| **YIELDS** | Documents in the order of the original text. ~~Doc~~ |
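
A minimal sketch of `Language.pipe` with context tuples and the batching options above:

```python
import spacy

nlp = spacy.blank("en")
data = [("This is a text", {"id": 1}), ("And another one", {"id": 2})]
# as_tuples=True threads the context object through unchanged.
for doc, context in nlp.pipe(data, as_tuples=True, batch_size=50):
    print(context["id"], doc.text)
```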
## Language.set_error_handler {#set_error_handler tag="method" new="3"} ## Language.set_error_handler {#set_error_handler tag="method" new="3"}
@ -1030,21 +1030,21 @@ details.
## Attributes {#attributes} ## Attributes {#attributes}
| Name | Description | | Name | Description |
| --------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- | | -------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- |
| `vocab` | A container for the lexical types. ~~Vocab~~ | | `vocab` | A container for the lexical types. ~~Vocab~~ |
| `tokenizer` | The tokenizer. ~~Tokenizer~~ | | `tokenizer` | The tokenizer. ~~Tokenizer~~ |
| `make_doc` | Callable that takes a string and returns a `Doc`. ~~Callable[[str], Doc]~~ | | `make_doc` | Callable that takes a string and returns a `Doc`. ~~Callable[[str], Doc]~~ |
| `pipeline` | List of `(name, component)` tuples describing the current processing pipeline, in order. ~~List[Tuple[str, Callable[[Doc], Doc]]]~~ | | `pipeline` | List of `(name, component)` tuples describing the current processing pipeline, in order. ~~List[Tuple[str, Callable[[Doc], Doc]]]~~ |
| `pipe_names` <Tag variant="new">2</Tag> | List of pipeline component names, in order. ~~List[str]~~ | | `pipe_names` | List of pipeline component names, in order. ~~List[str]~~ |
| `pipe_labels` <Tag variant="new">2.2</Tag> | List of labels set by the pipeline components, if available, keyed by component name. ~~Dict[str, List[str]]~~ | | `pipe_labels` | List of labels set by the pipeline components, if available, keyed by component name. ~~Dict[str, List[str]]~~ |
| `pipe_factories` <Tag variant="new">2.2</Tag> | Dictionary of pipeline component names, mapped to their factory names. ~~Dict[str, str]~~ | | `pipe_factories` | Dictionary of pipeline component names, mapped to their factory names. ~~Dict[str, str]~~ |
| `factories` | All available factory functions, keyed by name. ~~Dict[str, Callable[[...], Callable[[Doc], Doc]]]~~ | | `factories` | All available factory functions, keyed by name. ~~Dict[str, Callable[[...], Callable[[Doc], Doc]]]~~ |
| `factory_names` <Tag variant="new">3</Tag> | List of all available factory names. ~~List[str]~~ | | `factory_names` <Tag variant="new">3</Tag> | List of all available factory names. ~~List[str]~~ |
| `components` <Tag variant="new">3</Tag> | List of all available `(name, component)` tuples, including components that are currently disabled. ~~List[Tuple[str, Callable[[Doc], Doc]]]~~ | | `components` <Tag variant="new">3</Tag> | List of all available `(name, component)` tuples, including components that are currently disabled. ~~List[Tuple[str, Callable[[Doc], Doc]]]~~ |
| `component_names` <Tag variant="new">3</Tag> | List of all available component names, including components that are currently disabled. ~~List[str]~~ | | `component_names` <Tag variant="new">3</Tag> | List of all available component names, including components that are currently disabled. ~~List[str]~~ |
| `disabled` <Tag variant="new">3</Tag> | Names of components that are currently disabled and don't run as part of the pipeline. ~~List[str]~~ | | `disabled` <Tag variant="new">3</Tag> | Names of components that are currently disabled and don't run as part of the pipeline. ~~List[str]~~ |
| `path` <Tag variant="new">2</Tag> | Path to the pipeline data directory, if a pipeline is loaded from a path or package. Otherwise `None`. ~~Optional[Path]~~ | | `path` | Path to the pipeline data directory, if a pipeline is loaded from a path or package. Otherwise `None`. ~~Optional[Path]~~ |
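A short sketch of inspecting these attributes on a loaded pipeline (the model name is only an example):

```python
import spacy

nlp = spacy.load("en_core_web_sm", disable=["ner"])

print(nlp.pipe_names)        # components that will run, in order
print(nlp.component_names)   # all components, including disabled ones
print(nlp.disabled)          # e.g. ["ner"]
print(nlp.pipe_labels)       # labels per component, keyed by name
```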
## Class attributes {#class-attributes} ## Class attributes {#class-attributes}
View File
@ -121,44 +121,44 @@ The L2 norm of the lexeme's vector representation.
## Attributes {#attributes} ## Attributes {#attributes}
| Name | Description | | Name | Description |
| -------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | ---------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `vocab` | The lexeme's vocabulary. ~~Vocab~~ | | `vocab` | The lexeme's vocabulary. ~~Vocab~~ |
| `text` | Verbatim text content. ~~str~~ | | `text` | Verbatim text content. ~~str~~ |
| `orth` | ID of the verbatim text content. ~~int~~ | | `orth` | ID of the verbatim text content. ~~int~~ |
| `orth_` | Verbatim text content (identical to `Lexeme.text`). Exists mostly for consistency with the other attributes. ~~str~~ | | `orth_` | Verbatim text content (identical to `Lexeme.text`). Exists mostly for consistency with the other attributes. ~~str~~ |
| `rank` | Sequential ID of the lexeme's lexical type, used to index into tables, e.g. for word vectors. ~~int~~ | | `rank` | Sequential ID of the lexeme's lexical type, used to index into tables, e.g. for word vectors. ~~int~~ |
| `flags` | Container of the lexeme's binary flags. ~~int~~ | | `flags` | Container of the lexeme's binary flags. ~~int~~ |
| `norm` | The lexeme's norm, i.e. a normalized form of the lexeme text. ~~int~~ | | `norm` | The lexeme's norm, i.e. a normalized form of the lexeme text. ~~int~~ |
| `norm_` | The lexeme's norm, i.e. a normalized form of the lexeme text. ~~str~~ | | `norm_` | The lexeme's norm, i.e. a normalized form of the lexeme text. ~~str~~ |
| `lower` | Lowercase form of the word. ~~int~~ | | `lower` | Lowercase form of the word. ~~int~~ |
| `lower_` | Lowercase form of the word. ~~str~~ | | `lower_` | Lowercase form of the word. ~~str~~ |
| `shape` | Transform of the word's string, to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example, `"Xxxx"` or `"dd"`. ~~int~~ | | `shape` | Transform of the word's string, to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example, `"Xxxx"` or `"dd"`. ~~int~~ |
| `shape_` | Transform of the word's string, to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example, `"Xxxx"` or `"dd"`. ~~str~~ | | `shape_` | Transform of the word's string, to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example, `"Xxxx"` or `"dd"`. ~~str~~ |
| `prefix` | Length-N substring from the start of the word. Defaults to `N=1`. ~~int~~ | | `prefix` | Length-N substring from the start of the word. Defaults to `N=1`. ~~int~~ |
| `prefix_` | Length-N substring from the start of the word. Defaults to `N=1`. ~~str~~ | | `prefix_` | Length-N substring from the start of the word. Defaults to `N=1`. ~~str~~ |
| `suffix` | Length-N substring from the end of the word. Defaults to `N=3`. ~~int~~ | | `suffix` | Length-N substring from the end of the word. Defaults to `N=3`. ~~int~~ |
| `suffix_` | Length-N substring from the end of the word. Defaults to `N=3`. ~~str~~ | | `suffix_` | Length-N substring from the end of the word. Defaults to `N=3`. ~~str~~ |
| `is_alpha` | Does the lexeme consist of alphabetic characters? Equivalent to `lexeme.text.isalpha()`. ~~bool~~ | | `is_alpha` | Does the lexeme consist of alphabetic characters? Equivalent to `lexeme.text.isalpha()`. ~~bool~~ |
| `is_ascii` | Does the lexeme consist of ASCII characters? Equivalent to `all(ord(c) < 128 for c in lexeme.text)`. ~~bool~~ | | `is_ascii` | Does the lexeme consist of ASCII characters? Equivalent to `all(ord(c) < 128 for c in lexeme.text)`. ~~bool~~ |
| `is_digit` | Does the lexeme consist of digits? Equivalent to `lexeme.text.isdigit()`. ~~bool~~ | | `is_digit` | Does the lexeme consist of digits? Equivalent to `lexeme.text.isdigit()`. ~~bool~~ |
| `is_lower` | Is the lexeme in lowercase? Equivalent to `lexeme.text.islower()`. ~~bool~~ | | `is_lower` | Is the lexeme in lowercase? Equivalent to `lexeme.text.islower()`. ~~bool~~ |
| `is_upper` | Is the lexeme in uppercase? Equivalent to `lexeme.text.isupper()`. ~~bool~~ | | `is_upper` | Is the lexeme in uppercase? Equivalent to `lexeme.text.isupper()`. ~~bool~~ |
| `is_title` | Is the lexeme in titlecase? Equivalent to `lexeme.text.istitle()`. ~~bool~~ | | `is_title` | Is the lexeme in titlecase? Equivalent to `lexeme.text.istitle()`. ~~bool~~ |
| `is_punct` | Is the lexeme punctuation? ~~bool~~ | | `is_punct` | Is the lexeme punctuation? ~~bool~~ |
| `is_left_punct` | Is the lexeme a left punctuation mark, e.g. `(`? ~~bool~~ | | `is_left_punct` | Is the lexeme a left punctuation mark, e.g. `(`? ~~bool~~ |
| `is_right_punct` | Is the lexeme a right punctuation mark, e.g. `)`? ~~bool~~ | | `is_right_punct` | Is the lexeme a right punctuation mark, e.g. `)`? ~~bool~~ |
| `is_space` | Does the lexeme consist of whitespace characters? Equivalent to `lexeme.text.isspace()`. ~~bool~~ | | `is_space` | Does the lexeme consist of whitespace characters? Equivalent to `lexeme.text.isspace()`. ~~bool~~ |
| `is_bracket` | Is the lexeme a bracket? ~~bool~~ | | `is_bracket` | Is the lexeme a bracket? ~~bool~~ |
| `is_quote` | Is the lexeme a quotation mark? ~~bool~~ | | `is_quote` | Is the lexeme a quotation mark? ~~bool~~ |
| `is_currency` <Tag variant="new">2.0.8</Tag> | Is the lexeme a currency symbol? ~~bool~~ | | `is_currency` | Is the lexeme a currency symbol? ~~bool~~ |
| `like_url` | Does the lexeme resemble a URL? ~~bool~~ | | `like_url` | Does the lexeme resemble a URL? ~~bool~~ |
| `like_num` | Does the lexeme represent a number? e.g. "10.9", "10", "ten", etc. ~~bool~~ | | `like_num` | Does the lexeme represent a number? e.g. "10.9", "10", "ten", etc. ~~bool~~ |
| `like_email` | Does the lexeme resemble an email address? ~~bool~~ | | `like_email` | Does the lexeme resemble an email address? ~~bool~~ |
| `is_oov` | Is the lexeme out-of-vocabulary (i.e. does it not have a word vector)? ~~bool~~ | | `is_oov` | Is the lexeme out-of-vocabulary (i.e. does it not have a word vector)? ~~bool~~ |
| `is_stop` | Is the lexeme part of a "stop list"? ~~bool~~ | | `is_stop` | Is the lexeme part of a "stop list"? ~~bool~~ |
| `lang` | Language of the parent vocabulary. ~~int~~ | | `lang` | Language of the parent vocabulary. ~~int~~ |
| `lang_` | Language of the parent vocabulary. ~~str~~ | | `lang_` | Language of the parent vocabulary. ~~str~~ |
| `prob` | Smoothed log probability estimate of the lexeme's word type (context-independent entry in the vocabulary). ~~float~~ | | `prob` | Smoothed log probability estimate of the lexeme's word type (context-independent entry in the vocabulary). ~~float~~ |
| `cluster` | Brown cluster ID. ~~int~~ | | `cluster` | Brown cluster ID. ~~int~~ |
| `sentiment` | A scalar value indicating the positivity or negativity of the lexeme. ~~float~~ | | `sentiment` | A scalar value indicating the positivity or negativity of the lexeme. ~~float~~ |
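For orientation, a minimal sketch of looking up a lexeme and reading a few of the attributes listed above:

```python
import spacy

nlp = spacy.blank("en")
lexeme = nlp.vocab["apple"]

print(lexeme.text, lexeme.orth)                       # verbatim text and its hash ID
print(lexeme.shape_, lexeme.prefix_, lexeme.suffix_)  # orthographic features
print(lexeme.is_alpha, lexeme.is_digit, lexeme.like_num)
```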
View File
@ -33,7 +33,7 @@ rule-based matching are:
| Attribute | Description | | Attribute | Description |
| ---------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | | ---------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- |
| `ORTH` | The exact verbatim text of a token. ~~str~~ | | `ORTH` | The exact verbatim text of a token. ~~str~~ |
| `TEXT` <Tag variant="new">2.1</Tag> | The exact verbatim text of a token. ~~str~~ | | `TEXT` | The exact verbatim text of a token. ~~str~~ |
| `NORM` | The normalized form of the token text. ~~str~~ | | `NORM` | The normalized form of the token text. ~~str~~ |
| `LOWER` | The lowercase form of the token text. ~~str~~ | | `LOWER` | The lowercase form of the token text. ~~str~~ |
| `LENGTH` | The length of the token text. ~~int~~ | | `LENGTH` | The length of the token text. ~~int~~ |
@ -48,7 +48,7 @@ rule-based matching are:
| `ENT_IOB` | The IOB part of the token's entity tag. ~~str~~ | | `ENT_IOB` | The IOB part of the token's entity tag. ~~str~~ |
| `ENT_ID` | The token's entity ID (`ent_id`). ~~str~~ | | `ENT_ID` | The token's entity ID (`ent_id`). ~~str~~ |
| `ENT_KB_ID` | The token's entity knowledge base ID (`ent_kb_id`). ~~str~~ | | `ENT_KB_ID` | The token's entity knowledge base ID (`ent_kb_id`). ~~str~~ |
| `_` <Tag variant="new">2.1</Tag> | Properties in [custom extension attributes](/usage/processing-pipelines#custom-components-attributes). ~~Dict[str, Any]~~ | | `_` | Properties in [custom extension attributes](/usage/processing-pipelines#custom-components-attributes). ~~Dict[str, Any]~~ |
| `OP` | Operator or quantifier to determine how often to match a token pattern. ~~str~~ | | `OP` | Operator or quantifier to determine how often to match a token pattern. ~~str~~ |
Operators and quantifiers define **how often** a token pattern should be Operators and quantifiers define **how often** a token pattern should be
@ -64,7 +64,7 @@ matched:
> ``` > ```
| OP | Description | | OP | Description |
|---------|------------------------------------------------------------------------| | ------- | ---------------------------------------------------------------------- |
| `!` | Negate the pattern, by requiring it to match exactly 0 times. | | `!` | Negate the pattern, by requiring it to match exactly 0 times. |
| `?` | Make the pattern optional, by allowing it to match 0 or 1 times. | | `?` | Make the pattern optional, by allowing it to match 0 or 1 times. |
| `+` | Require the pattern to match 1 or more times. | | `+` | Require the pattern to match 1 or more times. |
@ -109,10 +109,10 @@ string where an integer is expected) or unexpected property names.
> matcher = Matcher(nlp.vocab) > matcher = Matcher(nlp.vocab)
> ``` > ```
| Name | Description | | Name | Description |
| --------------------------------------- | ----------------------------------------------------------------------------------------------------- | | ---------- | ----------------------------------------------------------------------------------------------------- |
| `vocab` | The vocabulary object, which must be shared with the documents the matcher will operate on. ~~Vocab~~ | | `vocab` | The vocabulary object, which must be shared with the documents the matcher will operate on. ~~Vocab~~ |
| `validate` <Tag variant="new">2.1</Tag> | Validate all patterns added to this matcher. ~~bool~~ | | `validate` | Validate all patterns added to this matcher. ~~bool~~ |
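A minimal sketch of constructing a `Matcher` and using one of the `OP` quantifiers from the table above (pattern and names are illustrative):

```python
import spacy
from spacy.matcher import Matcher

nlp = spacy.blank("en")
matcher = Matcher(nlp.vocab, validate=True)
# "?" makes the punctuation token optional, so both variants below match
pattern = [{"LOWER": "hello"}, {"IS_PUNCT": True, "OP": "?"}, {"LOWER": "world"}]
matcher.add("HELLO_WORLD", [pattern])

doc = nlp("Hello, world! Hello world!")
for match_id, start, end in matcher(doc):
    print(doc[start:end].text)
```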
## Matcher.\_\_call\_\_ {#call tag="method"} ## Matcher.\_\_call\_\_ {#call tag="method"}
View File
@ -36,11 +36,11 @@ be shown.
> matcher = PhraseMatcher(nlp.vocab) > matcher = PhraseMatcher(nlp.vocab)
> ``` > ```
| Name | Description | | Name | Description |
| --------------------------------------- | ------------------------------------------------------------------------------------------------------ | | ---------- | ------------------------------------------------------------------------------------------------------ |
| `vocab` | The vocabulary object, which must be shared with the documents the matcher will operate on. ~~Vocab~~ | | `vocab` | The vocabulary object, which must be shared with the documents the matcher will operate on. ~~Vocab~~ |
| `attr` <Tag variant="new">2.1</Tag> | The token attribute to match on. Defaults to `ORTH`, i.e. the verbatim token text. ~~Union[int, str]~~ | | `attr` | The token attribute to match on. Defaults to `ORTH`, i.e. the verbatim token text. ~~Union[int, str]~~ |
| `validate` <Tag variant="new">2.1</Tag> | Validate patterns added to the matcher. ~~bool~~ | | `validate` | Validate patterns added to the matcher. ~~bool~~ |
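A minimal `PhraseMatcher` sketch using the `attr` setting for case-insensitive matching (terms are illustrative):

```python
import spacy
from spacy.matcher import PhraseMatcher

nlp = spacy.blank("en")
matcher = PhraseMatcher(nlp.vocab, attr="LOWER")
patterns = [nlp.make_doc(text) for text in ["Barack Obama", "Angela Merkel"]]
matcher.add("POLITICIANS", patterns)

doc = nlp("angela merkel met Barack Obama")
for match_id, start, end in matcher(doc):
    print(doc[start:end].text)
```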
## PhraseMatcher.\_\_call\_\_ {#call tag="method"} ## PhraseMatcher.\_\_call\_\_ {#call tag="method"}
View File
@ -229,16 +229,17 @@ The reported `{attr}_score` depends on the classification properties:
> print(scores["cats_macro_auc"]) > print(scores["cats_macro_auc"])
> ``` > ```
| Name | Description | | Name | Description |
| ---------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | | ---------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `examples` | The `Example` objects holding both the predictions and the correct gold-standard annotations. ~~Iterable[Example]~~ | | `examples` | The `Example` objects holding both the predictions and the correct gold-standard annotations. ~~Iterable[Example]~~ |
| `attr` | The attribute to score. ~~str~~ | | `attr` | The attribute to score. ~~str~~ |
| _keyword-only_ | | | _keyword-only_ | |
| `getter` | Defaults to `getattr`. If provided, `getter(doc, attr)` should return the cats for an individual `Doc`. ~~Callable[[Doc, str], Dict[str, float]]~~ | | `getter` | Defaults to `getattr`. If provided, `getter(doc, attr)` should return the cats for an individual `Doc`. ~~Callable[[Doc, str], Dict[str, float]]~~ |
| `labels` | The set of possible labels. Defaults to `[]`. ~~Iterable[str]~~ | | `labels` | The set of possible labels. Defaults to `[]`. ~~Iterable[str]~~ |
| `multi_label` | Whether the attribute allows multiple labels. Defaults to `True`. ~~bool~~ | | `multi_label` | Whether the attribute allows multiple labels. Defaults to `True`. When set to `False` (exclusive labels), missing gold labels are interpreted as `0.0` and the threshold is set to `0.0`. ~~bool~~ |
| `positive_label` | The positive label for a binary task with exclusive classes. Defaults to `None`. ~~Optional[str]~~ | | `positive_label` | The positive label for a binary task with exclusive classes. Defaults to `None`. ~~Optional[str]~~ |
| **RETURNS** | A dictionary containing the scores, with inapplicable scores as `None`. ~~Dict[str, Optional[float]]~~ | | `threshold` | Cutoff to consider a prediction "positive". Defaults to `0.5` for multi-label, and `0.0` (i.e. whatever's highest scoring) otherwise. ~~float~~ |
| **RETURNS** | A dictionary containing the scores, with inapplicable scores as `None`. ~~Dict[str, Optional[float]]~~ |
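A small sketch of calling `Scorer.score_cats` directly on a hand-built `Example` (label names and scores are made up):

```python
from spacy.lang.en import English
from spacy.scorer import Scorer
from spacy.training import Example

nlp = English()
pred = nlp.make_doc("A text about food")
pred.cats = {"food": 0.9, "sports": 0.1}
example = Example.from_dict(pred, {"cats": {"food": 1.0, "sports": 0.0}})

scores = Scorer.score_cats([example], "cats", labels=["food", "sports"], multi_label=False)
print(scores)
```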
## Scorer.score_links {#score_links tag="staticmethod" new="3"} ## Scorer.score_links {#score_links tag="staticmethod" new="3"}
View File
@ -186,14 +186,14 @@ the character indices don't map to a valid span.
> assert span.text == "New York" > assert span.text == "New York"
> ``` > ```
| Name | Description | | Name | Description |
| ------------------------------------ | ----------------------------------------------------------------------------------------- | | ----------- | ----------------------------------------------------------------------------------------- |
| `start` | The index of the first character of the span. ~~int~~ | | `start` | The index of the first character of the span. ~~int~~ |
| `end` | The index of the first character after the span. ~~int~~ | | `end` | The index of the first character after the span. ~~int~~ |
| `label` | A label to attach to the span, e.g. for named entities. ~~Union[int, str]~~ | | `label` | A label to attach to the span, e.g. for named entities. ~~Union[int, str]~~ |
| `kb_id` <Tag variant="new">2.2</Tag> | An ID from a knowledge base to capture the meaning of a named entity. ~~Union[int, str]~~ | | `kb_id` | An ID from a knowledge base to capture the meaning of a named entity. ~~Union[int, str]~~ |
| `vector` | A meaning representation of the span. ~~numpy.ndarray[ndim=1, dtype=float32]~~ | | `vector` | A meaning representation of the span. ~~numpy.ndarray[ndim=1, dtype=float32]~~ |
| **RETURNS** | The newly constructed object or `None`. ~~Optional[Span]~~ | | **RETURNS** | The newly constructed object or `None`. ~~Optional[Span]~~ |
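For comparison, a quick sketch of the equivalent `Doc.char_span` call, which takes document-level character offsets and returns `None` if they don't align with token boundaries:

```python
import spacy

nlp = spacy.blank("en")
doc = nlp("I like New York")

span = doc.char_span(7, 15, label="GPE")   # start inclusive, end exclusive
assert span is not None and span.text == "New York"
```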
## Span.similarity {#similarity tag="method" model="vectors"} ## Span.similarity {#similarity tag="method" model="vectors"}
@ -544,26 +544,26 @@ overlaps with will be returned.
## Attributes {#attributes} ## Attributes {#attributes}
| Name | Description | | Name | Description |
| --------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | | -------------- | ----------------------------------------------------------------------------------------------------------------------------- |
| `doc` | The parent document. ~~Doc~~ | | `doc` | The parent document. ~~Doc~~ |
| `tensor` <Tag variant="new">2.1.7</Tag> | The span's slice of the parent `Doc`'s tensor. ~~numpy.ndarray~~ | | `tensor` | The span's slice of the parent `Doc`'s tensor. ~~numpy.ndarray~~ |
| `start` | The token offset for the start of the span. ~~int~~ | | `start` | The token offset for the start of the span. ~~int~~ |
| `end` | The token offset for the end of the span. ~~int~~ | | `end` | The token offset for the end of the span. ~~int~~ |
| `start_char` | The character offset for the start of the span. ~~int~~ | | `start_char` | The character offset for the start of the span. ~~int~~ |
| `end_char` | The character offset for the end of the span. ~~int~~ | | `end_char` | The character offset for the end of the span. ~~int~~ |
| `text` | A string representation of the span text. ~~str~~ | | `text` | A string representation of the span text. ~~str~~ |
| `text_with_ws` | The text content of the span with a trailing whitespace character if the last token has one. ~~str~~ | | `text_with_ws` | The text content of the span with a trailing whitespace character if the last token has one. ~~str~~ |
| `orth` | ID of the verbatim text content. ~~int~~ | | `orth` | ID of the verbatim text content. ~~int~~ |
| `orth_` | Verbatim text content (identical to `Span.text`). Exists mostly for consistency with the other attributes. ~~str~~ | | `orth_` | Verbatim text content (identical to `Span.text`). Exists mostly for consistency with the other attributes. ~~str~~ |
| `label` | The hash value of the span's label. ~~int~~ | | `label` | The hash value of the span's label. ~~int~~ |
| `label_` | The span's label. ~~str~~ | | `label_` | The span's label. ~~str~~ |
| `lemma_` | The span's lemma. Equivalent to `"".join(token.text_with_ws for token in span)`. ~~str~~ | | `lemma_` | The span's lemma. Equivalent to `"".join(token.text_with_ws for token in span)`. ~~str~~ |
| `kb_id` | The hash value of the knowledge base ID referred to by the span. ~~int~~ | | `kb_id` | The hash value of the knowledge base ID referred to by the span. ~~int~~ |
| `kb_id_` | The knowledge base ID referred to by the span. ~~str~~ | | `kb_id_` | The knowledge base ID referred to by the span. ~~str~~ |
| `ent_id` | The hash value of the named entity the root token is an instance of. ~~int~~ | | `ent_id` | The hash value of the named entity the root token is an instance of. ~~int~~ |
| `ent_id_` | The string ID of the named entity the root token is an instance of. ~~str~~ | | `ent_id_` | The string ID of the named entity the root token is an instance of. ~~str~~ |
| `id` | The hash value of the span's ID. ~~int~~ | | `id` | The hash value of the span's ID. ~~int~~ |
| `id_` | The span's ID. ~~str~~ | | `id_` | The span's ID. ~~str~~ |
| `sentiment` | A scalar value indicating the positivity or negativity of the span. ~~float~~ | | `sentiment` | A scalar value indicating the positivity or negativity of the span. ~~float~~ |
| `_` | User space for adding custom [attribute extensions](/usage/processing-pipelines#custom-components-attributes). ~~Underscore~~ | | `_` | User space for adding custom [attribute extensions](/usage/processing-pipelines#custom-components-attributes). ~~Underscore~~ |
View File
@ -63,7 +63,6 @@ architectures and their arguments and hyperparameters.
> ```python > ```python
> from spacy.pipeline.textcat import DEFAULT_SINGLE_TEXTCAT_MODEL > from spacy.pipeline.textcat import DEFAULT_SINGLE_TEXTCAT_MODEL
> config = { > config = {
> "threshold": 0.5,
> "model": DEFAULT_SINGLE_TEXTCAT_MODEL, > "model": DEFAULT_SINGLE_TEXTCAT_MODEL,
> } > }
> nlp.add_pipe("textcat", config=config) > nlp.add_pipe("textcat", config=config)
@ -82,7 +81,7 @@ architectures and their arguments and hyperparameters.
| Setting | Description | | Setting | Description |
| ----------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- | | ----------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `threshold` | Cutoff to consider a prediction "positive", relevant when printing accuracy results. ~~float~~ | | `threshold` | Cutoff to consider a prediction "positive", relevant for `textcat_multilabel` when calculating accuracy scores. ~~float~~ |
| `model` | A model instance that predicts scores for each category. Defaults to [TextCatEnsemble](/api/architectures#TextCatEnsemble). ~~Model[List[Doc], List[Floats2d]]~~ | | `model` | A model instance that predicts scores for each category. Defaults to [TextCatEnsemble](/api/architectures#TextCatEnsemble). ~~Model[List[Doc], List[Floats2d]]~~ |
| `scorer` | The scoring method. Defaults to [`Scorer.score_cats`](/api/scorer#score_cats) for the attribute `"cats"`. ~~Optional[Callable]~~ | | `scorer` | The scoring method. Defaults to [`Scorer.score_cats`](/api/scorer#score_cats) for the attribute `"cats"`. ~~Optional[Callable]~~ |
@ -123,7 +122,7 @@ shortcut for this and instantiate the component using its string name and
| `model` | The Thinc [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. ~~Model[List[Doc], List[Floats2d]]~~ | | `model` | The Thinc [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. ~~Model[List[Doc], List[Floats2d]]~~ |
| `name` | String name of the component instance. Used to add entries to the `losses` during training. ~~str~~ | | `name` | String name of the component instance. Used to add entries to the `losses` during training. ~~str~~ |
| _keyword-only_ | | | _keyword-only_ | |
| `threshold` | Cutoff to consider a prediction "positive", relevant when printing accuracy results. ~~float~~ | | `threshold` | Cutoff to consider a prediction "positive", relevant for `textcat_multilabel` when calculating accuracy scores. ~~float~~ |
| `scorer` | The scoring method. Defaults to [`Scorer.score_cats`](/api/scorer#score_cats) for the attribute `"cats"`. ~~Optional[Callable]~~ | | `scorer` | The scoring method. Defaults to [`Scorer.score_cats`](/api/scorer#score_cats) for the attribute `"cats"`. ~~Optional[Callable]~~ |
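A minimal sketch of adding the component and reading the resulting scores from `doc.cats` (an untrained model will produce arbitrary scores):

```python
import spacy

nlp = spacy.blank("en")
textcat = nlp.add_pipe("textcat")   # single-label; use "textcat_multilabel" for non-exclusive labels
textcat.add_label("POSITIVE")
textcat.add_label("NEGATIVE")
nlp.initialize()

doc = nlp("This is great")
print(doc.cats)   # e.g. {"POSITIVE": 0.5, "NEGATIVE": 0.5} before training
```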
## TextCategorizer.\_\_call\_\_ {#call tag="method"} ## TextCategorizer.\_\_call\_\_ {#call tag="method"}
View File
@ -403,75 +403,75 @@ The L2 norm of the token's vector representation.
## Attributes {#attributes} ## Attributes {#attributes}
| Name | Description | | Name | Description |
| -------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `doc` | The parent document. ~~Doc~~ | | `doc` | The parent document. ~~Doc~~ |
| `lex` <Tag variant="new">3</Tag> | The underlying lexeme. ~~Lexeme~~ | | `lex` <Tag variant="new">3</Tag> | The underlying lexeme. ~~Lexeme~~ |
| `sent` <Tag variant="new">2.0.12</Tag> | The sentence span that this token is a part of. ~~Span~~ | | `sent` | The sentence span that this token is a part of. ~~Span~~ |
| `text` | Verbatim text content. ~~str~~ | | `text` | Verbatim text content. ~~str~~ |
| `text_with_ws` | Text content, with trailing space character if present. ~~str~~ | | `text_with_ws` | Text content, with trailing space character if present. ~~str~~ |
| `whitespace_` | Trailing space character if present. ~~str~~ | | `whitespace_` | Trailing space character if present. ~~str~~ |
| `orth` | ID of the verbatim text content. ~~int~~ | | `orth` | ID of the verbatim text content. ~~int~~ |
| `orth_` | Verbatim text content (identical to `Token.text`). Exists mostly for consistency with the other attributes. ~~str~~ | | `orth_` | Verbatim text content (identical to `Token.text`). Exists mostly for consistency with the other attributes. ~~str~~ |
| `vocab` | The vocab object of the parent `Doc`. ~~Vocab~~ | | `vocab` | The vocab object of the parent `Doc`. ~~Vocab~~ |
| `tensor` <Tag variant="new">2.1.7</Tag> | The token's slice of the parent `Doc`'s tensor. ~~numpy.ndarray~~ | | `tensor` | The token's slice of the parent `Doc`'s tensor. ~~numpy.ndarray~~ |
| `head` | The syntactic parent, or "governor", of this token. ~~Token~~ | | `head` | The syntactic parent, or "governor", of this token. ~~Token~~ |
| `left_edge` | The leftmost token of this token's syntactic descendants. ~~Token~~ | | `left_edge` | The leftmost token of this token's syntactic descendants. ~~Token~~ |
| `right_edge` | The rightmost token of this token's syntactic descendants. ~~Token~~ | | `right_edge` | The rightmost token of this token's syntactic descendants. ~~Token~~ |
| `i` | The index of the token within the parent document. ~~int~~ | | `i` | The index of the token within the parent document. ~~int~~ |
| `ent_type` | Named entity type. ~~int~~ | | `ent_type` | Named entity type. ~~int~~ |
| `ent_type_` | Named entity type. ~~str~~ | | `ent_type_` | Named entity type. ~~str~~ |
| `ent_iob` | IOB code of named entity tag. `3` means the token begins an entity, `2` means it is outside an entity, `1` means it is inside an entity, and `0` means no entity tag is set. ~~int~~ | | `ent_iob` | IOB code of named entity tag. `3` means the token begins an entity, `2` means it is outside an entity, `1` means it is inside an entity, and `0` means no entity tag is set. ~~int~~ |
| `ent_iob_` | IOB code of named entity tag. "B" means the token begins an entity, "I" means it is inside an entity, "O" means it is outside an entity, and "" means no entity tag is set. ~~str~~ | | `ent_iob_` | IOB code of named entity tag. "B" means the token begins an entity, "I" means it is inside an entity, "O" means it is outside an entity, and "" means no entity tag is set. ~~str~~ |
| `ent_kb_id` <Tag variant="new">2.2</Tag> | Knowledge base ID that refers to the named entity this token is a part of, if any. ~~int~~ | | `ent_kb_id` | Knowledge base ID that refers to the named entity this token is a part of, if any. ~~int~~ |
| `ent_kb_id_` <Tag variant="new">2.2</Tag> | Knowledge base ID that refers to the named entity this token is a part of, if any. ~~str~~ | | `ent_kb_id_` | Knowledge base ID that refers to the named entity this token is a part of, if any. ~~str~~ |
| `ent_id` | ID of the entity the token is an instance of, if any. Currently not used, but potentially for coreference resolution. ~~int~~ | | `ent_id` | ID of the entity the token is an instance of, if any. Currently not used, but potentially for coreference resolution. ~~int~~ |
| `ent_id_` | ID of the entity the token is an instance of, if any. Currently not used, but potentially for coreference resolution. ~~str~~ | | `ent_id_` | ID of the entity the token is an instance of, if any. Currently not used, but potentially for coreference resolution. ~~str~~ |
| `lemma` | Base form of the token, with no inflectional suffixes. ~~int~~ | | `lemma` | Base form of the token, with no inflectional suffixes. ~~int~~ |
| `lemma_` | Base form of the token, with no inflectional suffixes. ~~str~~ | | `lemma_` | Base form of the token, with no inflectional suffixes. ~~str~~ |
| `norm` | The token's norm, i.e. a normalized form of the token text. Can be set in the language's [tokenizer exceptions](/usage/linguistic-features#language-data). ~~int~~ | | `norm` | The token's norm, i.e. a normalized form of the token text. Can be set in the language's [tokenizer exceptions](/usage/linguistic-features#language-data). ~~int~~ |
| `norm_` | The token's norm, i.e. a normalized form of the token text. Can be set in the language's [tokenizer exceptions](/usage/linguistic-features#language-data). ~~str~~ | | `norm_` | The token's norm, i.e. a normalized form of the token text. Can be set in the language's [tokenizer exceptions](/usage/linguistic-features#language-data). ~~str~~ |
| `lower` | Lowercase form of the token. ~~int~~ | | `lower` | Lowercase form of the token. ~~int~~ |
| `lower_` | Lowercase form of the token text. Equivalent to `Token.text.lower()`. ~~str~~ | | `lower_` | Lowercase form of the token text. Equivalent to `Token.text.lower()`. ~~str~~ |
| `shape` | Transform of the token's string to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example, `"Xxxx"` or `"dd"`. ~~int~~ | | `shape` | Transform of the token's string to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example, `"Xxxx"` or `"dd"`. ~~int~~ |
| `shape_` | Transform of the token's string to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example, `"Xxxx"` or `"dd"`. ~~str~~ | | `shape_` | Transform of the token's string to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example, `"Xxxx"` or `"dd"`. ~~str~~ |
| `prefix` | Hash value of a length-N substring from the start of the token. Defaults to `N=1`. ~~int~~ | | `prefix` | Hash value of a length-N substring from the start of the token. Defaults to `N=1`. ~~int~~ |
| `prefix_` | A length-N substring from the start of the token. Defaults to `N=1`. ~~str~~ | | `prefix_` | A length-N substring from the start of the token. Defaults to `N=1`. ~~str~~ |
| `suffix` | Hash value of a length-N substring from the end of the token. Defaults to `N=3`. ~~int~~ | | `suffix` | Hash value of a length-N substring from the end of the token. Defaults to `N=3`. ~~int~~ |
| `suffix_` | Length-N substring from the end of the token. Defaults to `N=3`. ~~str~~ | | `suffix_` | Length-N substring from the end of the token. Defaults to `N=3`. ~~str~~ |
| `is_alpha` | Does the token consist of alphabetic characters? Equivalent to `token.text.isalpha()`. ~~bool~~ | | `is_alpha` | Does the token consist of alphabetic characters? Equivalent to `token.text.isalpha()`. ~~bool~~ |
| `is_ascii` | Does the token consist of ASCII characters? Equivalent to `all(ord(c) < 128 for c in token.text)`. ~~bool~~ | | `is_ascii` | Does the token consist of ASCII characters? Equivalent to `all(ord(c) < 128 for c in token.text)`. ~~bool~~ |
| `is_digit` | Does the token consist of digits? Equivalent to `token.text.isdigit()`. ~~bool~~ | | `is_digit` | Does the token consist of digits? Equivalent to `token.text.isdigit()`. ~~bool~~ |
| `is_lower` | Is the token in lowercase? Equivalent to `token.text.islower()`. ~~bool~~ | | `is_lower` | Is the token in lowercase? Equivalent to `token.text.islower()`. ~~bool~~ |
| `is_upper` | Is the token in uppercase? Equivalent to `token.text.isupper()`. ~~bool~~ | | `is_upper` | Is the token in uppercase? Equivalent to `token.text.isupper()`. ~~bool~~ |
| `is_title` | Is the token in titlecase? Equivalent to `token.text.istitle()`. ~~bool~~ | | `is_title` | Is the token in titlecase? Equivalent to `token.text.istitle()`. ~~bool~~ |
| `is_punct` | Is the token punctuation? ~~bool~~ | | `is_punct` | Is the token punctuation? ~~bool~~ |
| `is_left_punct` | Is the token a left punctuation mark, e.g. `"("` ? ~~bool~~ | | `is_left_punct` | Is the token a left punctuation mark, e.g. `"("` ? ~~bool~~ |
| `is_right_punct` | Is the token a right punctuation mark, e.g. `")"` ? ~~bool~~ | | `is_right_punct` | Is the token a right punctuation mark, e.g. `")"` ? ~~bool~~ |
| `is_sent_start` | Does the token start a sentence? ~~bool~~ or `None` if unknown. Defaults to `True` for the first token in the `Doc`. | | `is_sent_start` | Does the token start a sentence? ~~bool~~ or `None` if unknown. Defaults to `True` for the first token in the `Doc`. |
| `is_sent_end` | Does the token end a sentence? ~~bool~~ or `None` if unknown. | | `is_sent_end` | Does the token end a sentence? ~~bool~~ or `None` if unknown. |
| `is_space` | Does the token consist of whitespace characters? Equivalent to `token.text.isspace()`. ~~bool~~ | | `is_space` | Does the token consist of whitespace characters? Equivalent to `token.text.isspace()`. ~~bool~~ |
| `is_bracket` | Is the token a bracket? ~~bool~~ | | `is_bracket` | Is the token a bracket? ~~bool~~ |
| `is_quote` | Is the token a quotation mark? ~~bool~~ | | `is_quote` | Is the token a quotation mark? ~~bool~~ |
| `is_currency` <Tag variant="new">2.0.8</Tag> | Is the token a currency symbol? ~~bool~~ | | `is_currency` | Is the token a currency symbol? ~~bool~~ |
| `like_url` | Does the token resemble a URL? ~~bool~~ | | `like_url` | Does the token resemble a URL? ~~bool~~ |
| `like_num` | Does the token represent a number? e.g. "10.9", "10", "ten", etc. ~~bool~~ | | `like_num` | Does the token represent a number? e.g. "10.9", "10", "ten", etc. ~~bool~~ |
| `like_email` | Does the token resemble an email address? ~~bool~~ | | `like_email` | Does the token resemble an email address? ~~bool~~ |
| `is_oov` | Is the token out-of-vocabulary (i.e. does it not have a word vector)? ~~bool~~ | | `is_oov` | Is the token out-of-vocabulary (i.e. does it not have a word vector)? ~~bool~~ |
| `is_stop` | Is the token part of a "stop list"? ~~bool~~ | | `is_stop` | Is the token part of a "stop list"? ~~bool~~ |
| `pos` | Coarse-grained part-of-speech from the [Universal POS tag set](https://universaldependencies.org/u/pos/). ~~int~~ | | `pos` | Coarse-grained part-of-speech from the [Universal POS tag set](https://universaldependencies.org/u/pos/). ~~int~~ |
| `pos_` | Coarse-grained part-of-speech from the [Universal POS tag set](https://universaldependencies.org/u/pos/). ~~str~~ | | `pos_` | Coarse-grained part-of-speech from the [Universal POS tag set](https://universaldependencies.org/u/pos/). ~~str~~ |
| `tag` | Fine-grained part-of-speech. ~~int~~ | | `tag` | Fine-grained part-of-speech. ~~int~~ |
| `tag_` | Fine-grained part-of-speech. ~~str~~ | | `tag_` | Fine-grained part-of-speech. ~~str~~ |
| `morph` <Tag variant="new">3</Tag> | Morphological analysis. ~~MorphAnalysis~~ | | `morph` <Tag variant="new">3</Tag> | Morphological analysis. ~~MorphAnalysis~~ |
| `dep` | Syntactic dependency relation. ~~int~~ | | `dep` | Syntactic dependency relation. ~~int~~ |
| `dep_` | Syntactic dependency relation. ~~str~~ | | `dep_` | Syntactic dependency relation. ~~str~~ |
| `lang` | Language of the parent document's vocabulary. ~~int~~ | | `lang` | Language of the parent document's vocabulary. ~~int~~ |
| `lang_` | Language of the parent document's vocabulary. ~~str~~ | | `lang_` | Language of the parent document's vocabulary. ~~str~~ |
| `prob` | Smoothed log probability estimate of token's word type (context-independent entry in the vocabulary). ~~float~~ | | `prob` | Smoothed log probability estimate of token's word type (context-independent entry in the vocabulary). ~~float~~ |
| `idx` | The character offset of the token within the parent document. ~~int~~ | | `idx` | The character offset of the token within the parent document. ~~int~~ |
| `sentiment` | A scalar value indicating the positivity or negativity of the token. ~~float~~ | | `sentiment` | A scalar value indicating the positivity or negativity of the token. ~~float~~ |
| `lex_id` | Sequential ID of the token's lexical type, used to index into tables, e.g. for word vectors. ~~int~~ | | `lex_id` | Sequential ID of the token's lexical type, used to index into tables, e.g. for word vectors. ~~int~~ |
| `rank` | Sequential ID of the token's lexical type, used to index into tables, e.g. for word vectors. ~~int~~ | | `rank` | Sequential ID of the token's lexical type, used to index into tables, e.g. for word vectors. ~~int~~ |
| `cluster` | Brown cluster ID. ~~int~~ | | `cluster` | Brown cluster ID. ~~int~~ |
| `_` | User space for adding custom [attribute extensions](/usage/processing-pipelines#custom-components-attributes). ~~Underscore~~ | | `_` | User space for adding custom [attribute extensions](/usage/processing-pipelines#custom-components-attributes). ~~Underscore~~ |
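A brief sketch of reading a few of the token attributes above; only lexical attributes are used, so a blank pipeline is enough:

```python
import spacy

nlp = spacy.blank("en")
doc = nlp("Apple paid $1 billion")
token = doc[2]   # "$"

print(token.text, token.i, token.idx)   # text, token index, character offset
print(token.is_currency, token.like_num, token.shape_)
```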
View File
@ -45,16 +45,16 @@ specified separately using the new `exclude` keyword argument.
> nlp = spacy.load("en_core_web_sm", exclude=["parser", "tagger"]) > nlp = spacy.load("en_core_web_sm", exclude=["parser", "tagger"])
> ``` > ```
| Name | Description | | Name | Description |
| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | | ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `name` | Pipeline to load, i.e. package name or path. ~~Union[str, Path]~~ | | `name` | Pipeline to load, i.e. package name or path. ~~Union[str, Path]~~ |
| _keyword-only_ | | | _keyword-only_ | |
| `vocab` | Optional shared vocab to pass in on initialization. If `True` (default), a new `Vocab` object will be created. ~~Union[Vocab, bool]~~ | | `vocab` | Optional shared vocab to pass in on initialization. If `True` (default), a new `Vocab` object will be created. ~~Union[Vocab, bool]~~ |
| `disable` | Name(s) of pipeline component(s) to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [nlp.enable_pipe](/api/language#enable_pipe). ~~Union[str, Iterable[str]]~~ | | `disable` | Name(s) of pipeline component(s) to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [nlp.enable_pipe](/api/language#enable_pipe). Is merged with the config entry `nlp.disabled`. ~~Union[str, Iterable[str]]~~ |
| `enable` <Tag variant="new">3.4</Tag> | Name(s) of pipeline component(s) to [enable](/usage/processing-pipelines#disabling). All other pipes will be disabled. ~~Union[str, Iterable[str]]~~ | | `enable` <Tag variant="new">3.4</Tag> | Name(s) of pipeline component(s) to [enable](/usage/processing-pipelines#disabling). All other pipes will be disabled. ~~Union[str, Iterable[str]]~~ |
| `exclude` <Tag variant="new">3</Tag> | Name(s) of pipeline component(s) to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~Union[str, Iterable[str]]~~ | | `exclude` <Tag variant="new">3</Tag> | Name(s) of pipeline component(s) to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~Union[str, Iterable[str]]~~ |
| `config` <Tag variant="new">3</Tag> | Optional config overrides, either as nested dict or dict keyed by section value in dot notation, e.g. `"components.name.value"`. ~~Union[Dict[str, Any], Config]~~ | | `config` <Tag variant="new">3</Tag> | Optional config overrides, either as nested dict or dict keyed by section value in dot notation, e.g. `"components.name.value"`. ~~Union[Dict[str, Any], Config]~~ |
| **RETURNS** | A `Language` object with the loaded pipeline. ~~Language~~ | | **RETURNS** | A `Language` object with the loaded pipeline. ~~Language~~ |
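A small sketch combining these arguments (pipeline name and override values are illustrative):

```python
import spacy

# Skip loading the parser entirely, keep NER loaded but disabled,
# and override a value from the pipeline's config.cfg
nlp = spacy.load(
    "en_core_web_sm",
    exclude=["parser"],
    disable=["ner"],
    config={"nlp": {"batch_size": 128}},
)
print(nlp.pipe_names)   # no "parser", no "ner"
nlp.enable_pipe("ner")
```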
Essentially, `spacy.load()` is a convenience wrapper that reads the pipeline's Essentially, `spacy.load()` is a convenience wrapper that reads the pipeline's
[`config.cfg`](/api/data-formats#config), uses the language and pipeline [`config.cfg`](/api/data-formats#config), uses the language and pipeline
@ -354,22 +354,22 @@ If a setting is not present in the options, the default value will be used.
> displacy.serve(doc, style="dep", options=options) > displacy.serve(doc, style="dep", options=options)
> ``` > ```
| Name | Description | | Name | Description |
| ------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------- | | ------------------ | -------------------------------------------------------------------------------------------------------------------------------------------- |
| `fine_grained` | Use fine-grained part-of-speech tags (`Token.tag_`) instead of coarse-grained tags (`Token.pos_`). Defaults to `False`. ~~bool~~ | | `fine_grained` | Use fine-grained part-of-speech tags (`Token.tag_`) instead of coarse-grained tags (`Token.pos_`). Defaults to `False`. ~~bool~~ |
| `add_lemma` <Tag variant="new">2.2.4</Tag> | Print the lemmas in a separate row below the token texts. Defaults to `False`. ~~bool~~ | | `add_lemma` | Print the lemmas in a separate row below the token texts. Defaults to `False`. ~~bool~~ |
| `collapse_punct` | Attach punctuation to tokens. Can make the parse more readable, as it prevents long arcs to attach punctuation. Defaults to `True`. ~~bool~~ | | `collapse_punct` | Attach punctuation to tokens. Can make the parse more readable, as it prevents long arcs to attach punctuation. Defaults to `True`. ~~bool~~ |
| `collapse_phrases` | Merge noun phrases into one token. Defaults to `False`. ~~bool~~ | | `collapse_phrases` | Merge noun phrases into one token. Defaults to `False`. ~~bool~~ |
| `compact` | "Compact mode" with square arrows that takes up less space. Defaults to `False`. ~~bool~~ | | `compact` | "Compact mode" with square arrows that takes up less space. Defaults to `False`. ~~bool~~ |
| `color` | Text color (HEX, RGB or color names). Defaults to `"#000000"`. ~~str~~ | | `color` | Text color (HEX, RGB or color names). Defaults to `"#000000"`. ~~str~~ |
| `bg` | Background color (HEX, RGB or color names). Defaults to `"#ffffff"`. ~~str~~ | | `bg` | Background color (HEX, RGB or color names). Defaults to `"#ffffff"`. ~~str~~ |
| `font` | Font name or font family for all text. Defaults to `"Arial"`. ~~str~~ | | `font` | Font name or font family for all text. Defaults to `"Arial"`. ~~str~~ |
| `offset_x` | Spacing on left side of the SVG in px. Defaults to `50`. ~~int~~ | | `offset_x` | Spacing on left side of the SVG in px. Defaults to `50`. ~~int~~ |
| `arrow_stroke` | Width of arrow path in px. Defaults to `2`. ~~int~~ | | `arrow_stroke` | Width of arrow path in px. Defaults to `2`. ~~int~~ |
| `arrow_width` | Width of arrow head in px. Defaults to `10` in regular mode and `8` in compact mode. ~~int~~ | | `arrow_width` | Width of arrow head in px. Defaults to `10` in regular mode and `8` in compact mode. ~~int~~ |
| `arrow_spacing` | Spacing between arrows in px to avoid overlaps. Defaults to `20` in regular mode and `12` in compact mode. ~~int~~ | | `arrow_spacing` | Spacing between arrows in px to avoid overlaps. Defaults to `20` in regular mode and `12` in compact mode. ~~int~~ |
| `word_spacing` | Vertical spacing between words and arcs in px. Defaults to `45`. ~~int~~ | | `word_spacing` | Vertical spacing between words and arcs in px. Defaults to `45`. ~~int~~ |
| `distance` | Distance between words in px. Defaults to `175` in regular mode and `150` in compact mode. ~~int~~ | | `distance` | Distance between words in px. Defaults to `175` in regular mode and `150` in compact mode. ~~int~~ |
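A minimal sketch of passing a few of these options to the dependency visualizer (assuming `en_core_web_sm` is installed):

```python
import spacy
from spacy import displacy

nlp = spacy.load("en_core_web_sm")
doc = nlp("Autonomous cars shift insurance liability toward manufacturers")

options = {"compact": True, "add_lemma": True, "distance": 120}
html = displacy.render(doc, style="dep", options=options)   # returns SVG markup as a string
```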
#### Named Entity Visualizer options {#displacy_options-ent} #### Named Entity Visualizer options {#displacy_options-ent}
@ -385,7 +385,7 @@ If a setting is not present in the options, the default value will be used.
| ------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | ------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `ents` | Entity types to highlight or `None` for all types (default). ~~Optional[List[str]]~~ | | `ents` | Entity types to highlight or `None` for all types (default). ~~Optional[List[str]]~~ |
| `colors` | Color overrides. Entity types should be mapped to color names or values. ~~Dict[str, str]~~ | | `colors` | Color overrides. Entity types should be mapped to color names or values. ~~Dict[str, str]~~ |
| `template` <Tag variant="new">2.2</Tag> | Optional template to overwrite the HTML used to render entity spans. Should be a format string and can use `{bg}`, `{text}` and `{label}`. See [`templates.py`](%%GITHUB_SPACY/spacy/displacy/templates.py) for examples. ~~Optional[str]~~ | | `template` | Optional template to overwrite the HTML used to render entity spans. Should be a format string and can use `{bg}`, `{text}` and `{label}`. See [`templates.py`](%%GITHUB_SPACY/spacy/displacy/templates.py) for examples. ~~Optional[str]~~ |
| `kb_url_template` <Tag variant="new">3.2.1</Tag> | Optional template to construct the KB url for the entity to link to. Expects a python f-string format with single field to fill in. ~~Optional[str]~~ | | `kb_url_template` <Tag variant="new">3.2.1</Tag> | Optional template to construct the KB url for the entity to link to. Expects a python f-string format with single field to fill in. ~~Optional[str]~~ |
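A short sketch of the entity visualizer options, restricting the entity types and overriding one color (values are illustrative):

```python
import spacy
from spacy import displacy

nlp = spacy.load("en_core_web_sm")
doc = nlp("Google was founded in September 1998 in California")

options = {"ents": ["ORG", "DATE"], "colors": {"ORG": "#7aecec"}}
html = displacy.render(doc, style="ent", options=options)
```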
#### Span Visualizer options {#displacy_options-span} #### Span Visualizer options {#displacy_options-span}
View File
@ -50,7 +50,7 @@ modified later.
| _keyword-only_ | | | _keyword-only_ | |
| `strings` | The string store. A new string store is created if one is not provided. Defaults to `None`. ~~Optional[StringStore]~~ | | `strings` | The string store. A new string store is created if one is not provided. Defaults to `None`. ~~Optional[StringStore]~~ |
| `shape` | Size of the table as `(n_entries, n_columns)`, the number of entries and number of columns. Not required if you're initializing the object with `data` and `keys`. ~~Tuple[int, int]~~ | | `shape` | Size of the table as `(n_entries, n_columns)`, the number of entries and number of columns. Not required if you're initializing the object with `data` and `keys`. ~~Tuple[int, int]~~ |
| `data` | The vector data. ~~numpy.ndarray[ndim=1, dtype=float32]~~ | | `data` | The vector data. ~~numpy.ndarray[ndim=2, dtype=float32]~~ |
| `keys` | A sequence of keys aligned with the data. ~~Iterable[Union[str, int]]~~ | | `keys` | A sequence of keys aligned with the data. ~~Iterable[Union[str, int]]~~ |
| `name` | A name to identify the vectors table. ~~str~~ | | `name` | A name to identify the vectors table. ~~str~~ |
| `mode` <Tag variant="new">3.2</Tag> | Vectors mode: `"default"` or [`"floret"`](https://github.com/explosion/floret) (default: `"default"`). ~~str~~ | | `mode` <Tag variant="new">3.2</Tag> | Vectors mode: `"default"` or [`"floret"`](https://github.com/explosion/floret) (default: `"default"`). ~~str~~ |
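A minimal construction sketch with a tiny, made-up table of 2-dimensional vectors:

```python
import numpy
import spacy
from spacy.vectors import Vectors

data = numpy.asarray([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]], dtype="f")
vectors = Vectors(data=data, keys=["cat", "dog", "rat"], name="demo_vectors")
print(vectors.shape)   # (3, 2)

# Attach the table to a vocab so lexemes can look up their vectors
nlp = spacy.blank("en")
nlp.vocab.vectors = vectors
print(nlp.vocab["cat"].vector)
```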
View File
@ -21,15 +21,15 @@ Create the vocabulary.
> vocab = Vocab(strings=["hello", "world"]) > vocab = Vocab(strings=["hello", "world"])
> ``` > ```
| Name | Description | | Name | Description |
| ------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | ------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `lex_attr_getters` | A dictionary mapping attribute IDs to functions to compute them. Defaults to `None`. ~~Optional[Dict[str, Callable[[str], Any]]]~~ | | `lex_attr_getters` | A dictionary mapping attribute IDs to functions to compute them. Defaults to `None`. ~~Optional[Dict[str, Callable[[str], Any]]]~~ |
| `strings` | A [`StringStore`](/api/stringstore) that maps strings to hash values, and vice versa, or a list of strings. ~~Union[List[str], StringStore]~~ | | `strings` | A [`StringStore`](/api/stringstore) that maps strings to hash values, and vice versa, or a list of strings. ~~Union[List[str], StringStore]~~ |
| `lookups` | A [`Lookups`](/api/lookups) that stores the `lexeme_norm` and other large lookup tables. Defaults to `None`. ~~Optional[Lookups]~~ | | `lookups` | A [`Lookups`](/api/lookups) that stores the `lexeme_norm` and other large lookup tables. Defaults to `None`. ~~Optional[Lookups]~~ |
| `oov_prob` | The default OOV probability. Defaults to `-20.0`. ~~float~~ | | `oov_prob` | The default OOV probability. Defaults to `-20.0`. ~~float~~ |
| `vectors_name` <Tag variant="new">2.2</Tag> | A name to identify the vectors table. ~~str~~ | | `vectors_name` | A name to identify the vectors table. ~~str~~ |
| `writing_system` | A dictionary describing the language's writing system. Typically provided by [`Language.Defaults`](/api/language#defaults). ~~Dict[str, Any]~~ | | `writing_system` | A dictionary describing the language's writing system. Typically provided by [`Language.Defaults`](/api/language#defaults). ~~Dict[str, Any]~~ |
| `get_noun_chunks` | A function that yields base noun phrases used for [`Doc.noun_chunks`](/api/doc#noun_chunks). ~~Optional[Callable[[Union[Doc, Span], Iterator[Tuple[int, int, int]]]]]~~ | | `get_noun_chunks` | A function that yields base noun phrases used for [`Doc.noun_chunks`](/api/doc#noun_chunks). ~~Optional[Callable[[Union[Doc, Span], Iterator[Tuple[int, int, int]]]]]~~ |
## Vocab.\_\_len\_\_ {#len tag="method"} ## Vocab.\_\_len\_\_ {#len tag="method"}
@ -311,10 +311,10 @@ Load state from a binary string.
| Name | Description | | Name | Description |
| ---------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | ---------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `strings` | A table managing the string-to-int mapping. ~~StringStore~~ | | `strings` | A table managing the string-to-int mapping. ~~StringStore~~ |
| `vectors` <Tag variant="new">2</Tag> | A table associating word IDs to word vectors. ~~Vectors~~ | | `vectors` | A table associating word IDs to word vectors. ~~Vectors~~ |
| `vectors_length` | Number of dimensions for each word vector. ~~int~~ | | `vectors_length` | Number of dimensions for each word vector. ~~int~~ |
| `lookups` | The available lookup tables in this vocab. ~~Lookups~~ | | `lookups` | The available lookup tables in this vocab. ~~Lookups~~ |
| `writing_system` <Tag variant="new">2.1</Tag> | A dict with information about the language's writing system. ~~Dict[str, Any]~~ | | `writing_system` | A dict with information about the language's writing system. ~~Dict[str, Any]~~ |
| `get_noun_chunks` <Tag variant="new">3.0</Tag> | A function that yields base noun phrases used for [`Doc.noun_chunks`](/api/doc#noun_chunks). ~~Optional[Callable[[Union[Doc, Span], Iterator[Tuple[int, int, int]]]]]~~ | | `get_noun_chunks` <Tag variant="new">3.0</Tag> | A function that yields base noun phrases used for [`Doc.noun_chunks`](/api/doc#noun_chunks). ~~Optional[Callable[[Union[Doc, Span], Iterator[Tuple[int, int, int]]]]]~~ |
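For orientation, a quick sketch of reading a few of these attributes from a pipeline with word vectors (assuming `en_core_web_md` is installed):

```python
import spacy

nlp = spacy.load("en_core_web_md")
vocab = nlp.vocab

print(len(vocab))            # number of lexemes
print(vocab.vectors.shape)   # (n_vectors, vectors_length)
print(vocab.vectors_length)
print(vocab.writing_system)  # e.g. {"direction": "ltr", "has_case": True, "has_letters": True}
```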
## Serialization fields {#serialization-fields} ## Serialization fields {#serialization-fields}
View File
@ -75,7 +75,6 @@ spaCy's [`setup.cfg`](%%GITHUB_SPACY/setup.cfg) for details on what's included.
| ---------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | ---------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `lookups` | Install [`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data) for data tables for lemmatization and lexeme normalization. The data is serialized with trained pipelines, so you only need this package if you want to train your own models. | | `lookups` | Install [`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data) for data tables for lemmatization and lexeme normalization. The data is serialized with trained pipelines, so you only need this package if you want to train your own models. |
| `transformers` | Install [`spacy-transformers`](https://github.com/explosion/spacy-transformers). The package will be installed automatically when you install a transformer-based pipeline. | | `transformers` | Install [`spacy-transformers`](https://github.com/explosion/spacy-transformers). The package will be installed automatically when you install a transformer-based pipeline. |
| `ray` | Install [`spacy-ray`](https://github.com/explosion/spacy-ray) to add CLI commands for [parallel training](/usage/training#parallel-training). |
| `cuda`, ... | Install spaCy with GPU support provided by [CuPy](https://cupy.chainer.org) for your given CUDA version. See the GPU [installation instructions](#gpu) for details and options. | | `cuda`, ... | Install spaCy with GPU support provided by [CuPy](https://cupy.chainer.org) for your given CUDA version. See the GPU [installation instructions](#gpu) for details and options. |
| `apple` | Install [`thinc-apple-ops`](https://github.com/explosion/thinc-apple-ops) to improve performance on an Apple M1. | | `apple` | Install [`thinc-apple-ops`](https://github.com/explosion/thinc-apple-ops) to improve performance on an Apple M1. |
| `ja`, `ko`, `th` | Install additional dependencies required for tokenization for the [languages](/usage/models#languages). | | `ja`, `ko`, `th` | Install additional dependencies required for tokenization for the [languages](/usage/models#languages). |

View File

@@ -363,7 +363,8 @@ nlp.enable_pipe("tagger")
``` ```
In addition to `disable`, `spacy.load()` also accepts `enable`. If `enable` is In addition to `disable`, `spacy.load()` also accepts `enable`. If `enable` is
set, all components except for those in `enable` are disabled. set, all components except for those in `enable` are disabled. If `enable` and
`disable` conflict (i.e. the same component is included in both), an error is raised.
```python ```python
# Load the complete pipeline, but disable all components except for tok2vec and tagger # Load the complete pipeline, but disable all components except for tok2vec and tagger
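# A hedged sketch of the enable/disable semantics described above; the
# pipeline name is an illustrative assumption:
import spacy
nlp = spacy.load("en_core_web_sm", enable=["tok2vec", "tagger"])
# Including the same component in both enable and disable raises an error:
# spacy.load("en_core_web_sm", enable=["tagger"], disable=["tagger"])  # ValueError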

View File

@@ -1014,54 +1014,6 @@ https://github.com/explosion/projects/blob/v3/integrations/fastapi/scripts/main.
--- ---
### Ray {#ray} <IntegrationLogo name="ray" width={100} height="auto" align="right" />
> #### Installation
>
> ```cli
> $ pip install -U %%SPACY_PKG_NAME[ray]%%SPACY_PKG_FLAGS
> # Check that the CLI is registered
> $ python -m spacy ray --help
> ```
[Ray](https://ray.io/) is a fast and simple framework for building and running
**distributed applications**. You can use Ray for parallel and distributed
training with spaCy via our lightweight
[`spacy-ray`](https://github.com/explosion/spacy-ray) extension package. If the
package is installed in the same environment as spaCy, it will automatically add
[`spacy ray`](/api/cli#ray) commands to your spaCy CLI. See the usage guide on
[parallel training](/usage/training#parallel-training) for more details on how
it works under the hood.
<Project id="integrations/ray">
Get started with parallel training using our project template. It trains a
simple model on a Universal Dependencies Treebank and lets you parallelize the
training with Ray.
</Project>
You can integrate [`spacy ray train`](/api/cli#ray-train) into your
`project.yml` just like the regular training command and pass it the config, an
optional output directory or remote storage URL, and config overrides if needed.
<!-- prettier-ignore -->
```yaml
### project.yml
commands:
- name: "ray"
help: "Train a model via parallel training with Ray"
script:
- "python -m spacy ray train configs/config.cfg -o training/ --paths.train corpus/train.spacy --paths.dev corpus/dev.spacy"
deps:
- "corpus/train.spacy"
- "corpus/dev.spacy"
outputs:
- "training/model-best"
```
---
### Weights & Biases {#wandb} <IntegrationLogo name="wandb" width={175} height="auto" align="right" /> ### Weights & Biases {#wandb} <IntegrationLogo name="wandb" width={175} height="auto" align="right" />
[Weights & Biases](https://www.wandb.com/) is a popular platform for experiment [Weights & Biases](https://www.wandb.com/) is a popular platform for experiment

View File

@@ -162,7 +162,7 @@ rule-based matching are:
| Attribute | Description | | Attribute | Description |
| ---------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | ---------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `ORTH` | The exact verbatim text of a token. ~~str~~ | | `ORTH` | The exact verbatim text of a token. ~~str~~ |
| `TEXT` <Tag variant="new">2.1</Tag> | The exact verbatim text of a token. ~~str~~ | | `TEXT` | The exact verbatim text of a token. ~~str~~ |
| `NORM` | The normalized form of the token text. ~~str~~ | | `NORM` | The normalized form of the token text. ~~str~~ |
| `LOWER` | The lowercase form of the token text. ~~str~~ | | `LOWER` | The lowercase form of the token text. ~~str~~ |
| `LENGTH` | The length of the token text. ~~int~~ | | `LENGTH` | The length of the token text. ~~int~~ |
@@ -174,7 +174,7 @@ rule-based matching are:
| `SPACY` | Token has a trailing space. ~~bool~~ | | `SPACY` | Token has a trailing space. ~~bool~~ |
| `POS`, `TAG`, `MORPH`, `DEP`, `LEMMA`, `SHAPE` | The token's simple and extended part-of-speech tag, morphological analysis, dependency label, lemma, shape. Note that the values of these attributes are case-sensitive. For a list of available part-of-speech tags and dependency labels, see the [Annotation Specifications](/api/annotation). ~~str~~ | | `POS`, `TAG`, `MORPH`, `DEP`, `LEMMA`, `SHAPE` | The token's simple and extended part-of-speech tag, morphological analysis, dependency label, lemma, shape. Note that the values of these attributes are case-sensitive. For a list of available part-of-speech tags and dependency labels, see the [Annotation Specifications](/api/annotation). ~~str~~ |
| `ENT_TYPE` | The token's entity label. ~~str~~ | | `ENT_TYPE` | The token's entity label. ~~str~~ |
| `_` <Tag variant="new">2.1</Tag> | Properties in [custom extension attributes](/usage/processing-pipelines#custom-components-attributes). ~~Dict[str, Any]~~ | | `_` | Properties in [custom extension attributes](/usage/processing-pipelines#custom-components-attributes). ~~Dict[str, Any]~~ |
| `OP` | [Operator or quantifier](#quantifiers) to determine how often to match a token pattern. ~~str~~ | | `OP` | [Operator or quantifier](#quantifiers) to determine how often to match a token pattern. ~~str~~ |
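As a minimal sketch of how these attributes combine in a token pattern (the pattern and text here are illustrative):

```python
import spacy
from spacy.matcher import Matcher

nlp = spacy.blank("en")
matcher = Matcher(nlp.vocab)
# "hello" in any casing, followed by one or more punctuation tokens
pattern = [{"LOWER": "hello"}, {"IS_PUNCT": True, "OP": "+"}]
matcher.add("HELLO_PUNCT", [pattern])

doc = nlp("Hello!!!")
# One match per possible span: "Hello!", "Hello!!", "Hello!!!"
print([doc[start:end].text for _, start, end in matcher(doc)])
```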
<Accordion title="Does it matter if the attribute names are uppercase or lowercase?"> <Accordion title="Does it matter if the attribute names are uppercase or lowercase?">
@@ -375,7 +375,7 @@ scoped quantifiers instead, you can build those behaviors with `on_match`
callbacks. callbacks.
| OP | Description | | OP | Description |
|---------|------------------------------------------------------------------------| | ------- | ---------------------------------------------------------------------- |
| `!` | Negate the pattern, by requiring it to match exactly 0 times. | | `!` | Negate the pattern, by requiring it to match exactly 0 times. |
| `?` | Make the pattern optional, by allowing it to match 0 or 1 times. | | `?` | Make the pattern optional, by allowing it to match 0 or 1 times. |
| `+` | Require the pattern to match 1 or more times. | | `+` | Require the pattern to match 1 or more times. |
@@ -1792,7 +1792,7 @@ the entity `Span` for example `._.orgs` or `._.prev_orgs` and
> [`Doc.retokenize`](/api/doc#retokenize) context manager: > [`Doc.retokenize`](/api/doc#retokenize) context manager:
> >
> ```python > ```python
> with doc.retokenize() as retokenize: > with doc.retokenize() as retokenizer:
> for ent in doc.ents: > for ent in doc.ents:
> retokenizer.merge(ent) > retokenizer.merge(ent)
> ``` > ```

View File

@@ -306,12 +306,12 @@ pipeline component factories, language classes and other settings. To make spaCy
use your entry points, your package needs to expose them and it needs to be use your entry points, your package needs to expose them and it needs to be
installed in the same environment. That's it. installed in the same environment. That's it.
| Entry point | Description | | Entry point | Description |
| ------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | ------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| [`spacy_factories`](#entry-points-components) | Group of entry points for pipeline component factories, keyed by component name. Can be used to expose custom components defined by another package. | | [`spacy_factories`](#entry-points-components) | Group of entry points for pipeline component factories, keyed by component name. Can be used to expose custom components defined by another package. |
| [`spacy_languages`](#entry-points-languages) | Group of entry points for custom [`Language` subclasses](/usage/linguistic-features#language-data), keyed by language shortcut. | | [`spacy_languages`](#entry-points-languages) | Group of entry points for custom [`Language` subclasses](/usage/linguistic-features#language-data), keyed by language shortcut. |
| `spacy_lookups` <Tag variant="new">2.2</Tag> | Group of entry points for custom [`Lookups`](/api/lookups), including lemmatizer data. Used by spaCy's [`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data) package. | | `spacy_lookups` | Group of entry points for custom [`Lookups`](/api/lookups), including lemmatizer data. Used by spaCy's [`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data) package. |
| [`spacy_displacy_colors`](#entry-points-displacy) <Tag variant="new">2.2</Tag> | Group of entry points of custom label colors for the [displaCy visualizer](/usage/visualizers#ent). The key name doesn't matter, but it should point to a dict of labels and color values. Useful for custom models that predict different entity types. | | [`spacy_displacy_colors`](#entry-points-displacy) | Group of entry points of custom label colors for the [displaCy visualizer](/usage/visualizers#ent). The key name doesn't matter, but it should point to a dict of labels and color values. Useful for custom models that predict different entity types. |
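To illustrate, a package might expose a component factory in the `spacy_factories` group via its `setup.py`. This is a hedged sketch; the package, module, and function names are assumptions:

```python
from setuptools import setup

setup(
    name="my-spacy-components",
    packages=["my_spacy_components"],
    entry_points={
        "spacy_factories": [
            # component name = module path : factory function
            "my_component = my_spacy_components.factories:create_my_component",
        ],
    },
)
```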
### Custom components via entry points {#entry-points-components} ### Custom components via entry points {#entry-points-components}

View File

@@ -1572,77 +1572,6 @@ token-based annotations like the dependency parse or entity labels, you'll need
to take care to adjust the `Example` object so its annotations match and remain to take care to adjust the `Example` object so its annotations match and remain
valid. valid.
## Parallel & distributed training with Ray {#parallel-training}
> #### Installation
>
> ```cli
> $ pip install -U %%SPACY_PKG_NAME[ray]%%SPACY_PKG_FLAGS
> # Check that the CLI is registered
> $ python -m spacy ray --help
> ```
[Ray](https://ray.io/) is a fast and simple framework for building and running
**distributed applications**. You can use Ray to train spaCy on one or more
remote machines, potentially speeding up your training process. Parallel
training won't always be faster, though: it depends on your batch size, models,
and hardware.
<Infobox variant="warning">
To use Ray with spaCy, you need the
[`spacy-ray`](https://github.com/explosion/spacy-ray) package installed.
Installing the package will automatically add the `ray` command to the spaCy
CLI.
</Infobox>
The [`spacy ray train`](/api/cli#ray-train) command follows the same API as
[`spacy train`](/api/cli#train), with a few extra options to configure the Ray
setup. You can optionally set the `--address` option to point to your Ray
cluster. If it's not set, Ray will run locally.
```cli
python -m spacy ray train config.cfg --n-workers 2
```
<Project id="integrations/ray">
Get started with parallel training using our project template. It trains a
simple model on a Universal Dependencies Treebank and lets you parallelize the
training with Ray.
</Project>
### How parallel training works {#parallel-training-details}
Each worker receives a shard of the **data** and builds a copy of the **model
and optimizer** from the [`config.cfg`](#config). It also has a communication
channel to **pass gradients and parameters** to the other workers. Additionally,
each worker is given ownership of a subset of the parameter arrays. Every
parameter array is owned by exactly one worker, and the workers are given a
mapping so they know which worker owns which parameter.
![Illustration of setup](../images/spacy-ray.svg)
As training proceeds, every worker will be computing gradients for **all** of
the model parameters. When they compute gradients for parameters they don't own,
they'll **send them to the worker** that does own that parameter, along with a
version identifier so that the owner can decide whether to discard the gradient.
Workers use the gradients they receive and the ones they compute locally to
update the parameters they own, and then broadcast the updated array and a new
version ID to the other workers.
This training procedure is **asynchronous** and **non-blocking**. Workers always
push their gradient increments and parameter updates; they do not have to pull
them and block on the result, so the transfers can happen in the background,
overlapped with the actual training work. The workers also do not have to stop
and wait for each other ("synchronize") at the start of each batch. This is very
useful for spaCy, because spaCy is often trained on long documents, which means
**batches can vary in size** significantly. Uneven workloads make synchronous
gradient descent inefficient, because if one batch is slow, all of the other
workers are stuck waiting for it to complete before they can continue.
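To make the ownership and versioning logic concrete, here is a toy, single-process sketch of the scheme described above. It is not the actual `spacy-ray` implementation; the class and method names are illustrative assumptions:

```python
from dataclasses import dataclass

@dataclass
class OwnedParam:
    values: list  # stand-in for a parameter array
    version: int = 0

class Worker:
    def __init__(self, owned, lr=0.001):
        self.owned = owned  # {param_name: OwnedParam} owned by this worker
        self.lr = lr

    def receive_gradient(self, name, grad, grad_version):
        """Apply a gradient sent by another worker, or discard it if stale."""
        param = self.owned[name]
        if grad_version < param.version:
            return  # computed against an outdated parameter copy: discard
        param.values = [v - self.lr * g for v, g in zip(param.values, grad)]
        param.version += 1
        # A real worker would now broadcast (values, version) to the other
        # workers asynchronously, without blocking on the result.

owner = Worker({"tok2vec.W": OwnedParam([0.5, -0.2])})
owner.receive_gradient("tok2vec.W", [0.1, 0.3], grad_version=0)  # applied
owner.receive_gradient("tok2vec.W", [9.9, 9.9], grad_version=0)  # stale, discarded
print(owner.owned["tok2vec.W"])  # version is now 1
```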
## Internal training API {#api} ## Internal training API {#api}
<Infobox variant="danger"> <Infobox variant="danger">

View File

@@ -15,18 +15,6 @@ menu:
> To help you make the transition from v2.x to v3.0, we've uploaded the old > To help you make the transition from v2.x to v3.0, we've uploaded the old
> website to [**v2.spacy.io**](https://v2.spacy.io/docs). > website to [**v2.spacy.io**](https://v2.spacy.io/docs).
<Infobox title="New: Commercial migration support for your spaCy pipelines" variant="warning" emoji="📣">
Want to make the transition from spaCy v2 to spaCy v3 as smooth as possible for
you and your organization? We're now offering commercial **migration support**
for your spaCy pipelines! We've put a lot of work into making it easy to upgrade
your existing code and training workflows, but custom projects may always need
some custom work, especially when it comes to taking advantage of the new
capabilities.
[**Details & application &rarr;**](https://form.typeform.com/to/vMs2zSjM)
</Infobox>
<Grid cols={2} gutterBottom={false}> <Grid cols={2} gutterBottom={false}>
<div> <div>

View File

@@ -562,6 +562,7 @@
"url": "https://github.com/explosion/spacy-pkuseg" "url": "https://github.com/explosion/spacy-pkuseg"
} }
], ],
"example": "这是一个用于示例的句子。",
"has_examples": true "has_examples": true
} }
], ],

View File

@@ -1,5 +1,31 @@
{ {
"resources": [ "resources": [
{
"id": "grecy",
"title": "greCy",
"slogan": "Ancient Greek pipelines for spaCy",
"description": "greCy offers state-of-the-art pipelines for ancient Greek NLP. The repository makes language models available in various sizes, some of them containing floret word vectors and a BERT transformer layer.",
"github": "jmyerston/greCy",
"code_example": [
"import spacy",
"#After installing the grc_ud_proiel_trf wheel package from the greCy repository",
"",
"nlp = spacy.load('grc_ud_proiel_trf')",
"doc = nlp('δοκῶ μοι περὶ ὧν πυνθάνεσθε οὐκ ἀμελέτητος εἶναι.')",
"",
"for token in doc:",
" print(token.text, token.norm_, token.lemma_, token.pos_, token.tag_)"
],
"code_language": "python",
"author": "Jacobo Myerston",
"author_links": {
"twitter": "@jcbmyrstn",
"github": "jmyerston",
"website": "https://huggingface.co/spaces/Jacobo/syntax"
},
"category": ["pipeline", "research"],
"tags": ["ancient Greek"]
},
{ {
"id": "spacy-cleaner", "id": "spacy-cleaner",
"title": "spacy-cleaner", "title": "spacy-cleaner",
@@ -435,37 +461,6 @@
}, },
"category": ["standalone"] "category": ["standalone"]
}, },
{
"id": "spikex",
"title": "SpikeX - SpaCy Pipes for Knowledge Extraction",
"slogan": "Use SpikeX to build knowledge extraction tools with almost-zero effort",
"description": "SpikeX is a collection of pipes ready to be plugged in a spaCy pipeline. It aims to help in building knowledge extraction tools with almost-zero effort.",
"github": "erre-quadro/spikex",
"pip": "spikex",
"code_example": [
"from spacy import load as spacy_load",
"from spikex.wikigraph import load as wg_load",
"from spikex.pipes import WikiPageX",
"",
"# load a spacy model and get a doc",
"nlp = spacy_load('en_core_web_sm')",
"doc = nlp('An apple a day keeps the doctor away')",
"# load a WikiGraph",
"wg = wg_load('simplewiki_core')",
"# get a WikiPageX and extract all pages",
"wikipagex = WikiPageX(wg)",
"doc = wikipagex(doc)",
"# see all pages extracted from the doc",
"for span in doc._.wiki_spans:",
" print(span._.wiki_pages)"
],
"category": ["pipeline", "standalone"],
"author": "Erre Quadro",
"author_links": {
"github": "erre-quadro",
"website": "https://www.errequadrosrl.com"
}
},
{ {
"id": "spacy-dbpedia-spotlight", "id": "spacy-dbpedia-spotlight",
"title": "DBpedia Spotlight for SpaCy", "title": "DBpedia Spotlight for SpaCy",
@@ -531,17 +526,6 @@
"tags": ["sentiment", "textblob"], "tags": ["sentiment", "textblob"],
"spacy_version": 3 "spacy_version": 3
}, },
{
"id": "spacy-ray",
"title": "spacy-ray",
"slogan": "Parallel and distributed training with spaCy and Ray",
"description": "[Ray](https://ray.io/) is a fast and simple framework for building and running **distributed applications**. This very lightweight extension package lets you use Ray for parallel and distributed training with spaCy. If `spacy-ray` is installed in the same environment as spaCy, it will automatically add `spacy ray` commands to your spaCy CLI.",
"github": "explosion/spacy-ray",
"pip": "spacy-ray",
"category": ["training"],
"author": "Explosion / Anyscale",
"thumb": "https://i.imgur.com/7so6ZpS.png"
},
{ {
"id": "spacy-sentence-bert", "id": "spacy-sentence-bert",
"title": "spaCy - sentence-transformers", "title": "spaCy - sentence-transformers",
@@ -2009,17 +1993,6 @@
}, },
"category": ["books"] "category": ["books"]
}, },
{
"type": "education",
"id": "learning-path-spacy",
"title": "Learning Path: Mastering spaCy for Natural Language Processing",
"slogan": "O'Reilly, 2017",
"description": "spaCy, a fast, user-friendly library for teaching computers to understand text, simplifies NLP techniques, such as speech tagging and syntactic dependencies, so you can easily extract information, attributes, and objects from massive amounts of text to then document, measure, and analyze. This Learning Path is a hands-on introduction to using spaCy to discover insights through natural language processing. While end-to-end natural language processing solutions can be complex, youll learn the linguistics, algorithms, and machine learning skills to get the job done.",
"url": "https://www.safaribooksonline.com/library/view/learning-path-mastering/9781491986653/",
"thumb": "https://i.imgur.com/9MIgMAc.jpg",
"author": "Aaron Kramer",
"category": ["courses"]
},
{ {
"type": "education", "type": "education",
"id": "introduction-into-spacy-3", "id": "introduction-into-spacy-3",