+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/pyproject.toml b/pyproject.toml
index 7abd7a96f..72f04dee3 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -5,7 +5,7 @@ requires = [
"cymem>=2.0.2,<2.1.0",
"preshed>=3.0.2,<3.1.0",
"murmurhash>=0.28.0,<1.1.0",
- "thinc>=8.1.0,<8.2.0",
+ "thinc>=9.0.0.dev1,<9.1.0",
"numpy>=1.15.0",
]
build-backend = "setuptools.build_meta"
diff --git a/requirements.txt b/requirements.txt
index 3e8501b2f..5c49f8d29 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,21 +1,22 @@
# Our libraries
-spacy-legacy>=3.0.10,<3.1.0
+spacy-legacy>=3.0.11,<3.1.0
spacy-loggers>=1.0.0,<2.0.0
cymem>=2.0.2,<2.1.0
preshed>=3.0.2,<3.1.0
-thinc>=8.1.0,<8.2.0
+thinc>=9.0.0.dev1,<9.1.0
ml_datasets>=0.2.0,<0.3.0
murmurhash>=0.28.0,<1.1.0
-wasabi>=0.9.1,<1.1.0
+wasabi>=0.9.1,<1.2.0
srsly>=2.4.3,<3.0.0
catalogue>=2.0.6,<2.1.0
-typer>=0.3.0,<0.5.0
-pathy>=0.3.5
+typer>=0.3.0,<0.8.0
+pathy>=0.10.0
+smart-open>=5.2.1,<7.0.0
# Third party dependencies
numpy>=1.15.0
requests>=2.13.0,<3.0.0
tqdm>=4.38.0,<5.0.0
-pydantic>=1.7.4,!=1.8,!=1.8.1,<1.10.0
+pydantic>=1.7.4,!=1.8,!=1.8.1,<1.11.0
jinja2
langcodes>=3.2.0,<4.0.0
# Official Python utilities
@@ -28,11 +29,12 @@ cython>=0.25,<3.0
pytest>=5.2.0,!=7.1.0
pytest-timeout>=1.3.0,<2.0.0
mock>=2.0.0,<3.0.0
-flake8>=3.8.0,<3.10.0
+flake8>=3.8.0,<6.0.0
hypothesis>=3.27.0,<7.0.0
-mypy>=0.910,<0.970; platform_machine!='aarch64'
+mypy>=0.990,<0.1000; platform_machine != "aarch64" and python_version >= "3.7"
types-dataclasses>=0.1.3; python_version < "3.7"
types-mock>=0.1.1
+types-setuptools>=57.0.0
types-requests
types-setuptools>=57.0.0
black>=22.0,<23.0
diff --git a/setup.cfg b/setup.cfg
index c76961181..82f5ee085 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -22,6 +22,7 @@ classifiers =
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
Programming Language :: Python :: 3.10
+ Programming Language :: Python :: 3.11
Topic :: Scientific/Engineering
project_urls =
Release notes = https://github.com/explosion/spaCy/releases
@@ -33,22 +34,23 @@ include_package_data = true
python_requires = >=3.6
install_requires =
# Our libraries
- spacy-legacy>=3.0.10,<3.1.0
+ spacy-legacy>=3.0.11,<3.1.0
spacy-loggers>=1.0.0,<2.0.0
murmurhash>=0.28.0,<1.1.0
cymem>=2.0.2,<2.1.0
preshed>=3.0.2,<3.1.0
- thinc>=8.1.0,<8.2.0
- wasabi>=0.9.1,<1.1.0
+ thinc>=9.0.0.dev1,<9.1.0
+ wasabi>=0.9.1,<1.2.0
srsly>=2.4.3,<3.0.0
catalogue>=2.0.6,<2.1.0
# Third-party dependencies
- typer>=0.3.0,<0.5.0
- pathy>=0.3.5
+ typer>=0.3.0,<0.8.0
+ pathy>=0.10.0
+ smart-open>=5.2.1,<7.0.0
tqdm>=4.38.0,<5.0.0
numpy>=1.15.0
requests>=2.13.0,<3.0.0
- pydantic>=1.7.4,!=1.8,!=1.8.1,<1.10.0
+ pydantic>=1.7.4,!=1.8,!=1.8.1,<1.11.0
jinja2
# Official Python utilities
setuptools
@@ -68,37 +70,41 @@ transformers =
ray =
spacy_ray>=0.1.0,<1.0.0
cuda =
- cupy>=5.0.0b4,<11.0.0
+ cupy>=5.0.0b4,<12.0.0
cuda80 =
- cupy-cuda80>=5.0.0b4,<11.0.0
+ cupy-cuda80>=5.0.0b4,<12.0.0
cuda90 =
- cupy-cuda90>=5.0.0b4,<11.0.0
+ cupy-cuda90>=5.0.0b4,<12.0.0
cuda91 =
- cupy-cuda91>=5.0.0b4,<11.0.0
+ cupy-cuda91>=5.0.0b4,<12.0.0
cuda92 =
- cupy-cuda92>=5.0.0b4,<11.0.0
+ cupy-cuda92>=5.0.0b4,<12.0.0
cuda100 =
- cupy-cuda100>=5.0.0b4,<11.0.0
+ cupy-cuda100>=5.0.0b4,<12.0.0
cuda101 =
- cupy-cuda101>=5.0.0b4,<11.0.0
+ cupy-cuda101>=5.0.0b4,<12.0.0
cuda102 =
- cupy-cuda102>=5.0.0b4,<11.0.0
+ cupy-cuda102>=5.0.0b4,<12.0.0
cuda110 =
- cupy-cuda110>=5.0.0b4,<11.0.0
+ cupy-cuda110>=5.0.0b4,<12.0.0
cuda111 =
- cupy-cuda111>=5.0.0b4,<11.0.0
+ cupy-cuda111>=5.0.0b4,<12.0.0
cuda112 =
- cupy-cuda112>=5.0.0b4,<11.0.0
+ cupy-cuda112>=5.0.0b4,<12.0.0
cuda113 =
- cupy-cuda113>=5.0.0b4,<11.0.0
+ cupy-cuda113>=5.0.0b4,<12.0.0
cuda114 =
- cupy-cuda114>=5.0.0b4,<11.0.0
+ cupy-cuda114>=5.0.0b4,<12.0.0
cuda115 =
- cupy-cuda115>=5.0.0b4,<11.0.0
+ cupy-cuda115>=5.0.0b4,<12.0.0
cuda116 =
- cupy-cuda116>=5.0.0b4,<11.0.0
+ cupy-cuda116>=5.0.0b4,<12.0.0
cuda117 =
- cupy-cuda117>=5.0.0b4,<11.0.0
+ cupy-cuda117>=5.0.0b4,<12.0.0
+cuda11x =
+ cupy-cuda11x>=11.0.0,<12.0.0
+cuda-autodetect =
+ cupy-wheel>=11.0.0,<12.0.0
apple =
thinc-apple-ops>=0.1.0.dev0,<1.0.0
# Language tokenizers with external dependencies
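Note on the two extras added to setup.cfg above: with these definitions, `pip install "spacy[cuda11x]"` pulls in `cupy-cuda11x>=11.0.0,<12.0.0` for any CUDA 11.x toolkit, while `pip install "spacy[cuda-autodetect]"` pulls in `cupy-wheel>=11.0.0,<12.0.0`; selecting a wheel for the locally installed CUDA version is CuPy's behaviour, not something verified in this diff.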
diff --git a/setup.py b/setup.py
index 3e02b156f..d5b82ec68 100755
--- a/setup.py
+++ b/setup.py
@@ -30,12 +30,13 @@ MOD_NAMES = [
"spacy.lexeme",
"spacy.vocab",
"spacy.attrs",
- "spacy.kb",
+ "spacy.kb.candidate",
+ "spacy.kb.kb",
+ "spacy.kb.kb_in_memory",
"spacy.ml.tb_framework",
"spacy.morphology",
"spacy.pipeline._edit_tree_internals.edit_trees",
"spacy.pipeline.morphologizer",
- "spacy.pipeline.multitask",
"spacy.pipeline.pipe",
"spacy.pipeline.trainable_pipe",
"spacy.pipeline.sentencizer",
@@ -207,6 +208,17 @@ def setup_package():
get_python_inc(plat_specific=True),
]
ext_modules = []
+ ext_modules.append(
+ Extension(
+ "spacy.matcher.levenshtein",
+ [
+ "spacy/matcher/levenshtein.pyx",
+ "spacy/matcher/polyleven.c",
+ ],
+ language="c",
+ include_dirs=include_dirs,
+ )
+ )
for name in MOD_NAMES:
mod_path = name.replace(".", "/") + ".pyx"
ext = Extension(
diff --git a/spacy/__init__.py b/spacy/__init__.py
index d60f46b96..c3568bc5c 100644
--- a/spacy/__init__.py
+++ b/spacy/__init__.py
@@ -31,9 +31,9 @@ def load(
name: Union[str, Path],
*,
vocab: Union[Vocab, bool] = True,
- disable: Union[str, Iterable[str]] = util.SimpleFrozenList(),
- enable: Union[str, Iterable[str]] = util.SimpleFrozenList(),
- exclude: Union[str, Iterable[str]] = util.SimpleFrozenList(),
+ disable: Union[str, Iterable[str]] = util._DEFAULT_EMPTY_PIPES,
+ enable: Union[str, Iterable[str]] = util._DEFAULT_EMPTY_PIPES,
+ exclude: Union[str, Iterable[str]] = util._DEFAULT_EMPTY_PIPES,
config: Union[Dict[str, Any], Config] = util.SimpleFrozenDict(),
) -> Language:
"""Load a spaCy model from an installed package or a local path.
diff --git a/spacy/about.py b/spacy/about.py
index 843c15aba..640e9e93b 100644
--- a/spacy/about.py
+++ b/spacy/about.py
@@ -1,6 +1,6 @@
# fmt: off
__title__ = "spacy"
-__version__ = "3.4.1"
+__version__ = "3.5.0"
__download_url__ = "https://github.com/explosion/spacy-models/releases/download"
__compatibility__ = "https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json"
__projects__ = "https://github.com/explosion/projects"
diff --git a/spacy/cli/__init__.py b/spacy/cli/__init__.py
index ce76ef9a9..aabd1cfef 100644
--- a/spacy/cli/__init__.py
+++ b/spacy/cli/__init__.py
@@ -16,6 +16,7 @@ from .debug_config import debug_config # noqa: F401
from .debug_model import debug_model # noqa: F401
from .debug_diff import debug_diff # noqa: F401
from .evaluate import evaluate # noqa: F401
+from .apply import apply # noqa: F401
from .convert import convert # noqa: F401
from .init_pipeline import init_pipeline_cli # noqa: F401
from .init_config import init_config, fill_config # noqa: F401
@@ -27,6 +28,7 @@ from .project.dvc import project_update_dvc # noqa: F401
from .project.push import project_push # noqa: F401
from .project.pull import project_pull # noqa: F401
from .project.document import project_document # noqa: F401
+from .find_threshold import find_threshold # noqa: F401
@app.command("link", no_args_is_help=True, deprecated=True, hidden=True)
diff --git a/spacy/cli/_util.py b/spacy/cli/_util.py
index ae43b991b..c46abffe5 100644
--- a/spacy/cli/_util.py
+++ b/spacy/cli/_util.py
@@ -23,7 +23,7 @@ from ..util import is_compatible_version, SimpleFrozenDict, ENV_VARS
from .. import about
if TYPE_CHECKING:
- from pathy import Pathy # noqa: F401
+ from pathy import FluidPath # noqa: F401
SDIST_SUFFIX = ".tar.gz"
@@ -158,15 +158,15 @@ def load_project_config(
sys.exit(1)
validate_project_version(config)
validate_project_commands(config)
+ if interpolate:
+ err = f"{PROJECT_FILE} validation error"
+ with show_validation_error(title=err, hint_fill=False):
+ config = substitute_project_variables(config, overrides)
# Make sure directories defined in config exist
for subdir in config.get("directories", []):
dir_path = path / subdir
if not dir_path.exists():
dir_path.mkdir(parents=True)
- if interpolate:
- err = f"{PROJECT_FILE} validation error"
- with show_validation_error(title=err, hint_fill=False):
- config = substitute_project_variables(config, overrides)
return config
@@ -331,7 +331,7 @@ def import_code(code_path: Optional[Union[Path, str]]) -> None:
msg.fail(f"Couldn't load Python code: {code_path}", e, exits=1)
-def upload_file(src: Path, dest: Union[str, "Pathy"]) -> None:
+def upload_file(src: Path, dest: Union[str, "FluidPath"]) -> None:
"""Upload a file.
src (Path): The source path.
@@ -339,13 +339,20 @@ def upload_file(src: Path, dest: Union[str, "Pathy"]) -> None:
"""
import smart_open
+ # Create parent directories for local paths
+ if isinstance(dest, Path):
+ if not dest.parent.exists():
+ dest.parent.mkdir(parents=True)
+
dest = str(dest)
with smart_open.open(dest, mode="wb") as output_file:
with src.open(mode="rb") as input_file:
output_file.write(input_file.read())
-def download_file(src: Union[str, "Pathy"], dest: Path, *, force: bool = False) -> None:
+def download_file(
+ src: Union[str, "FluidPath"], dest: Path, *, force: bool = False
+) -> None:
"""Download a file using smart_open.
url (str): The URL of the file.
@@ -358,7 +365,7 @@ def download_file(src: Union[str, "Pathy"], dest: Path, *, force: bool = False)
if dest.exists() and not force:
return None
src = str(src)
- with smart_open.open(src, mode="rb", ignore_ext=True) as input_file:
+ with smart_open.open(src, mode="rb", compression="disable") as input_file:
with dest.open(mode="wb") as output_file:
shutil.copyfileobj(input_file, output_file)
@@ -368,7 +375,7 @@ def ensure_pathy(path):
slow and annoying Google Cloud warning)."""
from pathy import Pathy # noqa: F811
- return Pathy(path)
+ return Pathy.fluid(path)
def git_checkout(
@@ -573,3 +580,35 @@ def setup_gpu(use_gpu: int, silent=None) -> None:
local_msg.info("Using CPU")
if gpu_is_available():
local_msg.info("To switch to GPU 0, use the option: --gpu-id 0")
+
+
+def walk_directory(path: Path, suffix: Optional[str] = None) -> List[Path]:
+ if not path.is_dir():
+ return [path]
+ paths = [path]
+ locs = []
+ seen = set()
+ for path in paths:
+ if str(path) in seen:
+ continue
+ seen.add(str(path))
+ if path.parts[-1].startswith("."):
+ continue
+ elif path.is_dir():
+ paths.extend(path.iterdir())
+ elif suffix is not None and not path.parts[-1].endswith(suffix):
+ continue
+ else:
+ locs.append(path)
+ # It's good to sort these, in case the ordering messes up cache.
+ locs.sort()
+ return locs
+
+
+def _format_number(number: Union[int, float], ndigits: int = 2) -> str:
+    """Formats a number (float or int) to `ndigits` decimal places, keeping the
+    trailing zeros that `round(number, ndigits)` would drop. Ints are returned as-is."""
+ if isinstance(number, float):
+ return f"{number:.{ndigits}f}"
+ else:
+ return str(number)
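The `walk_directory` and `_format_number` helpers added to `_util.py` above are shared by the new `apply` command and the reworked `debug data` span table. A minimal usage sketch, assuming a hypothetical `corpus` directory:

    from pathlib import Path
    from spacy.cli._util import walk_directory, _format_number

    # Recursively collect files under corpus/, keeping only those ending in ".jsonl";
    # hidden files and directories (names starting with ".") are skipped.
    paths = walk_directory(Path("corpus"), suffix=".jsonl")

    # "0.50" - unlike round(0.5, 2), the trailing zero is kept in the string output.
    print(_format_number(0.5))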
diff --git a/spacy/cli/apply.py b/spacy/cli/apply.py
new file mode 100644
index 000000000..f0df4e757
--- /dev/null
+++ b/spacy/cli/apply.py
@@ -0,0 +1,143 @@
+import tqdm
+import srsly
+
+from itertools import chain
+from pathlib import Path
+from typing import Optional, List, Iterable, cast, Union
+
+from wasabi import msg
+
+from ._util import app, Arg, Opt, setup_gpu, import_code, walk_directory
+
+from ..tokens import Doc, DocBin
+from ..vocab import Vocab
+from ..util import ensure_path, load_model
+
+
+path_help = """Location of the documents to predict on.
+Can be a single file in .spacy format or a .jsonl file.
+Files with other extensions are treated as single plain text documents.
+If a directory is provided it is traversed recursively to collect
+all files to be processed.
+The files can be a mixture of .spacy, .jsonl and text files.
+For .jsonl files the text is read from the specified field
+("text" by default)."""
+
+out_help = "Path to save the resulting .spacy file"
+code_help = (
+ "Path to Python file with additional " "code (registered functions) to be imported"
+)
+gold_help = "Use gold preprocessing provided in the .spacy files"
+force_msg = (
+ "The provided output file already exists. "
+ "To force overwriting the output file, set the --force or -F flag."
+)
+
+
+DocOrStrStream = Union[Iterable[str], Iterable[Doc]]
+
+
+def _stream_docbin(path: Path, vocab: Vocab) -> Iterable[Doc]:
+ """
+ Stream Doc objects from DocBin.
+ """
+ docbin = DocBin().from_disk(path)
+ for doc in docbin.get_docs(vocab):
+ yield doc
+
+
+def _stream_jsonl(path: Path, field: str) -> Iterable[str]:
+ """
+    Stream the specified field from a JSONL file. Exits with an
+    error if the field is not found in an entry.
+ """
+ for entry in srsly.read_jsonl(path):
+ if field not in entry:
+ msg.fail(f"{path} does not contain the required '{field}' field.", exits=1)
+ else:
+ yield entry[field]
+
+
+def _stream_texts(paths: Iterable[Path]) -> Iterable[str]:
+ """
+ Yields strings from text files in paths.
+ """
+ for path in paths:
+ with open(path, "r") as fin:
+ text = fin.read()
+ yield text
+
+
+@app.command("apply")
+def apply_cli(
+ # fmt: off
+ model: str = Arg(..., help="Model name or path"),
+ data_path: Path = Arg(..., help=path_help, exists=True),
+ output_file: Path = Arg(..., help=out_help, dir_okay=False),
+ code_path: Optional[Path] = Opt(None, "--code", "-c", help=code_help),
+ text_key: str = Opt("text", "--text-key", "-tk", help="Key containing text string for JSONL"),
+ force_overwrite: bool = Opt(False, "--force", "-F", help="Force overwriting the output file"),
+ use_gpu: int = Opt(-1, "--gpu-id", "-g", help="GPU ID or -1 for CPU."),
+ batch_size: int = Opt(1, "--batch-size", "-b", help="Batch size."),
+    n_process: int = Opt(1, "--n-process", "-n", help="Number of processes to use.")
+):
+ """
+ Apply a trained pipeline to documents to get predictions.
+ Expects a loadable spaCy pipeline and path to the data, which
+ can be a directory or a file.
+ The data files can be provided in multiple formats:
+ 1. .spacy files
+ 2. .jsonl files with a specified "field" to read the text from.
+    3. Files with any other extension are assumed to contain
+    a single plain text document.
+ DOCS: https://spacy.io/api/cli#apply
+ """
+ data_path = ensure_path(data_path)
+ output_file = ensure_path(output_file)
+ code_path = ensure_path(code_path)
+ if output_file.exists() and not force_overwrite:
+ msg.fail(force_msg, exits=1)
+ if not data_path.exists():
+ msg.fail(f"Couldn't find data path: {data_path}", exits=1)
+ import_code(code_path)
+ setup_gpu(use_gpu)
+ apply(data_path, output_file, model, text_key, batch_size, n_process)
+
+
+def apply(
+ data_path: Path,
+ output_file: Path,
+ model: str,
+ json_field: str,
+ batch_size: int,
+ n_process: int,
+):
+ docbin = DocBin(store_user_data=True)
+ paths = walk_directory(data_path)
+ if len(paths) == 0:
+ docbin.to_disk(output_file)
+ msg.warn(
+ "Did not find data to process,"
+ f" {data_path} seems to be an empty directory."
+ )
+ return
+ nlp = load_model(model)
+ msg.good(f"Loaded model {model}")
+ vocab = nlp.vocab
+ streams: List[DocOrStrStream] = []
+ text_files = []
+ for path in paths:
+ if path.suffix == ".spacy":
+ streams.append(_stream_docbin(path, vocab))
+ elif path.suffix == ".jsonl":
+ streams.append(_stream_jsonl(path, json_field))
+ else:
+ text_files.append(path)
+ if len(text_files) > 0:
+ streams.append(_stream_texts(text_files))
+ datagen = cast(DocOrStrStream, chain(*streams))
+ for doc in tqdm.tqdm(nlp.pipe(datagen, batch_size=batch_size, n_process=n_process)):
+ docbin.add(doc)
+ if output_file.suffix == "":
+ output_file = output_file.with_suffix(".spacy")
+ docbin.to_disk(output_file)
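For reference, a sketch of calling the new `apply` entry point from Python rather than via `spacy apply` on the command line; the paths and model name below are placeholder assumptions:

    from pathlib import Path
    from spacy.cli.apply import apply

    apply(
        data_path=Path("corpus"),              # directory of .spacy, .jsonl and text files
        output_file=Path("predictions.spacy"), # DocBin with the model's predictions
        model="en_core_web_sm",
        json_field="text",                     # field to read from .jsonl entries
        batch_size=16,
        n_process=1,
    )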
diff --git a/spacy/cli/convert.py b/spacy/cli/convert.py
index 04eb7078f..7f365ae2c 100644
--- a/spacy/cli/convert.py
+++ b/spacy/cli/convert.py
@@ -1,4 +1,4 @@
-from typing import Callable, Iterable, Mapping, Optional, Any, List, Union
+from typing import Callable, Iterable, Mapping, Optional, Any, Union
from enum import Enum
from pathlib import Path
from wasabi import Printer
@@ -7,7 +7,7 @@ import re
import sys
import itertools
-from ._util import app, Arg, Opt
+from ._util import app, Arg, Opt, walk_directory
from ..training import docs_to_json
from ..tokens import Doc, DocBin
from ..training.converters import iob_to_docs, conll_ner_to_docs, json_to_docs
@@ -189,33 +189,6 @@ def autodetect_ner_format(input_data: str) -> Optional[str]:
return None
-def walk_directory(path: Path, converter: str) -> List[Path]:
- if not path.is_dir():
- return [path]
- paths = [path]
- locs = []
- seen = set()
- for path in paths:
- if str(path) in seen:
- continue
- seen.add(str(path))
- if path.parts[-1].startswith("."):
- continue
- elif path.is_dir():
- paths.extend(path.iterdir())
- elif converter == "json" and not path.parts[-1].endswith("json"):
- continue
- elif converter == "conll" and not path.parts[-1].endswith("conll"):
- continue
- elif converter == "iob" and not path.parts[-1].endswith("iob"):
- continue
- else:
- locs.append(path)
- # It's good to sort these, in case the ordering messes up cache.
- locs.sort()
- return locs
-
-
def verify_cli_args(
msg: Printer,
input_path: Path,
diff --git a/spacy/cli/debug_data.py b/spacy/cli/debug_data.py
index bd05471b1..a85324e87 100644
--- a/spacy/cli/debug_data.py
+++ b/spacy/cli/debug_data.py
@@ -9,10 +9,11 @@ import typer
import math
from ._util import app, Arg, Opt, show_validation_error, parse_config_overrides
-from ._util import import_code, debug_cli
+from ._util import import_code, debug_cli, _format_number
from ..training import Example, remove_bilu_prefix
from ..training.initialize import get_sourced_components
from ..schemas import ConfigSchemaTraining
+from ..pipeline import TrainablePipe
from ..pipeline._parser_internals import nonproj
from ..pipeline._parser_internals.nonproj import DELIMITER
from ..pipeline import Morphologizer, SpanCategorizer
@@ -934,6 +935,7 @@ def _get_labels_from_model(nlp: Language, factory_name: str) -> Set[str]:
labels: Set[str] = set()
for pipe_name in pipe_names:
pipe = nlp.get_pipe(pipe_name)
+ assert isinstance(pipe, TrainablePipe)
labels.update(pipe.labels)
return labels
@@ -989,7 +991,8 @@ def _get_kl_divergence(p: Counter, q: Counter) -> float:
def _format_span_row(span_data: List[Dict], labels: List[str]) -> List[Any]:
"""Compile into one list for easier reporting"""
d = {
- label: [label] + list(round(d[label], 2) for d in span_data) for label in labels
+ label: [label] + list(_format_number(d[label]) for d in span_data)
+ for label in labels
}
return list(d.values())
@@ -1004,6 +1007,10 @@ def _get_span_characteristics(
label: _gmean(l)
for label, l in compiled_gold["spans_length"][spans_key].items()
}
+ spans_per_type = {
+ label: len(spans)
+ for label, spans in compiled_gold["spans_per_type"][spans_key].items()
+ }
min_lengths = [min(l) for l in compiled_gold["spans_length"][spans_key].values()]
max_lengths = [max(l) for l in compiled_gold["spans_length"][spans_key].values()]
@@ -1031,6 +1038,7 @@ def _get_span_characteristics(
return {
"sd": span_distinctiveness,
"bd": sb_distinctiveness,
+ "spans_per_type": spans_per_type,
"lengths": span_length,
"min_length": min(min_lengths),
"max_length": max(max_lengths),
@@ -1045,12 +1053,15 @@ def _get_span_characteristics(
def _print_span_characteristics(span_characteristics: Dict[str, Any]):
"""Print all span characteristics into a table"""
- headers = ("Span Type", "Length", "SD", "BD")
+ headers = ("Span Type", "Length", "SD", "BD", "N")
+ # Wasabi has this at 30 by default, but we might have some long labels
+ max_col = max(30, max(len(label) for label in span_characteristics["labels"]))
# Prepare table data with all span characteristics
table_data = [
span_characteristics["lengths"],
span_characteristics["sd"],
span_characteristics["bd"],
+ span_characteristics["spans_per_type"],
]
table = _format_span_row(
span_data=table_data, labels=span_characteristics["labels"]
@@ -1061,8 +1072,18 @@ def _print_span_characteristics(span_characteristics: Dict[str, Any]):
span_characteristics["avg_sd"],
span_characteristics["avg_bd"],
]
- footer = ["Wgt. Average"] + [str(round(f, 2)) for f in footer_data]
- msg.table(table, footer=footer, header=headers, divider=True)
+
+ footer = (
+ ["Wgt. Average"] + ["{:.2f}".format(round(f, 2)) for f in footer_data] + ["-"]
+ )
+ msg.table(
+ table,
+ footer=footer,
+ header=headers,
+ divider=True,
+ aligns=["l"] + ["r"] * (len(footer_data) + 1),
+ max_col=max_col,
+ )
def _get_spans_length_freq_dist(
diff --git a/spacy/cli/download.py b/spacy/cli/download.py
index 0c9a32b93..4c998a6e0 100644
--- a/spacy/cli/download.py
+++ b/spacy/cli/download.py
@@ -8,7 +8,6 @@ from ._util import app, Arg, Opt, WHEEL_SUFFIX, SDIST_SUFFIX
from .. import about
from ..util import is_package, get_minor_version, run_command
from ..util import is_prerelease_version
-from ..errors import OLD_MODEL_SHORTCUTS
@app.command(
@@ -61,12 +60,6 @@ def download(
version = components[-1]
else:
model_name = model
- if model in OLD_MODEL_SHORTCUTS:
- msg.warn(
- f"As of spaCy v3.0, shortcuts like '{model}' are deprecated. Please "
- f"use the full pipeline package name '{OLD_MODEL_SHORTCUTS[model]}' instead."
- )
- model_name = OLD_MODEL_SHORTCUTS[model]
compatibility = get_compatibility()
version = get_version(model_name, compatibility)
diff --git a/spacy/cli/find_threshold.py b/spacy/cli/find_threshold.py
new file mode 100644
index 000000000..efa664832
--- /dev/null
+++ b/spacy/cli/find_threshold.py
@@ -0,0 +1,233 @@
+import functools
+import operator
+from pathlib import Path
+import logging
+from typing import Optional, Tuple, Any, Dict, List
+
+import numpy
+import wasabi.tables
+
+from ..pipeline import TextCategorizer, MultiLabel_TextCategorizer
+from ..errors import Errors
+from ..training import Corpus
+from ._util import app, Arg, Opt, import_code, setup_gpu
+from .. import util
+
+_DEFAULTS = {
+ "n_trials": 11,
+ "use_gpu": -1,
+ "gold_preproc": False,
+}
+
+
+@app.command(
+ "find-threshold",
+ context_settings={"allow_extra_args": False, "ignore_unknown_options": True},
+)
+def find_threshold_cli(
+ # fmt: off
+ model: str = Arg(..., help="Model name or path"),
+ data_path: Path = Arg(..., help="Location of binary evaluation data in .spacy format", exists=True),
+ pipe_name: str = Arg(..., help="Name of pipe to examine thresholds for"),
+ threshold_key: str = Arg(..., help="Key of threshold attribute in component's configuration"),
+ scores_key: str = Arg(..., help="Metric to optimize"),
+ n_trials: int = Opt(_DEFAULTS["n_trials"], "--n_trials", "-n", help="Number of trials to determine optimal thresholds"),
+ code_path: Optional[Path] = Opt(None, "--code", "-c", help="Path to Python file with additional code (registered functions) to be imported"),
+ use_gpu: int = Opt(_DEFAULTS["use_gpu"], "--gpu-id", "-g", help="GPU ID or -1 for CPU"),
+ gold_preproc: bool = Opt(_DEFAULTS["gold_preproc"], "--gold-preproc", "-G", help="Use gold preprocessing"),
+    verbose: bool = Opt(False, "--verbose", "-V", "-VV", help="Display more information for debugging purposes"),
+ # fmt: on
+):
+ """
+    Runs prediction trials for a trained model with varying thresholds to maximize
+ the specified metric. The search space for the threshold is traversed linearly
+ from 0 to 1 in `n_trials` steps. Results are displayed in a table on `stdout`
+ (the corresponding API call to `spacy.cli.find_threshold.find_threshold()`
+ returns all results).
+
+ This is applicable only for components whose predictions are influenced by
+ thresholds - e.g. `textcat_multilabel` and `spancat`, but not `textcat`. Note
+ that the full path to the corresponding threshold attribute in the config has to
+ be provided.
+
+ DOCS: https://spacy.io/api/cli#find-threshold
+ """
+
+ util.logger.setLevel(logging.DEBUG if verbose else logging.INFO)
+ import_code(code_path)
+ find_threshold(
+ model=model,
+ data_path=data_path,
+ pipe_name=pipe_name,
+ threshold_key=threshold_key,
+ scores_key=scores_key,
+ n_trials=n_trials,
+ use_gpu=use_gpu,
+ gold_preproc=gold_preproc,
+ silent=False,
+ )
+
+
+def find_threshold(
+ model: str,
+ data_path: Path,
+ pipe_name: str,
+ threshold_key: str,
+ scores_key: str,
+ *,
+ n_trials: int = _DEFAULTS["n_trials"], # type: ignore
+ use_gpu: int = _DEFAULTS["use_gpu"], # type: ignore
+ gold_preproc: bool = _DEFAULTS["gold_preproc"], # type: ignore
+ silent: bool = True,
+) -> Tuple[float, float, Dict[float, float]]:
+ """
+    Runs prediction trials for models with varying thresholds to maximize the specified metric.
+ model (Union[str, Path]): Pipeline to evaluate. Can be a package or a path to a data directory.
+ data_path (Path): Path to file with DocBin with docs to use for threshold search.
+ pipe_name (str): Name of pipe to examine thresholds for.
+ threshold_key (str): Key of threshold attribute in component's configuration.
+    scores_key (str): Name of the score metric to optimize.
+ n_trials (int): Number of trials to determine optimal thresholds.
+ use_gpu (int): GPU ID or -1 for CPU.
+ gold_preproc (bool): Whether to use gold preprocessing. Gold preprocessing helps the annotations align to the
+ tokenization, and may result in sequences of more consistent length. However, it may reduce runtime accuracy due
+ to train/test skew.
+ silent (bool): Whether to print non-error-related output to stdout.
+ RETURNS (Tuple[float, float, Dict[float, float]]): Best found threshold, the corresponding score, scores for all
+ evaluated thresholds.
+ """
+
+ setup_gpu(use_gpu, silent=silent)
+ data_path = util.ensure_path(data_path)
+ if not data_path.exists():
+ wasabi.msg.fail("Evaluation data not found", data_path, exits=1)
+ nlp = util.load_model(model)
+
+ if pipe_name not in nlp.component_names:
+ raise AttributeError(
+ Errors.E001.format(name=pipe_name, opts=nlp.component_names)
+ )
+ pipe = nlp.get_pipe(pipe_name)
+ if not hasattr(pipe, "scorer"):
+ raise AttributeError(Errors.E1045)
+
+ if type(pipe) == TextCategorizer:
+ wasabi.msg.warn(
+ "The `textcat` component doesn't use a threshold as it's not applicable to the concept of "
+ "exclusive classes. All thresholds will yield the same results."
+ )
+
+ if not silent:
+ wasabi.msg.info(
+ title=f"Optimizing for {scores_key} for component '{pipe_name}' with {n_trials} "
+ f"trials."
+ )
+
+ # Load evaluation corpus.
+ corpus = Corpus(data_path, gold_preproc=gold_preproc)
+ dev_dataset = list(corpus(nlp))
+ config_keys = threshold_key.split(".")
+
+ def set_nested_item(
+ config: Dict[str, Any], keys: List[str], value: float
+ ) -> Dict[str, Any]:
+ """Set item in nested dictionary. Adapted from https://stackoverflow.com/a/54138200.
+ config (Dict[str, Any]): Configuration dictionary.
+ keys (List[Any]): Path to value to set.
+ value (float): Value to set.
+ RETURNS (Dict[str, Any]): Updated dictionary.
+ """
+ functools.reduce(operator.getitem, keys[:-1], config)[keys[-1]] = value
+ return config
+
+ def filter_config(
+ config: Dict[str, Any], keys: List[str], full_key: str
+ ) -> Dict[str, Any]:
+ """Filters provided config dictionary so that only the specified keys path remains.
+ config (Dict[str, Any]): Configuration dictionary.
+ keys (List[Any]): Path to value to set.
+ full_key (str): Full user-specified key.
+ RETURNS (Dict[str, Any]): Filtered dictionary.
+ """
+ if keys[0] not in config:
+ wasabi.msg.fail(
+ title=f"Failed to look up `{full_key}` in config: sub-key {[keys[0]]} not found.",
+ text=f"Make sure you specified {[keys[0]]} correctly. The following sub-keys are available instead: "
+ f"{list(config.keys())}",
+ exits=1,
+ )
+ return {
+ keys[0]: filter_config(config[keys[0]], keys[1:], full_key)
+ if len(keys) > 1
+ else config[keys[0]]
+ }
+
+ # Evaluate with varying threshold values.
+ scores: Dict[float, float] = {}
+ config_keys_full = ["components", pipe_name, *config_keys]
+ table_col_widths = (10, 10)
+ thresholds = numpy.linspace(0, 1, n_trials)
+ print(wasabi.tables.row(["Threshold", f"{scores_key}"], widths=table_col_widths))
+ for threshold in thresholds:
+ # Reload pipeline with overrides specifying the new threshold.
+ nlp = util.load_model(
+ model,
+ config=set_nested_item(
+ filter_config(
+ nlp.config, config_keys_full, ".".join(config_keys_full)
+ ).copy(),
+ config_keys_full,
+ threshold,
+ ),
+ )
+ if hasattr(pipe, "cfg"):
+ setattr(
+ nlp.get_pipe(pipe_name),
+ "cfg",
+ set_nested_item(getattr(pipe, "cfg"), config_keys, threshold),
+ )
+
+ eval_scores = nlp.evaluate(dev_dataset)
+ if scores_key not in eval_scores:
+ wasabi.msg.fail(
+ title=f"Failed to look up score `{scores_key}` in evaluation results.",
+ text=f"Make sure you specified the correct value for `scores_key`. The following scores are "
+ f"available: {list(eval_scores.keys())}",
+ exits=1,
+ )
+ scores[threshold] = eval_scores[scores_key]
+
+ if not isinstance(scores[threshold], (float, int)):
+ wasabi.msg.fail(
+ f"Returned score for key '{scores_key}' is not numeric. Threshold optimization only works for numeric "
+ f"scores.",
+ exits=1,
+ )
+ print(
+ wasabi.row(
+ [round(threshold, 3), round(scores[threshold], 3)],
+ widths=table_col_widths,
+ )
+ )
+
+ best_threshold = max(scores.keys(), key=(lambda key: scores[key]))
+
+ # If all scores are identical, emit warning.
+ if len(set(scores.values())) == 1:
+ wasabi.msg.warn(
+ title="All scores are identical. Verify that all settings are correct.",
+ text=""
+ if (
+ not isinstance(pipe, MultiLabel_TextCategorizer)
+ or scores_key in ("cats_macro_f", "cats_micro_f")
+ )
+ else "Use `cats_macro_f` or `cats_micro_f` when optimizing the threshold for `textcat_multilabel`.",
+ )
+
+ else:
+ if not silent:
+ print(
+ f"\nBest threshold: {round(best_threshold, ndigits=4)} with {scores_key} value of {scores[best_threshold]}."
+ )
+
+ return best_threshold, scores[best_threshold], scores
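A sketch of the programmatic counterpart to `spacy find-threshold`, using the signature defined above; the pipeline path, data path and threshold key are illustrative assumptions for a `textcat_multilabel` pipeline, and `cats_macro_f` follows the warning text's recommendation:

    from pathlib import Path
    from spacy.cli.find_threshold import find_threshold

    best_threshold, best_score, all_scores = find_threshold(
        model="training/model-best",
        data_path=Path("corpus/dev.spacy"),
        pipe_name="textcat_multilabel",
        threshold_key="threshold",
        scores_key="cats_macro_f",
        n_trials=11,
        silent=False,
    )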
diff --git a/spacy/cli/info.py b/spacy/cli/info.py
index e6ac4270f..974bc0f4e 100644
--- a/spacy/cli/info.py
+++ b/spacy/cli/info.py
@@ -147,6 +147,7 @@ def info_installed_model_url(model: str) -> Optional[str]:
# something else, like no file or invalid JSON
return None
+
def info_model_url(model: str) -> Dict[str, Any]:
"""Return the download URL for the latest version of a pipeline."""
version = get_latest_version(model)
diff --git a/spacy/cli/package.py b/spacy/cli/package.py
index b8c8397b6..324c5d1bb 100644
--- a/spacy/cli/package.py
+++ b/spacy/cli/package.py
@@ -299,8 +299,8 @@ def get_meta(
}
nlp = util.load_model_from_path(Path(model_path))
meta.update(nlp.meta)
- meta.update(existing_meta)
meta["spacy_version"] = util.get_minor_version_range(about.__version__)
+ meta.update(existing_meta)
meta["vectors"] = {
"width": nlp.vocab.vectors_length,
"vectors": len(nlp.vocab.vectors),
diff --git a/spacy/cli/project/assets.py b/spacy/cli/project/assets.py
index 61438d1a8..8f35b2d23 100644
--- a/spacy/cli/project/assets.py
+++ b/spacy/cli/project/assets.py
@@ -189,7 +189,11 @@ def convert_asset_url(url: str) -> str:
RETURNS (str): The converted URL.
"""
# If the asset URL is a regular GitHub URL it's likely a mistake
- if re.match(r"(http(s?)):\/\/github.com", url) and "releases/download" not in url:
+ if (
+ re.match(r"(http(s?)):\/\/github.com", url)
+ and "releases/download" not in url
+ and "/raw/" not in url
+ ):
converted = url.replace("github.com", "raw.githubusercontent.com")
converted = re.sub(r"/(tree|blob)/", "/", converted)
msg.warn(
diff --git a/spacy/cli/project/dvc.py b/spacy/cli/project/dvc.py
index 83dc5efbf..a15353855 100644
--- a/spacy/cli/project/dvc.py
+++ b/spacy/cli/project/dvc.py
@@ -25,6 +25,7 @@ def project_update_dvc_cli(
project_dir: Path = Arg(Path.cwd(), help="Location of project directory. Defaults to current working directory.", exists=True, file_okay=False),
workflow: Optional[str] = Arg(None, help=f"Name of workflow defined in {PROJECT_FILE}. Defaults to first workflow if not set."),
verbose: bool = Opt(False, "--verbose", "-V", help="Print more info"),
+ quiet: bool = Opt(False, "--quiet", "-q", help="Print less info"),
force: bool = Opt(False, "--force", "-F", help="Force update DVC config"),
# fmt: on
):
@@ -36,7 +37,7 @@ def project_update_dvc_cli(
DOCS: https://spacy.io/api/cli#project-dvc
"""
- project_update_dvc(project_dir, workflow, verbose=verbose, force=force)
+ project_update_dvc(project_dir, workflow, verbose=verbose, quiet=quiet, force=force)
def project_update_dvc(
@@ -44,6 +45,7 @@ def project_update_dvc(
workflow: Optional[str] = None,
*,
verbose: bool = False,
+ quiet: bool = False,
force: bool = False,
) -> None:
"""Update the auto-generated Data Version Control (DVC) config file. A DVC
@@ -54,11 +56,12 @@ def project_update_dvc(
workflow (Optional[str]): Optional name of workflow defined in project.yml.
If not set, the first workflow will be used.
verbose (bool): Print more info.
+ quiet (bool): Print less info.
force (bool): Force update DVC config.
"""
config = load_project_config(project_dir)
updated = update_dvc_config(
- project_dir, config, workflow, verbose=verbose, force=force
+ project_dir, config, workflow, verbose=verbose, quiet=quiet, force=force
)
help_msg = "To execute the workflow with DVC, run: dvc repro"
if updated:
@@ -72,7 +75,7 @@ def update_dvc_config(
config: Dict[str, Any],
workflow: Optional[str] = None,
verbose: bool = False,
- silent: bool = False,
+ quiet: bool = False,
force: bool = False,
) -> bool:
"""Re-run the DVC commands in dry mode and update dvc.yaml file in the
@@ -83,7 +86,7 @@ def update_dvc_config(
path (Path): The path to the project directory.
config (Dict[str, Any]): The loaded project.yml.
verbose (bool): Whether to print additional info (via DVC).
- silent (bool): Don't output anything (via DVC).
+ quiet (bool): Don't output anything (via DVC).
force (bool): Force update, even if hashes match.
RETURNS (bool): Whether the DVC config file was updated.
"""
@@ -105,6 +108,14 @@ def update_dvc_config(
dvc_config_path.unlink()
dvc_commands = []
config_commands = {cmd["name"]: cmd for cmd in config.get("commands", [])}
+
+ # some flags that apply to every command
+ flags = []
+ if verbose:
+ flags.append("--verbose")
+ if quiet:
+ flags.append("--quiet")
+
for name in workflows[workflow]:
command = config_commands[name]
deps = command.get("deps", [])
@@ -118,14 +129,26 @@ def update_dvc_config(
deps_cmd = [c for cl in [["-d", p] for p in deps] for c in cl]
outputs_cmd = [c for cl in [["-o", p] for p in outputs] for c in cl]
outputs_nc_cmd = [c for cl in [["-O", p] for p in outputs_no_cache] for c in cl]
- dvc_cmd = ["run", "-n", name, "-w", str(path), "--no-exec"]
+
+ dvc_cmd = ["run", *flags, "-n", name, "-w", str(path), "--no-exec"]
if command.get("no_skip"):
dvc_cmd.append("--always-changed")
full_cmd = [*dvc_cmd, *deps_cmd, *outputs_cmd, *outputs_nc_cmd, *project_cmd]
dvc_commands.append(join_command(full_cmd))
+
+ if not dvc_commands:
+ # If we don't check for this, then there will be an error when reading the
+ # config, since DVC wouldn't create it.
+ msg.fail(
+ "No usable commands for DVC found. This can happen if none of your "
+ "commands have dependencies or outputs.",
+ exits=1,
+ )
+
with working_dir(path):
- dvc_flags = {"--verbose": verbose, "--quiet": silent}
- run_dvc_commands(dvc_commands, flags=dvc_flags)
+ for c in dvc_commands:
+ dvc_command = "dvc " + c
+ run_command(dvc_command)
with dvc_config_path.open("r+", encoding="utf8") as f:
content = f.read()
f.seek(0, 0)
@@ -133,26 +156,6 @@ def update_dvc_config(
return True
-def run_dvc_commands(
- commands: Iterable[str] = SimpleFrozenList(), flags: Dict[str, bool] = {}
-) -> None:
- """Run a sequence of DVC commands in a subprocess, in order.
-
- commands (List[str]): The string commands without the leading "dvc".
- flags (Dict[str, bool]): Conditional flags to be added to command. Makes it
- easier to pass flags like --quiet that depend on a variable or
- command-line setting while avoiding lots of nested conditionals.
- """
- for c in commands:
- command = split_command(c)
- dvc_command = ["dvc", *command]
- # Add the flags if they are set to True
- for flag, is_active in flags.items():
- if is_active:
- dvc_command.append(flag)
- run_command(dvc_command)
-
-
def check_workflows(workflows: List[str], workflow: Optional[str] = None) -> None:
"""Validate workflows provided in project.yml and check that a given
workflow can be used to generate a DVC config.
diff --git a/spacy/cli/project/remote_storage.py b/spacy/cli/project/remote_storage.py
index 336a4bcb3..076541580 100644
--- a/spacy/cli/project/remote_storage.py
+++ b/spacy/cli/project/remote_storage.py
@@ -5,14 +5,17 @@ import hashlib
import urllib.parse
import tarfile
from pathlib import Path
+from wasabi import msg
-from .._util import get_hash, get_checksum, download_file, ensure_pathy
-from ...util import make_tempdir, get_minor_version, ENV_VARS, check_bool_env_var
+from .._util import get_hash, get_checksum, upload_file, download_file
+from .._util import ensure_pathy, make_tempdir
+from ...util import get_minor_version, ENV_VARS, check_bool_env_var
from ...git_info import GIT_VERSION
from ... import about
+from ...errors import Errors
if TYPE_CHECKING:
- from pathy import Pathy # noqa: F401
+ from pathy import FluidPath # noqa: F401
class RemoteStorage:
@@ -27,7 +30,7 @@ class RemoteStorage:
self.url = ensure_pathy(url)
self.compression = compression
- def push(self, path: Path, command_hash: str, content_hash: str) -> "Pathy":
+ def push(self, path: Path, command_hash: str, content_hash: str) -> "FluidPath":
"""Compress a file or directory within a project and upload it to a remote
storage. If an object exists at the full URL, nothing is done.
@@ -48,9 +51,7 @@ class RemoteStorage:
mode_string = f"w:{self.compression}" if self.compression else "w"
with tarfile.open(tar_loc, mode=mode_string) as tar_file:
tar_file.add(str(loc), arcname=str(path))
- with tar_loc.open(mode="rb") as input_file:
- with url.open(mode="wb") as output_file:
- output_file.write(input_file.read())
+ upload_file(tar_loc, url)
return url
def pull(
@@ -59,7 +60,7 @@ class RemoteStorage:
*,
command_hash: Optional[str] = None,
content_hash: Optional[str] = None,
- ) -> Optional["Pathy"]:
+ ) -> Optional["FluidPath"]:
"""Retrieve a file from the remote cache. If the file already exists,
nothing is done.
@@ -84,7 +85,23 @@ class RemoteStorage:
with tarfile.open(tar_loc, mode=mode_string) as tar_file:
# This requires that the path is added correctly, relative
# to root. This is how we set things up in push()
- tar_file.extractall(self.root)
+
+ # Disallow paths outside the current directory for the tar
+ # file (CVE-2007-4559, directory traversal vulnerability)
+ def is_within_directory(directory, target):
+ abs_directory = os.path.abspath(directory)
+ abs_target = os.path.abspath(target)
+ prefix = os.path.commonprefix([abs_directory, abs_target])
+ return prefix == abs_directory
+
+ def safe_extract(tar, path):
+ for member in tar.getmembers():
+ member_path = os.path.join(path, member.name)
+ if not is_within_directory(path, member_path):
+ raise ValueError(Errors.E852)
+ tar.extractall(path)
+
+ safe_extract(tar_file, self.root)
return url
def find(
@@ -93,25 +110,37 @@ class RemoteStorage:
*,
command_hash: Optional[str] = None,
content_hash: Optional[str] = None,
- ) -> Optional["Pathy"]:
+ ) -> Optional["FluidPath"]:
"""Find the best matching version of a file within the storage,
or `None` if no match can be found. If both the creation and content hash
are specified, only exact matches will be returned. Otherwise, the most
recent matching file is preferred.
"""
name = self.encode_name(str(path))
+ urls = []
if command_hash is not None and content_hash is not None:
- url = self.make_url(path, command_hash, content_hash)
+ url = self.url / name / command_hash / content_hash
urls = [url] if url.exists() else []
elif command_hash is not None:
- urls = list((self.url / name / command_hash).iterdir())
+ if (self.url / name / command_hash).exists():
+ urls = list((self.url / name / command_hash).iterdir())
else:
- urls = list((self.url / name).iterdir())
- if content_hash is not None:
- urls = [url for url in urls if url.parts[-1] == content_hash]
+ if (self.url / name).exists():
+ for sub_dir in (self.url / name).iterdir():
+ urls.extend(sub_dir.iterdir())
+ if content_hash is not None:
+ urls = [url for url in urls if url.parts[-1] == content_hash]
+ if len(urls) >= 2:
+ try:
+ urls.sort(key=lambda x: x.stat().last_modified) # type: ignore
+ except Exception:
+ msg.warn(
+ "Unable to sort remote files by last modified. The file(s) "
+ "pulled from the cache may not be the most recent."
+ )
return urls[-1] if urls else None
- def make_url(self, path: Path, command_hash: str, content_hash: str) -> "Pathy":
+ def make_url(self, path: Path, command_hash: str, content_hash: str) -> "FluidPath":
"""Construct a URL from a subpath, a creation hash and a content hash."""
return self.url / self.encode_name(str(path)) / command_hash / content_hash
diff --git a/spacy/cli/project/run.py b/spacy/cli/project/run.py
index 734803bc4..6dd174902 100644
--- a/spacy/cli/project/run.py
+++ b/spacy/cli/project/run.py
@@ -1,5 +1,8 @@
-from typing import Optional, List, Dict, Sequence, Any, Iterable
+from typing import Optional, List, Dict, Sequence, Any, Iterable, Tuple
+import os.path
from pathlib import Path
+
+import pkg_resources
from wasabi import msg
from wasabi.util import locale_escape
import sys
@@ -50,6 +53,7 @@ def project_run(
force: bool = False,
dry: bool = False,
capture: bool = False,
+ skip_requirements_check: bool = False,
) -> None:
"""Run a named script defined in the project.yml. If the script is part
of the default pipeline (defined in the "run" section), DVC is used to
@@ -66,11 +70,19 @@ def project_run(
sys.exit will be called with the return code. You should use capture=False
when you want to turn over execution to the command, and capture=True
when you want to run the command more like a function.
+ skip_requirements_check (bool): Whether to skip the requirements check.
"""
config = load_project_config(project_dir, overrides=overrides)
commands = {cmd["name"]: cmd for cmd in config.get("commands", [])}
workflows = config.get("workflows", {})
validate_subcommand(list(commands.keys()), list(workflows.keys()), subcommand)
+
+ req_path = project_dir / "requirements.txt"
+ if not skip_requirements_check:
+ if config.get("check_requirements", True) and os.path.exists(req_path):
+ with req_path.open() as requirements_file:
+ _check_requirements([req.strip() for req in requirements_file])
+
if subcommand in workflows:
msg.info(f"Running workflow '{subcommand}'")
for cmd in workflows[subcommand]:
@@ -81,6 +93,7 @@ def project_run(
force=force,
dry=dry,
capture=capture,
+ skip_requirements_check=True,
)
else:
cmd = commands[subcommand]
@@ -88,8 +101,8 @@ def project_run(
if not (project_dir / dep).exists():
err = f"Missing dependency specified by command '{subcommand}': {dep}"
err_help = "Maybe you forgot to run the 'project assets' command or a previous step?"
- err_kwargs = {"exits": 1} if not dry else {}
- msg.fail(err, err_help, **err_kwargs)
+ err_exits = 1 if not dry else None
+ msg.fail(err, err_help, exits=err_exits)
check_spacy_commit = check_bool_env_var(ENV_VARS.PROJECT_USE_GIT_VERSION)
with working_dir(project_dir) as current_dir:
msg.divider(subcommand)
@@ -195,6 +208,8 @@ def validate_subcommand(
msg.fail(f"No commands or workflows defined in {PROJECT_FILE}", exits=1)
if subcommand not in commands and subcommand not in workflows:
help_msg = []
+ if subcommand in ["assets", "asset"]:
+ help_msg.append("Did you mean to run: python -m spacy project assets?")
if commands:
help_msg.append(f"Available commands: {', '.join(commands)}")
if workflows:
@@ -308,3 +323,38 @@ def get_fileinfo(project_dir: Path, paths: List[str]) -> List[Dict[str, Optional
md5 = get_checksum(file_path) if file_path.exists() else None
data.append({"path": path, "md5": md5})
return data
+
+
+def _check_requirements(requirements: List[str]) -> Tuple[bool, bool]:
+ """Checks whether requirements are installed and free of version conflicts.
+ requirements (List[str]): List of requirements.
+ RETURNS (Tuple[bool, bool]): Whether (1) any packages couldn't be imported, (2) any packages with version conflicts
+ exist.
+ """
+
+ failed_pkgs_msgs: List[str] = []
+ conflicting_pkgs_msgs: List[str] = []
+
+ for req in requirements:
+ try:
+ pkg_resources.require(req)
+ except pkg_resources.DistributionNotFound as dnf:
+ failed_pkgs_msgs.append(dnf.report())
+ except pkg_resources.VersionConflict as vc:
+ conflicting_pkgs_msgs.append(vc.report())
+ except Exception:
+ msg.warn(
+ f"Unable to check requirement: {req} "
+ "Checks are currently limited to requirement specifiers "
+ "(PEP 508)"
+ )
+
+ if len(failed_pkgs_msgs) or len(conflicting_pkgs_msgs):
+ msg.warn(
+ title="Missing requirements or requirement conflicts detected. Make sure your Python environment is set up "
+ "correctly and you installed all requirements specified in your project's requirements.txt: "
+ )
+        for pkg_msg in failed_pkgs_msgs + conflicting_pkgs_msgs:
+            msg.text(pkg_msg)
+
+ return len(failed_pkgs_msgs) > 0, len(conflicting_pkgs_msgs) > 0
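The requirements check above runs for every `spacy project run` when a requirements.txt is present; since the code reads `config.get("check_requirements", True)`, it can be disabled per project by setting `check_requirements: false` in project.yml. When calling the runner from Python it can also be skipped per invocation, as in this sketch (the project directory and command name are placeholders):

    from pathlib import Path
    from spacy.cli.project.run import project_run

    project_run(Path("my_project"), "train", skip_requirements_check=True)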
diff --git a/spacy/cli/templates/quickstart_training.jinja b/spacy/cli/templates/quickstart_training.jinja
index 0ff1779a8..eb48d1de5 100644
--- a/spacy/cli/templates/quickstart_training.jinja
+++ b/spacy/cli/templates/quickstart_training.jinja
@@ -1,7 +1,7 @@
{# This is a template for training configs used for the quickstart widget in
the docs and the init config command. It encodes various best practices and
can help generate the best possible configuration, given a user's requirements. #}
-{%- set use_transformer = hardware != "cpu" -%}
+{%- set use_transformer = hardware != "cpu" and transformer_data -%}
{%- set transformer = transformer_data[optimize] if use_transformer else {} -%}
{%- set listener_components = ["tagger", "morphologizer", "parser", "ner", "textcat", "textcat_multilabel", "entity_linker", "spancat", "trainable_lemmatizer"] -%}
[paths]
@@ -269,13 +269,8 @@ factory = "tok2vec"
[components.tok2vec.model.embed]
@architectures = "spacy.MultiHashEmbed.v2"
width = ${components.tok2vec.model.encode.width}
-{% if has_letters -%}
attrs = ["NORM", "PREFIX", "SUFFIX", "SHAPE"]
-rows = [5000, 2500, 2500, 2500]
-{% else -%}
-attrs = ["ORTH", "SHAPE"]
-rows = [5000, 2500]
-{% endif -%}
+rows = [5000, 1000, 2500, 2500]
include_static_vectors = {{ "true" if optimize == "accuracy" else "false" }}
[components.tok2vec.model.encode]
diff --git a/spacy/cli/templates/quickstart_training_recommendations.yml b/spacy/cli/templates/quickstart_training_recommendations.yml
index a7bf9b74a..4f214d22d 100644
--- a/spacy/cli/templates/quickstart_training_recommendations.yml
+++ b/spacy/cli/templates/quickstart_training_recommendations.yml
@@ -37,6 +37,15 @@ bn:
accuracy:
name: sagorsarker/bangla-bert-base
size_factor: 3
+ca:
+ word_vectors: null
+ transformer:
+ efficiency:
+ name: projecte-aina/roberta-base-ca-v2
+ size_factor: 3
+ accuracy:
+ name: projecte-aina/roberta-base-ca-v2
+ size_factor: 3
da:
word_vectors: da_core_news_lg
transformer:
@@ -271,4 +280,3 @@ zh:
accuracy:
name: bert-base-chinese
size_factor: 3
- has_letters: false
diff --git a/spacy/default_config.cfg b/spacy/default_config.cfg
index 86a72926e..694fb732f 100644
--- a/spacy/default_config.cfg
+++ b/spacy/default_config.cfg
@@ -90,6 +90,8 @@ dev_corpus = "corpora.dev"
train_corpus = "corpora.train"
# Optional callback before nlp object is saved to disk after training
before_to_disk = null
+# Optional callback that is invoked at the start of each training step
+before_update = null
[training.logger]
@loggers = "spacy.ConsoleLogger.v1"
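The new `before_update` setting defaults to null; when set, it resolves to a callback invoked at the start of each training step. A hedged sketch of registering such a callback and pointing `[training.before_update]` at it with `@callbacks = "my_before_update.v1"`; the exact contents of the info argument (step and epoch counters) are an assumption, not shown in this diff:

    import spacy

    @spacy.registry.callbacks("my_before_update.v1")
    def create_before_update():
        def before_update(nlp, info):
            # `info` is assumed to carry the current training step and epoch.
            print("starting step", info.get("step"))
        return before_update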
diff --git a/spacy/displacy/__init__.py b/spacy/displacy/__init__.py
index 7bb300afa..bc32001d7 100644
--- a/spacy/displacy/__init__.py
+++ b/spacy/displacy/__init__.py
@@ -228,12 +228,13 @@ def parse_spans(doc: Doc, options: Dict[str, Any] = {}) -> Dict[str, Any]:
"kb_id": span.kb_id_ if span.kb_id_ else "",
"kb_url": kb_url_template.format(span.kb_id_) if kb_url_template else "#",
}
- for span in doc.spans[spans_key]
+ for span in doc.spans.get(spans_key, [])
]
tokens = [token.text for token in doc]
if not spans:
- warnings.warn(Warnings.W117.format(spans_key=spans_key))
+ keys = list(doc.spans.keys())
+ warnings.warn(Warnings.W117.format(spans_key=spans_key, keys=keys))
title = doc.user_data.get("title", None) if hasattr(doc, "user_data") else None
settings = get_doc_settings(doc)
return {
diff --git a/spacy/errors.py b/spacy/errors.py
index 3268cb437..2ff42c4d8 100644
--- a/spacy/errors.py
+++ b/spacy/errors.py
@@ -131,13 +131,6 @@ class Warnings(metaclass=ErrorsWithCodes):
"and make it independent. For example, `replace_listeners = "
"[\"model.tok2vec\"]` See the documentation for details: "
"https://spacy.io/usage/training#config-components-listeners")
- W088 = ("The pipeline component {name} implements a `begin_training` "
- "method, which won't be called by spaCy. As of v3.0, `begin_training` "
- "has been renamed to `initialize`, so you likely want to rename the "
- "component method. See the documentation for details: "
- "https://spacy.io/api/language#initialize")
- W089 = ("As of spaCy v3.0, the `nlp.begin_training` method has been renamed "
- "to `nlp.initialize`.")
W090 = ("Could not locate any {format} files in path '{path}'.")
W091 = ("Could not clean/remove the temp directory at {dir}: {msg}.")
W092 = ("Ignoring annotations for sentence starts, as dependency heads are set.")
@@ -199,7 +192,7 @@ class Warnings(metaclass=ErrorsWithCodes):
W117 = ("No spans to visualize found in Doc object with spans_key: '{spans_key}'. If this is "
"surprising to you, make sure the Doc was processed using a model "
"that supports span categorization, and check the `doc.spans[spans_key]` "
- "property manually if necessary.")
+ "property manually if necessary.\n\nAvailable keys: {keys}")
W118 = ("Term '{term}' not found in glossary. It may however be explained in documentation "
"for the corpora used to train the language. Please check "
"`nlp.meta[\"sources\"]` for any relevant links.")
@@ -212,6 +205,8 @@ class Warnings(metaclass=ErrorsWithCodes):
W121 = ("Attempting to trace non-existent method '{method}' in pipe '{pipe}'")
W122 = ("Couldn't trace method '{method}' in pipe '{pipe}'. This can happen if the pipe class "
"is a Cython extension type.")
+ W123 = ("Argument `enable` with value {enable} does not contain all values specified in the config option "
+ "`enabled` ({enabled}). Be aware that this might affect other components in your pipeline.")
W400 = ("`use_upper=False` is ignored, the upper layer is always enabled")
@@ -250,9 +245,7 @@ class Errors(metaclass=ErrorsWithCodes):
"https://spacy.io/usage/models")
E011 = ("Unknown operator: '{op}'. Options: {opts}")
E012 = ("Cannot add pattern for zero tokens to matcher.\nKey: {key}")
- E016 = ("MultitaskObjective target should be function or one of: dep, "
- "tag, ent, dep_tag_offset, ent_tag.")
- E017 = ("Can only add unicode or bytes. Got type: {value_type}")
+ E017 = ("Can only add 'str' inputs to StringStore. Got type: {value_type}")
E018 = ("Can't retrieve string for hash '{hash_value}'. This usually "
"refers to an issue with the `Vocab` or `StringStore`.")
E019 = ("Can't create transition with unknown action ID: {action}. Action "
@@ -345,6 +338,11 @@ class Errors(metaclass=ErrorsWithCodes):
"clear the existing vectors and resize the table.")
E074 = ("Error interpreting compiled match pattern: patterns are expected "
"to end with the attribute {attr}. Got: {bad_attr}.")
+ E079 = ("Error computing states in beam: number of predicted beams "
+ "({pbeams}) does not equal number of gold beams ({gbeams}).")
+ E080 = ("Duplicate state found in beam: {key}.")
+ E081 = ("Error getting gradient in beam: number of histories ({n_hist}) "
+ "does not equal number of losses ({losses}).")
E082 = ("Error deprojectivizing parse: number of heads ({n_heads}), "
"projective heads ({n_proj_heads}) and labels ({n_labels}) do not "
"match.")
@@ -460,13 +458,13 @@ class Errors(metaclass=ErrorsWithCodes):
"same, but found '{nlp}' and '{vocab}' respectively.")
E152 = ("The attribute {attr} is not supported for token patterns. "
"Please use the option `validate=True` with the Matcher, PhraseMatcher, "
- "EntityRuler or AttributeRuler for more details.")
+ "SpanRuler or AttributeRuler for more details.")
E153 = ("The value type {vtype} is not supported for token patterns. "
"Please use the option validate=True with Matcher, PhraseMatcher, "
- "EntityRuler or AttributeRuler for more details.")
+ "SpanRuler or AttributeRuler for more details.")
E154 = ("One of the attributes or values is not supported for token "
"patterns. Please use the option `validate=True` with the Matcher, "
- "PhraseMatcher, or EntityRuler for more details.")
+ "PhraseMatcher, or SpanRuler for more details.")
E155 = ("The pipeline needs to include a {pipe} in order to use "
"Matcher or PhraseMatcher with the attribute {attr}. "
"Try using `nlp()` instead of `nlp.make_doc()` or `list(nlp.pipe())` "
@@ -540,8 +538,14 @@ class Errors(metaclass=ErrorsWithCodes):
E199 = ("Unable to merge 0-length span at `doc[{start}:{end}]`.")
E200 = ("Can't set {attr} from Span.")
E202 = ("Unsupported {name} mode '{mode}'. Supported modes: {modes}.")
+ E203 = ("If the {name} embedding layer is not updated "
+ "during training, make sure to include it in 'annotating components'")
# New errors added in v3.x
+ E851 = ("The 'textcat' component labels should only have values of 0 or 1, "
+ "but found value of '{val}'.")
+ E852 = ("The tar file pulled from the remote attempted an unsafe path "
+ "traversal.")
E853 = ("Unsupported component factory name '{name}'. The character '.' is "
"not permitted in factory names.")
E854 = ("Unable to set doc.ents. Check that the 'ents_filter' does not "
@@ -709,11 +713,11 @@ class Errors(metaclass=ErrorsWithCodes):
"need to modify the pipeline, use the built-in methods like "
"`nlp.add_pipe`, `nlp.remove_pipe`, `nlp.disable_pipe` or "
"`nlp.enable_pipe` instead.")
- E927 = ("Can't write to frozen list Maybe you're trying to modify a computed "
+ E927 = ("Can't write to frozen list. Maybe you're trying to modify a computed "
"property or default function argument?")
- E928 = ("A KnowledgeBase can only be serialized to/from from a directory, "
+ E928 = ("An InMemoryLookupKB can only be serialized to/from from a directory, "
"but the provided argument {loc} points to a file.")
- E929 = ("Couldn't read KnowledgeBase from {loc}. The path does not seem to exist.")
+ E929 = ("Couldn't read InMemoryLookupKB from {loc}. The path does not seem to exist.")
E930 = ("Received invalid get_examples callback in `{method}`. "
"Expected function that returns an iterable of Example objects but "
"got: {obj}")
@@ -721,13 +725,6 @@ class Errors(metaclass=ErrorsWithCodes):
"method in component '{name}'. If you want to use this "
"method, make sure it's overwritten on the subclass.")
E940 = ("Found NaN values in scores.")
- E941 = ("Can't find model '{name}'. It looks like you're trying to load a "
- "model from a shortcut, which is obsolete as of spaCy v3.0. To "
- "load the model, use its full name instead:\n\n"
- "nlp = spacy.load(\"{full}\")\n\nFor more details on the available "
- "models, see the models directory: https://spacy.io/models. If you "
- "want to create a blank model, use spacy.blank: "
- "nlp = spacy.blank(\"{name}\")")
E942 = ("Executing `after_{name}` callback failed. Expected the function to "
"return an initialized nlp object but got: {value}. Maybe "
"you forgot to return the modified object in your function?")
@@ -915,8 +912,6 @@ class Errors(metaclass=ErrorsWithCodes):
E1021 = ("`pos` value \"{pp}\" is not a valid Universal Dependencies tag. "
"Non-UD tags should use the `tag` property.")
E1022 = ("Words must be of type str or int, but input is of type '{wtype}'")
- E1023 = ("Couldn't read EntityRuler from the {path}. This file doesn't "
- "exist.")
E1024 = ("A pattern with {attr_type} '{label}' is not present in "
"'{component}' patterns.")
E1025 = ("Cannot intify the value '{value}' as an IOB string. The only "
@@ -939,23 +934,25 @@ class Errors(metaclass=ErrorsWithCodes):
E1040 = ("Doc.from_json requires all tokens to have the same attributes. "
"Some tokens do not contain annotation for: {partial_attrs}")
E1041 = ("Expected a string, Doc, or bytes as input, but got: {type}")
- E1042 = ("Function was called with `{arg1}`={arg1_values} and "
- "`{arg2}`={arg2_values} but these arguments are conflicting.")
+ E1042 = ("`enable={enable}` and `disable={disable}` are inconsistent with each other.\nIf you only passed "
+ "one of `enable` or `disable`, the other argument is specified in your pipeline's configuration.\nIn that "
+ "case pass an empty list for the previously not specified argument to avoid this error.")
E1043 = ("Expected None or a value in range [{range_start}, {range_end}] for entity linker threshold, but got "
"{value}.")
+ E1044 = ("Expected `candidates_batch_size` to be >= 1, but got: {value}")
+ E1045 = ("Encountered {parent} subclass without `{parent}.{method}` "
+ "method in '{name}'. If you want to use this method, make "
+ "sure it's overwritten on the subclass.")
+ E1046 = ("{cls_name} is an abstract class and cannot be instantiated. If you are looking for spaCy's default "
+ "knowledge base, use `InMemoryLookupKB`.")
+ E1047 = ("`find_threshold()` only supports components with a `scorer` attribute.")
+ E1048 = ("Got '{unexpected}' as console progress bar type, but expected one of the following: {expected}")
# v4 error strings
E4000 = ("Expected a Doc as input, but got: '{type}'")
- E4001 = ("Backprop is not supported when is_train is not set.")
-
-
-# Deprecated model shortcuts, only used in errors and warnings
-OLD_MODEL_SHORTCUTS = {
- "en": "en_core_web_sm", "de": "de_core_news_sm", "es": "es_core_news_sm",
- "pt": "pt_core_news_sm", "fr": "fr_core_news_sm", "it": "it_core_news_sm",
- "nl": "nl_core_news_sm", "el": "el_core_news_sm", "nb": "nb_core_news_sm",
- "lt": "lt_core_news_sm", "xx": "xx_ent_wiki_sm"
-}
+ E4001 = ("Expected input to be one of the following types: ({expected_types}), "
+ "but got '{received_type}'")
+ E4002 = ("Backprop is not supported when is_train is not set.")
# fmt: on
diff --git a/spacy/kb/__init__.py b/spacy/kb/__init__.py
new file mode 100644
index 000000000..1d70a9b34
--- /dev/null
+++ b/spacy/kb/__init__.py
@@ -0,0 +1,3 @@
+from .kb import KnowledgeBase
+from .kb_in_memory import InMemoryLookupKB
+from .candidate import Candidate, get_candidates, get_candidates_batch
diff --git a/spacy/kb/candidate.pxd b/spacy/kb/candidate.pxd
new file mode 100644
index 000000000..942ce9dd0
--- /dev/null
+++ b/spacy/kb/candidate.pxd
@@ -0,0 +1,12 @@
+from .kb cimport KnowledgeBase
+from libcpp.vector cimport vector
+from ..typedefs cimport hash_t
+
+# Object used by the Entity Linker that summarizes one entity-alias candidate combination.
+cdef class Candidate:
+ cdef readonly KnowledgeBase kb
+ cdef hash_t entity_hash
+ cdef float entity_freq
+ cdef vector[float] entity_vector
+ cdef hash_t alias_hash
+ cdef float prior_prob
diff --git a/spacy/kb/candidate.pyx b/spacy/kb/candidate.pyx
new file mode 100644
index 000000000..c89efeb03
--- /dev/null
+++ b/spacy/kb/candidate.pyx
@@ -0,0 +1,74 @@
+# cython: infer_types=True, profile=True
+
+from typing import Iterable
+from .kb cimport KnowledgeBase
+from ..tokens import Span
+
+cdef class Candidate:
+ """A `Candidate` object refers to a textual mention (`alias`) that may or may not be resolved
+ to a specific `entity` from a Knowledge Base. This will be used as input for the entity linking
+ algorithm which will disambiguate the various candidates to the correct one.
+ Each candidate (alias, entity) pair is assigned a certain prior probability.
+
+ DOCS: https://spacy.io/api/kb/#candidate-init
+ """
+
+ def __init__(self, KnowledgeBase kb, entity_hash, entity_freq, entity_vector, alias_hash, prior_prob):
+ self.kb = kb
+ self.entity_hash = entity_hash
+ self.entity_freq = entity_freq
+ self.entity_vector = entity_vector
+ self.alias_hash = alias_hash
+ self.prior_prob = prior_prob
+
+ @property
+ def entity(self) -> int:
+ """RETURNS (uint64): hash of the entity's KB ID/name"""
+ return self.entity_hash
+
+ @property
+ def entity_(self) -> str:
+ """RETURNS (str): ID/name of this entity in the KB"""
+ return self.kb.vocab.strings[self.entity_hash]
+
+ @property
+ def alias(self) -> int:
+ """RETURNS (uint64): hash of the alias"""
+ return self.alias_hash
+
+ @property
+ def alias_(self) -> str:
+ """RETURNS (str): ID of the original alias"""
+ return self.kb.vocab.strings[self.alias_hash]
+
+ @property
+ def entity_freq(self) -> float:
+ return self.entity_freq
+
+ @property
+ def entity_vector(self) -> Iterable[float]:
+ return self.entity_vector
+
+ @property
+ def prior_prob(self) -> float:
+ return self.prior_prob
+
+
+def get_candidates(kb: KnowledgeBase, mention: Span) -> Iterable[Candidate]:
+ """
+ Return candidate entities for a given mention by fetching appropriate entries from the index.
+ kb (KnowledgeBase): Knowledge base to query.
+ mention (Span): Entity mention for which to identify candidates.
+ RETURNS (Iterable[Candidate]): Identified candidates.
+ """
+ return kb.get_candidates(mention)
+
+
+def get_candidates_batch(kb: KnowledgeBase, mentions: Iterable[Span]) -> Iterable[Iterable[Candidate]]:
+ """
+ Return candidate entities for the given mentions by fetching appropriate entries from the index.
+ kb (KnowledgeBase): Knowledge base to query.
+ mentions (Iterable[Span]): Entity mentions for which to identify candidates.
+ RETURNS (Iterable[Iterable[Candidate]]): Identified candidates.
+ """
+ return kb.get_candidates_batch(mentions)
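
For reference, a minimal sketch of the relocated candidate helpers in use (the entity and alias values below are made up):

```python
import spacy
from spacy.kb import InMemoryLookupKB, get_candidates

nlp = spacy.blank("en")
kb = InMemoryLookupKB(nlp.vocab, entity_vector_length=3)
kb.add_entity(entity="Q42", freq=12, entity_vector=[1.0, 2.0, 3.0])
kb.add_alias(alias="Douglas", entities=["Q42"], probabilities=[0.8])

doc = nlp("Douglas wrote books")
# get_candidates now delegates to kb.get_candidates(span)
for candidate in get_candidates(kb, doc[0:1]):
    print(candidate.entity_, candidate.alias_, candidate.prior_prob)
```
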
diff --git a/spacy/kb/kb.pxd b/spacy/kb/kb.pxd
new file mode 100644
index 000000000..1adeef8ae
--- /dev/null
+++ b/spacy/kb/kb.pxd
@@ -0,0 +1,10 @@
+"""Knowledge-base for entity or concept linking."""
+
+from cymem.cymem cimport Pool
+from libc.stdint cimport int64_t
+from ..vocab cimport Vocab
+
+cdef class KnowledgeBase:
+ cdef Pool mem
+ cdef readonly Vocab vocab
+ cdef readonly int64_t entity_vector_length
diff --git a/spacy/kb/kb.pyx b/spacy/kb/kb.pyx
new file mode 100644
index 000000000..ce4bc0138
--- /dev/null
+++ b/spacy/kb/kb.pyx
@@ -0,0 +1,108 @@
+# cython: infer_types=True, profile=True
+
+from pathlib import Path
+from typing import Iterable, Tuple, Union
+from cymem.cymem cimport Pool
+
+from .candidate import Candidate
+from ..tokens import Span
+from ..util import SimpleFrozenList
+from ..errors import Errors
+
+
+cdef class KnowledgeBase:
+ """A `KnowledgeBase` instance stores unique identifiers for entities and their textual aliases,
+ to support entity linking of named entities to real-world concepts.
+ This is an abstract class and requires its operations to be implemented.
+
+ DOCS: https://spacy.io/api/kb
+ """
+
+ def __init__(self, vocab: Vocab, entity_vector_length: int):
+ """Create a KnowledgeBase."""
+ # Make sure abstract KB is not instantiated.
+ if self.__class__ == KnowledgeBase:
+ raise TypeError(
+ Errors.E1046.format(cls_name=self.__class__.__name__)
+ )
+
+ self.vocab = vocab
+ self.entity_vector_length = entity_vector_length
+ self.mem = Pool()
+
+ def get_candidates_batch(self, mentions: Iterable[Span]) -> Iterable[Iterable[Candidate]]:
+ """
+ Return candidate entities for specified texts. Each candidate defines the entity, the original alias,
+ and the prior probability of that alias resolving to that entity.
+ If no candidate is found for a given text, an empty list is returned.
+ mentions (Iterable[Span]): Mentions for which to get candidates.
+ RETURNS (Iterable[Iterable[Candidate]]): Identified candidates.
+ """
+ return [self.get_candidates(span) for span in mentions]
+
+ def get_candidates(self, mention: Span) -> Iterable[Candidate]:
+ """
+ Return candidate entities for specified text. Each candidate defines the entity, the original alias,
+ and the prior probability of that alias resolving to that entity.
+ If no candidate is found for a given text, an empty list is returned.
+ mention (Span): Mention for which to get candidates.
+ RETURNS (Iterable[Candidate]): Identified candidates.
+ """
+ raise NotImplementedError(
+ Errors.E1045.format(parent="KnowledgeBase", method="get_candidates", name=self.__name__)
+ )
+
+ def get_vectors(self, entities: Iterable[str]) -> Iterable[Iterable[float]]:
+ """
+ Return vectors for entities.
+ entities (Iterable[str]): Entity names/IDs.
+ RETURNS (Iterable[Iterable[float]]): Vectors for specified entities.
+ """
+ return [self.get_vector(entity) for entity in entities]
+
+ def get_vector(self, str entity) -> Iterable[float]:
+ """
+ Return vector for entity.
+ entity (str): Entity name/ID.
+ RETURNS (Iterable[float]): Vector for specified entity.
+ """
+ raise NotImplementedError(
+ Errors.E1045.format(parent="KnowledgeBase", method="get_vector", name=self.__name__)
+ )
+
+ def to_bytes(self, **kwargs) -> bytes:
+ """Serialize the current state to a binary string.
+ RETURNS (bytes): Current state as binary string.
+ """
+ raise NotImplementedError(
+ Errors.E1045.format(parent="KnowledgeBase", method="to_bytes", name=self.__name__)
+ )
+
+ def from_bytes(self, bytes_data: bytes, *, exclude: Tuple[str] = tuple()):
+ """Load state from a binary string.
+ bytes_data (bytes): KB state.
+ exclude (Tuple[str]): Properties to exclude when restoring KB.
+ """
+ raise NotImplementedError(
+ Errors.E1045.format(parent="KnowledgeBase", method="from_bytes", name=self.__name__)
+ )
+
+ def to_disk(self, path: Union[str, Path], exclude: Iterable[str] = SimpleFrozenList()) -> None:
+ """
+ Write KnowledgeBase content to disk.
+ path (Union[str, Path]): Target file path.
+ exclude (Iterable[str]): List of components to exclude.
+ """
+ raise NotImplementedError(
+ Errors.E1045.format(parent="KnowledgeBase", method="to_disk", name=self.__name__)
+ )
+
+ def from_disk(self, path: Union[str, Path], exclude: Iterable[str] = SimpleFrozenList()) -> None:
+ """
+ Load KnowledgeBase content from disk.
+ path (Union[str, Path]): Target file path.
+ exclude (Iterable[str]): List of components to exclude.
+ """
+ raise NotImplementedError(
+ Errors.E1045.format(parent="KnowledgeBase", method="from_disk", name=self.__name__)
+ )
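
A short illustration of the new class hierarchy, assuming a build that includes this patch: the abstract base can no longer be instantiated, while the bundled lookup implementation still can.

```python
from spacy.kb import KnowledgeBase, InMemoryLookupKB
from spacy.vocab import Vocab

vocab = Vocab()
try:
    KnowledgeBase(vocab, entity_vector_length=64)   # abstract base, raises E1046
except TypeError as err:
    print(err)

kb = InMemoryLookupKB(vocab, entity_vector_length=64)  # concrete default implementation
```
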
diff --git a/spacy/kb.pxd b/spacy/kb/kb_in_memory.pxd
similarity index 92%
rename from spacy/kb.pxd
rename to spacy/kb/kb_in_memory.pxd
index a823dbe1e..825a6bde9 100644
--- a/spacy/kb.pxd
+++ b/spacy/kb/kb_in_memory.pxd
@@ -1,14 +1,12 @@
"""Knowledge-base for entity or concept linking."""
-from cymem.cymem cimport Pool
from preshed.maps cimport PreshMap
from libcpp.vector cimport vector
from libc.stdint cimport int32_t, int64_t
from libc.stdio cimport FILE
-from .vocab cimport Vocab
-from .typedefs cimport hash_t
-from .structs cimport KBEntryC, AliasC
-
+from ..typedefs cimport hash_t
+from ..structs cimport KBEntryC, AliasC
+from .kb cimport KnowledgeBase
ctypedef vector[KBEntryC] entry_vec
ctypedef vector[AliasC] alias_vec
@@ -16,21 +14,7 @@ ctypedef vector[float] float_vec
ctypedef vector[float_vec] float_matrix
-# Object used by the Entity Linker that summarizes one entity-alias candidate combination.
-cdef class Candidate:
- cdef readonly KnowledgeBase kb
- cdef hash_t entity_hash
- cdef float entity_freq
- cdef vector[float] entity_vector
- cdef hash_t alias_hash
- cdef float prior_prob
-
-
-cdef class KnowledgeBase:
- cdef Pool mem
- cdef readonly Vocab vocab
- cdef int64_t entity_vector_length
-
+cdef class InMemoryLookupKB(KnowledgeBase):
# This maps 64bit keys (hash of unique entity string)
# to 64bit values (position of the _KBEntryC struct in the _entries vector).
# The PreshMap is pretty space efficient, as it uses open addressing. So
diff --git a/spacy/kb.pyx b/spacy/kb/kb_in_memory.pyx
similarity index 90%
rename from spacy/kb.pyx
rename to spacy/kb/kb_in_memory.pyx
index ae1983a8d..485e52c2f 100644
--- a/spacy/kb.pyx
+++ b/spacy/kb/kb_in_memory.pyx
@@ -1,8 +1,7 @@
# cython: infer_types=True, profile=True
-from typing import Iterator, Iterable, Callable, Dict, Any
+from typing import Iterable, Callable, Dict, Any, Union
import srsly
-from cymem.cymem cimport Pool
from preshed.maps cimport PreshMap
from cpython.exc cimport PyErr_SetFromErrno
from libc.stdio cimport fopen, fclose, fread, fwrite, feof, fseek
@@ -12,85 +11,28 @@ from libcpp.vector cimport vector
from pathlib import Path
import warnings
-from .typedefs cimport hash_t
-from .errors import Errors, Warnings
-from . import util
-from .util import SimpleFrozenList, ensure_path
-
-cdef class Candidate:
- """A `Candidate` object refers to a textual mention (`alias`) that may or may not be resolved
- to a specific `entity` from a Knowledge Base. This will be used as input for the entity linking
- algorithm which will disambiguate the various candidates to the correct one.
- Each candidate (alias, entity) pair is assigned to a certain prior probability.
-
- DOCS: https://spacy.io/api/kb/#candidate_init
- """
-
- def __init__(self, KnowledgeBase kb, entity_hash, entity_freq, entity_vector, alias_hash, prior_prob):
- self.kb = kb
- self.entity_hash = entity_hash
- self.entity_freq = entity_freq
- self.entity_vector = entity_vector
- self.alias_hash = alias_hash
- self.prior_prob = prior_prob
-
- @property
- def entity(self):
- """RETURNS (uint64): hash of the entity's KB ID/name"""
- return self.entity_hash
-
- @property
- def entity_(self):
- """RETURNS (str): ID/name of this entity in the KB"""
- return self.kb.vocab.strings[self.entity_hash]
-
- @property
- def alias(self):
- """RETURNS (uint64): hash of the alias"""
- return self.alias_hash
-
- @property
- def alias_(self):
- """RETURNS (str): ID of the original alias"""
- return self.kb.vocab.strings[self.alias_hash]
-
- @property
- def entity_freq(self):
- return self.entity_freq
-
- @property
- def entity_vector(self):
- return self.entity_vector
-
- @property
- def prior_prob(self):
- return self.prior_prob
+from ..tokens import Span
+from ..typedefs cimport hash_t
+from ..errors import Errors, Warnings
+from .. import util
+from ..util import SimpleFrozenList, ensure_path
+from ..vocab cimport Vocab
+from .kb cimport KnowledgeBase
+from .candidate import Candidate as Candidate
-def get_candidates(KnowledgeBase kb, span) -> Iterator[Candidate]:
- """
- Return candidate entities for a given span by using the text of the span as the alias
- and fetching appropriate entries from the index.
- This particular function is optimized to work with the built-in KB functionality,
- but any other custom candidate generation method can be used in combination with the KB as well.
- """
- return kb.get_alias_candidates(span.text)
-
-
-cdef class KnowledgeBase:
- """A `KnowledgeBase` instance stores unique identifiers for entities and their textual aliases,
+cdef class InMemoryLookupKB(KnowledgeBase):
+ """An `InMemoryLookupKB` instance stores unique identifiers for entities and their textual aliases,
to support entity linking of named entities to real-world concepts.
- DOCS: https://spacy.io/api/kb
+ DOCS: https://spacy.io/api/kb_in_memory
"""
def __init__(self, Vocab vocab, entity_vector_length):
- """Create a KnowledgeBase."""
- self.mem = Pool()
- self.entity_vector_length = entity_vector_length
+ """Create an InMemoryLookupKB."""
+ super().__init__(vocab, entity_vector_length)
self._entry_index = PreshMap()
self._alias_index = PreshMap()
- self.vocab = vocab
self._create_empty_vectors(dummy_hash=self.vocab.strings[""])
def _initialize_entities(self, int64_t nr_entities):
@@ -104,11 +46,6 @@ cdef class KnowledgeBase:
self._alias_index = PreshMap(nr_aliases + 1)
self._aliases_table = alias_vec(nr_aliases + 1)
- @property
- def entity_vector_length(self):
- """RETURNS (uint64): length of the entity vectors"""
- return self.entity_vector_length
-
def __len__(self):
return self.get_size_entities()
@@ -286,7 +223,10 @@ cdef class KnowledgeBase:
alias_entry.probs = probs
self._aliases_table[alias_index] = alias_entry
- def get_alias_candidates(self, str alias) -> Iterator[Candidate]:
+ def get_candidates(self, mention: Span) -> Iterable[Candidate]:
+ return self.get_alias_candidates(mention.text) # type: ignore
+
+ def get_alias_candidates(self, str alias) -> Iterable[Candidate]:
"""
Return candidate entities for an alias. Each candidate defines the entity, the original alias,
and the prior probability of that alias resolving to that entity.
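
As a usage note, serialization keeps its directory-based contract (see the updated E928/E929 messages above); a rough sketch with made-up entity data:

```python
from pathlib import Path
import spacy
from spacy.kb import InMemoryLookupKB

nlp = spacy.blank("en")
kb = InMemoryLookupKB(nlp.vocab, entity_vector_length=2)
kb.add_entity(entity="Q1", freq=5, entity_vector=[0.1, 0.2])
kb.add_alias(alias="earth", entities=["Q1"], probabilities=[0.9])

kb.to_disk(Path("kb_dir"))               # must point to a directory, not a file (E928)
kb2 = InMemoryLookupKB(nlp.vocab, entity_vector_length=2)
kb2.from_disk(Path("kb_dir"))
print(kb2.get_size_entities(), kb2.get_size_aliases())
```
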
diff --git a/spacy/lang/char_classes.py b/spacy/lang/char_classes.py
index 1d204c46c..37c58c85f 100644
--- a/spacy/lang/char_classes.py
+++ b/spacy/lang/char_classes.py
@@ -280,7 +280,7 @@ _currency = (
_punct = (
r"… …… , : ; \! \? ¿ ؟ ¡ \( \) \[ \] \{ \} < > _ # \* & 。 ? ! , 、 ; : ~ · । ، ۔ ؛ ٪"
)
-_quotes = r'\' " ” “ ` ‘ ´ ’ ‚ , „ » « 「 」 『 』 ( ) 〔 〕 【 】 《 》 〈 〉'
+_quotes = r'\' " ” “ ` ‘ ´ ’ ‚ , „ » « 「 」 『 』 ( ) 〔 〕 【 】 《 》 〈 〉 〈 〉 ⟦ ⟧'
_hyphens = "- – — -- --- —— ~"
# Various symbols like dingbats, but also emoji
diff --git a/spacy/lang/grc/__init__.py b/spacy/lang/grc/__init__.py
index e83f0c5a5..019b3802e 100644
--- a/spacy/lang/grc/__init__.py
+++ b/spacy/lang/grc/__init__.py
@@ -1,11 +1,15 @@
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .stop_words import STOP_WORDS
from .lex_attrs import LEX_ATTRS
+from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES, TOKENIZER_INFIXES
from ...language import Language, BaseDefaults
class AncientGreekDefaults(BaseDefaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
+ prefixes = TOKENIZER_PREFIXES
+ suffixes = TOKENIZER_SUFFIXES
+ infixes = TOKENIZER_INFIXES
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
diff --git a/spacy/lang/grc/punctuation.py b/spacy/lang/grc/punctuation.py
new file mode 100644
index 000000000..8f3589e9a
--- /dev/null
+++ b/spacy/lang/grc/punctuation.py
@@ -0,0 +1,46 @@
+from ..char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES, LIST_CURRENCY
+from ..char_classes import LIST_ICONS, ALPHA_LOWER, ALPHA_UPPER, ALPHA, HYPHENS
+from ..char_classes import CONCAT_QUOTES
+
+_prefixes = (
+ [
+ "†",
+ "⸏",
+ ]
+ + LIST_PUNCT
+ + LIST_ELLIPSES
+ + LIST_QUOTES
+ + LIST_CURRENCY
+ + LIST_ICONS
+)
+
+_suffixes = (
+ LIST_PUNCT
+ + LIST_ELLIPSES
+ + LIST_QUOTES
+ + LIST_ICONS
+ + [
+ "†",
+ "⸎",
+ r"(?<=[\u1F00-\u1FFF\u0370-\u03FF])[\-\.⸏]",
+ ]
+)
+
+_infixes = (
+ LIST_ELLIPSES
+ + LIST_ICONS
+ + [
+ r"(?<=[0-9])[+\-\*^](?=[0-9-])",
+ r"(?<=[{al}{q}])\.(?=[{au}{q}])".format(
+ al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES
+ ),
+ r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
+ r"(?<=[{a}0-9])(?:{h})(?=[{a}])".format(a=ALPHA, h=HYPHENS),
+ r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA),
+ r"(?<=[\u1F00-\u1FFF\u0370-\u03FF])—",
+ ]
+)
+
+TOKENIZER_PREFIXES = _prefixes
+TOKENIZER_SUFFIXES = _suffixes
+TOKENIZER_INFIXES = _infixes
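
To try the new Ancient Greek punctuation rules locally, something like the following should work on a build that includes this patch (output not asserted here):

```python
import spacy

nlp = spacy.blank("grc")
# The dagger is now split off as a prefix and the em dash after Greek
# characters is treated as an infix.
doc = nlp("†Μῆνιν ἄειδε—θεά")
print([token.text for token in doc])
```
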
diff --git a/spacy/lang/nl/stop_words.py b/spacy/lang/nl/stop_words.py
index a2c6198e7..cd4fdefdf 100644
--- a/spacy/lang/nl/stop_words.py
+++ b/spacy/lang/nl/stop_words.py
@@ -15,7 +15,7 @@
STOP_WORDS = set(
"""
-aan af al alle alles allebei alleen allen als altijd ander anders andere anderen aangaangde aangezien achter achterna
+aan af al alle alles allebei alleen allen als altijd ander anders andere anderen aangaande aangezien achter achterna
afgelopen aldus alhoewel anderzijds
ben bij bijna bijvoorbeeld behalve beide beiden beneden bent bepaald beter betere betreffende binnen binnenin boven
diff --git a/spacy/lang/ru/lemmatizer.py b/spacy/lang/ru/lemmatizer.py
index 720d3a8cb..f4a35de38 100644
--- a/spacy/lang/ru/lemmatizer.py
+++ b/spacy/lang/ru/lemmatizer.py
@@ -23,39 +23,44 @@ class RussianLemmatizer(Lemmatizer):
overwrite: bool = False,
scorer: Optional[Callable] = lemmatizer_score,
) -> None:
- if mode == "pymorphy2":
+ if mode in {"pymorphy2", "pymorphy2_lookup"}:
try:
from pymorphy2 import MorphAnalyzer
except ImportError:
raise ImportError(
- "The Russian lemmatizer mode 'pymorphy2' requires the "
- "pymorphy2 library. Install it with: pip install pymorphy2"
+ "The lemmatizer mode 'pymorphy2' requires the "
+ "pymorphy2 library and dictionaries. Install them with: "
+ "pip install pymorphy2"
+ "# for Ukrainian dictionaries:"
+ "pip install pymorphy2-dicts-uk"
) from None
if getattr(self, "_morph", None) is None:
- self._morph = MorphAnalyzer()
- elif mode == "pymorphy3":
+ self._morph = MorphAnalyzer(lang="ru")
+ elif mode in {"pymorphy3", "pymorphy3_lookup"}:
try:
from pymorphy3 import MorphAnalyzer
except ImportError:
raise ImportError(
- "The Russian lemmatizer mode 'pymorphy3' requires the "
- "pymorphy3 library. Install it with: pip install pymorphy3"
+ "The lemmatizer mode 'pymorphy3' requires the "
+ "pymorphy3 library and dictionaries. Install them with: "
+ "pip install pymorphy3"
+ "# for Ukrainian dictionaries:"
+ "pip install pymorphy3-dicts-uk"
) from None
if getattr(self, "_morph", None) is None:
- self._morph = MorphAnalyzer()
+ self._morph = MorphAnalyzer(lang="ru")
super().__init__(
vocab, model, name, mode=mode, overwrite=overwrite, scorer=scorer
)
- def pymorphy2_lemmatize(self, token: Token) -> List[str]:
+ def _pymorphy_lemmatize(self, token: Token) -> List[str]:
string = token.text
univ_pos = token.pos_
morphology = token.morph.to_dict()
if univ_pos == "PUNCT":
return [PUNCT_RULES.get(string, string)]
if univ_pos not in ("ADJ", "DET", "NOUN", "NUM", "PRON", "PROPN", "VERB"):
- # Skip unchangeable pos
- return [string.lower()]
+ return self._pymorphy_lookup_lemmatize(token)
analyses = self._morph.parse(string)
filtered_analyses = []
for analysis in analyses:
@@ -63,8 +68,10 @@ class RussianLemmatizer(Lemmatizer):
# Skip suggested parse variant for unknown word for pymorphy
continue
analysis_pos, _ = oc2ud(str(analysis.tag))
- if analysis_pos == univ_pos or (
- analysis_pos in ("NOUN", "PROPN") and univ_pos in ("NOUN", "PROPN")
+ if (
+ analysis_pos == univ_pos
+ or (analysis_pos in ("NOUN", "PROPN") and univ_pos in ("NOUN", "PROPN"))
+ or ((analysis_pos == "PRON") and (univ_pos == "DET"))
):
filtered_analyses.append(analysis)
if not len(filtered_analyses):
@@ -107,15 +114,27 @@ class RussianLemmatizer(Lemmatizer):
dict.fromkeys([analysis.normal_form for analysis in filtered_analyses])
)
- def pymorphy2_lookup_lemmatize(self, token: Token) -> List[str]:
+ def _pymorphy_lookup_lemmatize(self, token: Token) -> List[str]:
string = token.text
analyses = self._morph.parse(string)
- if len(analyses) == 1:
- return [analyses[0].normal_form]
+ # Often multiple word forms map to the same normal form,
+ # so check the set of _unique_ normal forms.
+ normal_forms = set([an.normal_form for an in analyses])
+ if len(normal_forms) == 1:
+ return [next(iter(normal_forms))]
return [string]
+ def pymorphy2_lemmatize(self, token: Token) -> List[str]:
+ return self._pymorphy_lemmatize(token)
+
+ def pymorphy2_lookup_lemmatize(self, token: Token) -> List[str]:
+ return self._pymorphy_lookup_lemmatize(token)
+
def pymorphy3_lemmatize(self, token: Token) -> List[str]:
- return self.pymorphy2_lemmatize(token)
+ return self._pymorphy_lemmatize(token)
+
+ def pymorphy3_lookup_lemmatize(self, token: Token) -> List[str]:
+ return self._pymorphy_lookup_lemmatize(token)
def oc2ud(oc_tag: str) -> Tuple[str, Dict[str, str]]:
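
A hedged example of selecting one of the newly recognized lookup modes; it assumes the pymorphy3 package is installed:

```python
import spacy

nlp = spacy.blank("ru")
# Requires: pip install pymorphy3
nlp.add_pipe("lemmatizer", config={"mode": "pymorphy3_lookup"})
nlp.initialize()
doc = nlp("москвичи любят Москву")
print([token.lemma_ for token in doc])
```

With the changes above, the POS-based modes also fall back to the lookup lemmatizer for unhandled parts of speech instead of simply lowercasing the token.
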
diff --git a/spacy/lang/ru/tokenizer_exceptions.py b/spacy/lang/ru/tokenizer_exceptions.py
index f3756e26c..e1889f785 100644
--- a/spacy/lang/ru/tokenizer_exceptions.py
+++ b/spacy/lang/ru/tokenizer_exceptions.py
@@ -61,6 +61,11 @@ for abbr in [
{ORTH: "2к23", NORM: "2023"},
{ORTH: "2к24", NORM: "2024"},
{ORTH: "2к25", NORM: "2025"},
+ {ORTH: "2к26", NORM: "2026"},
+ {ORTH: "2к27", NORM: "2027"},
+ {ORTH: "2к28", NORM: "2028"},
+ {ORTH: "2к29", NORM: "2029"},
+ {ORTH: "2к30", NORM: "2030"},
]:
_exc[abbr[ORTH]] = [abbr]
@@ -268,8 +273,8 @@ for abbr in [
{ORTH: "з-ка", NORM: "заимка"},
{ORTH: "п-к", NORM: "починок"},
{ORTH: "киш.", NORM: "кишлак"},
- {ORTH: "п. ст. ", NORM: "поселок станция"},
- {ORTH: "п. ж/д ст. ", NORM: "поселок при железнодорожной станции"},
+ {ORTH: "п. ст.", NORM: "поселок станция"},
+ {ORTH: "п. ж/д ст.", NORM: "поселок при железнодорожной станции"},
{ORTH: "ж/д бл-ст", NORM: "железнодорожный блокпост"},
{ORTH: "ж/д б-ка", NORM: "железнодорожная будка"},
{ORTH: "ж/д в-ка", NORM: "железнодорожная ветка"},
@@ -280,12 +285,12 @@ for abbr in [
{ORTH: "ж/д п.п.", NORM: "железнодорожный путевой пост"},
{ORTH: "ж/д о.п.", NORM: "железнодорожный остановочный пункт"},
{ORTH: "ж/д рзд.", NORM: "железнодорожный разъезд"},
- {ORTH: "ж/д ст. ", NORM: "железнодорожная станция"},
+ {ORTH: "ж/д ст.", NORM: "железнодорожная станция"},
{ORTH: "м-ко", NORM: "местечко"},
{ORTH: "д.", NORM: "деревня"},
{ORTH: "с.", NORM: "село"},
{ORTH: "сл.", NORM: "слобода"},
- {ORTH: "ст. ", NORM: "станция"},
+ {ORTH: "ст.", NORM: "станция"},
{ORTH: "ст-ца", NORM: "станица"},
{ORTH: "у.", NORM: "улус"},
{ORTH: "х.", NORM: "хутор"},
@@ -388,8 +393,9 @@ for abbr in [
{ORTH: "прим.", NORM: "примечание"},
{ORTH: "прим.ред.", NORM: "примечание редакции"},
{ORTH: "см. также", NORM: "смотри также"},
- {ORTH: "кв.м.", NORM: "квадрантный метр"},
- {ORTH: "м2", NORM: "квадрантный метр"},
+ {ORTH: "см.", NORM: "смотри"},
+ {ORTH: "кв.м.", NORM: "квадратный метр"},
+ {ORTH: "м2", NORM: "квадратный метр"},
{ORTH: "б/у", NORM: "бывший в употреблении"},
{ORTH: "сокр.", NORM: "сокращение"},
{ORTH: "чел.", NORM: "человек"},
diff --git a/spacy/lang/uk/lemmatizer.py b/spacy/lang/uk/lemmatizer.py
index 97ee80479..37015cc2a 100644
--- a/spacy/lang/uk/lemmatizer.py
+++ b/spacy/lang/uk/lemmatizer.py
@@ -18,7 +18,7 @@ class UkrainianLemmatizer(RussianLemmatizer):
overwrite: bool = False,
scorer: Optional[Callable] = lemmatizer_score,
) -> None:
- if mode == "pymorphy2":
+ if mode in {"pymorphy2", "pymorphy2_lookup"}:
try:
from pymorphy2 import MorphAnalyzer
except ImportError:
@@ -29,7 +29,7 @@ class UkrainianLemmatizer(RussianLemmatizer):
) from None
if getattr(self, "_morph", None) is None:
self._morph = MorphAnalyzer(lang="uk")
- elif mode == "pymorphy3":
+ elif mode in {"pymorphy3", "pymorphy3_lookup"}:
try:
from pymorphy3 import MorphAnalyzer
except ImportError:
diff --git a/spacy/language.py b/spacy/language.py
index 34a06e576..dcb62aef0 100644
--- a/spacy/language.py
+++ b/spacy/language.py
@@ -1,4 +1,4 @@
-from typing import Iterator, Optional, Any, Dict, Callable, Iterable, Collection
+from typing import Iterator, Optional, Any, Dict, Callable, Iterable
from typing import Union, Tuple, List, Set, Pattern, Sequence
from typing import NoReturn, TYPE_CHECKING, TypeVar, cast, overload
@@ -10,6 +10,7 @@ from contextlib import contextmanager
from copy import deepcopy
from pathlib import Path
import warnings
+
from thinc.api import get_current_ops, Config, CupyOps, Optimizer
import srsly
import multiprocessing as mp
@@ -24,7 +25,7 @@ from .pipe_analysis import validate_attrs, analyze_pipes, print_pipe_analysis
from .training import Example, validate_examples
from .training.initialize import init_vocab, init_tok2vec
from .scorer import Scorer
-from .util import registry, SimpleFrozenList, _pipe, raise_error
+from .util import registry, SimpleFrozenList, _pipe, raise_error, _DEFAULT_EMPTY_PIPES
from .util import SimpleFrozenDict, combine_score_weights, CONFIG_SECTION_ORDER
from .util import warn_if_jupyter_cupy
from .lang.tokenizer_exceptions import URL_MATCH, BASE_EXCEPTIONS
@@ -42,8 +43,7 @@ from .lookups import load_lookups
from .compat import Literal
-if TYPE_CHECKING:
- from .pipeline import Pipe # noqa: F401
+PipeCallable = Callable[[Doc], Doc]
# This is the base config will all settings (training etc.)
@@ -180,7 +180,7 @@ class Language:
self.vocab: Vocab = vocab
if self.lang is None:
self.lang = self.vocab.lang
- self._components: List[Tuple[str, "Pipe"]] = []
+ self._components: List[Tuple[str, PipeCallable]] = []
self._disabled: Set[str] = set()
self.max_length = max_length
# Create the default tokenizer from the default config
@@ -302,7 +302,7 @@ class Language:
return SimpleFrozenList(names)
@property
- def components(self) -> List[Tuple[str, "Pipe"]]:
+ def components(self) -> List[Tuple[str, PipeCallable]]:
"""Get all (name, component) tuples in the pipeline, including the
currently disabled components.
"""
@@ -321,12 +321,12 @@ class Language:
return SimpleFrozenList(names, error=Errors.E926.format(attr="component_names"))
@property
- def pipeline(self) -> List[Tuple[str, "Pipe"]]:
+ def pipeline(self) -> List[Tuple[str, PipeCallable]]:
"""The processing pipeline consisting of (name, component) tuples. The
components are called on the Doc in order as it passes through the
pipeline.
- RETURNS (List[Tuple[str, Pipe]]): The pipeline.
+ RETURNS (List[Tuple[str, Callable[[Doc], Doc]]]): The pipeline.
"""
pipes = [(n, p) for n, p in self._components if n not in self._disabled]
return SimpleFrozenList(pipes, error=Errors.E926.format(attr="pipeline"))
@@ -526,7 +526,7 @@ class Language:
assigns: Iterable[str] = SimpleFrozenList(),
requires: Iterable[str] = SimpleFrozenList(),
retokenizes: bool = False,
- func: Optional["Pipe"] = None,
+ func: Optional[PipeCallable] = None,
) -> Callable[..., Any]:
"""Register a new pipeline component. Can be used for stateless function
components that don't require a separate factory. Can be used as a
@@ -541,7 +541,7 @@ class Language:
e.g. "token.ent_id". Used for pipeline analysis.
retokenizes (bool): Whether the component changes the tokenization.
Used for pipeline analysis.
- func (Optional[Callable]): Factory function if not used as a decorator.
+ func (Optional[Callable[[Doc], Doc]]): Factory function if not used as a decorator.
DOCS: https://spacy.io/api/language#component
"""
@@ -552,11 +552,11 @@ class Language:
raise ValueError(Errors.E853.format(name=name))
component_name = name if name is not None else util.get_object_name(func)
- def add_component(component_func: "Pipe") -> Callable:
+ def add_component(component_func: PipeCallable) -> Callable:
if isinstance(func, type): # function is a class
raise ValueError(Errors.E965.format(name=component_name))
- def factory_func(nlp, name: str) -> "Pipe":
+ def factory_func(nlp, name: str) -> PipeCallable:
return component_func
internal_name = cls.get_factory_name(name)
@@ -606,7 +606,7 @@ class Language:
print_pipe_analysis(analysis, keys=keys)
return analysis
- def get_pipe(self, name: str) -> "Pipe":
+ def get_pipe(self, name: str) -> PipeCallable:
"""Get a pipeline component for a given component name.
name (str): Name of pipeline component to get.
@@ -627,7 +627,7 @@ class Language:
config: Dict[str, Any] = SimpleFrozenDict(),
raw_config: Optional[Config] = None,
validate: bool = True,
- ) -> "Pipe":
+ ) -> PipeCallable:
"""Create a pipeline component. Mostly used internally. To create and
add a component to the pipeline, you can use nlp.add_pipe.
@@ -639,7 +639,7 @@ class Language:
raw_config (Optional[Config]): Internals: the non-interpolated config.
validate (bool): Whether to validate the component config against the
arguments and types expected by the factory.
- RETURNS (Pipe): The pipeline component.
+ RETURNS (Callable[[Doc], Doc]): The pipeline component.
DOCS: https://spacy.io/api/language#create_pipe
"""
@@ -694,24 +694,18 @@ class Language:
def create_pipe_from_source(
self, source_name: str, source: "Language", *, name: str
- ) -> Tuple["Pipe", str]:
+ ) -> Tuple[PipeCallable, str]:
"""Create a pipeline component by copying it from an existing model.
source_name (str): Name of the component in the source pipeline.
source (Language): The source nlp object to copy from.
name (str): Optional alternative name to use in current pipeline.
- RETURNS (Tuple[Callable, str]): The component and its factory name.
+ RETURNS (Tuple[Callable[[Doc], Doc], str]): The component and its factory name.
"""
# Check source type
if not isinstance(source, Language):
raise ValueError(Errors.E945.format(name=source_name, source=type(source)))
- # Check vectors, with faster checks first
- if (
- self.vocab.vectors.shape != source.vocab.vectors.shape
- or self.vocab.vectors.key2row != source.vocab.vectors.key2row
- or self.vocab.vectors.to_bytes(exclude=["strings"])
- != source.vocab.vectors.to_bytes(exclude=["strings"])
- ):
+ if self.vocab.vectors != source.vocab.vectors:
warnings.warn(Warnings.W113.format(name=source_name))
if source_name not in source.component_names:
raise KeyError(
@@ -745,7 +739,7 @@ class Language:
config: Dict[str, Any] = SimpleFrozenDict(),
raw_config: Optional[Config] = None,
validate: bool = True,
- ) -> "Pipe":
+ ) -> PipeCallable:
"""Add a component to the processing pipeline. Valid components are
callables that take a `Doc` object, modify it and return it. Only one
of before/after/first/last can be set. Default behaviour is "last".
@@ -768,7 +762,7 @@ class Language:
raw_config (Optional[Config]): Internals: the non-interpolated config.
validate (bool): Whether to validate the component config against the
arguments and types expected by the factory.
- RETURNS (Pipe): The pipeline component.
+ RETURNS (Callable[[Doc], Doc]): The pipeline component.
DOCS: https://spacy.io/api/language#add_pipe
"""
@@ -789,14 +783,6 @@ class Language:
factory_name, source, name=name
)
else:
- if not self.has_factory(factory_name):
- err = Errors.E002.format(
- name=factory_name,
- opts=", ".join(self.factory_names),
- method="add_pipe",
- lang=util.get_object_name(self),
- lang_code=self.lang,
- )
pipe_component = self.create_pipe(
factory_name,
name=name,
@@ -882,7 +868,7 @@ class Language:
*,
config: Dict[str, Any] = SimpleFrozenDict(),
validate: bool = True,
- ) -> "Pipe":
+ ) -> PipeCallable:
"""Replace a component in the pipeline.
name (str): Name of the component to replace.
@@ -891,7 +877,7 @@ class Language:
component. Will be merged with default config, if available.
validate (bool): Whether to validate the component config against the
arguments and types expected by the factory.
- RETURNS (Pipe): The new pipeline component.
+ RETURNS (Callable[[Doc], Doc]): The new pipeline component.
DOCS: https://spacy.io/api/language#replace_pipe
"""
@@ -943,11 +929,11 @@ class Language:
init_cfg = self._config["initialize"]["components"].pop(old_name)
self._config["initialize"]["components"][new_name] = init_cfg
- def remove_pipe(self, name: str) -> Tuple[str, "Pipe"]:
+ def remove_pipe(self, name: str) -> Tuple[str, PipeCallable]:
"""Remove a component from the pipeline.
name (str): Name of the component to remove.
- RETURNS (tuple): A `(name, component)` tuple of the removed component.
+ RETURNS (Tuple[str, Callable[[Doc], Doc]]): A `(name, component)` tuple of the removed component.
DOCS: https://spacy.io/api/language#remove_pipe
"""
@@ -1253,15 +1239,6 @@ class Language:
sgd(key, W, dW) # type: ignore[call-arg, misc]
return losses
- def begin_training(
- self,
- get_examples: Optional[Callable[[], Iterable[Example]]] = None,
- *,
- sgd: Optional[Optimizer] = None,
- ) -> Optimizer:
- warnings.warn(Warnings.W089, DeprecationWarning)
- return self.initialize(get_examples, sgd=sgd)
-
def initialize(
self,
get_examples: Optional[Callable[[], Iterable[Example]]] = None,
@@ -1362,15 +1339,15 @@ class Language:
def set_error_handler(
self,
- error_handler: Callable[[str, "Pipe", List[Doc], Exception], NoReturn],
+ error_handler: Callable[[str, PipeCallable, List[Doc], Exception], NoReturn],
):
- """Set an error handler object for all the components in the pipeline that implement
- a set_error_handler function.
+ """Set an error handler object for all the components in the pipeline
+ that implement a set_error_handler function.
- error_handler (Callable[[str, Pipe, List[Doc], Exception], NoReturn]):
- Function that deals with a failing batch of documents. This callable function should take in
- the component's name, the component itself, the offending batch of documents, and the exception
- that was thrown.
+ error_handler (Callable[[str, Callable[[Doc], Doc], List[Doc], Exception], NoReturn]):
+ Function that deals with a failing batch of documents. This callable
+ function should take in the component's name, the component itself,
+ the offending batch of documents, and the exception that was thrown.
DOCS: https://spacy.io/api/language#set_error_handler
"""
self.default_error_handler = error_handler
@@ -1698,9 +1675,9 @@ class Language:
config: Union[Dict[str, Any], Config] = {},
*,
vocab: Union[Vocab, bool] = True,
- disable: Union[str, Iterable[str]] = SimpleFrozenList(),
- enable: Union[str, Iterable[str]] = SimpleFrozenList(),
- exclude: Union[str, Iterable[str]] = SimpleFrozenList(),
+ disable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
+ enable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
+ exclude: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
meta: Dict[str, Any] = SimpleFrozenDict(),
auto_fill: bool = True,
validate: bool = True,
@@ -1727,12 +1704,6 @@ class Language:
DOCS: https://spacy.io/api/language#from_config
"""
- if isinstance(disable, str):
- disable = [disable]
- if isinstance(enable, str):
- enable = [enable]
- if isinstance(exclude, str):
- exclude = [exclude]
if auto_fill:
config = Config(
cls.default_config, section_order=CONFIG_SECTION_ORDER
@@ -1877,9 +1848,29 @@ class Language:
nlp.vocab.from_bytes(vocab_b)
# Resolve disabled/enabled settings.
+ if isinstance(disable, str):
+ disable = [disable]
+ if isinstance(enable, str):
+ enable = [enable]
+ if isinstance(exclude, str):
+ exclude = [exclude]
+
+ # `enable` should not be merged with `enabled` (the opposite is true for `disable`/`disabled`). If the config
+ # specifies values for `enabled` not included in `enable`, emit warning.
+ if id(enable) != id(_DEFAULT_EMPTY_PIPES):
+ enabled = config["nlp"].get("enabled", [])
+ if len(enabled) and not set(enabled).issubset(enable):
+ warnings.warn(
+ Warnings.W123.format(
+ enable=enable,
+ enabled=enabled,
+ )
+ )
+
+ # Ensure sets of disabled/enabled pipe names are not contradictory.
disabled_pipes = cls._resolve_component_status(
- [*config["nlp"]["disabled"], *disable],
- [*config["nlp"].get("enabled", []), *enable],
+ list({*disable, *config["nlp"].get("disabled", [])}),
+ enable,
config["nlp"]["pipeline"],
)
nlp._disabled = set(p for p in disabled_pipes if p not in exclude)
@@ -2060,18 +2051,13 @@ class Language:
if enable:
if isinstance(enable, str):
enable = [enable]
- to_disable = [
- pipe_name for pipe_name in pipe_names if pipe_name not in enable
- ]
- if disable and disable != to_disable:
- raise ValueError(
- Errors.E1042.format(
- arg1="enable",
- arg2="disable",
- arg1_values=enable,
- arg2_values=disable,
- )
- )
+ to_disable = {
+ *[pipe_name for pipe_name in pipe_names if pipe_name not in enable],
+ *disable,
+ }
+ # If any pipe to be enabled is in to_disable, the specification is inconsistent.
+ if len(set(enable) & to_disable):
+ raise ValueError(Errors.E1042.format(enable=enable, disable=disable))
return tuple(to_disable)
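
The net effect on the loading API can be sketched as follows; `en_core_web_sm` is only an example pipeline and must be installed separately:

```python
import spacy

nlp = spacy.load("en_core_web_sm", enable=["tok2vec", "tagger"])
print(nlp.component_names)   # every component, including disabled ones
print(nlp.pipe_names)        # only the enabled components

# Contradictory values now raise E1042:
# spacy.load("en_core_web_sm", enable=["tagger"], disable=["tagger"])
```
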
diff --git a/spacy/lexeme.pxd b/spacy/lexeme.pxd
index 8dea0d6a2..2d14edcd6 100644
--- a/spacy/lexeme.pxd
+++ b/spacy/lexeme.pxd
@@ -5,7 +5,6 @@ from .attrs cimport attr_id_t
from .attrs cimport ID, ORTH, LOWER, NORM, SHAPE, PREFIX, SUFFIX, LENGTH, LANG
from .structs cimport LexemeC
-from .strings cimport StringStore
from .vocab cimport Vocab
diff --git a/spacy/lexeme.pyi b/spacy/lexeme.pyi
index 4fcaa82cf..4942b18aa 100644
--- a/spacy/lexeme.pyi
+++ b/spacy/lexeme.pyi
@@ -20,7 +20,6 @@ class Lexeme:
def vector_norm(self) -> float: ...
vector: Floats1d
rank: int
- sentiment: float
@property
def orth_(self) -> str: ...
@property
diff --git a/spacy/lexeme.pyx b/spacy/lexeme.pyx
index 6c66effde..73bf28dc2 100644
--- a/spacy/lexeme.pyx
+++ b/spacy/lexeme.pyx
@@ -173,19 +173,6 @@ cdef class Lexeme:
def __set__(self, value):
self.c.id = value
- property sentiment:
- """RETURNS (float): A scalar value indicating the positivity or
- negativity of the lexeme."""
- def __get__(self):
- sentiment_table = self.vocab.lookups.get_table("lexeme_sentiment", {})
- return sentiment_table.get(self.c.orth, 0.0)
-
- def __set__(self, float x):
- if "lexeme_sentiment" not in self.vocab.lookups:
- self.vocab.lookups.add_table("lexeme_sentiment")
- sentiment_table = self.vocab.lookups.get_table("lexeme_sentiment")
- sentiment_table[self.c.orth] = x
-
@property
def orth_(self):
"""RETURNS (str): The original verbatim text of the lexeme
diff --git a/spacy/matcher/__init__.py b/spacy/matcher/__init__.py
index 286844787..a4f164847 100644
--- a/spacy/matcher/__init__.py
+++ b/spacy/matcher/__init__.py
@@ -1,5 +1,6 @@
from .matcher import Matcher
from .phrasematcher import PhraseMatcher
from .dependencymatcher import DependencyMatcher
+from .levenshtein import levenshtein
-__all__ = ["Matcher", "PhraseMatcher", "DependencyMatcher"]
+__all__ = ["Matcher", "PhraseMatcher", "DependencyMatcher", "levenshtein"]
diff --git a/spacy/matcher/levenshtein.pyx b/spacy/matcher/levenshtein.pyx
new file mode 100644
index 000000000..8463d913d
--- /dev/null
+++ b/spacy/matcher/levenshtein.pyx
@@ -0,0 +1,15 @@
+# cython: profile=True, binding=True, infer_types=True
+from cpython.object cimport PyObject
+from libc.stdint cimport int64_t
+
+from typing import Optional
+
+
+cdef extern from "polyleven.c":
+ int64_t polyleven(PyObject *o1, PyObject *o2, int64_t k)
+
+
+cpdef int64_t levenshtein(a: str, b: str, k: Optional[int] = None):
+ if k is None:
+ k = -1
+ return polyleven(a, b, k)
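
The newly exposed helper can be called directly; a quick sanity check:

```python
from spacy.matcher import levenshtein

print(levenshtein("levenshtein", "levenstein"))   # 1
print(levenshtein("kitten", "sitting"))           # 3
# The optional k bounds the computation; distances above k come back as k + 1.
print(levenshtein("kitten", "sitting", 1))        # 2
```
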
diff --git a/spacy/matcher/matcher.pyx b/spacy/matcher/matcher.pyx
index 865e7594e..bd139a55b 100644
--- a/spacy/matcher/matcher.pyx
+++ b/spacy/matcher/matcher.pyx
@@ -1,4 +1,4 @@
-# cython: infer_types=True, cython: profile=True
+# cython: infer_types=True, profile=True
from typing import List, Iterable
from libcpp.vector cimport vector
@@ -22,7 +22,7 @@ from ..attrs cimport ID, attr_id_t, NULL_ATTR, ORTH, POS, TAG, DEP, LEMMA, MORPH
from ..schemas import validate_token_pattern
from ..errors import Errors, MatchPatternError, Warnings
-from ..strings import get_string_id
+from ..strings cimport get_string_id
from ..attrs import IDS
diff --git a/spacy/matcher/polyleven.c b/spacy/matcher/polyleven.c
new file mode 100644
index 000000000..2f2b8826c
--- /dev/null
+++ b/spacy/matcher/polyleven.c
@@ -0,0 +1,384 @@
+/*
+ * Adapted from Polyleven (https://ceptord.net/)
+ *
+ * Source: https://github.com/fujimotos/polyleven/blob/c3f95a080626c5652f0151a2e449963288ccae84/polyleven.c
+ *
+ * Copyright (c) 2021 Fujimoto Seiji
+ * Copyright (c) 2021 Max Bachmann
+ * Copyright (c) 2022 Nick Mazuk
+ * Copyright (c) 2022 Michael Weiss
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <Python.h>
+#include <stdint.h>
+
+#define MIN(a,b) ((a) < (b) ? (a) : (b))
+#define MAX(a,b) ((a) > (b) ? (a) : (b))
+#define CDIV(a,b) ((a) / (b) + ((a) % (b) > 0))
+#define BIT(i,n) (((i) >> (n)) & 1)
+#define FLIP(i,n) ((i) ^ ((uint64_t) 1 << (n)))
+#define ISASCII(kd) ((kd) == PyUnicode_1BYTE_KIND)
+
+/*
+ * Bare bone of PyUnicode
+ */
+struct strbuf {
+ void *ptr;
+ int kind;
+ int64_t len;
+};
+
+static void strbuf_init(struct strbuf *s, PyObject *o)
+{
+ s->ptr = PyUnicode_DATA(o);
+ s->kind = PyUnicode_KIND(o);
+ s->len = PyUnicode_GET_LENGTH(o);
+}
+
+#define strbuf_read(s, i) PyUnicode_READ((s)->kind, (s)->ptr, (i))
+
+/*
+ * An encoded mbleven model table.
+ *
+ * Each 8-bit integer represents an edit sequence, with using two
+ * bits for a single operation.
+ *
+ * 01 = DELETE, 10 = INSERT, 11 = REPLACE
+ *
+ * For example, 13 is '1101' in binary notation, so it means
+ * DELETE + REPLACE.
+ */
+static const uint8_t MBLEVEN_MATRIX[] = {
+ 3, 0, 0, 0, 0, 0, 0, 0,
+ 1, 0, 0, 0, 0, 0, 0, 0,
+ 15, 9, 6, 0, 0, 0, 0, 0,
+ 13, 7, 0, 0, 0, 0, 0, 0,
+ 5, 0, 0, 0, 0, 0, 0, 0,
+ 63, 39, 45, 57, 54, 30, 27, 0,
+ 61, 55, 31, 37, 25, 22, 0, 0,
+ 53, 29, 23, 0, 0, 0, 0, 0,
+ 21, 0, 0, 0, 0, 0, 0, 0,
+};
+
+#define MBLEVEN_MATRIX_GET(k, d) ((((k) + (k) * (k)) / 2 - 1) + (d)) * 8
+
+static int64_t mbleven_ascii(char *s1, int64_t len1,
+ char *s2, int64_t len2, int k)
+{
+ int pos;
+ uint8_t m;
+ int64_t i, j, c, r;
+
+ pos = MBLEVEN_MATRIX_GET(k, len1 - len2);
+ r = k + 1;
+
+ while (MBLEVEN_MATRIX[pos]) {
+ m = MBLEVEN_MATRIX[pos++];
+ i = j = c = 0;
+ while (i < len1 && j < len2) {
+ if (s1[i] != s2[j]) {
+ c++;
+ if (!m) break;
+ if (m & 1) i++;
+ if (m & 2) j++;
+ m >>= 2;
+ } else {
+ i++;
+ j++;
+ }
+ }
+ c += (len1 - i) + (len2 - j);
+ r = MIN(r, c);
+ if (r < 2) {
+ return r;
+ }
+ }
+ return r;
+}
+
+static int64_t mbleven(PyObject *o1, PyObject *o2, int64_t k)
+{
+ int pos;
+ uint8_t m;
+ int64_t i, j, c, r;
+ struct strbuf s1, s2;
+
+ strbuf_init(&s1, o1);
+ strbuf_init(&s2, o2);
+
+ if (s1.len < s2.len)
+ return mbleven(o2, o1, k);
+
+ if (k > 3)
+ return -1;
+
+ if (k < s1.len - s2.len)
+ return k + 1;
+
+ if (ISASCII(s1.kind) && ISASCII(s2.kind))
+ return mbleven_ascii(s1.ptr, s1.len, s2.ptr, s2.len, k);
+
+ pos = MBLEVEN_MATRIX_GET(k, s1.len - s2.len);
+ r = k + 1;
+
+ while (MBLEVEN_MATRIX[pos]) {
+ m = MBLEVEN_MATRIX[pos++];
+ i = j = c = 0;
+ while (i < s1.len && j < s2.len) {
+ if (strbuf_read(&s1, i) != strbuf_read(&s2, j)) {
+ c++;
+ if (!m) break;
+ if (m & 1) i++;
+ if (m & 2) j++;
+ m >>= 2;
+ } else {
+ i++;
+ j++;
+ }
+ }
+ c += (s1.len - i) + (s2.len - j);
+ r = MIN(r, c);
+ if (r < 2) {
+ return r;
+ }
+ }
+ return r;
+}
+
+/*
+ * Data structure to store Peq (equality bit-vector).
+ */
+struct blockmap_entry {
+ uint32_t key[128];
+ uint64_t val[128];
+};
+
+struct blockmap {
+ int64_t nr;
+ struct blockmap_entry *list;
+};
+
+#define blockmap_key(c) ((c) | 0x80000000U)
+#define blockmap_hash(c) ((c) % 128)
+
+static int blockmap_init(struct blockmap *map, struct strbuf *s)
+{
+ int64_t i;
+ struct blockmap_entry *be;
+ uint32_t c, k;
+ uint8_t h;
+
+ map->nr = CDIV(s->len, 64);
+ map->list = calloc(1, map->nr * sizeof(struct blockmap_entry));
+ if (map->list == NULL) {
+ PyErr_NoMemory();
+ return -1;
+ }
+
+ for (i = 0; i < s->len; i++) {
+ be = &(map->list[i / 64]);
+ c = strbuf_read(s, i);
+ h = blockmap_hash(c);
+ k = blockmap_key(c);
+
+ while (be->key[h] && be->key[h] != k)
+ h = blockmap_hash(h + 1);
+ be->key[h] = k;
+ be->val[h] |= (uint64_t) 1 << (i % 64);
+ }
+ return 0;
+}
+
+static void blockmap_clear(struct blockmap *map)
+{
+ if (map->list)
+ free(map->list);
+ map->list = NULL;
+ map->nr = 0;
+}
+
+static uint64_t blockmap_get(struct blockmap *map, int block, uint32_t c)
+{
+ struct blockmap_entry *be;
+ uint8_t h;
+ uint32_t k;
+
+ h = blockmap_hash(c);
+ k = blockmap_key(c);
+
+ be = &(map->list[block]);
+ while (be->key[h] && be->key[h] != k)
+ h = blockmap_hash(h + 1);
+ return be->key[h] == k ? be->val[h] : 0;
+}
+
+/*
+ * Myers' bit-parallel algorithm
+ *
+ * See: G. Myers. "A fast bit-vector algorithm for approximate string
+ * matching based on dynamic programming." Journal of the ACM, 1999.
+ */
+static int64_t myers1999_block(struct strbuf *s1, struct strbuf *s2,
+ struct blockmap *map)
+{
+ uint64_t Eq, Xv, Xh, Ph, Mh, Pv, Mv, Last;
+ uint64_t *Mhc, *Phc;
+ int64_t i, b, hsize, vsize, Score;
+ uint8_t Pb, Mb;
+
+ hsize = CDIV(s1->len, 64);
+ vsize = CDIV(s2->len, 64);
+ Score = s2->len;
+
+ Phc = malloc(hsize * 2 * sizeof(uint64_t));
+ if (Phc == NULL) {
+ PyErr_NoMemory();
+ return -1;
+ }
+ Mhc = Phc + hsize;
+ memset(Phc, -1, hsize * sizeof(uint64_t));
+ memset(Mhc, 0, hsize * sizeof(uint64_t));
+ Last = (uint64_t)1 << ((s2->len - 1) % 64);
+
+ for (b = 0; b < vsize; b++) {
+ Mv = 0;
+ Pv = (uint64_t) -1;
+ Score = s2->len;
+
+ for (i = 0; i < s1->len; i++) {
+ Eq = blockmap_get(map, b, strbuf_read(s1, i));
+
+ Pb = BIT(Phc[i / 64], i % 64);
+ Mb = BIT(Mhc[i / 64], i % 64);
+
+ Xv = Eq | Mv;
+ Xh = ((((Eq | Mb) & Pv) + Pv) ^ Pv) | Eq | Mb;
+
+ Ph = Mv | ~ (Xh | Pv);
+ Mh = Pv & Xh;
+
+ if (Ph & Last) Score++;
+ if (Mh & Last) Score--;
+
+ if ((Ph >> 63) ^ Pb)
+ Phc[i / 64] = FLIP(Phc[i / 64], i % 64);
+
+ if ((Mh >> 63) ^ Mb)
+ Mhc[i / 64] = FLIP(Mhc[i / 64], i % 64);
+
+ Ph = (Ph << 1) | Pb;
+ Mh = (Mh << 1) | Mb;
+
+ Pv = Mh | ~ (Xv | Ph);
+ Mv = Ph & Xv;
+ }
+ }
+ free(Phc);
+ return Score;
+}
+
+static int64_t myers1999_simple(uint8_t *s1, int64_t len1, uint8_t *s2, int64_t len2)
+{
+ uint64_t Peq[256];
+ uint64_t Eq, Xv, Xh, Ph, Mh, Pv, Mv, Last;
+ int64_t i;
+ int64_t Score = len2;
+
+ memset(Peq, 0, sizeof(Peq));
+
+ for (i = 0; i < len2; i++)
+ Peq[s2[i]] |= (uint64_t) 1 << i;
+
+ Mv = 0;
+ Pv = (uint64_t) -1;
+ Last = (uint64_t) 1 << (len2 - 1);
+
+ for (i = 0; i < len1; i++) {
+ Eq = Peq[s1[i]];
+
+ Xv = Eq | Mv;
+ Xh = (((Eq & Pv) + Pv) ^ Pv) | Eq;
+
+ Ph = Mv | ~ (Xh | Pv);
+ Mh = Pv & Xh;
+
+ if (Ph & Last) Score++;
+ if (Mh & Last) Score--;
+
+ Ph = (Ph << 1) | 1;
+ Mh = (Mh << 1);
+
+ Pv = Mh | ~ (Xv | Ph);
+ Mv = Ph & Xv;
+ }
+ return Score;
+}
+
+static int64_t myers1999(PyObject *o1, PyObject *o2)
+{
+ struct strbuf s1, s2;
+ struct blockmap map;
+ int64_t ret;
+
+ strbuf_init(&s1, o1);
+ strbuf_init(&s2, o2);
+
+ if (s1.len < s2.len)
+ return myers1999(o2, o1);
+
+ if (ISASCII(s1.kind) && ISASCII(s2.kind) && s2.len < 65)
+ return myers1999_simple(s1.ptr, s1.len, s2.ptr, s2.len);
+
+ if (blockmap_init(&map, &s2))
+ return -1;
+
+ ret = myers1999_block(&s1, &s2, &map);
+ blockmap_clear(&map);
+ return ret;
+}
+
+/*
+ * Interface functions
+ */
+static int64_t polyleven(PyObject *o1, PyObject *o2, int64_t k)
+{
+ int64_t len1, len2;
+
+ len1 = PyUnicode_GET_LENGTH(o1);
+ len2 = PyUnicode_GET_LENGTH(o2);
+
+ if (len1 < len2)
+ return polyleven(o2, o1, k);
+
+ if (k == 0)
+ return PyUnicode_Compare(o1, o2) ? 1 : 0;
+
+ if (0 < k && k < len1 - len2)
+ return k + 1;
+
+ if (len2 == 0)
+ return len1;
+
+ if (0 < k && k < 4)
+ return mbleven(o1, o2, k);
+
+ return myers1999(o1, o2);
+}
diff --git a/spacy/ml/callbacks.py b/spacy/ml/callbacks.py
index 18290b947..3b60ec2ab 100644
--- a/spacy/ml/callbacks.py
+++ b/spacy/ml/callbacks.py
@@ -89,11 +89,14 @@ def pipes_with_nvtx_range(
types.MethodType(nvtx_range_wrapper_for_pipe_method, pipe), func
)
- # Try to preserve the original function signature.
+ # We need to preserve the original function signature so that
+ # the original parameters are passed to pydantic for validation downstream.
try:
wrapped_func.__signature__ = inspect.signature(func) # type: ignore
except:
- pass
+ # Can fail for Cython methods that do not have bindings.
+ warnings.warn(Warnings.W122.format(method=name, pipe=pipe.name))
+ continue
try:
setattr(
diff --git a/spacy/ml/models/entity_linker.py b/spacy/ml/models/entity_linker.py
index d847342a3..299b6bb52 100644
--- a/spacy/ml/models/entity_linker.py
+++ b/spacy/ml/models/entity_linker.py
@@ -1,11 +1,12 @@
from pathlib import Path
from typing import Optional, Callable, Iterable, List, Tuple
from thinc.types import Floats2d
-from thinc.api import chain, clone, list2ragged, reduce_mean, residual
-from thinc.api import Model, Maxout, Linear, noop, tuplify, Ragged
+from thinc.api import chain, list2ragged, reduce_mean, residual
+from thinc.api import Model, Maxout, Linear, tuplify, Ragged
from ...util import registry
-from ...kb import KnowledgeBase, Candidate, get_candidates
+from ...kb import KnowledgeBase, InMemoryLookupKB
+from ...kb import Candidate, get_candidates, get_candidates_batch
from ...vocab import Vocab
from ...tokens import Span, Doc
from ..extract_spans import extract_spans
@@ -70,17 +71,18 @@ def span_maker_forward(model, docs: List[Doc], is_train) -> Tuple[Ragged, Callab
cands.append((start_token, end_token))
candidates.append(ops.asarray2i(cands))
- candlens = ops.asarray1i([len(cands) for cands in candidates])
- candidates = ops.xp.concatenate(candidates)
- outputs = Ragged(candidates, candlens)
+ lengths = model.ops.asarray1i([len(cands) for cands in candidates])
+ out = Ragged(model.ops.flatten(candidates), lengths)
# because this is just rearranging docs, the backprop does nothing
- return outputs, lambda x: []
+ return out, lambda x: []
@registry.misc("spacy.KBFromFile.v1")
-def load_kb(kb_path: Path) -> Callable[[Vocab], KnowledgeBase]:
- def kb_from_file(vocab):
- kb = KnowledgeBase(vocab, entity_vector_length=1)
+def load_kb(
+ kb_path: Path,
+) -> Callable[[Vocab], KnowledgeBase]:
+ def kb_from_file(vocab: Vocab):
+ kb = InMemoryLookupKB(vocab, entity_vector_length=1)
kb.from_disk(kb_path)
return kb
@@ -88,9 +90,11 @@ def load_kb(kb_path: Path) -> Callable[[Vocab], KnowledgeBase]:
@registry.misc("spacy.EmptyKB.v1")
-def empty_kb(entity_vector_length: int) -> Callable[[Vocab], KnowledgeBase]:
- def empty_kb_factory(vocab):
- return KnowledgeBase(vocab=vocab, entity_vector_length=entity_vector_length)
+def empty_kb(
+ entity_vector_length: int,
+) -> Callable[[Vocab], KnowledgeBase]:
+ def empty_kb_factory(vocab: Vocab):
+ return InMemoryLookupKB(vocab=vocab, entity_vector_length=entity_vector_length)
return empty_kb_factory
@@ -98,3 +102,10 @@ def empty_kb(entity_vector_length: int) -> Callable[[Vocab], KnowledgeBase]:
@registry.misc("spacy.CandidateGenerator.v1")
def create_candidates() -> Callable[[KnowledgeBase, Span], Iterable[Candidate]]:
return get_candidates
+
+
+@registry.misc("spacy.CandidateBatchGenerator.v1")
+def create_candidates_batch() -> Callable[
+ [KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]
+]:
+ return get_candidates_batch
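
Custom batched candidate generation can be plugged in through the registry; a sketch with a hypothetical registry name:

```python
from typing import Callable, Iterable

from spacy.kb import Candidate, KnowledgeBase, get_candidates_batch
from spacy.tokens import Span
from spacy.util import registry


@registry.misc("my_candidate_batch_generator.v1")  # hypothetical name
def create_batch_generator() -> Callable[
    [KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]
]:
    def get_batch(
        kb: KnowledgeBase, mentions: Iterable[Span]
    ) -> Iterable[Iterable[Candidate]]:
        # Delegate to the built-in batched lookup; a custom implementation
        # could batch queries against an external index here instead.
        return get_candidates_batch(kb, mentions)

    return get_batch
```

The entry can then be referenced from the `entity_linker` block of the config, e.g. `get_candidates_batch = {"@misc": "my_candidate_batch_generator.v1"}`.
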
diff --git a/spacy/ml/tb_framework.pyx b/spacy/ml/tb_framework.pyx
index 1ee9716dc..e5bc99e60 100644
--- a/spacy/ml/tb_framework.pyx
+++ b/spacy/ml/tb_framework.pyx
@@ -233,7 +233,7 @@ def _forward_greedy_cpu(model: Model, TransitionSystem moves, states: List[State
scores = _parse_batch(cblas, moves, &c_states[0], weights, sizes, actions=actions)
def backprop(dY):
- raise ValueError(Errors.E4001)
+ raise ValueError(Errors.E4002)
return (states, scores), backprop
diff --git a/spacy/pipeline/__init__.py b/spacy/pipeline/__init__.py
index 4744a989b..14dfed949 100644
--- a/spacy/pipeline/__init__.py
+++ b/spacy/pipeline/__init__.py
@@ -3,7 +3,6 @@ from .dep_parser import DependencyParser
from .edit_tree_lemmatizer import EditTreeLemmatizer
from .entity_linker import EntityLinker
from .ner import EntityRecognizer
-from .entity_ruler import EntityRuler
from .lemmatizer import Lemmatizer
from .morphologizer import Morphologizer
from .pipe import Pipe
@@ -23,7 +22,6 @@ __all__ = [
"DependencyParser",
"EntityLinker",
"EntityRecognizer",
- "EntityRuler",
"Morphologizer",
"Lemmatizer",
"MultiLabel_TextCategorizer",
diff --git a/spacy/pipeline/edit_tree_lemmatizer.py b/spacy/pipeline/edit_tree_lemmatizer.py
index 37aa9663b..2a2242aa4 100644
--- a/spacy/pipeline/edit_tree_lemmatizer.py
+++ b/spacy/pipeline/edit_tree_lemmatizer.py
@@ -1,13 +1,13 @@
-from typing import cast, Any, Callable, Dict, Iterable, List, Optional
-from typing import Sequence, Tuple, Union
+from typing import cast, Any, Callable, Dict, Iterable, List, Optional, Union
+from typing import Tuple
from collections import Counter
-from copy import deepcopy
from itertools import islice
import numpy as np
import srsly
-from thinc.api import Config, Model, SequenceCategoricalCrossentropy
+from thinc.api import Config, Model
from thinc.types import ArrayXd, Floats2d, Ints1d
+from thinc.legacy import LegacySequenceCategoricalCrossentropy
from ._edit_tree_internals.edit_trees import EditTrees
from ._edit_tree_internals.schemas import validate_edit_tree
@@ -130,7 +130,9 @@ class EditTreeLemmatizer(TrainablePipe):
self, examples: Iterable[Example], scores: List[Floats2d]
) -> Tuple[float, List[Floats2d]]:
validate_examples(examples, "EditTreeLemmatizer.get_loss")
- loss_func = SequenceCategoricalCrossentropy(normalize=False, missing_value=-1)
+ loss_func = LegacySequenceCategoricalCrossentropy(
+ normalize=False, missing_value=-1
+ )
truths = []
for eg in examples:
@@ -348,9 +350,9 @@ class EditTreeLemmatizer(TrainablePipe):
tree = dict(tree)
if "orig" in tree:
- tree["orig"] = self.vocab.strings[tree["orig"]]
+ tree["orig"] = self.vocab.strings.add(tree["orig"])
if "orig" in tree:
- tree["subst"] = self.vocab.strings[tree["subst"]]
+ tree["subst"] = self.vocab.strings.add(tree["subst"])
trees.append(tree)
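The switch from `self.vocab.strings[...]` to `self.vocab.strings.add(...)` matters when trees are deserialized into a fresh vocab: in current spaCy versions, looking a string up only computes its hash without interning the text, while `add` interns it and returns the same hash. A minimal standalone sketch (not part of this diff):

from spacy.strings import StringStore

strings = StringStore()
h = strings["walk"]          # computes the hash but does NOT intern the text
# strings[h] would fail here: the hash cannot be resolved back to "walk"
orth = strings.add("walk")   # interns the text and returns the same hash
assert strings[orth] == "walk"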
diff --git a/spacy/pipeline/entity_linker.py b/spacy/pipeline/entity_linker.py
index ac05cb840..19c355238 100644
--- a/spacy/pipeline/entity_linker.py
+++ b/spacy/pipeline/entity_linker.py
@@ -60,9 +60,11 @@ DEFAULT_NEL_MODEL = Config().from_str(default_model_config)["model"]
"incl_context": True,
"entity_vector_length": 64,
"get_candidates": {"@misc": "spacy.CandidateGenerator.v1"},
+ "get_candidates_batch": {"@misc": "spacy.CandidateBatchGenerator.v1"},
"overwrite": True,
"scorer": {"@scorers": "spacy.entity_linker_scorer.v1"},
"use_gold_ents": True,
+ "candidates_batch_size": 1,
"threshold": None,
"save_activations": False,
},
@@ -83,9 +85,13 @@ def make_entity_linker(
incl_context: bool,
entity_vector_length: int,
get_candidates: Callable[[KnowledgeBase, Span], Iterable[Candidate]],
+ get_candidates_batch: Callable[
+ [KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]
+ ],
overwrite: bool,
scorer: Optional[Callable],
use_gold_ents: bool,
+ candidates_batch_size: int,
threshold: Optional[float] = None,
save_activations: bool,
):
@@ -99,18 +105,22 @@ def make_entity_linker(
incl_prior (bool): Whether or not to include prior probabilities from the KB in the model.
incl_context (bool): Whether or not to include the local context in the model.
entity_vector_length (int): Size of encoding vectors in the KB.
- get_candidates (Callable[[KnowledgeBase, "Span"], Iterable[Candidate]]): Function that
+ get_candidates (Callable[[KnowledgeBase, Span], Iterable[Candidate]]): Function that
produces a list of candidates, given a certain knowledge base and a textual mention.
+    get_candidates_batch (
+        Callable[[KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]]
+    ): Function that produces a list of candidates per mention, given a certain knowledge base and several textual mentions.
scorer (Optional[Callable]): The scoring method.
use_gold_ents (bool): Whether to copy entities from gold docs or not. If false, another
component must provide entity annotations.
+ candidates_batch_size (int): Size of batches for entity candidate generation.
threshold (Optional[float]): Confidence threshold for entity predictions. If confidence is below the threshold,
prediction is discarded. If None, predictions are not filtered by any threshold.
save_activations (bool): save model activations in Doc when annotating.
"""
if not model.attrs.get("include_span_maker", False):
- # The only difference in arguments here is that use_gold_ents is not available
+ # The only difference in arguments here is that use_gold_ents and threshold aren't available.
return EntityLinker_v1(
nlp.vocab,
model,
@@ -134,9 +144,11 @@ def make_entity_linker(
incl_context=incl_context,
entity_vector_length=entity_vector_length,
get_candidates=get_candidates,
+ get_candidates_batch=get_candidates_batch,
overwrite=overwrite,
scorer=scorer,
use_gold_ents=use_gold_ents,
+ candidates_batch_size=candidates_batch_size,
threshold=threshold,
save_activations=save_activations,
)
@@ -171,9 +183,13 @@ class EntityLinker(TrainablePipe):
incl_context: bool,
entity_vector_length: int,
get_candidates: Callable[[KnowledgeBase, Span], Iterable[Candidate]],
+ get_candidates_batch: Callable[
+ [KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]
+ ],
overwrite: bool = BACKWARD_OVERWRITE,
scorer: Optional[Callable] = entity_linker_score,
use_gold_ents: bool,
+ candidates_batch_size: int,
threshold: Optional[float] = None,
save_activations: bool = False,
) -> None:
@@ -190,10 +206,14 @@ class EntityLinker(TrainablePipe):
entity_vector_length (int): Size of encoding vectors in the KB.
get_candidates (Callable[[KnowledgeBase, Span], Iterable[Candidate]]): Function that
produces a list of candidates, given a certain knowledge base and a textual mention.
- scorer (Optional[Callable]): The scoring method. Defaults to
- Scorer.score_links.
+        get_candidates_batch (
+            Callable[[KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]]
+        ): Function that produces a list of candidates per mention, given a certain knowledge base and several textual mentions.
+ scorer (Optional[Callable]): The scoring method. Defaults to Scorer.score_links.
use_gold_ents (bool): Whether to copy entities from gold docs or not. If false, another
component must provide entity annotations.
+ candidates_batch_size (int): Size of batches for entity candidate generation.
threshold (Optional[float]): Confidence threshold for entity predictions. If confidence is below the
threshold, prediction is discarded. If None, predictions are not filtered by any threshold.
DOCS: https://spacy.io/api/entitylinker#init
@@ -216,23 +236,28 @@ class EntityLinker(TrainablePipe):
self.incl_prior = incl_prior
self.incl_context = incl_context
self.get_candidates = get_candidates
+ self.get_candidates_batch = get_candidates_batch
self.cfg: Dict[str, Any] = {"overwrite": overwrite}
self.distance = CosineDistance(normalize=False)
# how many neighbour sentences to take into account
- # create an empty KB by default. If you want to load a predefined one, specify it in 'initialize'.
+ # create an empty KB by default
self.kb = empty_kb(entity_vector_length)(self.vocab)
self.scorer = scorer
self.use_gold_ents = use_gold_ents
+ self.candidates_batch_size = candidates_batch_size
self.threshold = threshold
self.save_activations = save_activations
+ if candidates_batch_size < 1:
+ raise ValueError(Errors.E1044)
+
def set_kb(self, kb_loader: Callable[[Vocab], KnowledgeBase]):
"""Define the KB of this pipe by providing a function that will
create it using this object's vocab."""
if not callable(kb_loader):
raise ValueError(Errors.E885.format(arg_type=type(kb_loader)))
- self.kb = kb_loader(self.vocab)
+ self.kb = kb_loader(self.vocab) # type: ignore
def validate_kb(self) -> None:
# Raise an error if the knowledge base is not initialized.
@@ -254,8 +279,8 @@ class EntityLinker(TrainablePipe):
get_examples (Callable[[], Iterable[Example]]): Function that
returns a representative sample of gold-standard Example objects.
nlp (Language): The current nlp object the component is part of.
- kb_loader (Callable[[Vocab], KnowledgeBase]): A function that creates a KnowledgeBase from a Vocab instance.
- Note that providing this argument, will overwrite all data accumulated in the current KB.
+ kb_loader (Callable[[Vocab], KnowledgeBase]): A function that creates a KnowledgeBase from a Vocab
+ instance. Note that providing this argument will overwrite all data accumulated in the current KB.
Use this only when loading a KB as-such from file.
DOCS: https://spacy.io/api/entitylinker#initialize
@@ -439,36 +464,50 @@ class EntityLinker(TrainablePipe):
docs_ents.append(Ragged(xp.zeros(0, dtype="uint64"), ops.alloc1i(0)))
continue
sentences = [s for s in doc.sents]
- # Looping through each entity (TODO: rewrite)
- for ent in doc.ents:
- sent_index = sentences.index(ent.sent)
- assert sent_index >= 0
- if self.incl_context:
- # get n_neighbour sentences, clipped to the length of the document
- start_sentence = max(0, sent_index - self.n_sents)
- end_sentence = min(len(sentences) - 1, sent_index + self.n_sents)
- start_token = sentences[start_sentence].start
- end_token = sentences[end_sentence].end
- sent_doc = doc[start_token:end_token].as_doc()
- # currently, the context is the same for each entity in a sentence (should be refined)
- sentence_encoding = self.model.predict([sent_doc])[0]
- sentence_encoding_t = sentence_encoding.T
- sentence_norm = xp.linalg.norm(sentence_encoding_t)
- entity_count += 1
- if ent.label_ in self.labels_discard:
- # ignoring this entity - setting to NIL
- final_kb_ids.append(self.NIL)
- self._add_activations(
- doc_scores=doc_scores,
- doc_ents=doc_ents,
- scores=[0.0],
- ents=[0],
+ # Loop over entities in batches.
+ for ent_idx in range(0, len(doc.ents), self.candidates_batch_size):
+ ent_batch = doc.ents[ent_idx : ent_idx + self.candidates_batch_size]
+
+ # Look up candidate entities.
+ valid_ent_idx = [
+ idx
+ for idx in range(len(ent_batch))
+ if ent_batch[idx].label_ not in self.labels_discard
+ ]
+
+ batch_candidates = list(
+ self.get_candidates_batch(
+ self.kb, [ent_batch[idx] for idx in valid_ent_idx]
)
- else:
- candidates = list(self.get_candidates(self.kb, ent))
- if not candidates:
- # no prediction possible for this entity - setting to NIL
+ if self.candidates_batch_size > 1
+ else [
+ self.get_candidates(self.kb, ent_batch[idx])
+ for idx in valid_ent_idx
+ ]
+ )
+
+            # Loop over each entity in the batch (TODO: rewrite)
+ for j, ent in enumerate(ent_batch):
+ sent_index = sentences.index(ent.sent)
+ assert sent_index >= 0
+
+ if self.incl_context:
+ # get n_neighbour sentences, clipped to the length of the document
+ start_sentence = max(0, sent_index - self.n_sents)
+ end_sentence = min(
+ len(sentences) - 1, sent_index + self.n_sents
+ )
+ start_token = sentences[start_sentence].start
+ end_token = sentences[end_sentence].end
+ sent_doc = doc[start_token:end_token].as_doc()
+ # currently, the context is the same for each entity in a sentence (should be refined)
+ sentence_encoding = self.model.predict([sent_doc])[0]
+ sentence_encoding_t = sentence_encoding.T
+ sentence_norm = xp.linalg.norm(sentence_encoding_t)
+ entity_count += 1
+ if ent.label_ in self.labels_discard:
+ # ignoring this entity - setting to NIL
final_kb_ids.append(self.NIL)
self._add_activations(
doc_scores=doc_scores,
@@ -476,53 +515,65 @@ class EntityLinker(TrainablePipe):
scores=[0.0],
ents=[0],
)
- elif len(candidates) == 1 and self.threshold is None:
- # shortcut for efficiency reasons: take the 1 candidate
- final_kb_ids.append(candidates[0].entity_)
- self._add_activations(
- doc_scores=doc_scores,
- doc_ents=doc_ents,
- scores=[1.0],
- ents=[candidates[0].entity_],
- )
else:
- random.shuffle(candidates)
- # set all prior probabilities to 0 if incl_prior=False
- prior_probs = xp.asarray([c.prior_prob for c in candidates])
- if not self.incl_prior:
- prior_probs = xp.asarray([0.0 for _ in candidates])
- scores = prior_probs
- # add in similarity from the context
- if self.incl_context:
- entity_encodings = xp.asarray(
- [c.entity_vector for c in candidates]
+ candidates = list(batch_candidates[j])
+ if not candidates:
+ # no prediction possible for this entity - setting to NIL
+ final_kb_ids.append(self.NIL)
+ self._add_activations(
+ doc_scores=doc_scores,
+ doc_ents=doc_ents,
+ scores=[0.0],
+ ents=[0],
)
- entity_norm = xp.linalg.norm(entity_encodings, axis=1)
- if len(entity_encodings) != len(prior_probs):
- raise RuntimeError(
- Errors.E147.format(
- method="predict",
- msg="vectors not of equal length",
- )
+ elif len(candidates) == 1 and self.threshold is None:
+ # shortcut for efficiency reasons: take the 1 candidate
+ final_kb_ids.append(candidates[0].entity_)
+ self._add_activations(
+ doc_scores=doc_scores,
+ doc_ents=doc_ents,
+ scores=[1.0],
+ ents=[candidates[0].entity_],
+ )
+ else:
+ random.shuffle(candidates)
+ # set all prior probabilities to 0 if incl_prior=False
+ prior_probs = xp.asarray([c.prior_prob for c in candidates])
+ if not self.incl_prior:
+ prior_probs = xp.asarray([0.0 for _ in candidates])
+ scores = prior_probs
+ # add in similarity from the context
+ if self.incl_context:
+ entity_encodings = xp.asarray(
+ [c.entity_vector for c in candidates]
)
- # cosine similarity
- sims = xp.dot(entity_encodings, sentence_encoding_t) / (
- sentence_norm * entity_norm
+ entity_norm = xp.linalg.norm(entity_encodings, axis=1)
+ if len(entity_encodings) != len(prior_probs):
+ raise RuntimeError(
+ Errors.E147.format(
+ method="predict",
+ msg="vectors not of equal length",
+ )
+ )
+ # cosine similarity
+ sims = xp.dot(entity_encodings, sentence_encoding_t) / (
+ sentence_norm * entity_norm
+ )
+ if sims.shape != prior_probs.shape:
+ raise ValueError(Errors.E161)
+ scores = prior_probs + sims - (prior_probs * sims)
+ final_kb_ids.append(
+ candidates[scores.argmax().item()].entity_
+ if self.threshold is None
+ or scores.max() >= self.threshold
+ else EntityLinker.NIL
+ )
+ self._add_activations(
+ doc_scores=doc_scores,
+ doc_ents=doc_ents,
+ scores=scores,
+ ents=[c.entity for c in candidates],
)
- if sims.shape != prior_probs.shape:
- raise ValueError(Errors.E161)
- scores = prior_probs + sims - (prior_probs * sims)
- final_kb_ids.append(
- candidates[scores.argmax().item()].entity_
- if self.threshold is None or scores.max() >= self.threshold
- else EntityLinker.NIL
- )
- self._add_activations(
- doc_scores=doc_scores,
- doc_ents=doc_ents,
- scores=scores,
- ents=[c.entity for c in candidates],
- )
self._add_doc_activations(
docs_scores=docs_scores,
docs_ents=docs_ents,
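Putting the new settings together, a hedged usage sketch (values are illustrative; with the default `candidates_batch_size` of 1, the per-mention `get_candidates` path is used, matching the previous behaviour):

import spacy

nlp = spacy.blank("en")
# candidates_batch_size > 1 routes candidate lookup through get_candidates_batch;
# the batch generator defaults to "spacy.CandidateBatchGenerator.v1" registered above.
entity_linker = nlp.add_pipe(
    "entity_linker",
    config={
        "candidates_batch_size": 4,
        "get_candidates_batch": {"@misc": "spacy.CandidateBatchGenerator.v1"},
    },
)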
diff --git a/spacy/pipeline/entity_ruler.py b/spacy/pipeline/entity_ruler.py
deleted file mode 100644
index 3cb1ca676..000000000
--- a/spacy/pipeline/entity_ruler.py
+++ /dev/null
@@ -1,526 +0,0 @@
-import warnings
-from typing import Optional, Union, List, Dict, Tuple, Iterable, Any, Callable, Sequence
-from typing import cast
-from collections import defaultdict
-from pathlib import Path
-import srsly
-
-from .pipe import Pipe
-from ..training import Example
-from ..language import Language
-from ..errors import Errors, Warnings
-from ..util import ensure_path, to_disk, from_disk, SimpleFrozenList, registry
-from ..tokens import Doc, Span
-from ..matcher import Matcher, PhraseMatcher
-from ..scorer import get_ner_prf
-
-
-DEFAULT_ENT_ID_SEP = "||"
-PatternType = Dict[str, Union[str, List[Dict[str, Any]]]]
-
-
-@Language.factory(
- "entity_ruler",
- assigns=["doc.ents", "token.ent_type", "token.ent_iob"],
- default_config={
- "phrase_matcher_attr": None,
- "validate": False,
- "overwrite_ents": False,
- "ent_id_sep": DEFAULT_ENT_ID_SEP,
- "scorer": {"@scorers": "spacy.entity_ruler_scorer.v1"},
- },
- default_score_weights={
- "ents_f": 1.0,
- "ents_p": 0.0,
- "ents_r": 0.0,
- "ents_per_type": None,
- },
-)
-def make_entity_ruler(
- nlp: Language,
- name: str,
- phrase_matcher_attr: Optional[Union[int, str]],
- validate: bool,
- overwrite_ents: bool,
- ent_id_sep: str,
- scorer: Optional[Callable],
-):
- return EntityRuler(
- nlp,
- name,
- phrase_matcher_attr=phrase_matcher_attr,
- validate=validate,
- overwrite_ents=overwrite_ents,
- ent_id_sep=ent_id_sep,
- scorer=scorer,
- )
-
-
-def entity_ruler_score(examples, **kwargs):
- return get_ner_prf(examples)
-
-
-@registry.scorers("spacy.entity_ruler_scorer.v1")
-def make_entity_ruler_scorer():
- return entity_ruler_score
-
-
-class EntityRuler(Pipe):
- """The EntityRuler lets you add spans to the `Doc.ents` using token-based
- rules or exact phrase matches. It can be combined with the statistical
- `EntityRecognizer` to boost accuracy, or used on its own to implement a
- purely rule-based entity recognition system. After initialization, the
- component is typically added to the pipeline using `nlp.add_pipe`.
-
- DOCS: https://spacy.io/api/entityruler
- USAGE: https://spacy.io/usage/rule-based-matching#entityruler
- """
-
- def __init__(
- self,
- nlp: Language,
- name: str = "entity_ruler",
- *,
- phrase_matcher_attr: Optional[Union[int, str]] = None,
- validate: bool = False,
- overwrite_ents: bool = False,
- ent_id_sep: str = DEFAULT_ENT_ID_SEP,
- patterns: Optional[List[PatternType]] = None,
- scorer: Optional[Callable] = entity_ruler_score,
- ) -> None:
- """Initialize the entity ruler. If patterns are supplied here, they
- need to be a list of dictionaries with a `"label"` and `"pattern"`
- key. A pattern can either be a token pattern (list) or a phrase pattern
- (string). For example: `{'label': 'ORG', 'pattern': 'Apple'}`.
-
- nlp (Language): The shared nlp object to pass the vocab to the matchers
- and process phrase patterns.
- name (str): Instance name of the current pipeline component. Typically
- passed in automatically from the factory when the component is
- added. Used to disable the current entity ruler while creating
- phrase patterns with the nlp object.
- phrase_matcher_attr (int / str): Token attribute to match on, passed
- to the internal PhraseMatcher as `attr`
- validate (bool): Whether patterns should be validated, passed to
- Matcher and PhraseMatcher as `validate`
- patterns (iterable): Optional patterns to load in.
- overwrite_ents (bool): If existing entities are present, e.g. entities
- added by the model, overwrite them by matches if necessary.
- ent_id_sep (str): Separator used internally for entity IDs.
- scorer (Optional[Callable]): The scoring method. Defaults to
- spacy.scorer.get_ner_prf.
-
- DOCS: https://spacy.io/api/entityruler#init
- """
- self.nlp = nlp
- self.name = name
- self.overwrite = overwrite_ents
- self.token_patterns = defaultdict(list) # type: ignore
- self.phrase_patterns = defaultdict(list) # type: ignore
- self._validate = validate
- self.matcher = Matcher(nlp.vocab, validate=validate)
- self.phrase_matcher_attr = phrase_matcher_attr
- self.phrase_matcher = PhraseMatcher(
- nlp.vocab, attr=self.phrase_matcher_attr, validate=validate
- )
- self.ent_id_sep = ent_id_sep
- self._ent_ids = defaultdict(tuple) # type: ignore
- if patterns is not None:
- self.add_patterns(patterns)
- self.scorer = scorer
-
- def __len__(self) -> int:
- """The number of all patterns added to the entity ruler."""
- n_token_patterns = sum(len(p) for p in self.token_patterns.values())
- n_phrase_patterns = sum(len(p) for p in self.phrase_patterns.values())
- return n_token_patterns + n_phrase_patterns
-
- def __contains__(self, label: str) -> bool:
- """Whether a label is present in the patterns."""
- return label in self.token_patterns or label in self.phrase_patterns
-
- def __call__(self, doc: Doc) -> Doc:
- """Find matches in document and add them as entities.
-
- doc (Doc): The Doc object in the pipeline.
- RETURNS (Doc): The Doc with added entities, if available.
-
- DOCS: https://spacy.io/api/entityruler#call
- """
- error_handler = self.get_error_handler()
- try:
- matches = self.match(doc)
- self.set_annotations(doc, matches)
- return doc
- except Exception as e:
- return error_handler(self.name, self, [doc], e)
-
- def match(self, doc: Doc):
- self._require_patterns()
- with warnings.catch_warnings():
- warnings.filterwarnings("ignore", message="\\[W036")
- matches = list(self.matcher(doc)) + list(self.phrase_matcher(doc))
-
- final_matches = set(
- [(m_id, start, end) for m_id, start, end in matches if start != end]
- )
- get_sort_key = lambda m: (m[2] - m[1], -m[1])
- final_matches = sorted(final_matches, key=get_sort_key, reverse=True)
- return final_matches
-
- def set_annotations(self, doc, matches):
- """Modify the document in place"""
- entities = list(doc.ents)
- new_entities = []
- seen_tokens = set()
- for match_id, start, end in matches:
- if any(t.ent_type for t in doc[start:end]) and not self.overwrite:
- continue
- # check for end - 1 here because boundaries are inclusive
- if start not in seen_tokens and end - 1 not in seen_tokens:
- if match_id in self._ent_ids:
- label, ent_id = self._ent_ids[match_id]
- span = Span(doc, start, end, label=label, span_id=ent_id)
- else:
- span = Span(doc, start, end, label=match_id)
- new_entities.append(span)
- entities = [
- e for e in entities if not (e.start < end and e.end > start)
- ]
- seen_tokens.update(range(start, end))
- doc.ents = entities + new_entities
-
- @property
- def labels(self) -> Tuple[str, ...]:
- """All labels present in the match patterns.
-
- RETURNS (set): The string labels.
-
- DOCS: https://spacy.io/api/entityruler#labels
- """
- keys = set(self.token_patterns.keys())
- keys.update(self.phrase_patterns.keys())
- all_labels = set()
-
- for l in keys:
- if self.ent_id_sep in l:
- label, _ = self._split_label(l)
- all_labels.add(label)
- else:
- all_labels.add(l)
- return tuple(sorted(all_labels))
-
- def initialize(
- self,
- get_examples: Callable[[], Iterable[Example]],
- *,
- nlp: Optional[Language] = None,
- patterns: Optional[Sequence[PatternType]] = None,
- ):
- """Initialize the pipe for training.
-
- get_examples (Callable[[], Iterable[Example]]): Function that
- returns a representative sample of gold-standard Example objects.
- nlp (Language): The current nlp object the component is part of.
- patterns Optional[Iterable[PatternType]]: The list of patterns.
-
- DOCS: https://spacy.io/api/entityruler#initialize
- """
- self.clear()
- if patterns:
- self.add_patterns(patterns) # type: ignore[arg-type]
-
- @property
- def ent_ids(self) -> Tuple[Optional[str], ...]:
- """All entity ids present in the match patterns `id` properties
-
- RETURNS (set): The string entity ids.
-
- DOCS: https://spacy.io/api/entityruler#ent_ids
- """
- keys = set(self.token_patterns.keys())
- keys.update(self.phrase_patterns.keys())
- all_ent_ids = set()
-
- for l in keys:
- if self.ent_id_sep in l:
- _, ent_id = self._split_label(l)
- all_ent_ids.add(ent_id)
- return tuple(all_ent_ids)
-
- @property
- def patterns(self) -> List[PatternType]:
- """Get all patterns that were added to the entity ruler.
-
- RETURNS (list): The original patterns, one dictionary per pattern.
-
- DOCS: https://spacy.io/api/entityruler#patterns
- """
- all_patterns = []
- for label, patterns in self.token_patterns.items():
- for pattern in patterns:
- ent_label, ent_id = self._split_label(label)
- p = {"label": ent_label, "pattern": pattern}
- if ent_id:
- p["id"] = ent_id
- all_patterns.append(p)
- for label, patterns in self.phrase_patterns.items():
- for pattern in patterns:
- ent_label, ent_id = self._split_label(label)
- p = {"label": ent_label, "pattern": pattern.text}
- if ent_id:
- p["id"] = ent_id
- all_patterns.append(p)
- return all_patterns
-
- def add_patterns(self, patterns: List[PatternType]) -> None:
- """Add patterns to the entity ruler. A pattern can either be a token
- pattern (list of dicts) or a phrase pattern (string). For example:
- {'label': 'ORG', 'pattern': 'Apple'}
- {'label': 'GPE', 'pattern': [{'lower': 'san'}, {'lower': 'francisco'}]}
-
- patterns (list): The patterns to add.
-
- DOCS: https://spacy.io/api/entityruler#add_patterns
- """
-
- # disable the nlp components after this one in case they hadn't been initialized / deserialised yet
- try:
- current_index = -1
- for i, (name, pipe) in enumerate(self.nlp.pipeline):
- if self == pipe:
- current_index = i
- break
- subsequent_pipes = [pipe for pipe in self.nlp.pipe_names[current_index:]]
- except ValueError:
- subsequent_pipes = []
- with self.nlp.select_pipes(disable=subsequent_pipes):
- token_patterns = []
- phrase_pattern_labels = []
- phrase_pattern_texts = []
- phrase_pattern_ids = []
- for entry in patterns:
- if isinstance(entry["pattern"], str):
- phrase_pattern_labels.append(entry["label"])
- phrase_pattern_texts.append(entry["pattern"])
- phrase_pattern_ids.append(entry.get("id"))
- elif isinstance(entry["pattern"], list):
- token_patterns.append(entry)
- phrase_patterns = []
- for label, pattern, ent_id in zip(
- phrase_pattern_labels,
- self.nlp.pipe(phrase_pattern_texts),
- phrase_pattern_ids,
- ):
- phrase_pattern = {"label": label, "pattern": pattern}
- if ent_id:
- phrase_pattern["id"] = ent_id
- phrase_patterns.append(phrase_pattern)
- for entry in token_patterns + phrase_patterns: # type: ignore[operator]
- label = entry["label"]
- if "id" in entry:
- ent_label = label
- label = self._create_label(label, entry["id"])
- key = self.matcher._normalize_key(label)
- self._ent_ids[key] = (ent_label, entry["id"])
- pattern = entry["pattern"] # type: ignore
- if isinstance(pattern, Doc):
- self.phrase_patterns[label].append(pattern)
- self.phrase_matcher.add(label, [pattern]) # type: ignore
- elif isinstance(pattern, list):
- self.token_patterns[label].append(pattern)
- self.matcher.add(label, [pattern])
- else:
- raise ValueError(Errors.E097.format(pattern=pattern))
-
- def clear(self) -> None:
- """Reset all patterns."""
- self.token_patterns = defaultdict(list)
- self.phrase_patterns = defaultdict(list)
- self._ent_ids = defaultdict(tuple)
- self.matcher = Matcher(self.nlp.vocab, validate=self._validate)
- self.phrase_matcher = PhraseMatcher(
- self.nlp.vocab, attr=self.phrase_matcher_attr, validate=self._validate
- )
-
- def remove(self, ent_id: str) -> None:
- """Remove a pattern by its ent_id if a pattern with this ent_id was added before
-
- ent_id (str): id of the pattern to be removed
- RETURNS: None
- DOCS: https://spacy.io/api/entityruler#remove
- """
- label_id_pairs = [
- (label, eid) for (label, eid) in self._ent_ids.values() if eid == ent_id
- ]
- if not label_id_pairs:
- raise ValueError(
- Errors.E1024.format(attr_type="ID", label=ent_id, component=self.name)
- )
- created_labels = [
- self._create_label(label, eid) for (label, eid) in label_id_pairs
- ]
- # remove the patterns from self.phrase_patterns
- self.phrase_patterns = defaultdict(
- list,
- {
- label: val
- for (label, val) in self.phrase_patterns.items()
- if label not in created_labels
- },
- )
- # remove the patterns from self.token_pattern
- self.token_patterns = defaultdict(
- list,
- {
- label: val
- for (label, val) in self.token_patterns.items()
- if label not in created_labels
- },
- )
- # remove the patterns from self.token_pattern
- for label in created_labels:
- if label in self.phrase_matcher:
- self.phrase_matcher.remove(label)
- else:
- self.matcher.remove(label)
-
- def _require_patterns(self) -> None:
- """Raise a warning if this component has no patterns defined."""
- if len(self) == 0:
- warnings.warn(Warnings.W036.format(name=self.name))
-
- def _split_label(self, label: str) -> Tuple[str, Optional[str]]:
- """Split Entity label into ent_label and ent_id if it contains self.ent_id_sep
-
- label (str): The value of label in a pattern entry
- RETURNS (tuple): ent_label, ent_id
- """
- if self.ent_id_sep in label:
- ent_label, ent_id = label.rsplit(self.ent_id_sep, 1)
- else:
- ent_label = label
- ent_id = None # type: ignore
- return ent_label, ent_id
-
- def _create_label(self, label: Any, ent_id: Any) -> str:
- """Join Entity label with ent_id if the pattern has an `id` attribute
- If ent_id is not a string, the label is returned as is.
-
- label (str): The label to set for ent.label_
- ent_id (str): The label
- RETURNS (str): The ent_label joined with configured `ent_id_sep`
- """
- if isinstance(ent_id, str):
- label = f"{label}{self.ent_id_sep}{ent_id}"
- return label
-
- def from_bytes(
- self, patterns_bytes: bytes, *, exclude: Iterable[str] = SimpleFrozenList()
- ) -> "EntityRuler":
- """Load the entity ruler from a bytestring.
-
- patterns_bytes (bytes): The bytestring to load.
- RETURNS (EntityRuler): The loaded entity ruler.
-
- DOCS: https://spacy.io/api/entityruler#from_bytes
- """
- cfg = srsly.msgpack_loads(patterns_bytes)
- self.clear()
- if isinstance(cfg, dict):
- self.add_patterns(cfg.get("patterns", cfg))
- self.overwrite = cfg.get("overwrite", False)
- self.phrase_matcher_attr = cfg.get("phrase_matcher_attr", None)
- self.phrase_matcher = PhraseMatcher(
- self.nlp.vocab, attr=self.phrase_matcher_attr
- )
- self.ent_id_sep = cfg.get("ent_id_sep", DEFAULT_ENT_ID_SEP)
- else:
- self.add_patterns(cfg)
- return self
-
- def to_bytes(self, *, exclude: Iterable[str] = SimpleFrozenList()) -> bytes:
- """Serialize the entity ruler patterns to a bytestring.
-
- RETURNS (bytes): The serialized patterns.
-
- DOCS: https://spacy.io/api/entityruler#to_bytes
- """
- serial = {
- "overwrite": self.overwrite,
- "ent_id_sep": self.ent_id_sep,
- "phrase_matcher_attr": self.phrase_matcher_attr,
- "patterns": self.patterns,
- }
- return srsly.msgpack_dumps(serial)
-
- def from_disk(
- self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList()
- ) -> "EntityRuler":
- """Load the entity ruler from a file. Expects a file containing
- newline-delimited JSON (JSONL) with one entry per line.
-
- path (str / Path): The JSONL file to load.
- RETURNS (EntityRuler): The loaded entity ruler.
-
- DOCS: https://spacy.io/api/entityruler#from_disk
- """
- path = ensure_path(path)
- self.clear()
- depr_patterns_path = path.with_suffix(".jsonl")
- if path.suffix == ".jsonl": # user provides a jsonl
- if path.is_file:
- patterns = srsly.read_jsonl(path)
- self.add_patterns(patterns)
- else:
- raise ValueError(Errors.E1023.format(path=path))
- elif depr_patterns_path.is_file():
- patterns = srsly.read_jsonl(depr_patterns_path)
- self.add_patterns(patterns)
- elif path.is_dir(): # path is a valid directory
- cfg = {}
- deserializers_patterns = {
- "patterns": lambda p: self.add_patterns(
- srsly.read_jsonl(p.with_suffix(".jsonl"))
- )
- }
- deserializers_cfg = {"cfg": lambda p: cfg.update(srsly.read_json(p))}
- from_disk(path, deserializers_cfg, {})
- self.overwrite = cfg.get("overwrite", False)
- self.phrase_matcher_attr = cfg.get("phrase_matcher_attr")
- self.ent_id_sep = cfg.get("ent_id_sep", DEFAULT_ENT_ID_SEP)
-
- self.phrase_matcher = PhraseMatcher(
- self.nlp.vocab, attr=self.phrase_matcher_attr
- )
- from_disk(path, deserializers_patterns, {})
- else: # path is not a valid directory or file
- raise ValueError(Errors.E146.format(path=path))
- return self
-
- def to_disk(
- self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList()
- ) -> None:
- """Save the entity ruler patterns to a directory. The patterns will be
- saved as newline-delimited JSON (JSONL).
-
- path (str / Path): The JSONL file to save.
-
- DOCS: https://spacy.io/api/entityruler#to_disk
- """
- path = ensure_path(path)
- cfg = {
- "overwrite": self.overwrite,
- "phrase_matcher_attr": self.phrase_matcher_attr,
- "ent_id_sep": self.ent_id_sep,
- }
- serializers = {
- "patterns": lambda p: srsly.write_jsonl(
- p.with_suffix(".jsonl"), self.patterns
- ),
- "cfg": lambda p: srsly.write_json(p, cfg),
- }
- if path.suffix == ".jsonl": # user wants to save only JSONL
- srsly.write_jsonl(path, self.patterns)
- else:
- to_disk(path, serializers, {})
diff --git a/spacy/pipeline/legacy/entity_linker.py b/spacy/pipeline/legacy/entity_linker.py
index 2f8a1f8ea..c14dfa1db 100644
--- a/spacy/pipeline/legacy/entity_linker.py
+++ b/spacy/pipeline/legacy/entity_linker.py
@@ -68,8 +68,7 @@ class EntityLinker_v1(TrainablePipe):
entity_vector_length (int): Size of encoding vectors in the KB.
get_candidates (Callable[[KnowledgeBase, Span], Iterable[Candidate]]): Function that
produces a list of candidates, given a certain knowledge base and a textual mention.
- scorer (Optional[Callable]): The scoring method. Defaults to
- Scorer.score_links.
+ scorer (Optional[Callable]): The scoring method. Defaults to Scorer.score_links.
DOCS: https://spacy.io/api/entitylinker#init
"""
self.vocab = vocab
@@ -115,7 +114,7 @@ class EntityLinker_v1(TrainablePipe):
get_examples (Callable[[], Iterable[Example]]): Function that
returns a representative sample of gold-standard Example objects.
nlp (Language): The current nlp object the component is part of.
- kb_loader (Callable[[Vocab], KnowledgeBase]): A function that creates a KnowledgeBase from a Vocab instance.
+ kb_loader (Callable[[Vocab], KnowledgeBase]): A function that creates an InMemoryLookupKB from a Vocab instance.
Note that providing this argument, will overwrite all data accumulated in the current KB.
Use this only when loading a KB as-such from file.
diff --git a/spacy/pipeline/morphologizer.pyx b/spacy/pipeline/morphologizer.pyx
index 782a1dabe..293add9e1 100644
--- a/spacy/pipeline/morphologizer.pyx
+++ b/spacy/pipeline/morphologizer.pyx
@@ -1,7 +1,8 @@
# cython: infer_types=True, profile=True, binding=True
from typing import Callable, Dict, Iterable, List, Optional, Union
import srsly
-from thinc.api import SequenceCategoricalCrossentropy, Model, Config
+from thinc.api import Model, Config
+from thinc.legacy import LegacySequenceCategoricalCrossentropy
from thinc.types import Floats2d, Ints1d
from itertools import islice
@@ -290,7 +291,7 @@ class Morphologizer(Tagger):
DOCS: https://spacy.io/api/morphologizer#get_loss
"""
validate_examples(examples, "Morphologizer.get_loss")
- loss_func = SequenceCategoricalCrossentropy(names=tuple(self.labels), normalize=False)
+ loss_func = LegacySequenceCategoricalCrossentropy(names=tuple(self.labels), normalize=False)
truths = []
for eg in examples:
eg_truths = []
diff --git a/spacy/pipeline/multitask.pyx b/spacy/pipeline/multitask.pyx
deleted file mode 100644
index 8c44061e2..000000000
--- a/spacy/pipeline/multitask.pyx
+++ /dev/null
@@ -1,221 +0,0 @@
-# cython: infer_types=True, profile=True, binding=True
-from typing import Optional
-import numpy
-from thinc.api import CosineDistance, to_categorical, Model, Config
-from thinc.api import set_dropout_rate
-
-from ..tokens.doc cimport Doc
-
-from .trainable_pipe import TrainablePipe
-from .tagger import Tagger
-from ..training import validate_examples
-from ..language import Language
-from ._parser_internals import nonproj
-from ..attrs import POS, ID
-from ..errors import Errors
-
-
-default_model_config = """
-[model]
-@architectures = "spacy.MultiTask.v1"
-maxout_pieces = 3
-token_vector_width = 96
-
-[model.tok2vec]
-@architectures = "spacy.HashEmbedCNN.v2"
-pretrained_vectors = null
-width = 96
-depth = 4
-embed_size = 2000
-window_size = 1
-maxout_pieces = 2
-subword_features = true
-"""
-DEFAULT_MT_MODEL = Config().from_str(default_model_config)["model"]
-
-
-@Language.factory(
- "nn_labeller",
- default_config={"labels": None, "target": "dep_tag_offset", "model": DEFAULT_MT_MODEL}
-)
-def make_nn_labeller(nlp: Language, name: str, model: Model, labels: Optional[dict], target: str):
- return MultitaskObjective(nlp.vocab, model, name)
-
-
-class MultitaskObjective(Tagger):
- """Experimental: Assist training of a parser or tagger, by training a
- side-objective.
- """
-
- def __init__(self, vocab, model, name="nn_labeller", *, target):
- self.vocab = vocab
- self.model = model
- self.name = name
- if target == "dep":
- self.make_label = self.make_dep
- elif target == "tag":
- self.make_label = self.make_tag
- elif target == "ent":
- self.make_label = self.make_ent
- elif target == "dep_tag_offset":
- self.make_label = self.make_dep_tag_offset
- elif target == "ent_tag":
- self.make_label = self.make_ent_tag
- elif target == "sent_start":
- self.make_label = self.make_sent_start
- elif hasattr(target, "__call__"):
- self.make_label = target
- else:
- raise ValueError(Errors.E016)
- cfg = {"labels": {}, "target": target}
- self.cfg = dict(cfg)
-
- @property
- def labels(self):
- return self.cfg.setdefault("labels", {})
-
- @labels.setter
- def labels(self, value):
- self.cfg["labels"] = value
-
- def set_annotations(self, docs, dep_ids):
- pass
-
- def initialize(self, get_examples, nlp=None, labels=None):
- if not hasattr(get_examples, "__call__"):
- err = Errors.E930.format(name="MultitaskObjective", obj=type(get_examples))
- raise ValueError(err)
- if labels is not None:
- self.labels = labels
- else:
- for example in get_examples():
- for token in example.y:
- label = self.make_label(token)
- if label is not None and label not in self.labels:
- self.labels[label] = len(self.labels)
- self.model.initialize() # TODO: fix initialization by defining X and Y
-
- def predict(self, docs):
- tokvecs = self.model.get_ref("tok2vec")(docs)
- scores = self.model.get_ref("softmax")(tokvecs)
- return tokvecs, scores
-
- def get_loss(self, examples, scores):
- cdef int idx = 0
- correct = numpy.zeros((scores.shape[0],), dtype="i")
- guesses = scores.argmax(axis=1)
- docs = [eg.predicted for eg in examples]
- for i, eg in enumerate(examples):
- # Handles alignment for tokenization differences
- doc_annots = eg.get_aligned() # TODO
- for j in range(len(eg.predicted)):
- tok_annots = {key: values[j] for key, values in tok_annots.items()}
- label = self.make_label(j, tok_annots)
- if label is None or label not in self.labels:
- correct[idx] = guesses[idx]
- else:
- correct[idx] = self.labels[label]
- idx += 1
- correct = self.model.ops.xp.array(correct, dtype="i")
- d_scores = scores - to_categorical(correct, n_classes=scores.shape[1])
- loss = (d_scores**2).sum()
- return float(loss), d_scores
-
- @staticmethod
- def make_dep(token):
- return token.dep_
-
- @staticmethod
- def make_tag(token):
- return token.tag_
-
- @staticmethod
- def make_ent(token):
- if token.ent_iob_ == "O":
- return "O"
- else:
- return token.ent_iob_ + "-" + token.ent_type_
-
- @staticmethod
- def make_dep_tag_offset(token):
- dep = token.dep_
- tag = token.tag_
- offset = token.head.i - token.i
- offset = min(offset, 2)
- offset = max(offset, -2)
- return f"{dep}-{tag}:{offset}"
-
- @staticmethod
- def make_ent_tag(token):
- if token.ent_iob_ == "O":
- ent = "O"
- else:
- ent = token.ent_iob_ + "-" + token.ent_type_
- tag = token.tag_
- return f"{tag}-{ent}"
-
- @staticmethod
- def make_sent_start(token):
- """A multi-task objective for representing sentence boundaries,
- using BILU scheme. (O is impossible)
- """
- if token.is_sent_start and token.is_sent_end:
- return "U-SENT"
- elif token.is_sent_start:
- return "B-SENT"
- else:
- return "I-SENT"
-
-
-class ClozeMultitask(TrainablePipe):
- def __init__(self, vocab, model, **cfg):
- self.vocab = vocab
- self.model = model
- self.cfg = cfg
- self.distance = CosineDistance(ignore_zeros=True, normalize=False) # TODO: in config
-
- def set_annotations(self, docs, dep_ids):
- pass
-
- def initialize(self, get_examples, nlp=None):
- self.model.initialize() # TODO: fix initialization by defining X and Y
- X = self.model.ops.alloc((5, self.model.get_ref("tok2vec").get_dim("nO")))
- self.model.output_layer.initialize(X)
-
- def predict(self, docs):
- tokvecs = self.model.get_ref("tok2vec")(docs)
- vectors = self.model.get_ref("output_layer")(tokvecs)
- return tokvecs, vectors
-
- def get_loss(self, examples, vectors, prediction):
- validate_examples(examples, "ClozeMultitask.get_loss")
- # The simplest way to implement this would be to vstack the
- # token.vector values, but that's a bit inefficient, especially on GPU.
- # Instead we fetch the index into the vectors table for each of our tokens,
- # and look them up all at once. This prevents data copying.
- ids = self.model.ops.flatten([eg.predicted.to_array(ID).ravel() for eg in examples])
- target = vectors[ids]
- gradient = self.distance.get_grad(prediction, target)
- loss = self.distance.get_loss(prediction, target)
- return float(loss), gradient
-
- def update(self, examples, *, drop=0., sgd=None, losses=None):
- pass
-
- def rehearse(self, examples, drop=0., sgd=None, losses=None):
- if losses is not None and self.name not in losses:
- losses[self.name] = 0.
- set_dropout_rate(self.model, drop)
- validate_examples(examples, "ClozeMultitask.rehearse")
- docs = [eg.predicted for eg in examples]
- predictions, bp_predictions = self.model.begin_update()
- loss, d_predictions = self.get_loss(examples, self.vocab.vectors.data, predictions)
- bp_predictions(d_predictions)
- if sgd is not None:
- self.finish_update(sgd)
- if losses is not None:
- losses[self.name] += loss
- return losses
-
- def add_label(self, label):
- raise NotImplementedError
diff --git a/spacy/pipeline/pipe.pyx b/spacy/pipeline/pipe.pyx
index 4e3ae1cf0..c5650382b 100644
--- a/spacy/pipeline/pipe.pyx
+++ b/spacy/pipeline/pipe.pyx
@@ -1,4 +1,4 @@
-# cython: infer_types=True, profile=True
+# cython: infer_types=True, profile=True, binding=True
from typing import Optional, Tuple, Iterable, Iterator, Callable, Union, Dict
import srsly
import warnings
@@ -19,13 +19,6 @@ cdef class Pipe:
DOCS: https://spacy.io/api/pipe
"""
- @classmethod
- def __init_subclass__(cls, **kwargs):
- """Raise a warning if an inheriting class implements 'begin_training'
- (from v2) instead of the new 'initialize' method (from v3)"""
- if hasattr(cls, "begin_training"):
- warnings.warn(Warnings.W088.format(name=cls.__name__))
-
def __call__(self, Doc doc) -> Doc:
"""Apply the pipe to one document. The document is modified in place,
and returned. This usually happens under the hood when the nlp object
diff --git a/spacy/pipeline/senter.pyx b/spacy/pipeline/senter.pyx
index 93a7ee796..42feeb277 100644
--- a/spacy/pipeline/senter.pyx
+++ b/spacy/pipeline/senter.pyx
@@ -3,7 +3,9 @@ from typing import Dict, Iterable, Optional, Callable, List, Union
from itertools import islice
import srsly
-from thinc.api import Model, SequenceCategoricalCrossentropy, Config
+from thinc.api import Model, Config
+from thinc.legacy import LegacySequenceCategoricalCrossentropy
+
from thinc.types import Floats2d, Ints1d
from ..tokens.doc cimport Doc
@@ -161,7 +163,7 @@ class SentenceRecognizer(Tagger):
"""
validate_examples(examples, "SentenceRecognizer.get_loss")
labels = self.labels
- loss_func = SequenceCategoricalCrossentropy(names=labels, normalize=False)
+ loss_func = LegacySequenceCategoricalCrossentropy(names=labels, normalize=False)
truths = []
for eg in examples:
eg_truth = []
diff --git a/spacy/pipeline/span_ruler.py b/spacy/pipeline/span_ruler.py
index 807a4ffe5..0641d9c7d 100644
--- a/spacy/pipeline/span_ruler.py
+++ b/spacy/pipeline/span_ruler.py
@@ -11,7 +11,7 @@ from ..language import Language
from ..errors import Errors, Warnings
from ..util import ensure_path, SimpleFrozenList, registry
from ..tokens import Doc, Span
-from ..scorer import Scorer
+from ..scorer import Scorer, get_ner_prf
from ..matcher import Matcher, PhraseMatcher
from .. import util
@@ -20,7 +20,7 @@ DEFAULT_SPANS_KEY = "ruler"
@Language.factory(
- "future_entity_ruler",
+ "entity_ruler",
assigns=["doc.ents"],
default_config={
"phrase_matcher_attr": None,
@@ -63,6 +63,15 @@ def make_entity_ruler(
)
+def entity_ruler_score(examples, **kwargs):
+ return get_ner_prf(examples)
+
+
+@registry.scorers("spacy.entity_ruler_scorer.v1")
+def make_entity_ruler_scorer():
+ return entity_ruler_score
+
+
@Language.factory(
"span_ruler",
assigns=["doc.spans"],
@@ -117,7 +126,7 @@ def prioritize_new_ents_filter(
) -> List[Span]:
"""Merge entities and spans into one list without overlaps by allowing
spans to overwrite any entities that they overlap with. Intended to
- replicate the overwrite_ents=True behavior from the EntityRuler.
+ replicate the overwrite_ents=True behavior from the v3 EntityRuler.
entities (Iterable[Span]): The entities, already filtered for overlaps.
spans (Iterable[Span]): The spans to merge, may contain overlaps.
@@ -148,7 +157,7 @@ def prioritize_existing_ents_filter(
) -> List[Span]:
"""Merge entities and spans into one list without overlaps by prioritizing
existing entities. Intended to replicate the overwrite_ents=False behavior
- from the EntityRuler.
+ from the v3 EntityRuler.
entities (Iterable[Span]): The entities, already filtered for overlaps.
spans (Iterable[Span]): The spans to merge, may contain overlaps.
@@ -170,7 +179,7 @@ def prioritize_existing_ents_filter(
@registry.misc("spacy.prioritize_existing_ents_filter.v1")
-def make_preverse_existing_ents_filter():
+def make_preserve_existing_ents_filter():
return prioritize_existing_ents_filter
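With the "entity_ruler" factory now provided by span_ruler.py, existing user code should keep working unchanged; a minimal sketch (pattern contents chosen for illustration):

import spacy

nlp = spacy.blank("en")
# The "entity_ruler" factory is now backed by the SpanRuler implementation,
# but the pattern format and the doc.ents behaviour stay the same.
ruler = nlp.add_pipe("entity_ruler")
ruler.add_patterns([
    {"label": "ORG", "pattern": "Apple"},
    {"label": "GPE", "pattern": [{"LOWER": "san"}, {"LOWER": "francisco"}]},
])
doc = nlp("Apple opened a store in San Francisco.")
print([(ent.text, ent.label_) for ent in doc.ents])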
diff --git a/spacy/pipeline/spancat.py b/spacy/pipeline/spancat.py
index c517991f5..7a875dda9 100644
--- a/spacy/pipeline/spancat.py
+++ b/spacy/pipeline/spancat.py
@@ -2,7 +2,7 @@ from typing import List, Dict, Callable, Tuple, Optional, Iterable, Any, cast
from typing import Union
from thinc.api import Config, Model, get_current_ops, set_dropout_rate, Ops
from thinc.api import Optimizer
-from thinc.types import Ragged, Ints2d, Floats2d, Ints1d
+from thinc.types import Ragged, Ints2d, Floats2d
import numpy
@@ -30,17 +30,17 @@ scorer = {"@layers": "spacy.LinearLogistic.v1"}
hidden_size = 128
[model.tok2vec]
-@architectures = "spacy.Tok2Vec.v1"
+@architectures = "spacy.Tok2Vec.v2"
[model.tok2vec.embed]
-@architectures = "spacy.MultiHashEmbed.v1"
+@architectures = "spacy.MultiHashEmbed.v2"
width = 96
rows = [5000, 2000, 1000, 1000]
attrs = ["ORTH", "PREFIX", "SUFFIX", "SHAPE"]
include_static_vectors = false
[model.tok2vec.encode]
-@architectures = "spacy.MaxoutWindowEncoder.v1"
+@architectures = "spacy.MaxoutWindowEncoder.v2"
width = ${model.tok2vec.embed.width}
window_size = 1
maxout_pieces = 3
@@ -139,6 +139,9 @@ def make_spancat(
spans_key (str): Key of the doc.spans dict to save the spans under. During
initialization and training, the component will look for spans on the
reference document under the same key.
+ scorer (Optional[Callable]): The scoring method. Defaults to
+ Scorer.score_spans for the Doc.spans[spans_key] with overlapping
+ spans allowed.
threshold (float): Minimum probability to consider a prediction positive.
Spans with a positive prediction will be saved on the Doc. Defaults to
0.5.
@@ -279,7 +282,10 @@ class SpanCategorizer(TrainablePipe):
DOCS: https://spacy.io/api/spancategorizer#predict
"""
indices = self.suggester(docs, ops=self.model.ops)
- scores = self.model.predict((docs, indices)) # type: ignore
+ if indices.lengths.sum() == 0:
+ scores = self.model.ops.alloc2f(0, 0)
+ else:
+ scores = self.model.predict((docs, indices)) # type: ignore
return {"indices": indices, "scores": scores}
def set_candidates(
diff --git a/spacy/pipeline/tagger.pyx b/spacy/pipeline/tagger.pyx
index 3b4715ce5..e12f116af 100644
--- a/spacy/pipeline/tagger.pyx
+++ b/spacy/pipeline/tagger.pyx
@@ -2,7 +2,8 @@
from typing import Callable, Dict, Iterable, List, Optional, Union
import numpy
import srsly
-from thinc.api import Model, set_dropout_rate, SequenceCategoricalCrossentropy, Config
+from thinc.api import Model, set_dropout_rate, Config
+from thinc.legacy import LegacySequenceCategoricalCrossentropy
from thinc.types import Floats2d, Ints1d
import warnings
from itertools import islice
@@ -244,7 +245,7 @@ class Tagger(TrainablePipe):
DOCS: https://spacy.io/api/tagger#rehearse
"""
- loss_func = SequenceCategoricalCrossentropy()
+ loss_func = LegacySequenceCategoricalCrossentropy()
if losses is None:
losses = {}
losses.setdefault(self.name, 0.0)
@@ -275,7 +276,7 @@ class Tagger(TrainablePipe):
DOCS: https://spacy.io/api/tagger#get_loss
"""
validate_examples(examples, "Tagger.get_loss")
- loss_func = SequenceCategoricalCrossentropy(names=self.labels, normalize=False, neg_prefix=self.cfg["neg_prefix"])
+ loss_func = LegacySequenceCategoricalCrossentropy(names=self.labels, normalize=False, neg_prefix=self.cfg["neg_prefix"])
# Convert empty tag "" to missing value None so that both misaligned
# tokens and tokens with missing annotation have the default missing
# value None.
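The loss swap here and in the other taggers is mechanical: under thinc v9 the class moves from thinc.api to thinc.legacy, with the same keyword arguments. A brief sketch (assuming a thinc version that ships the legacy module):

from thinc.legacy import LegacySequenceCategoricalCrossentropy

# Same constructor arguments as the old thinc.api.SequenceCategoricalCrossentropy.
loss_func = LegacySequenceCategoricalCrossentropy(normalize=False, missing_value=-1)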
diff --git a/spacy/pipeline/textcat.py b/spacy/pipeline/textcat.py
index 506cdb61c..f00e5a96d 100644
--- a/spacy/pipeline/textcat.py
+++ b/spacy/pipeline/textcat.py
@@ -27,8 +27,8 @@ single_label_default_config = """
[model.tok2vec.embed]
@architectures = "spacy.MultiHashEmbed.v2"
width = 64
-rows = [2000, 2000, 1000, 1000, 1000, 1000]
-attrs = ["ORTH", "LOWER", "PREFIX", "SUFFIX", "SHAPE", "ID"]
+rows = [2000, 2000, 500, 1000, 500]
+attrs = ["NORM", "LOWER", "PREFIX", "SUFFIX", "SHAPE"]
include_static_vectors = false
[model.tok2vec.encode]
@@ -75,9 +75,9 @@ subword_features = true
"textcat",
assigns=["doc.cats"],
default_config={
- "threshold": 0.5,
+ "threshold": 0.0,
"model": DEFAULT_SINGLE_TEXTCAT_MODEL,
- "scorer": {"@scorers": "spacy.textcat_scorer.v1"},
+ "scorer": {"@scorers": "spacy.textcat_scorer.v2"},
"save_activations": False,
},
default_score_weights={
@@ -91,7 +91,6 @@ subword_features = true
"cats_macro_f": None,
"cats_macro_auc": None,
"cats_f_per_type": None,
- "cats_macro_auc_per_type": None,
},
)
def make_textcat(
@@ -131,7 +130,7 @@ def textcat_score(examples: Iterable[Example], **kwargs) -> Dict[str, Any]:
)
-@registry.scorers("spacy.textcat_scorer.v1")
+@registry.scorers("spacy.textcat_scorer.v2")
def make_textcat_scorer():
return textcat_score
@@ -158,7 +157,8 @@ class TextCategorizer(TrainablePipe):
model (thinc.api.Model): The Thinc Model powering the pipeline component.
name (str): The component instance name, used to add entries to the
losses during training.
- threshold (float): Cutoff to consider a prediction "positive".
+ threshold (float): Unused, not needed for single-label (exclusive
+ classes) classification.
scorer (Optional[Callable]): The scoring method. Defaults to
Scorer.score_cats for the attribute "cats".
@@ -168,7 +168,11 @@ class TextCategorizer(TrainablePipe):
self.model = model
self.name = name
self._rehearsal_model = None
- cfg = {"labels": [], "threshold": threshold, "positive_label": None}
+ cfg: Dict[str, Any] = {
+ "labels": [],
+ "threshold": threshold,
+ "positive_label": None,
+ }
self.cfg = dict(cfg)
self.scorer = scorer
self.save_activations = save_activations
@@ -415,5 +419,9 @@ class TextCategorizer(TrainablePipe):
def _validate_categories(self, examples: Iterable[Example]):
"""Check whether the provided examples all have single-label cats annotations."""
for ex in examples:
- if list(ex.reference.cats.values()).count(1.0) > 1:
+ vals = list(ex.reference.cats.values())
+ if vals.count(1.0) > 1:
raise ValueError(Errors.E895.format(value=ex.reference.cats))
+ for val in vals:
+ if not (val == 1.0 or val == 0.0):
+ raise ValueError(Errors.E851.format(val=val))
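The stricter _validate_categories check means training annotations must use hard 0.0/1.0 category values; soft scores are rejected with the E851 error introduced in this diff. A hedged sketch (example data hypothetical; the error surfaces during initialization/training, not when the Example is created):

import spacy
from spacy.training import Example

nlp = spacy.blank("en")
textcat = nlp.add_pipe("textcat")
for label in ("POSITIVE", "NEGATIVE"):
    textcat.add_label(label)

doc = nlp.make_doc("A fine film.")
# Soft scores such as 0.7 now trigger E851 once the component validates the
# examples; annotations must be exactly 0.0 or 1.0.
bad_example = Example.from_dict(doc, {"cats": {"POSITIVE": 0.7, "NEGATIVE": 0.3}})
good_example = Example.from_dict(doc, {"cats": {"POSITIVE": 1.0, "NEGATIVE": 0.0}})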
diff --git a/spacy/pipeline/textcat_multilabel.py b/spacy/pipeline/textcat_multilabel.py
index 3a6dd0b0c..d64be66f6 100644
--- a/spacy/pipeline/textcat_multilabel.py
+++ b/spacy/pipeline/textcat_multilabel.py
@@ -19,17 +19,17 @@ multi_label_default_config = """
@architectures = "spacy.TextCatEnsemble.v2"
[model.tok2vec]
-@architectures = "spacy.Tok2Vec.v1"
+@architectures = "spacy.Tok2Vec.v2"
[model.tok2vec.embed]
@architectures = "spacy.MultiHashEmbed.v2"
width = 64
-rows = [2000, 2000, 1000, 1000, 1000, 1000]
-attrs = ["ORTH", "LOWER", "PREFIX", "SUFFIX", "SHAPE", "ID"]
+rows = [2000, 2000, 500, 1000, 500]
+attrs = ["NORM", "LOWER", "PREFIX", "SUFFIX", "SHAPE"]
include_static_vectors = false
[model.tok2vec.encode]
-@architectures = "spacy.MaxoutWindowEncoder.v1"
+@architectures = "spacy.MaxoutWindowEncoder.v2"
width = ${model.tok2vec.embed.width}
window_size = 1
maxout_pieces = 3
@@ -88,7 +88,6 @@ subword_features = true
"cats_macro_f": None,
"cats_macro_auc": None,
"cats_f_per_type": None,
- "cats_macro_auc_per_type": None,
},
)
def make_multilabel_textcat(
@@ -98,7 +97,7 @@ def make_multilabel_textcat(
threshold: float,
scorer: Optional[Callable],
save_activations: bool,
-) -> "TextCategorizer":
+) -> "MultiLabel_TextCategorizer":
"""Create a TextCategorizer component. The text categorizer predicts categories
over a whole document. It can learn one or more labels, and the labels are considered
to be non-mutually exclusive, which means that there can be zero or more labels
@@ -107,6 +106,7 @@ def make_multilabel_textcat(
model (Model[List[Doc], List[Floats2d]]): A model instance that predicts
scores for each category.
threshold (float): Cutoff to consider a prediction "positive".
+ scorer (Optional[Callable]): The scoring method.
"""
return MultiLabel_TextCategorizer(
nlp.vocab,
@@ -155,6 +155,7 @@ class MultiLabel_TextCategorizer(TextCategorizer):
name (str): The component instance name, used to add entries to the
losses during training.
threshold (float): Cutoff to consider a prediction "positive".
+ scorer (Optional[Callable]): The scoring method.
save_activations (bool): save model activations in Doc when annotating.
DOCS: https://spacy.io/api/textcategorizer#init
@@ -200,6 +201,8 @@ class MultiLabel_TextCategorizer(TextCategorizer):
for label in labels:
self.add_label(label)
subbatch = list(islice(get_examples(), 10))
+ self._validate_categories(subbatch)
+
doc_sample = [eg.reference for eg in subbatch]
label_sample, _ = self._examples_to_truth(subbatch)
self._require_labels()
@@ -210,4 +213,8 @@ class MultiLabel_TextCategorizer(TextCategorizer):
def _validate_categories(self, examples: Iterable[Example]):
"""This component allows any type of single- or multi-label annotations.
This method overwrites the more strict one from 'textcat'."""
- pass
+ # check that annotation values are valid
+ for ex in examples:
+ for val in ex.reference.cats.values():
+ if not (val == 1.0 or val == 0.0):
+ raise ValueError(Errors.E851.format(val=val))
diff --git a/spacy/pipeline/tok2vec.py b/spacy/pipeline/tok2vec.py
index 2e3dde3cb..c742aaeaa 100644
--- a/spacy/pipeline/tok2vec.py
+++ b/spacy/pipeline/tok2vec.py
@@ -123,9 +123,6 @@ class Tok2Vec(TrainablePipe):
width = self.model.get_dim("nO")
return [self.model.ops.alloc((0, width)) for doc in docs]
tokvecs = self.model.predict(docs)
- batch_id = Tok2VecListener.get_batch_id(docs)
- for listener in self.listeners:
- listener.receive(batch_id, tokvecs, _empty_backprop)
return tokvecs
def set_annotations(self, docs: Sequence[Doc], tokvecses) -> None:
@@ -286,8 +283,19 @@ class Tok2VecListener(Model):
def forward(model: Tok2VecListener, inputs, is_train: bool):
"""Supply the outputs from the upstream Tok2Vec component."""
if is_train:
- model.verify_inputs(inputs)
- return model._outputs, model._backprop
+        # model._batch_id is unset when the tok2vec layer is frozen and hasn't been updated during training.
+        # In that case, tok2vec should be run as an annotating component so the embeddings can be read back from doc.tensor.
+ if model._batch_id is None:
+ outputs = []
+ for doc in inputs:
+ if doc.tensor.size == 0:
+ raise ValueError(Errors.E203.format(name="tok2vec"))
+ else:
+ outputs.append(doc.tensor)
+ return outputs, _empty_backprop
+ else:
+ model.verify_inputs(inputs)
+ return model._outputs, model._backprop
else:
# This is pretty grim, but it's hard to do better :(.
# It's hard to avoid relying on the doc.tensor attribute, because the
@@ -306,7 +314,7 @@ def forward(model: Tok2VecListener, inputs, is_train: bool):
outputs.append(model.ops.alloc2f(len(doc), width))
else:
outputs.append(doc.tensor)
- return outputs, lambda dX: []
+ return outputs, _empty_backprop
def _empty_backprop(dX): # for pickling
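The fallback above reads embeddings from doc.tensor, which is only populated if the frozen tok2vec still annotates during training. A hedged excerpt of the corresponding training settings (the component name "tok2vec" is an assumption):

# Illustrative [training] excerpt: a frozen tok2vec is not updated, so it must
# also be listed under annotating_components for its listeners to read
# embeddings back from doc.tensor at training time.
training_config_excerpt = """
[training]
frozen_components = ["tok2vec"]
annotating_components = ["tok2vec"]
"""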
diff --git a/spacy/pipeline/trainable_pipe.pyx b/spacy/pipeline/trainable_pipe.pyx
index c82f2830c..5bba34e4a 100644
--- a/spacy/pipeline/trainable_pipe.pyx
+++ b/spacy/pipeline/trainable_pipe.pyx
@@ -1,4 +1,4 @@
-# cython: infer_types=True, profile=True
+# cython: infer_types=True, profile=True, binding=True
from typing import Iterable, Iterator, Optional, Dict, Tuple, Callable
import srsly
from thinc.api import set_dropout_rate, Model, Optimizer
diff --git a/spacy/pipeline/transition_parser.pyx b/spacy/pipeline/transition_parser.pyx
index 85fcc1ef8..b0f24cd73 100644
--- a/spacy/pipeline/transition_parser.pyx
+++ b/spacy/pipeline/transition_parser.pyx
@@ -13,7 +13,6 @@ import contextlib
import srsly
from thinc.api import get_ops, set_dropout_rate, CupyOps, NumpyOps
from thinc.api import get_array_module
-from thinc.extra.search cimport Beam
from thinc.types import Ints1d
import numpy.random
import numpy
diff --git a/spacy/schemas.py b/spacy/schemas.py
index a38421fa0..dc30f9e39 100644
--- a/spacy/schemas.py
+++ b/spacy/schemas.py
@@ -181,12 +181,12 @@ class TokenPatternNumber(BaseModel):
IS_SUBSET: Optional[List[StrictInt]] = Field(None, alias="is_subset")
IS_SUPERSET: Optional[List[StrictInt]] = Field(None, alias="is_superset")
INTERSECTS: Optional[List[StrictInt]] = Field(None, alias="intersects")
- EQ: Union[StrictInt, StrictFloat] = Field(None, alias="==")
- NEQ: Union[StrictInt, StrictFloat] = Field(None, alias="!=")
- GEQ: Union[StrictInt, StrictFloat] = Field(None, alias=">=")
- LEQ: Union[StrictInt, StrictFloat] = Field(None, alias="<=")
- GT: Union[StrictInt, StrictFloat] = Field(None, alias=">")
- LT: Union[StrictInt, StrictFloat] = Field(None, alias="<")
+ EQ: Optional[Union[StrictInt, StrictFloat]] = Field(None, alias="==")
+ NEQ: Optional[Union[StrictInt, StrictFloat]] = Field(None, alias="!=")
+ GEQ: Optional[Union[StrictInt, StrictFloat]] = Field(None, alias=">=")
+ LEQ: Optional[Union[StrictInt, StrictFloat]] = Field(None, alias="<=")
+ GT: Optional[Union[StrictInt, StrictFloat]] = Field(None, alias=">")
+ LT: Optional[Union[StrictInt, StrictFloat]] = Field(None, alias="<")
class Config:
extra = "forbid"
@@ -329,6 +329,7 @@ class ConfigSchemaTraining(BaseModel):
frozen_components: List[str] = Field(..., title="Pipeline components that shouldn't be updated during training")
annotating_components: List[str] = Field(..., title="Pipeline components that should set annotations during training")
before_to_disk: Optional[Callable[["Language"], "Language"]] = Field(..., title="Optional callback to modify nlp object after training, before it's saved to disk")
+ before_update: Optional[Callable[["Language", Dict[str, Any]], None]] = Field(..., title="Optional callback that is invoked at the start of each training step")
# fmt: on
class Config:
@@ -430,7 +431,7 @@ class ProjectConfigAssetURL(BaseModel):
# fmt: off
dest: StrictStr = Field(..., title="Destination of downloaded asset")
url: Optional[StrictStr] = Field(None, title="URL of asset")
- checksum: str = Field(None, title="MD5 hash of file", regex=r"([a-fA-F\d]{32})")
+ checksum: Optional[str] = Field(None, title="MD5 hash of file", regex=r"([a-fA-F\d]{32})")
description: StrictStr = Field("", title="Description of asset")
# fmt: on
@@ -438,7 +439,7 @@ class ProjectConfigAssetURL(BaseModel):
class ProjectConfigAssetGit(BaseModel):
# fmt: off
git: ProjectConfigAssetGitItem = Field(..., title="Git repo information")
- checksum: str = Field(None, title="MD5 hash of file", regex=r"([a-fA-F\d]{32})")
+ checksum: Optional[str] = Field(None, title="MD5 hash of file", regex=r"([a-fA-F\d]{32})")
description: Optional[StrictStr] = Field(None, title="Description of asset")
# fmt: on
@@ -508,9 +509,9 @@ class DocJSONSchema(BaseModel):
None, title="Indices of sentences' start and end indices"
)
text: StrictStr = Field(..., title="Document text")
- spans: Dict[StrictStr, List[Dict[StrictStr, Union[StrictStr, StrictInt]]]] = Field(
- None, title="Span information - end/start indices, label, KB ID"
- )
+ spans: Optional[
+ Dict[StrictStr, List[Dict[StrictStr, Union[StrictStr, StrictInt]]]]
+ ] = Field(None, title="Span information - end/start indices, label, KB ID")
tokens: List[Dict[StrictStr, Union[StrictStr, StrictInt]]] = Field(
..., title="Token information - ID, start, annotations"
)
@@ -519,9 +520,9 @@ class DocJSONSchema(BaseModel):
title="Any custom data stored in the document's _ attribute",
alias="_",
)
- underscore_token: Optional[Dict[StrictStr, Dict[StrictStr, Any]]] = Field(
+ underscore_token: Optional[Dict[StrictStr, List[Dict[StrictStr, Any]]]] = Field(
None, title="Any custom data stored in the token's _ attribute"
)
- underscore_span: Optional[Dict[StrictStr, Dict[StrictStr, Any]]] = Field(
+ underscore_span: Optional[Dict[StrictStr, List[Dict[StrictStr, Any]]]] = Field(
None, title="Any custom data stored in the span's _ attribute"
)
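For context on the `TokenPatternNumber` change above: these fields back the numeric comparison operators in `Matcher` token patterns, and the added `Optional[...]` annotations simply match the `None` defaults already passed to `Field`. A small sketch of the kind of pattern these fields validate:

import spacy
from spacy.matcher import Matcher

nlp = spacy.blank("en")
matcher = Matcher(nlp.vocab)
# The ">=" key below is validated by TokenPatternNumber's GEQ field.
matcher.add("LONG_TOKEN", [[{"LENGTH": {">=": 10}}]])
doc = nlp("interoperability is hard")
print([doc[start:end].text for _, start, end in matcher(doc)])  # ['interoperability']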
diff --git a/spacy/scorer.py b/spacy/scorer.py
index 8cd755ac4..16fc303a0 100644
--- a/spacy/scorer.py
+++ b/spacy/scorer.py
@@ -446,7 +446,7 @@ class Scorer:
labels (Iterable[str]): The set of possible labels. Defaults to [].
multi_label (bool): Whether the attribute allows multiple labels.
Defaults to True. When set to False (exclusive labels), missing
- gold labels are interpreted as 0.0.
+ gold labels are interpreted as 0.0 and the threshold is set to 0.0.
positive_label (str): The positive label for a binary task with
exclusive classes. Defaults to None.
threshold (float): Cutoff to consider a prediction "positive". Defaults
@@ -471,6 +471,8 @@ class Scorer:
"""
if threshold is None:
threshold = 0.5 if multi_label else 0.0
+ if not multi_label:
+ threshold = 0.0
f_per_type = {label: PRFScore() for label in labels}
auc_per_type = {label: ROCAUCScore() for label in labels}
labels = set(labels)
@@ -505,20 +507,18 @@ class Scorer:
# Get the highest-scoring for each.
pred_label, pred_score = max(pred_cats.items(), key=lambda it: it[1])
gold_label, gold_score = max(gold_cats.items(), key=lambda it: it[1])
- if pred_label == gold_label and pred_score >= threshold:
+ if pred_label == gold_label:
f_per_type[pred_label].tp += 1
else:
f_per_type[gold_label].fn += 1
- if pred_score >= threshold:
- f_per_type[pred_label].fp += 1
+ f_per_type[pred_label].fp += 1
elif gold_cats:
gold_label, gold_score = max(gold_cats, key=lambda it: it[1])
if gold_score > 0:
f_per_type[gold_label].fn += 1
elif pred_cats:
pred_label, pred_score = max(pred_cats.items(), key=lambda it: it[1])
- if pred_score >= threshold:
- f_per_type[pred_label].fp += 1
+ f_per_type[pred_label].fp += 1
micro_prf = PRFScore()
for label_prf in f_per_type.values():
micro_prf.tp += label_prf.tp
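In plain Python, the exclusive-label branch after this change reduces to an argmax comparison: the threshold no longer gates whether the top prediction is counted, only which label it is compared against. A rough paraphrase of the logic above:

# Rough paraphrase of the exclusive-label (multi_label=False) scoring above.
# `f_per_type` maps label -> an object with tp/fp/fn counters (PRFScore in spaCy).
def score_exclusive_cats(pred_cats, gold_cats, f_per_type):
    pred_label, _ = max(pred_cats.items(), key=lambda it: it[1])
    gold_label, _ = max(gold_cats.items(), key=lambda it: it[1])
    if pred_label == gold_label:
        f_per_type[pred_label].tp += 1
    else:
        f_per_type[gold_label].fn += 1
        f_per_type[pred_label].fp += 1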
diff --git a/spacy/strings.pxd b/spacy/strings.pxd
index 5f03a9a28..0c1a30fe3 100644
--- a/spacy/strings.pxd
+++ b/spacy/strings.pxd
@@ -1,4 +1,4 @@
-from libc.stdint cimport int64_t
+from libc.stdint cimport int64_t, uint32_t
from libcpp.vector cimport vector
from libcpp.set cimport set
from cymem.cymem cimport Pool
@@ -7,13 +7,6 @@ from murmurhash.mrmr cimport hash64
from .typedefs cimport attr_t, hash_t
-
-cpdef hash_t hash_string(str string) except 0
-cdef hash_t hash_utf8(char* utf8_string, int length) nogil
-
-cdef str decode_Utf8Str(const Utf8Str* string)
-
-
ctypedef union Utf8Str:
unsigned char[8] s
unsigned char* p
@@ -21,9 +14,13 @@ ctypedef union Utf8Str:
cdef class StringStore:
cdef Pool mem
+ cdef vector[hash_t] _keys
+ cdef PreshMap _map
- cdef vector[hash_t] keys
- cdef public PreshMap _map
+ cdef hash_t _intern_str(self, str string)
+ cdef Utf8Str* _allocate_str_repr(self, const unsigned char* chars, uint32_t length) except *
+ cdef str _decode_str_repr(self, const Utf8Str* string)
- cdef const Utf8Str* intern_unicode(self, str py_string)
- cdef const Utf8Str* _intern_utf8(self, char* utf8_string, int length, hash_t* precalculated_hash)
+
+cpdef hash_t hash_string(object string) except -1
+cpdef hash_t get_string_id(object string_or_hash) except -1
diff --git a/spacy/strings.pyi b/spacy/strings.pyi
index b29389b9a..d9509ff57 100644
--- a/spacy/strings.pyi
+++ b/spacy/strings.pyi
@@ -1,21 +1,20 @@
-from typing import Optional, Iterable, Iterator, Union, Any, overload
+from typing import List, Optional, Iterable, Iterator, Union, Any, Tuple, overload
from pathlib import Path
-def get_string_id(key: Union[str, int]) -> int: ...
-
class StringStore:
- def __init__(
- self, strings: Optional[Iterable[str]] = ..., freeze: bool = ...
- ) -> None: ...
+ def __init__(self, strings: Optional[Iterable[str]]) -> None: ...
@overload
- def __getitem__(self, string_or_id: Union[bytes, str]) -> int: ...
+ def __getitem__(self, string_or_hash: str) -> int: ...
@overload
- def __getitem__(self, string_or_id: int) -> str: ...
- def as_int(self, key: Union[bytes, str, int]) -> int: ...
- def as_string(self, key: Union[bytes, str, int]) -> str: ...
+ def __getitem__(self, string_or_hash: int) -> str: ...
+ def as_int(self, string_or_hash: Union[str, int]) -> int: ...
+ def as_string(self, string_or_hash: Union[str, int]) -> str: ...
def add(self, string: str) -> int: ...
+ def items(self) -> List[Tuple[str, int]]: ...
+ def keys(self) -> List[str]: ...
+ def values(self) -> List[int]: ...
def __len__(self) -> int: ...
- def __contains__(self, string: str) -> bool: ...
+ def __contains__(self, string_or_hash: Union[str, int]) -> bool: ...
def __iter__(self) -> Iterator[str]: ...
def __reduce__(self) -> Any: ...
def to_disk(self, path: Union[str, Path]) -> None: ...
@@ -23,3 +22,5 @@ class StringStore:
def to_bytes(self, **kwargs: Any) -> bytes: ...
def from_bytes(self, bytes_data: bytes, **kwargs: Any) -> StringStore: ...
def _reset_and_load(self, strings: Iterable[str]) -> None: ...
+
+def get_string_id(string_or_hash: Union[str, int]) -> int: ...
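Taken together, the revised stub describes a `StringStore` that accepts either strings or hashes in most places and exposes dict-like views. A short usage sketch of what the annotations above promise, assuming the v4 dev API in this diff:

from spacy.strings import StringStore, get_string_id

store = StringStore(["apple", "orange"])
apple_hash = store["apple"]              # str -> hash (added to the store if missing)
assert store[apple_hash] == "apple"      # hash -> str
assert apple_hash in store               # __contains__ now accepts hashes as well
assert store.keys() == ["apple", "orange"]
assert get_string_id("apple") == apple_hash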
diff --git a/spacy/strings.pyx b/spacy/strings.pyx
index e86682733..5a037eb9a 100644
--- a/spacy/strings.pyx
+++ b/spacy/strings.pyx
@@ -1,9 +1,10 @@
# cython: infer_types=True
+from typing import Optional, Union, Iterable, Tuple, Callable, Any, List, Iterator
cimport cython
from libc.string cimport memcpy
from libcpp.set cimport set
from libc.stdint cimport uint32_t
-from murmurhash.mrmr cimport hash64, hash32
+from murmurhash.mrmr cimport hash64
import srsly
@@ -14,105 +15,13 @@ from .symbols import NAMES as SYMBOLS_BY_INT
from .errors import Errors
from . import util
-# Not particularly elegant, but this is faster than `isinstance(key, numbers.Integral)`
-cdef inline bint _try_coerce_to_hash(object key, hash_t* out_hash):
- try:
- out_hash[0] = key
- return True
- except:
- return False
-
-def get_string_id(key):
- """Get a string ID, handling the reserved symbols correctly. If the key is
- already an ID, return it.
-
- This function optimises for convenience over performance, so shouldn't be
- used in tight loops.
- """
- cdef hash_t str_hash
- if isinstance(key, str):
- if len(key) == 0:
- return 0
-
- symbol = SYMBOLS_BY_STR.get(key, None)
- if symbol is not None:
- return symbol
- else:
- chars = key.encode("utf8")
- return hash_utf8(chars, len(chars))
- elif _try_coerce_to_hash(key, &str_hash):
- # Coerce the integral key to the expected primitive hash type.
- # This ensures that custom/overloaded "primitive" data types
- # such as those implemented by numpy are not inadvertently used
- # downsteam (as these are internally implemented as custom PyObjects
- # whose comparison operators can incur a significant overhead).
- return str_hash
- else:
- # TODO: Raise an error instead
- return key
-
-
-cpdef hash_t hash_string(str string) except 0:
- chars = string.encode("utf8")
- return hash_utf8(chars, len(chars))
-
-
-cdef hash_t hash_utf8(char* utf8_string, int length) nogil:
- return hash64(utf8_string, length, 1)
-
-
-cdef uint32_t hash32_utf8(char* utf8_string, int length) nogil:
- return hash32(utf8_string, length, 1)
-
-
-cdef str decode_Utf8Str(const Utf8Str* string):
- cdef int i, length
- if string.s[0] < sizeof(string.s) and string.s[0] != 0:
- return string.s[1:string.s[0]+1].decode("utf8")
- elif string.p[0] < 255:
- return string.p[1:string.p[0]+1].decode("utf8")
- else:
- i = 0
- length = 0
- while string.p[i] == 255:
- i += 1
- length += 255
- length += string.p[i]
- i += 1
- return string.p[i:length + i].decode("utf8")
-
-
-cdef Utf8Str* _allocate(Pool mem, const unsigned char* chars, uint32_t length) except *:
- cdef int n_length_bytes
- cdef int i
- cdef Utf8Str* string = mem.alloc(1, sizeof(Utf8Str))
- cdef uint32_t ulength = length
- if length < sizeof(string.s):
- string.s[0] = length
- memcpy(&string.s[1], chars, length)
- return string
- elif length < 255:
- string.p = mem.alloc(length + 1, sizeof(unsigned char))
- string.p[0] = length
- memcpy(&string.p[1], chars, length)
- return string
- else:
- i = 0
- n_length_bytes = (length // 255) + 1
- string.p = mem.alloc(length + n_length_bytes, sizeof(unsigned char))
- for i in range(n_length_bytes-1):
- string.p[i] = 255
- string.p[n_length_bytes-1] = length % 255
- memcpy(&string.p[n_length_bytes], chars, length)
- return string
-
cdef class StringStore:
- """Look up strings by 64-bit hashes.
+ """Look up strings by 64-bit hashes. Implicitly handles reserved symbols.
DOCS: https://spacy.io/api/stringstore
"""
- def __init__(self, strings=None, freeze=False):
+ def __init__(self, strings: Optional[Iterable[str]] = None):
"""Create the StringStore.
strings (iterable): A sequence of unicode strings to add to the store.
@@ -123,128 +32,127 @@ cdef class StringStore:
for string in strings:
self.add(string)
- def __getitem__(self, object string_or_id):
- """Retrieve a string from a given hash, or vice versa.
+ def __getitem__(self, string_or_hash: Union[str, int]) -> Union[str, int]:
+ """Retrieve a string from a given hash. If a string
+ is passed as the input, add it to the store and return
+ its hash.
- string_or_id (bytes, str or uint64): The value to encode.
- Returns (str / uint64): The value to be retrieved.
+        string_or_hash (int / str): The hash value to look up or the string to store.
+ RETURNS (str / int): The stored string or the hash of the newly added string.
"""
- cdef hash_t str_hash
- cdef Utf8Str* utf8str = NULL
-
- if isinstance(string_or_id, str):
- if len(string_or_id) == 0:
- return 0
-
- # Return early if the string is found in the symbols LUT.
- symbol = SYMBOLS_BY_STR.get(string_or_id, None)
- if symbol is not None:
- return symbol
- else:
- return hash_string(string_or_id)
- elif isinstance(string_or_id, bytes):
- return hash_utf8(string_or_id, len(string_or_id))
- elif _try_coerce_to_hash(string_or_id, &str_hash):
- if str_hash == 0:
- return ""
- elif str_hash in SYMBOLS_BY_INT:
- return SYMBOLS_BY_INT[str_hash]
- else:
- utf8str = self._map.get(str_hash)
+ if isinstance(string_or_hash, str):
+ return self.add(string_or_hash)
else:
- # TODO: Raise an error instead
- utf8str = self._map.get(string_or_id)
+ return self._get_interned_str(string_or_hash)
- if utf8str is NULL:
- raise KeyError(Errors.E018.format(hash_value=string_or_id))
- else:
- return decode_Utf8Str(utf8str)
+ def __contains__(self, string_or_hash: Union[str, int]) -> bool:
+ """Check whether a string or a hash is in the store.
- def as_int(self, key):
- """If key is an int, return it; otherwise, get the int value."""
- if not isinstance(key, str):
- return key
- else:
- return self[key]
-
- def as_string(self, key):
- """If key is a string, return it; otherwise, get the string value."""
- if isinstance(key, str):
- return key
- else:
- return self[key]
-
- def add(self, string):
- """Add a string to the StringStore.
-
- string (str): The string to add.
- RETURNS (uint64): The string's hash value.
- """
- cdef hash_t str_hash
- if isinstance(string, str):
- if string in SYMBOLS_BY_STR:
- return SYMBOLS_BY_STR[string]
-
- string = string.encode("utf8")
- str_hash = hash_utf8(string, len(string))
- self._intern_utf8(string, len(string), &str_hash)
- elif isinstance(string, bytes):
- if string in SYMBOLS_BY_STR:
- return SYMBOLS_BY_STR[string]
- str_hash = hash_utf8(string, len(string))
- self._intern_utf8(string, len(string), &str_hash)
- else:
- raise TypeError(Errors.E017.format(value_type=type(string)))
- return str_hash
-
- def __len__(self):
- """The number of strings in the store.
-
- RETURNS (int): The number of strings in the store.
- """
- return self.keys.size()
-
- def __contains__(self, string_or_id not None):
- """Check whether a string or ID is in the store.
-
- string_or_id (str or int): The string to check.
+        string_or_hash (str / int): The string or hash to check.
RETURNS (bool): Whether the store contains the string.
"""
- cdef hash_t str_hash
- if isinstance(string_or_id, str):
- if len(string_or_id) == 0:
- return True
- elif string_or_id in SYMBOLS_BY_STR:
- return True
- str_hash = hash_string(string_or_id)
- elif _try_coerce_to_hash(string_or_id, &str_hash):
- pass
- else:
- # TODO: Raise an error instead
- return self._map.get(string_or_id) is not NULL
-
+ cdef hash_t str_hash = get_string_id(string_or_hash)
if str_hash in SYMBOLS_BY_INT:
return True
else:
return self._map.get(str_hash) is not NULL
- def __iter__(self):
- """Iterate over the strings in the store, in order.
+ def __iter__(self) -> Iterator[str]:
+ """Iterate over the strings in the store in insertion order.
- YIELDS (str): A string in the store.
+        RETURNS: An iterator over the stored strings.
"""
- cdef int i
- cdef hash_t key
- for i in range(self.keys.size()):
- key = self.keys[i]
- utf8str = self._map.get(key)
- yield decode_Utf8Str(utf8str)
- # TODO: Iterate OOV here?
+ return iter(self.keys())
def __reduce__(self):
strings = list(self)
return (StringStore, (strings,), None, None, None)
+ def __len__(self) -> int:
+ """The number of strings in the store.
+
+ RETURNS (int): The number of strings in the store.
+ """
+ return self._keys.size()
+
+ def add(self, string: str) -> int:
+ """Add a string to the StringStore.
+
+ string (str): The string to add.
+ RETURNS (uint64): The string's hash value.
+ """
+ if not isinstance(string, str):
+ raise TypeError(Errors.E017.format(value_type=type(string)))
+
+ if string in SYMBOLS_BY_STR:
+ return SYMBOLS_BY_STR[string]
+ else:
+ return self._intern_str(string)
+
+    def as_int(self, string_or_hash: Union[str, int]) -> int:
+ """If a hash value is passed as the input, return it as-is. If the input
+ is a string, return its corresponding hash.
+
+ string_or_hash (str / int): The string to hash or a hash value.
+ RETURNS (int): The hash of the string or the input hash value.
+ """
+ if isinstance(string_or_hash, int):
+ return string_or_hash
+ else:
+ return get_string_id(string_or_hash)
+
+ def as_string(self, string_or_hash: Union[str, int]) -> str:
+ """If a string is passed as the input, return it as-is. If the input
+ is a hash value, return its corresponding string.
+
+        string_or_hash (str / int): The hash value to look up or a string.
+ RETURNS (str): The stored string or the input string.
+ """
+ if isinstance(string_or_hash, str):
+ return string_or_hash
+ else:
+ return self._get_interned_str(string_or_hash)
+
+ def items(self) -> List[Tuple[str, int]]:
+ """Iterate over the stored strings and their hashes in insertion order.
+
+ RETURNS: A list of string-hash pairs.
+ """
+ # Even though we internally store the hashes as keys and the strings as
+ # values, we invert the order in the public API to keep it consistent with
+ # the implementation of the `__iter__` method (where we wish to iterate over
+ # the strings in the store).
+ cdef int i
+ pairs = [None] * self._keys.size()
+ for i in range(self._keys.size()):
+ str_hash = self._keys[i]
+ utf8str = self._map.get(str_hash)
+ pairs[i] = (self._decode_str_repr(utf8str), str_hash)
+ return pairs
+
+ def keys(self) -> List[str]:
+ """Iterate over the stored strings in insertion order.
+
+ RETURNS: A list of strings.
+ """
+ cdef int i
+ strings = [None] * self._keys.size()
+ for i in range(self._keys.size()):
+ utf8str = self._map.get(self._keys[i])
+ strings[i] = self._decode_str_repr(utf8str)
+ return strings
+
+ def values(self) -> List[int]:
+ """Iterate over the stored strings hashes in insertion order.
+
+ RETURNS: A list of string hashs.
+ """
+ cdef int i
+ hashes = [None] * self._keys.size()
+ for i in range(self._keys.size()):
+ hashes[i] = self._keys[i]
+ return hashes
+
def to_disk(self, path):
"""Save the current state to a directory.
@@ -294,24 +202,122 @@ cdef class StringStore:
def _reset_and_load(self, strings):
self.mem = Pool()
self._map = PreshMap()
- self.keys.clear()
+ self._keys.clear()
for string in strings:
self.add(string)
- cdef const Utf8Str* intern_unicode(self, str py_string):
- # 0 means missing, but we don't bother offsetting the index.
- cdef bytes byte_string = py_string.encode("utf8")
- return self._intern_utf8(byte_string, len(byte_string), NULL)
+ def _get_interned_str(self, hash_value: int) -> str:
+ cdef hash_t str_hash
+ if not _try_coerce_to_hash(hash_value, &str_hash):
+ raise TypeError(Errors.E4001.format(expected_types="'int'", received_type=type(hash_value)))
- @cython.final
- cdef const Utf8Str* _intern_utf8(self, char* utf8_string, int length, hash_t* precalculated_hash):
+ # Handle reserved symbols and empty strings correctly.
+ if str_hash == 0:
+ return ""
+
+ symbol = SYMBOLS_BY_INT.get(str_hash)
+ if symbol is not None:
+ return symbol
+
+ utf8str = self._map.get(str_hash)
+ if utf8str is NULL:
+ raise KeyError(Errors.E018.format(hash_value=str_hash))
+ else:
+ return self._decode_str_repr(utf8str)
+
+ cdef hash_t _intern_str(self, str string):
# TODO: This function's API/behaviour is an unholy mess...
# 0 means missing, but we don't bother offsetting the index.
- cdef hash_t key = precalculated_hash[0] if precalculated_hash is not NULL else hash_utf8(utf8_string, length)
+ chars = string.encode('utf-8')
+ cdef hash_t key = hash64(chars, len(chars), 1)
cdef Utf8Str* value = self._map.get(key)
if value is not NULL:
- return value
- value = _allocate(self.mem, utf8_string, length)
+ return key
+
+ value = self._allocate_str_repr(chars, len(chars))
self._map.set(key, value)
- self.keys.push_back(key)
- return value
+ self._keys.push_back(key)
+ return key
+
+ cdef Utf8Str* _allocate_str_repr(self, const unsigned char* chars, uint32_t length) except *:
+ cdef int n_length_bytes
+ cdef int i
+ cdef Utf8Str* string = self.mem.alloc(1, sizeof(Utf8Str))
+ cdef uint32_t ulength = length
+ if length < sizeof(string.s):
+ string.s[0] = length
+ memcpy(&string.s[1], chars, length)
+ return string
+ elif length < 255:
+ string.p = self.mem.alloc(length + 1, sizeof(unsigned char))
+ string.p[0] = length
+ memcpy(&string.p[1], chars, length)
+ return string
+ else:
+ i = 0
+ n_length_bytes = (length // 255) + 1
+ string.p = self.mem.alloc(length + n_length_bytes, sizeof(unsigned char))
+ for i in range(n_length_bytes-1):
+ string.p[i] = 255
+ string.p[n_length_bytes-1] = length % 255
+ memcpy(&string.p[n_length_bytes], chars, length)
+ return string
+
+ cdef str _decode_str_repr(self, const Utf8Str* string):
+ cdef int i, length
+ if string.s[0] < sizeof(string.s) and string.s[0] != 0:
+ return string.s[1:string.s[0]+1].decode('utf-8')
+ elif string.p[0] < 255:
+ return string.p[1:string.p[0]+1].decode('utf-8')
+ else:
+ i = 0
+ length = 0
+ while string.p[i] == 255:
+ i += 1
+ length += 255
+ length += string.p[i]
+ i += 1
+ return string.p[i:length + i].decode('utf-8')
+
+
+cpdef hash_t hash_string(object string) except -1:
+ if not isinstance(string, str):
+ raise TypeError(Errors.E4001.format(expected_types="'str'", received_type=type(string)))
+
+ # Handle reserved symbols and empty strings correctly.
+ if len(string) == 0:
+ return 0
+
+ symbol = SYMBOLS_BY_STR.get(string)
+ if symbol is not None:
+ return symbol
+
+ chars = string.encode('utf-8')
+ return hash64(chars, len(chars), 1)
+
+
+cpdef hash_t get_string_id(object string_or_hash) except -1:
+ cdef hash_t str_hash
+
+ try:
+ return hash_string(string_or_hash)
+ except:
+ if _try_coerce_to_hash(string_or_hash, &str_hash):
+ # Coerce the integral key to the expected primitive hash type.
+ # This ensures that custom/overloaded "primitive" data types
+ # such as those implemented by numpy are not inadvertently used
+        # downstream (as these are internally implemented as custom PyObjects
+ # whose comparison operators can incur a significant overhead).
+ return str_hash
+ else:
+ raise TypeError(Errors.E4001.format(expected_types="'str','int'", received_type=type(string_or_hash)))
+
+
+# Not particularly elegant, but this is faster than `isinstance(key, numbers.Integral)`
+cdef inline bint _try_coerce_to_hash(object key, hash_t* out_hash):
+ try:
+ out_hash[0] = key
+ return True
+ except:
+ return False
+
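The rewritten module-level helpers are stricter about input types than before: `hash_string` only accepts `str`, and `get_string_id` accepts a string or an integral hash and raises instead of silently returning unknown key types. A sketch of the expected behaviour, based on the code above:

from spacy.strings import get_string_id, hash_string

assert get_string_id("") == 0                      # empty string keeps its reserved ID
assert get_string_id("ORG") == hash_string("ORG")  # reserved symbols resolve via the symbol table
h = get_string_id("some string")
assert get_string_id(h) == h                       # integral hashes pass through unchanged
try:
    get_string_id(None)                            # unsupported types now raise (Errors.E4001)
except TypeError:
    pass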
diff --git a/spacy/tests/README.md b/spacy/tests/README.md
index 82fabcc77..f3c96a39e 100644
--- a/spacy/tests/README.md
+++ b/spacy/tests/README.md
@@ -40,7 +40,7 @@ py.test spacy/tests/tokenizer/test_exceptions.py::test_tokenizer_handles_emoji #
To keep the behavior of the tests consistent and predictable, we try to follow a few basic conventions:
-- **Test names** should follow a pattern of `test_[module]_[tested behaviour]`. For example: `test_tokenizer_keeps_email` or `test_spans_override_sentiment`.
+- **Test names** should follow a pattern of `test_[module]_[tested behaviour]`. For example: `test_tokenizer_keeps_email`.
- If you're testing for a bug reported in a specific issue, always create a **regression test**. Regression tests should be named `test_issue[ISSUE NUMBER]` and live in the [`regression`](regression) directory.
- Only use `@pytest.mark.xfail` for tests that **should pass, but currently fail**. To test for desired negative behavior, use `assert not` in your test.
- Very **extensive tests** that take a long time to run should be marked with `@pytest.mark.slow`. If your slow test is testing important behavior, consider adding an additional simpler version.
diff --git a/spacy/tests/conftest.py b/spacy/tests/conftest.py
index ee78b64a5..b9c4ef715 100644
--- a/spacy/tests/conftest.py
+++ b/spacy/tests/conftest.py
@@ -1,10 +1,10 @@
import pytest
from spacy.util import get_lang_class
import functools
+from hypothesis import settings
import inspect
import importlib
import sys
-from hypothesis import settings
# Functionally disable deadline settings for tests
# to prevent spurious test failures in CI builds.
@@ -382,12 +382,20 @@ def ru_tokenizer():
return get_lang_class("ru")().tokenizer
-@pytest.fixture
+@pytest.fixture(scope="session")
def ru_lemmatizer():
pytest.importorskip("pymorphy3")
return get_lang_class("ru")().add_pipe("lemmatizer")
+@pytest.fixture(scope="session")
+def ru_lookup_lemmatizer():
+ pytest.importorskip("pymorphy3")
+ return get_lang_class("ru")().add_pipe(
+ "lemmatizer", config={"mode": "pymorphy3_lookup"}
+ )
+
+
@pytest.fixture(scope="session")
def sa_tokenizer():
return get_lang_class("sa")().tokenizer
@@ -460,13 +468,22 @@ def uk_tokenizer():
return get_lang_class("uk")().tokenizer
-@pytest.fixture
+@pytest.fixture(scope="session")
def uk_lemmatizer():
pytest.importorskip("pymorphy3")
pytest.importorskip("pymorphy3_dicts_uk")
return get_lang_class("uk")().add_pipe("lemmatizer")
+@pytest.fixture(scope="session")
+def uk_lookup_lemmatizer():
+ pytest.importorskip("pymorphy3")
+ pytest.importorskip("pymorphy3_dicts_uk")
+ return get_lang_class("uk")().add_pipe(
+ "lemmatizer", config={"mode": "pymorphy3_lookup"}
+ )
+
+
@pytest.fixture(scope="session")
def ur_tokenizer():
return get_lang_class("ur")().tokenizer
diff --git a/spacy/tests/doc/test_array.py b/spacy/tests/doc/test_array.py
index c334cc6eb..1f2d7d999 100644
--- a/spacy/tests/doc/test_array.py
+++ b/spacy/tests/doc/test_array.py
@@ -123,14 +123,14 @@ def test_doc_from_array_heads_in_bounds(en_vocab):
# head before start
arr = doc.to_array(["HEAD"])
- arr[0] = -1
+ arr[0] = numpy.int32(-1).astype(numpy.uint64)
doc_from_array = Doc(en_vocab, words=words)
with pytest.raises(ValueError):
doc_from_array.from_array(["HEAD"], arr)
# head after end
arr = doc.to_array(["HEAD"])
- arr[0] = 5
+ arr[0] = numpy.int32(5).astype(numpy.uint64)
doc_from_array = Doc(en_vocab, words=words)
with pytest.raises(ValueError):
doc_from_array.from_array(["HEAD"], arr)
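The explicit casts above sidestep NumPy's tightening rules for assigning out-of-range Python integers to unsigned arrays: instead of relying on implicit wrap-around of `-1` into a `uint64` slot, the test computes the wrapped value itself. A standalone sketch (exact behaviour depends on the installed NumPy version):

import numpy

arr = numpy.zeros(3, dtype=numpy.uint64)
# arr[0] = -1  # implicit wrap-around; deprecated/rejected by newer NumPy releases
arr[0] = numpy.int32(-1).astype(numpy.uint64)  # explicit cast: 2**64 - 1
print(arr[0])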
diff --git a/spacy/tests/doc/test_doc_api.py b/spacy/tests/doc/test_doc_api.py
index a64ab2ba8..f77d54493 100644
--- a/spacy/tests/doc/test_doc_api.py
+++ b/spacy/tests/doc/test_doc_api.py
@@ -82,6 +82,21 @@ def test_issue2396(en_vocab):
assert (span.get_lca_matrix() == matrix).all()
+@pytest.mark.issue(11499)
+def test_init_args_unmodified(en_vocab):
+ words = ["A", "sentence"]
+ ents = ["B-TYPE1", ""]
+ sent_starts = [True, False]
+ Doc(
+ vocab=en_vocab,
+ words=words,
+ ents=ents,
+ sent_starts=sent_starts,
+ )
+ assert ents == ["B-TYPE1", ""]
+ assert sent_starts == [True, False]
+
+
@pytest.mark.parametrize("text", ["-0.23", "+123,456", "±1"])
@pytest.mark.parametrize("lang_cls", [English, MultiLanguage])
@pytest.mark.issue(2782)
@@ -365,9 +380,7 @@ def test_doc_api_serialize(en_tokenizer, text):
assert [t.text for t in tokens] == [t.text for t in new_tokens]
assert [t.orth for t in tokens] == [t.orth for t in new_tokens]
- new_tokens = Doc(tokens.vocab).from_bytes(
- tokens.to_bytes(exclude=["sentiment"]), exclude=["sentiment"]
- )
+ new_tokens = Doc(tokens.vocab).from_bytes(tokens.to_bytes())
assert tokens.text == new_tokens.text
assert [t.text for t in tokens] == [t.text for t in new_tokens]
assert [t.orth for t in tokens] == [t.orth for t in new_tokens]
@@ -975,3 +988,12 @@ def test_doc_spans_setdefault(en_tokenizer):
assert len(doc.spans["key2"]) == 1
doc.spans.setdefault("key3", default=SpanGroup(doc, spans=[doc[0:1], doc[1:2]]))
assert len(doc.spans["key3"]) == 2
+
+
+def test_doc_sentiment_from_bytes_v3_to_v4():
+ """Test if a doc with sentiment attribute created in v3.x works with '.from_bytes' in v4.x without throwing errors. The sentiment attribute was removed in v4"""
+ doc_bytes = b"\x89\xa4text\xa5happy\xaaarray_head\x9fGQACKOLMN\xcd\x01\xc4\xcd\x01\xc6I\xcd\x01\xc5JP\xaaarray_body\x85\xc4\x02nd\xc3\xc4\x04type\xa3 Callable[[KnowledgeBase, "Span"], Iterable[Candidate]]:
+ def create_candidates() -> Callable[
+ [InMemoryLookupKB, "Span"], Iterable[Candidate]
+ ]:
return get_lowercased_candidates
+ @registry.misc("spacy.LowercaseCandidateBatchGenerator.v1")
+ def create_candidates_batch() -> Callable[
+ [InMemoryLookupKB, Iterable["Span"]], Iterable[Iterable[Candidate]]
+ ]:
+ return get_lowercased_candidates_batch
+
# replace the pipe with a new one with with a different candidate generator
entity_linker = nlp.replace_pipe(
"entity_linker",
@@ -512,6 +524,9 @@ def test_el_pipe_configuration(nlp):
config={
"incl_context": False,
"get_candidates": {"@misc": "spacy.LowercaseCandidateGenerator.v1"},
+ "get_candidates_batch": {
+ "@misc": "spacy.LowercaseCandidateBatchGenerator.v1"
+ },
},
)
entity_linker.set_kb(create_kb)
@@ -533,7 +548,7 @@ def test_nel_nsents(nlp):
def test_vocab_serialization(nlp):
"""Test that string information is retained across storage"""
- mykb = KnowledgeBase(nlp.vocab, entity_vector_length=1)
+ mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=1)
# adding entities
mykb.add_entity(entity="Q1", freq=27, entity_vector=[1])
@@ -553,7 +568,7 @@ def test_vocab_serialization(nlp):
with make_tempdir() as d:
mykb.to_disk(d / "kb")
- kb_new_vocab = KnowledgeBase(Vocab(), entity_vector_length=1)
+ kb_new_vocab = InMemoryLookupKB(Vocab(), entity_vector_length=1)
kb_new_vocab.from_disk(d / "kb")
candidates = kb_new_vocab.get_alias_candidates("adam")
@@ -569,7 +584,7 @@ def test_vocab_serialization(nlp):
def test_append_alias(nlp):
"""Test that we can append additional alias-entity pairs"""
- mykb = KnowledgeBase(nlp.vocab, entity_vector_length=1)
+ mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=1)
# adding entities
mykb.add_entity(entity="Q1", freq=27, entity_vector=[1])
@@ -600,7 +615,7 @@ def test_append_alias(nlp):
@pytest.mark.filterwarnings("ignore:\\[W036")
def test_append_invalid_alias(nlp):
"""Test that append an alias will throw an error if prior probs are exceeding 1"""
- mykb = KnowledgeBase(nlp.vocab, entity_vector_length=1)
+ mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=1)
# adding entities
mykb.add_entity(entity="Q1", freq=27, entity_vector=[1])
@@ -622,7 +637,7 @@ def test_preserving_links_asdoc(nlp):
vector_length = 1
def create_kb(vocab):
- mykb = KnowledgeBase(vocab, entity_vector_length=vector_length)
+ mykb = InMemoryLookupKB(vocab, entity_vector_length=vector_length)
# adding entities
mykb.add_entity(entity="Q1", freq=19, entity_vector=[1])
mykb.add_entity(entity="Q2", freq=8, entity_vector=[1])
@@ -702,7 +717,11 @@ TRAIN_DATA = [
("Russ Cochran was a member of University of Kentucky's golf team.",
{"links": {(0, 12): {"Q7381115": 0.0, "Q2146908": 1.0}},
"entities": [(0, 12, "PERSON"), (43, 51, "LOC")],
- "sent_starts": [1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]})
+ "sent_starts": [1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}),
+ # having a blank instance shouldn't break things
+ ("The weather is nice today.",
+ {"links": {}, "entities": [],
+ "sent_starts": [1, -1, 0, 0, 0, 0]})
]
GOLD_entities = ["Q2146908", "Q7381115", "Q7381115", "Q2146908"]
# fmt: on
@@ -724,7 +743,7 @@ def test_overfitting_IO():
# create artificial KB - assign same prior weight to the two russ cochran's
# Q2146908 (Russ Cochran): American golfer
# Q7381115 (Russ Cochran): publisher
- mykb = KnowledgeBase(vocab, entity_vector_length=vector_length)
+ mykb = InMemoryLookupKB(vocab, entity_vector_length=vector_length)
mykb.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3])
mykb.add_entity(entity="Q7381115", freq=12, entity_vector=[9, 1, -7])
mykb.add_alias(
@@ -806,7 +825,7 @@ def test_kb_serialization():
kb_dir = tmp_dir / "kb"
nlp1 = English()
assert "Q2146908" not in nlp1.vocab.strings
- mykb = KnowledgeBase(nlp1.vocab, entity_vector_length=vector_length)
+ mykb = InMemoryLookupKB(nlp1.vocab, entity_vector_length=vector_length)
mykb.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3])
mykb.add_alias(alias="Russ Cochran", entities=["Q2146908"], probabilities=[0.8])
assert "Q2146908" in nlp1.vocab.strings
@@ -829,7 +848,7 @@ def test_kb_serialization():
def test_kb_pickle():
# Test that the KB can be pickled
nlp = English()
- kb_1 = KnowledgeBase(nlp.vocab, entity_vector_length=3)
+ kb_1 = InMemoryLookupKB(nlp.vocab, entity_vector_length=3)
kb_1.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3])
assert not kb_1.contains_alias("Russ Cochran")
kb_1.add_alias(alias="Russ Cochran", entities=["Q2146908"], probabilities=[0.8])
@@ -843,7 +862,7 @@ def test_kb_pickle():
def test_nel_pickle():
# Test that a pipeline with an EL component can be pickled
def create_kb(vocab):
- kb = KnowledgeBase(vocab, entity_vector_length=3)
+ kb = InMemoryLookupKB(vocab, entity_vector_length=3)
kb.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3])
kb.add_alias(alias="Russ Cochran", entities=["Q2146908"], probabilities=[0.8])
return kb
@@ -865,7 +884,7 @@ def test_nel_pickle():
def test_kb_to_bytes():
# Test that the KB's to_bytes method works correctly
nlp = English()
- kb_1 = KnowledgeBase(nlp.vocab, entity_vector_length=3)
+ kb_1 = InMemoryLookupKB(nlp.vocab, entity_vector_length=3)
kb_1.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3])
kb_1.add_entity(entity="Q66", freq=9, entity_vector=[1, 2, 3])
kb_1.add_alias(alias="Russ Cochran", entities=["Q2146908"], probabilities=[0.8])
@@ -875,7 +894,7 @@ def test_kb_to_bytes():
)
assert kb_1.contains_alias("Russ Cochran")
kb_bytes = kb_1.to_bytes()
- kb_2 = KnowledgeBase(nlp.vocab, entity_vector_length=3)
+ kb_2 = InMemoryLookupKB(nlp.vocab, entity_vector_length=3)
assert not kb_2.contains_alias("Russ Cochran")
kb_2 = kb_2.from_bytes(kb_bytes)
# check that both KBs are exactly the same
@@ -898,7 +917,7 @@ def test_kb_to_bytes():
def test_nel_to_bytes():
# Test that a pipeline with an EL component can be converted to bytes
def create_kb(vocab):
- kb = KnowledgeBase(vocab, entity_vector_length=3)
+ kb = InMemoryLookupKB(vocab, entity_vector_length=3)
kb.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3])
kb.add_alias(alias="Russ Cochran", entities=["Q2146908"], probabilities=[0.8])
return kb
@@ -988,7 +1007,7 @@ def test_legacy_architectures(name, config):
train_examples.append(Example.from_dict(doc, annotation))
def create_kb(vocab):
- mykb = KnowledgeBase(vocab, entity_vector_length=vector_length)
+ mykb = InMemoryLookupKB(vocab, entity_vector_length=vector_length)
mykb.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3])
mykb.add_entity(entity="Q7381115", freq=12, entity_vector=[9, 1, -7])
mykb.add_alias(
@@ -1055,7 +1074,7 @@ def test_no_gold_ents(patterns):
def create_kb(vocab):
# create artificial KB
- mykb = KnowledgeBase(vocab, entity_vector_length=vector_length)
+ mykb = InMemoryLookupKB(vocab, entity_vector_length=vector_length)
mykb.add_entity(entity="Q613241", freq=12, entity_vector=[6, -4, 3])
mykb.add_alias("Kirby", ["Q613241"], [0.9])
# Placeholder
@@ -1105,7 +1124,7 @@ def test_tokenization_mismatch():
def create_kb(vocab):
# create placeholder KB
- mykb = KnowledgeBase(vocab, entity_vector_length=vector_length)
+ mykb = InMemoryLookupKB(vocab, entity_vector_length=vector_length)
mykb.add_entity(entity="Q613241", freq=12, entity_vector=[6, -4, 3])
mykb.add_alias("Kirby", ["Q613241"], [0.9])
return mykb
@@ -1122,6 +1141,12 @@ def test_tokenization_mismatch():
nlp.evaluate(train_examples)
+def test_abstract_kb_instantiation():
+ """Test whether instantiation of abstract KB base class fails."""
+ with pytest.raises(TypeError):
+ KnowledgeBase(None, 3)
+
+
# fmt: off
@pytest.mark.parametrize(
"meet_threshold,config",
@@ -1152,7 +1177,7 @@ def test_threshold(meet_threshold: bool, config: Dict[str, Any]):
def create_kb(vocab):
# create artificial KB
- mykb = KnowledgeBase(vocab, entity_vector_length=3)
+ mykb = InMemoryLookupKB(vocab, entity_vector_length=3)
mykb.add_entity(entity=entity_id, freq=12, entity_vector=[6, -4, 3])
mykb.add_alias(
alias="Mahler",
@@ -1194,7 +1219,7 @@ def test_save_activations():
# create artificial KB - assign same prior weight to the two russ cochran's
# Q2146908 (Russ Cochran): American golfer
# Q7381115 (Russ Cochran): publisher
- mykb = KnowledgeBase(vocab, entity_vector_length=vector_length)
+ mykb = InMemoryLookupKB(vocab, entity_vector_length=vector_length)
mykb.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3])
mykb.add_entity(entity="Q7381115", freq=12, entity_vector=[9, 1, -7])
mykb.add_alias(
@@ -1240,3 +1265,18 @@ def test_save_activations():
assert scores.data.shape == (2, 1)
assert scores.data.dtype == "float32"
assert scores.lengths.shape == (1,)
+
+
+def test_span_maker_forward_with_empty():
+ """The forward pass of the span maker may have a doc with no entities."""
+ nlp = English()
+ doc1 = nlp("a b c")
+ ent = doc1[0:1]
+ ent.label_ = "X"
+ doc1.ents = [ent]
+ # no entities
+ doc2 = nlp("x y z")
+
+ # just to get a model
+ span_maker = build_span_maker()
+ span_maker([doc1, doc2], False)
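Every concrete KB constructed in these tests moves from `KnowledgeBase` (now an abstract base class, per `test_abstract_kb_instantiation`) to `InMemoryLookupKB`. Distilled from the tests above, the minimal setup looks like this:

from spacy.kb import InMemoryLookupKB
from spacy.vocab import Vocab

kb = InMemoryLookupKB(Vocab(), entity_vector_length=3)
kb.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3])
kb.add_alias(alias="Russ Cochran", entities=["Q2146908"], probabilities=[0.8])
assert kb.contains_alias("Russ Cochran")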
diff --git a/spacy/tests/pipeline/test_entity_ruler.py b/spacy/tests/pipeline/test_entity_ruler.py
index 6851e2a7c..440849e84 100644
--- a/spacy/tests/pipeline/test_entity_ruler.py
+++ b/spacy/tests/pipeline/test_entity_ruler.py
@@ -4,7 +4,7 @@ from spacy import registry
from spacy.tokens import Doc, Span
from spacy.language import Language
from spacy.lang.en import English
-from spacy.pipeline import EntityRuler, EntityRecognizer, merge_entities
+from spacy.pipeline import EntityRecognizer, merge_entities
from spacy.pipeline import SpanRuler
from spacy.pipeline.ner import DEFAULT_NER_MODEL
from spacy.errors import MatchPatternError
@@ -12,8 +12,6 @@ from spacy.tests.util import make_tempdir
from thinc.api import NumpyOps, get_current_ops
-ENTITY_RULERS = ["entity_ruler", "future_entity_ruler"]
-
@pytest.fixture
def nlp():
@@ -40,13 +38,12 @@ def add_ent_component(doc):
@pytest.mark.issue(3345)
-@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
-def test_issue3345(entity_ruler_factory):
+def test_issue3345():
"""Test case where preset entity crosses sentence boundary."""
nlp = English()
doc = Doc(nlp.vocab, words=["I", "live", "in", "New", "York"])
doc[4].is_sent_start = True
- ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
+ ruler = nlp.add_pipe("entity_ruler")
ruler.add_patterns([{"label": "GPE", "pattern": "New York"}])
cfg = {"model": DEFAULT_NER_MODEL}
model = registry.resolve(cfg, validate=True)["model"]
@@ -65,15 +62,14 @@ def test_issue3345(entity_ruler_factory):
@pytest.mark.issue(4849)
-@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
-def test_issue4849(entity_ruler_factory):
+def test_issue4849():
nlp = English()
patterns = [
{"label": "PERSON", "pattern": "joe biden", "id": "joe-biden"},
{"label": "PERSON", "pattern": "bernie sanders", "id": "bernie-sanders"},
]
ruler = nlp.add_pipe(
- entity_ruler_factory,
+ "entity_ruler",
name="entity_ruler",
config={"phrase_matcher_attr": "LOWER"},
)
@@ -96,11 +92,10 @@ def test_issue4849(entity_ruler_factory):
@pytest.mark.issue(5918)
-@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
-def test_issue5918(entity_ruler_factory):
+def test_issue5918():
# Test edge case when merging entities.
nlp = English()
- ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
+ ruler = nlp.add_pipe("entity_ruler")
patterns = [
{"label": "ORG", "pattern": "Digicon Inc"},
{"label": "ORG", "pattern": "Rotan Mosle Inc's"},
@@ -125,10 +120,9 @@ def test_issue5918(entity_ruler_factory):
@pytest.mark.issue(8168)
-@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
-def test_issue8168(entity_ruler_factory):
+def test_issue8168():
nlp = English()
- ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
+ ruler = nlp.add_pipe("entity_ruler")
patterns = [
{"label": "ORG", "pattern": "Apple"},
{
@@ -148,12 +142,9 @@ def test_issue8168(entity_ruler_factory):
@pytest.mark.issue(8216)
-@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
-def test_entity_ruler_fix8216(nlp, patterns, entity_ruler_factory):
+def test_entity_ruler_fix8216(nlp, patterns):
"""Test that patterns don't get added excessively."""
- ruler = nlp.add_pipe(
- entity_ruler_factory, name="entity_ruler", config={"validate": True}
- )
+ ruler = nlp.add_pipe("entity_ruler", config={"validate": True})
ruler.add_patterns(patterns)
pattern_count = sum(len(mm) for mm in ruler.matcher._patterns.values())
assert pattern_count > 0
@@ -162,16 +153,15 @@ def test_entity_ruler_fix8216(nlp, patterns, entity_ruler_factory):
assert after_count == pattern_count
-@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
-def test_entity_ruler_init(nlp, patterns, entity_ruler_factory):
- ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
+def test_entity_ruler_init(nlp, patterns):
+ ruler = nlp.add_pipe("entity_ruler")
ruler.add_patterns(patterns)
assert len(ruler) == len(patterns)
assert len(ruler.labels) == 4
assert "HELLO" in ruler
assert "BYE" in ruler
nlp.remove_pipe("entity_ruler")
- ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
+ ruler = nlp.add_pipe("entity_ruler")
ruler.add_patterns(patterns)
doc = nlp("hello world bye bye")
assert len(doc.ents) == 2
@@ -179,23 +169,21 @@ def test_entity_ruler_init(nlp, patterns, entity_ruler_factory):
assert doc.ents[1].label_ == "BYE"
-@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
-def test_entity_ruler_no_patterns_warns(nlp, entity_ruler_factory):
- ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
+def test_entity_ruler_no_patterns_warns(nlp):
+ ruler = nlp.add_pipe("entity_ruler")
assert len(ruler) == 0
assert len(ruler.labels) == 0
nlp.remove_pipe("entity_ruler")
- nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
+ nlp.add_pipe("entity_ruler")
assert nlp.pipe_names == ["entity_ruler"]
with pytest.warns(UserWarning):
doc = nlp("hello world bye bye")
assert len(doc.ents) == 0
-@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
-def test_entity_ruler_init_patterns(nlp, patterns, entity_ruler_factory):
+def test_entity_ruler_init_patterns(nlp, patterns):
# initialize with patterns
- ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
+ ruler = nlp.add_pipe("entity_ruler")
assert len(ruler.labels) == 0
ruler.initialize(lambda: [], patterns=patterns)
assert len(ruler.labels) == 4
@@ -207,7 +195,7 @@ def test_entity_ruler_init_patterns(nlp, patterns, entity_ruler_factory):
nlp.config["initialize"]["components"]["entity_ruler"] = {
"patterns": {"@misc": "entity_ruler_patterns"}
}
- ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
+ ruler = nlp.add_pipe("entity_ruler")
assert len(ruler.labels) == 0
nlp.initialize()
assert len(ruler.labels) == 4
@@ -216,20 +204,18 @@ def test_entity_ruler_init_patterns(nlp, patterns, entity_ruler_factory):
assert doc.ents[1].label_ == "BYE"
-@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
-def test_entity_ruler_init_clear(nlp, patterns, entity_ruler_factory):
+def test_entity_ruler_init_clear(nlp, patterns):
"""Test that initialization clears patterns."""
- ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
+ ruler = nlp.add_pipe("entity_ruler")
ruler.add_patterns(patterns)
assert len(ruler.labels) == 4
ruler.initialize(lambda: [])
assert len(ruler.labels) == 0
-@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
-def test_entity_ruler_clear(nlp, patterns, entity_ruler_factory):
+def test_entity_ruler_clear(nlp, patterns):
"""Test that initialization clears patterns."""
- ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
+ ruler = nlp.add_pipe("entity_ruler")
ruler.add_patterns(patterns)
assert len(ruler.labels) == 4
doc = nlp("hello world")
@@ -241,9 +227,8 @@ def test_entity_ruler_clear(nlp, patterns, entity_ruler_factory):
assert len(doc.ents) == 0
-@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
-def test_entity_ruler_existing(nlp, patterns, entity_ruler_factory):
- ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
+def test_entity_ruler_existing(nlp, patterns):
+ ruler = nlp.add_pipe("entity_ruler")
ruler.add_patterns(patterns)
nlp.add_pipe("add_ent", before="entity_ruler")
doc = nlp("OH HELLO WORLD bye bye")
@@ -252,11 +237,8 @@ def test_entity_ruler_existing(nlp, patterns, entity_ruler_factory):
assert doc.ents[1].label_ == "BYE"
-@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
-def test_entity_ruler_existing_overwrite(nlp, patterns, entity_ruler_factory):
- ruler = nlp.add_pipe(
- entity_ruler_factory, name="entity_ruler", config={"overwrite_ents": True}
- )
+def test_entity_ruler_existing_overwrite(nlp, patterns):
+ ruler = nlp.add_pipe("entity_ruler", config={"overwrite_ents": True})
ruler.add_patterns(patterns)
nlp.add_pipe("add_ent", before="entity_ruler")
doc = nlp("OH HELLO WORLD bye bye")
@@ -266,11 +248,8 @@ def test_entity_ruler_existing_overwrite(nlp, patterns, entity_ruler_factory):
assert doc.ents[1].label_ == "BYE"
-@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
-def test_entity_ruler_existing_complex(nlp, patterns, entity_ruler_factory):
- ruler = nlp.add_pipe(
- entity_ruler_factory, name="entity_ruler", config={"overwrite_ents": True}
- )
+def test_entity_ruler_existing_complex(nlp, patterns):
+ ruler = nlp.add_pipe("entity_ruler", config={"overwrite_ents": True})
ruler.add_patterns(patterns)
nlp.add_pipe("add_ent", before="entity_ruler")
doc = nlp("foo foo bye bye")
@@ -281,11 +260,8 @@ def test_entity_ruler_existing_complex(nlp, patterns, entity_ruler_factory):
assert len(doc.ents[1]) == 2
-@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
-def test_entity_ruler_entity_id(nlp, patterns, entity_ruler_factory):
- ruler = nlp.add_pipe(
- entity_ruler_factory, name="entity_ruler", config={"overwrite_ents": True}
- )
+def test_entity_ruler_entity_id(nlp, patterns):
+ ruler = nlp.add_pipe("entity_ruler", config={"overwrite_ents": True})
ruler.add_patterns(patterns)
doc = nlp("Apple is a technology company")
assert len(doc.ents) == 1
@@ -293,26 +269,23 @@ def test_entity_ruler_entity_id(nlp, patterns, entity_ruler_factory):
assert doc.ents[0].ent_id_ == "a1"
-@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
-def test_entity_ruler_cfg_ent_id_sep(nlp, patterns, entity_ruler_factory):
+def test_entity_ruler_cfg_ent_id_sep(nlp, patterns):
config = {"overwrite_ents": True, "ent_id_sep": "**"}
- ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler", config=config)
+ ruler = nlp.add_pipe("entity_ruler", config=config)
ruler.add_patterns(patterns)
doc = nlp("Apple is a technology company")
- if isinstance(ruler, EntityRuler):
- assert "TECH_ORG**a1" in ruler.phrase_patterns
assert len(doc.ents) == 1
assert doc.ents[0].label_ == "TECH_ORG"
assert doc.ents[0].ent_id_ == "a1"
-@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
-def test_entity_ruler_serialize_bytes(nlp, patterns, entity_ruler_factory):
- ruler = EntityRuler(nlp, patterns=patterns)
+def test_entity_ruler_serialize_bytes(nlp, patterns):
+ ruler = nlp.add_pipe("entity_ruler")
+ ruler.add_patterns(patterns)
assert len(ruler) == len(patterns)
assert len(ruler.labels) == 4
ruler_bytes = ruler.to_bytes()
- new_ruler = EntityRuler(nlp)
+ new_ruler = nlp.add_pipe("entity_ruler", name="new_ruler")
assert len(new_ruler) == 0
assert len(new_ruler.labels) == 0
new_ruler = new_ruler.from_bytes(ruler_bytes)
@@ -324,28 +297,27 @@ def test_entity_ruler_serialize_bytes(nlp, patterns, entity_ruler_factory):
assert sorted(new_ruler.labels) == sorted(ruler.labels)
-@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
-def test_entity_ruler_serialize_phrase_matcher_attr_bytes(
- nlp, patterns, entity_ruler_factory
-):
- ruler = EntityRuler(nlp, phrase_matcher_attr="LOWER", patterns=patterns)
+def test_entity_ruler_serialize_phrase_matcher_attr_bytes(nlp, patterns):
+ ruler = nlp.add_pipe("entity_ruler", config={"phrase_matcher_attr": "LOWER"})
+ ruler.add_patterns(patterns)
assert len(ruler) == len(patterns)
assert len(ruler.labels) == 4
ruler_bytes = ruler.to_bytes()
- new_ruler = EntityRuler(nlp)
+ new_ruler = nlp.add_pipe(
+ "entity_ruler", name="new_ruler", config={"phrase_matcher_attr": "LOWER"}
+ )
assert len(new_ruler) == 0
assert len(new_ruler.labels) == 0
- assert new_ruler.phrase_matcher_attr is None
new_ruler = new_ruler.from_bytes(ruler_bytes)
assert len(new_ruler) == len(patterns)
assert len(new_ruler.labels) == 4
- assert new_ruler.phrase_matcher_attr == "LOWER"
-@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
-def test_entity_ruler_validate(nlp, entity_ruler_factory):
- ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
- validated_ruler = EntityRuler(nlp, validate=True)
+def test_entity_ruler_validate(nlp):
+ ruler = nlp.add_pipe("entity_ruler")
+ validated_ruler = nlp.add_pipe(
+ "entity_ruler", name="validated_ruler", config={"validate": True}
+ )
valid_pattern = {"label": "HELLO", "pattern": [{"LOWER": "HELLO"}]}
invalid_pattern = {"label": "HELLO", "pattern": [{"ASDF": "HELLO"}]}
@@ -362,16 +334,15 @@ def test_entity_ruler_validate(nlp, entity_ruler_factory):
validated_ruler.add_patterns([invalid_pattern])
-@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
-def test_entity_ruler_properties(nlp, patterns, entity_ruler_factory):
- ruler = EntityRuler(nlp, patterns=patterns, overwrite_ents=True)
+def test_entity_ruler_properties(nlp, patterns):
+ ruler = nlp.add_pipe("entity_ruler", config={"overwrite_ents": True})
+ ruler.add_patterns(patterns)
assert sorted(ruler.labels) == sorted(["HELLO", "BYE", "COMPLEX", "TECH_ORG"])
- assert sorted(ruler.ent_ids) == ["a1", "a2"]
+ assert sorted(ruler.ids) == ["a1", "a2"]
-@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
-def test_entity_ruler_overlapping_spans(nlp, entity_ruler_factory):
- ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
+def test_entity_ruler_overlapping_spans(nlp):
+ ruler = nlp.add_pipe("entity_ruler")
patterns = [
{"label": "FOOBAR", "pattern": "foo bar"},
{"label": "BARBAZ", "pattern": "bar baz"},
@@ -383,14 +354,13 @@ def test_entity_ruler_overlapping_spans(nlp, entity_ruler_factory):
@pytest.mark.parametrize("n_process", [1, 2])
-@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
-def test_entity_ruler_multiprocessing(nlp, n_process, entity_ruler_factory):
+def test_entity_ruler_multiprocessing(nlp, n_process):
if isinstance(get_current_ops, NumpyOps) or n_process < 2:
texts = ["I enjoy eating Pizza Hut pizza."]
patterns = [{"label": "FASTFOOD", "pattern": "Pizza Hut", "id": "1234"}]
- ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
+ ruler = nlp.add_pipe("entity_ruler")
ruler.add_patterns(patterns)
for doc in nlp.pipe(texts, n_process=2):
@@ -398,9 +368,8 @@ def test_entity_ruler_multiprocessing(nlp, n_process, entity_ruler_factory):
assert ent.ent_id_ == "1234"
-@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
-def test_entity_ruler_serialize_jsonl(nlp, patterns, entity_ruler_factory):
- ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
+def test_entity_ruler_serialize_jsonl(nlp, patterns):
+ ruler = nlp.add_pipe("entity_ruler")
ruler.add_patterns(patterns)
with make_tempdir() as d:
ruler.to_disk(d / "test_ruler.jsonl")
@@ -409,9 +378,8 @@ def test_entity_ruler_serialize_jsonl(nlp, patterns, entity_ruler_factory):
ruler.from_disk(d / "non_existing.jsonl") # read from a bad jsonl file
-@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
-def test_entity_ruler_serialize_dir(nlp, patterns, entity_ruler_factory):
- ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
+def test_entity_ruler_serialize_dir(nlp, patterns):
+ ruler = nlp.add_pipe("entity_ruler")
ruler.add_patterns(patterns)
with make_tempdir() as d:
ruler.to_disk(d / "test_ruler")
@@ -420,9 +388,8 @@ def test_entity_ruler_serialize_dir(nlp, patterns, entity_ruler_factory):
ruler.from_disk(d / "non_existing_dir") # read from a bad directory
-@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
-def test_entity_ruler_remove_basic(nlp, entity_ruler_factory):
- ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
+def test_entity_ruler_remove_basic(nlp):
+ ruler = nlp.add_pipe("entity_ruler")
patterns = [
{"label": "PERSON", "pattern": "Dina", "id": "dina"},
{"label": "ORG", "pattern": "ACME", "id": "acme"},
@@ -432,24 +399,16 @@ def test_entity_ruler_remove_basic(nlp, entity_ruler_factory):
doc = nlp("Dina went to school")
assert len(ruler.patterns) == 3
assert len(doc.ents) == 1
- if isinstance(ruler, EntityRuler):
- assert "PERSON||dina" in ruler.phrase_matcher
assert doc.ents[0].label_ == "PERSON"
assert doc.ents[0].text == "Dina"
- if isinstance(ruler, EntityRuler):
- ruler.remove("dina")
- else:
- ruler.remove_by_id("dina")
+ ruler.remove_by_id("dina")
doc = nlp("Dina went to school")
assert len(doc.ents) == 0
- if isinstance(ruler, EntityRuler):
- assert "PERSON||dina" not in ruler.phrase_matcher
assert len(ruler.patterns) == 2
-@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
-def test_entity_ruler_remove_same_id_multiple_patterns(nlp, entity_ruler_factory):
- ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
+def test_entity_ruler_remove_same_id_multiple_patterns(nlp):
+ ruler = nlp.add_pipe("entity_ruler")
patterns = [
{"label": "PERSON", "pattern": "Dina", "id": "dina"},
{"label": "ORG", "pattern": "DinaCorp", "id": "dina"},
@@ -458,25 +417,15 @@ def test_entity_ruler_remove_same_id_multiple_patterns(nlp, entity_ruler_factory
ruler.add_patterns(patterns)
doc = nlp("Dina founded DinaCorp and ACME.")
assert len(ruler.patterns) == 3
- if isinstance(ruler, EntityRuler):
- assert "PERSON||dina" in ruler.phrase_matcher
- assert "ORG||dina" in ruler.phrase_matcher
assert len(doc.ents) == 3
- if isinstance(ruler, EntityRuler):
- ruler.remove("dina")
- else:
- ruler.remove_by_id("dina")
+ ruler.remove_by_id("dina")
doc = nlp("Dina founded DinaCorp and ACME.")
assert len(ruler.patterns) == 1
- if isinstance(ruler, EntityRuler):
- assert "PERSON||dina" not in ruler.phrase_matcher
- assert "ORG||dina" not in ruler.phrase_matcher
assert len(doc.ents) == 1
-@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
-def test_entity_ruler_remove_nonexisting_pattern(nlp, entity_ruler_factory):
- ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
+def test_entity_ruler_remove_nonexisting_pattern(nlp):
+ ruler = nlp.add_pipe("entity_ruler")
patterns = [
{"label": "PERSON", "pattern": "Dina", "id": "dina"},
{"label": "ORG", "pattern": "ACME", "id": "acme"},
@@ -491,9 +440,8 @@ def test_entity_ruler_remove_nonexisting_pattern(nlp, entity_ruler_factory):
ruler.remove_by_id("nepattern")
-@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
-def test_entity_ruler_remove_several_patterns(nlp, entity_ruler_factory):
- ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
+def test_entity_ruler_remove_several_patterns(nlp):
+ ruler = nlp.add_pipe("entity_ruler")
patterns = [
{"label": "PERSON", "pattern": "Dina", "id": "dina"},
{"label": "ORG", "pattern": "ACME", "id": "acme"},
@@ -507,27 +455,20 @@ def test_entity_ruler_remove_several_patterns(nlp, entity_ruler_factory):
assert doc.ents[0].text == "Dina"
assert doc.ents[1].label_ == "ORG"
assert doc.ents[1].text == "ACME"
- if isinstance(ruler, EntityRuler):
- ruler.remove("dina")
- else:
- ruler.remove_by_id("dina")
+ ruler.remove_by_id("dina")
doc = nlp("Dina founded her company ACME")
assert len(ruler.patterns) == 2
assert len(doc.ents) == 1
assert doc.ents[0].label_ == "ORG"
assert doc.ents[0].text == "ACME"
- if isinstance(ruler, EntityRuler):
- ruler.remove("acme")
- else:
- ruler.remove_by_id("acme")
+ ruler.remove_by_id("acme")
doc = nlp("Dina founded her company ACME")
assert len(ruler.patterns) == 1
assert len(doc.ents) == 0
-@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
-def test_entity_ruler_remove_patterns_in_a_row(nlp, entity_ruler_factory):
- ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
+def test_entity_ruler_remove_patterns_in_a_row(nlp):
+ ruler = nlp.add_pipe("entity_ruler")
patterns = [
{"label": "PERSON", "pattern": "Dina", "id": "dina"},
{"label": "ORG", "pattern": "ACME", "id": "acme"},
@@ -543,21 +484,15 @@ def test_entity_ruler_remove_patterns_in_a_row(nlp, entity_ruler_factory):
assert doc.ents[1].text == "ACME"
assert doc.ents[2].label_ == "DATE"
assert doc.ents[2].text == "her birthday"
- if isinstance(ruler, EntityRuler):
- ruler.remove("dina")
- ruler.remove("acme")
- ruler.remove("bday")
- else:
- ruler.remove_by_id("dina")
- ruler.remove_by_id("acme")
- ruler.remove_by_id("bday")
+ ruler.remove_by_id("dina")
+ ruler.remove_by_id("acme")
+ ruler.remove_by_id("bday")
doc = nlp("Dina went to school")
assert len(doc.ents) == 0
-@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
-def test_entity_ruler_remove_all_patterns(nlp, entity_ruler_factory):
- ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
+def test_entity_ruler_remove_all_patterns(nlp):
+ ruler = nlp.add_pipe("entity_ruler")
patterns = [
{"label": "PERSON", "pattern": "Dina", "id": "dina"},
{"label": "ORG", "pattern": "ACME", "id": "acme"},
@@ -565,29 +500,19 @@ def test_entity_ruler_remove_all_patterns(nlp, entity_ruler_factory):
]
ruler.add_patterns(patterns)
assert len(ruler.patterns) == 3
- if isinstance(ruler, EntityRuler):
- ruler.remove("dina")
- else:
- ruler.remove_by_id("dina")
+ ruler.remove_by_id("dina")
assert len(ruler.patterns) == 2
- if isinstance(ruler, EntityRuler):
- ruler.remove("acme")
- else:
- ruler.remove_by_id("acme")
+ ruler.remove_by_id("acme")
assert len(ruler.patterns) == 1
- if isinstance(ruler, EntityRuler):
- ruler.remove("bday")
- else:
- ruler.remove_by_id("bday")
+ ruler.remove_by_id("bday")
assert len(ruler.patterns) == 0
with pytest.warns(UserWarning):
doc = nlp("Dina founded her company ACME on her birthday")
assert len(doc.ents) == 0
-@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS)
-def test_entity_ruler_remove_and_add(nlp, entity_ruler_factory):
- ruler = nlp.add_pipe(entity_ruler_factory, name="entity_ruler")
+def test_entity_ruler_remove_and_add(nlp):
+ ruler = nlp.add_pipe("entity_ruler")
patterns = [{"label": "DATE", "pattern": "last time"}]
ruler.add_patterns(patterns)
doc = ruler(
@@ -608,10 +533,7 @@ def test_entity_ruler_remove_and_add(nlp, entity_ruler_factory):
assert doc.ents[0].text == "last time"
assert doc.ents[1].label_ == "DATE"
assert doc.ents[1].text == "this time"
- if isinstance(ruler, EntityRuler):
- ruler.remove("ttime")
- else:
- ruler.remove_by_id("ttime")
+ ruler.remove_by_id("ttime")
doc = ruler(
nlp.make_doc("I saw him last time we met, this time he brought some flowers")
)
@@ -634,10 +556,7 @@ def test_entity_ruler_remove_and_add(nlp, entity_ruler_factory):
)
assert len(ruler.patterns) == 3
assert len(doc.ents) == 3
- if isinstance(ruler, EntityRuler):
- ruler.remove("ttime")
- else:
- ruler.remove_by_id("ttime")
+ ruler.remove_by_id("ttime")
doc = ruler(
nlp.make_doc(
"I saw him last time we met, this time he brought some flowers, another time some chocolate."
diff --git a/spacy/tests/pipeline/test_pipe_methods.py b/spacy/tests/pipeline/test_pipe_methods.py
index b946061f6..9b9786f04 100644
--- a/spacy/tests/pipeline/test_pipe_methods.py
+++ b/spacy/tests/pipeline/test_pipe_methods.py
@@ -529,17 +529,6 @@ def test_pipe_label_data_no_labels(pipe):
assert "labels" not in get_arg_names(initialize)
-def test_warning_pipe_begin_training():
- with pytest.warns(UserWarning, match="begin_training"):
-
- class IncompatPipe(TrainablePipe):
- def __init__(self):
- ...
-
- def begin_training(*args, **kwargs):
- ...
-
-
def test_pipe_methods_initialize():
"""Test that the [initialize] config reflects the components correctly."""
nlp = Language()
@@ -605,10 +594,33 @@ def test_update_with_annotates():
assert results[component] == ""
-def test_load_disable_enable() -> None:
- """
- Tests spacy.load() with dis-/enabling components.
- """
+@pytest.mark.issue(11443)
+def test_enable_disable_conflict_with_config():
+ """Test conflict between enable/disable w.r.t. `nlp.disabled` set in the config."""
+ nlp = English()
+ nlp.add_pipe("tagger")
+ nlp.add_pipe("senter")
+ nlp.add_pipe("sentencizer")
+
+ with make_tempdir() as tmp_dir:
+ nlp.to_disk(tmp_dir)
+ # Expected to succeed, as config and arguments do not conflict.
+ assert spacy.load(
+ tmp_dir, enable=["tagger"], config={"nlp": {"disabled": ["senter"]}}
+ ).disabled == ["senter", "sentencizer"]
+ # Expected to succeed without warning due to the lack of a conflicting config option.
+ spacy.load(tmp_dir, enable=["tagger"])
+ # Expected to fail due to conflict between enable and disabled.
+ with pytest.raises(ValueError):
+ spacy.load(
+ tmp_dir,
+ enable=["senter"],
+ config={"nlp": {"disabled": ["senter", "tagger"]}},
+ )
+
+
+def test_load_disable_enable():
+ """Tests spacy.load() with dis-/enabling components."""
base_nlp = English()
for pipe in ("sentencizer", "tagger", "parser"):
diff --git a/spacy/tests/pipeline/test_spancat.py b/spacy/tests/pipeline/test_spancat.py
index 4fb26c7e7..da9bffbc8 100644
--- a/spacy/tests/pipeline/test_spancat.py
+++ b/spacy/tests/pipeline/test_spancat.py
@@ -1,7 +1,7 @@
import pytest
import numpy
from numpy.testing import assert_array_equal, assert_almost_equal
-from thinc.api import get_current_ops, Ragged
+from thinc.api import get_current_ops, Ragged, fix_random_seed
from spacy import util
from spacy.lang.en import English
@@ -9,7 +9,7 @@ from spacy.language import Language
from spacy.tokens import SpanGroup
from spacy.tokens.span_groups import SpanGroups
from spacy.training import Example
-from spacy.util import fix_random_seed, registry, make_tempdir
+from spacy.util import registry, make_tempdir
OPS = get_current_ops()
@@ -372,24 +372,39 @@ def test_overfitting_IO_overlapping():
def test_zero_suggestions():
- # Test with a suggester that returns 0 suggestions
+ # Test with a suggester that can return 0 suggestions
- @registry.misc("test_zero_suggester")
- def make_zero_suggester():
- def zero_suggester(docs, *, ops=None):
+ @registry.misc("test_mixed_zero_suggester")
+ def make_mixed_zero_suggester():
+ def mixed_zero_suggester(docs, *, ops=None):
if ops is None:
ops = get_current_ops()
- return Ragged(
- ops.xp.zeros((0, 0), dtype="i"), ops.xp.zeros((len(docs),), dtype="i")
- )
+ spans = []
+ lengths = []
+ for doc in docs:
+ if len(doc) > 0 and len(doc) % 2 == 0:
+ spans.append((0, 1))
+ lengths.append(1)
+ else:
+ lengths.append(0)
+ spans = ops.asarray2i(spans)
+ lengths_array = ops.asarray1i(lengths)
+ if len(spans) > 0:
+ output = Ragged(ops.xp.vstack(spans), lengths_array)
+ else:
+ output = Ragged(ops.xp.zeros((0, 0), dtype="i"), lengths_array)
+ return output
- return zero_suggester
+ return mixed_zero_suggester
fix_random_seed(0)
nlp = English()
spancat = nlp.add_pipe(
"spancat",
- config={"suggester": {"@misc": "test_zero_suggester"}, "spans_key": SPAN_KEY},
+ config={
+ "suggester": {"@misc": "test_mixed_zero_suggester"},
+ "spans_key": SPAN_KEY,
+ },
)
train_examples = make_examples(nlp)
optimizer = nlp.initialize(get_examples=lambda: train_examples)
@@ -397,6 +412,16 @@ def test_zero_suggestions():
assert set(spancat.labels) == {"LOC", "PERSON"}
nlp.update(train_examples, sgd=optimizer)
+ # empty doc
+ nlp("")
+ # single doc with zero suggestions
+ nlp("one")
+ # single doc with one suggestion
+ nlp("two two")
+ # batch with mixed zero/one suggestions
+ list(nlp.pipe(["one", "two two", "three three three", "", "four four four four"]))
+ # batch with no suggestions
+ list(nlp.pipe(["", "one", "three three three"]))
def test_set_candidates():
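
A condensed sketch of the custom-suggester pattern used in test_zero_suggestions, assuming the same registry and Ragged APIs imported at the top of this module; the registered name is made up for illustration:

    from thinc.api import Ragged, get_current_ops
    from spacy.util import registry

    @registry.misc("example_zero_or_one_suggester")  # hypothetical name
    def make_suggester():
        def suggester(docs, *, ops=None):
            ops = ops or get_current_ops()
            # Suggest the first token of every non-empty doc, nothing otherwise.
            lengths = ops.asarray1i([1 if len(doc) else 0 for doc in docs])
            rows = [(0, 1) for doc in docs if len(doc)]
            if not rows:
                # No suggestions at all: an empty (0, 0) data array is still valid.
                return Ragged(ops.xp.zeros((0, 0), dtype="i"), lengths)
            return Ragged(ops.asarray2i(rows), lengths)
        return suggester
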
diff --git a/spacy/tests/pipeline/test_textcat.py b/spacy/tests/pipeline/test_textcat.py
index c1f61a3c0..942062d1d 100644
--- a/spacy/tests/pipeline/test_textcat.py
+++ b/spacy/tests/pipeline/test_textcat.py
@@ -361,6 +361,30 @@ def test_label_types(name):
nlp.initialize()
+@pytest.mark.parametrize(
+ "name,get_examples",
+ [
+ ("textcat", make_get_examples_single_label),
+ ("textcat_multilabel", make_get_examples_multi_label),
+ ],
+)
+def test_invalid_label_value(name, get_examples):
+ nlp = Language()
+ textcat = nlp.add_pipe(name)
+ example_getter = get_examples(nlp)
+
+ def invalid_examples():
+ # make one example with an invalid score
+ examples = example_getter()
+ ref = examples[0].reference
+ key = list(ref.cats.keys())[0]
+ ref.cats[key] = 2.0
+ return examples
+
+ with pytest.raises(ValueError):
+ nlp.initialize(get_examples=invalid_examples)
+
+
@pytest.mark.parametrize("name", ["textcat", "textcat_multilabel"])
def test_no_label(name):
nlp = Language()
@@ -815,8 +839,8 @@ def test_textcat_loss(multi_label: bool, expected_loss: float):
textcat = nlp.add_pipe("textcat_multilabel")
else:
textcat = nlp.add_pipe("textcat")
- textcat.initialize(lambda: train_examples)
assert isinstance(textcat, TextCategorizer)
+ textcat.initialize(lambda: train_examples)
scores = textcat.model.ops.asarray(
[[0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 1.0, 1.0]], dtype="f" # type: ignore
)
@@ -824,10 +848,10 @@ def test_textcat_loss(multi_label: bool, expected_loss: float):
assert loss == expected_loss
-def test_textcat_threshold():
+def test_textcat_multilabel_threshold():
# Ensure the scorer can be called with a different threshold
nlp = English()
- nlp.add_pipe("textcat")
+ nlp.add_pipe("textcat_multilabel")
train_examples = []
for text, annotations in TRAIN_DATA_SINGLE_LABEL:
@@ -850,7 +874,7 @@ def test_textcat_threshold():
)
pos_f = scores["cats_score"]
assert scores["cats_f_per_type"]["POSITIVE"]["r"] == 1.0
- assert pos_f > macro_f
+ assert pos_f >= macro_f
def test_textcat_multi_threshold():
@@ -910,3 +934,22 @@ def test_save_activations_multi():
doc = nlp("This is a test.")
assert list(doc.activations["textcat_multilabel"].keys()) == ["probabilities"]
assert doc.activations["textcat_multilabel"]["probabilities"].shape == (nO,)
+
+
+@pytest.mark.parametrize(
+ "component_name,scorer", [("textcat", "spacy.textcat_scorer.v1")]
+)
+def test_textcat_legacy_scorers(component_name, scorer):
+ """Check that legacy scorers are registered and produce the expected score
+ keys."""
+ nlp = English()
+ nlp.add_pipe(component_name, config={"scorer": {"@scorers": scorer}})
+
+ train_examples = []
+ for text, annotations in TRAIN_DATA_SINGLE_LABEL:
+ train_examples.append(Example.from_dict(nlp.make_doc(text), annotations))
+ nlp.initialize(get_examples=lambda: train_examples)
+
+ # score the model (it's not actually trained but that doesn't matter)
+ scores = nlp.evaluate(train_examples)
+ assert 0 <= scores["cats_score"] <= 1
diff --git a/spacy/tests/pipeline/test_tok2vec.py b/spacy/tests/pipeline/test_tok2vec.py
index 09c314c5a..ee62b1ab4 100644
--- a/spacy/tests/pipeline/test_tok2vec.py
+++ b/spacy/tests/pipeline/test_tok2vec.py
@@ -230,6 +230,97 @@ def test_tok2vec_listener_callback():
assert get_dX(Y) is not None
+def test_tok2vec_listener_overfitting():
+ """Test that a pipeline with a listener properly overfits, even if 'tok2vec' is in the annotating components"""
+ orig_config = Config().from_str(cfg_string)
+ nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True)
+ train_examples = []
+ for t in TRAIN_DATA:
+ train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
+ optimizer = nlp.initialize(get_examples=lambda: train_examples)
+
+ for i in range(50):
+ losses = {}
+ nlp.update(train_examples, sgd=optimizer, losses=losses, annotates=["tok2vec"])
+ assert losses["tagger"] < 0.00001
+
+ # test the trained model
+ test_text = "I like blue eggs"
+ doc = nlp(test_text)
+ assert doc[0].tag_ == "N"
+ assert doc[1].tag_ == "V"
+ assert doc[2].tag_ == "J"
+ assert doc[3].tag_ == "N"
+
+ # Also test the results are still the same after IO
+ with make_tempdir() as tmp_dir:
+ nlp.to_disk(tmp_dir)
+ nlp2 = util.load_model_from_path(tmp_dir)
+ doc2 = nlp2(test_text)
+ assert doc2[0].tag_ == "N"
+ assert doc2[1].tag_ == "V"
+ assert doc2[2].tag_ == "J"
+ assert doc2[3].tag_ == "N"
+
+
+def test_tok2vec_frozen_not_annotating():
+ """Test that a pipeline with a frozen tok2vec raises an error when the tok2vec is not annotating"""
+ orig_config = Config().from_str(cfg_string)
+ nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True)
+ train_examples = []
+ for t in TRAIN_DATA:
+ train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
+ optimizer = nlp.initialize(get_examples=lambda: train_examples)
+
+ for i in range(2):
+ losses = {}
+ with pytest.raises(
+ ValueError, match=r"the tok2vec embedding layer is not updated"
+ ):
+ nlp.update(
+ train_examples, sgd=optimizer, losses=losses, exclude=["tok2vec"]
+ )
+
+
+def test_tok2vec_frozen_overfitting():
+ """Test that a pipeline with a frozen & annotating tok2vec can still overfit"""
+ orig_config = Config().from_str(cfg_string)
+ nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True)
+ train_examples = []
+ for t in TRAIN_DATA:
+ train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
+ optimizer = nlp.initialize(get_examples=lambda: train_examples)
+
+ for i in range(100):
+ losses = {}
+ nlp.update(
+ train_examples,
+ sgd=optimizer,
+ losses=losses,
+ exclude=["tok2vec"],
+ annotates=["tok2vec"],
+ )
+ assert losses["tagger"] < 0.0001
+
+ # test the trained model
+ test_text = "I like blue eggs"
+ doc = nlp(test_text)
+ assert doc[0].tag_ == "N"
+ assert doc[1].tag_ == "V"
+ assert doc[2].tag_ == "J"
+ assert doc[3].tag_ == "N"
+
+ # Also test the results are still the same after IO
+ with make_tempdir() as tmp_dir:
+ nlp.to_disk(tmp_dir)
+ nlp2 = util.load_model_from_path(tmp_dir)
+ doc2 = nlp2(test_text)
+ assert doc2[0].tag_ == "N"
+ assert doc2[1].tag_ == "V"
+ assert doc2[2].tag_ == "J"
+ assert doc2[3].tag_ == "N"
+
+
def test_replace_listeners():
orig_config = Config().from_str(cfg_string)
nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True)
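
A compact sketch of the frozen-but-annotating tok2vec update pattern exercised by the three tests above; cfg_string and TRAIN_DATA are the fixtures defined earlier in this module:

    from thinc.api import Config
    from spacy import util
    from spacy.training import Example

    nlp = util.load_model_from_config(
        Config().from_str(cfg_string), auto_fill=True, validate=True
    )
    train_examples = [Example.from_dict(nlp.make_doc(t), a) for t, a in TRAIN_DATA]
    optimizer = nlp.initialize(get_examples=lambda: train_examples)

    losses = {}
    # Freeze the tok2vec weights (exclude) but keep it annotating, so the
    # listener-based tagger still receives tok2vec predictions during update.
    nlp.update(
        train_examples,
        sgd=optimizer,
        losses=losses,
        exclude=["tok2vec"],
        annotates=["tok2vec"],
    )
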
diff --git a/spacy/tests/serialize/test_resource_warning.py b/spacy/tests/serialize/test_resource_warning.py
index a00b2a688..38701c6d9 100644
--- a/spacy/tests/serialize/test_resource_warning.py
+++ b/spacy/tests/serialize/test_resource_warning.py
@@ -3,7 +3,7 @@ from unittest import TestCase
import pytest
import srsly
from numpy import zeros
-from spacy.kb import KnowledgeBase, Writer
+from spacy.kb.kb_in_memory import InMemoryLookupKB, Writer
from spacy.vectors import Vectors
from spacy.language import Language
from spacy.pipeline import TrainablePipe
@@ -71,7 +71,7 @@ def entity_linker():
nlp = Language()
def create_kb(vocab):
- kb = KnowledgeBase(vocab, entity_vector_length=1)
+ kb = InMemoryLookupKB(vocab, entity_vector_length=1)
kb.add_entity("test", 0.0, zeros((1, 1), dtype="f"))
return kb
@@ -120,7 +120,7 @@ def test_writer_with_path_py35():
def test_save_and_load_knowledge_base():
nlp = Language()
- kb = KnowledgeBase(nlp.vocab, entity_vector_length=1)
+ kb = InMemoryLookupKB(nlp.vocab, entity_vector_length=1)
with make_tempdir() as d:
path = d / "kb"
try:
@@ -129,7 +129,7 @@ def test_save_and_load_knowledge_base():
pytest.fail(str(e))
try:
- kb_loaded = KnowledgeBase(nlp.vocab, entity_vector_length=1)
+ kb_loaded = InMemoryLookupKB(nlp.vocab, entity_vector_length=1)
kb_loaded.from_disk(path)
except Exception as e:
pytest.fail(str(e))
diff --git a/spacy/tests/serialize/test_serialize_kb.py b/spacy/tests/serialize/test_serialize_kb.py
index 1e0ae3c76..8d3653ab1 100644
--- a/spacy/tests/serialize/test_serialize_kb.py
+++ b/spacy/tests/serialize/test_serialize_kb.py
@@ -2,7 +2,7 @@ from typing import Callable
from spacy import util
from spacy.util import ensure_path, registry, load_model_from_config
-from spacy.kb import KnowledgeBase
+from spacy.kb.kb_in_memory import InMemoryLookupKB
from spacy.vocab import Vocab
from thinc.api import Config
@@ -22,7 +22,7 @@ def test_serialize_kb_disk(en_vocab):
dir_path.mkdir()
file_path = dir_path / "kb"
kb1.to_disk(str(file_path))
- kb2 = KnowledgeBase(vocab=en_vocab, entity_vector_length=3)
+ kb2 = InMemoryLookupKB(vocab=en_vocab, entity_vector_length=3)
kb2.from_disk(str(file_path))
# final assertions
@@ -30,7 +30,7 @@ def test_serialize_kb_disk(en_vocab):
def _get_dummy_kb(vocab):
- kb = KnowledgeBase(vocab, entity_vector_length=3)
+ kb = InMemoryLookupKB(vocab, entity_vector_length=3)
kb.add_entity(entity="Q53", freq=33, entity_vector=[0, 5, 3])
kb.add_entity(entity="Q17", freq=2, entity_vector=[7, 1, 0])
kb.add_entity(entity="Q007", freq=7, entity_vector=[0, 0, 7])
@@ -104,7 +104,7 @@ def test_serialize_subclassed_kb():
custom_field = 666
"""
- class SubKnowledgeBase(KnowledgeBase):
+ class SubInMemoryLookupKB(InMemoryLookupKB):
def __init__(self, vocab, entity_vector_length, custom_field):
super().__init__(vocab, entity_vector_length)
self.custom_field = custom_field
@@ -112,9 +112,9 @@ def test_serialize_subclassed_kb():
@registry.misc("spacy.CustomKB.v1")
def custom_kb(
entity_vector_length: int, custom_field: int
- ) -> Callable[[Vocab], KnowledgeBase]:
+ ) -> Callable[[Vocab], InMemoryLookupKB]:
def custom_kb_factory(vocab):
- kb = SubKnowledgeBase(
+ kb = SubInMemoryLookupKB(
vocab=vocab,
entity_vector_length=entity_vector_length,
custom_field=custom_field,
@@ -129,7 +129,7 @@ def test_serialize_subclassed_kb():
nlp.initialize()
entity_linker = nlp.get_pipe("entity_linker")
- assert type(entity_linker.kb) == SubKnowledgeBase
+ assert type(entity_linker.kb) == SubInMemoryLookupKB
assert entity_linker.kb.entity_vector_length == 342
assert entity_linker.kb.custom_field == 666
@@ -139,6 +139,6 @@ def test_serialize_subclassed_kb():
nlp2 = util.load_model_from_path(tmp_dir)
entity_linker2 = nlp2.get_pipe("entity_linker")
# After IO, the KB is the standard one
- assert type(entity_linker2.kb) == KnowledgeBase
+ assert type(entity_linker2.kb) == InMemoryLookupKB
assert entity_linker2.kb.entity_vector_length == 342
assert not hasattr(entity_linker2.kb, "custom_field")
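
With KnowledgeBase replaced by InMemoryLookupKB throughout, round-tripping a KB looks roughly like this; only calls that appear in these tests are used, and the path is a placeholder:

    from spacy.kb.kb_in_memory import InMemoryLookupKB
    from spacy.vocab import Vocab

    vocab = Vocab()
    kb = InMemoryLookupKB(vocab, entity_vector_length=3)
    kb.add_entity(entity="Q53", freq=33, entity_vector=[0, 5, 3])
    kb.to_disk("/tmp/kb")  # placeholder path

    kb2 = InMemoryLookupKB(vocab, entity_vector_length=3)
    kb2.from_disk("/tmp/kb")
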
diff --git a/spacy/tests/serialize/test_serialize_pipeline.py b/spacy/tests/serialize/test_serialize_pipeline.py
index 9fcf18e2d..36129a408 100644
--- a/spacy/tests/serialize/test_serialize_pipeline.py
+++ b/spacy/tests/serialize/test_serialize_pipeline.py
@@ -8,7 +8,7 @@ import spacy
from spacy import Vocab, load, registry
from spacy.lang.en import English
from spacy.language import Language
-from spacy.pipeline import DependencyParser, EntityRecognizer, EntityRuler
+from spacy.pipeline import DependencyParser, EntityRecognizer
from spacy.pipeline import SentenceRecognizer, Tagger, TextCategorizer
from spacy.pipeline import TrainablePipe
from spacy.pipeline.dep_parser import DEFAULT_PARSER_MODEL
@@ -85,58 +85,17 @@ def test_issue_3526_1(en_vocab):
{"label": "TECH_ORG", "pattern": "Apple", "id": "a1"},
]
nlp = Language(vocab=en_vocab)
- ruler = EntityRuler(nlp, patterns=patterns, overwrite_ents=True)
+ ruler = nlp.add_pipe("entity_ruler", config={"overwrite_ents": True})
+ ruler.add_patterns(patterns)
ruler_bytes = ruler.to_bytes()
assert len(ruler) == len(patterns)
assert len(ruler.labels) == 4
- assert ruler.overwrite
- new_ruler = EntityRuler(nlp)
+ new_ruler = nlp.add_pipe(
+ "entity_ruler", name="new_ruler", config={"overwrite_ents": True}
+ )
new_ruler = new_ruler.from_bytes(ruler_bytes)
assert len(new_ruler) == len(ruler)
assert len(new_ruler.labels) == 4
- assert new_ruler.overwrite == ruler.overwrite
- assert new_ruler.ent_id_sep == ruler.ent_id_sep
-
-
-@pytest.mark.issue(3526)
-def test_issue_3526_2(en_vocab):
- patterns = [
- {"label": "HELLO", "pattern": "hello world"},
- {"label": "BYE", "pattern": [{"LOWER": "bye"}, {"LOWER": "bye"}]},
- {"label": "HELLO", "pattern": [{"ORTH": "HELLO"}]},
- {"label": "COMPLEX", "pattern": [{"ORTH": "foo", "OP": "*"}]},
- {"label": "TECH_ORG", "pattern": "Apple", "id": "a1"},
- ]
- nlp = Language(vocab=en_vocab)
- ruler = EntityRuler(nlp, patterns=patterns, overwrite_ents=True)
- bytes_old_style = srsly.msgpack_dumps(ruler.patterns)
- new_ruler = EntityRuler(nlp)
- new_ruler = new_ruler.from_bytes(bytes_old_style)
- assert len(new_ruler) == len(ruler)
- for pattern in ruler.patterns:
- assert pattern in new_ruler.patterns
- assert new_ruler.overwrite is not ruler.overwrite
-
-
-@pytest.mark.issue(3526)
-def test_issue_3526_3(en_vocab):
- patterns = [
- {"label": "HELLO", "pattern": "hello world"},
- {"label": "BYE", "pattern": [{"LOWER": "bye"}, {"LOWER": "bye"}]},
- {"label": "HELLO", "pattern": [{"ORTH": "HELLO"}]},
- {"label": "COMPLEX", "pattern": [{"ORTH": "foo", "OP": "*"}]},
- {"label": "TECH_ORG", "pattern": "Apple", "id": "a1"},
- ]
- nlp = Language(vocab=en_vocab)
- ruler = EntityRuler(nlp, patterns=patterns, overwrite_ents=True)
- with make_tempdir() as tmpdir:
- out_file = tmpdir / "entity_ruler"
- srsly.write_jsonl(out_file.with_suffix(".jsonl"), ruler.patterns)
- new_ruler = EntityRuler(nlp).from_disk(out_file)
- for pattern in ruler.patterns:
- assert pattern in new_ruler.patterns
- assert len(new_ruler) == len(ruler)
- assert new_ruler.overwrite is not ruler.overwrite
@pytest.mark.issue(3526)
@@ -150,16 +109,14 @@ def test_issue_3526_4(en_vocab):
nlp.to_disk(tmpdir)
ruler = nlp.get_pipe("entity_ruler")
assert ruler.patterns == [{"label": "ORG", "pattern": "Apple"}]
- assert ruler.overwrite is True
nlp2 = load(tmpdir)
new_ruler = nlp2.get_pipe("entity_ruler")
assert new_ruler.patterns == [{"label": "ORG", "pattern": "Apple"}]
- assert new_ruler.overwrite is True
@pytest.mark.issue(4042)
def test_issue4042():
- """Test that serialization of an EntityRuler before NER works fine."""
+ """Test that serialization of an entity_ruler before NER works fine."""
nlp = English()
# add ner pipe
ner = nlp.add_pipe("ner")
diff --git a/spacy/tests/test_cli.py b/spacy/tests/test_cli.py
index 838e00369..c6768a3fd 100644
--- a/spacy/tests/test_cli.py
+++ b/spacy/tests/test_cli.py
@@ -1,8 +1,12 @@
import os
import math
-from random import sample
-from typing import Counter
+from collections import Counter
+from typing import Tuple, List, Dict, Any
+import pkg_resources
+import time
+import spacy
+import numpy
import pytest
import srsly
from click import NoSuchOption
@@ -15,6 +19,7 @@ from spacy.cli._util import is_subpath_of, load_project_config
from spacy.cli._util import parse_config_overrides, string_to_list
from spacy.cli._util import substitute_project_variables
from spacy.cli._util import validate_project_commands
+from spacy.cli._util import upload_file, download_file
from spacy.cli.debug_data import _compile_gold, _get_labels_from_model
from spacy.cli.debug_data import _get_labels_from_spancat
from spacy.cli.debug_data import _get_distribution, _get_kl_divergence
@@ -25,12 +30,16 @@ from spacy.cli.download import get_compatibility, get_version
from spacy.cli.init_config import RECOMMENDATIONS, init_config, fill_config
from spacy.cli.package import get_third_party_dependencies
from spacy.cli.package import _is_permitted_package_name
+from spacy.cli.project.remote_storage import RemoteStorage
+from spacy.cli.project.run import _check_requirements
from spacy.cli.validate import get_model_pkgs
+from spacy.cli.apply import apply
+from spacy.cli.find_threshold import find_threshold
from spacy.lang.en import English
from spacy.lang.nl import Dutch
from spacy.language import Language
from spacy.schemas import ProjectConfigSchema, RecommendationSchema, validate
-from spacy.tokens import Doc
+from spacy.tokens import Doc, DocBin
from spacy.tokens.span import Span
from spacy.training import Example, docs_to_json, offsets_to_biluo_tags
from spacy.training.converters import conll_ner_to_docs, conllu_to_docs
@@ -116,6 +125,25 @@ def test_issue7055():
assert "model" in filled_cfg["components"]["ner"]
+@pytest.mark.issue(11235)
+def test_issue11235():
+ """
+ Test that the CLI correctly handles interpolation in directory names when loading the project config.
+ """
+ lang_var = "en"
+ variables = {"lang": lang_var}
+ commands = [{"name": "x", "script": ["hello ${vars.lang}"]}]
+ directories = ["cfg", "${vars.lang}_model"]
+ project = {"commands": commands, "vars": variables, "directories": directories}
+ with make_tempdir() as d:
+ srsly.write_yaml(d / "project.yml", project)
+ cfg = load_project_config(d)
+ # Check that the directories are interpolated and created correctly
+ assert os.path.exists(d / "cfg")
+ assert os.path.exists(d / f"{lang_var}_model")
+ assert cfg["commands"][0]["script"][0] == f"hello {lang_var}"
+
+
def test_cli_info():
nlp = Dutch()
nlp.add_pipe("textcat")
@@ -589,6 +617,7 @@ def test_string_to_list_intify(value):
assert string_to_list(value, intify=True) == [1, 2, 3]
+@pytest.mark.skip(reason="Temporarily skip for dev version")
def test_download_compatibility():
spec = SpecifierSet("==" + about.__version__)
spec.prereleases = False
@@ -599,6 +628,7 @@ def test_download_compatibility():
assert get_minor_version(about.__version__) == get_minor_version(version)
+@pytest.mark.skip(reason="Temporarily skip for dev version")
def test_validate_compatibility_table():
spec = SpecifierSet("==" + about.__version__)
spec.prereleases = False
@@ -855,3 +885,303 @@ def test_span_length_freq_dist_output_must_be_correct():
span_freqs = _get_spans_length_freq_dist(sample_span_lengths, threshold)
assert sum(span_freqs.values()) >= threshold
assert list(span_freqs.keys()) == [3, 1, 4, 5, 2]
+
+
+def test_applycli_empty_dir():
+ with make_tempdir() as data_path:
+ output = data_path / "test.spacy"
+ apply(data_path, output, "blank:en", "text", 1, 1)
+
+
+def test_applycli_docbin():
+ with make_tempdir() as data_path:
+ output = data_path / "testout.spacy"
+ nlp = spacy.blank("en")
+ doc = nlp("testing apply cli.")
+ # test empty DocBin case
+ docbin = DocBin()
+ docbin.to_disk(data_path / "testin.spacy")
+ apply(data_path, output, "blank:en", "text", 1, 1)
+ docbin.add(doc)
+ docbin.to_disk(data_path / "testin.spacy")
+ apply(data_path, output, "blank:en", "text", 1, 1)
+
+
+def test_applycli_jsonl():
+ with make_tempdir() as data_path:
+ output = data_path / "testout.spacy"
+ data = [{"field": "Testing apply cli.", "key": 234}]
+ data2 = [{"field": "234"}]
+ srsly.write_jsonl(data_path / "test.jsonl", data)
+ apply(data_path, output, "blank:en", "field", 1, 1)
+ srsly.write_jsonl(data_path / "test2.jsonl", data2)
+ apply(data_path, output, "blank:en", "field", 1, 1)
+
+
+def test_applycli_txt():
+ with make_tempdir() as data_path:
+ output = data_path / "testout.spacy"
+ with open(data_path / "test.foo", "w") as ftest:
+ ftest.write("Testing apply cli.")
+ apply(data_path, output, "blank:en", "text", 1, 1)
+
+
+def test_applycli_mixed():
+ with make_tempdir() as data_path:
+ output = data_path / "testout.spacy"
+ text = "Testing apply cli"
+ nlp = spacy.blank("en")
+ doc = nlp(text)
+ jsonl_data = [{"text": text}]
+ srsly.write_jsonl(data_path / "test.jsonl", jsonl_data)
+ docbin = DocBin()
+ docbin.add(doc)
+ docbin.to_disk(data_path / "testin.spacy")
+ with open(data_path / "test.txt", "w") as ftest:
+ ftest.write(text)
+ apply(data_path, output, "blank:en", "text", 1, 1)
+ # Check that docs from all three input formats ended up in the output
+ result = list(DocBin().from_disk(output).get_docs(nlp.vocab))
+ assert len(result) == 3
+ for doc in result:
+ assert doc.text == text
+
+
+def test_applycli_user_data():
+ Doc.set_extension("ext", default=0)
+ val = ("ext", 0)
+ with make_tempdir() as data_path:
+ output = data_path / "testout.spacy"
+ nlp = spacy.blank("en")
+ doc = nlp("testing apply cli.")
+ doc._.ext = val
+ docbin = DocBin(store_user_data=True)
+ docbin.add(doc)
+ docbin.to_disk(data_path / "testin.spacy")
+ apply(data_path, output, "blank:en", "", 1, 1)
+ result = list(DocBin().from_disk(output).get_docs(nlp.vocab))
+ assert result[0]._.ext == val
+
+
+def test_local_remote_storage():
+ with make_tempdir() as d:
+ filename = "a.txt"
+
+ content_hashes = ("aaaa", "cccc", "bbbb")
+ for i, content_hash in enumerate(content_hashes):
+ # make sure that each subsequent file has a later timestamp
+ if i > 0:
+ time.sleep(1)
+ content = f"{content_hash} content"
+ loc_file = d / "root" / filename
+ if not loc_file.parent.exists():
+ loc_file.parent.mkdir(parents=True)
+ with loc_file.open(mode="w") as file_:
+ file_.write(content)
+
+ # push first version to remote storage
+ remote = RemoteStorage(d / "root", str(d / "remote"))
+ remote.push(filename, "aaaa", content_hash)
+
+ # retrieve with full hashes
+ loc_file.unlink()
+ remote.pull(filename, command_hash="aaaa", content_hash=content_hash)
+ with loc_file.open(mode="r") as file_:
+ assert file_.read() == content
+
+ # retrieve with command hash
+ loc_file.unlink()
+ remote.pull(filename, command_hash="aaaa")
+ with loc_file.open(mode="r") as file_:
+ assert file_.read() == content
+
+ # retrieve with content hash
+ loc_file.unlink()
+ remote.pull(filename, content_hash=content_hash)
+ with loc_file.open(mode="r") as file_:
+ assert file_.read() == content
+
+ # retrieve with no hashes
+ loc_file.unlink()
+ remote.pull(filename)
+ with loc_file.open(mode="r") as file_:
+ assert file_.read() == content
+
+
+def test_local_remote_storage_pull_missing():
+ # pulling from a non-existent remote pulls nothing gracefully
+ with make_tempdir() as d:
+ filename = "a.txt"
+ remote = RemoteStorage(d / "root", str(d / "remote"))
+ assert remote.pull(filename, command_hash="aaaa") is None
+ assert remote.pull(filename) is None
+
+
+def test_cli_find_threshold(capsys):
+ thresholds = numpy.linspace(0, 1, 10)
+
+ def make_examples(nlp: Language) -> List[Example]:
+ docs: List[Example] = []
+
+ for t in [
+ (
+ "I am angry and confused in the Bank of America.",
+ {
+ "cats": {"ANGRY": 1.0, "CONFUSED": 1.0, "HAPPY": 0.0},
+ "spans": {"sc": [(31, 46, "ORG")]},
+ },
+ ),
+ (
+ "I am confused but happy in New York.",
+ {
+ "cats": {"ANGRY": 0.0, "CONFUSED": 1.0, "HAPPY": 1.0},
+ "spans": {"sc": [(27, 35, "GPE")]},
+ },
+ ),
+ ]:
+ doc = nlp.make_doc(t[0])
+ docs.append(Example.from_dict(doc, t[1]))
+
+ return docs
+
+ def init_nlp(
+ components: Tuple[Tuple[str, Dict[str, Any]], ...] = ()
+ ) -> Tuple[Language, List[Example]]:
+ new_nlp = English()
+ new_nlp.add_pipe( # type: ignore
+ factory_name="textcat_multilabel",
+ name="tc_multi",
+ config={"threshold": 0.9},
+ )
+
+ # Append additional components to pipeline.
+ for cfn, comp_config in components:
+ new_nlp.add_pipe(cfn, config=comp_config)
+
+ new_examples = make_examples(new_nlp)
+ new_nlp.initialize(get_examples=lambda: new_examples)
+ for i in range(5):
+ new_nlp.update(new_examples)
+
+ return new_nlp, new_examples
+
+ with make_tempdir() as docs_dir:
+ # Check whether find_threshold() identifies the lowest threshold above 0 as the (first) ideal threshold, as this
+ # matches the current model behavior with the examples above. This can break once the model behavior changes and
+ # serves mostly as a smoke test.
+ nlp, examples = init_nlp()
+ DocBin(docs=[example.reference for example in examples]).to_disk(
+ docs_dir / "docs.spacy"
+ )
+ with make_tempdir() as nlp_dir:
+ nlp.to_disk(nlp_dir)
+ res = find_threshold(
+ model=nlp_dir,
+ data_path=docs_dir / "docs.spacy",
+ pipe_name="tc_multi",
+ threshold_key="threshold",
+ scores_key="cats_macro_f",
+ silent=True,
+ )
+ assert res[0] != thresholds[0]
+ assert thresholds[0] < res[0] < thresholds[9]
+ assert res[1] == 1.0
+ assert res[2][1.0] == 0.0
+
+ # Test with spancat.
+ nlp, _ = init_nlp((("spancat", {}),))
+ with make_tempdir() as nlp_dir:
+ nlp.to_disk(nlp_dir)
+ res = find_threshold(
+ model=nlp_dir,
+ data_path=docs_dir / "docs.spacy",
+ pipe_name="spancat",
+ threshold_key="threshold",
+ scores_key="spans_sc_f",
+ silent=True,
+ )
+ assert res[0] != thresholds[0]
+ assert thresholds[0] < res[0] < thresholds[8]
+ assert res[1] >= 0.6
+ assert res[2][1.0] == 0.0
+
+ # Having multiple textcat_multilabel components should work, since the name has to be specified.
+ nlp, _ = init_nlp((("textcat_multilabel", {}),))
+ with make_tempdir() as nlp_dir:
+ nlp.to_disk(nlp_dir)
+ assert find_threshold(
+ model=nlp_dir,
+ data_path=docs_dir / "docs.spacy",
+ pipe_name="tc_multi",
+ threshold_key="threshold",
+ scores_key="cats_macro_f",
+ silent=True,
+ )
+
+ # Specifying the name of a non-existent pipe should fail.
+ nlp, _ = init_nlp()
+ with make_tempdir() as nlp_dir:
+ nlp.to_disk(nlp_dir)
+ with pytest.raises(AttributeError):
+ find_threshold(
+ model=nlp_dir,
+ data_path=docs_dir / "docs.spacy",
+ pipe_name="_",
+ threshold_key="threshold",
+ scores_key="cats_macro_f",
+ silent=True,
+ )
+
+
+@pytest.mark.parametrize(
+ "reqs,output",
+ [
+ [
+ """
+ spacy
+
+ # comment
+
+ thinc""",
+ (False, False),
+ ],
+ [
+ """# comment
+ --some-flag
+ spacy""",
+ (False, False),
+ ],
+ [
+ """# comment
+ --some-flag
+ spacy; python_version >= '3.6'""",
+ (False, False),
+ ],
+ [
+ """# comment
+ spacyunknowndoesnotexist12345""",
+ (True, False),
+ ],
+ ],
+)
+def test_project_check_requirements(reqs, output):
+ # Excessive guard: only assert if the improbable package name is genuinely not installed
+ try:
+ pkg_resources.require("spacyunknowndoesnotexist12345")
+ except pkg_resources.DistributionNotFound:
+ assert output == _check_requirements([req.strip() for req in reqs.split("\n")])
+
+
+def test_upload_download_local_file():
+ with make_tempdir() as d1, make_tempdir() as d2:
+ filename = "f.txt"
+ content = "content"
+ local_file = d1 / filename
+ remote_file = d2 / filename
+ with local_file.open(mode="w") as file_:
+ file_.write(content)
+ upload_file(local_file, remote_file)
+ local_file.unlink()
+ download_file(remote_file, local_file)
+ with local_file.open(mode="r") as file_:
+ assert file_.read() == content
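
For orientation, a minimal sketch of calling the new find_threshold helper directly, mirroring the keyword arguments used in test_cli_find_threshold; the model and data paths are placeholders, and the tuple unpacking follows the assertions in that test:

    from spacy.cli.find_threshold import find_threshold

    # Per the test's assertions, the result is a 3-tuple of
    # (best threshold, best score, {threshold: score}).
    best_threshold, best_score, scores_by_threshold = find_threshold(
        model="/tmp/nlp_dir",          # placeholder: a pipeline saved via nlp.to_disk()
        data_path="/tmp/docs.spacy",   # placeholder: a DocBin of reference docs
        pipe_name="tc_multi",
        threshold_key="threshold",
        scores_key="cats_macro_f",
        silent=True,
    )
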
diff --git a/spacy/tests/test_displacy.py b/spacy/tests/test_displacy.py
index ccc145b44..f298b38e0 100644
--- a/spacy/tests/test_displacy.py
+++ b/spacy/tests/test_displacy.py
@@ -203,6 +203,16 @@ def test_displacy_parse_spans_different_spans_key(en_vocab):
]
+def test_displacy_parse_empty_spans_key(en_vocab):
+ """Test that having an unset spans key doesn't raise an error"""
+ doc = Doc(en_vocab, words=["Welcome", "to", "the", "Bank", "of", "China"])
+ doc.spans["custom"] = [Span(doc, 3, 6, "BANK")]
+ with pytest.warns(UserWarning, match="W117"):
+ spans = displacy.parse_spans(doc)
+
+ assert isinstance(spans, dict)
+
+
def test_displacy_parse_ents(en_vocab):
"""Test that named entities on a Doc are converted into displaCy's format."""
doc = Doc(en_vocab, words=["But", "Google", "is", "starting", "from", "behind"])
diff --git a/spacy/tests/test_models.py b/spacy/tests/test_models.py
index 2306cabb7..d91ed1201 100644
--- a/spacy/tests/test_models.py
+++ b/spacy/tests/test_models.py
@@ -23,7 +23,7 @@ def get_textcat_bow_kwargs():
def get_textcat_cnn_kwargs():
- return {"tok2vec": test_tok2vec(), "exclusive_classes": False, "nO": 13}
+ return {"tok2vec": make_test_tok2vec(), "exclusive_classes": False, "nO": 13}
def get_all_params(model):
@@ -65,7 +65,7 @@ def get_tok2vec_kwargs():
}
-def test_tok2vec():
+def make_test_tok2vec():
return build_Tok2Vec_model(**get_tok2vec_kwargs())
diff --git a/spacy/tests/test_scorer.py b/spacy/tests/test_scorer.py
index 6e15fa2de..b903f1669 100644
--- a/spacy/tests/test_scorer.py
+++ b/spacy/tests/test_scorer.py
@@ -474,3 +474,50 @@ def test_prf_score():
assert (a.precision, a.recall, a.fscore) == approx(
(c.precision, c.recall, c.fscore)
)
+
+
+def test_score_cats(en_tokenizer):
+ text = "some text"
+ gold_doc = en_tokenizer(text)
+ gold_doc.cats = {"POSITIVE": 1.0, "NEGATIVE": 0.0}
+ pred_doc = en_tokenizer(text)
+ pred_doc.cats = {"POSITIVE": 0.75, "NEGATIVE": 0.25}
+ example = Example(pred_doc, gold_doc)
+ # threshold is ignored for multi_label=False
+ scores1 = Scorer.score_cats(
+ [example],
+ "cats",
+ labels=list(gold_doc.cats.keys()),
+ multi_label=False,
+ positive_label="POSITIVE",
+ threshold=0.1,
+ )
+ scores2 = Scorer.score_cats(
+ [example],
+ "cats",
+ labels=list(gold_doc.cats.keys()),
+ multi_label=False,
+ positive_label="POSITIVE",
+ threshold=0.9,
+ )
+ assert scores1["cats_score"] == 1.0
+ assert scores2["cats_score"] == 1.0
+ assert scores1 == scores2
+ # threshold is relevant for multi_label=True
+ scores = Scorer.score_cats(
+ [example],
+ "cats",
+ labels=list(gold_doc.cats.keys()),
+ multi_label=True,
+ threshold=0.9,
+ )
+ assert scores["cats_macro_f"] == 0.0
+ # threshold is relevant for multi_label=True
+ scores = Scorer.score_cats(
+ [example],
+ "cats",
+ labels=list(gold_doc.cats.keys()),
+ multi_label=True,
+ threshold=0.1,
+ )
+ assert scores["cats_macro_f"] == 0.5
diff --git a/spacy/tests/training/test_augmenters.py b/spacy/tests/training/test_augmenters.py
index e3639c5da..35860a199 100644
--- a/spacy/tests/training/test_augmenters.py
+++ b/spacy/tests/training/test_augmenters.py
@@ -31,7 +31,7 @@ def doc(nlp):
words = ["Sarah", "'s", "sister", "flew", "to", "Silicon", "Valley", "via", "London", "."]
tags = ["NNP", "POS", "NN", "VBD", "IN", "NNP", "NNP", "IN", "NNP", "."]
pos = ["PROPN", "PART", "NOUN", "VERB", "ADP", "PROPN", "PROPN", "ADP", "PROPN", "PUNCT"]
- ents = ["B-PERSON", "I-PERSON", "O", "O", "O", "B-LOC", "I-LOC", "O", "B-GPE", "O"]
+ ents = ["B-PERSON", "I-PERSON", "O", "", "O", "B-LOC", "I-LOC", "O", "B-GPE", "O"]
cats = {"TRAVEL": 1.0, "BAKING": 0.0}
# fmt: on
doc = Doc(nlp.vocab, words=words, tags=tags, pos=pos, ents=ents)
@@ -106,6 +106,7 @@ def test_lowercase_augmenter(nlp, doc):
assert [(e.start, e.end, e.label) for e in eg.reference.ents] == ents
for ref_ent, orig_ent in zip(eg.reference.ents, doc.ents):
assert ref_ent.text == orig_ent.text.lower()
+ assert [t.ent_iob for t in doc] == [t.ent_iob for t in eg.reference]
assert [t.pos_ for t in eg.reference] == [t.pos_ for t in doc]
# check that augmentation works when lowercasing leads to different
@@ -166,7 +167,7 @@ def test_make_whitespace_variant(nlp):
lemmas = ["they", "fly", "to", "New", "York", "City", ".", "\n", "then", "they", "drive", "to", "Washington", ",", "D.C."]
heads = [1, 1, 1, 4, 5, 2, 1, 10, 10, 10, 10, 10, 11, 12, 12]
deps = ["nsubj", "ROOT", "prep", "compound", "compound", "pobj", "punct", "dep", "advmod", "nsubj", "ROOT", "prep", "pobj", "punct", "appos"]
- ents = ["O", "O", "O", "B-GPE", "I-GPE", "I-GPE", "O", "O", "O", "O", "O", "O", "B-GPE", "O", "B-GPE"]
+ ents = ["O", "", "O", "B-GPE", "I-GPE", "I-GPE", "O", "O", "O", "O", "O", "O", "B-GPE", "O", "B-GPE"]
# fmt: on
doc = Doc(
nlp.vocab,
@@ -215,6 +216,8 @@ def test_make_whitespace_variant(nlp):
assert mod_ex2.reference[j].head.i == j - 1
# entities are well-formed
assert len(doc.ents) == len(mod_ex.reference.ents)
+ # there is one token with missing entity information
+ assert any(t.ent_iob == 0 for t in mod_ex.reference)
for ent in mod_ex.reference.ents:
assert not ent[0].is_space
assert not ent[-1].is_space
diff --git a/spacy/tests/training/test_training.py b/spacy/tests/training/test_training.py
index 4384a796d..7933ea31f 100644
--- a/spacy/tests/training/test_training.py
+++ b/spacy/tests/training/test_training.py
@@ -2,6 +2,7 @@ import random
import numpy
import pytest
+import spacy
import srsly
from spacy.lang.en import English
from spacy.tokens import Doc, DocBin
@@ -11,9 +12,10 @@ from spacy.training import offsets_to_biluo_tags
from spacy.training.alignment_array import AlignmentArray
from spacy.training.align import get_alignments
from spacy.training.converters import json_to_docs
+from spacy.training.loop import train_while_improving
from spacy.util import get_words_and_spaces, load_model_from_path, minibatch
from spacy.util import load_config_from_str
-from thinc.api import compounding
+from thinc.api import compounding, Adam
from ..util import make_tempdir
@@ -1112,3 +1114,39 @@ def test_retokenized_docs(doc):
retokenizer.merge(doc1[0:2])
retokenizer.merge(doc1[5:7])
assert example.get_aligned("ORTH", as_string=True) == expected2
+
+
+def test_training_before_update(doc):
+ def before_update(nlp, args):
+ assert args["step"] == 0
+ assert args["epoch"] == 1
+
+ # Raise an error here as the rest of the loop
+ # will not run to completion due to uninitialized
+ # models.
+ raise ValueError("ran_before_update")
+
+ def generate_batch():
+ yield 1, [Example(doc, doc)]
+
+ nlp = spacy.blank("en")
+ nlp.add_pipe("tagger")
+ optimizer = Adam()
+ generator = train_while_improving(
+ nlp,
+ optimizer,
+ generate_batch(),
+ lambda: None,
+ dropout=0.1,
+ eval_frequency=100,
+ accumulate_gradient=10,
+ patience=10,
+ max_steps=100,
+ exclude=[],
+ annotating_components=[],
+ before_update=before_update,
+ )
+
+ with pytest.raises(ValueError, match="ran_before_update"):
+ for _ in generator:
+ pass
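
A trimmed sketch of wiring a before_update callback into the training loop, following the call signature used in test_training_before_update; iterating the generator is what drives the steps:

    import spacy
    from spacy.tokens import Doc
    from spacy.training import Example
    from spacy.training.loop import train_while_improving
    from thinc.api import Adam

    def before_update(nlp, args):
        # Called before each weight update; args carries at least "step" and "epoch".
        print(args["step"], args["epoch"])

    nlp = spacy.blank("en")
    nlp.add_pipe("tagger")
    doc = Doc(nlp.vocab, words=["a"])
    batches = iter([(1, [Example(doc, doc)])])

    generator = train_while_improving(
        nlp, Adam(), batches, lambda: None,
        dropout=0.1, eval_frequency=100, accumulate_gradient=10,
        patience=10, max_steps=1, exclude=[], annotating_components=[],
        before_update=before_update,
    )
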
diff --git a/spacy/tests/vocab_vectors/test_stringstore.py b/spacy/tests/vocab_vectors/test_stringstore.py
index a0f8016af..f86c0f10d 100644
--- a/spacy/tests/vocab_vectors/test_stringstore.py
+++ b/spacy/tests/vocab_vectors/test_stringstore.py
@@ -24,6 +24,14 @@ def test_stringstore_from_api_docs(stringstore):
stringstore.add("orange")
all_strings = [s for s in stringstore]
assert all_strings == ["apple", "orange"]
+ assert all_strings == list(stringstore.keys())
+ all_strings_and_hashes = list(stringstore.items())
+ assert all_strings_and_hashes == [
+ ("apple", 8566208034543834098),
+ ("orange", 2208928596161743350),
+ ]
+ all_hashes = list(stringstore.values())
+ assert all_hashes == [8566208034543834098, 2208928596161743350]
banana_hash = stringstore.add("banana")
assert len(stringstore) == 3
assert banana_hash == 2525716904149915114
@@ -31,12 +39,25 @@ def test_stringstore_from_api_docs(stringstore):
assert stringstore["banana"] == banana_hash
-@pytest.mark.parametrize("text1,text2,text3", [(b"Hello", b"goodbye", b"hello")])
-def test_stringstore_save_bytes(stringstore, text1, text2, text3):
- key = stringstore.add(text1)
- assert stringstore[text1] == key
- assert stringstore[text2] != key
- assert stringstore[text3] != key
+@pytest.mark.parametrize(
+ "val_bytes,val_float,val_list,val_text,val_hash",
+ [(b"Hello", 1.1, ["abc"], "apple", 8566208034543834098)],
+)
+def test_stringstore_type_checking(
+ stringstore, val_bytes, val_float, val_list, val_text, val_hash
+):
+ with pytest.raises(TypeError):
+ assert stringstore[val_bytes]
+
+ with pytest.raises(TypeError):
+ stringstore.add(val_float)
+
+ with pytest.raises(TypeError):
+ assert val_list not in stringstore
+
+ key = stringstore.add(val_text)
+ assert val_hash == key
+ assert stringstore[val_hash] == val_text
@pytest.mark.parametrize("text1,text2,text3", [("Hello", "goodbye", "hello")])
@@ -47,19 +68,19 @@ def test_stringstore_save_unicode(stringstore, text1, text2, text3):
assert stringstore[text3] != key
-@pytest.mark.parametrize("text", [b"A"])
+@pytest.mark.parametrize("text", ["A"])
def test_stringstore_retrieve_id(stringstore, text):
key = stringstore.add(text)
assert len(stringstore) == 1
- assert stringstore[key] == text.decode("utf8")
+ assert stringstore[key] == text
with pytest.raises(KeyError):
stringstore[20000]
-@pytest.mark.parametrize("text1,text2", [(b"0123456789", b"A")])
+@pytest.mark.parametrize("text1,text2", [("0123456789", "A")])
def test_stringstore_med_string(stringstore, text1, text2):
store = stringstore.add(text1)
- assert stringstore[store] == text1.decode("utf8")
+ assert stringstore[store] == text1
stringstore.add(text2)
assert stringstore[text1] == store
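
The new keys()/values()/items() views on StringStore mirror the dict-like API asserted above; a self-contained sketch using the hashes from that test:

    from spacy.strings import StringStore

    stringstore = StringStore()
    stringstore.add("apple")
    stringstore.add("orange")

    assert list(stringstore.keys()) == ["apple", "orange"]
    assert list(stringstore.values()) == [8566208034543834098, 2208928596161743350]
    assert list(stringstore.items()) == [
        ("apple", 8566208034543834098),
        ("orange", 2208928596161743350),
    ]
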
diff --git a/spacy/tests/vocab_vectors/test_vectors.py b/spacy/tests/vocab_vectors/test_vectors.py
index dd2cfc596..70835816d 100644
--- a/spacy/tests/vocab_vectors/test_vectors.py
+++ b/spacy/tests/vocab_vectors/test_vectors.py
@@ -626,3 +626,23 @@ def test_floret_vectors(floret_vectors_vec_str, floret_vectors_hashvec_str):
OPS.to_numpy(vocab_r[word].vector),
decimal=6,
)
+
+
+def test_equality():
+ vectors1 = Vectors(shape=(10, 10))
+ vectors2 = Vectors(shape=(10, 8))
+
+ assert vectors1 != vectors2
+
+ vectors2 = Vectors(shape=(10, 10))
+ assert vectors1 == vectors2
+
+ vectors1.add("hello", row=2)
+ assert vectors1 != vectors2
+
+ vectors2.add("hello", row=2)
+ assert vectors1 == vectors2
+
+ vectors1.resize((5, 9))
+ vectors2.resize((5, 9))
+ assert vectors1 == vectors2
diff --git a/spacy/tests/vocab_vectors/test_vocab_api.py b/spacy/tests/vocab_vectors/test_vocab_api.py
index 16cf80a08..b9c386eb8 100644
--- a/spacy/tests/vocab_vectors/test_vocab_api.py
+++ b/spacy/tests/vocab_vectors/test_vocab_api.py
@@ -1,8 +1,13 @@
+import os
+
import pytest
from spacy.attrs import IS_ALPHA, LEMMA, ORTH
+from spacy.lang.en import English
from spacy.parts_of_speech import NOUN, VERB
from spacy.vocab import Vocab
+from ..util import make_tempdir
+
@pytest.mark.issue(1868)
def test_issue1868():
@@ -59,3 +64,19 @@ def test_vocab_api_contains(en_vocab, text):
def test_vocab_writing_system(en_vocab):
assert en_vocab.writing_system["direction"] == "ltr"
assert en_vocab.writing_system["has_case"] is True
+
+
+def test_to_disk():
+ nlp = English()
+ with make_tempdir() as d:
+ nlp.vocab.to_disk(d)
+ assert "vectors" in os.listdir(d)
+ assert "lookups.bin" in os.listdir(d)
+
+
+def test_to_disk_exclude():
+ nlp = English()
+ with make_tempdir() as d:
+ nlp.vocab.to_disk(d, exclude=("vectors", "lookups"))
+ assert "vectors" not in os.listdir(d)
+ assert "lookups.bin" not in os.listdir(d)
diff --git a/spacy/tokenizer.pxd b/spacy/tokenizer.pxd
index 86e62ddbf..6f9dfc90f 100644
--- a/spacy/tokenizer.pxd
+++ b/spacy/tokenizer.pxd
@@ -4,7 +4,6 @@ from cymem.cymem cimport Pool
from .typedefs cimport hash_t
from .structs cimport LexemeC, SpanC, TokenC
-from .strings cimport StringStore
from .tokens.doc cimport Doc
from .vocab cimport Vocab, LexemesOrTokens, _Cached
from .matcher.phrasematcher cimport PhraseMatcher
diff --git a/spacy/tokens/doc.pxd b/spacy/tokens/doc.pxd
index 83a940cbb..b53c75a2f 100644
--- a/spacy/tokens/doc.pxd
+++ b/spacy/tokens/doc.pxd
@@ -48,8 +48,6 @@ cdef class Doc:
cdef TokenC* c
- cdef public float sentiment
-
cdef public dict activations
cdef public dict user_hooks
diff --git a/spacy/tokens/doc.pyi b/spacy/tokens/doc.pyi
index 763c1fd2f..1c7c18bf3 100644
--- a/spacy/tokens/doc.pyi
+++ b/spacy/tokens/doc.pyi
@@ -21,7 +21,6 @@ class Doc:
spans: SpanGroups
max_length: int
length: int
- sentiment: float
activations: Dict[str, Dict[str, Union[ArrayXd, Ragged]]]
cats: Dict[str, float]
user_hooks: Dict[str, Callable[..., Any]]
@@ -73,7 +72,7 @@ class Doc:
lemmas: Optional[List[str]] = ...,
heads: Optional[List[int]] = ...,
deps: Optional[List[str]] = ...,
- sent_starts: Optional[List[Union[bool, None]]] = ...,
+ sent_starts: Optional[List[Union[bool, int, None]]] = ...,
ents: Optional[List[str]] = ...,
) -> None: ...
@property
diff --git a/spacy/tokens/doc.pyx b/spacy/tokens/doc.pyx
index 6969515c3..25af6ca6a 100644
--- a/spacy/tokens/doc.pyx
+++ b/spacy/tokens/doc.pyx
@@ -217,9 +217,9 @@ cdef class Doc:
head in the doc. Defaults to None.
deps (Optional[List[str]]): A list of unicode strings, of the same
length as words, to assign as token.dep. Defaults to None.
- sent_starts (Optional[List[Union[bool, None]]]): A list of values, of
- the same length as words, to assign as token.is_sent_start. Will be
- overridden by heads if heads is provided. Defaults to None.
+ sent_starts (Optional[List[Union[bool, int, None]]]): A list of values,
+ of the same length as words, to assign as token.is_sent_start. Will
+ be overridden by heads if heads is provided. Defaults to None.
ents (Optional[List[str]]): A list of unicode strings, of the same
length as words, as IOB tags to assign as token.ent_iob and
token.ent_type. Defaults to None.
@@ -243,7 +243,6 @@ cdef class Doc:
self.c = data_start + PADDING
self.max_length = size
self.length = 0
- self.sentiment = 0.0
self.cats = {}
self.activations = {}
self.user_hooks = {}
@@ -286,6 +285,7 @@ cdef class Doc:
heads = [0] * len(deps)
if heads and not deps:
raise ValueError(Errors.E1017)
+ sent_starts = list(sent_starts) if sent_starts is not None else None
if sent_starts is not None:
for i in range(len(sent_starts)):
if sent_starts[i] is True:
@@ -301,12 +301,11 @@ cdef class Doc:
ent_iobs = None
ent_types = None
if ents is not None:
+ ents = [ent if ent != "" else None for ent in ents]
iob_strings = Token.iob_strings()
# make valid IOB2 out of IOB1 or IOB2
for i, ent in enumerate(ents):
- if ent is "":
- ents[i] = None
- elif ent is not None and not isinstance(ent, str):
+ if ent is not None and not isinstance(ent, str):
raise ValueError(Errors.E177.format(tag=ent))
if i < len(ents) - 1:
# OI -> OB
@@ -360,6 +359,7 @@ cdef class Doc:
for annot in annotations:
if annot:
if annot is heads or annot is sent_starts or annot is ent_iobs:
+ annot = numpy.array(annot, dtype=numpy.int32).astype(numpy.uint64)
for i in range(len(words)):
if attrs.ndim == 1:
attrs[i] = annot[i]
@@ -1178,13 +1178,22 @@ cdef class Doc:
if "user_data" not in exclude:
for key, value in doc.user_data.items():
- if isinstance(key, tuple) and len(key) == 4 and key[0] == "._.":
- data_type, name, start, end = key
+ if isinstance(key, tuple) and len(key) >= 4 and key[0] == "._.":
+ data_type = key[0]
+ name = key[1]
+ start = key[2]
+ end = key[3]
if start is not None or end is not None:
start += char_offset
if end is not None:
end += char_offset
- concat_user_data[(data_type, name, start, end)] = copy.copy(value)
+ _label = key[4]
+ _kb_id = key[5]
+ _span_id = key[6]
+ concat_user_data[(data_type, name, start, end, _label, _kb_id, _span_id)] = copy.copy(value)
+ else:
+ concat_user_data[(data_type, name, start, end)] = copy.copy(value)
+
else:
warnings.warn(Warnings.W101.format(name=name))
else:
@@ -1270,7 +1279,6 @@ cdef class Doc:
other.tensor = copy.deepcopy(self.tensor)
other.cats = copy.deepcopy(self.cats)
other.user_data = copy.deepcopy(self.user_data)
- other.sentiment = self.sentiment
other.has_unknown_spaces = self.has_unknown_spaces
other.user_hooks = dict(self.user_hooks)
other.user_token_hooks = dict(self.user_token_hooks)
@@ -1367,7 +1375,6 @@ cdef class Doc:
"text": lambda: self.text,
"array_head": lambda: array_head,
"array_body": lambda: self.to_array(array_head),
- "sentiment": lambda: self.sentiment,
"tensor": lambda: self.tensor,
"cats": lambda: self.cats,
"spans": lambda: self.spans.to_bytes(),
@@ -1405,8 +1412,6 @@ cdef class Doc:
for key, value in zip(user_data_keys, user_data_values):
self.user_data[key] = value
cdef int i, start, end, has_space
- if "sentiment" not in exclude and "sentiment" in msg:
- self.sentiment = msg["sentiment"]
if "tensor" not in exclude and "tensor" in msg:
self.tensor = msg["tensor"]
if "cats" not in exclude and "cats" in msg:
@@ -1569,6 +1574,7 @@ cdef class Doc:
for j, (attr, annot) in enumerate(token_annotations.items()):
if attr is HEAD:
+ annot = numpy.array(annot, dtype=numpy.int32).astype(numpy.uint64)
for i in range(len(words)):
array[i, j] = annot[i]
elif attr is MORPH:
@@ -1619,24 +1625,24 @@ cdef class Doc:
Doc.set_extension(attr)
self._.set(attr, doc_json["_"][attr])
- if doc_json.get("underscore_token", {}):
- for token_attr in doc_json["underscore_token"]:
- token_start = doc_json["underscore_token"][token_attr]["token_start"]
- value = doc_json["underscore_token"][token_attr]["value"]
-
- if not Token.has_extension(token_attr):
- Token.set_extension(token_attr)
- self[token_start]._.set(token_attr, value)
+ for token_attr in doc_json.get("underscore_token", {}):
+ if not Token.has_extension(token_attr):
+ Token.set_extension(token_attr)
+ for token_data in doc_json["underscore_token"][token_attr]:
+ start = token_by_char(self.c, self.length, token_data["start"])
+ value = token_data["value"]
+ self[start]._.set(token_attr, value)
- if doc_json.get("underscore_span", {}):
- for span_attr in doc_json["underscore_span"]:
- token_start = doc_json["underscore_span"][span_attr]["token_start"]
- token_end = doc_json["underscore_span"][span_attr]["token_end"]
- value = doc_json["underscore_span"][span_attr]["value"]
-
- if not Span.has_extension(span_attr):
- Span.set_extension(span_attr)
- self[token_start:token_end]._.set(span_attr, value)
+ for span_attr in doc_json.get("underscore_span", {}):
+ if not Span.has_extension(span_attr):
+ Span.set_extension(span_attr)
+ for span_data in doc_json["underscore_span"][span_attr]:
+ value = span_data["value"]
+ span = self.char_span(span_data["start"], span_data["end"])
+ span.label = span_data["label"]
+ span.kb_id = span_data["kb_id"]
+ span.id = span_data["id"]
+ span._.set(span_attr, value)
return self
def to_json(self, underscore=None):
@@ -1683,31 +1689,47 @@ cdef class Doc:
if underscore:
user_keys = set()
+ # Handle doc attributes with .get to include values from getters
+ # and not only values stored in user_data, for backwards
+ # compatibility
+ for attr in underscore:
+ if self.has_extension(attr):
+ if "_" not in data:
+ data["_"] = {}
+ value = self._.get(attr)
+ if not srsly.is_json_serializable(value):
+ raise ValueError(Errors.E107.format(attr=attr, value=repr(value)))
+ data["_"][attr] = value
+ user_keys.add(attr)
+ # Token and span attributes only include values stored in user_data
+ # and not values generated by getters
if self.user_data:
- data["_"] = {}
- data["underscore_token"] = {}
- data["underscore_span"] = {}
- for data_key in self.user_data:
+ for data_key, value in self.user_data.copy().items():
if type(data_key) == tuple and len(data_key) >= 4 and data_key[0] == "._.":
attr = data_key[1]
start = data_key[2]
end = data_key[3]
if attr in underscore:
user_keys.add(attr)
- value = self.user_data[data_key]
if not srsly.is_json_serializable(value):
raise ValueError(Errors.E107.format(attr=attr, value=repr(value)))
- # Check if doc attribute
- if start is None:
- data["_"][attr] = value
- # Check if token attribute
- elif end is None:
+ # Token attribute
+ if start is not None and end is None:
+ if "underscore_token" not in data:
+ data["underscore_token"] = {}
if attr not in data["underscore_token"]:
- data["underscore_token"][attr] = {"token_start": start, "value": value}
+ data["underscore_token"][attr] = []
+ data["underscore_token"][attr].append({"start": start, "value": value})
# Else span attribute
- else:
+ elif end is not None:
+ _label = data_key[4]
+ _kb_id = data_key[5]
+ _span_id = data_key[6]
+ if "underscore_span" not in data:
+ data["underscore_span"] = {}
if attr not in data["underscore_span"]:
- data["underscore_span"][attr] = {"token_start": start, "token_end": end, "value": value}
+ data["underscore_span"][attr] = []
+ data["underscore_span"][attr].append({"start": start, "end": end, "value": value, "label": _label, "kb_id": _kb_id, "id":_span_id})
for attr in underscore:
if attr not in user_keys:
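
The reworked underscore serialization keys span extensions by character offsets plus the span's label, kb_id and id, and collects them in lists. A hypothetical fragment of what Doc.to_json(underscore=["my_attr"]) would emit for a single span value, with the field names taken from the code above and the offsets/hashes made up:

    {
        "underscore_span": {
            "my_attr": [
                {"start": 10, "end": 20, "value": "some value",
                 "label": 0, "kb_id": 0, "id": 0}
            ]
        }
    }
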
diff --git a/spacy/tokens/graph.pyx b/spacy/tokens/graph.pyx
index adc4d23c8..0ae0d94c7 100644
--- a/spacy/tokens/graph.pyx
+++ b/spacy/tokens/graph.pyx
@@ -12,7 +12,7 @@ from murmurhash.mrmr cimport hash64
from .. import Errors
from ..typedefs cimport hash_t
-from ..strings import get_string_id
+from ..strings cimport get_string_id
from ..structs cimport EdgeC, GraphC
from .token import Token
diff --git a/spacy/tokens/retokenizer.pyx b/spacy/tokens/retokenizer.pyx
index 43e6d4aa7..29143bed3 100644
--- a/spacy/tokens/retokenizer.pyx
+++ b/spacy/tokens/retokenizer.pyx
@@ -18,7 +18,7 @@ from .underscore import is_writable_attr
from ..attrs import intify_attrs
from ..util import SimpleFrozenDict
from ..errors import Errors
-from ..strings import get_string_id
+from ..strings cimport get_string_id
cdef class Retokenizer:
diff --git a/spacy/tokens/span.pyi b/spacy/tokens/span.pyi
index 28b627c32..5168f3b03 100644
--- a/spacy/tokens/span.pyi
+++ b/spacy/tokens/span.pyi
@@ -82,8 +82,6 @@ class Span:
@property
def tensor(self) -> FloatsXd: ...
@property
- def sentiment(self) -> float: ...
- @property
def text(self) -> str: ...
@property
def text_with_ws(self) -> str: ...
@@ -95,8 +93,8 @@ class Span:
self,
start_idx: int,
end_idx: int,
- label: int = ...,
- kb_id: int = ...,
+ label: Union[int, str] = ...,
+ kb_id: Union[int, str] = ...,
vector: Optional[Floats1d] = ...,
) -> Span: ...
@property
diff --git a/spacy/tokens/span.pyx b/spacy/tokens/span.pyx
index 89d9727e9..b605434fd 100644
--- a/spacy/tokens/span.pyx
+++ b/spacy/tokens/span.pyx
@@ -218,11 +218,10 @@ cdef class Span:
cdef SpanC* span_c = self.span_c()
"""Custom extension attributes registered via `set_extension`."""
return Underscore(Underscore.span_extensions, self,
- start=span_c.start_char, end=span_c.end_char)
+ start=span_c.start_char, end=span_c.end_char, label=self.label, kb_id=self.kb_id, span_id=self.id)
def as_doc(self, *, bint copy_user_data=False, array_head=None, array=None):
"""Create a `Doc` object with a copy of the `Span`'s data.
-
copy_user_data (bool): Whether or not to copy the original doc's user data.
array_head (tuple): `Doc` array attrs, can be passed in to speed up computation.
array (ndarray): `Doc` as array, can be passed in to speed up computation.
@@ -275,12 +274,22 @@ cdef class Span:
char_offset = self.start_char
for key, value in self.doc.user_data.items():
if isinstance(key, tuple) and len(key) == 4 and key[0] == "._.":
- data_type, name, start, end = key
+ data_type = key[0]
+ name = key[1]
+ start = key[2]
+ end = key[3]
if start is not None or end is not None:
start -= char_offset
+ # Check if Span object
if end is not None:
end -= char_offset
- user_data[(data_type, name, start, end)] = copy.copy(value)
+ _label = key[4]
+ _kb_id = key[5]
+ _span_id = key[6]
+ user_data[(data_type, name, start, end, _label, _kb_id, _span_id)] = copy.copy(value)
+ # Else Token object
+ else:
+ user_data[(data_type, name, start, end)] = copy.copy(value)
else:
user_data[key] = copy.copy(value)
doc.user_data = user_data
@@ -309,7 +318,7 @@ cdef class Span:
for ancestor in ancestors:
ancestor_i = ancestor.i - span_c.start
if ancestor_i in range(length):
- array[i, head_col] = ancestor_i - i
+ array[i, head_col] = numpy.int32(ancestor_i - i).astype(numpy.uint64)
# if there is no appropriate ancestor, define a new artificial root
value = array[i, head_col]
@@ -317,7 +326,7 @@ cdef class Span:
new_root = old_to_new_root.get(ancestor_i, None)
if new_root is not None:
# take the same artificial root as a previous token from the same sentence
- array[i, head_col] = new_root - i
+ array[i, head_col] = numpy.int32(new_root - i).astype(numpy.uint64)
else:
# set this token as the new artificial root
array[i, head_col] = 0
@@ -566,16 +575,6 @@ cdef class Span:
return None
return self.doc.tensor[self.start : self.end]
- @property
- def sentiment(self):
- """RETURNS (float): A scalar value indicating the positivity or
- negativity of the span.
- """
- if "sentiment" in self.doc.user_span_hooks:
- return self.doc.user_span_hooks["sentiment"](self)
- else:
- return sum([token.sentiment for token in self]) / len(self)
-
@property
def text(self):
"""RETURNS (str): The original verbatim text of the span."""
@@ -791,21 +790,36 @@ cdef class Span:
return self.span_c().label
def __set__(self, attr_t label):
- self.span_c().label = label
+ if label != self.span_c().label:
+ old_label = self.span_c().label
+ self.span_c().label = label
+ new = Underscore(Underscore.span_extensions, self, start=self.span_c().start_char, end=self.span_c().end_char, label=self.label, kb_id=self.kb_id, span_id=self.id)
+ old = Underscore(Underscore.span_extensions, self, start=self.span_c().start_char, end=self.span_c().end_char, label=old_label, kb_id=self.kb_id, span_id=self.id)
+ Underscore._replace_keys(old, new)
property kb_id:
def __get__(self):
return self.span_c().kb_id
def __set__(self, attr_t kb_id):
- self.span_c().kb_id = kb_id
+ if kb_id != self.span_c().kb_id:
+ old_kb_id = self.span_c().kb_id
+ self.span_c().kb_id = kb_id
+ new = Underscore(Underscore.span_extensions, self, start=self.span_c().start_char, end=self.span_c().end_char, label=self.label, kb_id=self.kb_id, span_id=self.id)
+ old = Underscore(Underscore.span_extensions, self, start=self.span_c().start_char, end=self.span_c().end_char, label=self.label, kb_id=old_kb_id, span_id=self.id)
+ Underscore._replace_keys(old, new)
property id:
def __get__(self):
return self.span_c().id
def __set__(self, attr_t id):
- self.span_c().id = id
+ if id != self.span_c().id:
+ old_id = self.span_c().id
+ self.span_c().id = id
+ new = Underscore(Underscore.span_extensions, self, start=self.span_c().start_char, end=self.span_c().end_char, label=self.label, kb_id=self.kb_id, span_id=self.id)
+ old = Underscore(Underscore.span_extensions, self, start=self.span_c().start_char, end=self.span_c().end_char, label=self.label, kb_id=self.kb_id, span_id=old_id)
+ Underscore._replace_keys(old, new)
property ent_id:
"""Alias for the span's ID."""
diff --git a/spacy/tokens/span_group.pyi b/spacy/tokens/span_group.pyi
index 21cd124ab..0b4aa83aa 100644
--- a/spacy/tokens/span_group.pyi
+++ b/spacy/tokens/span_group.pyi
@@ -18,6 +18,7 @@ class SpanGroup:
def doc(self) -> Doc: ...
@property
def has_overlap(self) -> bool: ...
+ def __iter__(self): ...
def __len__(self) -> int: ...
def append(self, span: Span) -> None: ...
def extend(self, spans: Iterable[Span]) -> None: ...
diff --git a/spacy/tokens/span_group.pyx b/spacy/tokens/span_group.pyx
index 7caa01ee7..7325c1fa7 100644
--- a/spacy/tokens/span_group.pyx
+++ b/spacy/tokens/span_group.pyx
@@ -159,6 +159,16 @@ cdef class SpanGroup:
return self._concat(other)
return NotImplemented
+ def __iter__(self):
+ """
+ Iterate over the spans in this SpanGroup.
+ YIELDS (Span): A span in this SpanGroup.
+
+ DOCS: https://spacy.io/api/spangroup#iter
+ """
+ for i in range(self.c.size()):
+ yield self[i]
+
def append(self, Span span):
"""Add a span to the group. The span must refer to the same Doc
object as the span group.
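As a quick illustration of the `__iter__` addition above (a sketch, not part of the diff; the `"cities"` span key and the example text are made up):

```python
import spacy
from spacy.tokens import Span

nlp = spacy.blank("en")
doc = nlp("Berlin and Munich are cities")
doc.spans["cities"] = [Span(doc, 0, 1, label="GPE"), Span(doc, 2, 3, label="GPE")]

# SpanGroup now defines an explicit __iter__, so iterating yields each Span
for span in doc.spans["cities"]:
    print(span.text, span.label_)
```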
diff --git a/spacy/tokens/token.pyi b/spacy/tokens/token.pyi
index bd585d034..6de7e984a 100644
--- a/spacy/tokens/token.pyi
+++ b/spacy/tokens/token.pyi
@@ -79,8 +79,6 @@ class Token:
@property
def prob(self) -> float: ...
@property
- def sentiment(self) -> float: ...
- @property
def lang(self) -> int: ...
@property
def idx(self) -> int: ...
diff --git a/spacy/tokens/token.pyx b/spacy/tokens/token.pyx
index cee903f48..64c707acd 100644
--- a/spacy/tokens/token.pyx
+++ b/spacy/tokens/token.pyx
@@ -283,14 +283,6 @@ cdef class Token:
"""RETURNS (float): Smoothed log probability estimate of token type."""
return self.vocab[self.c.lex.orth].prob
- @property
- def sentiment(self):
- """RETURNS (float): A scalar value indicating the positivity or
- negativity of the token."""
- if "sentiment" in self.doc.user_token_hooks:
- return self.doc.user_token_hooks["sentiment"](self)
- return self.vocab[self.c.lex.orth].sentiment
-
@property
def lang(self):
"""RETURNS (uint64): ID of the language of the parent document's
diff --git a/spacy/tokens/underscore.py b/spacy/tokens/underscore.py
index e9a4e1862..f2f357441 100644
--- a/spacy/tokens/underscore.py
+++ b/spacy/tokens/underscore.py
@@ -2,10 +2,10 @@ from typing import Dict, Any, List, Optional, Tuple, Union, TYPE_CHECKING
import functools
import copy
from ..errors import Errors
+from .span import Span
if TYPE_CHECKING:
from .doc import Doc
- from .span import Span
from .token import Token
@@ -25,6 +25,9 @@ class Underscore:
obj: Union["Doc", "Span", "Token"],
start: Optional[int] = None,
end: Optional[int] = None,
+ label: int = 0,
+ kb_id: int = 0,
+ span_id: int = 0,
):
object.__setattr__(self, "_extensions", extensions)
object.__setattr__(self, "_obj", obj)
@@ -36,6 +39,10 @@ class Underscore:
object.__setattr__(self, "_doc", obj.doc)
object.__setattr__(self, "_start", start)
object.__setattr__(self, "_end", end)
+ if type(obj) == Span:
+ object.__setattr__(self, "_label", label)
+ object.__setattr__(self, "_kb_id", kb_id)
+ object.__setattr__(self, "_span_id", span_id)
def __dir__(self) -> List[str]:
# Hack to enable autocomplete on custom extensions
@@ -88,8 +95,39 @@ class Underscore:
def has(self, name: str) -> bool:
return name in self._extensions
- def _get_key(self, name: str) -> Tuple[str, str, Optional[int], Optional[int]]:
- return ("._.", name, self._start, self._end)
+ def _get_key(
+ self, name: str
+ ) -> Union[
+ Tuple[str, str, Optional[int], Optional[int]],
+ Tuple[str, str, Optional[int], Optional[int], int, int, int],
+ ]:
+ if hasattr(self, "_label"):
+ return (
+ "._.",
+ name,
+ self._start,
+ self._end,
+ self._label,
+ self._kb_id,
+ self._span_id,
+ )
+ else:
+ return "._.", name, self._start, self._end
+
+ @staticmethod
+ def _replace_keys(old_underscore: "Underscore", new_underscore: "Underscore"):
+ """
+ This function is called by Span when its label, kb_id or span ID is re-assigned.
+ It checks whether any user_data is stored for this span and replaces the keys.
+ """
+ for name in old_underscore._extensions:
+ old_key = old_underscore._get_key(name)
+ old_doc = old_underscore._doc
+ new_key = new_underscore._get_key(name)
+ if old_key != new_key and old_key in old_doc.user_data:
+ old_underscore._doc.user_data[
+ new_key
+ ] = old_underscore._doc.user_data.pop(old_key)
@classmethod
def get_state(cls) -> Tuple[Dict[Any, Any], Dict[Any, Any], Dict[Any, Any]]:
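A minimal sketch of the behaviour the `_replace_keys` helper above is meant to guarantee (the `note` extension and the example text are illustrative only):

```python
import spacy
from spacy.tokens import Span

Span.set_extension("note", default=None)
nlp = spacy.blank("en")
doc = nlp("Alice visited Paris")
span = doc[2:3]
span._.note = "city"
span.label_ = "GPE"           # label setter calls Underscore._replace_keys
assert span._.note == "city"  # the user_data key is rewritten, not lost
```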
diff --git a/spacy/training/augment.py b/spacy/training/augment.py
index 55d780ba4..2fe8c24fb 100644
--- a/spacy/training/augment.py
+++ b/spacy/training/augment.py
@@ -6,7 +6,7 @@ from functools import partial
from ..util import registry
from .example import Example
-from .iob_utils import split_bilu_label
+from .iob_utils import split_bilu_label, _doc_to_biluo_tags_with_partial
if TYPE_CHECKING:
from ..language import Language # noqa: F401
@@ -62,6 +62,9 @@ def combined_augmenter(
if orth_variants and random.random() < orth_level:
raw_text = example.text
orig_dict = example.to_dict()
+ orig_dict["doc_annotation"]["entities"] = _doc_to_biluo_tags_with_partial(
+ example.reference
+ )
variant_text, variant_token_annot = make_orth_variants(
nlp,
raw_text,
@@ -128,6 +131,9 @@ def lower_casing_augmenter(
def make_lowercase_variant(nlp: "Language", example: Example):
example_dict = example.to_dict()
+ example_dict["doc_annotation"]["entities"] = _doc_to_biluo_tags_with_partial(
+ example.reference
+ )
doc = nlp.make_doc(example.text.lower())
example_dict["token_annotation"]["ORTH"] = [t.lower_ for t in example.reference]
return example.from_dict(doc, example_dict)
@@ -146,6 +152,9 @@ def orth_variants_augmenter(
else:
raw_text = example.text
orig_dict = example.to_dict()
+ orig_dict["doc_annotation"]["entities"] = _doc_to_biluo_tags_with_partial(
+ example.reference
+ )
variant_text, variant_token_annot = make_orth_variants(
nlp,
raw_text,
@@ -248,6 +257,9 @@ def make_whitespace_variant(
RETURNS (Example): Example with one additional space token.
"""
example_dict = example.to_dict()
+ example_dict["doc_annotation"]["entities"] = _doc_to_biluo_tags_with_partial(
+ example.reference
+ )
doc_dict = example_dict.get("doc_annotation", {})
token_dict = example_dict.get("token_annotation", {})
# returned unmodified if:
diff --git a/spacy/training/batchers.py b/spacy/training/batchers.py
index f0b6c3123..73678c7fc 100644
--- a/spacy/training/batchers.py
+++ b/spacy/training/batchers.py
@@ -2,11 +2,12 @@ from typing import Union, Iterable, Sequence, TypeVar, List, Callable, Iterator
from typing import Optional, Any
from functools import partial
import itertools
+from thinc.schedules import Schedule, constant as constant_schedule
from ..util import registry, minibatch
-Sizing = Union[Sequence[int], int]
+Sizing = Union[Sequence[int], int, Schedule[int]]
ItemT = TypeVar("ItemT")
BatcherT = Callable[[Iterable[ItemT]], Iterable[List[ItemT]]]
@@ -111,12 +112,13 @@ def minibatch_by_padded_size(
The `len` function is used by default.
"""
if isinstance(size, int):
- size_ = itertools.repeat(size) # type: Iterator[int]
+ size_ = constant_schedule(size)
else:
- size_ = iter(size)
- for outer_batch in minibatch(seqs, size=buffer):
+ assert isinstance(size, Schedule)
+ size_ = size
+ for step, outer_batch in enumerate(minibatch(seqs, size=buffer)):
outer_batch = list(outer_batch)
- target_size = next(size_)
+ target_size = size_(step)
for indices in _batch_by_length(outer_batch, target_size, get_length):
subbatch = [outer_batch[i] for i in indices]
padded_size = max(len(seq) for seq in subbatch) * len(subbatch)
@@ -147,10 +149,12 @@ def minibatch_by_words(
item. The `len` function is used by default.
"""
if isinstance(size, int):
- size_ = itertools.repeat(size) # type: Iterator[int]
+ size_ = constant_schedule(size)
else:
- size_ = iter(size)
- target_size = next(size_)
+ assert isinstance(size, Schedule)
+ size_ = size
+ step = 0
+ target_size = size_(step)
tol_size = target_size * tolerance
batch = []
overflow = []
@@ -175,7 +179,8 @@ def minibatch_by_words(
else:
if batch:
yield batch
- target_size = next(size_)
+ step += 1
+ target_size = size_(step)
tol_size = target_size * tolerance
batch = overflow
batch_size = overflow_size
@@ -193,7 +198,8 @@ def minibatch_by_words(
else:
if batch:
yield batch
- target_size = next(size_)
+ step += 1
+ target_size = size_(step)
tol_size = target_size * tolerance
batch = [seq]
batch_size = n_words
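Sketch of the `Sizing` change above, assuming the Thinc v9 schedules pinned earlier in this diff: a batch size is now a `Schedule` queried with the step index instead of an iterator advanced with `next()`.

```python
from thinc.schedules import compounding

size = compounding(1.0, 32.0, 1.001)
for step in range(3):
    # Schedules are plain callables of the step, so batching code can be
    # re-entered at any step without replaying an iterator
    print(step, size(step))  # 1.0, 1.001, 1.002001
```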
diff --git a/spacy/training/example.pyx b/spacy/training/example.pyx
index f6fc3a48d..1908bf042 100644
--- a/spacy/training/example.pyx
+++ b/spacy/training/example.pyx
@@ -442,26 +442,27 @@ def _annot2array(vocab, tok_annot, doc_annot):
if key not in IDS:
raise ValueError(Errors.E974.format(obj="token", key=key))
elif key in ["ORTH", "SPACY"]:
- pass
+ continue
elif key == "HEAD":
attrs.append(key)
- values.append([h-i if h is not None else 0 for i, h in enumerate(value)])
+ row = [h-i if h is not None else 0 for i, h in enumerate(value)]
elif key == "DEP":
attrs.append(key)
- values.append([vocab.strings.add(h) if h is not None else MISSING_DEP for h in value])
+ row = [vocab.strings.add(h) if h is not None else MISSING_DEP for h in value]
elif key == "SENT_START":
attrs.append(key)
- values.append([to_ternary_int(v) for v in value])
+ row = [to_ternary_int(v) for v in value]
elif key == "MORPH":
attrs.append(key)
- values.append([vocab.morphology.add(v) for v in value])
+ row = [vocab.morphology.add(v) for v in value]
else:
attrs.append(key)
if not all(isinstance(v, str) for v in value):
types = set([type(v) for v in value])
raise TypeError(Errors.E969.format(field=key, types=types)) from None
- values.append([vocab.strings.add(v) for v in value])
- array = numpy.asarray(values, dtype="uint64")
+ row = [vocab.strings.add(v) for v in value]
+ values.append([numpy.array(v, dtype=numpy.int32).astype(numpy.uint64) if v < 0 else v for v in row])
+ array = numpy.array(values, dtype=numpy.uint64)
return attrs, array.T
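The casting in the last hunk above packs negative values (for example negative `HEAD` offsets) into the `uint64` attribute array; a small sketch of the round trip:

```python
import numpy

stored = numpy.int32(-2).astype(numpy.uint64)  # a large unsigned value (2**64 - 2)
recovered = stored.astype(numpy.int32)         # truncating back to 32 bits gives -2
print(stored, recovered)
```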
diff --git a/spacy/training/iob_utils.py b/spacy/training/iob_utils.py
index 61f83a1c3..0d4d246b0 100644
--- a/spacy/training/iob_utils.py
+++ b/spacy/training/iob_utils.py
@@ -60,6 +60,14 @@ def doc_to_biluo_tags(doc: Doc, missing: str = "O"):
)
+def _doc_to_biluo_tags_with_partial(doc: Doc) -> List[str]:
+ ents = doc_to_biluo_tags(doc, missing="-")
+ for i, token in enumerate(doc):
+ if token.ent_iob == 2:
+ ents[i] = "O"
+ return ents
+
+
def offsets_to_biluo_tags(
doc: Doc, entities: Iterable[Tuple[int, int, Union[str, int]]], missing: str = "O"
) -> List[str]:
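Illustrative sketch of what `_doc_to_biluo_tags_with_partial` produces (the example text and entity are made up): unannotated tokens stay `-` (missing), while tokens explicitly marked as outside any entity (`ent_iob == 2`) become `O`.

```python
import spacy
from spacy.tokens import Span
from spacy.training.iob_utils import _doc_to_biluo_tags_with_partial

nlp = spacy.blank("en")
doc = nlp("Alice visited Paris")
# "Alice" is an entity, "visited" is explicitly outside, "Paris" stays unannotated
doc.set_ents([Span(doc, 0, 1, label="PERSON")], outside=[doc[1:2]], default="missing")
print(_doc_to_biluo_tags_with_partial(doc))  # ['U-PERSON', 'O', '-']
```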
diff --git a/spacy/training/loggers.py b/spacy/training/loggers.py
index 408ea7140..7de31822e 100644
--- a/spacy/training/loggers.py
+++ b/spacy/training/loggers.py
@@ -26,6 +26,8 @@ def setup_table(
return final_cols, final_widths, ["r" for _ in final_widths]
+# We cannot rename this method as it's directly imported
+# and used by external packages such as spacy-loggers.
@registry.loggers("spacy.ConsoleLogger.v2")
def console_logger(
progress_bar: bool = False,
@@ -33,7 +35,27 @@ def console_logger(
output_file: Optional[Union[str, Path]] = None,
):
"""The ConsoleLogger.v2 prints out training logs in the console and/or saves them to a jsonl file.
- progress_bar (bool): Whether the logger should print the progress bar.
+ progress_bar (bool): Whether the logger should print a progress bar tracking the steps till the next evaluation pass.
+ console_output (bool): Whether the logger should print the logs on the console.
+ output_file (Optional[Union[str, Path]]): The file to save the training logs to.
+ """
+ return console_logger_v3(
+ progress_bar=None if progress_bar is False else "eval",
+ console_output=console_output,
+ output_file=output_file,
+ )
+
+
+@registry.loggers("spacy.ConsoleLogger.v3")
+def console_logger_v3(
+ progress_bar: Optional[str] = None,
+ console_output: bool = True,
+ output_file: Optional[Union[str, Path]] = None,
+):
+ """The ConsoleLogger.v3 prints out training logs in the console and/or saves them to a jsonl file.
+ progress_bar (Optional[str]): Type of progress bar to show in the console. Allowed values:
+ train - Tracks the number of steps from the beginning of training until the full training run is complete (training.max_steps is reached).
+ eval - Tracks the number of steps between the previous and next evaluation (training.eval_frequency is reached).
console_output (bool): Whether the logger should print the logs on the console.
output_file (Optional[Union[str, Path]]): The file to save the training logs to.
"""
@@ -70,6 +92,7 @@ def console_logger(
for name, proc in nlp.pipeline
if hasattr(proc, "is_trainable") and proc.is_trainable
]
+ max_steps = nlp.config["training"]["max_steps"]
eval_frequency = nlp.config["training"]["eval_frequency"]
score_weights = nlp.config["training"]["score_weights"]
score_cols = [col for col, value in score_weights.items() if value is not None]
@@ -84,6 +107,13 @@ def console_logger(
write(msg.row(table_header, widths=table_widths, spacing=spacing))
write(msg.row(["-" * width for width in table_widths], spacing=spacing))
progress = None
+ expected_progress_types = ("train", "eval")
+ if progress_bar is not None and progress_bar not in expected_progress_types:
+ raise ValueError(
+ Errors.E1048.format(
+ unexpected=progress_bar, expected=expected_progress_types
+ )
+ )
def log_step(info: Optional[Dict[str, Any]]) -> None:
nonlocal progress
@@ -141,11 +171,23 @@ def console_logger(
)
)
if progress_bar:
+ if progress_bar == "train":
+ total = max_steps
+ desc = f"Last Eval Epoch: {info['epoch']}"
+ initial = info["step"]
+ else:
+ total = eval_frequency
+ desc = f"Epoch {info['epoch']+1}"
+ initial = 0
# Set disable=None, so that it disables on non-TTY
progress = tqdm.tqdm(
- total=eval_frequency, disable=None, leave=False, file=stderr
+ total=total,
+ disable=None,
+ leave=False,
+ file=stderr,
+ initial=initial,
)
- progress.set_description(f"Epoch {info['epoch']+1}")
+ progress.set_description(desc)
def finalize() -> None:
if output_stream:
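A hedged sketch of opting into the new `spacy.ConsoleLogger.v3` added above; in a real project this would usually be configured in `config.cfg` rather than in code.

```python
from spacy import registry

# Equivalent config.cfg section (assumed):
#   [training.logger]
#   @loggers = "spacy.ConsoleLogger.v3"
#   progress_bar = "train"
make_logger = registry.loggers.get("spacy.ConsoleLogger.v3")
logger = make_logger(progress_bar="train", console_output=True, output_file=None)
```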
diff --git a/spacy/training/loop.py b/spacy/training/loop.py
index 06372cbb0..fc929816d 100644
--- a/spacy/training/loop.py
+++ b/spacy/training/loop.py
@@ -59,6 +59,7 @@ def train(
batcher = T["batcher"]
train_logger = T["logger"]
before_to_disk = create_before_to_disk_callback(T["before_to_disk"])
+ before_update = T["before_update"]
# Helper function to save checkpoints. This is a closure for convenience,
# to avoid passing in all the args all the time.
@@ -89,6 +90,7 @@ def train(
eval_frequency=T["eval_frequency"],
exclude=frozen_components,
annotating_components=annotating_components,
+ before_update=before_update,
)
clean_output_dir(output_path)
stdout.write(msg.info(f"Pipeline: {nlp.pipe_names}") + "\n")
@@ -98,7 +100,7 @@ def train(
stdout.write(
msg.info(f"Set annotations on update for: {annotating_components}") + "\n"
)
- stdout.write(msg.info(f"Initial learn rate: {optimizer.learn_rate}") + "\n")
+ stdout.write(msg.info(f"Initial learn rate: {optimizer.learn_rate(step=0)}") + "\n")
with nlp.select_pipes(disable=frozen_components):
log_step, finalize_logger = train_logger(nlp, stdout, stderr)
try:
@@ -150,6 +152,7 @@ def train_while_improving(
max_steps: int,
exclude: List[str],
annotating_components: List[str],
+ before_update: Optional[Callable[["Language", Dict[str, Any]], None]],
):
"""Train until an evaluation stops improving. Works as a generator,
with each iteration yielding a tuple `(batch, info, is_best_checkpoint)`,
@@ -198,7 +201,10 @@ def train_while_improving(
words_seen = 0
start_time = timer()
for step, (epoch, batch) in enumerate(train_data):
- dropout = next(dropouts) # type: ignore
+ if before_update:
+ before_update_args = {"step": step, "epoch": epoch}
+ before_update(nlp, before_update_args)
+ dropout = dropouts(optimizer.step) # type: ignore
for subbatch in subdivide_batch(batch, accumulate_gradient):
nlp.update(
subbatch,
@@ -224,6 +230,7 @@ def train_while_improving(
score, other_scores = evaluate()
else:
score, other_scores = evaluate()
+ optimizer.last_score = score
results.append((score, step))
is_best_checkpoint = score == max(results)[0]
else:
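Sketch of a `before_update` callback matching the hook wired into the loop above; the registry name `my_before_update.v1` is made up for illustration and would be referenced from `[training.before_update]` in the config.

```python
from spacy import registry

@registry.callbacks("my_before_update.v1")
def create_before_update():
    def before_update(nlp, args):
        # per the loop above, args currently carries the step and epoch
        print(f"step={args['step']} epoch={args['epoch']}")
    return before_update
```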
diff --git a/spacy/util.py b/spacy/util.py
index 4e1a62d05..aafbbb5de 100644
--- a/spacy/util.py
+++ b/spacy/util.py
@@ -9,7 +9,7 @@ import re
from pathlib import Path
import thinc
from thinc.api import NumpyOps, get_current_ops, Adam, Config, Optimizer
-from thinc.api import ConfigValidationError, Model
+from thinc.api import ConfigValidationError, Model, constant as constant_schedule
import functools
import itertools
import numpy
@@ -37,22 +37,15 @@ try:
except ImportError:
cupy = None
-# These are functions that were previously (v2.x) available from spacy.util
-# and have since moved to Thinc. We're importing them here so people's code
-# doesn't break, but they should always be imported from Thinc from now on,
-# not from spacy.util.
-from thinc.api import fix_random_seed, compounding, decaying # noqa: F401
-
from .symbols import ORTH
from .compat import cupy, CudaStream, is_windows, importlib_metadata
-from .errors import Errors, Warnings, OLD_MODEL_SHORTCUTS
+from .errors import Errors, Warnings
from . import about
if TYPE_CHECKING:
# This lets us add type hints for mypy etc. without causing circular imports
- from .language import Language # noqa: F401
- from .pipeline import Pipe # noqa: F401
+ from .language import Language, PipeCallable # noqa: F401
from .tokens import Doc, Span # noqa: F401
from .vocab import Vocab # noqa: F401
@@ -67,7 +60,6 @@ LEXEME_NORM_LANGS = ["cs", "da", "de", "el", "en", "id", "lb", "mk", "pt", "ru",
CONFIG_SECTION_ORDER = ["paths", "variables", "system", "nlp", "components", "corpora", "training", "pretraining", "initialize"]
# fmt: on
-
logger = logging.getLogger("spacy")
logger_stream_handler = logging.StreamHandler()
logger_stream_handler.setFormatter(
@@ -394,13 +386,17 @@ def get_module_path(module: ModuleType) -> Path:
return file_path.parent
+# Default value for passed enable/disable values.
+_DEFAULT_EMPTY_PIPES = SimpleFrozenList()
+
+
def load_model(
name: Union[str, Path],
*,
vocab: Union["Vocab", bool] = True,
- disable: Union[str, Iterable[str]] = SimpleFrozenList(),
- enable: Union[str, Iterable[str]] = SimpleFrozenList(),
- exclude: Union[str, Iterable[str]] = SimpleFrozenList(),
+ disable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
+ enable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
+ exclude: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
config: Union[Dict[str, Any], Config] = SimpleFrozenDict(),
) -> "Language":
"""Load a model from a package or data path.
@@ -431,8 +427,6 @@ def load_model(
return load_model_from_path(Path(name), **kwargs) # type: ignore[arg-type]
elif hasattr(name, "exists"): # Path or Path-like to model data
return load_model_from_path(name, **kwargs) # type: ignore[arg-type]
- if name in OLD_MODEL_SHORTCUTS:
- raise IOError(Errors.E941.format(name=name, full=OLD_MODEL_SHORTCUTS[name])) # type: ignore[index]
raise IOError(Errors.E050.format(name=name))
@@ -440,9 +434,9 @@ def load_model_from_package(
name: str,
*,
vocab: Union["Vocab", bool] = True,
- disable: Union[str, Iterable[str]] = SimpleFrozenList(),
- enable: Union[str, Iterable[str]] = SimpleFrozenList(),
- exclude: Union[str, Iterable[str]] = SimpleFrozenList(),
+ disable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
+ enable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
+ exclude: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
config: Union[Dict[str, Any], Config] = SimpleFrozenDict(),
) -> "Language":
"""Load a model from an installed package.
@@ -470,9 +464,9 @@ def load_model_from_path(
*,
meta: Optional[Dict[str, Any]] = None,
vocab: Union["Vocab", bool] = True,
- disable: Union[str, Iterable[str]] = SimpleFrozenList(),
- enable: Union[str, Iterable[str]] = SimpleFrozenList(),
- exclude: Union[str, Iterable[str]] = SimpleFrozenList(),
+ disable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
+ enable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
+ exclude: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
config: Union[Dict[str, Any], Config] = SimpleFrozenDict(),
) -> "Language":
"""Load a model from a data directory path. Creates Language class with
@@ -516,9 +510,9 @@ def load_model_from_config(
*,
meta: Dict[str, Any] = SimpleFrozenDict(),
vocab: Union["Vocab", bool] = True,
- disable: Union[str, Iterable[str]] = SimpleFrozenList(),
- enable: Union[str, Iterable[str]] = SimpleFrozenList(),
- exclude: Union[str, Iterable[str]] = SimpleFrozenList(),
+ disable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
+ enable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
+ exclude: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
auto_fill: bool = False,
validate: bool = True,
) -> "Language":
@@ -616,9 +610,9 @@ def load_model_from_init_py(
init_file: Union[Path, str],
*,
vocab: Union["Vocab", bool] = True,
- disable: Union[str, Iterable[str]] = SimpleFrozenList(),
- enable: Union[str, Iterable[str]] = SimpleFrozenList(),
- exclude: Union[str, Iterable[str]] = SimpleFrozenList(),
+ disable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
+ enable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
+ exclude: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
config: Union[Dict[str, Any], Config] = SimpleFrozenDict(),
) -> "Language":
"""Helper function to use in the `load()` method of a model package's
@@ -1588,12 +1582,12 @@ def minibatch(items, size):
so that batch-size can vary on each step.
"""
if isinstance(size, int):
- size_ = itertools.repeat(size)
+ size_ = constant_schedule(size)
else:
size_ = size
items = iter(items)
- while True:
- batch_size = next(size_)
+ for step in itertools.count():
+ batch_size = size_(step)
batch = list(itertools.islice(items, int(batch_size)))
if len(batch) == 0:
break
@@ -1639,9 +1633,11 @@ def check_bool_env_var(env_var: str) -> bool:
def _pipe(
docs: Iterable["Doc"],
- proc: "Pipe",
+ proc: "PipeCallable",
name: str,
- default_error_handler: Callable[[str, "Pipe", List["Doc"], Exception], NoReturn],
+ default_error_handler: Callable[
+ [str, "PipeCallable", List["Doc"], Exception], NoReturn
+ ],
kwargs: Mapping[str, Any],
) -> Iterator["Doc"]:
if hasattr(proc, "pipe"):
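With the `minibatch` change above, a non-int size is assumed to be a schedule and is called with the step index; a small sketch (assuming Thinc v9):

```python
from thinc.schedules import compounding
from spacy.util import minibatch

batches = list(minibatch(range(10), size=compounding(1.0, 4.0, 1.5)))
print([len(b) for b in batches])  # batch sizes grow from 1 towards 4
```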
diff --git a/spacy/vectors.pyx b/spacy/vectors.pyx
index 8300220c1..be0f6db09 100644
--- a/spacy/vectors.pyx
+++ b/spacy/vectors.pyx
@@ -243,6 +243,15 @@ cdef class Vectors:
else:
return key in self.key2row
+ def __eq__(self, other):
+ # Check for equality, with faster checks first
+ return (
+ self.shape == other.shape
+ and self.key2row == other.key2row
+ and self.to_bytes(exclude=["strings"])
+ == other.to_bytes(exclude=["strings"])
+ )
+
def resize(self, shape, inplace=False):
"""Resize the underlying vectors array. If inplace=True, the memory
is reallocated. This may cause other references to the data to become
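Sketch of the new `Vectors.__eq__`: two tables with the same shape, keys and data now compare equal by content (the toy keys below are illustrative).

```python
import numpy
from spacy.vectors import Vectors

data = numpy.zeros((2, 4), dtype="f")
v1 = Vectors(data=data, keys=["cat", "dog"])
v2 = Vectors(data=data.copy(), keys=["cat", "dog"])
assert v1 == v2
```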
diff --git a/spacy/vocab.pyx b/spacy/vocab.pyx
index d780dec0d..fc496a68b 100644
--- a/spacy/vocab.pyx
+++ b/spacy/vocab.pyx
@@ -467,9 +467,9 @@ cdef class Vocab:
setters = ["strings", "vectors"]
if "strings" not in exclude:
self.strings.to_disk(path / "strings.json")
- if "vectors" not in "exclude":
+ if "vectors" not in exclude:
self.vectors.to_disk(path, exclude=["strings"])
- if "lookups" not in "exclude":
+ if "lookups" not in exclude:
self.lookups.to_disk(path)
def from_disk(self, path, *, exclude=tuple()):
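With the string-literal bug fixed above, `exclude` is now actually honoured when saving the vocab; a minimal sketch:

```python
import spacy

nlp = spacy.blank("en")
# vectors and lookups are now really skipped instead of always being written
nlp.vocab.to_disk("vocab_dir", exclude=["vectors", "lookups"])
```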
diff --git a/website/README.md b/website/README.md
index db050cf03..890a48ef9 100644
--- a/website/README.md
+++ b/website/README.md
@@ -1,531 +1,11 @@
-
-
# spacy.io website and docs

-_This page contains the documentation and styleguide for the spaCy website. Its
-rendered version is available at https://spacy.io/styleguide._
+The styleguide for the spaCy website is available at
+[spacy.io/styleguide](https://spacy.io/styleguide).
----
-
-
-
-The [spacy.io](https://spacy.io) website is implemented using
-[Gatsby](https://www.gatsbyjs.org) with
-[Remark](https://github.com/remarkjs/remark) and [MDX](https://mdxjs.com/). This
-allows authoring content in **straightforward Markdown** without the usual
-limitations. Standard elements can be overwritten with powerful
-[React](http://reactjs.org/) components and wherever Markdown syntax isn't
-enough, JSX components can be used.
-
-> #### Contributing to the site
->
-> The docs can always use another example or more detail, and they should always
-> be up to date and not misleading. We always appreciate a
-> [pull request](https://github.com/explosion/spaCy/pulls). To quickly find the
-> correct file to edit, simply click on the "Suggest edits" button at the bottom
-> of a page.
->
-> For more details on editing the site locally, see the installation
-> instructions and markdown reference below.
-
-## Logo {#logo source="website/src/images/logo.svg"}
-
-import { Logos } from 'widgets/styleguide'
-
-If you would like to use the spaCy logo on your site, please get in touch and
-ask us first. However, if you want to show support and tell others that your
-project is using spaCy, you can grab one of our
-[spaCy badges](/usage/spacy-101#faq-project-with-spacy).
-
-
-
-## Colors {#colors}
-
-import { Colors, Patterns } from 'widgets/styleguide'
-
-
-
-### Patterns
-
-
-
-## Typography {#typography}
-
-import { H1, H2, H3, H4, H5, Label, InlineList, Comment } from
-'components/typography'
-
-> #### Markdown
->
-> ```markdown_
-> ## Headline 2
-> ## Headline 2 {#some_id}
-> ## Headline 2 {#some_id tag="method"}
-> ```
->
-> #### JSX
->
-> ```jsx
-> Headline 2
-> Headline 2
-> Headline 2
-> ```
-
-Headlines are set in
-[HK Grotesk](http://cargocollective.com/hanken/HK-Grotesk-Open-Source-Font) by
-Hanken Design. All other body text and code uses the best-matching default
-system font to provide a "native" reading experience. All code uses the
-[JetBrains Mono](https://www.jetbrains.com/lp/mono/) typeface by JetBrains.
-
-
-
-Level 2 headings are automatically wrapped in `` elements at compile
-time, using a custom
-[Markdown transformer](https://github.com/explosion/spaCy/tree/master/website/plugins/remark-wrap-section.js).
-This makes it easier to highlight the section that's currently in the viewpoint
-in the sidebar menu.
-
-
-
-
-Headline 1
-Headline 2
-Headline 3
-Headline 4
-Headline 5
-
-
-
----
-
-The following optional attributes can be set on the headline to modify it. For
-example, to add a tag for the documented type or mark features that have been
-introduced in a specific version or require statistical models to be loaded.
-Tags are also available as standalone ` ` components.
-
-| Argument | Example | Result |
-| -------- | -------------------------- | ----------------------------------------- |
-| `tag` | `{tag="method"}` | method |
-| `new` | `{new="3"}` | 3 |
-| `model` | `{model="tagger, parser"}` | tagger, parser |
-| `hidden` | `{hidden="true"}` | |
-
-## Elements {#elements}
-
-### Links {#links}
-
-> #### Markdown
->
-> ```markdown
-> [I am a link](https://spacy.io)
-> ```
->
-> #### JSX
->
-> ```jsx
-> I am a link
-> ```
-
-Special link styles are used depending on the link URL.
-
-- [I am a regular external link](https://explosion.ai)
-- [I am a link to the documentation](/api/doc)
-- [I am a link to an architecture](/api/architectures#HashEmbedCNN)
-- [I am a link to a model](/models/en#en_core_web_sm)
-- [I am a link to GitHub](https://github.com/explosion/spaCy)
-
-### Abbreviations {#abbr}
-
-import { Abbr } from 'components/typography'
-
-> #### JSX
->
-> ```jsx
-> Abbreviation
-> ```
-
-Some text with an abbreviation. On small
-screens, I collapse and the explanation text is displayed next to the
-abbreviation.
-
-### Tags {#tags}
-
-import Tag from 'components/tag'
-
-> ```jsx
-> method
-> 2.1
-> tagger, parser
-> ```
-
-Tags can be used together with headlines, or next to properties across the
-documentation, and combined with tooltips to provide additional information. An
-optional `variant` argument can be used for special tags. `variant="new"` makes
-the tag take a version number to mark new features. Using the component,
-visibility of this tag can later be toggled once the feature isn't considered
-new anymore. Setting `variant="model"` takes a description of model capabilities
-and can be used to mark features that require a respective model to be
-installed.
-
-
-
-method 2 tagger,
-parser
-
-
-
-### Buttons {#buttons}
-
-import Button from 'components/button'
-
-> ```jsx
->
->
-> ```
-
-Link buttons come in two variants, `primary` and `secondary` and two sizes, with
-an optional `large` size modifier. Since they're mostly used as enhanced links,
-the buttons are implemented as styled links instead of native button elements.
-
-
-
-
-
-
-
-
-
-## Components
-
-### Table {#table}
-
-> #### Markdown
->
-> ```markdown_
-> | Header 1 | Header 2 |
-> | -------- | -------- |
-> | Column 1 | Column 2 |
-> ```
->
-> #### JSX
->
-> ```markup
->
-> Header 1 Header 2
-> Column 1 Column 2
->
-> ```
-
-Tables are used to present data and API documentation. Certain keywords can be
-used to mark a footer row with a distinct style, for example to visualize the
-return values of a documented function.
-
-| Header 1 | Header 2 | Header 3 | Header 4 |
-| ----------- | -------- | :------: | -------: |
-| Column 1 | Column 2 | Column 3 | Column 4 |
-| Column 1 | Column 2 | Column 3 | Column 4 |
-| Column 1 | Column 2 | Column 3 | Column 4 |
-| Column 1 | Column 2 | Column 3 | Column 4 |
-| **RETURNS** | Column 2 | Column 3 | Column 4 |
-
-Tables also support optional "divider" rows that are typically used to denote
-keyword-only arguments in API documentation. To turn a row into a dividing
-headline, it should only include content in its first cell, and its value should
-be italicized:
-
-> #### Markdown
->
-> ```markdown_
-> | Header 1 | Header 2 | Header 3 |
-> | -------- | -------- | -------- |
-> | Column 1 | Column 2 | Column 3 |
-> | _Hello_ | | |
-> | Column 1 | Column 2 | Column 3 |
-> ```
-
-| Header 1 | Header 2 | Header 3 |
-| -------- | -------- | -------- |
-| Column 1 | Column 2 | Column 3 |
-| _Hello_ | | |
-| Column 1 | Column 2 | Column 3 |
-
-### Type Annotations {#type-annotations}
-
-> #### Markdown
->
-> ```markdown_
-> ~~Model[List[Doc], Floats2d]~~
-> ```
->
-> #### JSX
->
-> ```markup
-> Model[List[Doc], Floats2d]
-> ```
-
-Type annotations are special inline code blocks are used to describe Python
-types in the [type hints](https://docs.python.org/3/library/typing.html) format.
-The special component will split the type, apply syntax highlighting and link
-all types that specify links in `meta/type-annotations.json`. Types can link to
-internal or external documentation pages. To make it easy to represent the type
-annotations in Markdown, the rendering "hijacks" the `~~` tags that would
-typically be converted to a `` element – but in this case, text surrounded
-by `~~` becomes a type annotation.
-
-- ~~Dict[str, List[Union[Doc, Span]]]~~
-- ~~Model[List[Doc], List[numpy.ndarray]]~~
-
-Type annotations support a special visual style in tables and will render as a
-separate row, under the cell text. This allows the API docs to display complex
-types without taking up too much space in the cell. The type annotation should
-always be the **last element** in the row.
-
-> #### Markdown
->
-> ```markdown_
-> | Header 1 | Header 2 |
-> | -------- | ----------------------- |
-> | Column 1 | Column 2 ~~List[Doc]~~ |
-> ```
-
-| Name | Description |
-| ----------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `vocab` | The shared vocabulary. ~~Vocab~~ |
-| `model` | The Thinc [`Model`](https://thinc.ai/docs/api-model) wrapping the transformer. ~~Model[List[Doc], FullTransformerBatch]~~ |
-| `set_extra_annotations` | Function that takes a batch of `Doc` objects and transformer outputs and can set additional annotations on the `Doc`. ~~Callable[[List[Doc], FullTransformerBatch], None]~~ |
-
-### List {#list}
-
-> #### Markdown
->
-> ```markdown_
-> 1. One
-> 2. Two
-> ```
->
-> #### JSX
->
-> ```markup
->
-> - One
-> - Two
->
-> ```
-
-Lists are available as bulleted and numbered. Markdown lists are transformed
-automatically.
-
-- I am a bulleted list
-- I have nice bullets
-- Lorem ipsum dolor
-- consectetur adipiscing elit
-
-1. I am an ordered list
-2. I have nice numbers
-3. Lorem ipsum dolor
-4. consectetur adipiscing elit
-
-### Aside {#aside}
-
-> #### Markdown
->
-> ```markdown_
-> > #### Aside title
-> > This is aside text.
-> ```
->
-> #### JSX
->
-> ```jsx
->
-> ```
-
-Asides can be used to display additional notes and content in the right-hand
-column. Asides can contain text, code and other elements if needed. Visually,
-asides are moved to the side on the X-axis, and displayed at the same level they
-were inserted. On small screens, they collapse and are rendered in their
-original position, in between the text.
-
-To make them easier to use in Markdown, paragraphs formatted as blockquotes will
-turn into asides by default. Level 4 headlines (with a leading `####`) will
-become aside titles.
-
-### Code Block {#code-block}
-
-> #### Markdown
->
-> ````markdown_
-> ```python
-> ### This is a title
-> import spacy
-> ```
-> ````
->
-> #### JSX
->
-> ```jsx
->
-> import spacy
->
-> ```
-
-Code blocks use the [Prism](http://prismjs.com/) syntax highlighter with a
-custom theme. The language can be set individually on each block, and defaults
-to raw text with no highlighting. An optional label can be added as the first
-line with the prefix `####` (Python-like) and `///` (JavaScript-like). the
-indented block as plain text and preserve whitespace.
-
-```python
-### Using spaCy
-import spacy
-nlp = spacy.load("en_core_web_sm")
-doc = nlp("This is a sentence.")
-for token in doc:
- print(token.text, token.pos_)
-```
-
-Code blocks and also specify an optional range of line numbers to highlight by
-adding `{highlight="..."}` to the headline. Acceptable ranges are spans like
-`5-7`, but also `5-7,10` or `5-7,10,13-14`.
-
-> #### Markdown
->
-> ````markdown_
-> ```python
-> ### This is a title {highlight="1-2"}
-> import spacy
-> nlp = spacy.load("en_core_web_sm")
-> ```
-> ````
-
-```python
-### Using the matcher {highlight="5-7"}
-import spacy
-from spacy.matcher import Matcher
-
-nlp = spacy.load('en_core_web_sm')
-matcher = Matcher(nlp.vocab)
-pattern = [{"LOWER": "hello"}, {"IS_PUNCT": True}, {"LOWER": "world"}]
-matcher.add("HelloWorld", None, pattern)
-doc = nlp("Hello, world! Hello world!")
-matches = matcher(doc)
-```
-
-Adding `{executable="true"}` to the title turns the code into an executable
-block, powered by [Binder](https://mybinder.org) and
-[Juniper](https://github.com/ines/juniper). If JavaScript is disabled, the
-interactive widget defaults to a regular code block.
-
-> #### Markdown
->
-> ````markdown_
-> ```python
-> ### {executable="true"}
-> import spacy
-> nlp = spacy.load("en_core_web_sm")
-> ```
-> ````
-
-```python
-### {executable="true"}
-import spacy
-nlp = spacy.load("en_core_web_sm")
-doc = nlp("This is a sentence.")
-for token in doc:
- print(token.text, token.pos_)
-```
-
-If a code block only contains a URL to a GitHub file, the raw file contents are
-embedded automatically and syntax highlighting is applied. The link to the
-original file is shown at the top of the widget.
-
-> #### Markdown
->
-> ````markdown_
-> ```python
-> https://github.com/...
-> ```
-> ````
->
-> #### JSX
->
-> ```jsx
->
-> ```
-
-```python
-https://github.com/explosion/spaCy/tree/master/spacy/language.py
-```
-
-### Infobox {#infobox}
-
-import Infobox from 'components/infobox'
-
-> #### JSX
->
-> ```jsx
-> Regular infobox
-> This is a warning.
-> This is dangerous.
-> ```
-
-Infoboxes can be used to add notes, updates, warnings or additional information
-to a page or section. Semantically, they're implemented and interpreted as an
-`aside` element. Infoboxes can take an optional `title` argument, as well as an
-optional `variant` (either `"warning"` or `"danger"`).
-
-
-
-If needed, an infobox can contain regular text, `inline code`, lists and other
-blocks.
-
-
-
-
-
-If needed, an infobox can contain regular text, `inline code`, lists and other
-blocks.
-
-
-
-
-
-If needed, an infobox can contain regular text, `inline code`, lists and other
-blocks.
-
-
-
-### Accordion {#accordion}
-
-import Accordion from 'components/accordion'
-
-> #### JSX
->
-> ```jsx
->
-> Accordion content goes here.
->
-> ```
-
-Accordions are collapsible sections that are mostly used for lengthy tables,
-like the tag and label annotation schemes for different languages. They all need
-to be presented – but chances are the user doesn't actually care about _all_ of
-them, especially not at the same time. So it's fairly reasonable to hide them
-begin a click. This particular implementation was inspired by the amazing
-[Inclusive Components blog](https://inclusive-components.design/collapsible-sections/).
-
-
-
-Lorem ipsum dolor sit amet, consectetur adipiscing elit. Quisque enim ante,
-pretium a orci eget, varius dignissim augue. Nam eu dictum mauris, id tincidunt
-nisi. Integer commodo pellentesque tincidunt. Nam at turpis finibus tortor
-gravida sodales tincidunt sit amet est. Nullam euismod arcu in tortor auctor,
-sit amet dignissim justo congue.
-
-
-
-## Setup and installation {#setup}
+## Setup and installation
Before running the setup, make sure your versions of
[Node](https://nodejs.org/en/) and [npm](https://www.npmjs.com/) are up to date.
@@ -554,14 +34,14 @@ extensions for your code editor. The
[`.prettierrc`](https://github.com/explosion/spaCy/tree/master/website/.prettierrc)
file in the root defines the settings used in this codebase.
-## Building & developing the site with Docker {#docker}
-Sometimes it's hard to get a local environment working due to rapid updates to node dependencies,
-so it may be easier to use docker for building the docs.
+## Building & developing the site with Docker
-If you'd like to do this,
-**be sure you do *not* include your local `node_modules` folder**,
-since there are some dependencies that need to be built for the image system.
-Rename it before using.
+Sometimes it's hard to get a local environment working due to rapid updates to
+node dependencies, so it may be easier to use docker for building the docs.
+
+If you'd like to do this, **be sure you do _not_ include your local
+`node_modules` folder**, since there are some dependencies that need to be built
+for the image system. Rename it before using.
```bash
docker run -it \
@@ -571,16 +51,16 @@ docker run -it \
gatsby develop -H 0.0.0.0
```
-This will allow you to access the built website at http://0.0.0.0:8000/
-in your browser, and still edit code in your editor while having the site
-reflect those changes.
+This will allow you to access the built website at http://0.0.0.0:8000/ in your
+browser, and still edit code in your editor while having the site reflect those
+changes.
-**Note**: If you're working on a Mac with an M1 processor,
-you might see segfault errors from `qemu` if you use the default image.
-To fix this use the `arm64` tagged image in the `docker run` command
+**Note**: If you're working on a Mac with an M1 processor, you might see
+segfault errors from `qemu` if you use the default image. To fix this use the
+`arm64` tagged image in the `docker run` command
(ghcr.io/explosion/spacy-io:arm64).
-### Building the Docker image {#docker-build}
+### Building the Docker image
If you'd like to build the image locally, you can do so like this:
@@ -588,67 +68,21 @@ If you'd like to build the image locally, you can do so like this:
docker build -t spacy-io .
```
-This will take some time, so if you want to use the prebuilt image you'll save a bit of time.
+Building the image takes some time, so you can save that wait by using the
+prebuilt image instead.
-## Markdown reference {#markdown}
-
-All page content and page meta lives in the `.md` files in the `/docs`
-directory. The frontmatter block at the top of each file defines the page title
-and other settings like the sidebar menu.
-
-````markdown
----
-title: Page title
----
-
-## Headline starting a section {#some_id}
-
-This is a regular paragraph with a [link](https://spacy.io) and **bold text**.
-
-> #### This is an aside title
->
-> This is aside text.
-
-### Subheadline
-
-| Header 1 | Header 2 |
-| -------- | -------- |
-| Column 1 | Column 2 |
-
-```python
-### Code block title {highlight="2-3"}
-import spacy
-nlp = spacy.load("en_core_web_sm")
-doc = nlp("Hello world")
-```
-
-
-
-This is content in the infobox.
-
-
-````
-
-In addition to the native markdown elements, you can use the components
-[` `][infobox], [` `][accordion], [``][abbr] and
-[` `][tag] via their JSX syntax.
-
-[infobox]: https://spacy.io/styleguide#infobox
-[accordion]: https://spacy.io/styleguide#accordion
-[abbr]: https://spacy.io/styleguide#abbr
-[tag]: https://spacy.io/styleguide#tag
-
-## Project structure {#structure}
+## Project structure
```yaml
-### Directory structure
├── docs # the actual markdown content
├── meta # JSON-formatted site metadata
| ├── languages.json # supported languages and statistical models
| ├── sidebars.json # sidebar navigations for different sections
| ├── site.json # general site metadata
+| ├── type-annotations.json # Type annotations
| └── universe.json # data for the spaCy universe section
├── public # compiled site
+├── setup # Jinja setup
├── src # source
| ├── components # React components
| ├── fonts # webfonts
@@ -661,54 +95,10 @@ In addition to the native markdown elements, you can use the components
| | ├── models.js # layout template for model pages
| | └── universe.js # layout templates for universe
| └── widgets # non-reusable components with content, e.g. changelog
+├── .eslintrc.json # ESLint config file
+├── .prettierrc # Prettier config file
├── gatsby-browser.js # browser-specific hooks for Gatsby
├── gatsby-config.js # Gatsby configuration
├── gatsby-node.js # Node-specific hooks for Gatsby
└── package.json # package settings and dependencies
```
-
-## Editorial {#editorial}
-
-- "spaCy" should always be spelled with a lowercase "s" and a capital "C",
- unless it specifically refers to the Python package or Python import `spacy`
- (in which case it should be formatted as code).
- - ✅ spaCy is a library for advanced NLP in Python.
- - ❌ Spacy is a library for advanced NLP in Python.
- - ✅ First, you need to install the `spacy` package from pip.
-- Mentions of code, like function names, classes, variable names etc. in inline
- text should be formatted as `code`.
- - ✅ "Calling the `nlp` object on a text returns a `Doc`."
-- Objects that have pages in the [API docs](/api) should be linked – for
- example, [`Doc`](/api/doc) or [`Language.to_disk`](/api/language#to_disk). The
- mentions should still be formatted as code within the link. Links pointing to
- the API docs will automatically receive a little icon. However, if a paragraph
- includes many references to the API, the links can easily get messy. In that
- case, we typically only link the first mention of an object and not any
- subsequent ones.
- - ✅ The [`Span`](/api/span) and [`Token`](/api/token) objects are views of a
- [`Doc`](/api/doc). [`Span.as_doc`](/api/span#as_doc) creates a `Doc` object
- from a `Span`.
- - ❌ The [`Span`](/api/span) and [`Token`](/api/token) objects are views of a
- [`Doc`](/api/doc). [`Span.as_doc`](/api/span#as_doc) creates a
- [`Doc`](/api/doc) object from a [`Span`](/api/span).
-
-* Other things we format as code are: references to trained pipeline packages
- like `en_core_web_sm` or file names like `code.py` or `meta.json`.
-
- - ✅ After training, the `config.cfg` is saved to disk.
-
-* [Type annotations](#type-annotations) are a special type of code formatting,
- expressed by wrapping the text in `~~` instead of backticks. The result looks
- like this: ~~List[Doc]~~. All references to known types will be linked
- automatically.
-
- - ✅ The model has the input type ~~List[Doc]~~ and it outputs a
- ~~List[Array2d]~~.
-
-* We try to keep links meaningful but short.
- - ✅ For details, see the usage guide on
- [training with custom code](/usage/training#custom-code).
- - ❌ For details, see
- [the usage guide on training with custom code](/usage/training#custom-code).
- - ❌ For details, see the usage guide on training with custom code
- [here](/usage/training#custom-code).
diff --git a/website/UNIVERSE.md b/website/UNIVERSE.md
index 770bbde13..c3e49ba43 100644
--- a/website/UNIVERSE.md
+++ b/website/UNIVERSE.md
@@ -51,7 +51,7 @@ markup is correct.
"import spacy",
"import package_name",
"",
- "nlp = spacy.load('en')",
+ "nlp = spacy.load('en_core_web_sm')",
"nlp.add_pipe(package_name)"
],
"code_language": "python",
diff --git a/website/docs/api/architectures.md b/website/docs/api/architectures.md
index 213aa7455..45d2872bb 100644
--- a/website/docs/api/architectures.md
+++ b/website/docs/api/architectures.md
@@ -11,6 +11,7 @@ menu:
- ['Text Classification', 'textcat']
- ['Span Classification', 'spancat']
- ['Entity Linking', 'entitylinker']
+ - ['Coreference', 'coref-architectures']
---
A **model architecture** is a function that wires up a
@@ -586,8 +587,8 @@ consists of either two or three subnetworks:
run once for each batch.
- **lower**: Construct a feature-specific vector for each `(token, feature)`
pair. This is also run once for each batch. Constructing the state
- representation is then a matter of summing the component features and
- applying the non-linearity.
+ representation is then a matter of summing the component features and applying
+ the non-linearity.
- **upper** (optional): A feed-forward network that predicts scores from the
state representation. If not present, the output from the lower model is used
as action scores directly.
@@ -619,8 +620,8 @@ consists of either two or three subnetworks:
> ```
Build a tagger model, using a provided token-to-vector component. The tagger
-model adds a linear layer with softmax activation to predict scores given
-the token vectors.
+model adds a linear layer with softmax activation to predict scores given the
+token vectors.
| Name | Description |
| ----------- | ------------------------------------------------------------------------------------------ |
@@ -911,5 +912,84 @@ A function that reads an existing `KnowledgeBase` from file.
A function that takes as input a [`KnowledgeBase`](/api/kb) and a
[`Span`](/api/span) object denoting a named entity, and returns a list of
plausible [`Candidate`](/api/kb/#candidate) objects. The default
-`CandidateGenerator` uses the text of a mention to find its potential
-aliases in the `KnowledgeBase`. Note that this function is case-dependent.
+`CandidateGenerator` uses the text of a mention to find its potential aliases in
+the `KnowledgeBase`. Note that this function is case-dependent.
+
+## Coreference {#coref-architectures tag="experimental"}
+
+A [`CoreferenceResolver`](/api/coref) component identifies tokens that refer to
+the same entity. A [`SpanResolver`](/api/span-resolver) component infers spans
+from single tokens. Together these components can be used to reproduce
+traditional coreference models. You can also omit the `SpanResolver` if working
+with only token-level clusters is acceptable.
+
+### spacy-experimental.Coref.v1 {#Coref tag="experimental"}
+
+> #### Example Config
+>
+> ```ini
+>
+> [model]
+> @architectures = "spacy-experimental.Coref.v1"
+> distance_embedding_size = 20
+> dropout = 0.3
+> hidden_size = 1024
+> depth = 2
+> antecedent_limit = 50
+> antecedent_batch_size = 512
+>
+> [model.tok2vec]
+> @architectures = "spacy-transformers.TransformerListener.v1"
+> grad_factor = 1.0
+> upstream = "transformer"
+> pooling = {"@layers":"reduce_mean.v1"}
+> ```
+
+The `Coref` model architecture is a Thinc `Model`.
+
+| Name | Description |
+| ------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `tok2vec` | The [`tok2vec`](#tok2vec) layer of the model. ~~Model~~ |
+| `distance_embedding_size` | A representation of the distance between candidates. ~~int~~ |
+| `dropout` | The dropout to use internally. Unlike some Thinc models, this has separate dropout for the internal PyTorch layers. ~~float~~ |
+| `hidden_size` | Size of the main internal layers. ~~int~~ |
+| `depth` | Depth of the internal network. ~~int~~ |
+| `antecedent_limit` | How many candidate antecedents to keep after rough scoring. This has a significant effect on memory usage. Typical values would be 50 to 200, or higher for very long documents. ~~int~~ |
+| `antecedent_batch_size` | Internal batch size. ~~int~~ |
+| **CREATES** | The model using the architecture. ~~Model[List[Doc], Floats2d]~~ |
+
+### spacy-experimental.SpanResolver.v1 {#SpanResolver tag="experimental"}
+
+> #### Example Config
+>
+> ```ini
+>
+> [model]
+> @architectures = "spacy-experimental.SpanResolver.v1"
+> hidden_size = 1024
+> distance_embedding_size = 64
+> conv_channels = 4
+> window_size = 1
+> max_distance = 128
+> prefix = "coref_head_clusters"
+>
+> [model.tok2vec]
+> @architectures = "spacy-transformers.TransformerListener.v1"
+> grad_factor = 1.0
+> upstream = "transformer"
+> pooling = {"@layers":"reduce_mean.v1"}
+> ```
+
+The `SpanResolver` model architecture is a Thinc `Model`. Note that
+`MentionClusters` is `List[List[Tuple[int, int]]]`.
+
+| Name | Description |
+| ------------------------- | -------------------------------------------------------------------------------------------------------------------- |
+| `tok2vec` | The [`tok2vec`](#tok2vec) layer of the model. ~~Model~~ |
+| `hidden_size` | Size of the main internal layers. ~~int~~ |
+| `distance_embedding_size` | A representation of the distance between two candidates. ~~int~~ |
+| `conv_channels` | The number of channels in the internal CNN. ~~int~~ |
+| `window_size` | The number of neighboring tokens to consider in the internal CNN. `1` means consider one token on each side. ~~int~~ |
+| `max_distance` | The longest possible length of a predicted span. ~~int~~ |
+| `prefix` | The prefix that indicates spans to use for input data. ~~string~~ |
+| **CREATES** | The model using the architecture. ~~Model[List[Doc], List[MentionClusters]]~~ |
diff --git a/website/docs/api/cli.md b/website/docs/api/cli.md
index 8621719b9..7fa0c39bb 100644
--- a/website/docs/api/cli.md
+++ b/website/docs/api/cli.md
@@ -12,10 +12,11 @@ menu:
- ['train', 'train']
- ['pretrain', 'pretrain']
- ['evaluate', 'evaluate']
+ - ['apply', 'apply']
+ - ['find-threshold', 'find-threshold']
- ['assemble', 'assemble']
- ['package', 'package']
- ['project', 'project']
- - ['ray', 'ray']
- ['huggingface-hub', 'huggingface-hub']
---
@@ -53,7 +54,7 @@ $ python -m spacy download [model] [--direct] [--sdist] [pip_args]
| `--direct`, `-D` | Force direct download of exact package version. ~~bool (flag)~~ |
| `--sdist`, `-S` 3 | Download the source package (`.tar.gz` archive) instead of the default pre-built binary wheel. ~~bool (flag)~~ |
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
-| pip args 2.1 | Additional installation options to be passed to `pip install` when installing the pipeline package. For example, `--user` to install to the user home directory or `--no-deps` to not install package dependencies. ~~Any (option/flag)~~ |
+| pip args | Additional installation options to be passed to `pip install` when installing the pipeline package. For example, `--user` to install to the user home directory or `--no-deps` to not install package dependencies. ~~Any (option/flag)~~ |
| **CREATES** | The installed pipeline package in your `site-packages` directory. |
## info {#info tag="command"}
@@ -77,15 +78,15 @@ $ python -m spacy info [--markdown] [--silent] [--exclude]
$ python -m spacy info [model] [--markdown] [--silent] [--exclude]
```
-| Name | Description |
-| ------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------- |
-| `model` | A trained pipeline, i.e. package name or path (optional). ~~Optional[str] \(option)~~ |
-| `--markdown`, `-md` | Print information as Markdown. ~~bool (flag)~~ |
-| `--silent`, `-s` 2.0.12 | Don't print anything, just return the values. ~~bool (flag)~~ |
-| `--exclude`, `-e` | Comma-separated keys to exclude from the print-out. Defaults to `"labels"`. ~~Optional[str]~~ |
-| `--url`, `-u` 3.5.0 | Print the URL to download the most recent compatible version of the pipeline. Requires a pipeline name. ~~bool (flag)~~ |
-| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
-| **PRINTS** | Information about your spaCy installation. |
+| Name | Description |
+| -------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------- |
+| `model` | A trained pipeline, i.e. package name or path (optional). ~~Optional[str] \(option)~~ |
+| `--markdown`, `-md` | Print information as Markdown. ~~bool (flag)~~ |
+| `--silent`, `-s` | Don't print anything, just return the values. ~~bool (flag)~~ |
+| `--exclude`, `-e` | Comma-separated keys to exclude from the print-out. Defaults to `"labels"`. ~~Optional[str]~~ |
+| `--url`, `-u` 3.5.0 | Print the URL to download the most recent compatible version of the pipeline. Requires a pipeline name. ~~bool (flag)~~ |
+| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
+| **PRINTS** | Information about your spaCy installation. |
## validate {#validate new="2" tag="command"}
@@ -260,22 +261,22 @@ chosen based on the file extension of the input file.
$ python -m spacy convert [input_file] [output_dir] [--converter] [--file-type] [--n-sents] [--seg-sents] [--base] [--morphology] [--merge-subtokens] [--ner-map] [--lang]
```
-| Name | Description |
-| ------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------- |
-| `input_path` | Input file or directory. ~~Path (positional)~~ |
-| `output_dir` | Output directory for converted file. Defaults to `"-"`, meaning data will be written to `stdout`. ~~Optional[Path] \(option)~~ |
-| `--converter`, `-c` 2 | Name of converter to use (see below). ~~str (option)~~ |
-| `--file-type`, `-t` 2.1 | Type of file to create. Either `spacy` (default) for binary [`DocBin`](/api/docbin) data or `json` for v2.x JSON format. ~~str (option)~~ |
-| `--n-sents`, `-n` | Number of sentences per document. Supported for: `conll`, `conllu`, `iob`, `ner` ~~int (option)~~ |
-| `--seg-sents`, `-s` 2.2 | Segment sentences. Supported for: `conll`, `ner` ~~bool (flag)~~ |
-| `--base`, `-b`, `--model` | Trained spaCy pipeline for sentence segmentation to use as base (for `--seg-sents`). ~~Optional[str](option)~~ |
-| `--morphology`, `-m` | Enable appending morphology to tags. Supported for: `conllu` ~~bool (flag)~~ |
-| `--merge-subtokens`, `-T` | Merge CoNLL-U subtokens ~~bool (flag)~~ |
-| `--ner-map`, `-nm` | NER tag mapping (as JSON-encoded dict of entity types). Supported for: `conllu` ~~Optional[Path](option)~~ |
-| `--lang`, `-l` 2.1 | Language code (if tokenizer required). ~~Optional[str] \(option)~~ |
-| `--concatenate`, `-C` | Concatenate output to a single file ~~bool (flag)~~ |
-| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
-| **CREATES** | Binary [`DocBin`](/api/docbin) training data that can be used with [`spacy train`](/api/cli#train). |
+| Name | Description |
+| ------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- |
+| `input_path` | Input file or directory. ~~Path (positional)~~ |
+| `output_dir` | Output directory for converted file. Defaults to `"-"`, meaning data will be written to `stdout`. ~~Optional[Path] \(option)~~ |
+| `--converter`, `-c` | Name of converter to use (see below). ~~str (option)~~ |
+| `--file-type`, `-t` | Type of file to create. Either `spacy` (default) for binary [`DocBin`](/api/docbin) data or `json` for v2.x JSON format. ~~str (option)~~ |
+| `--n-sents`, `-n` | Number of sentences per document. Supported for: `conll`, `conllu`, `iob`, `ner` ~~int (option)~~ |
+| `--seg-sents`, `-s` | Segment sentences. Supported for: `conll`, `ner` ~~bool (flag)~~ |
+| `--base`, `-b`, `--model` | Trained spaCy pipeline for sentence segmentation to use as base (for `--seg-sents`). ~~Optional[str](option)~~ |
+| `--morphology`, `-m` | Enable appending morphology to tags. Supported for: `conllu` ~~bool (flag)~~ |
+| `--merge-subtokens`, `-T` | Merge CoNLL-U subtokens ~~bool (flag)~~ |
+| `--ner-map`, `-nm` | NER tag mapping (as JSON-encoded dict of entity types). Supported for: `conllu` ~~Optional[Path](option)~~ |
+| `--lang`, `-l` | Language code (if tokenizer required). ~~Optional[str] \(option)~~ |
+| `--concatenate`, `-C` | Concatenate output to a single file ~~bool (flag)~~ |
+| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
+| **CREATES** | Binary [`DocBin`](/api/docbin) training data that can be used with [`spacy train`](/api/cli#train). |
### Converters {#converters}
@@ -474,8 +475,7 @@ report span characteristics such as the average span length and the span (or
span boundary) distinctiveness. The distinctiveness measure shows how different
the tokens are with respect to the rest of the corpus using the KL-divergence of
the token distributions. To learn more, you can check out Papay et al.'s work on
-[*Dissecting Span Identification Tasks with Performance Prediction* (EMNLP
-2020)](https://aclanthology.org/2020.emnlp-main.396/).
+[_Dissecting Span Identification Tasks with Performance Prediction_ (EMNLP 2020)](https://aclanthology.org/2020.emnlp-main.396/).
@@ -1163,6 +1163,76 @@ $ python -m spacy evaluate [model] [data_path] [--output] [--code] [--gold-prepr
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
| **CREATES** | Training results and optional metrics and visualizations. |
+## apply {#apply new="3.5" tag="command"}
+
+Applies a trained pipeline to data and stores the resulting annotated documents
+in a `DocBin`. The input can be a single file or a directory. The recognized
+input formats are:
+
+1. `.spacy`
+2. `.jsonl` containing a user-specified `text_key`
+3. Files with any other extension are assumed to be plain text files containing
+ a single document.
+
+When a directory is provided it is traversed recursively to collect all files.
+
+```cli
+$ python -m spacy apply [model] [data-path] [output-file] [--code] [--text-key] [--force-overwrite] [--gpu-id] [--batch-size] [--n-process]
+```
+
+| Name | Description |
+| ----------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| `model` | Pipeline to apply to the data. Can be a package or a path to a data directory. ~~str (positional)~~ |
+| `data_path`                               | Location of the data to process, in spaCy's [binary format](/api/data-formats#training), `.jsonl`, or plain text. ~~Path (positional)~~ |
+| `output-file`, `-o` | Output `DocBin` path. ~~str (positional)~~ |
+| `--code`, `-c` 3 | Path to Python file with additional code to be imported. Allows [registering custom functions](/usage/training#custom-functions) for new architectures. ~~Optional[Path] \(option)~~ |
+| `--text-key`, `-tk`                       | The key used to fetch the text from `.jsonl` input files. Defaults to `text`. ~~Optional[str] \(option)~~ |
+| `--force-overwrite`, `-F`                 | If the provided `output-file` already exists, then force `apply` to overwrite it. If this is `False` (default), the command quits with a warning instead. ~~bool (flag)~~ |
+| `--gpu-id`, `-g` | GPU to use, if any. Defaults to `-1` for CPU. ~~int (option)~~ |
+| `--batch-size`, `-b` | Batch size to use for prediction. Defaults to `1`. ~~int (option)~~ |
+| `--n-process`, `-n` | Number of processes to use for prediction. Defaults to `1`. ~~int (option)~~ |
+| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
+| **CREATES** | A `DocBin` with the annotations from the `model` for all the files found in `data-path`. |
+
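+The resulting `.spacy` file is a regular [`DocBin`](/api/docbin), so it can be
+loaded back for further processing. A minimal sketch (the pipeline and file
+names below are placeholders, not part of the command):
+
+```python
+import spacy
+from spacy.tokens import DocBin
+
+# Load the same (or a compatible) pipeline to provide the vocab
+nlp = spacy.load("en_core_web_sm")
+# "predictions.spacy" stands in for the output-file passed to `spacy apply`
+doc_bin = DocBin().from_disk("predictions.spacy")
+for doc in doc_bin.get_docs(nlp.vocab):
+    print(doc.text, [(ent.text, ent.label_) for ent in doc.ents])
+```
+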
+## find-threshold {#find-threshold new="3.5" tag="command"}
+
+Runs prediction trials for a trained model with varying thresholds to maximize
+the specified metric. The search space for the threshold is traversed linearly
+from 0 to 1 in `n_trials` steps. Results are displayed in a table on `stdout`
+(the corresponding API call to `spacy.cli.find_threshold.find_threshold()`
+returns all results).
+
+This is applicable only for components whose predictions are influenced by
+thresholds, e.g. `textcat_multilabel` and `spancat`, but not `textcat`. Note
+that the full path to the corresponding threshold attribute in the config has to
+be provided.
+
+> #### Examples
+>
+> ```cli
+> # For textcat_multilabel:
+> $ python -m spacy find-threshold my_nlp data.spacy textcat_multilabel threshold cats_macro_f
+> ```
+>
+> ```cli
+> # For spancat:
+> $ python -m spacy find-threshold my_nlp data.spacy spancat threshold spans_sc_f
+> ```
+
+| Name | Description |
+| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| `model` | Pipeline to evaluate. Can be a package or a path to a data directory. ~~str (positional)~~ |
+| `data_path`             | Path to a file with a `DocBin` of docs to use for the threshold search. ~~Path (positional)~~ |
+| `pipe_name` | Name of pipe to examine thresholds for. ~~str (positional)~~ |
+| `threshold_key` | Key of threshold attribute in component's configuration. ~~str (positional)~~ |
+| `scores_key`            | Name of the score metric to optimize. ~~str (positional)~~ |
+| `--n_trials`, `-n` | Number of trials to determine optimal thresholds. ~~int (option)~~ |
+| `--code`, `-c` | Path to Python file with additional code to be imported. Allows [registering custom functions](/usage/training#custom-functions) for new architectures. ~~Optional[Path] \(option)~~ |
+| `--gpu-id`, `-g` | GPU to use, if any. Defaults to `-1` for CPU. ~~int (option)~~ |
+| `--gold-preproc`, `-G` | Use gold preprocessing. ~~bool (flag)~~ |
+| `--silent`, `-V`, `-VV` | Don't print the per-threshold results table to `stdout`. ~~bool (flag)~~ |
+| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
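+
+The search itself is straightforward to reason about: the candidate thresholds
+form a linear grid over `[0, 1]` and the best-scoring one is reported. A rough,
+illustrative sketch of that idea (not the actual implementation):
+
+```python
+import numpy as np
+
+def best_threshold(evaluate, n_trials: int = 11):
+    # `evaluate` is a hypothetical callable that scores the pipeline on the
+    # dev docs for a given threshold value
+    thresholds = np.linspace(0.0, 1.0, n_trials)
+    scores = [evaluate(t) for t in thresholds]
+    best = int(np.argmax(scores))
+    return thresholds[best], scores[best]
+```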
+
## assemble {#assemble tag="command"}
Assemble a pipeline from a config file without additional training. Expects a
@@ -1229,19 +1299,19 @@ $ python -m spacy package [input_dir] [output_dir] [--code] [--meta-path] [--cre
> $ pip install dist/en_pipeline-0.0.0.tar.gz
> ```
-| Name | Description |
-| ------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `input_dir` | Path to directory containing pipeline data. ~~Path (positional)~~ |
-| `output_dir` | Directory to create package folder in. ~~Path (positional)~~ |
-| `--code`, `-c` 3 | Comma-separated paths to Python files to be included in the package and imported in its `__init__.py`. This allows including [registering functions](/usage/training#custom-functions) and [custom components](/usage/processing-pipelines#custom-components). ~~str (option)~~ |
-| `--meta-path`, `-m` 2 | Path to [`meta.json`](/api/data-formats#meta) file (optional). ~~Optional[Path] \(option)~~ |
-| `--create-meta`, `-C` 2 | Create a `meta.json` file on the command line, even if one already exists in the directory. If an existing file is found, its entries will be shown as the defaults in the command line prompt. ~~bool (flag)~~ |
-| `--build`, `-b` 3 | Comma-separated artifact formats to build. Can be `sdist` (for a `.tar.gz` archive) and/or `wheel` (for a binary `.whl` file), or `none` if you want to run this step manually. The generated artifacts can be installed by `pip install`. Defaults to `sdist`. ~~str (option)~~ |
-| `--name`, `-n` 3 | Package name to override in meta. ~~Optional[str] \(option)~~ |
-| `--version`, `-v` 3 | Package version to override in meta. Useful when training new versions, as it doesn't require editing the meta template. ~~Optional[str] \(option)~~ |
-| `--force`, `-f` | Force overwriting of existing folder in output directory. ~~bool (flag)~~ |
-| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
-| **CREATES** | A Python package containing the spaCy pipeline. |
+| Name | Description |
+| -------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `input_dir` | Path to directory containing pipeline data. ~~Path (positional)~~ |
+| `output_dir` | Directory to create package folder in. ~~Path (positional)~~ |
+| `--code`, `-c` 3 | Comma-separated paths to Python files to be included in the package and imported in its `__init__.py`. This allows including [registering functions](/usage/training#custom-functions) and [custom components](/usage/processing-pipelines#custom-components). ~~str (option)~~ |
+| `--meta-path`, `-m` | Path to [`meta.json`](/api/data-formats#meta) file (optional). ~~Optional[Path] \(option)~~ |
+| `--create-meta`, `-C` | Create a `meta.json` file on the command line, even if one already exists in the directory. If an existing file is found, its entries will be shown as the defaults in the command line prompt. ~~bool (flag)~~ |
+| `--build`, `-b` 3 | Comma-separated artifact formats to build. Can be `sdist` (for a `.tar.gz` archive) and/or `wheel` (for a binary `.whl` file), or `none` if you want to run this step manually. The generated artifacts can be installed by `pip install`. Defaults to `sdist`. ~~str (option)~~ |
+| `--name`, `-n` 3 | Package name to override in meta. ~~Optional[str] \(option)~~ |
+| `--version`, `-v` 3 | Package version to override in meta. Useful when training new versions, as it doesn't require editing the meta template. ~~Optional[str] \(option)~~ |
+| `--force`, `-f` | Force overwriting of existing folder in output directory. ~~bool (flag)~~ |
+| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
+| **CREATES** | A Python package containing the spaCy pipeline. |
## project {#project new="3"}
@@ -1352,12 +1422,13 @@ If the contents are different, the new version of the file is uploaded. Deleting
obsolete files is left up to you.
Remotes can be defined in the `remotes` section of the
-[`project.yml`](/usage/projects#project-yml). Under the hood, spaCy uses the
-[`smart-open`](https://github.com/RaRe-Technologies/smart_open) library to
-communicate with the remote storages, so you can use any protocol that
-`smart-open` supports, including [S3](https://aws.amazon.com/s3/),
-[Google Cloud Storage](https://cloud.google.com/storage), SSH and more, although
-you may need to install extra dependencies to use certain protocols.
+[`project.yml`](/usage/projects#project-yml). Under the hood, spaCy uses
+[`Pathy`](https://github.com/justindujardin/pathy) to communicate with the
+remote storages, so you can use any protocol that `Pathy` supports, including
+[S3](https://aws.amazon.com/s3/),
+[Google Cloud Storage](https://cloud.google.com/storage), and the local
+filesystem, although you may need to install extra dependencies to use certain
+protocols.
```cli
$ python -m spacy project push [remote] [project_dir]
@@ -1396,12 +1467,13 @@ outputs, so if you change the config back, you'll be able to fetch back the
result.
Remotes can be defined in the `remotes` section of the
-[`project.yml`](/usage/projects#project-yml). Under the hood, spaCy uses the
-[`smart-open`](https://github.com/RaRe-Technologies/smart_open) library to
-communicate with the remote storages, so you can use any protocol that
-`smart-open` supports, including [S3](https://aws.amazon.com/s3/),
-[Google Cloud Storage](https://cloud.google.com/storage), SSH and more, although
-you may need to install extra dependencies to use certain protocols.
+[`project.yml`](/usage/projects#project-yml). Under the hood, spaCy uses
+[`Pathy`](https://github.com/justindujardin/pathy) to communicate with the
+remote storages, so you can use any protocol that `Pathy` supports, including
+[S3](https://aws.amazon.com/s3/),
+[Google Cloud Storage](https://cloud.google.com/storage), and the local
+filesystem, although you may need to install extra dependencies to use certain
+protocols.
```cli
$ python -m spacy project pull [remote] [project_dir]
@@ -1482,7 +1554,7 @@ You'll also need to add the assets you want to track with
```cli
-$ python -m spacy project dvc [project_dir] [workflow] [--force] [--verbose]
+$ python -m spacy project dvc [project_dir] [workflow] [--force] [--verbose] [--quiet]
```
> #### Example
@@ -1499,53 +1571,10 @@ $ python -m spacy project dvc [project_dir] [workflow] [--force] [--verbose]
| `workflow` | Name of workflow defined in `project.yml`. Defaults to first workflow if not set. ~~Optional[str] \(option)~~ |
| `--force`, `-F` | Force-updating config file. ~~bool (flag)~~ |
| `--verbose`, `-V` | Print more output generated by DVC. ~~bool (flag)~~ |
+| `--quiet`, `-q` | Print no output generated by DVC. ~~bool (flag)~~ |
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
| **CREATES** | A `dvc.yaml` file in the project directory, based on the steps defined in the given workflow. |
-## ray {#ray new="3"}
-
-The `spacy ray` CLI includes commands for parallel and distributed computing via
-[Ray](https://ray.io).
-
-
-
-To use this command, you need the
-[`spacy-ray`](https://github.com/explosion/spacy-ray) package installed.
-Installing the package will automatically add the `ray` command to the spaCy
-CLI.
-
-
-
-### ray train {#ray-train tag="command"}
-
-Train a spaCy pipeline using [Ray](https://ray.io) for parallel training. The
-command works just like [`spacy train`](/api/cli#train). For more details and
-examples, see the usage guide on
-[parallel training](/usage/training#parallel-training) and the spaCy project
-[integration](/usage/projects#ray).
-
-```cli
-$ python -m spacy ray train [config_path] [--code] [--output] [--n-workers] [--address] [--gpu-id] [--verbose] [overrides]
-```
-
-> #### Example
->
-> ```cli
-> $ python -m spacy ray train config.cfg --n-workers 2
-> ```
-
-| Name | Description |
-| ------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
-| `config_path` | Path to [training config](/api/data-formats#config) file containing all settings and hyperparameters. ~~Path (positional)~~ |
-| `--code`, `-c` | Path to Python file with additional code to be imported. Allows [registering custom functions](/usage/training#custom-functions) for new architectures. ~~Optional[Path] \(option)~~ |
-| `--output`, `-o` | Directory or remote storage URL for saving trained pipeline. The directory will be created if it doesn't exist. ~~Optional[Path] \(option)~~ |
-| `--n-workers`, `-n` | The number of workers. Defaults to `1`. ~~int (option)~~ |
-| `--address`, `-a` | Optional address of the Ray cluster. If not set (default), Ray will run locally. ~~Optional[str] \(option)~~ |
-| `--gpu-id`, `-g` | GPU ID or `-1` for CPU. Defaults to `-1`. ~~int (option)~~ |
-| `--verbose`, `-V` | Display more information for debugging purposes. ~~bool (flag)~~ |
-| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
-| overrides | Config parameters to override. Should be options starting with `--` that correspond to the config section and value to override, e.g. `--paths.train ./train.spacy`. ~~Any (option/flag)~~ |
-
## huggingface-hub {#huggingface-hub new="3.1"}
The `spacy huggingface-cli` CLI includes commands for uploading your trained
diff --git a/website/docs/api/coref.md b/website/docs/api/coref.md
new file mode 100644
index 000000000..8f54422d6
--- /dev/null
+++ b/website/docs/api/coref.md
@@ -0,0 +1,353 @@
+---
+title: CoreferenceResolver
+tag: class,experimental
+source: spacy-experimental/coref/coref_component.py
+teaser: 'Pipeline component for word-level coreference resolution'
+api_base_class: /api/pipe
+api_string_name: coref
+api_trainable: true
+---
+
+> #### Installation
+>
+> ```bash
+> $ pip install -U spacy-experimental
+> ```
+
+
+
+This component is not yet integrated into spaCy core, and is available via the
+extension package
+[`spacy-experimental`](https://github.com/explosion/spacy-experimental) starting
+in version 0.6.0. It exposes the component via
+[entry points](/usage/saving-loading/#entry-points), so if you have the package
+installed, using `factory = "experimental_coref"` in your
+[training config](/usage/training#config) or
+`nlp.add_pipe("experimental_coref")` will work out-of-the-box.
+
+
+
+A `CoreferenceResolver` component groups tokens into clusters that refer to the
+same thing. Clusters are represented as `SpanGroup`s whose keys start with a
+prefix (`coref_clusters` by default).
+
+A `CoreferenceResolver` component can be paired with a
+[`SpanResolver`](/api/span-resolver) to expand single tokens to spans.
+
+## Assigned Attributes {#assigned-attributes}
+
+Predictions will be saved to `Doc.spans` as a [`SpanGroup`](/api/spangroup). The
+span key will be a prefix plus a serial number referring to the coreference
+cluster, starting from one.
+
+The span key prefix defaults to `"coref_clusters"`, but can be passed as a
+parameter.
+
+| Location | Value |
+| ------------------------------------------ | ------------------------------------------------------------------------------------------------------- |
+| `Doc.spans[prefix + "_" + cluster_number]` | One coreference cluster, represented as single-token spans. Cluster numbers start from 1. ~~SpanGroup~~ |
+
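+For example, you can iterate over the predicted clusters by filtering the span
+keys on the prefix. A minimal sketch, assuming the default `coref_clusters`
+prefix and an `nlp` object with a trained `experimental_coref` component:
+
+```python
+doc = nlp("John called his sister. She didn't pick up.")
+for key, cluster in doc.spans.items():
+    if key.startswith("coref_clusters"):
+        print(key, [span.text for span in cluster])
+```
+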
+## Config and implementation {#config}
+
+The default config is defined by the pipeline component factory and describes
+how the component should be configured. You can override its settings via the
+`config` argument on [`nlp.add_pipe`](/api/language#add_pipe) or in your
+[`config.cfg` for training](/usage/training#config). See the
+[model architectures](/api/architectures#coref-architectures) documentation for
+details on the architectures and their arguments and hyperparameters.
+
+> #### Example
+>
+> ```python
+> from spacy_experimental.coref.coref_component import DEFAULT_COREF_MODEL
+> from spacy_experimental.coref.coref_util import DEFAULT_CLUSTER_PREFIX
+> config = {
+>     "model": DEFAULT_COREF_MODEL,
+>     "span_cluster_prefix": DEFAULT_CLUSTER_PREFIX,
+> }
+> nlp.add_pipe("experimental_coref", config=config)
+> ```
+
+| Setting | Description |
+| --------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- |
+| `model` | The [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. Defaults to [Coref](/api/architectures#Coref). ~~Model~~ |
+| `span_cluster_prefix` | The prefix for the keys for clusters saved to `doc.spans`. Defaults to `coref_clusters`. ~~str~~ |
+
+## CoreferenceResolver.\_\_init\_\_ {#init tag="method"}
+
+> #### Example
+>
+> ```python
+> # Construction via add_pipe with default model
+> coref = nlp.add_pipe("experimental_coref")
+>
+> # Construction via add_pipe with custom model
+> config = {"model": {"@architectures": "my_coref.v1"}}
+> coref = nlp.add_pipe("experimental_coref", config=config)
+>
+> # Construction from class
+> from spacy_experimental.coref.coref_component import CoreferenceResolver
+> coref = CoreferenceResolver(nlp.vocab, model)
+> ```
+
+Create a new pipeline instance. In your application, you would normally use a
+shortcut for this and instantiate the component using its string name and
+[`nlp.add_pipe`](/api/language#add_pipe).
+
+| Name | Description |
+| --------------------- | --------------------------------------------------------------------------------------------------- |
+| `vocab` | The shared vocabulary. ~~Vocab~~ |
+| `model` | The [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. ~~Model~~ |
+| `name` | String name of the component instance. Used to add entries to the `losses` during training. ~~str~~ |
+| _keyword-only_ | |
+| `span_cluster_prefix` | The prefix for the keys for clusters saved to `doc.spans`. ~~str~~ |
+
+## CoreferenceResolver.\_\_call\_\_ {#call tag="method"}
+
+Apply the pipe to one document. The document is modified in place and returned.
+This usually happens under the hood when the `nlp` object is called on a text
+and all pipeline components are applied to the `Doc` in order. Both
+[`__call__`](/api/coref#call) and [`pipe`](/api/coref#pipe) delegate to the
+[`predict`](/api/coref#predict) and
+[`set_annotations`](/api/coref#set_annotations) methods.
+
+> #### Example
+>
+> ```python
+> doc = nlp("This is a sentence.")
+> coref = nlp.add_pipe("experimental_coref")
+> # This usually happens under the hood
+> processed = coref(doc)
+> ```
+
+| Name | Description |
+| ----------- | -------------------------------- |
+| `doc` | The document to process. ~~Doc~~ |
+| **RETURNS** | The processed document. ~~Doc~~ |
+
+## CoreferenceResolver.pipe {#pipe tag="method"}
+
+Apply the pipe to a stream of documents. This usually happens under the hood
+when the `nlp` object is called on a text and all pipeline components are
+applied to the `Doc` in order. Both [`__call__`](/api/coref#call) and
+[`pipe`](/api/coref#pipe) delegate to the [`predict`](/api/coref#predict) and
+[`set_annotations`](/api/coref#set_annotations) methods.
+
+> #### Example
+>
+> ```python
+> coref = nlp.add_pipe("experimental_coref")
+> for doc in coref.pipe(docs, batch_size=50):
+> pass
+> ```
+
+| Name | Description |
+| -------------- | ------------------------------------------------------------- |
+| `stream` | A stream of documents. ~~Iterable[Doc]~~ |
+| _keyword-only_ | |
+| `batch_size` | The number of documents to buffer. Defaults to `128`. ~~int~~ |
+| **YIELDS** | The processed documents in order. ~~Doc~~ |
+
+## CoreferenceResolver.initialize {#initialize tag="method"}
+
+Initialize the component for training. `get_examples` should be a function that
+returns an iterable of [`Example`](/api/example) objects. **At least one example
+should be supplied.** The data examples are used to **initialize the model** of
+the component and can either be the full training data or a representative
+sample. Initialization includes validating the network,
+[inferring missing shapes](https://thinc.ai/docs/usage-models#validation) and
+setting up the label scheme based on the data. This method is typically called
+by [`Language.initialize`](/api/language#initialize).
+
+> #### Example
+>
+> ```python
+> coref = nlp.add_pipe("experimental_coref")
+> coref.initialize(lambda: examples, nlp=nlp)
+> ```
+
+| Name | Description |
+| -------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `get_examples` | Function that returns gold-standard annotations in the form of [`Example`](/api/example) objects. Must contain at least one `Example`. ~~Callable[[], Iterable[Example]]~~ |
+| _keyword-only_ | |
+| `nlp` | The current `nlp` object. Defaults to `None`. ~~Optional[Language]~~ |
+
+## CoreferenceResolver.predict {#predict tag="method"}
+
+Apply the component's model to a batch of [`Doc`](/api/doc) objects, without
+modifying them. Clusters are returned as a list of `MentionClusters`, one for
+each input `Doc`. A `MentionClusters` instance is just a list of lists of pairs
+of `int`s, where each item corresponds to a cluster, and the `int`s correspond
+to token indices.
+
+> #### Example
+>
+> ```python
+> coref = nlp.add_pipe("experimental_coref")
+> clusters = coref.predict([doc1, doc2])
+> ```
+
+| Name | Description |
+| ----------- | ---------------------------------------------------------------------------- |
+| `docs` | The documents to predict. ~~Iterable[Doc]~~ |
+| **RETURNS** | The predicted coreference clusters for the `docs`. ~~List[MentionClusters]~~ |
+
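+The returned structure is plain Python data, so it is easy to inspect. A purely
+illustrative example of what a single entry might look like (the values are
+made up):
+
+```python
+clusters = coref.predict([doc1, doc2])
+first_doc_clusters = clusters[0]
+# Hypothetical value: two clusters, with each mention given as a
+# (start, end) pair of token indices into the Doc, e.g.
+# [[(0, 1), (2, 3)], [(5, 6), (9, 10)]]
+```
+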
+## CoreferenceResolver.set_annotations {#set_annotations tag="method"}
+
+Modify a batch of documents, saving coreference clusters in `Doc.spans`.
+
+> #### Example
+>
+> ```python
+> coref = nlp.add_pipe("experimental_coref")
+> clusters = coref.predict([doc1, doc2])
+> coref.set_annotations([doc1, doc2], clusters)
+> ```
+
+| Name | Description |
+| ---------- | ---------------------------------------------------------------------------- |
+| `docs` | The documents to modify. ~~Iterable[Doc]~~ |
+| `clusters` | The predicted coreference clusters for the `docs`. ~~List[MentionClusters]~~ |
+
+## CoreferenceResolver.update {#update tag="method"}
+
+Learn from a batch of [`Example`](/api/example) objects. Delegates to
+[`predict`](/api/coref#predict).
+
+> #### Example
+>
+> ```python
+> coref = nlp.add_pipe("experimental_coref")
+> optimizer = nlp.initialize()
+> losses = coref.update(examples, sgd=optimizer)
+> ```
+
+| Name | Description |
+| -------------- | ------------------------------------------------------------------------------------------------------------------------ |
+| `examples` | A batch of [`Example`](/api/example) objects to learn from. ~~Iterable[Example]~~ |
+| _keyword-only_ | |
+| `drop` | The dropout rate. ~~float~~ |
+| `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ |
+| `losses` | Optional record of the loss during training. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ |
+| **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ |
+
+## CoreferenceResolver.create_optimizer {#create_optimizer tag="method"}
+
+Create an optimizer for the pipeline component.
+
+> #### Example
+>
+> ```python
+> coref = nlp.add_pipe("experimental_coref")
+> optimizer = coref.create_optimizer()
+> ```
+
+| Name | Description |
+| ----------- | ---------------------------- |
+| **RETURNS** | The optimizer. ~~Optimizer~~ |
+
+## CoreferenceResolver.use_params {#use_params tag="method, contextmanager"}
+
+Modify the pipe's model, to use the given parameter values. At the end of the
+context, the original parameters are restored.
+
+> #### Example
+>
+> ```python
+> coref = nlp.add_pipe("experimental_coref")
+> with coref.use_params(optimizer.averages):
+> coref.to_disk("/best_model")
+> ```
+
+| Name | Description |
+| -------- | -------------------------------------------------- |
+| `params` | The parameter values to use in the model. ~~dict~~ |
+
+## CoreferenceResolver.to_disk {#to_disk tag="method"}
+
+Serialize the pipe to disk.
+
+> #### Example
+>
+> ```python
+> coref = nlp.add_pipe("experimental_coref")
+> coref.to_disk("/path/to/coref")
+> ```
+
+| Name | Description |
+| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
+| `path` | A path to a directory, which will be created if it doesn't exist. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ |
+| _keyword-only_ | |
+| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ |
+
+## CoreferenceResolver.from_disk {#from_disk tag="method"}
+
+Load the pipe from disk. Modifies the object in place and returns it.
+
+> #### Example
+>
+> ```python
+> coref = nlp.add_pipe("experimental_coref")
+> coref.from_disk("/path/to/coref")
+> ```
+
+| Name | Description |
+| -------------- | ----------------------------------------------------------------------------------------------- |
+| `path` | A path to a directory. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ |
+| _keyword-only_ | |
+| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ |
+| **RETURNS** | The modified `CoreferenceResolver` object. ~~CoreferenceResolver~~ |
+
+## CoreferenceResolver.to_bytes {#to_bytes tag="method"}
+
+> #### Example
+>
+> ```python
+> coref = nlp.add_pipe("experimental_coref")
+> coref_bytes = coref.to_bytes()
+> ```
+
+Serialize the pipe to a bytestring.
+
+| Name | Description |
+| -------------- | ------------------------------------------------------------------------------------------- |
+| _keyword-only_ | |
+| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ |
+| **RETURNS** | The serialized form of the `CoreferenceResolver` object. ~~bytes~~ |
+
+## CoreferenceResolver.from_bytes {#from_bytes tag="method"}
+
+Load the pipe from a bytestring. Modifies the object in place and returns it.
+
+> #### Example
+>
+> ```python
+> coref_bytes = coref.to_bytes()
+> coref = nlp.add_pipe("experimental_coref")
+> coref.from_bytes(coref_bytes)
+> ```
+
+| Name | Description |
+| -------------- | ------------------------------------------------------------------------------------------- |
+| `bytes_data` | The data to load from. ~~bytes~~ |
+| _keyword-only_ | |
+| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ |
+| **RETURNS** | The `CoreferenceResolver` object. ~~CoreferenceResolver~~ |
+
+## Serialization fields {#serialization-fields}
+
+During serialization, spaCy will export several data fields used to restore
+different aspects of the object. If needed, you can exclude them from
+serialization by passing in the string names via the `exclude` argument.
+
+> #### Example
+>
+> ```python
+> data = coref.to_disk("/path", exclude=["vocab"])
+> ```
+
+| Name | Description |
+| ------- | -------------------------------------------------------------- |
+| `vocab` | The shared [`Vocab`](/api/vocab). |
+| `cfg` | The config file. You usually don't want to exclude this. |
+| `model` | The binary model data. You usually don't want to exclude this. |
diff --git a/website/docs/api/data-formats.md b/website/docs/api/data-formats.md
index ce06c4ea8..768844cf3 100644
--- a/website/docs/api/data-formats.md
+++ b/website/docs/api/data-formats.md
@@ -186,6 +186,7 @@ process that are used when you run [`spacy train`](/api/cli#train).
| `accumulate_gradient` | Whether to divide the batch up into substeps. Defaults to `1`. ~~int~~ |
| `batcher` | Callable that takes an iterator of [`Doc`](/api/doc) objects and yields batches of `Doc`s. Defaults to [`batch_by_words`](/api/top-level#batch_by_words). ~~Callable[[Iterator[Doc], Iterator[List[Doc]]]]~~ |
| `before_to_disk` | Optional callback to modify `nlp` object right before it is saved to disk during and after training. Can be used to remove or reset config values or disable components. Defaults to `null`. ~~Optional[Callable[[Language], Language]]~~ |
+| `before_update` | Optional callback that is invoked at the start of each training step with the `nlp` object and a `Dict` containing the following entries: `step`, `epoch`. Can be used to make deferred changes to components. Defaults to `null`. ~~Optional[Callable[[Language, Dict[str, Any]], None]]~~ |
| `dev_corpus` | Dot notation of the config location defining the dev corpus. Defaults to `corpora.dev`. ~~str~~ |
| `dropout` | The dropout rate. Defaults to `0.1`. ~~float~~ |
| `eval_frequency` | How often to evaluate during training (steps). Defaults to `200`. ~~int~~ |
diff --git a/website/docs/api/dependencyparser.md b/website/docs/api/dependencyparser.md
index 27e315592..c30d39b57 100644
--- a/website/docs/api/dependencyparser.md
+++ b/website/docs/api/dependencyparser.md
@@ -169,12 +169,6 @@ arguments it receives via the
[`[initialize.components]`](/api/data-formats#config-initialize) block in the
config.
-
-
-This method was previously called `begin_training`.
-
-
-
> #### Example
>
> ```python
diff --git a/website/docs/api/doc.md b/website/docs/api/doc.md
index 136e7785d..235470934 100644
--- a/website/docs/api/doc.md
+++ b/website/docs/api/doc.md
@@ -31,21 +31,21 @@ Construct a `Doc` object. The most common way to get a `Doc` object is via the
> doc = Doc(nlp.vocab, words=words, spaces=spaces)
> ```
-| Name | Description |
-| ---------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `vocab` | A storage container for lexical types. ~~Vocab~~ |
-| `words` | A list of strings or integer hash values to add to the document as words. ~~Optional[List[Union[str,int]]]~~ |
-| `spaces` | A list of boolean values indicating whether each word has a subsequent space. Must have the same length as `words`, if specified. Defaults to a sequence of `True`. ~~Optional[List[bool]]~~ |
-| _keyword-only_ | |
-| `user\_data` | Optional extra data to attach to the Doc. ~~Dict~~ |
-| `tags` 3 | A list of strings, of the same length as `words`, to assign as `token.tag` for each word. Defaults to `None`. ~~Optional[List[str]]~~ |
-| `pos` 3 | A list of strings, of the same length as `words`, to assign as `token.pos` for each word. Defaults to `None`. ~~Optional[List[str]]~~ |
-| `morphs` 3 | A list of strings, of the same length as `words`, to assign as `token.morph` for each word. Defaults to `None`. ~~Optional[List[str]]~~ |
-| `lemmas` 3 | A list of strings, of the same length as `words`, to assign as `token.lemma` for each word. Defaults to `None`. ~~Optional[List[str]]~~ |
-| `heads` 3 | A list of values, of the same length as `words`, to assign as the head for each word. Head indices are the absolute position of the head in the `Doc`. Defaults to `None`. ~~Optional[List[int]]~~ |
-| `deps` 3 | A list of strings, of the same length as `words`, to assign as `token.dep` for each word. Defaults to `None`. ~~Optional[List[str]]~~ |
-| `sent_starts` 3 | A list of values, of the same length as `words`, to assign as `token.is_sent_start`. Will be overridden by heads if `heads` is provided. Defaults to `None`. ~~Optional[List[Optional[bool]]]~~ |
-| `ents` 3 | A list of strings, of the same length of `words`, to assign the token-based IOB tag. Defaults to `None`. ~~Optional[List[str]]~~ |
+| Name | Description |
+| ---------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `vocab` | A storage container for lexical types. ~~Vocab~~ |
+| `words` | A list of strings or integer hash values to add to the document as words. ~~Optional[List[Union[str,int]]]~~ |
+| `spaces` | A list of boolean values indicating whether each word has a subsequent space. Must have the same length as `words`, if specified. Defaults to a sequence of `True`. ~~Optional[List[bool]]~~ |
+| _keyword-only_ | |
+| `user_data`                               | Optional extra data to attach to the Doc. ~~Dict~~ |
+| `tags` 3 | A list of strings, of the same length as `words`, to assign as `token.tag` for each word. Defaults to `None`. ~~Optional[List[str]]~~ |
+| `pos` 3 | A list of strings, of the same length as `words`, to assign as `token.pos` for each word. Defaults to `None`. ~~Optional[List[str]]~~ |
+| `morphs` 3 | A list of strings, of the same length as `words`, to assign as `token.morph` for each word. Defaults to `None`. ~~Optional[List[str]]~~ |
+| `lemmas` 3 | A list of strings, of the same length as `words`, to assign as `token.lemma` for each word. Defaults to `None`. ~~Optional[List[str]]~~ |
+| `heads` 3 | A list of values, of the same length as `words`, to assign as the head for each word. Head indices are the absolute position of the head in the `Doc`. Defaults to `None`. ~~Optional[List[int]]~~ |
+| `deps` 3 | A list of strings, of the same length as `words`, to assign as `token.dep` for each word. Defaults to `None`. ~~Optional[List[str]]~~ |
+| `sent_starts` 3 | A list of values, of the same length as `words`, to assign as `token.is_sent_start`. Will be overridden by heads if `heads` is provided. Defaults to `None`. ~~Optional[List[Union[bool, int, None]]]~~ |
+| `ents` 3 | A list of strings, of the same length of `words`, to assign the token-based IOB tag. Defaults to `None`. ~~Optional[List[str]]~~ |
## Doc.\_\_getitem\_\_ {#getitem tag="method"}
@@ -209,15 +209,15 @@ alignment mode `"strict".
> assert span.text == "New York"
> ```
-| Name | Description |
-| ------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `start` | The index of the first character of the span. ~~int~~ |
-| `end` | The index of the last character after the span. ~~int~~ |
-| `label` | A label to attach to the span, e.g. for named entities. ~~Union[int, str]~~ |
-| `kb_id` 2.2 | An ID from a knowledge base to capture the meaning of a named entity. ~~Union[int, str]~~ |
-| `vector` | A meaning representation of the span. ~~numpy.ndarray[ndim=1, dtype=float32]~~ |
-| `alignment_mode` | How character indices snap to token boundaries. Options: `"strict"` (no snapping), `"contract"` (span of all tokens completely within the character span), `"expand"` (span of all tokens at least partially covered by the character span). Defaults to `"strict"`. ~~str~~ |
-| **RETURNS** | The newly constructed object or `None`. ~~Optional[Span]~~ |
+| Name | Description |
+| ---------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `start` | The index of the first character of the span. ~~int~~ |
+| `end` | The index of the last character after the span. ~~int~~ |
+| `label` | A label to attach to the span, e.g. for named entities. ~~Union[int, str]~~ |
+| `kb_id` | An ID from a knowledge base to capture the meaning of a named entity. ~~Union[int, str]~~ |
+| `vector` | A meaning representation of the span. ~~numpy.ndarray[ndim=1, dtype=float32]~~ |
+| `alignment_mode` | How character indices snap to token boundaries. Options: `"strict"` (no snapping), `"contract"` (span of all tokens completely within the character span), `"expand"` (span of all tokens at least partially covered by the character span). Defaults to `"strict"`. ~~str~~ |
+| **RETURNS** | The newly constructed object or `None`. ~~Optional[Span]~~ |
## Doc.set_ents {#set_ents tag="method" new="3"}
@@ -757,11 +757,10 @@ The L2 norm of the document's vector representation.
| `text_with_ws` | An alias of `Doc.text`, provided for duck-type compatibility with `Span` and `Token`. ~~str~~ |
| `mem` | The document's local memory heap, for all C data it owns. ~~cymem.Pool~~ |
| `vocab` | The store of lexical types. ~~Vocab~~ |
-| `tensor` 2 | Container for dense vector representations. ~~numpy.ndarray~~ |
+| `tensor` | Container for dense vector representations. ~~numpy.ndarray~~ |
| `user_data` | A generic storage area, for user custom data. ~~Dict[str, Any]~~ |
-| `lang` 2.1 | Language of the document's vocabulary. ~~int~~ |
-| `lang_` 2.1 | Language of the document's vocabulary. ~~str~~ |
-| `sentiment` | The document's positivity/negativity score, if available. ~~float~~ |
+| `lang` | Language of the document's vocabulary. ~~int~~ |
+| `lang_` | Language of the document's vocabulary. ~~str~~ |
| `user_hooks` | A dictionary that allows customization of the `Doc`'s properties. ~~Dict[str, Callable]~~ |
| `user_token_hooks` | A dictionary that allows customization of properties of `Token` children. ~~Dict[str, Callable]~~ |
| `user_span_hooks` | A dictionary that allows customization of properties of `Span` children. ~~Dict[str, Callable]~~ |
@@ -785,7 +784,6 @@ serialization by passing in the string names via the `exclude` argument.
| Name | Description |
| ------------------ | --------------------------------------------- |
| `text` | The value of the `Doc.text` attribute. |
-| `sentiment` | The value of the `Doc.sentiment` attribute. |
| `tensor` | The value of the `Doc.tensor` attribute. |
| `user_data` | The value of the `Doc.user_data` dictionary. |
| `user_data_keys` | The keys of the `Doc.user_data` dictionary. |
diff --git a/website/docs/api/entitylinker.md b/website/docs/api/entitylinker.md
index 07dd02634..b116c4be4 100644
--- a/website/docs/api/entitylinker.md
+++ b/website/docs/api/entitylinker.md
@@ -14,7 +14,8 @@ entities) to unique identifiers, grounding the named entities into the "real
world". It requires a `KnowledgeBase`, as well as a function to generate
plausible candidates from that `KnowledgeBase` given a certain textual mention,
and a machine learning model to pick the right candidate, given the local
-context of the mention.
+context of the mention. `EntityLinker` defaults to using the
+[`InMemoryLookupKB`](/api/kb_in_memory) implementation.
## Assigned Attributes {#assigned-attributes}
@@ -171,7 +172,7 @@ with the current vocab.
>
> ```python
> def create_kb(vocab):
-> kb = KnowledgeBase(vocab, entity_vector_length=128)
+> kb = InMemoryLookupKB(vocab, entity_vector_length=128)
> kb.add_entity(...)
> kb.add_alias(...)
> return kb
@@ -199,12 +200,6 @@ knowledge base. This argument should be a function that takes a `Vocab` instance
and creates the `KnowledgeBase`, ensuring that the strings of the knowledge base
are synced with the current vocab.
-
-
-This method was previously called `begin_training`.
-
-
-
> #### Example
>
> ```python
diff --git a/website/docs/api/entityrecognizer.md b/website/docs/api/entityrecognizer.md
index a535e8316..06828eb04 100644
--- a/website/docs/api/entityrecognizer.md
+++ b/website/docs/api/entityrecognizer.md
@@ -165,12 +165,6 @@ arguments it receives via the
[`[initialize.components]`](/api/data-formats#config-initialize) block in the
config.
-
-
-This method was previously called `begin_training`.
-
-
-
> #### Example
>
> ```python
diff --git a/website/docs/api/entityruler.md b/website/docs/api/entityruler.md
index ef7acbbf1..651c87585 100644
--- a/website/docs/api/entityruler.md
+++ b/website/docs/api/entityruler.md
@@ -1,13 +1,24 @@
---
title: EntityRuler
-tag: class
-source: spacy/pipeline/entity_ruler.py
new: 2.1
teaser: 'Pipeline component for rule-based named entity recognition'
api_string_name: entity_ruler
api_trainable: false
---
+
+
+As of spaCy v4, there is no separate `EntityRuler` class. The entity ruler is
+implemented as a special case of the `SpanRuler` component.
+
+See the [migration guide](#migrating) below for differences between the v3
+`EntityRuler` and v4 `SpanRuler` implementations of the `entity_ruler`
+component.
+
+See the [`SpanRuler`](/api/spanruler) API docs for the full API.
+
+
+
The entity ruler lets you add spans to the [`Doc.ents`](/api/doc#ents) using
token-based rules or exact phrase matches. It can be combined with the
statistical [`EntityRecognizer`](/api/entityrecognizer) to boost accuracy, or
@@ -63,271 +74,51 @@ how the component should be configured. You can override its settings via the
| `ent_id_sep` | Separator used internally for entity IDs. Defaults to `"\|\|"`. ~~str~~ |
| `scorer` | The scoring method. Defaults to [`spacy.scorer.get_ner_prf`](/api/scorer#get_ner_prf). ~~Optional[Callable]~~ |
-```python
-%%GITHUB_SPACY/spacy/pipeline/entity_ruler.py
+## Migrating from v3 {#migrating}
+
+### Loading patterns
+
+Unlike the v3 `EntityRuler`, the `SpanRuler` cannot load patterns on
+initialization with `SpanRuler(patterns=patterns)` or directly from a JSONL file
+path with `SpanRuler.from_disk(jsonl_path)`. Patterns should be loaded from the
+JSONL file separately and then added through
+[`SpanRuler.initialize`](/api/spanruler#initialize) or
+[`SpanRuler.add_patterns`](/api/spanruler#add_patterns).
+
+```diff
+ ruler = nlp.get_pipe("entity_ruler")
+- ruler.from_disk("patterns.jsonl")
++ import srsly
++ patterns = srsly.read_jsonl("patterns.jsonl")
++ ruler.add_patterns(patterns)
```
-## EntityRuler.\_\_init\_\_ {#init tag="method"}
+### Saving patterns
-Initialize the entity ruler. If patterns are supplied here, they need to be a
-list of dictionaries with a `"label"` and `"pattern"` key. A pattern can either
-be a token pattern (list) or a phrase pattern (string). For example:
-`{"label": "ORG", "pattern": "Apple"}`.
+`SpanRuler.to_disk` always saves the full component data to a directory and does
+not include an option to save the patterns to a single JSONL file.
-> #### Example
->
-> ```python
-> # Construction via add_pipe
-> ruler = nlp.add_pipe("entity_ruler")
->
-> # Construction from class
-> from spacy.pipeline import EntityRuler
-> ruler = EntityRuler(nlp, overwrite_ents=True)
-> ```
+```diff
+ ruler = nlp.get_pipe("entity_ruler")
+- ruler.to_disk("patterns.jsonl")
++ import srsly
++ srsly.write_jsonl("patterns.jsonl", ruler.patterns)
+```
-| Name | Description |
-| --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `nlp` | The shared nlp object to pass the vocab to the matchers and process phrase patterns. ~~Language~~ |
-| `name` 3 | Instance name of the current pipeline component. Typically passed in automatically from the factory when the component is added. Used to disable the current entity ruler while creating phrase patterns with the nlp object. ~~str~~ |
-| _keyword-only_ | |
-| `phrase_matcher_attr` | Optional attribute name match on for the internal [`PhraseMatcher`](/api/phrasematcher), e.g. `LOWER` to match on the lowercase token text. Defaults to `None`. ~~Optional[Union[int, str]]~~ |
-| `validate` | Whether patterns should be validated, passed to Matcher and PhraseMatcher as `validate`. Defaults to `False`. ~~bool~~ |
-| `overwrite_ents` | If existing entities are present, e.g. entities added by the model, overwrite them by matches if necessary. Defaults to `False`. ~~bool~~ |
-| `ent_id_sep` | Separator used internally for entity IDs. Defaults to `"\|\|"`. ~~str~~ |
-| `patterns` | Optional patterns to load in on initialization. ~~Optional[List[Dict[str, Union[str, List[dict]]]]]~~ |
+### Accessing token and phrase patterns
-## EntityRuler.initialize {#initialize tag="method" new="3"}
+The separate token patterns and phrase patterns are no longer accessible under
+`ruler.token_patterns` or `ruler.phrase_patterns`. You can access the combined
+patterns in their original format using the property
+[`SpanRuler.patterns`](/api/spanruler#patterns).
-Initialize the component with data and used before training to load in rules
-from a [pattern file](/usage/rule-based-matching/#entityruler-files). This method
-is typically called by [`Language.initialize`](/api/language#initialize) and
-lets you customize arguments it receives via the
-[`[initialize.components]`](/api/data-formats#config-initialize) block in the
-config.
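+
+For example, a minimal before/after sketch for code that previously read the
+separate token and phrase pattern dicts:
+
+```diff
+  ruler = nlp.get_pipe("entity_ruler")
+- token_patterns = ruler.token_patterns
+- phrase_patterns = ruler.phrase_patterns
++ patterns = ruler.patterns
+```
+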
+### Removing patterns by ID
-> #### Example
->
-> ```python
-> entity_ruler = nlp.add_pipe("entity_ruler")
-> entity_ruler.initialize(lambda: [], nlp=nlp, patterns=patterns)
-> ```
->
-> ```ini
-> ### config.cfg
-> [initialize.components.entity_ruler]
->
-> [initialize.components.entity_ruler.patterns]
-> @readers = "srsly.read_jsonl.v1"
-> path = "corpus/entity_ruler_patterns.jsonl
-> ```
+[`SpanRuler.remove`](/api/spanruler#remove) removes by label rather than ID. To
+remove by ID, use [`SpanRuler.remove_by_id`](/api/spanruler#remove_by_id):
-| Name | Description |
-| -------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `get_examples` | Function that returns gold-standard annotations in the form of [`Example`](/api/example) objects. Not used by the `EntityRuler`. ~~Callable[[], Iterable[Example]]~~ |
-| _keyword-only_ | |
-| `nlp` | The current `nlp` object. Defaults to `None`. ~~Optional[Language]~~ |
-| `patterns` | The list of patterns. Defaults to `None`. ~~Optional[Sequence[Dict[str, Union[str, List[Dict[str, Any]]]]]]~~ |
-
-## EntityRuler.\_\len\_\_ {#len tag="method"}
-
-The number of all patterns added to the entity ruler.
-
-> #### Example
->
-> ```python
-> ruler = nlp.add_pipe("entity_ruler")
-> assert len(ruler) == 0
-> ruler.add_patterns([{"label": "ORG", "pattern": "Apple"}])
-> assert len(ruler) == 1
-> ```
-
-| Name | Description |
-| ----------- | ------------------------------- |
-| **RETURNS** | The number of patterns. ~~int~~ |
-
-## EntityRuler.\_\_contains\_\_ {#contains tag="method"}
-
-Whether a label is present in the patterns.
-
-> #### Example
->
-> ```python
-> ruler = nlp.add_pipe("entity_ruler")
-> ruler.add_patterns([{"label": "ORG", "pattern": "Apple"}])
-> assert "ORG" in ruler
-> assert not "PERSON" in ruler
-> ```
-
-| Name | Description |
-| ----------- | ----------------------------------------------------- |
-| `label` | The label to check. ~~str~~ |
-| **RETURNS** | Whether the entity ruler contains the label. ~~bool~~ |
-
-## EntityRuler.\_\_call\_\_ {#call tag="method"}
-
-Find matches in the `Doc` and add them to the `doc.ents`. Typically, this
-happens automatically after the component has been added to the pipeline using
-[`nlp.add_pipe`](/api/language#add_pipe). If the entity ruler was initialized
-with `overwrite_ents=True`, existing entities will be replaced if they overlap
-with the matches. When matches overlap in a Doc, the entity ruler prioritizes
-longer patterns over shorter, and if equal the match occuring first in the Doc
-is chosen.
-
-> #### Example
->
-> ```python
-> ruler = nlp.add_pipe("entity_ruler")
-> ruler.add_patterns([{"label": "ORG", "pattern": "Apple"}])
->
-> doc = nlp("A text about Apple.")
-> ents = [(ent.text, ent.label_) for ent in doc.ents]
-> assert ents == [("Apple", "ORG")]
-> ```
-
-| Name | Description |
-| ----------- | -------------------------------------------------------------------- |
-| `doc` | The `Doc` object to process, e.g. the `Doc` in the pipeline. ~~Doc~~ |
-| **RETURNS** | The modified `Doc` with added entities, if available. ~~Doc~~ |
-
-## EntityRuler.add_patterns {#add_patterns tag="method"}
-
-Add patterns to the entity ruler. A pattern can either be a token pattern (list
-of dicts) or a phrase pattern (string). For more details, see the usage guide on
-[rule-based matching](/usage/rule-based-matching).
-
-> #### Example
->
-> ```python
-> patterns = [
-> {"label": "ORG", "pattern": "Apple"},
-> {"label": "GPE", "pattern": [{"lower": "san"}, {"lower": "francisco"}]}
-> ]
-> ruler = nlp.add_pipe("entity_ruler")
-> ruler.add_patterns(patterns)
-> ```
-
-| Name | Description |
-| ---------- | ---------------------------------------------------------------- |
-| `patterns` | The patterns to add. ~~List[Dict[str, Union[str, List[dict]]]]~~ |
-
-
-## EntityRuler.remove {#remove tag="method" new="3.2.1"}
-
-Remove a pattern by its ID from the entity ruler. A `ValueError` is raised if the ID does not exist.
-
-> #### Example
->
-> ```python
-> patterns = [{"label": "ORG", "pattern": "Apple", "id": "apple"}]
-> ruler = nlp.add_pipe("entity_ruler")
-> ruler.add_patterns(patterns)
-> ruler.remove("apple")
-> ```
-
-| Name | Description |
-| ---------- | ---------------------------------------------------------------- |
-| `id` | The ID of the pattern rule. ~~str~~ |
-
-## EntityRuler.to_disk {#to_disk tag="method"}
-
-Save the entity ruler patterns to a directory. The patterns will be saved as
-newline-delimited JSON (JSONL). If a file with the suffix `.jsonl` is provided,
-only the patterns are saved as JSONL. If a directory name is provided, a
-`patterns.jsonl` and `cfg` file with the component configuration is exported.
-
-> #### Example
->
-> ```python
-> ruler = nlp.add_pipe("entity_ruler")
-> ruler.to_disk("/path/to/patterns.jsonl") # saves patterns only
-> ruler.to_disk("/path/to/entity_ruler") # saves patterns and config
-> ```
-
-| Name | Description |
-| ------ | -------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `path` | A path to a JSONL file or directory, which will be created if it doesn't exist. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ |
-
-## EntityRuler.from_disk {#from_disk tag="method"}
-
-Load the entity ruler from a path. Expects either a file containing
-newline-delimited JSON (JSONL) with one entry per line, or a directory
-containing a `patterns.jsonl` file and a `cfg` file with the component
-configuration.
-
-> #### Example
->
-> ```python
-> ruler = nlp.add_pipe("entity_ruler")
-> ruler.from_disk("/path/to/patterns.jsonl") # loads patterns only
-> ruler.from_disk("/path/to/entity_ruler") # loads patterns and config
-> ```
-
-| Name | Description |
-| ----------- | ------------------------------------------------------------------------------------------------------------- |
-| `path` | A path to a JSONL file or directory. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ |
-| **RETURNS** | The modified `EntityRuler` object. ~~EntityRuler~~ |
-
-## EntityRuler.to_bytes {#to_bytes tag="method"}
-
-Serialize the entity ruler patterns to a bytestring.
-
-> #### Example
->
-> ```python
-> ruler = nlp.add_pipe("entity_ruler")
-> ruler_bytes = ruler.to_bytes()
-> ```
-
-| Name | Description |
-| ----------- | ---------------------------------- |
-| **RETURNS** | The serialized patterns. ~~bytes~~ |
-
-## EntityRuler.from_bytes {#from_bytes tag="method"}
-
-Load the pipe from a bytestring. Modifies the object in place and returns it.
-
-> #### Example
->
-> ```python
-> ruler_bytes = ruler.to_bytes()
-> ruler = nlp.add_pipe("entity_ruler")
-> ruler.from_bytes(ruler_bytes)
-> ```
-
-| Name | Description |
-| ------------ | -------------------------------------------------- |
-| `bytes_data` | The bytestring to load. ~~bytes~~ |
-| **RETURNS** | The modified `EntityRuler` object. ~~EntityRuler~~ |
-
-## EntityRuler.labels {#labels tag="property"}
-
-All labels present in the match patterns.
-
-| Name | Description |
-| ----------- | -------------------------------------- |
-| **RETURNS** | The string labels. ~~Tuple[str, ...]~~ |
-
-## EntityRuler.ent_ids {#ent_ids tag="property" new="2.2.2"}
-
-All entity IDs present in the `id` properties of the match patterns.
-
-| Name | Description |
-| ----------- | ----------------------------------- |
-| **RETURNS** | The string IDs. ~~Tuple[str, ...]~~ |
-
-## EntityRuler.patterns {#patterns tag="property"}
-
-Get all patterns that were added to the entity ruler.
-
-| Name | Description |
-| ----------- | ---------------------------------------------------------------------------------------- |
-| **RETURNS** | The original patterns, one dictionary per pattern. ~~List[Dict[str, Union[str, dict]]]~~ |
-
-## Attributes {#attributes}
-
-| Name | Description |
-| ----------------- | --------------------------------------------------------------------------------------------------------------------- |
-| `matcher` | The underlying matcher used to process token patterns. ~~Matcher~~ |
-| `phrase_matcher` | The underlying phrase matcher used to process phrase patterns. ~~PhraseMatcher~~ |
-| `token_patterns` | The token patterns present in the entity ruler, keyed by label. ~~Dict[str, List[Dict[str, Union[str, List[dict]]]]~~ |
-| `phrase_patterns` | The phrase patterns present in the entity ruler, keyed by label. ~~Dict[str, List[Doc]]~~ |
+```diff
+ ruler = nlp.get_pipe("entity_ruler")
+- ruler.remove("id")
++ ruler.remove_by_id("id")
+```
diff --git a/website/docs/api/example.md b/website/docs/api/example.md
index ca9d3c056..63768d58f 100644
--- a/website/docs/api/example.md
+++ b/website/docs/api/example.md
@@ -23,11 +23,13 @@ both documents.
> ```python
> from spacy.tokens import Doc
> from spacy.training import Example
->
-> words = ["hello", "world", "!"]
-> spaces = [True, False, False]
-> predicted = Doc(nlp.vocab, words=words, spaces=spaces)
-> reference = parse_gold_doc(my_data)
+> pred_words = ["Apply", "some", "sunscreen"]
+> pred_spaces = [True, True, False]
+> gold_words = ["Apply", "some", "sun", "screen"]
+> gold_spaces = [True, True, False, False]
+> gold_tags = ["VERB", "DET", "NOUN", "NOUN"]
+> predicted = Doc(nlp.vocab, words=pred_words, spaces=pred_spaces)
+> reference = Doc(nlp.vocab, words=gold_words, spaces=gold_spaces, tags=gold_tags)
> example = Example(predicted, reference)
> ```
@@ -286,10 +288,14 @@ Calculate alignment tables between two tokenizations.
### Alignment attributes {#alignment-attributes"}
-| Name | Description |
-| ----- | --------------------------------------------------------------------- |
-| `x2y` | The `Ragged` object holding the alignment from `x` to `y`. ~~Ragged~~ |
-| `y2x` | The `Ragged` object holding the alignment from `y` to `x`. ~~Ragged~~ |
+Alignment attributes are managed using `AlignmentArray`, which is a
+simplified version of Thinc's [Ragged](https://thinc.ai/docs/api-types#ragged)
+type that only supports the `data` and `length` attributes.
+
+| Name | Description |
+| ----- | ------------------------------------------------------------------------------------- |
+| `x2y` | The `AlignmentArray` object holding the alignment from `x` to `y`. ~~AlignmentArray~~ |
+| `y2x` | The `AlignmentArray` object holding the alignment from `y` to `x`. ~~AlignmentArray~~ |
@@ -309,10 +315,10 @@ tokenizations add up to the same string. For example, you'll be able to align
> spacy_tokens = ["obama", "'s", "podcast"]
> alignment = Alignment.from_strings(bert_tokens, spacy_tokens)
> a2b = alignment.x2y
-> assert list(a2b.dataXd) == [0, 1, 1, 2]
+> assert list(a2b.data) == [0, 1, 1, 2]
> ```
>
-> If `a2b.dataXd[1] == a2b.dataXd[2] == 1`, that means that `A[1]` (`"'"`) and
+> If `a2b.data[1] == a2b.data[2] == 1`, that means that `A[1]` (`"'"`) and
> `A[2]` (`"s"`) both align to `B[1]` (`"'s"`).
### Alignment.from_strings {#classmethod tag="function"}
diff --git a/website/docs/api/kb.md b/website/docs/api/kb.md
index e7a8fcd6f..b217a1678 100644
--- a/website/docs/api/kb.md
+++ b/website/docs/api/kb.md
@@ -4,27 +4,45 @@ teaser:
A storage class for entities and aliases of a specific knowledge base
(ontology)
tag: class
-source: spacy/kb.pyx
+source: spacy/kb/kb.pyx
new: 2.2
---
-The `KnowledgeBase` object provides a method to generate
-[`Candidate`](/api/kb/#candidate) objects, which are plausible external
+The `KnowledgeBase` object is an abstract class providing a method to generate
+[`Candidate`](/api/kb#candidate) objects, which are plausible external
identifiers given a certain textual mention. Each such `Candidate` holds
information from the relevant KB entities, such as its frequency in text and
possible aliases. Each entity in the knowledge base also has a pretrained entity
vector of a fixed size.
+Beyond that, `KnowledgeBase` classes have to implement a number of utility
+functions called by the [`EntityLinker`](/api/entitylinker) component.
+
+
+
+This class was not abstract up to spaCy version 3.5. The `KnowledgeBase`
+implementation up to that point is available as `InMemoryLookupKB` from 3.5
+onwards.
+
+
+
## KnowledgeBase.\_\_init\_\_ {#init tag="method"}
-Create the knowledge base.
+`KnowledgeBase` is an abstract class and cannot be instantiated. Its child
+classes should call `__init__()` to set up some necessary attributes.
> #### Example
>
> ```python
> from spacy.kb import KnowledgeBase
+> from spacy.vocab import Vocab
+>
+> class FullyImplementedKB(KnowledgeBase):
+> def __init__(self, vocab: Vocab, entity_vector_length: int):
+> super().__init__(vocab, entity_vector_length)
+> ...
> vocab = nlp.vocab
-> kb = KnowledgeBase(vocab=vocab, entity_vector_length=64)
+> kb = FullyImplementedKB(vocab=vocab, entity_vector_length=64)
> ```
| Name | Description |
@@ -40,133 +58,66 @@ The length of the fixed-size entity vectors in the knowledge base.
| ----------- | ------------------------------------------------ |
| **RETURNS** | Length of the fixed-size entity vectors. ~~int~~ |
-## KnowledgeBase.add_entity {#add_entity tag="method"}
+## KnowledgeBase.get_candidates {#get_candidates tag="method"}
-Add an entity to the knowledge base, specifying its corpus frequency and entity
-vector, which should be of length
-[`entity_vector_length`](/api/kb#entity_vector_length).
+Given a certain textual mention as input, retrieve a list of candidate entities
+of type [`Candidate`](/api/kb#candidate).
> #### Example
>
> ```python
-> kb.add_entity(entity="Q42", freq=32, entity_vector=vector1)
-> kb.add_entity(entity="Q463035", freq=111, entity_vector=vector2)
+> from spacy.lang.en import English
+> nlp = English()
+> doc = nlp("Douglas Adams wrote 'The Hitchhiker's Guide to the Galaxy'.")
+> candidates = kb.get_candidates(doc[0:2])
> ```
-| Name | Description |
-| --------------- | ---------------------------------------------------------- |
-| `entity` | The unique entity identifier. ~~str~~ |
-| `freq` | The frequency of the entity in a typical corpus. ~~float~~ |
-| `entity_vector` | The pretrained vector of the entity. ~~numpy.ndarray~~ |
+| Name | Description |
+| ----------- | -------------------------------------------------------------------- |
+| `mention` | The textual mention or alias. ~~Span~~ |
+| **RETURNS** | An iterable of relevant `Candidate` objects. ~~Iterable[Candidate]~~ |
-## KnowledgeBase.set_entities {#set_entities tag="method"}
+## KnowledgeBase.get_candidates_batch {#get_candidates_batch tag="method"}
-Define the full list of entities in the knowledge base, specifying the corpus
-frequency and entity vector for each entity.
+Same as [`get_candidates()`](/api/kb#get_candidates), but for an arbitrary
+number of mentions. The [`EntityLinker`](/api/entitylinker) component will call
+`get_candidates_batch()` instead of `get_candidates()` if the config parameter
+`candidates_batch_size` is greater than or equal to 1.
+
+The default implementation of `get_candidates_batch()` executes
+`get_candidates()` in a loop. We recommend implementing a more efficient way to
+retrieve candidates for multiple mentions at once, if performance is of concern
+to you.
> #### Example
>
> ```python
-> kb.set_entities(entity_list=["Q42", "Q463035"], freq_list=[32, 111], vector_list=[vector1, vector2])
+> from spacy.lang.en import English
+> nlp = English()
+> doc = nlp("Douglas Adams wrote 'The Hitchhiker's Guide to the Galaxy'.")
+> candidates = kb.get_candidates_batch((doc[0:2], doc[3:]))
> ```
-| Name | Description |
-| ------------- | ---------------------------------------------------------------- |
-| `entity_list` | List of unique entity identifiers. ~~Iterable[Union[str, int]]~~ |
-| `freq_list` | List of entity frequencies. ~~Iterable[int]~~ |
-| `vector_list` | List of entity vectors. ~~Iterable[numpy.ndarray]~~ |
-
-## KnowledgeBase.add_alias {#add_alias tag="method"}
-
-Add an alias or mention to the knowledge base, specifying its potential KB
-identifiers and their prior probabilities. The entity identifiers should refer
-to entities previously added with [`add_entity`](/api/kb#add_entity) or
-[`set_entities`](/api/kb#set_entities). The sum of the prior probabilities
-should not exceed 1. Note that an empty string can not be used as alias.
-
-> #### Example
->
-> ```python
-> kb.add_alias(alias="Douglas", entities=["Q42", "Q463035"], probabilities=[0.6, 0.3])
-> ```
-
-| Name | Description |
-| --------------- | --------------------------------------------------------------------------------- |
-| `alias` | The textual mention or alias. Can not be the empty string. ~~str~~ |
-| `entities` | The potential entities that the alias may refer to. ~~Iterable[Union[str, int]]~~ |
-| `probabilities` | The prior probabilities of each entity. ~~Iterable[float]~~ |
-
-## KnowledgeBase.\_\_len\_\_ {#len tag="method"}
-
-Get the total number of entities in the knowledge base.
-
-> #### Example
->
-> ```python
-> total_entities = len(kb)
-> ```
-
-| Name | Description |
-| ----------- | ----------------------------------------------------- |
-| **RETURNS** | The number of entities in the knowledge base. ~~int~~ |
-
-## KnowledgeBase.get_entity_strings {#get_entity_strings tag="method"}
-
-Get a list of all entity IDs in the knowledge base.
-
-> #### Example
->
-> ```python
-> all_entities = kb.get_entity_strings()
-> ```
-
-| Name | Description |
-| ----------- | --------------------------------------------------------- |
-| **RETURNS** | The list of entities in the knowledge base. ~~List[str]~~ |
-
-## KnowledgeBase.get_size_aliases {#get_size_aliases tag="method"}
-
-Get the total number of aliases in the knowledge base.
-
-> #### Example
->
-> ```python
-> total_aliases = kb.get_size_aliases()
-> ```
-
-| Name | Description |
-| ----------- | ---------------------------------------------------- |
-| **RETURNS** | The number of aliases in the knowledge base. ~~int~~ |
-
-## KnowledgeBase.get_alias_strings {#get_alias_strings tag="method"}
-
-Get a list of all aliases in the knowledge base.
-
-> #### Example
->
-> ```python
-> all_aliases = kb.get_alias_strings()
-> ```
-
-| Name | Description |
-| ----------- | -------------------------------------------------------- |
-| **RETURNS** | The list of aliases in the knowledge base. ~~List[str]~~ |
+| Name | Description |
+| ----------- | -------------------------------------------------------------------------------------------- |
+| `mentions`  | The textual mentions or aliases. ~~Iterable[Span]~~                                           |
+| **RETURNS** | An iterable of iterables of relevant `Candidate` objects. ~~Iterable[Iterable[Candidate]]~~   |
## KnowledgeBase.get_alias_candidates {#get_alias_candidates tag="method"}
-Given a certain textual mention as input, retrieve a list of candidate entities
-of type [`Candidate`](/api/kb/#candidate).
+
+This method is _not_ available from spaCy 3.5 onwards.
+
-> #### Example
->
-> ```python
-> candidates = kb.get_alias_candidates("Douglas")
-> ```
-
-| Name | Description |
-| ----------- | ------------------------------------------------------------- |
-| `alias` | The textual mention or alias. ~~str~~ |
-| **RETURNS** | The list of relevant `Candidate` objects. ~~List[Candidate]~~ |
+From spaCy 3.5 onwards, `KnowledgeBase` is an abstract class (with
+[`InMemoryLookupKB`](/api/kb_in_memory) being a drop-in replacement) to allow
+more flexibility in customizing knowledge bases. Some of its methods were moved
+to [`InMemoryLookupKB`](/api/kb_in_memory) during this refactoring, one of those
+being `get_alias_candidates()`. This method is now available as
+[`InMemoryLookupKB.get_alias_candidates()`](/api/kb_in_memory#get_alias_candidates).
+Note: [`InMemoryLookupKB.get_candidates()`](/api/kb_in_memory#get_candidates)
+defaults to
+[`InMemoryLookupKB.get_alias_candidates()`](/api/kb_in_memory#get_alias_candidates).
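+
+For example, code that used to call this method on a plain `KnowledgeBase` can
+keep working by instantiating the in-memory implementation instead. The snippet
+below is a minimal sketch; entities and aliases still need to be added as
+before:
+
+```python
+from spacy.kb import InMemoryLookupKB  # drop-in replacement for the old class
+from spacy.vocab import Vocab
+
+kb = InMemoryLookupKB(vocab=Vocab(), entity_vector_length=64)
+# ... add entities and aliases as before ...
+candidates = kb.get_alias_candidates("Douglas")
+```
+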
## KnowledgeBase.get_vector {#get_vector tag="method"}
@@ -178,27 +129,30 @@ Given a certain entity ID, retrieve its pretrained entity vector.
> vector = kb.get_vector("Q42")
> ```
-| Name | Description |
-| ----------- | ------------------------------------ |
-| `entity` | The entity ID. ~~str~~ |
-| **RETURNS** | The entity vector. ~~numpy.ndarray~~ |
+| Name | Description |
+| ----------- | -------------------------------------- |
+| `entity` | The entity ID. ~~str~~ |
+| **RETURNS** | The entity vector. ~~Iterable[float]~~ |
-## KnowledgeBase.get_prior_prob {#get_prior_prob tag="method"}
+## KnowledgeBase.get_vectors {#get_vectors tag="method"}
-Given a certain entity ID and a certain textual mention, retrieve the prior
-probability of the fact that the mention links to the entity ID.
+Same as [`get_vector()`](/api/kb#get_vector), but for an arbitrary number of
+entity IDs.
+
+The default implementation of `get_vectors()` executes `get_vector()` in a loop.
+We recommend implementing a more efficient way to retrieve vectors for multiple
+entities at once, if performance is of concern to you.
> #### Example
>
> ```python
-> probability = kb.get_prior_prob("Q42", "Douglas")
+> vectors = kb.get_vectors(("Q42", "Q3107329"))
> ```
-| Name | Description |
-| ----------- | ------------------------------------------------------------------------- |
-| `entity` | The entity ID. ~~str~~ |
-| `alias` | The textual mention or alias. ~~str~~ |
-| **RETURNS** | The prior probability of the `alias` referring to the `entity`. ~~float~~ |
+| Name | Description |
+| ----------- | --------------------------------------------------------- |
+| `entities` | The entity IDs. ~~Iterable[str]~~ |
+| **RETURNS** | The entity vectors. ~~Iterable[Iterable[numpy.ndarray]]~~ |
## KnowledgeBase.to_disk {#to_disk tag="method"}
@@ -207,12 +161,13 @@ Save the current state of the knowledge base to a directory.
> #### Example
>
> ```python
-> kb.to_disk(loc)
+> kb.to_disk(path)
> ```
-| Name | Description |
-| ----- | ------------------------------------------------------------------------------------------------------------------------------------------ |
-| `loc` | A path to a directory, which will be created if it doesn't exist. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ |
+| Name | Description |
+| --------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
+| `path` | A path to a directory, which will be created if it doesn't exist. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ |
+| `exclude` | List of components to exclude. ~~Iterable[str]~~ |
## KnowledgeBase.from_disk {#from_disk tag="method"}
@@ -222,16 +177,16 @@ Restore the state of the knowledge base from a given directory. Note that the
> #### Example
>
> ```python
-> from spacy.kb import KnowledgeBase
> from spacy.vocab import Vocab
> vocab = Vocab().from_disk("/path/to/vocab")
-> kb = KnowledgeBase(vocab=vocab, entity_vector_length=64)
+> kb = FullyImplementedKB(vocab=vocab, entity_vector_length=64)
> kb.from_disk("/path/to/kb")
> ```
| Name | Description |
| ----------- | ----------------------------------------------------------------------------------------------- |
| `loc` | A path to a directory. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ |
+| `exclude` | List of components to exclude. ~~Iterable[str]~~ |
| **RETURNS** | The modified `KnowledgeBase` object. ~~KnowledgeBase~~ |
## Candidate {#candidate tag="class"}
diff --git a/website/docs/api/kb_in_memory.md b/website/docs/api/kb_in_memory.md
new file mode 100644
index 000000000..9e3279e6a
--- /dev/null
+++ b/website/docs/api/kb_in_memory.md
@@ -0,0 +1,302 @@
+---
+title: InMemoryLookupKB
+teaser:
+ The default implementation of the KnowledgeBase interface. Stores all
+ information in-memory.
+tag: class
+source: spacy/kb/kb_in_memory.pyx
+new: 3.5
+---
+
+The `InMemoryLookupKB` class inherits from [`KnowledgeBase`](/api/kb) and
+implements all of its methods. It stores all KB data in-memory and generates
+[`Candidate`](/api/kb#candidate) objects by exactly matching mentions with
+entity names. It's highly optimized for both a low memory footprint and speed of
+retrieval.
+
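+As a rough end-to-end sketch (the entity ID, frequency, vector and prior
+probability below are made-up illustration values), a small KB can be built and
+queried like this:
+
+```python
+import spacy
+from spacy.kb import InMemoryLookupKB
+
+nlp = spacy.blank("en")
+kb = InMemoryLookupKB(vocab=nlp.vocab, entity_vector_length=3)
+kb.add_entity(entity="Q42", freq=32, entity_vector=[1.0, 2.0, 3.0])
+kb.add_alias(alias="Douglas Adams", entities=["Q42"], probabilities=[0.9])
+
+doc = nlp("Douglas Adams wrote a book.")
+candidates = kb.get_candidates(doc[0:2])  # exact match on the alias text
+```
+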
+## InMemoryLookupKB.\_\_init\_\_ {#init tag="method"}
+
+Create the knowledge base.
+
+> #### Example
+>
+> ```python
+> from spacy.kb import InMemoryLookupKB
+> vocab = nlp.vocab
+> kb = InMemoryLookupKB(vocab=vocab, entity_vector_length=64)
+> ```
+
+| Name | Description |
+| ---------------------- | ------------------------------------------------ |
+| `vocab` | The shared vocabulary. ~~Vocab~~ |
+| `entity_vector_length` | Length of the fixed-size entity vectors. ~~int~~ |
+
+## InMemoryLookupKB.entity_vector_length {#entity_vector_length tag="property"}
+
+The length of the fixed-size entity vectors in the knowledge base.
+
+| Name | Description |
+| ----------- | ------------------------------------------------ |
+| **RETURNS** | Length of the fixed-size entity vectors. ~~int~~ |
+
+## InMemoryLookupKB.add_entity {#add_entity tag="method"}
+
+Add an entity to the knowledge base, specifying its corpus frequency and entity
+vector, which should be of length
+[`entity_vector_length`](/api/kb_in_memory#entity_vector_length).
+
+> #### Example
+>
+> ```python
+> kb.add_entity(entity="Q42", freq=32, entity_vector=vector1)
+> kb.add_entity(entity="Q463035", freq=111, entity_vector=vector2)
+> ```
+
+| Name | Description |
+| --------------- | ---------------------------------------------------------- |
+| `entity` | The unique entity identifier. ~~str~~ |
+| `freq` | The frequency of the entity in a typical corpus. ~~float~~ |
+| `entity_vector` | The pretrained vector of the entity. ~~numpy.ndarray~~ |
+
+## InMemoryLookupKB.set_entities {#set_entities tag="method"}
+
+Define the full list of entities in the knowledge base, specifying the corpus
+frequency and entity vector for each entity.
+
+> #### Example
+>
+> ```python
+> kb.set_entities(entity_list=["Q42", "Q463035"], freq_list=[32, 111], vector_list=[vector1, vector2])
+> ```
+
+| Name | Description |
+| ------------- | ---------------------------------------------------------------- |
+| `entity_list` | List of unique entity identifiers. ~~Iterable[Union[str, int]]~~ |
+| `freq_list` | List of entity frequencies. ~~Iterable[int]~~ |
+| `vector_list` | List of entity vectors. ~~Iterable[numpy.ndarray]~~ |
+
+## InMemoryLookupKB.add_alias {#add_alias tag="method"}
+
+Add an alias or mention to the knowledge base, specifying its potential KB
+identifiers and their prior probabilities. The entity identifiers should refer
+to entities previously added with [`add_entity`](/api/kb_in_memory#add_entity)
+or [`set_entities`](/api/kb_in_memory#set_entities). The sum of the prior
+probabilities should not exceed 1. Note that an empty string cannot be used as
+an alias.
+
+> #### Example
+>
+> ```python
+> kb.add_alias(alias="Douglas", entities=["Q42", "Q463035"], probabilities=[0.6, 0.3])
+> ```
+
+| Name | Description |
+| --------------- | --------------------------------------------------------------------------------- |
+| `alias`         | The textual mention or alias. Cannot be the empty string. ~~str~~                  |
+| `entities` | The potential entities that the alias may refer to. ~~Iterable[Union[str, int]]~~ |
+| `probabilities` | The prior probabilities of each entity. ~~Iterable[float]~~ |
+
+## InMemoryLookupKB.\_\_len\_\_ {#len tag="method"}
+
+Get the total number of entities in the knowledge base.
+
+> #### Example
+>
+> ```python
+> total_entities = len(kb)
+> ```
+
+| Name | Description |
+| ----------- | ----------------------------------------------------- |
+| **RETURNS** | The number of entities in the knowledge base. ~~int~~ |
+
+## InMemoryLookupKB.get_entity_strings {#get_entity_strings tag="method"}
+
+Get a list of all entity IDs in the knowledge base.
+
+> #### Example
+>
+> ```python
+> all_entities = kb.get_entity_strings()
+> ```
+
+| Name | Description |
+| ----------- | --------------------------------------------------------- |
+| **RETURNS** | The list of entities in the knowledge base. ~~List[str]~~ |
+
+## InMemoryLookupKB.get_size_aliases {#get_size_aliases tag="method"}
+
+Get the total number of aliases in the knowledge base.
+
+> #### Example
+>
+> ```python
+> total_aliases = kb.get_size_aliases()
+> ```
+
+| Name | Description |
+| ----------- | ---------------------------------------------------- |
+| **RETURNS** | The number of aliases in the knowledge base. ~~int~~ |
+
+## InMemoryLookupKB.get_alias_strings {#get_alias_strings tag="method"}
+
+Get a list of all aliases in the knowledge base.
+
+> #### Example
+>
+> ```python
+> all_aliases = kb.get_alias_strings()
+> ```
+
+| Name | Description |
+| ----------- | -------------------------------------------------------- |
+| **RETURNS** | The list of aliases in the knowledge base. ~~List[str]~~ |
+
+## InMemoryLookupKB.get_candidates {#get_candidates tag="method"}
+
+Given a certain textual mention as input, retrieve a list of candidate entities
+of type [`Candidate`](/api/kb#candidate). Wraps
+[`get_alias_candidates()`](/api/kb_in_memory#get_alias_candidates).
+
+> #### Example
+>
+> ```python
+> from spacy.lang.en import English
+> nlp = English()
+> doc = nlp("Douglas Adams wrote 'The Hitchhiker's Guide to the Galaxy'.")
+> candidates = kb.get_candidates(doc[0:2])
+> ```
+
+| Name | Description |
+| ----------- | -------------------------------------------------------------------- |
+| `mention` | The textual mention or alias. ~~Span~~ |
+| **RETURNS** | An iterable of relevant `Candidate` objects. ~~Iterable[Candidate]~~ |
+
+## InMemoryLookupKB.get_candidates_batch {#get_candidates_batch tag="method"}
+
+Same as [`get_candidates()`](/api/kb_in_memory#get_candidates), but for an
+arbitrary number of mentions. The [`EntityLinker`](/api/entitylinker) component
+will call `get_candidates_batch()` instead of `get_candidates()` if the config
+parameter `candidates_batch_size` is greater than or equal to 1.
+
+The default implementation of `get_candidates_batch()` executes
+`get_candidates()` in a loop. We recommend implementing a more efficient way to
+retrieve candidates for multiple mentions at once, if performance is of concern
+to you.
+
+> #### Example
+>
+> ```python
+> from spacy.lang.en import English
+> nlp = English()
+> doc = nlp("Douglas Adams wrote 'The Hitchhiker's Guide to the Galaxy'.")
+> candidates = kb.get_candidates_batch((doc[0:2], doc[3:]))
+> ```
+
+| Name | Description |
+| ----------- | -------------------------------------------------------------------------------------------- |
+| `mentions`  | The textual mentions or aliases. ~~Iterable[Span]~~                                           |
+| **RETURNS** | An iterable of iterables of relevant `Candidate` objects. ~~Iterable[Iterable[Candidate]]~~   |
+
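+The batching recommendation above can be as simple as caching repeated surface
+forms. The following subclass is a minimal sketch (the class name is made up
+for illustration), not part of the spaCy API:
+
+```python
+from typing import Dict, Iterable, List
+
+from spacy.kb import Candidate, InMemoryLookupKB
+from spacy.tokens import Span
+
+
+class CachedCandidateKB(InMemoryLookupKB):
+    """Cache lookups so identical surface forms are only resolved once."""
+
+    def get_candidates_batch(
+        self, mentions: Iterable[Span]
+    ) -> Iterable[Iterable[Candidate]]:
+        cache: Dict[str, List[Candidate]] = {}
+        results: List[List[Candidate]] = []
+        for mention in mentions:
+            if mention.text not in cache:
+                cache[mention.text] = list(self.get_candidates(mention))
+            results.append(cache[mention.text])
+        return results
+```
+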
+## InMemoryLookupKB.get_alias_candidates {#get_alias_candidates tag="method"}
+
+Given a certain textual mention as input, retrieve a list of candidate entities
+of type [`Candidate`](/api/kb#candidate).
+
+> #### Example
+>
+> ```python
+> candidates = kb.get_alias_candidates("Douglas")
+> ```
+
+| Name | Description |
+| ----------- | ------------------------------------------------------------- |
+| `alias` | The textual mention or alias. ~~str~~ |
+| **RETURNS** | The list of relevant `Candidate` objects. ~~List[Candidate]~~ |
+
+## InMemoryLookupKB.get_vector {#get_vector tag="method"}
+
+Given a certain entity ID, retrieve its pretrained entity vector.
+
+> #### Example
+>
+> ```python
+> vector = kb.get_vector("Q42")
+> ```
+
+| Name | Description |
+| ----------- | ------------------------------------ |
+| `entity` | The entity ID. ~~str~~ |
+| **RETURNS** | The entity vector. ~~numpy.ndarray~~ |
+
+## InMemoryLookupKB.get_vectors {#get_vectors tag="method"}
+
+Same as [`get_vector()`](/api/kb_in_memory#get_vector), but for an arbitrary
+number of entity IDs.
+
+The default implementation of `get_vectors()` executes `get_vector()` in a loop.
+We recommend implementing a more efficient way to retrieve vectors for multiple
+entities at once, if performance is of concern to you.
+
+> #### Example
+>
+> ```python
+> vectors = kb.get_vectors(("Q42", "Q3107329"))
+> ```
+
+| Name | Description |
+| ----------- | --------------------------------------------------------- |
+| `entities` | The entity IDs. ~~Iterable[str]~~ |
+| **RETURNS** | The entity vectors. ~~Iterable[Iterable[numpy.ndarray]]~~ |
+
+## InMemoryLookupKB.get_prior_prob {#get_prior_prob tag="method"}
+
+Given a certain entity ID and a certain textual mention, retrieve the prior
+probability of the fact that the mention links to the entity ID.
+
+> #### Example
+>
+> ```python
+> probability = kb.get_prior_prob("Q42", "Douglas")
+> ```
+
+| Name | Description |
+| ----------- | ------------------------------------------------------------------------- |
+| `entity` | The entity ID. ~~str~~ |
+| `alias` | The textual mention or alias. ~~str~~ |
+| **RETURNS** | The prior probability of the `alias` referring to the `entity`. ~~float~~ |
+
+## InMemoryLookupKB.to_disk {#to_disk tag="method"}
+
+Save the current state of the knowledge base to a directory.
+
+> #### Example
+>
+> ```python
+> kb.to_disk(path)
+> ```
+
+| Name | Description |
+| --------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
+| `path` | A path to a directory, which will be created if it doesn't exist. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ |
+| `exclude` | List of components to exclude. ~~Iterable[str]~~ |
+
+## InMemoryLookupKB.from_disk {#from_disk tag="method"}
+
+Restore the state of the knowledge base from a given directory. Note that the
+[`Vocab`](/api/vocab) should also be the same as the one used to create the KB.
+
+> #### Example
+>
+> ```python
+> from spacy.vocab import Vocab
+> vocab = Vocab().from_disk("/path/to/vocab")
+> kb = FullyImplementedKB(vocab=vocab, entity_vector_length=64)
+> kb.from_disk("/path/to/kb")
+> ```
+
+| Name | Description |
+| ----------- | ----------------------------------------------------------------------------------------------- |
+| `loc` | A path to a directory. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ |
+| `exclude` | List of components to exclude. ~~Iterable[str]~~ |
+| **RETURNS** | The modified `KnowledgeBase` object. ~~KnowledgeBase~~ |
diff --git a/website/docs/api/language.md b/website/docs/api/language.md
index ed763e36a..4d568df62 100644
--- a/website/docs/api/language.md
+++ b/website/docs/api/language.md
@@ -63,18 +63,18 @@ spaCy loads a model under the hood based on its
> nlp = Language.from_config(config)
> ```
-| Name | Description |
-| ------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `config` | The loaded config. ~~Union[Dict[str, Any], Config]~~ |
-| _keyword-only_ | |
-| `vocab` | A `Vocab` object. If `True`, a vocab is created using the default language data settings. ~~Vocab~~ |
-| `disable` | Name(s) of pipeline component(s) to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [`nlp.enable_pipe`](/api/language#enable_pipe). ~~Union[str, Iterable[str]]~~ |
-| `enable` 3.4 | Name(s) of pipeline component(s) to [enable](/usage/processing-pipelines#disabling). All other pipes will be disabled, but can be enabled again using [`nlp.enable_pipe`](/api/language#enable_pipe). ~~Union[str, Iterable[str]]~~ |
-| `exclude` | Name(s) of pipeline component(s) to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~Union[str, Iterable[str]]~~ |
-| `meta` | [Meta data](/api/data-formats#meta) overrides. ~~Dict[str, Any]~~ |
-| `auto_fill` | Whether to automatically fill in missing values in the config, based on defaults and function argument annotations. Defaults to `True`. ~~bool~~ |
-| `validate` | Whether to validate the component config and arguments against the types expected by the factory. Defaults to `True`. ~~bool~~ |
-| **RETURNS** | The initialized object. ~~Language~~ |
+| Name | Description |
+| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| `config` | The loaded config. ~~Union[Dict[str, Any], Config]~~ |
+| _keyword-only_ | |
+| `vocab` | A `Vocab` object. If `True`, a vocab is created using the default language data settings. ~~Vocab~~ |
+| `disable` | Name(s) of pipeline component(s) to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [nlp.enable_pipe](/api/language#enable_pipe). Is merged with the config entry `nlp.disabled`. ~~Union[str, Iterable[str]]~~ |
+| `enable` 3.4 | Name(s) of pipeline component(s) to [enable](/usage/processing-pipelines#disabling). All other pipes will be disabled, but can be enabled again using [nlp.enable_pipe](/api/language#enable_pipe). ~~Union[str, Iterable[str]]~~ |
+| `exclude` | Name(s) of pipeline component(s) to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~Union[str, Iterable[str]]~~ |
+| `meta` | [Meta data](/api/data-formats#meta) overrides. ~~Dict[str, Any]~~ |
+| `auto_fill` | Whether to automatically fill in missing values in the config, based on defaults and function argument annotations. Defaults to `True`. ~~bool~~ |
+| `validate` | Whether to validate the component config and arguments against the types expected by the factory. Defaults to `True`. ~~bool~~ |
+| **RETURNS** | The initialized object. ~~Language~~ |
## Language.component {#component tag="classmethod" new="3"}
@@ -164,6 +164,9 @@ examples, see the
Apply the pipeline to some text. The text can span multiple sentences, and can
contain arbitrary whitespace. Alignment into the original string is preserved.
+Instead of text, a `Doc` can be passed as input, in which case tokenization is
+skipped, but the rest of the pipeline is run.
+
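+For example, a pre-tokenized `Doc` built from a custom word list can be
+annotated by the remaining components (a minimal sketch, assuming `nlp` is a
+loaded pipeline):
+
+```python
+from spacy.tokens import Doc
+
+doc = Doc(nlp.vocab, words=["Hello", "world", "!"], spaces=[True, False, False])
+doc = nlp(doc)  # tokenization is skipped, the other components still run
+```
+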
> #### Example
>
> ```python
@@ -173,7 +176,7 @@ contain arbitrary whitespace. Alignment into the original string is preserved.
| Name | Description |
| --------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- |
-| `text` | The text to be processed. ~~str~~ |
+| `text` | The text to be processed, or a Doc. ~~Union[str, Doc]~~ |
| _keyword-only_ | |
| `disable` | Names of pipeline components to [disable](/usage/processing-pipelines#disabling). ~~List[str]~~ |
| `component_cfg` | Optional dictionary of keyword arguments for components, keyed by component names. Defaults to `None`. ~~Optional[Dict[str, Dict[str, Any]]]~~ |
@@ -184,6 +187,9 @@ contain arbitrary whitespace. Alignment into the original string is preserved.
Process texts as a stream, and yield `Doc` objects in order. This is usually
more efficient than processing texts one-by-one.
+Instead of text, a `Doc` object can be passed as input. In this case
+tokenization is skipped but the rest of the pipeline is run.
+
> #### Example
>
> ```python
@@ -192,16 +198,16 @@ more efficient than processing texts one-by-one.
> assert doc.has_annotation("DEP")
> ```
-| Name | Description |
-| ------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `texts` | A sequence of strings. ~~Iterable[str]~~ |
-| _keyword-only_ | |
-| `as_tuples` | If set to `True`, inputs should be a sequence of `(text, context)` tuples. Output will then be a sequence of `(doc, context)` tuples. Defaults to `False`. ~~bool~~ |
-| `batch_size` | The number of texts to buffer. ~~Optional[int]~~ |
-| `disable` | Names of pipeline components to [disable](/usage/processing-pipelines#disabling). ~~List[str]~~ |
-| `component_cfg` | Optional dictionary of keyword arguments for components, keyed by component names. Defaults to `None`. ~~Optional[Dict[str, Dict[str, Any]]]~~ |
-| `n_process` 2.2.2 | Number of processors to use. Defaults to `1`. ~~int~~ |
-| **YIELDS** | Documents in the order of the original text. ~~Doc~~ |
+| Name | Description |
+| --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `texts` | A sequence of strings (or `Doc` objects). ~~Iterable[Union[str, Doc]]~~ |
+| _keyword-only_ | |
+| `as_tuples` | If set to `True`, inputs should be a sequence of `(text, context)` tuples. Output will then be a sequence of `(doc, context)` tuples. Defaults to `False`. ~~bool~~ |
+| `batch_size` | The number of texts to buffer. ~~Optional[int]~~ |
+| `disable` | Names of pipeline components to [disable](/usage/processing-pipelines#disabling). ~~List[str]~~ |
+| `component_cfg` | Optional dictionary of keyword arguments for components, keyed by component names. Defaults to `None`. ~~Optional[Dict[str, Dict[str, Any]]]~~ |
+| `n_process` | Number of processors to use. Defaults to `1`. ~~int~~ |
+| **YIELDS** | Documents in the order of the original text. ~~Doc~~ |
## Language.set_error_handler {#set_error_handler tag="method" new="3"}
@@ -253,15 +259,6 @@ either in the [config](/usage/training#config), or by calling
[`pipe.add_label`](/api/pipe#add_label) for each possible output label (e.g. for
the tagger or textcat).
-
-
-This method was previously called `begin_training`. It now also takes a
-**function** that is called with no arguments and returns a sequence of
-[`Example`](/api/example) objects instead of tuples of `Doc` and `GoldParse`
-objects.
-
-
-
> #### Example
>
> ```python
@@ -1024,21 +1021,21 @@ details.
## Attributes {#attributes}
-| Name | Description |
-| --------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- |
-| `vocab` | A container for the lexical types. ~~Vocab~~ |
-| `tokenizer` | The tokenizer. ~~Tokenizer~~ |
-| `make_doc` | Callable that takes a string and returns a `Doc`. ~~Callable[[str], Doc]~~ |
-| `pipeline` | List of `(name, component)` tuples describing the current processing pipeline, in order. ~~List[Tuple[str, Callable[[Doc], Doc]]]~~ |
-| `pipe_names` 2 | List of pipeline component names, in order. ~~List[str]~~ |
-| `pipe_labels` 2.2 | List of labels set by the pipeline components, if available, keyed by component name. ~~Dict[str, List[str]]~~ |
-| `pipe_factories` 2.2 | Dictionary of pipeline component names, mapped to their factory names. ~~Dict[str, str]~~ |
-| `factories` | All available factory functions, keyed by name. ~~Dict[str, Callable[[...], Callable[[Doc], Doc]]]~~ |
-| `factory_names` 3 | List of all available factory names. ~~List[str]~~ |
-| `components` 3 | List of all available `(name, component)` tuples, including components that are currently disabled. ~~List[Tuple[str, Callable[[Doc], Doc]]]~~ |
-| `component_names` 3 | List of all available component names, including components that are currently disabled. ~~List[str]~~ |
-| `disabled` 3 | Names of components that are currently disabled and don't run as part of the pipeline. ~~List[str]~~ |
-| `path` 2 | Path to the pipeline data directory, if a pipeline is loaded from a path or package. Otherwise `None`. ~~Optional[Path]~~ |
+| Name | Description |
+| -------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- |
+| `vocab` | A container for the lexical types. ~~Vocab~~ |
+| `tokenizer` | The tokenizer. ~~Tokenizer~~ |
+| `make_doc` | Callable that takes a string and returns a `Doc`. ~~Callable[[str], Doc]~~ |
+| `pipeline` | List of `(name, component)` tuples describing the current processing pipeline, in order. ~~List[Tuple[str, Callable[[Doc], Doc]]]~~ |
+| `pipe_names` | List of pipeline component names, in order. ~~List[str]~~ |
+| `pipe_labels` | List of labels set by the pipeline components, if available, keyed by component name. ~~Dict[str, List[str]]~~ |
+| `pipe_factories` | Dictionary of pipeline component names, mapped to their factory names. ~~Dict[str, str]~~ |
+| `factories` | All available factory functions, keyed by name. ~~Dict[str, Callable[[...], Callable[[Doc], Doc]]]~~ |
+| `factory_names` 3 | List of all available factory names. ~~List[str]~~ |
+| `components` 3 | List of all available `(name, component)` tuples, including components that are currently disabled. ~~List[Tuple[str, Callable[[Doc], Doc]]]~~ |
+| `component_names` 3 | List of all available component names, including components that are currently disabled. ~~List[str]~~ |
+| `disabled` 3 | Names of components that are currently disabled and don't run as part of the pipeline. ~~List[str]~~ |
+| `path` | Path to the pipeline data directory, if a pipeline is loaded from a path or package. Otherwise `None`. ~~Optional[Path]~~ |
## Class attributes {#class-attributes}
diff --git a/website/docs/api/lexeme.md b/website/docs/api/lexeme.md
index c5d4b7544..e13f25209 100644
--- a/website/docs/api/lexeme.md
+++ b/website/docs/api/lexeme.md
@@ -121,44 +121,43 @@ The L2 norm of the lexeme's vector representation.
## Attributes {#attributes}
-| Name | Description |
-| -------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `vocab` | The lexeme's vocabulary. ~~Vocab~~ |
-| `text` | Verbatim text content. ~~str~~ |
-| `orth` | ID of the verbatim text content. ~~int~~ |
-| `orth_` | Verbatim text content (identical to `Lexeme.text`). Exists mostly for consistency with the other attributes. ~~str~~ |
-| `rank` | Sequential ID of the lexeme's lexical type, used to index into tables, e.g. for word vectors. ~~int~~ |
-| `flags` | Container of the lexeme's binary flags. ~~int~~ |
-| `norm` | The lexeme's norm, i.e. a normalized form of the lexeme text. ~~int~~ |
-| `norm_` | The lexeme's norm, i.e. a normalized form of the lexeme text. ~~str~~ |
-| `lower` | Lowercase form of the word. ~~int~~ |
-| `lower_` | Lowercase form of the word. ~~str~~ |
-| `shape` | Transform of the word's string, to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example,`"Xxxx"`or`"dd"`. ~~int~~ |
-| `shape_` | Transform of the word's string, to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example,`"Xxxx"`or`"dd"`. ~~str~~ |
-| `prefix` | Length-N substring from the start of the word. Defaults to `N=1`. ~~int~~ |
-| `prefix_` | Length-N substring from the start of the word. Defaults to `N=1`. ~~str~~ |
-| `suffix` | Length-N substring from the end of the word. Defaults to `N=3`. ~~int~~ |
-| `suffix_` | Length-N substring from the start of the word. Defaults to `N=3`. ~~str~~ |
-| `is_alpha` | Does the lexeme consist of alphabetic characters? Equivalent to `lexeme.text.isalpha()`. ~~bool~~ |
-| `is_ascii` | Does the lexeme consist of ASCII characters? Equivalent to `[any(ord(c) >= 128 for c in lexeme.text)]`. ~~bool~~ |
-| `is_digit` | Does the lexeme consist of digits? Equivalent to `lexeme.text.isdigit()`. ~~bool~~ |
-| `is_lower` | Is the lexeme in lowercase? Equivalent to `lexeme.text.islower()`. ~~bool~~ |
-| `is_upper` | Is the lexeme in uppercase? Equivalent to `lexeme.text.isupper()`. ~~bool~~ |
-| `is_title` | Is the lexeme in titlecase? Equivalent to `lexeme.text.istitle()`. ~~bool~~ |
-| `is_punct` | Is the lexeme punctuation? ~~bool~~ |
-| `is_left_punct` | Is the lexeme a left punctuation mark, e.g. `(`? ~~bool~~ |
-| `is_right_punct` | Is the lexeme a right punctuation mark, e.g. `)`? ~~bool~~ |
-| `is_space` | Does the lexeme consist of whitespace characters? Equivalent to `lexeme.text.isspace()`. ~~bool~~ |
-| `is_bracket` | Is the lexeme a bracket? ~~bool~~ |
-| `is_quote` | Is the lexeme a quotation mark? ~~bool~~ |
-| `is_currency` 2.0.8 | Is the lexeme a currency symbol? ~~bool~~ |
-| `like_url` | Does the lexeme resemble a URL? ~~bool~~ |
-| `like_num` | Does the lexeme represent a number? e.g. "10.9", "10", "ten", etc. ~~bool~~ |
-| `like_email` | Does the lexeme resemble an email address? ~~bool~~ |
-| `is_oov` | Is the lexeme out-of-vocabulary (i.e. does it not have a word vector)? ~~bool~~ |
-| `is_stop` | Is the lexeme part of a "stop list"? ~~bool~~ |
-| `lang` | Language of the parent vocabulary. ~~int~~ |
-| `lang_` | Language of the parent vocabulary. ~~str~~ |
-| `prob` | Smoothed log probability estimate of the lexeme's word type (context-independent entry in the vocabulary). ~~float~~ |
-| `cluster` | Brown cluster ID. ~~int~~ |
-| `sentiment` | A scalar value indicating the positivity or negativity of the lexeme. ~~float~~ |
+| Name | Description |
+| ---------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `vocab` | The lexeme's vocabulary. ~~Vocab~~ |
+| `text` | Verbatim text content. ~~str~~ |
+| `orth` | ID of the verbatim text content. ~~int~~ |
+| `orth_` | Verbatim text content (identical to `Lexeme.text`). Exists mostly for consistency with the other attributes. ~~str~~ |
+| `rank` | Sequential ID of the lexeme's lexical type, used to index into tables, e.g. for word vectors. ~~int~~ |
+| `flags` | Container of the lexeme's binary flags. ~~int~~ |
+| `norm` | The lexeme's norm, i.e. a normalized form of the lexeme text. ~~int~~ |
+| `norm_` | The lexeme's norm, i.e. a normalized form of the lexeme text. ~~str~~ |
+| `lower` | Lowercase form of the word. ~~int~~ |
+| `lower_` | Lowercase form of the word. ~~str~~ |
+| `shape` | Transform of the word's string, to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example,`"Xxxx"`or`"dd"`. ~~int~~ |
+| `shape_` | Transform of the word's string, to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example,`"Xxxx"`or`"dd"`. ~~str~~ |
+| `prefix` | Length-N substring from the start of the word. Defaults to `N=1`. ~~int~~ |
+| `prefix_` | Length-N substring from the start of the word. Defaults to `N=1`. ~~str~~ |
+| `suffix` | Length-N substring from the end of the word. Defaults to `N=3`. ~~int~~ |
+| `suffix_` | Length-N substring from the end of the word. Defaults to `N=3`. ~~str~~ |
+| `is_alpha` | Does the lexeme consist of alphabetic characters? Equivalent to `lexeme.text.isalpha()`. ~~bool~~ |
+| `is_ascii`       | Does the lexeme consist of ASCII characters? Equivalent to `all(ord(c) < 128 for c in lexeme.text)`. ~~bool~~                                                                                                                                          |
+| `is_digit` | Does the lexeme consist of digits? Equivalent to `lexeme.text.isdigit()`. ~~bool~~ |
+| `is_lower` | Is the lexeme in lowercase? Equivalent to `lexeme.text.islower()`. ~~bool~~ |
+| `is_upper` | Is the lexeme in uppercase? Equivalent to `lexeme.text.isupper()`. ~~bool~~ |
+| `is_title` | Is the lexeme in titlecase? Equivalent to `lexeme.text.istitle()`. ~~bool~~ |
+| `is_punct` | Is the lexeme punctuation? ~~bool~~ |
+| `is_left_punct` | Is the lexeme a left punctuation mark, e.g. `(`? ~~bool~~ |
+| `is_right_punct` | Is the lexeme a right punctuation mark, e.g. `)`? ~~bool~~ |
+| `is_space` | Does the lexeme consist of whitespace characters? Equivalent to `lexeme.text.isspace()`. ~~bool~~ |
+| `is_bracket` | Is the lexeme a bracket? ~~bool~~ |
+| `is_quote` | Is the lexeme a quotation mark? ~~bool~~ |
+| `is_currency` | Is the lexeme a currency symbol? ~~bool~~ |
+| `like_url` | Does the lexeme resemble a URL? ~~bool~~ |
+| `like_num` | Does the lexeme represent a number? e.g. "10.9", "10", "ten", etc. ~~bool~~ |
+| `like_email` | Does the lexeme resemble an email address? ~~bool~~ |
+| `is_oov` | Is the lexeme out-of-vocabulary (i.e. does it not have a word vector)? ~~bool~~ |
+| `is_stop` | Is the lexeme part of a "stop list"? ~~bool~~ |
+| `lang` | Language of the parent vocabulary. ~~int~~ |
+| `lang_` | Language of the parent vocabulary. ~~str~~ |
+| `prob` | Smoothed log probability estimate of the lexeme's word type (context-independent entry in the vocabulary). ~~float~~ |
+| `cluster` | Brown cluster ID. ~~int~~ |
diff --git a/website/docs/api/matcher.md b/website/docs/api/matcher.md
index ff6923cf2..e3fc86e48 100644
--- a/website/docs/api/matcher.md
+++ b/website/docs/api/matcher.md
@@ -33,7 +33,7 @@ rule-based matching are:
| Attribute | Description |
| ---------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- |
| `ORTH` | The exact verbatim text of a token. ~~str~~ |
-| `TEXT` 2.1 | The exact verbatim text of a token. ~~str~~ |
+| `TEXT` | The exact verbatim text of a token. ~~str~~ |
| `NORM` | The normalized form of the token text. ~~str~~ |
| `LOWER` | The lowercase form of the token text. ~~str~~ |
| `LENGTH` | The length of the token text. ~~int~~ |
@@ -48,7 +48,7 @@ rule-based matching are:
| `ENT_IOB` | The IOB part of the token's entity tag. ~~str~~ |
| `ENT_ID` | The token's entity ID (`ent_id`). ~~str~~ |
| `ENT_KB_ID` | The token's entity knowledge base ID (`ent_kb_id`). ~~str~~ |
-| `_` 2.1 | Properties in [custom extension attributes](/usage/processing-pipelines#custom-components-attributes). ~~Dict[str, Any]~~ |
+| `_` | Properties in [custom extension attributes](/usage/processing-pipelines#custom-components-attributes). ~~Dict[str, Any]~~ |
| `OP` | Operator or quantifier to determine how often to match a token pattern. ~~str~~ |
Operators and quantifiers define **how often** a token pattern should be
@@ -109,10 +109,10 @@ string where an integer is expected) or unexpected property names.
> matcher = Matcher(nlp.vocab)
> ```
-| Name | Description |
-| --------------------------------------- | ----------------------------------------------------------------------------------------------------- |
-| `vocab` | The vocabulary object, which must be shared with the documents the matcher will operate on. ~~Vocab~~ |
-| `validate` 2.1 | Validate all patterns added to this matcher. ~~bool~~ |
+| Name | Description |
+| ---------- | ----------------------------------------------------------------------------------------------------- |
+| `vocab` | The vocabulary object, which must be shared with the documents the matcher will operate on. ~~Vocab~~ |
+| `validate` | Validate all patterns added to this matcher. ~~bool~~ |
## Matcher.\_\_call\_\_ {#call tag="method"}
diff --git a/website/docs/api/phrasematcher.md b/website/docs/api/phrasematcher.md
index b06198916..0ef4e54da 100644
--- a/website/docs/api/phrasematcher.md
+++ b/website/docs/api/phrasematcher.md
@@ -36,11 +36,11 @@ be shown.
> matcher = PhraseMatcher(nlp.vocab)
> ```
-| Name | Description |
-| --------------------------------------- | ------------------------------------------------------------------------------------------------------ |
-| `vocab` | The vocabulary object, which must be shared with the documents the matcher will operate on. ~~Vocab~~ |
-| `attr` 2.1 | The token attribute to match on. Defaults to `ORTH`, i.e. the verbatim token text. ~~Union[int, str]~~ |
-| `validate` 2.1 | Validate patterns added to the matcher. ~~bool~~ |
+| Name | Description |
+| ---------- | ------------------------------------------------------------------------------------------------------ |
+| `vocab` | The vocabulary object, which must be shared with the documents the matcher will operate on. ~~Vocab~~ |
+| `attr` | The token attribute to match on. Defaults to `ORTH`, i.e. the verbatim token text. ~~Union[int, str]~~ |
+| `validate` | Validate patterns added to the matcher. ~~bool~~ |
## PhraseMatcher.\_\_call\_\_ {#call tag="method"}
diff --git a/website/docs/api/pipe.md b/website/docs/api/pipe.md
index 263942e3e..70a4648b6 100644
--- a/website/docs/api/pipe.md
+++ b/website/docs/api/pipe.md
@@ -152,12 +152,6 @@ network,
setting up the label scheme based on the data. This method is typically called
by [`Language.initialize`](/api/language#initialize).
-
-
-This method was previously called `begin_training`.
-
-
-
> #### Example
>
> ```python
diff --git a/website/docs/api/pipeline-functions.md b/website/docs/api/pipeline-functions.md
index 1b7017ca7..070292782 100644
--- a/website/docs/api/pipeline-functions.md
+++ b/website/docs/api/pipeline-functions.md
@@ -153,3 +153,36 @@ whole pipeline has run.
| `attrs` | A dict of the `Doc` attributes and the values to set them to. Defaults to `{"tensor": None, "_.trf_data": None}` to clean up after `tok2vec` and `transformer` components. ~~dict~~ |
| `silent` | If `False`, show warnings if attributes aren't found or can't be set. Defaults to `True`. ~~bool~~ |
| **RETURNS** | The modified `Doc` with the modified attributes. ~~Doc~~ |
+
+## span_cleaner {#span_cleaner tag="function,experimental"}
+
+Remove `SpanGroup`s from `doc.spans` based on a key prefix. This is used to
+clean up after the [`CoreferenceResolver`](/api/coref) when it's paired with a
+[`SpanResolver`](/api/span-resolver).
+
+
+
+This pipeline function is not yet integrated into spaCy core, and is available
+via the extension package
+[`spacy-experimental`](https://github.com/explosion/spacy-experimental) starting
+in version 0.6.0. It exposes the component via
+[entry points](/usage/saving-loading/#entry-points), so if you have the package
+installed, using `factory = "span_cleaner"` in your
+[training config](/usage/training#config) or `nlp.add_pipe("span_cleaner")` will
+work out-of-the-box.
+
+
+
+> #### Example
+>
+> ```python
+> config = {"prefix": "coref_head_clusters"}
+> nlp.add_pipe("span_cleaner", config=config)
+> doc = nlp("text")
+> assert "coref_head_clusters_1" not in doc.spans
+> ```
+
+| Setting | Description |
+| ----------- | ------------------------------------------------------------------------------------------------------------------------- |
+| `prefix` | A prefix to check `SpanGroup` keys for. Any matching groups will be removed. Defaults to `"coref_head_clusters"`. ~~str~~ |
+| **RETURNS** | The modified `Doc` with any matching spans removed. ~~Doc~~ |
diff --git a/website/docs/api/scorer.md b/website/docs/api/scorer.md
index 8dbe3b276..9ef36e6fc 100644
--- a/website/docs/api/scorer.md
+++ b/website/docs/api/scorer.md
@@ -229,16 +229,17 @@ The reported `{attr}_score` depends on the classification properties:
> print(scores["cats_macro_auc"])
> ```
-| Name | Description |
-| ---------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `examples` | The `Example` objects holding both the predictions and the correct gold-standard annotations. ~~Iterable[Example]~~ |
-| `attr` | The attribute to score. ~~str~~ |
-| _keyword-only_ | |
-| `getter` | Defaults to `getattr`. If provided, `getter(doc, attr)` should return the cats for an individual `Doc`. ~~Callable[[Doc, str], Dict[str, float]]~~ |
-| labels | The set of possible labels. Defaults to `[]`. ~~Iterable[str]~~ |
-| `multi_label` | Whether the attribute allows multiple labels. Defaults to `True`. ~~bool~~ |
-| `positive_label` | The positive label for a binary task with exclusive classes. Defaults to `None`. ~~Optional[str]~~ |
-| **RETURNS** | A dictionary containing the scores, with inapplicable scores as `None`. ~~Dict[str, Optional[float]]~~ |
+| Name | Description |
+| ---------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `examples` | The `Example` objects holding both the predictions and the correct gold-standard annotations. ~~Iterable[Example]~~ |
+| `attr` | The attribute to score. ~~str~~ |
+| _keyword-only_ | |
+| `getter` | Defaults to `getattr`. If provided, `getter(doc, attr)` should return the cats for an individual `Doc`. ~~Callable[[Doc, str], Dict[str, float]]~~ |
+| `labels`         | The set of possible labels. Defaults to `[]`. ~~Iterable[str]~~                                                                                                                                     |
+| `multi_label` | Whether the attribute allows multiple labels. Defaults to `True`. When set to `False` (exclusive labels), missing gold labels are interpreted as `0.0` and the threshold is set to `0.0`. ~~bool~~ |
+| `positive_label` | The positive label for a binary task with exclusive classes. Defaults to `None`. ~~Optional[str]~~ |
+| `threshold` | Cutoff to consider a prediction "positive". Defaults to `0.5` for multi-label, and `0.0` (i.e. whatever's highest scoring) otherwise. ~~float~~ |
+| **RETURNS** | A dictionary containing the scores, with inapplicable scores as `None`. ~~Dict[str, Optional[float]]~~ |
## Scorer.score_links {#score_links tag="staticmethod" new="3"}
@@ -270,3 +271,62 @@ Compute micro-PRF and per-entity PRF scores.
| Name | Description |
| ---------- | ------------------------------------------------------------------------------------------------------------------- |
| `examples` | The `Example` objects holding both the predictions and the correct gold-standard annotations. ~~Iterable[Example]~~ |
+
+## score_coref_clusters {#score_coref_clusters tag="experimental"}
+
+Returns LEA ([Moosavi and Strube, 2016](https://aclanthology.org/P16-1060/)) PRF
+scores for coreference clusters.
+
+
+
+Note that this scoring function is not yet included in spaCy core. For details,
+see the [CoreferenceResolver](/api/coref) docs.
+
+
+
+> #### Example
+>
+> ```python
+> scores = score_coref_clusters(
+> examples,
+> span_cluster_prefix="coref_clusters",
+> )
+> print(scores["coref_f"])
+> ```
+
+| Name | Description |
+| --------------------- | ------------------------------------------------------------------------------------------------------------------- |
+| `examples` | The `Example` objects holding both the predictions and the correct gold-standard annotations. ~~Iterable[Example]~~ |
+| _keyword-only_ | |
+| `span_cluster_prefix` | The prefix used for spans representing coreference clusters. ~~str~~ |
+| **RETURNS** | A dictionary containing the scores. ~~Dict[str, Optional[float]]~~ |
+
+## score_span_predictions {#score_span_predictions tag="experimental"}
+
+Return accuracy for reconstructions of spans from single tokens. Only exactly
+correct predictions are counted as correct; there is no partial credit for
+near answers. Used by the [SpanResolver](/api/span-resolver).
+
+
+
+Note that this scoring function is not yet included in spaCy core. For details,
+see the [SpanResolver](/api/span-resolver) docs.
+
+
+
+> #### Example
+>
+> ```python
+> scores = score_span_predictions(
+> examples,
+> output_prefix="coref_clusters",
+> )
+> print(scores["span_coref_clusters_accuracy"])
+> ```
+
+| Name | Description |
+| --------------- | ------------------------------------------------------------------------------------------------------------------- |
+| `examples` | The `Example` objects holding both the predictions and the correct gold-standard annotations. ~~Iterable[Example]~~ |
+| _keyword-only_ | |
+| `output_prefix` | The prefix used for spans representing the final predicted spans. ~~str~~ |
+| **RETURNS** | A dictionary containing the scores. ~~Dict[str, Optional[float]]~~ |
diff --git a/website/docs/api/span-resolver.md b/website/docs/api/span-resolver.md
new file mode 100644
index 000000000..3e992cd03
--- /dev/null
+++ b/website/docs/api/span-resolver.md
@@ -0,0 +1,356 @@
+---
+title: SpanResolver
+tag: class,experimental
+source: spacy-experimental/coref/span_resolver_component.py
+teaser: 'Pipeline component for resolving tokens into spans'
+api_base_class: /api/pipe
+api_string_name: span_resolver
+api_trainable: true
+---
+
+> #### Installation
+>
+> ```bash
+> $ pip install -U spacy-experimental
+> ```
+
+
+
+This component is not yet integrated into spaCy core and is available via the
+extension package
+[`spacy-experimental`](https://github.com/explosion/spacy-experimental) starting
+in version 0.6.0. It exposes the component via
+[entry points](/usage/saving-loading/#entry-points), so if you have the package
+installed, using `factory = "experimental_span_resolver"` in your
+[training config](/usage/training#config) or
+`nlp.add_pipe("experimental_span_resolver")` will work out-of-the-box.
+
+
+
+A `SpanResolver` component takes in tokens (represented as `Span` objects of
+length 1) and resolves them into `Span` objects of arbitrary length. The initial
+use case is as a post-processing step on word-level
+[coreference resolution](/api/coref). The input and output keys used to store
+`Span` objects are configurable.
+
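+A minimal usage sketch (assuming a pipeline that was trained with both the
+experimental coreference component and this span resolver; the pipeline path
+below is hypothetical and the span group names are the defaults):
+
+```python
+import spacy
+
+# Hypothetical path to a pipeline trained with "experimental_coref"
+# and "experimental_span_resolver".
+nlp = spacy.load("./my_coref_pipeline")
+doc = nlp("John lives in Berlin. He likes it there.")
+
+# The coref component writes word-level clusters (length-1 spans); the span
+# resolver adds full spans under the output prefix, one group per cluster.
+for key, group in doc.spans.items():
+    if key.startswith("coref_clusters"):
+        print(key, [span.text for span in group])
+```
+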
+## Assigned Attributes {#assigned-attributes}
+
+Predictions will be saved to `Doc.spans` as [`SpanGroup`s](/api/spangroup).
+
+Input token spans will be read in using an input prefix, by default
+`"coref_head_clusters"`, and output spans will be saved using an output prefix
+(default `"coref_clusters"`) plus a serial number starting from one. The
+prefixes are configurable.
+
+| Location | Value |
+| ------------------------------------------------- | ------------------------------------------------------------------------- |
+| `Doc.spans[output_prefix + "_" + cluster_number]` | One group of predicted spans. Cluster number starts from 1. ~~SpanGroup~~ |
+
+## Config and implementation {#config}
+
+The default config is defined by the pipeline component factory and describes
+how the component should be configured. You can override its settings via the
+`config` argument on [`nlp.add_pipe`](/api/language#add_pipe) or in your
+[`config.cfg` for training](/usage/training#config). See the
+[model architectures](/api/architectures#coref-architectures) documentation for
+details on the architectures and their arguments and hyperparameters.
+
+> #### Example
+>
+> ```python
+> from spacy_experimental.coref.span_resolver_component import DEFAULT_SPAN_RESOLVER_MODEL
+> from spacy_experimental.coref.coref_util import DEFAULT_CLUSTER_PREFIX, DEFAULT_CLUSTER_HEAD_PREFIX
+> config = {
+>     "model": DEFAULT_SPAN_RESOLVER_MODEL,
+>     "input_prefix": DEFAULT_CLUSTER_HEAD_PREFIX,
+>     "output_prefix": DEFAULT_CLUSTER_PREFIX,
+> }
+> nlp.add_pipe("experimental_span_resolver", config=config)
+> ```
+
+| Setting | Description |
+| --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| `model` | The [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. Defaults to [SpanResolver](/api/architectures#SpanResolver). ~~Model~~ |
+| `input_prefix` | The prefix to use for input `SpanGroup`s. Defaults to `coref_head_clusters`. ~~str~~ |
+| `output_prefix` | The prefix for predicted `SpanGroup`s. Defaults to `coref_clusters`. ~~str~~ |
+
+## SpanResolver.\_\_init\_\_ {#init tag="method"}
+
+> #### Example
+>
+> ```python
+> # Construction via add_pipe with default model
+> span_resolver = nlp.add_pipe("experimental_span_resolver")
+>
+> # Construction via add_pipe with custom model
+> config = {"model": {"@architectures": "my_span_resolver.v1"}}
+> span_resolver = nlp.add_pipe("experimental_span_resolver", config=config)
+>
+> # Construction from class
+> from spacy_experimental.coref.span_resolver_component import SpanResolver
+> span_resolver = SpanResolver(nlp.vocab, model)
+> ```
+
+Create a new pipeline instance. In your application, you would normally use a
+shortcut for this and instantiate the component using its string name and
+[`nlp.add_pipe`](/api/language#add_pipe).
+
+| Name | Description |
+| --------------- | --------------------------------------------------------------------------------------------------- |
+| `vocab` | The shared vocabulary. ~~Vocab~~ |
+| `model` | The [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. ~~Model~~ |
+| `name` | String name of the component instance. Used to add entries to the `losses` during training. ~~str~~ |
+| _keyword-only_ | |
+| `input_prefix` | The prefix to use for input `SpanGroup`s. Defaults to `coref_head_clusters`. ~~str~~ |
+| `output_prefix` | The prefix for predicted `SpanGroup`s. Defaults to `coref_clusters`. ~~str~~ |
+
+## SpanResolver.\_\_call\_\_ {#call tag="method"}
+
+Apply the pipe to one document. The document is modified in place and returned.
+This usually happens under the hood when the `nlp` object is called on a text
+and all pipeline components are applied to the `Doc` in order. Both
+[`__call__`](#call) and [`pipe`](#pipe) delegate to the [`predict`](#predict)
+and [`set_annotations`](#set_annotations) methods.
+
+> #### Example
+>
+> ```python
+> doc = nlp("This is a sentence.")
+> span_resolver = nlp.add_pipe("experimental_span_resolver")
+> # This usually happens under the hood
+> processed = span_resolver(doc)
+> ```
+
+| Name | Description |
+| ----------- | -------------------------------- |
+| `doc` | The document to process. ~~Doc~~ |
+| **RETURNS** | The processed document. ~~Doc~~ |
+
+## SpanResolver.pipe {#pipe tag="method"}
+
+Apply the pipe to a stream of documents. This usually happens under the hood
+when the `nlp` object is called on a text and all pipeline components are
+applied to the `Doc` in order. Both [`__call__`](/api/span-resolver#call) and
+[`pipe`](/api/span-resolver#pipe) delegate to the
+[`predict`](/api/span-resolver#predict) and
+[`set_annotations`](/api/span-resolver#set_annotations) methods.
+
+> #### Example
+>
+> ```python
+> span_resolver = nlp.add_pipe("experimental_span_resolver")
+> for doc in span_resolver.pipe(docs, batch_size=50):
+> pass
+> ```
+
+| Name | Description |
+| -------------- | ------------------------------------------------------------- |
+| `stream` | A stream of documents. ~~Iterable[Doc]~~ |
+| _keyword-only_ | |
+| `batch_size` | The number of documents to buffer. Defaults to `128`. ~~int~~ |
+| **YIELDS** | The processed documents in order. ~~Doc~~ |
+
+## SpanResolver.initialize {#initialize tag="method"}
+
+Initialize the component for training. `get_examples` should be a function that
+returns an iterable of [`Example`](/api/example) objects. **At least one example
+should be supplied.** The data examples are used to **initialize the model** of
+the component and can either be the full training data or a representative
+sample. Initialization includes validating the network,
+[inferring missing shapes](https://thinc.ai/docs/usage-models#validation) and
+setting up the label scheme based on the data. This method is typically called
+by [`Language.initialize`](/api/language#initialize).
+
+> #### Example
+>
+> ```python
+> span_resolver = nlp.add_pipe("experimental_span_resolver")
+> span_resolver.initialize(lambda: examples, nlp=nlp)
+> ```
+
+| Name | Description |
+| -------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `get_examples` | Function that returns gold-standard annotations in the form of [`Example`](/api/example) objects. Must contain at least one `Example`. ~~Callable[[], Iterable[Example]]~~ |
+| _keyword-only_ | |
+| `nlp` | The current `nlp` object. Defaults to `None`. ~~Optional[Language]~~ |
+
+## SpanResolver.predict {#predict tag="method"}
+
+Apply the component's model to a batch of [`Doc`](/api/doc) objects, without
+modifying them. Predictions are returned as a list of `MentionClusters`, one for
+each input `Doc`. A `MentionClusters` instance is just a list of lists of pairs
+of `int`s, where each item corresponds to an input `SpanGroup`, and the `int`s
+correspond to token indices.
+
+> #### Example
+>
+> ```python
+> span_resolver = nlp.add_pipe("experimental_span_resolver")
+> spans = span_resolver.predict([doc1, doc2])
+> ```
+
+| Name | Description |
+| ----------- | ------------------------------------------------------------- |
+| `docs` | The documents to predict. ~~Iterable[Doc]~~ |
+| **RETURNS** | The predicted spans for the `Doc`s. ~~List[MentionClusters]~~ |
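+
+As a rough illustration (the token indices below are made up), the value
+returned for a single `Doc` could look like this, with one list of
+`(start, end)` token index pairs per input span group:
+
+```python
+spans = [
+    [  # MentionClusters for the first (and only) Doc
+        [(0, 1), (5, 6)],  # resolved spans for the first input SpanGroup
+        [(2, 4), (8, 9)],  # resolved spans for the second input SpanGroup
+    ],
+]
+```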
+
+## SpanResolver.set_annotations {#set_annotations tag="method"}
+
+Modify a batch of documents, saving predictions using the output prefix in
+`Doc.spans`.
+
+> #### Example
+>
+> ```python
+> span_resolver = nlp.add_pipe("experimental_span_resolver")
+> spans = span_resolver.predict([doc1, doc2])
+> span_resolver.set_annotations([doc1, doc2], spans)
+> ```
+
+| Name | Description |
+| ------- | ------------------------------------------------------------- |
+| `docs` | The documents to modify. ~~Iterable[Doc]~~ |
+| `spans` | The predicted spans for the `docs`. ~~List[MentionClusters]~~ |
+
+## SpanResolver.update {#update tag="method"}
+
+Learn from a batch of [`Example`](/api/example) objects. Delegates to
+[`predict`](/api/span-resolver#predict).
+
+> #### Example
+>
+> ```python
+> span_resolver = nlp.add_pipe("experimental_span_resolver")
+> optimizer = nlp.initialize()
+> losses = span_resolver.update(examples, sgd=optimizer)
+> ```
+
+| Name | Description |
+| -------------- | ------------------------------------------------------------------------------------------------------------------------ |
+| `examples` | A batch of [`Example`](/api/example) objects to learn from. ~~Iterable[Example]~~ |
+| _keyword-only_ | |
+| `drop` | The dropout rate. ~~float~~ |
+| `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ |
+| `losses` | Optional record of the loss during training. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ |
+| **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ |
+
+## SpanResolver.create_optimizer {#create_optimizer tag="method"}
+
+Create an optimizer for the pipeline component.
+
+> #### Example
+>
+> ```python
+> span_resolver = nlp.add_pipe("experimental_span_resolver")
+> optimizer = span_resolver.create_optimizer()
+> ```
+
+| Name | Description |
+| ----------- | ---------------------------- |
+| **RETURNS** | The optimizer. ~~Optimizer~~ |
+
+## SpanResolver.use_params {#use_params tag="method, contextmanager"}
+
+Modify the pipe's model to use the given parameter values. At the end of the
+context, the original parameters are restored.
+
+> #### Example
+>
+> ```python
+> span_resolver = nlp.add_pipe("experimental_span_resolver")
+> with span_resolver.use_params(optimizer.averages):
+> span_resolver.to_disk("/best_model")
+> ```
+
+| Name | Description |
+| -------- | -------------------------------------------------- |
+| `params` | The parameter values to use in the model. ~~dict~~ |
+
+## SpanResolver.to_disk {#to_disk tag="method"}
+
+Serialize the pipe to disk.
+
+> #### Example
+>
+> ```python
+> span_resolver = nlp.add_pipe("experimental_span_resolver")
+> span_resolver.to_disk("/path/to/span_resolver")
+> ```
+
+| Name | Description |
+| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
+| `path` | A path to a directory, which will be created if it doesn't exist. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ |
+| _keyword-only_ | |
+| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ |
+
+## SpanResolver.from_disk {#from_disk tag="method"}
+
+Load the pipe from disk. Modifies the object in place and returns it.
+
+> #### Example
+>
+> ```python
+> span_resolver = nlp.add_pipe("experimental_span_resolver")
+> span_resolver.from_disk("/path/to/span_resolver")
+> ```
+
+| Name | Description |
+| -------------- | ----------------------------------------------------------------------------------------------- |
+| `path` | A path to a directory. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ |
+| _keyword-only_ | |
+| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ |
+| **RETURNS** | The modified `SpanResolver` object. ~~SpanResolver~~ |
+
+## SpanResolver.to_bytes {#to_bytes tag="method"}
+
+> #### Example
+>
+> ```python
+> span_resolver = nlp.add_pipe("experimental_span_resolver")
+> span_resolver_bytes = span_resolver.to_bytes()
+> ```
+
+Serialize the pipe to a bytestring.
+
+| Name | Description |
+| -------------- | ------------------------------------------------------------------------------------------- |
+| _keyword-only_ | |
+| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ |
+| **RETURNS** | The serialized form of the `SpanResolver` object. ~~bytes~~ |
+
+## SpanResolver.from_bytes {#from_bytes tag="method"}
+
+Load the pipe from a bytestring. Modifies the object in place and returns it.
+
+> #### Example
+>
+> ```python
+> span_resolver_bytes = span_resolver.to_bytes()
+> span_resolver = nlp.add_pipe("experimental_span_resolver")
+> span_resolver.from_bytes(span_resolver_bytes)
+> ```
+
+| Name | Description |
+| -------------- | ------------------------------------------------------------------------------------------- |
+| `bytes_data` | The data to load from. ~~bytes~~ |
+| _keyword-only_ | |
+| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ |
+| **RETURNS** | The `SpanResolver` object. ~~SpanResolver~~ |
+
+## Serialization fields {#serialization-fields}
+
+During serialization, spaCy will export several data fields used to restore
+different aspects of the object. If needed, you can exclude them from
+serialization by passing in the string names via the `exclude` argument.
+
+> #### Example
+>
+> ```python
+> span_resolver.to_disk("/path", exclude=["vocab"])
+> ```
+
+| Name | Description |
+| ------- | -------------------------------------------------------------- |
+| `vocab` | The shared [`Vocab`](/api/vocab). |
+| `cfg` | The config file. You usually don't want to exclude this. |
+| `model` | The binary model data. You usually don't want to exclude this. |
diff --git a/website/docs/api/span.md b/website/docs/api/span.md
index be522c31f..264418006 100644
--- a/website/docs/api/span.md
+++ b/website/docs/api/span.md
@@ -186,14 +186,14 @@ the character indices don't map to a valid span.
> assert span.text == "New York"
> ```
-| Name | Description |
-| ------------------------------------ | ----------------------------------------------------------------------------------------- |
-| `start` | The index of the first character of the span. ~~int~~ |
-| `end` | The index of the last character after the span. ~~int~~ |
-| `label` | A label to attach to the span, e.g. for named entities. ~~Union[int, str]~~ |
-| `kb_id` 2.2 | An ID from a knowledge base to capture the meaning of a named entity. ~~Union[int, str]~~ |
-| `vector` | A meaning representation of the span. ~~numpy.ndarray[ndim=1, dtype=float32]~~ |
-| **RETURNS** | The newly constructed object or `None`. ~~Optional[Span]~~ |
+| Name | Description |
+| ----------- | ----------------------------------------------------------------------------------------- |
+| `start` | The index of the first character of the span. ~~int~~ |
+| `end` | The index of the last character after the span. ~~int~~ |
+| `label` | A label to attach to the span, e.g. for named entities. ~~Union[int, str]~~ |
+| `kb_id` | An ID from a knowledge base to capture the meaning of a named entity. ~~Union[int, str]~~ |
+| `vector` | A meaning representation of the span. ~~numpy.ndarray[ndim=1, dtype=float32]~~ |
+| **RETURNS** | The newly constructed object or `None`. ~~Optional[Span]~~ |
## Span.similarity {#similarity tag="method" model="vectors"}
@@ -544,26 +544,25 @@ overlaps with will be returned.
## Attributes {#attributes}
-| Name | Description |
-| --------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- |
-| `doc` | The parent document. ~~Doc~~ |
-| `tensor` 2.1.7 | The span's slice of the parent `Doc`'s tensor. ~~numpy.ndarray~~ |
-| `start` | The token offset for the start of the span. ~~int~~ |
-| `end` | The token offset for the end of the span. ~~int~~ |
-| `start_char` | The character offset for the start of the span. ~~int~~ |
-| `end_char` | The character offset for the end of the span. ~~int~~ |
-| `text` | A string representation of the span text. ~~str~~ |
-| `text_with_ws` | The text content of the span with a trailing whitespace character if the last token has one. ~~str~~ |
-| `orth` | ID of the verbatim text content. ~~int~~ |
-| `orth_` | Verbatim text content (identical to `Span.text`). Exists mostly for consistency with the other attributes. ~~str~~ |
-| `label` | The hash value of the span's label. ~~int~~ |
-| `label_` | The span's label. ~~str~~ |
-| `lemma_` | The span's lemma. Equivalent to `"".join(token.text_with_ws for token in span)`. ~~str~~ |
-| `kb_id` | The hash value of the knowledge base ID referred to by the span. ~~int~~ |
-| `kb_id_` | The knowledge base ID referred to by the span. ~~str~~ |
-| `ent_id` | Alias for `id`: the hash value of the span's ID. ~~int~~ |
-| `ent_id_` | Alias for `id_`: the span's ID. ~~str~~ |
-| `id` | The hash value of the span's ID. ~~int~~ |
-| `id_` | The span's ID. ~~str~~ |
-| `sentiment` | A scalar value indicating the positivity or negativity of the span. ~~float~~ |
-| `_` | User space for adding custom [attribute extensions](/usage/processing-pipelines#custom-components-attributes). ~~Underscore~~ |
+| Name | Description |
+| -------------- | ----------------------------------------------------------------------------------------------------------------------------- |
+| `doc` | The parent document. ~~Doc~~ |
+| `tensor` | The span's slice of the parent `Doc`'s tensor. ~~numpy.ndarray~~ |
+| `start` | The token offset for the start of the span. ~~int~~ |
+| `end` | The token offset for the end of the span. ~~int~~ |
+| `start_char` | The character offset for the start of the span. ~~int~~ |
+| `end_char` | The character offset for the end of the span. ~~int~~ |
+| `text` | A string representation of the span text. ~~str~~ |
+| `text_with_ws` | The text content of the span with a trailing whitespace character if the last token has one. ~~str~~ |
+| `orth` | ID of the verbatim text content. ~~int~~ |
+| `orth_` | Verbatim text content (identical to `Span.text`). Exists mostly for consistency with the other attributes. ~~str~~ |
+| `label` | The hash value of the span's label. ~~int~~ |
+| `label_` | The span's label. ~~str~~ |
+| `lemma_` | The span's lemma. Equivalent to `"".join(token.text_with_ws for token in span)`. ~~str~~ |
+| `kb_id` | The hash value of the knowledge base ID referred to by the span. ~~int~~ |
+| `kb_id_` | The knowledge base ID referred to by the span. ~~str~~ |
+| `ent_id` | Alias for `id`: the hash value of the span's ID. ~~int~~ |
+| `ent_id_` | Alias for `id_`: the span's ID. ~~str~~ |
+| `id` | The hash value of the span's ID. ~~int~~ |
+| `id_` | The span's ID. ~~str~~ |
+| `_` | User space for adding custom [attribute extensions](/usage/processing-pipelines#custom-components-attributes). ~~Underscore~~ |
diff --git a/website/docs/api/spangroup.md b/website/docs/api/spangroup.md
index 2d1cf73c4..bd9659acb 100644
--- a/website/docs/api/spangroup.md
+++ b/website/docs/api/spangroup.md
@@ -202,6 +202,23 @@ already present in the current span group.
| `other` | The span group or spans to append. ~~Union[SpanGroup, Iterable[Span]]~~ |
| **RETURNS** | The span group. ~~SpanGroup~~ |
+## SpanGroup.\_\_iter\_\_ {#iter tag="method" new="3.5"}
+
+Iterate over the spans in this span group.
+
+> #### Example
+>
+> ```python
+> doc = nlp("Their goi ng home")
+> doc.spans["errors"] = [doc[0:1], doc[1:3]]
+> for error_span in doc.spans["errors"]:
+> print(error_span)
+> ```
+
+| Name | Description |
+| ---------- | ----------------------------------- |
+| **YIELDS** | A span in this span group. ~~Span~~ |
+
## SpanGroup.append {#append tag="method"}
Add a [`Span`](/api/span) object to the group. The span must refer to the same
diff --git a/website/docs/api/spanruler.md b/website/docs/api/spanruler.md
index b573f7c58..1339d0967 100644
--- a/website/docs/api/spanruler.md
+++ b/website/docs/api/spanruler.md
@@ -13,6 +13,17 @@ The span ruler lets you add spans to [`Doc.spans`](/api/doc#spans) and/or
usage examples, see the docs on
[rule-based span matching](/usage/rule-based-matching#spanruler).
+
+
+As of spaCy v4, there is no separate `EntityRuler` class. The entity ruler is
+implemented as a special case of the `SpanRuler` component.
+
+See the [migration guide](/api/entityruler#migrating) for differences between
+the v3 `EntityRuler` and v4 `SpanRuler` implementations of the `entity_ruler`
+component.
+
+
+
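+As a brief sketch of what this means in practice (assuming the v4
+`entity_ruler` factory keeps the familiar pattern API), adding the component
+and its patterns works the same way as before, while the object you get back
+is a `SpanRuler` configured to write to `doc.ents`:
+
+```python
+import spacy
+
+nlp = spacy.blank("en")
+# In v4 this factory is backed by SpanRuler rather than a separate EntityRuler.
+ruler = nlp.add_pipe("entity_ruler")
+ruler.add_patterns([{"label": "ORG", "pattern": "spaCy"}])
+
+doc = nlp("spaCy is a library.")
+print([(ent.text, ent.label_) for ent in doc.ents])
+```
+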
## Assigned Attributes {#assigned-attributes}
Matches will be saved to `Doc.spans[spans_key]` as a
diff --git a/website/docs/api/stringstore.md b/website/docs/api/stringstore.md
index cd414b1f0..b509659ef 100644
--- a/website/docs/api/stringstore.md
+++ b/website/docs/api/stringstore.md
@@ -40,7 +40,8 @@ Get the number of strings in the store.
## StringStore.\_\_getitem\_\_ {#getitem tag="method"}
-Retrieve a string from a given hash, or vice versa.
+Retrieve a string from a given hash. If a string is passed as the input, add it
+to the store and return its hash.
> #### Example
>
@@ -51,14 +52,14 @@ Retrieve a string from a given hash, or vice versa.
> assert stringstore[apple_hash] == "apple"
> ```
-| Name | Description |
-| -------------- | ----------------------------------------------- |
-| `string_or_id` | The value to encode. ~~Union[bytes, str, int]~~ |
-| **RETURNS** | The value to be retrieved. ~~Union[str, int]~~ |
+| Name | Description |
+| ---------------- | ---------------------------------------------------------------------------- |
+| `string_or_hash` | The hash value to look up or the string to store. ~~Union[str, int]~~        |
+| **RETURNS** | The stored string or the hash of the newly added string. ~~Union[str, int]~~ |
## StringStore.\_\_contains\_\_ {#contains tag="method"}
-Check whether a string is in the store.
+Check whether a string or a hash is in the store.
> #### Example
>
@@ -68,15 +69,14 @@ Check whether a string is in the store.
> assert not "cherry" in stringstore
> ```
-| Name | Description |
-| ----------- | ----------------------------------------------- |
-| `string` | The string to check. ~~str~~ |
-| **RETURNS** | Whether the store contains the string. ~~bool~~ |
+| Name | Description |
+| ---------------- | ------------------------------------------------------- |
+| `string_or_hash` | The string or hash to check. ~~Union[str, int]~~ |
+| **RETURNS** | Whether the store contains the string or hash. ~~bool~~ |
## StringStore.\_\_iter\_\_ {#iter tag="method"}
-Iterate over the strings in the store, in order. Note that a newly initialized
-store will always include an empty string `""` at position `0`.
+Iterate over the stored strings in insertion order.
> #### Example
>
@@ -86,11 +86,59 @@ store will always include an empty string `""` at position `0`.
> assert all_strings == ["apple", "orange"]
> ```
-| Name | Description |
-| ---------- | ------------------------------ |
-| **YIELDS** | A string in the store. ~~str~~ |
+| Name | Description |
+| ----------- | ------------------------------ |
+| **RETURNS** | A string in the store. ~~str~~ |
-## StringStore.add {#add tag="method" new="2"}
+## StringStore.items {#items tag="method" new="4"}
+
+Iterate over the stored string-hash pairs in insertion order.
+
+> #### Example
+>
+> ```python
+> stringstore = StringStore(["apple", "orange"])
+> all_strings_and_hashes = stringstore.items()
+> assert all_strings_and_hashes == [("apple", 8566208034543834098), ("orange", 2208928596161743350)]
+> ```
+
+| Name | Description |
+| ----------- | ------------------------------------------------------ |
+| **RETURNS** | A list of string-hash pairs. ~~List[Tuple[str, int]]~~ |
+
+## StringStore.keys {#keys tag="method" new="4"}
+
+Iterate over the stored strings in insertion order.
+
+> #### Example
+>
+> ```python
+> stringstore = StringStore(["apple", "orange"])
+> all_strings = stringstore.keys()
+> assert all_strings == ["apple", "orange"]
+> ```
+
+| Name | Description |
+| ----------- | -------------------------------- |
+| **RETURNS** | A list of strings. ~~List[str]~~ |
+
+## StringStore.values {#values tag="method" new="4"}
+
+Iterate over the stored string hashes in insertion order.
+
+> #### Example
+>
+> ```python
+> stringstore = StringStore(["apple", "orange"])
+> all_hashes = stringstore.values()
+> assert all_hashes == [8566208034543834098, 2208928596161743350]
+> ```
+
+| Name | Description |
+| ----------- | -------------------------------------- |
+| **RETURNS** | A list of string hashes. ~~List[int]~~ |
+
+## StringStore.add {#add tag="method"}
Add a string to the `StringStore`.
@@ -110,7 +158,7 @@ Add a string to the `StringStore`.
| `string` | The string to add. ~~str~~ |
| **RETURNS** | The string's hash value. ~~int~~ |
-## StringStore.to_disk {#to_disk tag="method" new="2"}
+## StringStore.to_disk {#to_disk tag="method"}
Save the current state to a directory.
diff --git a/website/docs/api/tagger.md b/website/docs/api/tagger.md
index 0d77d9bf4..102793377 100644
--- a/website/docs/api/tagger.md
+++ b/website/docs/api/tagger.md
@@ -142,12 +142,6 @@ arguments it receives via the
[`[initialize.components]`](/api/data-formats#config-initialize) block in the
config.
-
-
-This method was previously called `begin_training`.
-
-
-
> #### Example
>
> ```python
diff --git a/website/docs/api/textcategorizer.md b/website/docs/api/textcategorizer.md
index d8a609693..b69c87a28 100644
--- a/website/docs/api/textcategorizer.md
+++ b/website/docs/api/textcategorizer.md
@@ -63,7 +63,6 @@ architectures and their arguments and hyperparameters.
> ```python
> from spacy.pipeline.textcat import DEFAULT_SINGLE_TEXTCAT_MODEL
> config = {
-> "threshold": 0.5,
> "model": DEFAULT_SINGLE_TEXTCAT_MODEL,
> }
> nlp.add_pipe("textcat", config=config)
@@ -82,7 +81,7 @@ architectures and their arguments and hyperparameters.
| Setting | Description |
| ----------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `threshold` | Cutoff to consider a prediction "positive", relevant when printing accuracy results. ~~float~~ |
+| `threshold` | Cutoff to consider a prediction "positive", relevant for `textcat_multilabel` when calculating accuracy scores. ~~float~~ |
| `model` | A model instance that predicts scores for each category. Defaults to [TextCatEnsemble](/api/architectures#TextCatEnsemble). ~~Model[List[Doc], List[Floats2d]]~~ |
| `scorer` | The scoring method. Defaults to [`Scorer.score_cats`](/api/scorer#score_cats) for the attribute `"cats"`. ~~Optional[Callable]~~ |
@@ -123,7 +122,7 @@ shortcut for this and instantiate the component using its string name and
| `model` | The Thinc [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. ~~Model[List[Doc], List[Floats2d]]~~ |
| `name` | String name of the component instance. Used to add entries to the `losses` during training. ~~str~~ |
| _keyword-only_ | |
-| `threshold` | Cutoff to consider a prediction "positive", relevant when printing accuracy results. ~~float~~ |
+| `threshold` | Cutoff to consider a prediction "positive", relevant for `textcat_multilabel` when calculating accuracy scores. ~~float~~ |
| `scorer` | The scoring method. Defaults to [`Scorer.score_cats`](/api/scorer#score_cats) for the attribute `"cats"`. ~~Optional[Callable]~~ |
| `save_activations` 4.0 | Save activations in `Doc` when annotating. The supported activations is `"probabilities"`. ~~Union[bool, list[str]]~~ |
@@ -188,12 +187,6 @@ arguments it receives via the
[`[initialize.components]`](/api/data-formats#config-initialize) block in the
config.
-
-
-This method was previously called `begin_training`.
-
-
-
> #### Example
>
> ```python
diff --git a/website/docs/api/token.md b/website/docs/api/token.md
index 73447e4d3..25155d961 100644
--- a/website/docs/api/token.md
+++ b/website/docs/api/token.md
@@ -403,75 +403,74 @@ The L2 norm of the token's vector representation.
## Attributes {#attributes}
-| Name | Description |
-| -------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `doc` | The parent document. ~~Doc~~ |
-| `lex` 3 | The underlying lexeme. ~~Lexeme~~ |
-| `sent` 2.0.12 | The sentence span that this token is a part of. ~~Span~~ |
-| `text` | Verbatim text content. ~~str~~ |
-| `text_with_ws` | Text content, with trailing space character if present. ~~str~~ |
-| `whitespace_` | Trailing space character if present. ~~str~~ |
-| `orth` | ID of the verbatim text content. ~~int~~ |
-| `orth_` | Verbatim text content (identical to `Token.text`). Exists mostly for consistency with the other attributes. ~~str~~ |
-| `vocab` | The vocab object of the parent `Doc`. ~~vocab~~ |
-| `tensor` 2.1.7 | The token's slice of the parent `Doc`'s tensor. ~~numpy.ndarray~~ |
-| `head` | The syntactic parent, or "governor", of this token. ~~Token~~ |
-| `left_edge` | The leftmost token of this token's syntactic descendants. ~~Token~~ |
-| `right_edge` | The rightmost token of this token's syntactic descendants. ~~Token~~ |
-| `i` | The index of the token within the parent document. ~~int~~ |
-| `ent_type` | Named entity type. ~~int~~ |
-| `ent_type_` | Named entity type. ~~str~~ |
-| `ent_iob` | IOB code of named entity tag. `3` means the token begins an entity, `2` means it is outside an entity, `1` means it is inside an entity, and `0` means no entity tag is set. ~~int~~ |
-| `ent_iob_` | IOB code of named entity tag. "B" means the token begins an entity, "I" means it is inside an entity, "O" means it is outside an entity, and "" means no entity tag is set. ~~str~~ |
-| `ent_kb_id` 2.2 | Knowledge base ID that refers to the named entity this token is a part of, if any. ~~int~~ |
-| `ent_kb_id_` 2.2 | Knowledge base ID that refers to the named entity this token is a part of, if any. ~~str~~ |
-| `ent_id` | ID of the entity the token is an instance of, if any. ~~int~~ |
-| `ent_id_` | ID of the entity the token is an instance of, if any. ~~str~~ |
-| `lemma` | Base form of the token, with no inflectional suffixes. ~~int~~ |
-| `lemma_` | Base form of the token, with no inflectional suffixes. ~~str~~ |
-| `norm` | The token's norm, i.e. a normalized form of the token text. Can be set in the language's [tokenizer exceptions](/usage/linguistic-features#language-data). ~~int~~ |
-| `norm_` | The token's norm, i.e. a normalized form of the token text. Can be set in the language's [tokenizer exceptions](/usage/linguistic-features#language-data). ~~str~~ |
-| `lower` | Lowercase form of the token. ~~int~~ |
-| `lower_` | Lowercase form of the token text. Equivalent to `Token.text.lower()`. ~~str~~ |
-| `shape` | Transform of the token's string to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example,`"Xxxx"`or`"dd"`. ~~int~~ |
-| `shape_` | Transform of the token's string to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example,`"Xxxx"`or`"dd"`. ~~str~~ |
-| `prefix` | Hash value of a length-N substring from the start of the token. Defaults to `N=1`. ~~int~~ |
-| `prefix_` | A length-N substring from the start of the token. Defaults to `N=1`. ~~str~~ |
-| `suffix` | Hash value of a length-N substring from the end of the token. Defaults to `N=3`. ~~int~~ |
-| `suffix_` | Length-N substring from the end of the token. Defaults to `N=3`. ~~str~~ |
-| `is_alpha` | Does the token consist of alphabetic characters? Equivalent to `token.text.isalpha()`. ~~bool~~ |
-| `is_ascii` | Does the token consist of ASCII characters? Equivalent to `all(ord(c) < 128 for c in token.text)`. ~~bool~~ |
-| `is_digit` | Does the token consist of digits? Equivalent to `token.text.isdigit()`. ~~bool~~ |
-| `is_lower` | Is the token in lowercase? Equivalent to `token.text.islower()`. ~~bool~~ |
-| `is_upper` | Is the token in uppercase? Equivalent to `token.text.isupper()`. ~~bool~~ |
-| `is_title` | Is the token in titlecase? Equivalent to `token.text.istitle()`. ~~bool~~ |
-| `is_punct` | Is the token punctuation? ~~bool~~ |
-| `is_left_punct` | Is the token a left punctuation mark, e.g. `"("` ? ~~bool~~ |
-| `is_right_punct` | Is the token a right punctuation mark, e.g. `")"` ? ~~bool~~ |
-| `is_sent_start` | Does the token start a sentence? ~~bool~~ or `None` if unknown. Defaults to `True` for the first token in the `Doc`. |
-| `is_sent_end` | Does the token end a sentence? ~~bool~~ or `None` if unknown. |
-| `is_space` | Does the token consist of whitespace characters? Equivalent to `token.text.isspace()`. ~~bool~~ |
-| `is_bracket` | Is the token a bracket? ~~bool~~ |
-| `is_quote` | Is the token a quotation mark? ~~bool~~ |
-| `is_currency` 2.0.8 | Is the token a currency symbol? ~~bool~~ |
-| `like_url` | Does the token resemble a URL? ~~bool~~ |
-| `like_num` | Does the token represent a number? e.g. "10.9", "10", "ten", etc. ~~bool~~ |
-| `like_email` | Does the token resemble an email address? ~~bool~~ |
-| `is_oov` | Is the token out-of-vocabulary (i.e. does it not have a word vector)? ~~bool~~ |
-| `is_stop` | Is the token part of a "stop list"? ~~bool~~ |
-| `pos` | Coarse-grained part-of-speech from the [Universal POS tag set](https://universaldependencies.org/u/pos/). ~~int~~ |
-| `pos_` | Coarse-grained part-of-speech from the [Universal POS tag set](https://universaldependencies.org/u/pos/). ~~str~~ |
-| `tag` | Fine-grained part-of-speech. ~~int~~ |
-| `tag_` | Fine-grained part-of-speech. ~~str~~ |
-| `morph` 3 | Morphological analysis. ~~MorphAnalysis~~ |
-| `dep` | Syntactic dependency relation. ~~int~~ |
-| `dep_` | Syntactic dependency relation. ~~str~~ |
-| `lang` | Language of the parent document's vocabulary. ~~int~~ |
-| `lang_` | Language of the parent document's vocabulary. ~~str~~ |
-| `prob` | Smoothed log probability estimate of token's word type (context-independent entry in the vocabulary). ~~float~~ |
-| `idx` | The character offset of the token within the parent document. ~~int~~ |
-| `sentiment` | A scalar value indicating the positivity or negativity of the token. ~~float~~ |
-| `lex_id` | Sequential ID of the token's lexical type, used to index into tables, e.g. for word vectors. ~~int~~ |
-| `rank` | Sequential ID of the token's lexical type, used to index into tables, e.g. for word vectors. ~~int~~ |
-| `cluster` | Brown cluster ID. ~~int~~ |
-| `_` | User space for adding custom [attribute extensions](/usage/processing-pipelines#custom-components-attributes). ~~Underscore~~ |
+| Name | Description |
+| ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `doc` | The parent document. ~~Doc~~ |
+| `lex` 3 | The underlying lexeme. ~~Lexeme~~ |
+| `sent` | The sentence span that this token is a part of. ~~Span~~ |
+| `text` | Verbatim text content. ~~str~~ |
+| `text_with_ws` | Text content, with trailing space character if present. ~~str~~ |
+| `whitespace_` | Trailing space character if present. ~~str~~ |
+| `orth` | ID of the verbatim text content. ~~int~~ |
+| `orth_` | Verbatim text content (identical to `Token.text`). Exists mostly for consistency with the other attributes. ~~str~~ |
+| `vocab` | The vocab object of the parent `Doc`. ~~vocab~~ |
+| `tensor` | The token's slice of the parent `Doc`'s tensor. ~~numpy.ndarray~~ |
+| `head` | The syntactic parent, or "governor", of this token. ~~Token~~ |
+| `left_edge` | The leftmost token of this token's syntactic descendants. ~~Token~~ |
+| `right_edge` | The rightmost token of this token's syntactic descendants. ~~Token~~ |
+| `i` | The index of the token within the parent document. ~~int~~ |
+| `ent_type` | Named entity type. ~~int~~ |
+| `ent_type_` | Named entity type. ~~str~~ |
+| `ent_iob` | IOB code of named entity tag. `3` means the token begins an entity, `2` means it is outside an entity, `1` means it is inside an entity, and `0` means no entity tag is set. ~~int~~ |
+| `ent_iob_` | IOB code of named entity tag. "B" means the token begins an entity, "I" means it is inside an entity, "O" means it is outside an entity, and "" means no entity tag is set. ~~str~~ |
+| `ent_kb_id` | Knowledge base ID that refers to the named entity this token is a part of, if any. ~~int~~ |
+| `ent_kb_id_` | Knowledge base ID that refers to the named entity this token is a part of, if any. ~~str~~ |
+| `ent_id`                           | ID of the entity the token is an instance of, if any. Currently not used, but potentially useful for coreference resolution. ~~int~~ |
+| `ent_id_`                          | ID of the entity the token is an instance of, if any. Currently not used, but potentially useful for coreference resolution. ~~str~~ |
+| `lemma` | Base form of the token, with no inflectional suffixes. ~~int~~ |
+| `lemma_` | Base form of the token, with no inflectional suffixes. ~~str~~ |
+| `norm` | The token's norm, i.e. a normalized form of the token text. Can be set in the language's [tokenizer exceptions](/usage/linguistic-features#language-data). ~~int~~ |
+| `norm_` | The token's norm, i.e. a normalized form of the token text. Can be set in the language's [tokenizer exceptions](/usage/linguistic-features#language-data). ~~str~~ |
+| `lower` | Lowercase form of the token. ~~int~~ |
+| `lower_` | Lowercase form of the token text. Equivalent to `Token.text.lower()`. ~~str~~ |
+| `shape` | Transform of the token's string to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example,`"Xxxx"`or`"dd"`. ~~int~~ |
+| `shape_` | Transform of the token's string to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example,`"Xxxx"`or`"dd"`. ~~str~~ |
+| `prefix` | Hash value of a length-N substring from the start of the token. Defaults to `N=1`. ~~int~~ |
+| `prefix_` | A length-N substring from the start of the token. Defaults to `N=1`. ~~str~~ |
+| `suffix` | Hash value of a length-N substring from the end of the token. Defaults to `N=3`. ~~int~~ |
+| `suffix_` | Length-N substring from the end of the token. Defaults to `N=3`. ~~str~~ |
+| `is_alpha` | Does the token consist of alphabetic characters? Equivalent to `token.text.isalpha()`. ~~bool~~ |
+| `is_ascii` | Does the token consist of ASCII characters? Equivalent to `all(ord(c) < 128 for c in token.text)`. ~~bool~~ |
+| `is_digit` | Does the token consist of digits? Equivalent to `token.text.isdigit()`. ~~bool~~ |
+| `is_lower` | Is the token in lowercase? Equivalent to `token.text.islower()`. ~~bool~~ |
+| `is_upper` | Is the token in uppercase? Equivalent to `token.text.isupper()`. ~~bool~~ |
+| `is_title` | Is the token in titlecase? Equivalent to `token.text.istitle()`. ~~bool~~ |
+| `is_punct` | Is the token punctuation? ~~bool~~ |
+| `is_left_punct` | Is the token a left punctuation mark, e.g. `"("` ? ~~bool~~ |
+| `is_right_punct` | Is the token a right punctuation mark, e.g. `")"` ? ~~bool~~ |
+| `is_sent_start` | Does the token start a sentence? ~~bool~~ or `None` if unknown. Defaults to `True` for the first token in the `Doc`. |
+| `is_sent_end` | Does the token end a sentence? ~~bool~~ or `None` if unknown. |
+| `is_space` | Does the token consist of whitespace characters? Equivalent to `token.text.isspace()`. ~~bool~~ |
+| `is_bracket` | Is the token a bracket? ~~bool~~ |
+| `is_quote` | Is the token a quotation mark? ~~bool~~ |
+| `is_currency` | Is the token a currency symbol? ~~bool~~ |
+| `like_url` | Does the token resemble a URL? ~~bool~~ |
+| `like_num` | Does the token represent a number? e.g. "10.9", "10", "ten", etc. ~~bool~~ |
+| `like_email` | Does the token resemble an email address? ~~bool~~ |
+| `is_oov` | Is the token out-of-vocabulary (i.e. does it not have a word vector)? ~~bool~~ |
+| `is_stop` | Is the token part of a "stop list"? ~~bool~~ |
+| `pos` | Coarse-grained part-of-speech from the [Universal POS tag set](https://universaldependencies.org/u/pos/). ~~int~~ |
+| `pos_` | Coarse-grained part-of-speech from the [Universal POS tag set](https://universaldependencies.org/u/pos/). ~~str~~ |
+| `tag` | Fine-grained part-of-speech. ~~int~~ |
+| `tag_` | Fine-grained part-of-speech. ~~str~~ |
+| `morph` 3 | Morphological analysis. ~~MorphAnalysis~~ |
+| `dep` | Syntactic dependency relation. ~~int~~ |
+| `dep_` | Syntactic dependency relation. ~~str~~ |
+| `lang` | Language of the parent document's vocabulary. ~~int~~ |
+| `lang_` | Language of the parent document's vocabulary. ~~str~~ |
+| `prob` | Smoothed log probability estimate of token's word type (context-independent entry in the vocabulary). ~~float~~ |
+| `idx` | The character offset of the token within the parent document. ~~int~~ |
+| `lex_id` | Sequential ID of the token's lexical type, used to index into tables, e.g. for word vectors. ~~int~~ |
+| `rank` | Sequential ID of the token's lexical type, used to index into tables, e.g. for word vectors. ~~int~~ |
+| `cluster` | Brown cluster ID. ~~int~~ |
+| `_` | User space for adding custom [attribute extensions](/usage/processing-pipelines#custom-components-attributes). ~~Underscore~~ |
diff --git a/website/docs/api/top-level.md b/website/docs/api/top-level.md
index 220b2d6e9..883c5e3b9 100644
--- a/website/docs/api/top-level.md
+++ b/website/docs/api/top-level.md
@@ -45,16 +45,16 @@ specified separately using the new `exclude` keyword argument.
> nlp = spacy.load("en_core_web_sm", exclude=["parser", "tagger"])
> ```
-| Name | Description |
-| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
-| `name` | Pipeline to load, i.e. package name or path. ~~Union[str, Path]~~ |
-| _keyword-only_ | |
-| `vocab` | Optional shared vocab to pass in on initialization. If `True` (default), a new `Vocab` object will be created. ~~Union[Vocab, bool]~~ |
-| `disable` | Name(s) of pipeline component(s) to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [nlp.enable_pipe](/api/language#enable_pipe). ~~Union[str, Iterable[str]]~~ |
-| `enable` 3.4 | Name(s) of pipeline component(s) to [enable](/usage/processing-pipelines#disabling). All other pipes will be disabled. ~~Union[str, Iterable[str]]~~ |
-| `exclude` 3 | Name(s) of pipeline component(s) to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~Union[str, Iterable[str]]~~ |
-| `config` 3 | Optional config overrides, either as nested dict or dict keyed by section value in dot notation, e.g. `"components.name.value"`. ~~Union[Dict[str, Any], Config]~~ |
-| **RETURNS** | A `Language` object with the loaded pipeline. ~~Language~~ |
+| Name | Description |
+| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| `name` | Pipeline to load, i.e. package name or path. ~~Union[str, Path]~~ |
+| _keyword-only_ | |
+| `vocab` | Optional shared vocab to pass in on initialization. If `True` (default), a new `Vocab` object will be created. ~~Union[Vocab, bool]~~ |
+| `disable` | Name(s) of pipeline component(s) to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [nlp.enable_pipe](/api/language#enable_pipe). Is merged with the config entry `nlp.disabled`. ~~Union[str, Iterable[str]]~~ |
+| `enable` 3.4 | Name(s) of pipeline component(s) to [enable](/usage/processing-pipelines#disabling). All other pipes will be disabled. ~~Union[str, Iterable[str]]~~ |
+| `exclude` 3 | Name(s) of pipeline component(s) to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~Union[str, Iterable[str]]~~ |
+| `config` 3 | Optional config overrides, either as nested dict or dict keyed by section value in dot notation, e.g. `"components.name.value"`. ~~Union[Dict[str, Any], Config]~~ |
+| **RETURNS** | A `Language` object with the loaded pipeline. ~~Language~~ |
Essentially, `spacy.load()` is a convenience wrapper that reads the pipeline's
[`config.cfg`](/api/data-formats#config), uses the language and pipeline
@@ -354,22 +354,22 @@ If a setting is not present in the options, the default value will be used.
> displacy.serve(doc, style="dep", options=options)
> ```
-| Name | Description |
-| ------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------- |
-| `fine_grained` | Use fine-grained part-of-speech tags (`Token.tag_`) instead of coarse-grained tags (`Token.pos_`). Defaults to `False`. ~~bool~~ |
-| `add_lemma` 2.2.4 | Print the lemmas in a separate row below the token texts. Defaults to `False`. ~~bool~~ |
-| `collapse_punct` | Attach punctuation to tokens. Can make the parse more readable, as it prevents long arcs to attach punctuation. Defaults to `True`. ~~bool~~ |
-| `collapse_phrases` | Merge noun phrases into one token. Defaults to `False`. ~~bool~~ |
-| `compact` | "Compact mode" with square arrows that takes up less space. Defaults to `False`. ~~bool~~ |
-| `color` | Text color (HEX, RGB or color names). Defaults to `"#000000"`. ~~str~~ |
-| `bg` | Background color (HEX, RGB or color names). Defaults to `"#ffffff"`. ~~str~~ |
-| `font` | Font name or font family for all text. Defaults to `"Arial"`. ~~str~~ |
-| `offset_x` | Spacing on left side of the SVG in px. Defaults to `50`. ~~int~~ |
-| `arrow_stroke` | Width of arrow path in px. Defaults to `2`. ~~int~~ |
-| `arrow_width` | Width of arrow head in px. Defaults to `10` in regular mode and `8` in compact mode. ~~int~~ |
-| `arrow_spacing` | Spacing between arrows in px to avoid overlaps. Defaults to `20` in regular mode and `12` in compact mode. ~~int~~ |
-| `word_spacing` | Vertical spacing between words and arcs in px. Defaults to `45`. ~~int~~ |
-| `distance` | Distance between words in px. Defaults to `175` in regular mode and `150` in compact mode. ~~int~~ |
+| Name | Description |
+| ------------------ | -------------------------------------------------------------------------------------------------------------------------------------------- |
+| `fine_grained` | Use fine-grained part-of-speech tags (`Token.tag_`) instead of coarse-grained tags (`Token.pos_`). Defaults to `False`. ~~bool~~ |
+| `add_lemma` | Print the lemmas in a separate row below the token texts. Defaults to `False`. ~~bool~~ |
+| `collapse_punct` | Attach punctuation to tokens. Can make the parse more readable, as it prevents long arcs to attach punctuation. Defaults to `True`. ~~bool~~ |
+| `collapse_phrases` | Merge noun phrases into one token. Defaults to `False`. ~~bool~~ |
+| `compact` | "Compact mode" with square arrows that takes up less space. Defaults to `False`. ~~bool~~ |
+| `color` | Text color (HEX, RGB or color names). Defaults to `"#000000"`. ~~str~~ |
+| `bg` | Background color (HEX, RGB or color names). Defaults to `"#ffffff"`. ~~str~~ |
+| `font` | Font name or font family for all text. Defaults to `"Arial"`. ~~str~~ |
+| `offset_x` | Spacing on left side of the SVG in px. Defaults to `50`. ~~int~~ |
+| `arrow_stroke` | Width of arrow path in px. Defaults to `2`. ~~int~~ |
+| `arrow_width` | Width of arrow head in px. Defaults to `10` in regular mode and `8` in compact mode. ~~int~~ |
+| `arrow_spacing` | Spacing between arrows in px to avoid overlaps. Defaults to `20` in regular mode and `12` in compact mode. ~~int~~ |
+| `word_spacing` | Vertical spacing between words and arcs in px. Defaults to `45`. ~~int~~ |
+| `distance` | Distance between words in px. Defaults to `175` in regular mode and `150` in compact mode. ~~int~~ |
#### Named Entity Visualizer options {#displacy_options-ent}
@@ -385,7 +385,7 @@ If a setting is not present in the options, the default value will be used.
| ------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `ents` | Entity types to highlight or `None` for all types (default). ~~Optional[List[str]]~~ |
| `colors` | Color overrides. Entity types should be mapped to color names or values. ~~Dict[str, str]~~ |
-| `template` 2.2 | Optional template to overwrite the HTML used to render entity spans. Should be a format string and can use `{bg}`, `{text}` and `{label}`. See [`templates.py`](%%GITHUB_SPACY/spacy/displacy/templates.py) for examples. ~~Optional[str]~~ |
+| `template` | Optional template to overwrite the HTML used to render entity spans. Should be a format string and can use `{bg}`, `{text}` and `{label}`. See [`templates.py`](%%GITHUB_SPACY/spacy/displacy/templates.py) for examples. ~~Optional[str]~~ |
| `kb_url_template` 3.2.1 | Optional template to construct the KB url for the entity to link to. Expects a python f-string format with single field to fill in. ~~Optional[str]~~ |
#### Span Visualizer options {#displacy_options-span}
@@ -513,7 +513,7 @@ a [Weights & Biases](https://www.wandb.com/) dashboard.
Instead of using one of the built-in loggers, you can
[implement your own](/usage/training#custom-logging).
-#### spacy.ConsoleLogger.v2 {#ConsoleLogger tag="registered function"}
+#### spacy.ConsoleLogger.v2 {tag="registered function"}
> #### Example config
>
@@ -564,11 +564,33 @@ start decreasing across epochs.
-| Name | Description |
-| ---------------- | --------------------------------------------------------------------- |
-| `progress_bar` | Whether the logger should print the progress bar ~~bool~~ |
-| `console_output` | Whether the logger should print the logs on the console. ~~bool~~ |
-| `output_file` | The file to save the training logs to. ~~Optional[Union[str, Path]]~~ |
+| Name | Description |
+| ---------------- | ---------------------------------------------------------------------------------------------------------------------------- |
+| `progress_bar` | Whether the logger should print a progress bar tracking the steps till the next evaluation pass (default: `False`). ~~bool~~ |
+| `console_output` | Whether the logger should print the logs in the console (default: `True`). ~~bool~~ |
+| `output_file` | The file to save the training logs to (default: `None`). ~~Optional[Union[str, Path]]~~ |
+
+#### spacy.ConsoleLogger.v3 {#ConsoleLogger tag="registered function"}
+
+> #### Example config
+>
+> ```ini
+> [training.logger]
+> @loggers = "spacy.ConsoleLogger.v3"
+> progress_bar = "all_steps"
+> console_output = true
+> output_file = "training_log.jsonl"
+> ```
+
+Writes the results of a training step to the console in a tabular format and
+optionally saves them to a `jsonl` file.
+
+| Name | Description |
+| ---------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `progress_bar`   | Type of progress bar to show in the console: `"train"`, `"eval"` or `None`. The bar tracks the number of steps until `training.max_steps` and `training.eval_frequency` are reached respectively (default: `None`). ~~Optional[str]~~ |
+| `console_output` | Whether the logger should print the logs in the console (default: `True`). ~~bool~~ |
+| `output_file` | The file to save the training logs to (default: `None`). ~~Optional[Union[str, Path]]~~ |
## Readers {#readers}
@@ -887,6 +909,27 @@ backprop passes.
| `backprop_color` | Color identifier for backpropagation passes. Defaults to `-1`. ~~int~~ |
| **CREATES** | A function that takes the current `nlp` and wraps forward/backprop passes in NVTX ranges. ~~Callable[[Language], Language]~~ |
+### spacy.models_and_pipes_with_nvtx_range.v1 {#models_and_pipes_with_nvtx_range tag="registered function" new="3.4"}
+
+> #### Example config
+>
+> ```ini
+> [nlp]
+> after_pipeline_creation = {"@callbacks":"spacy.models_and_pipes_with_nvtx_range.v1"}
+> ```
+
+Recursively wrap both the models and methods of each pipe using
+[NVTX](https://nvidia.github.io/NVTX/) range markers. By default, the following
+methods are wrapped: `pipe`, `predict`, `set_annotations`, `update`, `rehearse`,
+`get_loss`, `initialize`, `begin_update`, `finish_update`.
+
+| Name | Description |
+| --------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `forward_color` | Color identifier for model forward passes. Defaults to `-1`. ~~int~~ |
+| `backprop_color` | Color identifier for model backpropagation passes. Defaults to `-1`. ~~int~~ |
+| `additional_pipe_functions` | Additional pipeline methods to wrap. Keys are pipeline names and values are lists of method identifiers. Defaults to `None`. ~~Optional[Dict[str, List[str]]]~~ |
+| **CREATES** | A function that takes the current `nlp` and wraps pipe models and methods in NVTX ranges. ~~Callable[[Language], Language]~~ |
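+
+For instance, a sketch of enabling this callback with an extra wrapped method
+via config overrides at load time (the pipe name `my_tagger` and the method
+name `custom_method` are hypothetical, and NVTX profiling assumes a GPU setup):
+
+```python
+import spacy
+
+config = {
+    "nlp": {
+        "after_pipeline_creation": {
+            "@callbacks": "spacy.models_and_pipes_with_nvtx_range.v1",
+            "additional_pipe_functions": {"my_tagger": ["custom_method"]},
+        }
+    }
+}
+nlp = spacy.load("en_core_web_sm", config=config)
+```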
+
## Training data and alignment {#gold source="spacy/training"}
### training.offsets_to_biluo_tags {#offsets_to_biluo_tags tag="function"}
@@ -983,6 +1026,54 @@ This method was previously available as `spacy.gold.spans_from_biluo_tags`.
| `tags` | A sequence of [BILUO](/usage/linguistic-features#accessing-ner) tags with each tag describing one token. Each tag string will be of the form of either `""`, `"O"` or `"{action}-{label}"`, where action is one of `"B"`, `"I"`, `"L"`, `"U"`. ~~List[str]~~ |
| **RETURNS** | A sequence of `Span` objects with added entity labels. ~~List[Span]~~ |
+### training.biluo_to_iob {#biluo_to_iob tag="function"}
+
+Convert a sequence of [BILUO](/usage/linguistic-features#accessing-ner) tags to
+[IOB](/usage/linguistic-features#accessing-ner) tags. This is useful if you want
+to use the BILUO tags with a model that only supports IOB tags.
+
+> #### Example
+>
+> ```python
+> from spacy.training import biluo_to_iob
+>
+> tags = ["O", "O", "B-LOC", "I-LOC", "L-LOC", "O"]
+> iob_tags = biluo_to_iob(tags)
+> assert iob_tags == ["O", "O", "B-LOC", "I-LOC", "I-LOC", "O"]
+> ```
+
+| Name | Description |
+| ----------- | --------------------------------------------------------------------------------------- |
+| `tags` | A sequence of [BILUO](/usage/linguistic-features#accessing-ner) tags. ~~Iterable[str]~~ |
+| **RETURNS** | A list of [IOB](/usage/linguistic-features#accessing-ner) tags. ~~List[str]~~ |
+
+### training.iob_to_biluo {#iob_to_biluo tag="function"}
+
+Convert a sequence of [IOB](/usage/linguistic-features#accessing-ner) tags to
+[BILUO](/usage/linguistic-features#accessing-ner) tags. This is useful if you
+want to use the IOB tags with a model that only supports BILUO tags.
+
+
+
+This method was previously available as `spacy.gold.iob_to_biluo`.
+
+
+
+> #### Example
+>
+> ```python
+> from spacy.training import iob_to_biluo
+>
+> tags = ["O", "O", "B-LOC", "I-LOC", "O"]
+> biluo_tags = iob_to_biluo(tags)
+> assert biluo_tags == ["O", "O", "B-LOC", "L-LOC", "O"]
+> ```
+
+| Name | Description |
+| ----------- | ------------------------------------------------------------------------------------- |
+| `tags` | A sequence of [IOB](/usage/linguistic-features#accessing-ner) tags. ~~Iterable[str]~~ |
+| **RETURNS** | A list of [BILUO](/usage/linguistic-features#accessing-ner) tags. ~~List[str]~~ |
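+
+The two helpers combine naturally with
+[`offsets_to_biluo_tags`](#offsets_to_biluo_tags), for example to produce IOB
+tags for an external model. A minimal sketch, with made-up text and character
+offsets for illustration:
+
+```python
+import spacy
+from spacy.training import offsets_to_biluo_tags, biluo_to_iob
+
+nlp = spacy.blank("en")
+doc = nlp("I like London.")
+entities = [(7, 13, "LOC")]  # character offsets for "London"
+biluo_tags = offsets_to_biluo_tags(doc, entities)  # ["O", "O", "U-LOC", "O"]
+iob_tags = biluo_to_iob(biluo_tags)                # ["O", "O", "B-LOC", "O"]
+print(iob_tags)
+```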
+
## Utility functions {#util source="spacy/util.py"}
spaCy comes with a small collection of utility functions located in
diff --git a/website/docs/api/vectors.md b/website/docs/api/vectors.md
index 9636ea04c..d4702b592 100644
--- a/website/docs/api/vectors.md
+++ b/website/docs/api/vectors.md
@@ -50,7 +50,7 @@ modified later.
| _keyword-only_ | |
| `strings` | The string store. A new string store is created if one is not provided. Defaults to `None`. ~~Optional[StringStore]~~ |
| `shape` | Size of the table as `(n_entries, n_columns)`, the number of entries and number of columns. Not required if you're initializing the object with `data` and `keys`. ~~Tuple[int, int]~~ |
-| `data` | The vector data. ~~numpy.ndarray[ndim=1, dtype=float32]~~ |
+| `data` | The vector data. ~~numpy.ndarray[ndim=2, dtype=float32]~~ |
| `keys` | A sequence of keys aligned with the data. ~~Iterable[Union[str, int]]~~ |
| `name` | A name to identify the vectors table. ~~str~~ |
| `mode` 3.2 | Vectors mode: `"default"` or [`"floret"`](https://github.com/explosion/floret) (default: `"default"`). ~~str~~ |
diff --git a/website/docs/api/vocab.md b/website/docs/api/vocab.md
index 2e4a206ec..5e4de219a 100644
--- a/website/docs/api/vocab.md
+++ b/website/docs/api/vocab.md
@@ -21,15 +21,15 @@ Create the vocabulary.
> vocab = Vocab(strings=["hello", "world"])
> ```
-| Name | Description |
-| ------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `lex_attr_getters` | A dictionary mapping attribute IDs to functions to compute them. Defaults to `None`. ~~Optional[Dict[str, Callable[[str], Any]]]~~ |
-| `strings` | A [`StringStore`](/api/stringstore) that maps strings to hash values, and vice versa, or a list of strings. ~~Union[List[str], StringStore]~~ |
-| `lookups` | A [`Lookups`](/api/lookups) that stores the `lexeme_norm` and other large lookup tables. Defaults to `None`. ~~Optional[Lookups]~~ |
-| `oov_prob` | The default OOV probability. Defaults to `-20.0`. ~~float~~ |
-| `vectors_name` 2.2 | A name to identify the vectors table. ~~str~~ |
-| `writing_system` | A dictionary describing the language's writing system. Typically provided by [`Language.Defaults`](/api/language#defaults). ~~Dict[str, Any]~~ |
-| `get_noun_chunks` | A function that yields base noun phrases used for [`Doc.noun_chunks`](/api/doc#noun_chunks). ~~Optional[Callable[[Union[Doc, Span], Iterator[Tuple[int, int, int]]]]]~~ |
+| Name | Description |
+| ------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `lex_attr_getters` | A dictionary mapping attribute IDs to functions to compute them. Defaults to `None`. ~~Optional[Dict[str, Callable[[str], Any]]]~~ |
+| `strings` | A [`StringStore`](/api/stringstore) that maps strings to hash values, and vice versa, or a list of strings. ~~Union[List[str], StringStore]~~ |
+| `lookups` | A [`Lookups`](/api/lookups) that stores the `lexeme_norm` and other large lookup tables. Defaults to `None`. ~~Optional[Lookups]~~ |
+| `oov_prob` | The default OOV probability. Defaults to `-20.0`. ~~float~~ |
+| `vectors_name` | A name to identify the vectors table. ~~str~~ |
+| `writing_system` | A dictionary describing the language's writing system. Typically provided by [`Language.Defaults`](/api/language#defaults). ~~Dict[str, Any]~~ |
+| `get_noun_chunks` | A function that yields base noun phrases used for [`Doc.noun_chunks`](/api/doc#noun_chunks). ~~Optional[Callable[[Union[Doc, Span], Iterator[Tuple[int, int, int]]]]]~~ |
## Vocab.\_\_len\_\_ {#len tag="method"}
@@ -308,14 +308,14 @@ Load state from a binary string.
> assert type(PERSON) == int
> ```
-| Name | Description |
-| ---------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `strings` | A table managing the string-to-int mapping. ~~StringStore~~ |
-| `vectors` 2 | A table associating word IDs to word vectors. ~~Vectors~~ |
-| `vectors_length` | Number of dimensions for each word vector. ~~int~~ |
-| `lookups` | The available lookup tables in this vocab. ~~Lookups~~ |
-| `writing_system` 2.1 | A dict with information about the language's writing system. ~~Dict[str, Any]~~ |
-| `get_noun_chunks` 3.0 | A function that yields base noun phrases used for [`Doc.noun_chunks`](/ap/doc#noun_chunks). ~~Optional[Callable[[Union[Doc, Span], Iterator[Tuple[int, int, int]]]]]~~ |
+| Name | Description |
+| ---------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `strings` | A table managing the string-to-int mapping. ~~StringStore~~ |
+| `vectors` | A table associating word IDs to word vectors. ~~Vectors~~ |
+| `vectors_length` | Number of dimensions for each word vector. ~~int~~ |
+| `lookups` | The available lookup tables in this vocab. ~~Lookups~~ |
+| `writing_system` | A dict with information about the language's writing system. ~~Dict[str, Any]~~ |
+| `get_noun_chunks` 3.0 | A function that yields base noun phrases used for [`Doc.noun_chunks`](/api/doc#noun_chunks). ~~Optional[Callable[[Union[Doc, Span], Iterator[Tuple[int, int, int]]]]]~~ |
## Serialization fields {#serialization-fields}
diff --git a/website/docs/styleguide.md b/website/docs/styleguide.md
index ed6f9d99b..47bca1ed4 100644
--- a/website/docs/styleguide.md
+++ b/website/docs/styleguide.md
@@ -8,9 +8,7 @@ menu:
- ['Typography', 'typography']
- ['Elements', 'elements']
- ['Components', 'components']
- - ['Setup & Installation', 'setup']
- ['Markdown Reference', 'markdown']
- - ['Project Structure', 'structure']
- ['Editorial', 'editorial']
sidebar:
- label: Styleguide
@@ -25,6 +23,610 @@ sidebar:
url: https://github.com/explosion/spaCy/blob/master/CONTRIBUTING.md
---
-import Readme from 'README.md'
+The [spacy.io](https://spacy.io) website is implemented using
+[Gatsby](https://www.gatsbyjs.org) with
+[Remark](https://github.com/remarkjs/remark) and [MDX](https://mdxjs.com/). This
+allows authoring content in **straightforward Markdown** without the usual
+limitations. Standard elements can be overwritten with powerful
+[React](http://reactjs.org/) components and wherever Markdown syntax isn't
+enough, JSX components can be used.
-
+> #### Contributing to the site
+>
+> The docs can always use another example or more detail, and they should always
+> be up to date and not misleading. We always appreciate a
+> [pull request](https://github.com/explosion/spaCy/pulls). To quickly find the
+> correct file to edit, simply click on the "Suggest edits" button at the bottom
+> of a page.
+>
+> For more details on editing the site locally, see the installation
+> instructions and markdown reference below.
+
+## Logo {#logo source="website/src/images/logo.svg"}
+
+import { Logos } from 'widgets/styleguide'
+
+If you would like to use the spaCy logo on your site, please get in touch and
+ask us first. However, if you want to show support and tell others that your
+project is using spaCy, you can grab one of our
+[spaCy badges](/usage/spacy-101#faq-project-with-spacy).
+
+
+
+## Colors {#colors}
+
+import { Colors, Patterns } from 'widgets/styleguide'
+
+
+
+### Patterns
+
+
+
+## Typography {#typography}
+
+import { H1, H2, H3, H4, H5, Label, InlineList, Comment } from
+'components/typography'
+
+> #### Markdown
+>
+> ```markdown_
+> ## Headline 2
+> ## Headline 2 {#some_id}
+> ## Headline 2 {#some_id tag="method"}
+> ```
+>
+> #### JSX
+>
+> ```jsx
+> Headline 2
+> Headline 2
+> Headline 2
+> ```
+
+Headlines are set in
+[HK Grotesk](http://cargocollective.com/hanken/HK-Grotesk-Open-Source-Font) by
+Hanken Design. All other body text uses the best-matching default system font
+to provide a "native" reading experience. All code uses the
+[JetBrains Mono](https://www.jetbrains.com/lp/mono/) typeface by JetBrains.
+
+
+
+Level 2 headings are automatically wrapped in `<section>` elements at compile
+time, using a custom
+[Markdown transformer](https://github.com/explosion/spaCy/tree/master/website/plugins/remark-wrap-section.js).
+This makes it easier to highlight the section that's currently in the viewport
+in the sidebar menu.
+
+
+
+
+Headline 1
+Headline 2
+Headline 3
+Headline 4
+Headline 5
+
+
+
+---
+
+The following optional attributes can be set on the headline to modify it, for
+example to add a tag for the documented type or to mark features that were
+introduced in a specific version or require statistical models to be loaded.
+Tags are also available as standalone `<Tag />` components.
+
+| Argument | Example | Result |
+| -------- | -------------------------- | ----------------------------------------- |
+| `tag` | `{tag="method"}` | method |
+| `new` | `{new="3"}` | 3 |
+| `model` | `{model="tagger, parser"}` | tagger, parser |
+| `hidden` | `{hidden="true"}` | |
+
+## Elements {#elements}
+
+### Links {#links}
+
+> #### Markdown
+>
+> ```markdown
+> [I am a link](https://spacy.io)
+> ```
+>
+> #### JSX
+>
+> ```jsx
+> I am a link
+> ```
+
+Special link styles are used depending on the link URL.
+
+- [I am a regular external link](https://explosion.ai)
+- [I am a link to the documentation](/api/doc)
+- [I am a link to an architecture](/api/architectures#HashEmbedCNN)
+- [I am a link to a model](/models/en#en_core_web_sm)
+- [I am a link to GitHub](https://github.com/explosion/spaCy)
+
+### Abbreviations {#abbr}
+
+import { Abbr } from 'components/typography'
+
+> #### JSX
+>
+> ```jsx
+> Abbreviation
+> ```
+
+Some text with an abbreviation. On small
+screens, I collapse and the explanation text is displayed next to the
+abbreviation.
+
+### Tags {#tags}
+
+import Tag from 'components/tag'
+
+> ```jsx
+> <Tag>method</Tag>
+> <Tag variant="new">4</Tag>
+> <Tag variant="model">tagger, parser</Tag>
+> ```
+
+Tags can be used together with headlines, or next to properties across the
+documentation, and combined with tooltips to provide additional information. An
+optional `variant` argument can be used for special tags. `variant="new"` makes
+the tag take a version number to mark new features. Using the component,
+visibility of this tag can later be toggled once the feature isn't considered
+new anymore. Setting `variant="model"` takes a description of model capabilities
+and can be used to mark features that require a respective model to be
+installed.
+
+
+
+method 4 tagger,
+parser
+
+
+
+### Buttons {#buttons}
+
+import Button from 'components/button'
+
+> ```jsx
+>
+>
+> ```
+
+Link buttons come in two variants, `primary` and `secondary`, and two sizes,
+with an optional `large` size modifier. Since they're mostly used as enhanced
+links, the buttons are implemented as styled links instead of native button
+elements.
+
+
+
+
+
+
+
+
+
+## Components
+
+### Table {#table}
+
+> #### Markdown
+>
+> ```markdown_
+> | Header 1 | Header 2 |
+> | -------- | -------- |
+> | Column 1 | Column 2 |
+> ```
+>
+> #### JSX
+>
+> ```markup
+>
+> Header 1 Header 2
+> Column 1 Column 2
+>
+> ```
+
+Tables are used to present data and API documentation. Certain keywords can be
+used to mark a footer row with a distinct style, for example to visualize the
+return values of a documented function.
+
+| Header 1 | Header 2 | Header 3 | Header 4 |
+| ----------- | -------- | :------: | -------: |
+| Column 1 | Column 2 | Column 3 | Column 4 |
+| Column 1 | Column 2 | Column 3 | Column 4 |
+| Column 1 | Column 2 | Column 3 | Column 4 |
+| Column 1 | Column 2 | Column 3 | Column 4 |
+| **RETURNS** | Column 2 | Column 3 | Column 4 |
+
+Tables also support optional "divider" rows that are typically used to denote
+keyword-only arguments in API documentation. To turn a row into a dividing
+headline, it should only include content in its first cell, and its value should
+be italicized:
+
+> #### Markdown
+>
+> ```markdown_
+> | Header 1 | Header 2 | Header 3 |
+> | -------- | -------- | -------- |
+> | Column 1 | Column 2 | Column 3 |
+> | _Hello_ | | |
+> | Column 1 | Column 2 | Column 3 |
+> ```
+
+| Header 1 | Header 2 | Header 3 |
+| -------- | -------- | -------- |
+| Column 1 | Column 2 | Column 3 |
+| _Hello_ | | |
+| Column 1 | Column 2 | Column 3 |
+
+### Type Annotations {#type-annotations}
+
+> #### Markdown
+>
+> ```markdown_
+> ~~Model[List[Doc], Floats2d]~~
+> ```
+>
+> #### JSX
+>
+> ```markup
+> Model[List[Doc], Floats2d]
+> ```
+
+Type annotations are special inline code blocks used to describe Python types
+in the [type hints](https://docs.python.org/3/library/typing.html) format. The
+special component will split the type, apply syntax highlighting and link all
+types that specify links in `meta/type-annotations.json`. Types can link to
+internal or external documentation pages. To make it easy to represent the type
+annotations in Markdown, the rendering "hijacks" the `~~` tags that would
+typically be converted to a `<del>` element – but in this case, text surrounded
+by `~~` becomes a type annotation.
+
+- ~~Dict[str, List[Union[Doc, Span]]]~~
+- ~~Model[List[Doc], List[numpy.ndarray]]~~
+
+Type annotations support a special visual style in tables and will render as a
+separate row, under the cell text. This allows the API docs to display complex
+types without taking up too much space in the cell. The type annotation should
+always be the **last element** in the row.
+
+> #### Markdown
+>
+> ```markdown_
+> | Header 1 | Header 2 |
+> | -------- | ----------------------- |
+> | Column 1 | Column 2 ~~List[Doc]~~ |
+> ```
+
+| Name | Description |
+| ----------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `vocab` | The shared vocabulary. ~~Vocab~~ |
+| `model` | The Thinc [`Model`](https://thinc.ai/docs/api-model) wrapping the transformer. ~~Model[List[Doc], FullTransformerBatch]~~ |
+| `set_extra_annotations` | Function that takes a batch of `Doc` objects and transformer outputs and can set additional annotations on the `Doc`. ~~Callable[[List[Doc], FullTransformerBatch], None]~~ |
+
+### List {#list}
+
+> #### Markdown
+>
+> ```markdown_
+> 1. One
+> 2. Two
+> ```
+>
+> #### JSX
+>
+> ```markup
+>
+> - One
+> - Two
+>
+> ```
+
+Lists are available as bulleted and numbered. Markdown lists are transformed
+automatically.
+
+- I am a bulleted list
+- I have nice bullets
+- Lorem ipsum dolor
+- consectetur adipiscing elit
+
+1. I am an ordered list
+2. I have nice numbers
+3. Lorem ipsum dolor
+4. consectetur adipiscing elit
+
+### Aside {#aside}
+
+> #### Markdown
+>
+> ```markdown_
+> > #### Aside title
+> > This is aside text.
+> ```
+>
+> #### JSX
+>
+> ```jsx
+>
+> ```
+
+Asides can be used to display additional notes and content in the right-hand
+column. Asides can contain text, code and other elements if needed. Visually,
+asides are moved to the side on the X-axis, and displayed at the same level they
+were inserted. On small screens, they collapse and are rendered in their
+original position, in between the text.
+
+To make them easier to use in Markdown, paragraphs formatted as blockquotes will
+turn into asides by default. Level 4 headlines (with a leading `####`) will
+become aside titles.
+
+### Code Block {#code-block}
+
+> #### Markdown
+>
+> ````markdown_
+> ```python
+> ### This is a title
+> import spacy
+> ```
+> ````
+>
+> #### JSX
+>
+> ```jsx
+>
+> import spacy
+>
+> ```
+
+Code blocks use the [Prism](http://prismjs.com/) syntax highlighter with a
+custom theme. The language can be set individually on each block and defaults
+to raw text, which renders the indented block as plain text with no
+highlighting and preserves whitespace. An optional label can be added as the
+first line with the prefix `####` (Python-like) or `///` (JavaScript-like).
+
+```python
+### Using spaCy
+import spacy
+nlp = spacy.load("en_core_web_sm")
+doc = nlp("This is a sentence.")
+for token in doc:
+ print(token.text, token.pos_)
+```
+
+Code blocks can also specify an optional range of line numbers to highlight by
+adding `{highlight="..."}` to the headline. Acceptable ranges are spans like
+`5-7`, but also `5-7,10` or `5-7,10,13-14`.
+
+> #### Markdown
+>
+> ````markdown_
+> ```python
+> ### This is a title {highlight="1-2"}
+> import spacy
+> nlp = spacy.load("en_core_web_sm")
+> ```
+> ````
+
+```python
+### Using the matcher {highlight="5-7"}
+import spacy
+from spacy.matcher import Matcher
+
+nlp = spacy.load('en_core_web_sm')
+matcher = Matcher(nlp.vocab)
+pattern = [{"LOWER": "hello"}, {"IS_PUNCT": True}, {"LOWER": "world"}]
+matcher.add("HelloWorld", None, pattern)
+doc = nlp("Hello, world! Hello world!")
+matches = matcher(doc)
+```
+
+Adding `{executable="true"}` to the title turns the code into an executable
+block, powered by [Binder](https://mybinder.org) and
+[Juniper](https://github.com/ines/juniper). If JavaScript is disabled, the
+interactive widget defaults to a regular code block.
+
+> #### Markdown
+>
+> ````markdown_
+> ```python
+> ### {executable="true"}
+> import spacy
+> nlp = spacy.load("en_core_web_sm")
+> ```
+> ````
+
+```python
+### {executable="true"}
+import spacy
+nlp = spacy.load("en_core_web_sm")
+doc = nlp("This is a sentence.")
+for token in doc:
+ print(token.text, token.pos_)
+```
+
+If a code block only contains a URL to a GitHub file, the raw file contents are
+embedded automatically and syntax highlighting is applied. The link to the
+original file is shown at the top of the widget.
+
+> #### Markdown
+>
+> ````markdown_
+> ```python
+> https://github.com/...
+> ```
+> ````
+>
+> #### JSX
+>
+> ```jsx
+>
+> ```
+
+```python
+https://github.com/explosion/spaCy/tree/master/spacy/language.py
+```
+
+### Infobox {#infobox}
+
+import Infobox from 'components/infobox'
+
+> #### JSX
+>
+> ```jsx
+> Regular infobox
+> This is a warning.
+> This is dangerous.
+> ```
+
+Infoboxes can be used to add notes, updates, warnings or additional information
+to a page or section. Semantically, they're implemented and interpreted as an
+`aside` element. Infoboxes can take an optional `title` argument, as well as an
+optional `variant` (either `"warning"` or `"danger"`).
+
+
+
+If needed, an infobox can contain regular text, `inline code`, lists and other
+blocks.
+
+
+
+
+
+If needed, an infobox can contain regular text, `inline code`, lists and other
+blocks.
+
+
+
+
+
+If needed, an infobox can contain regular text, `inline code`, lists and other
+blocks.
+
+
+
+### Accordion {#accordion}
+
+import Accordion from 'components/accordion'
+
+> #### JSX
+>
+> ```jsx
+>
+> Accordion content goes here.
+>
+> ```
+
+Accordions are collapsible sections that are mostly used for lengthy tables,
+like the tag and label annotation schemes for different languages. They all need
+to be presented – but chances are the user doesn't actually care about _all_ of
+them, especially not at the same time. So it's fairly reasonable to hide them
+behind a click. This particular implementation was inspired by the amazing
+[Inclusive Components blog](https://inclusive-components.design/collapsible-sections/).
+
+
+
+Lorem ipsum dolor sit amet, consectetur adipiscing elit. Quisque enim ante,
+pretium a orci eget, varius dignissim augue. Nam eu dictum mauris, id tincidunt
+nisi. Integer commodo pellentesque tincidunt. Nam at turpis finibus tortor
+gravida sodales tincidunt sit amet est. Nullam euismod arcu in tortor auctor,
+sit amet dignissim justo congue.
+
+
+
+## Markdown reference {#markdown}
+
+All page content and page meta lives in the `.md` files in the `/docs`
+directory. The frontmatter block at the top of each file defines the page title
+and other settings like the sidebar menu.
+
+````markdown
+---
+title: Page title
+---
+
+## Headline starting a section {#some_id}
+
+This is a regular paragraph with a [link](https://spacy.io) and **bold text**.
+
+> #### This is an aside title
+>
+> This is aside text.
+
+### Subheadline
+
+| Header 1 | Header 2 |
+| -------- | -------- |
+| Column 1 | Column 2 |
+
+```python
+### Code block title {highlight="2-3"}
+import spacy
+nlp = spacy.load("en_core_web_sm")
+doc = nlp("Hello world")
+```
+
+
+
+This is content in the infobox.
+
+
+````
+
+In addition to the native markdown elements, you can use the components
+[`<Infobox />`][infobox], [`<Accordion />`][accordion], [`<Abbr />`][abbr] and
+[`<Tag />`][tag] via their JSX syntax.
+
+[infobox]: https://spacy.io/styleguide#infobox
+[accordion]: https://spacy.io/styleguide#accordion
+[abbr]: https://spacy.io/styleguide#abbr
+[tag]: https://spacy.io/styleguide#tag
+
+## Editorial {#editorial}
+
+- "spaCy" should always be spelled with a lowercase "s" and a capital "C",
+ unless it specifically refers to the Python package or Python import `spacy`
+ (in which case it should be formatted as code).
+ - ✅ spaCy is a library for advanced NLP in Python.
+ - ❌ Spacy is a library for advanced NLP in Python.
+ - ✅ First, you need to install the `spacy` package from pip.
+- Mentions of code, like function names, classes, variable names etc. in inline
+ text should be formatted as `code`.
+ - ✅ "Calling the `nlp` object on a text returns a `Doc`."
+- Objects that have pages in the [API docs](/api) should be linked – for
+ example, [`Doc`](/api/doc) or [`Language.to_disk`](/api/language#to_disk). The
+ mentions should still be formatted as code within the link. Links pointing to
+ the API docs will automatically receive a little icon. However, if a paragraph
+ includes many references to the API, the links can easily get messy. In that
+ case, we typically only link the first mention of an object and not any
+ subsequent ones.
+ - ✅ The [`Span`](/api/span) and [`Token`](/api/token) objects are views of a
+ [`Doc`](/api/doc). [`Span.as_doc`](/api/span#as_doc) creates a `Doc` object
+ from a `Span`.
+ - ❌ The [`Span`](/api/span) and [`Token`](/api/token) objects are views of a
+ [`Doc`](/api/doc). [`Span.as_doc`](/api/span#as_doc) creates a
+ [`Doc`](/api/doc) object from a [`Span`](/api/span).
+
+* Other things we format as code are: references to trained pipeline packages
+ like `en_core_web_sm` or file names like `code.py` or `meta.json`.
+
+ - ✅ After training, the `config.cfg` is saved to disk.
+
+* [Type annotations](#type-annotations) are a special type of code formatting,
+ expressed by wrapping the text in `~~` instead of backticks. The result looks
+ like this: ~~List[Doc]~~. All references to known types will be linked
+ automatically.
+
+ - ✅ The model has the input type ~~List[Doc]~~ and it outputs a
+ ~~List[Array2d]~~.
+
+* We try to keep links meaningful but short.
+ - ✅ For details, see the usage guide on
+ [training with custom code](/usage/training#custom-code).
+ - ❌ For details, see
+ [the usage guide on training with custom code](/usage/training#custom-code).
+ - ❌ For details, see the usage guide on training with custom code
+ [here](/usage/training#custom-code).
diff --git a/website/docs/usage/101/_architecture.md b/website/docs/usage/101/_architecture.md
index 22e2b961e..ecc7f2fd9 100644
--- a/website/docs/usage/101/_architecture.md
+++ b/website/docs/usage/101/_architecture.md
@@ -41,25 +41,27 @@ components for different language processing tasks and also allows adding

-| Name | Description |
-| ----------------------------------------------- | ------------------------------------------------------------------------------------------- |
-| [`AttributeRuler`](/api/attributeruler) | Set token attributes using matcher rules. |
-| [`DependencyParser`](/api/dependencyparser) | Predict syntactic dependencies. |
-| [`EditTreeLemmatizer`](/api/edittreelemmatizer) | Predict base forms of words. |
-| [`EntityLinker`](/api/entitylinker) | Disambiguate named entities to nodes in a knowledge base. |
-| [`EntityRecognizer`](/api/entityrecognizer) | Predict named entities, e.g. persons or products. |
-| [`EntityRuler`](/api/entityruler) | Add entity spans to the `Doc` using token-based rules or exact phrase matches. |
-| [`Lemmatizer`](/api/lemmatizer) | Determine the base forms of words using rules and lookups. |
-| [`Morphologizer`](/api/morphologizer) | Predict morphological features and coarse-grained part-of-speech tags. |
-| [`SentenceRecognizer`](/api/sentencerecognizer) | Predict sentence boundaries. |
-| [`Sentencizer`](/api/sentencizer) | Implement rule-based sentence boundary detection that doesn't require the dependency parse. |
-| [`Tagger`](/api/tagger) | Predict part-of-speech tags. |
-| [`TextCategorizer`](/api/textcategorizer) | Predict categories or labels over the whole document. |
-| [`Tok2Vec`](/api/tok2vec) | Apply a "token-to-vector" model and set its outputs. |
-| [`Tokenizer`](/api/tokenizer) | Segment raw text and create `Doc` objects from the words. |
-| [`TrainablePipe`](/api/pipe) | Class that all trainable pipeline components inherit from. |
-| [`Transformer`](/api/transformer) | Use a transformer model and set its outputs. |
-| [Other functions](/api/pipeline-functions) | Automatically apply something to the `Doc`, e.g. to merge spans of tokens. |
+| Component name | Component class | Description |
+| ---------------------- | ---------------------------------------------------- | ------------------------------------------------------------------------------------------- |
+| `attribute_ruler` | [`AttributeRuler`](/api/attributeruler) | Set token attributes using matcher rules. |
+| `entity_linker` | [`EntityLinker`](/api/entitylinker) | Disambiguate named entities to nodes in a knowledge base. |
+| `entity_ruler` | [`SpanRuler`](/api/spanruler) | Add entity spans to the `Doc` using token-based rules or exact phrase matches. |
+| `lemmatizer` | [`Lemmatizer`](/api/lemmatizer) | Determine the base forms of words using rules and lookups. |
+| `morphologizer` | [`Morphologizer`](/api/morphologizer) | Predict morphological features and coarse-grained part-of-speech tags. |
+| `ner` | [`EntityRecognizer`](/api/entityrecognizer) | Predict named entities, e.g. persons or products. |
+| `parser` | [`DependencyParser`](/api/dependencyparser) | Predict syntactic dependencies. |
+| `senter` | [`SentenceRecognizer`](/api/sentencerecognizer) | Predict sentence boundaries. |
+| `sentencizer` | [`Sentencizer`](/api/sentencizer) | Implement rule-based sentence boundary detection that doesn't require the dependency parse. |
+| `span_ruler` | [`SpanRuler`](/api/spanruler) | Add spans to the `Doc` using token-based rules or exact phrase matches. |
+| `tagger` | [`Tagger`](/api/tagger) | Predict part-of-speech tags. |
+| `textcat` | [`TextCategorizer`](/api/textcategorizer) | Predict exactly one category or label over a whole document. |
+| `textcat_multilabel` | [`MultiLabel_TextCategorizer`](/api/textcategorizer) | Predict 0, 1 or more categories or labels over a whole document. |
+| `tok2vec` | [`Tok2Vec`](/api/tok2vec) | Apply a "token-to-vector" model and set its outputs. |
+| `tokenizer` | [`Tokenizer`](/api/tokenizer) | Segment raw text and create `Doc` objects from the words. |
+| `trainable_lemmatizer` | [`EditTreeLemmatizer`](/api/edittreelemmatizer) | Predict base forms of words. |
+| `transformer` | [`Transformer`](/api/transformer) | Use a transformer model and set its outputs. |
+| - | [`TrainablePipe`](/api/pipe) | Class that all trainable pipeline components inherit from. |
+| - | [Other functions](/api/pipeline-functions) | Automatically apply something to the `Doc`, e.g. to merge spans of tokens. |
### Matchers {#architecture-matchers}
@@ -78,7 +80,9 @@ operates on a `Doc` and gives you access to the matched tokens **in context**.
| Name | Description |
| ------------------------------------------------ | -------------------------------------------------------------------------------------------------- |
| [`Corpus`](/api/corpus) | Class for managing annotated corpora for training and evaluation data. |
-| [`KnowledgeBase`](/api/kb) | Storage for entities and aliases of a knowledge base for entity linking. |
+| [`KnowledgeBase`](/api/kb) | Abstract base class for storage and retrieval of data for entity linking. |
+| [`InMemoryLookupKB`](/api/kb_in_memory) | Implementation of `KnowledgeBase` storing all data in memory. |
+| [`Candidate`](/api/kb#candidate) | Object associating a textual mention with a specific entity contained in a `KnowledgeBase`. |
| [`Lookups`](/api/lookups) | Container for convenient access to large lookup tables and dictionaries. |
| [`MorphAnalysis`](/api/morphology#morphanalysis) | A morphological analysis. |
| [`Morphology`](/api/morphology) | Store morphological analyses and map them to and from hash values. |
diff --git a/website/docs/usage/101/_pipelines.md b/website/docs/usage/101/_pipelines.md
index f43219f41..3a6d67a37 100644
--- a/website/docs/usage/101/_pipelines.md
+++ b/website/docs/usage/101/_pipelines.md
@@ -53,9 +53,9 @@ example, a custom lemmatizer may need the part-of-speech tags assigned, so it'll
only work if it's added after the tagger. The parser will respect pre-defined
sentence boundaries, so if a previous component in the pipeline sets them, its
dependency predictions may be different. Similarly, it matters if you add the
-[`EntityRuler`](/api/entityruler) before or after the statistical entity
-recognizer: if it's added before, the entity recognizer will take the existing
-entities into account when making predictions. The
+[`SpanRuler`](/api/spanruler) before or after the statistical entity recognizer:
+if it's added before and it is writing to `doc.ents`, then the entity recognizer
+will take those existing entities into account when making predictions. The
[`EntityLinker`](/api/entitylinker), which resolves named entities to knowledge
base IDs, should be preceded by a pipeline component that recognizes entities
such as the [`EntityRecognizer`](/api/entityrecognizer).
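+
+A minimal sketch of this ordering, assuming the `en_core_web_sm` package is
+installed and using a made-up pattern for illustration:
+
+```python
+import spacy
+
+nlp = spacy.load("en_core_web_sm")
+# Insert the rule-based component before the statistical NER so that its
+# entities are taken into account by the entity recognizer
+ruler = nlp.add_pipe("entity_ruler", before="ner")
+ruler.add_patterns([{"label": "ORG", "pattern": "Explosion"}])
+print(nlp.pipe_names)
+```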
diff --git a/website/docs/usage/index.md b/website/docs/usage/index.md
index 1f4869606..d6ad6681c 100644
--- a/website/docs/usage/index.md
+++ b/website/docs/usage/index.md
@@ -75,7 +75,6 @@ spaCy's [`setup.cfg`](%%GITHUB_SPACY/setup.cfg) for details on what's included.
| ---------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `lookups` | Install [`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data) for data tables for lemmatization and lexeme normalization. The data is serialized with trained pipelines, so you only need this package if you want to train your own models. |
| `transformers` | Install [`spacy-transformers`](https://github.com/explosion/spacy-transformers). The package will be installed automatically when you install a transformer-based pipeline. |
-| `ray` | Install [`spacy-ray`](https://github.com/explosion/spacy-ray) to add CLI commands for [parallel training](/usage/training#parallel-training). |
| `cuda`, ... | Install spaCy with GPU support provided by [CuPy](https://cupy.chainer.org) for your given CUDA version. See the GPU [installation instructions](#gpu) for details and options. |
| `apple` | Install [`thinc-apple-ops`](https://github.com/explosion/thinc-apple-ops) to improve performance on an Apple M1. |
| `ja`, `ko`, `th` | Install additional dependencies required for tokenization for the [languages](/usage/models#languages). |
@@ -236,10 +235,10 @@ package to see what the oldest recommended versions of `numpy` are.
Some additional options may be useful for spaCy developers who are editing the
source code and recompiling frequently.
-- Install in editable mode. Changes to `.py` files will be reflected as soon
- as the files are saved, but edits to Cython files (`.pxd`, `.pyx`) will
- require the `pip install` command below to be run again. Before installing in
- editable mode, be sure you have removed any previous installs with
+- Install in editable mode. Changes to `.py` files will be reflected as soon as
+ the files are saved, but edits to Cython files (`.pxd`, `.pyx`) will require
+ the `pip install` command below to be run again. Before installing in editable
+ mode, be sure you have removed any previous installs with
`pip uninstall spacy`, which you may need to run multiple times to remove all
traces of earlier installs.
@@ -248,8 +247,8 @@ source code and recompiling frequently.
$ pip install --no-build-isolation --editable .
```
-- Build in parallel. Starting in v3.4.0, you can specify the number of
- build jobs with the environment variable `SPACY_NUM_BUILD_JOBS`:
+- Build in parallel. Starting in v3.4.0, you can specify the number of build
+ jobs with the environment variable `SPACY_NUM_BUILD_JOBS`:
```bash
$ pip install -r requirements.txt
diff --git a/website/docs/usage/linguistic-features.md b/website/docs/usage/linguistic-features.md
index 82472c67e..099678c40 100644
--- a/website/docs/usage/linguistic-features.md
+++ b/website/docs/usage/linguistic-features.md
@@ -1422,9 +1422,9 @@ other_tokens = ["i", "listened", "to", "obama", "'", "s", "podcasts", "."]
spacy_tokens = ["i", "listened", "to", "obama", "'s", "podcasts", "."]
align = Alignment.from_strings(other_tokens, spacy_tokens)
print(f"a -> b, lengths: {align.x2y.lengths}") # array([1, 1, 1, 1, 1, 1, 1, 1])
-print(f"a -> b, mapping: {align.x2y.dataXd}") # array([0, 1, 2, 3, 4, 4, 5, 6]) : two tokens both refer to "'s"
+print(f"a -> b, mapping: {align.x2y.data}") # array([0, 1, 2, 3, 4, 4, 5, 6]) : two tokens both refer to "'s"
print(f"b -> a, lengths: {align.y2x.lengths}") # array([1, 1, 1, 1, 2, 1, 1]) : the token "'s" refers to two tokens
-print(f"b -> a, mappings: {align.y2x.dataXd}") # array([0, 1, 2, 3, 4, 5, 6, 7])
+print(f"b -> a, mappings: {align.y2x.data}") # array([0, 1, 2, 3, 4, 5, 6, 7])
```
Here are some insights from the alignment information generated in the example
@@ -1433,10 +1433,10 @@ above:
- The one-to-one mappings for the first four tokens are identical, which means
they map to each other. This makes sense because they're also identical in the
input: `"i"`, `"listened"`, `"to"` and `"obama"`.
-- The value of `x2y.dataXd[6]` is `5`, which means that `other_tokens[6]`
+- The value of `x2y.data[6]` is `5`, which means that `other_tokens[6]`
(`"podcasts"`) aligns to `spacy_tokens[5]` (also `"podcasts"`).
-- `x2y.dataXd[4]` and `x2y.dataXd[5]` are both `4`, which means that both tokens
- 4 and 5 of `other_tokens` (`"'"` and `"s"`) align to token 4 of `spacy_tokens`
+- `x2y.data[4]` and `x2y.data[5]` are both `4`, which means that both tokens 4
+ and 5 of `other_tokens` (`"'"` and `"s"`) align to token 4 of `spacy_tokens`
(`"'s"`).
diff --git a/website/docs/usage/models.md b/website/docs/usage/models.md
index 3b1558bd8..03d0d535c 100644
--- a/website/docs/usage/models.md
+++ b/website/docs/usage/models.md
@@ -342,22 +342,6 @@ The easiest way to download a trained pipeline is via spaCy's
[`download`](/api/cli#download) command. It takes care of finding the
best-matching package compatible with your spaCy installation.
-> #### Important note for v3.0
->
-> Note that as of spaCy v3.0, shortcut links like `en` that create (potentially
-> brittle) symlinks in your spaCy installation are **deprecated**. To download
-> and load an installed pipeline package, use its full name:
->
-> ```diff
-> - python -m spacy download en
-> + python -m spacy download en_core_web_sm
-> ```
->
-> ```diff
-> - nlp = spacy.load("en")
-> + nlp = spacy.load("en_core_web_sm")
-> ```
-
```cli
# Download best-matching version of a package for your spaCy installation
$ python -m spacy download en_core_web_sm
@@ -489,17 +473,6 @@ spacy.cli.download("en_core_web_sm")
To load a pipeline package, use [`spacy.load`](/api/top-level#spacy.load) with
the package name or a path to the data directory:
-> #### Important note for v3.0
->
-> Note that as of spaCy v3.0, shortcut links like `en` that create (potentially
-> brittle) symlinks in your spaCy installation are **deprecated**. To download
-> and load an installed pipeline package, use its full name:
->
-> ```diff
-> - python -m spacy download en
-> + python -m spacy download en_core_web_sm
-> ```
-
```python
import spacy
nlp = spacy.load("en_core_web_sm") # load package "en_core_web_sm"
diff --git a/website/docs/usage/processing-pipelines.md b/website/docs/usage/processing-pipelines.md
index bd28810ae..b3940458b 100644
--- a/website/docs/usage/processing-pipelines.md
+++ b/website/docs/usage/processing-pipelines.md
@@ -303,13 +303,14 @@ available pipeline components and component functions.
> ruler = nlp.add_pipe("entity_ruler")
> ```
-| String name | Component | Description |
+| Component name | Component class | Description |
| ---------------------- | ---------------------------------------------------- | ----------------------------------------------------------------------------------------- |
| `tagger` | [`Tagger`](/api/tagger) | Assign part-of-speech-tags. |
| `parser` | [`DependencyParser`](/api/dependencyparser) | Assign dependency labels. |
| `ner` | [`EntityRecognizer`](/api/entityrecognizer) | Assign named entities. |
| `entity_linker` | [`EntityLinker`](/api/entitylinker) | Assign knowledge base IDs to named entities. Should be added after the entity recognizer. |
-| `entity_ruler` | [`EntityRuler`](/api/entityruler) | Assign named entities based on pattern rules and dictionaries. |
+| `span_ruler` | [`SpanRuler`](/api/spanruler) | Assign spans based on pattern rules and dictionaries. |
+| `entity_ruler` | [`SpanRuler`](/api/spanruler) | Assign named entities based on pattern rules and dictionaries. |
| `textcat` | [`TextCategorizer`](/api/textcategorizer) | Assign text categories: exactly one category is predicted per document. |
| `textcat_multilabel` | [`MultiLabel_TextCategorizer`](/api/textcategorizer) | Assign text categories in a multi-label setting: zero, one or more labels per document. |
| `lemmatizer` | [`Lemmatizer`](/api/lemmatizer) | Assign base forms to words using rules and lookups. |
@@ -363,7 +364,9 @@ nlp.enable_pipe("tagger")
```
In addition to `disable`, `spacy.load()` also accepts `enable`. If `enable` is
-set, all components except for those in `enable` are disabled.
+set, all components except for those in `enable` are disabled. If `enable` and
+`disable` conflict (i.e. the same component is included in both), an error is
+raised.
```python
# Load the complete pipeline, but disable all components except for tok2vec and tagger
@@ -1398,8 +1401,8 @@ Writing to a `._` attribute instead of to the `Doc` directly keeps a clearer
separation and makes it easier to ensure backwards compatibility. For example,
if you've implemented your own `.coref` property and spaCy claims it one day,
it'll break your code. Similarly, just by looking at the code, you'll
-immediately know what's built-in and what's custom – for example,
-`doc.sentiment` is spaCy, while `doc._.sent_score` isn't.
+immediately know what's built-in and what's custom – for example, `doc.lang` is
+spaCy, while `doc._.language` isn't.
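+
+A minimal sketch of this convention, using a made-up `language` extension to
+mirror the example above:
+
+```python
+import spacy
+from spacy.tokens import Doc
+
+# Register the custom attribute once, at module level
+Doc.set_extension("language", default="en")
+
+nlp = spacy.blank("en")
+doc = nlp("This is a sentence.")
+doc._.language = "en-US"          # custom attribute, namespaced under ._
+print(doc.lang_, doc._.language)  # built-in attribute vs. custom extension
+```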
diff --git a/website/docs/usage/projects.md b/website/docs/usage/projects.md
index 566ae561b..f57578049 100644
--- a/website/docs/usage/projects.md
+++ b/website/docs/usage/projects.md
@@ -148,6 +148,13 @@ skipped. You can also set `--force` to force re-running a command, or `--dry` to
perform a "dry run" and see what would happen (without actually running the
script).
+Since spaCy v3.4.2, `spacy project run` checks your installed dependencies to
+verify that your environment is properly set up and aligns with the project's
+`requirements.txt`, if there is one. If missing or conflicting dependencies are
+detected, a corresponding warning is displayed. If you'd like to disable the
+dependency check, set `check_requirements: false` in your project's
+`project.yml`.
+
### 4. Run a workflow {#run-workfow}
> #### project.yml
@@ -226,26 +233,49 @@ pipelines.
```yaml
%%GITHUB_PROJECTS/pipelines/tagger_parser_ud/project.yml
```
+
> #### Tip: Overriding variables on the CLI
>
-> If you want to override one or more variables on the CLI and are not already specifying a
-> project directory, you need to add `.` as a placeholder:
+> If you want to override one or more variables on the CLI and are not already
+> specifying a project directory, you need to add `.` as a placeholder:
>
> ```
> python -m spacy project run test . --vars.foo bar
> ```
-| Section | Description |
-| --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
-| `title` | An optional project title used in `--help` message and [auto-generated docs](#custom-docs). |
-| `description` | An optional project description used in [auto-generated docs](#custom-docs). |
-| `vars` | A dictionary of variables that can be referenced in paths, URLs and scripts and overriden on the CLI, just like [`config.cfg` variables](/usage/training#config-interpolation). For example, `${vars.name}` will use the value of the variable `name`. Variables need to be defined in the section `vars`, but can be a nested dict, so you're able to reference `${vars.model.name}`. |
-| `env` | A dictionary of variables, mapped to the names of environment variables that will be read in when running the project. For example, `${env.name}` will use the value of the environment variable defined as `name`. |
-| `directories` | An optional list of [directories](#project-files) that should be created in the project for assets, training outputs, metrics etc. spaCy will make sure that these directories always exist. |
-| `assets` | A list of assets that can be fetched with the [`project assets`](/api/cli#project-assets) command. `url` defines a URL or local path, `dest` is the destination file relative to the project directory, and an optional `checksum` ensures that an error is raised if the file's checksum doesn't match. Instead of `url`, you can also provide a `git` block with the keys `repo`, `branch` and `path`, to download from a Git repo. |
-| `workflows` | A dictionary of workflow names, mapped to a list of command names, to execute in order. Workflows can be run with the [`project run`](/api/cli#project-run) command. |
-| `commands` | A list of named commands. A command can define an optional help message (shown in the CLI when the user adds `--help`) and the `script`, a list of commands to run. The `deps` and `outputs` let you define the created file the command depends on and produces, respectively. This lets spaCy determine whether a command needs to be re-run because its dependencies or outputs changed. Commands can be run as part of a workflow, or separately with the [`project run`](/api/cli#project-run) command. |
-| `spacy_version` | Optional spaCy version range like `>=3.0.0,<3.1.0` that the project is compatible with. If it's loaded with an incompatible version, an error is raised when the project is loaded. |
+> #### Tip: Environment Variables
+>
+> Commands in a project file are not executed in a shell, so they don't have
+> direct access to environment variables. But you can insert environment
+> variables using the `env` dictionary to make values available for
+> interpolation, just like values in `vars`. Here's an example `env` dict that
+> makes `$PATH` available as `ENV_PATH`:
+>
+> ```yaml
+> env:
+>   ENV_PATH: PATH
+> ```
+>
+> This can be used in a project command like so:
+>
+> ```yaml
+> - name: 'echo-path'
+>   script:
+>     - 'echo ${env.ENV_PATH}'
+> ```
+
+| Section | Description |
+| --------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| `title` | An optional project title used in `--help` message and [auto-generated docs](#custom-docs). |
+| `description` | An optional project description used in [auto-generated docs](#custom-docs). |
+| `vars` | A dictionary of variables that can be referenced in paths, URLs and scripts and overriden on the CLI, just like [`config.cfg` variables](/usage/training#config-interpolation). For example, `${vars.name}` will use the value of the variable `name`. Variables need to be defined in the section `vars`, but can be a nested dict, so you're able to reference `${vars.model.name}`. |
+| `env` | A dictionary of variables, mapped to the names of environment variables that will be read in when running the project. For example, `${env.name}` will use the value of the environment variable defined as `name`. |
+| `directories` | An optional list of [directories](#project-files) that should be created in the project for assets, training outputs, metrics etc. spaCy will make sure that these directories always exist. |
+| `assets` | A list of assets that can be fetched with the [`project assets`](/api/cli#project-assets) command. `url` defines a URL or local path, `dest` is the destination file relative to the project directory, and an optional `checksum` ensures that an error is raised if the file's checksum doesn't match. Instead of `url`, you can also provide a `git` block with the keys `repo`, `branch` and `path`, to download from a Git repo. |
+| `workflows` | A dictionary of workflow names, mapped to a list of command names, to execute in order. Workflows can be run with the [`project run`](/api/cli#project-run) command. |
+| `commands` | A list of named commands. A command can define an optional help message (shown in the CLI when the user adds `--help`) and the `script`, a list of commands to run. The `deps` and `outputs` let you define the created file the command depends on and produces, respectively. This lets spaCy determine whether a command needs to be re-run because its dependencies or outputs changed. Commands can be run as part of a workflow, or separately with the [`project run`](/api/cli#project-run) command. |
+| `spacy_version` | Optional spaCy version range like `>=3.0.0,<3.1.0` that the project is compatible with. If it's loaded with an incompatible version, an error is raised when the project is loaded. |
+| `check_requirements` 3.4.2 | A flag determining whether to verify that the installed dependencies align with the project's `requirements.txt`. Defaults to `true`. |
### Data assets {#data-assets}
@@ -613,12 +643,13 @@ locally.
You can list one or more remotes in the `remotes` section of your
[`project.yml`](#project-yml) by mapping a string name to the URL of the
-storage. Under the hood, spaCy uses the
-[`smart-open`](https://github.com/RaRe-Technologies/smart_open) library to
-communicate with the remote storages, so you can use any protocol that
-`smart-open` supports, including [S3](https://aws.amazon.com/s3/),
-[Google Cloud Storage](https://cloud.google.com/storage), SSH and more, although
-you may need to install extra dependencies to use certain protocols.
+storage. Under the hood, spaCy uses
+[`Pathy`](https://github.com/justindujardin/pathy) to communicate with the
+remote storages, so you can use any protocol that `Pathy` supports, including
+[S3](https://aws.amazon.com/s3/),
+[Google Cloud Storage](https://cloud.google.com/storage), and the local
+filesystem, although you may need to install extra dependencies to use certain
+protocols.
> #### Example
>
@@ -631,7 +662,6 @@ you may need to install extra dependencies to use certain protocols.
remotes:
default: 's3://my-spacy-bucket'
local: '/mnt/scratch/cache'
- stuff: 'ssh://myserver.example.com/whatever'
```
@@ -758,7 +788,7 @@ and [`dvc repro`](https://dvc.org/doc/command-reference/repro) to reproduce the
workflow or individual commands.
```cli
-$ python -m spacy project dvc [workflow_name]
+$ python -m spacy project dvc [project_dir] [workflow_name]
```
@@ -984,54 +1014,6 @@ https://github.com/explosion/projects/blob/v3/integrations/fastapi/scripts/main.
---
-### Ray {#ray}
-
-> #### Installation
->
-> ```cli
-> $ pip install -U %%SPACY_PKG_NAME[ray]%%SPACY_PKG_FLAGS
-> # Check that the CLI is registered
-> $ python -m spacy ray --help
-> ```
-
-[Ray](https://ray.io/) is a fast and simple framework for building and running
-**distributed applications**. You can use Ray for parallel and distributed
-training with spaCy via our lightweight
-[`spacy-ray`](https://github.com/explosion/spacy-ray) extension package. If the
-package is installed in the same environment as spaCy, it will automatically add
-[`spacy ray`](/api/cli#ray) commands to your spaCy CLI. See the usage guide on
-[parallel training](/usage/training#parallel-training) for more details on how
-it works under the hood.
-
-
-
-Get started with parallel training using our project template. It trains a
-simple model on a Universal Dependencies Treebank and lets you parallelize the
-training with Ray.
-
-
-
-You can integrate [`spacy ray train`](/api/cli#ray-train) into your
-`project.yml` just like the regular training command and pass it the config, and
-optional output directory or remote storage URL and config overrides if needed.
-
-
-```yaml
-### project.yml
-commands:
- - name: "ray"
- help: "Train a model via parallel training with Ray"
- script:
- - "python -m spacy ray train configs/config.cfg -o training/ --paths.train corpus/train.spacy --paths.dev corpus/dev.spacy"
- deps:
- - "corpus/train.spacy"
- - "corpus/dev.spacy"
- outputs:
- - "training/model-best"
-```
-
----
-
### Weights & Biases {#wandb}
[Weights & Biases](https://www.wandb.com/) is a popular platform for experiment
diff --git a/website/docs/usage/rule-based-matching.md b/website/docs/usage/rule-based-matching.md
index bf1891df1..aa1015455 100644
--- a/website/docs/usage/rule-based-matching.md
+++ b/website/docs/usage/rule-based-matching.md
@@ -162,7 +162,7 @@ rule-based matching are:
| Attribute | Description |
| ---------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `ORTH` | The exact verbatim text of a token. ~~str~~ |
-| `TEXT` 2.1 | The exact verbatim text of a token. ~~str~~ |
+| `TEXT` | The exact verbatim text of a token. ~~str~~ |
| `NORM` | The normalized form of the token text. ~~str~~ |
| `LOWER` | The lowercase form of the token text. ~~str~~ |
| `LENGTH` | The length of the token text. ~~int~~ |
@@ -174,7 +174,7 @@ rule-based matching are:
| `SPACY` | Token has a trailing space. ~~bool~~ |
| `POS`, `TAG`, `MORPH`, `DEP`, `LEMMA`, `SHAPE` | The token's simple and extended part-of-speech tag, morphological analysis, dependency label, lemma, shape. Note that the values of these attributes are case-sensitive. For a list of available part-of-speech tags and dependency labels, see the [Annotation Specifications](/api/annotation). ~~str~~ |
| `ENT_TYPE` | The token's entity label. ~~str~~ |
-| `_` 2.1 | Properties in [custom extension attributes](/usage/processing-pipelines#custom-components-attributes). ~~Dict[str, Any]~~ |
+| `_` | Properties in [custom extension attributes](/usage/processing-pipelines#custom-components-attributes). ~~Dict[str, Any]~~ |
| `OP` | [Operator or quantifier](#quantifiers) to determine how often to match a token pattern. ~~str~~ |
@@ -375,7 +375,7 @@ scoped quantifiers – instead, you can build those behaviors with `on_match`
callbacks.
| OP | Description |
-|---------|------------------------------------------------------------------------|
+| ------- | ---------------------------------------------------------------------- |
| `!` | Negate the pattern, by requiring it to match exactly 0 times. |
| `?` | Make the pattern optional, by allowing it to match 0 or 1 times. |
| `+` | Require the pattern to match 1 or more times. |
@@ -471,7 +471,7 @@ matches = matcher(doc)
```
A very similar logic has been implemented in the built-in
-[`EntityRuler`](/api/entityruler) by the way. It also takes care of handling
+[`entity_ruler`](/api/entityruler) by the way. It also takes care of handling
overlapping matches, which you would otherwise have to take care of yourself.
> #### Tip: Visualizing matches
@@ -776,6 +776,9 @@ whitespace, making them easy to match as well.
### {executable="true"}
from spacy.lang.en import English
from spacy.matcher import Matcher
+from spacy.tokens import Doc
+
+Doc.set_extension("sentiment", default=0.0)
nlp = English() # We only want the tokenizer, so no need to load a pipeline
matcher = Matcher(nlp.vocab)
@@ -791,9 +794,9 @@ neg_patterns = [[{"ORTH": emoji}] for emoji in neg_emoji]
def label_sentiment(matcher, doc, i, matches):
match_id, start, end = matches[i]
if doc.vocab.strings[match_id] == "HAPPY": # Don't forget to get string!
- doc.sentiment += 0.1 # Add 0.1 for positive sentiment
+ doc._.sentiment += 0.1 # Add 0.1 for positive sentiment
elif doc.vocab.strings[match_id] == "SAD":
- doc.sentiment -= 0.1 # Subtract 0.1 for negative sentiment
+ doc._.sentiment -= 0.1 # Subtract 0.1 for negative sentiment
matcher.add("HAPPY", pos_patterns, on_match=label_sentiment) # Add positive pattern
matcher.add("SAD", neg_patterns, on_match=label_sentiment) # Add negative pattern
@@ -823,16 +826,17 @@ the emoji span will make it available as `span._.emoji_desc`.
```python
from emojipedia import Emojipedia # Installation: pip install emojipedia
-from spacy.tokens import Span # Get the global Span object
+from spacy.tokens import Doc, Span # Get the global Doc and Span object
Span.set_extension("emoji_desc", default=None) # Register the custom attribute
+Doc.set_extension("sentiment", default=0.0)
def label_sentiment(matcher, doc, i, matches):
match_id, start, end = matches[i]
if doc.vocab.strings[match_id] == "HAPPY": # Don't forget to get string!
- doc.sentiment += 0.1 # Add 0.1 for positive sentiment
+ doc._.sentiment += 0.1 # Add 0.1 for positive sentiment
elif doc.vocab.strings[match_id] == "SAD":
- doc.sentiment -= 0.1 # Subtract 0.1 for negative sentiment
+ doc._.sentiment -= 0.1 # Subtract 0.1 for negative sentiment
span = doc[start:end]
emoji = Emojipedia.search(span[0].text) # Get data for emoji
span._.emoji_desc = emoji.title # Assign emoji description
@@ -1270,7 +1274,7 @@ of patterns such as `{}` that match any token in the sentence.
## Rule-based entity recognition {#entityruler new="2.1"}
-The [`EntityRuler`](/api/entityruler) is a component that lets you add named
+The [`entity_ruler`](/api/entityruler) is a component that lets you add named
entities based on pattern dictionaries, which makes it easy to combine
rule-based and statistical named entity recognition for even more powerful
pipelines.
@@ -1295,13 +1299,12 @@ pattern. The entity ruler accepts two types of patterns:
### Using the entity ruler {#entityruler-usage}
-The [`EntityRuler`](/api/entityruler) is a pipeline component that's typically
-added via [`nlp.add_pipe`](/api/language#add_pipe). When the `nlp` object is
-called on a text, it will find matches in the `doc` and add them as entities to
-the `doc.ents`, using the specified pattern label as the entity label. If any
-matches were to overlap, the pattern matching most tokens takes priority. If
-they also happen to be equally long, then the match occurring first in the `Doc`
-is chosen.
+The `entity_ruler` is a pipeline component that's typically added via
+[`nlp.add_pipe`](/api/language#add_pipe). When the `nlp` object is called on a
+text, it will find matches in the `doc` and add them as entities to `doc.ents`,
+using the specified pattern label as the entity label. If any matches were to
+overlap, the pattern matching most tokens takes priority. If they also happen to
+be equally long, then the match occurring first in the `Doc` is chosen.
```python
### {executable="true"}
@@ -1339,7 +1342,7 @@ doc = nlp("MyCorp Inc. is a company in the U.S.")
print([(ent.text, ent.label_) for ent in doc.ents])
```
-#### Validating and debugging EntityRuler patterns {#entityruler-pattern-validation new="2.1.8"}
+#### Validating and debugging entity ruler patterns {#entityruler-pattern-validation new="2.1.8"}
The entity ruler can validate patterns against a JSON schema with the config
setting `"validate"`. See details under
@@ -1351,9 +1354,9 @@ ruler = nlp.add_pipe("entity_ruler", config={"validate": True})
### Adding IDs to patterns {#entityruler-ent-ids new="2.2.2"}
-The [`EntityRuler`](/api/entityruler) can also accept an `id` attribute for each
-pattern. Using the `id` attribute allows multiple patterns to be associated with
-the same entity.
+The [`entity_ruler`](/api/entityruler) can also accept an `id` attribute for
+each pattern. Using the `id` attribute allows multiple patterns to be associated
+with the same entity.
```python
### {executable="true"}
@@ -1373,10 +1376,10 @@ doc2 = nlp("Apple is opening its first big office in San Fran.")
print([(ent.text, ent.label_, ent.id_) for ent in doc2.ents])
```
-If the `id` attribute is included in the [`EntityRuler`](/api/entityruler)
-patterns, the `id_` property of the matched entity is set to the `id` given
-in the patterns. So in the example above it's easy to identify that "San
-Francisco" and "San Fran" are both the same entity.
+If the `id` attribute is included in the [`entity_ruler`](/api/entityruler)
+patterns, the `id_` property of the matched entity is set to the `id` given in
+the patterns. So in the example above it's easy to identify that "San Francisco"
+and "San Fran" are both the same entity.
### Using pattern files {#entityruler-files}
@@ -1400,13 +1403,13 @@ new_ruler = nlp.add_pipe("entity_ruler").from_disk("./patterns.jsonl")
If you're using the [Prodigy](https://prodi.gy) annotation tool, you might
recognize these pattern files from bootstrapping your named entity and text
-classification labelling. The patterns for the `EntityRuler` follow the same
+classification labelling. The patterns for the `entity_ruler` follow the same
syntax, so you can use your existing Prodigy pattern files in spaCy, and vice
versa.
-When you save out an `nlp` object that has an `EntityRuler` added to its
+When you save out an `nlp` object that has an `entity_ruler` added to its
pipeline, its patterns are automatically exported to the pipeline directory:
```python
@@ -1429,9 +1432,9 @@ rules included!
When using a large amount of **phrase patterns** (roughly > 10000) it's useful
to understand how the `add_patterns` function of the entity ruler works. For
-each **phrase pattern**, the EntityRuler calls the nlp object to construct a doc
-object. This happens in case you try to add the EntityRuler at the end of an
-existing pipeline with, for example, a POS tagger and want to extract matches
+each **phrase pattern**, the entity ruler calls the nlp object to construct a
+doc object. This happens if, for example, you add the entity ruler at the end of
+an existing pipeline with a POS tagger and want to extract matches
based on the pattern's POS signature. In this case you would pass a config value
of `"phrase_matcher_attr": "POS"` for the entity ruler.
@@ -1792,7 +1795,7 @@ the entity `Span` – for example `._.orgs` or `._.prev_orgs` and
> [`Doc.retokenize`](/api/doc#retokenize) context manager:
>
> ```python
-> with doc.retokenize() as retokenize:
+> with doc.retokenize() as retokenizer:
> for ent in doc.ents:
> retokenizer.merge(ent)
> ```
diff --git a/website/docs/usage/saving-loading.md b/website/docs/usage/saving-loading.md
index 9a4b584a3..70df43336 100644
--- a/website/docs/usage/saving-loading.md
+++ b/website/docs/usage/saving-loading.md
@@ -193,13 +193,13 @@ the data to and from a JSON file.
> #### Real-world example
>
-> To see custom serialization methods in action, check out the new
-> [`EntityRuler`](/api/entityruler) component and its
-> [source](%%GITHUB_SPACY/spacy/pipeline/entity_ruler.py). Patterns added to the
+> To see custom serialization methods in action, check out the
+> [`SpanRuler`](/api/spanruler) component and its
+> [source](%%GITHUB_SPACY/spacy/pipeline/span_ruler.py). Patterns added to the
> component will be saved to a `.jsonl` file if the pipeline is serialized to
> disk, and to a bytestring if the pipeline is serialized to bytes. This allows
-> saving out a pipeline with a rule-based entity recognizer and including all
-> rules _with_ the component data.
+> saving out a pipeline with rule-based components _along with_ all their
+> component data.
```python
### {highlight="16-23,25-30"}
@@ -306,12 +306,12 @@ pipeline component factories, language classes and other settings. To make spaCy
use your entry points, your package needs to expose them and it needs to be
installed in the same environment – that's it.
-| Entry point | Description |
-| ------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| [`spacy_factories`](#entry-points-components) | Group of entry points for pipeline component factories, keyed by component name. Can be used to expose custom components defined by another package. |
-| [`spacy_languages`](#entry-points-languages) | Group of entry points for custom [`Language` subclasses](/usage/linguistic-features#language-data), keyed by language shortcut. |
-| `spacy_lookups` 2.2 | Group of entry points for custom [`Lookups`](/api/lookups), including lemmatizer data. Used by spaCy's [`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data) package. |
-| [`spacy_displacy_colors`](#entry-points-displacy) 2.2 | Group of entry points of custom label colors for the [displaCy visualizer](/usage/visualizers#ent). The key name doesn't matter, but it should point to a dict of labels and color values. Useful for custom models that predict different entity types. |
+| Entry point | Description |
+| ------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| [`spacy_factories`](#entry-points-components) | Group of entry points for pipeline component factories, keyed by component name. Can be used to expose custom components defined by another package. |
+| [`spacy_languages`](#entry-points-languages) | Group of entry points for custom [`Language` subclasses](/usage/linguistic-features#language-data), keyed by language shortcut. |
+| `spacy_lookups` | Group of entry points for custom [`Lookups`](/api/lookups), including lemmatizer data. Used by spaCy's [`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data) package. |
+| [`spacy_displacy_colors`](#entry-points-displacy) | Group of entry points of custom label colors for the [displaCy visualizer](/usage/visualizers#ent). The key name doesn't matter, but it should point to a dict of labels and color values. Useful for custom models that predict different entity types. |
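As a hedged sketch of how a package might use the `spacy_factories` group, with purely illustrative names (`my_component`, `my_package`):

```python
# my_package/components.py: a stateless component a package could expose
from spacy.language import Language

@Language.component("my_component")  # hypothetical component name
def my_component(doc):
    # no-op component, purely for illustration
    return doc

# The package's setup.cfg would then declare the entry point, for example:
# [options.entry_points]
# spacy_factories =
#     my_component = my_package.components:my_component
```

Once such a package is installed in the same environment, `nlp.add_pipe("my_component")` should work without importing the package explicitly.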
### Custom components via entry points {#entry-points-components}
diff --git a/website/docs/usage/training.md b/website/docs/usage/training.md
index 5e064b269..f119cf2ed 100644
--- a/website/docs/usage/training.md
+++ b/website/docs/usage/training.md
@@ -424,7 +424,7 @@ your components during training, and the most common scenarios are:
2. Update an existing **trained component** with more examples.
3. Include an existing trained component without updating it.
4. Include a non-trainable component, like a rule-based
- [`EntityRuler`](/api/entityruler) or [`Sentencizer`](/api/sentencizer), or a
+ [`SpanRuler`](/api/spanruler) or [`Sentencizer`](/api/sentencizer), or a
fully [custom component](/usage/processing-pipelines#custom-components).
If a component block defines a `factory`, spaCy will look it up in the
@@ -480,7 +480,7 @@ as-is. They are also excluded when calling
> parse. So the evaluation results should always reflect what your pipeline will
> produce at runtime. If you want a frozen component to run (without updating)
> during training as well, so that downstream components can use its
-> **predictions**, you can add it to the list of
+> **predictions**, you should add it to the list of
> [`annotating_components`](/usage/training#annotating-components).
```ini
@@ -1572,77 +1572,6 @@ token-based annotations like the dependency parse or entity labels, you'll need
to take care to adjust the `Example` object so its annotations match and remain
valid.
-## Parallel & distributed training with Ray {#parallel-training}
-
-> #### Installation
->
-> ```cli
-> $ pip install -U %%SPACY_PKG_NAME[ray]%%SPACY_PKG_FLAGS
-> # Check that the CLI is registered
-> $ python -m spacy ray --help
-> ```
-
-[Ray](https://ray.io/) is a fast and simple framework for building and running
-**distributed applications**. You can use Ray to train spaCy on one or more
-remote machines, potentially speeding up your training process. Parallel
-training won't always be faster though – it depends on your batch size, models,
-and hardware.
-
-
-
-To use Ray with spaCy, you need the
-[`spacy-ray`](https://github.com/explosion/spacy-ray) package installed.
-Installing the package will automatically add the `ray` command to the spaCy
-CLI.
-
-
-
-The [`spacy ray train`](/api/cli#ray-train) command follows the same API as
-[`spacy train`](/api/cli#train), with a few extra options to configure the Ray
-setup. You can optionally set the `--address` option to point to your Ray
-cluster. If it's not set, Ray will run locally.
-
-```cli
-python -m spacy ray train config.cfg --n-workers 2
-```
-
-
-
-Get started with parallel training using our project template. It trains a
-simple model on a Universal Dependencies Treebank and lets you parallelize the
-training with Ray.
-
-
-
-### How parallel training works {#parallel-training-details}
-
-Each worker receives a shard of the **data** and builds a copy of the **model
-and optimizer** from the [`config.cfg`](#config). It also has a communication
-channel to **pass gradients and parameters** to the other workers. Additionally,
-each worker is given ownership of a subset of the parameter arrays. Every
-parameter array is owned by exactly one worker, and the workers are given a
-mapping so they know which worker owns which parameter.
-
-
-
-As training proceeds, every worker will be computing gradients for **all** of
-the model parameters. When they compute gradients for parameters they don't own,
-they'll **send them to the worker** that does own that parameter, along with a
-version identifier so that the owner can decide whether to discard the gradient.
-Workers use the gradients they receive and the ones they compute locally to
-update the parameters they own, and then broadcast the updated array and a new
-version ID to the other workers.
-
-This training procedure is **asynchronous** and **non-blocking**. Workers always
-push their gradient increments and parameter updates, they do not have to pull
-them and block on the result, so the transfers can happen in the background,
-overlapped with the actual training work. The workers also do not have to stop
-and wait for each other ("synchronize") at the start of each batch. This is very
-useful for spaCy, because spaCy is often trained on long documents, which means
-**batches can vary in size** significantly. Uneven workloads make synchronous
-gradient descent inefficient, because if one batch is slow, all of the other
-workers are stuck waiting for it to complete before they can continue.
-
## Internal training API {#api}
diff --git a/website/docs/usage/v3-4.md b/website/docs/usage/v3-4.md
index 7cc4570d5..e6987e7a2 100644
--- a/website/docs/usage/v3-4.md
+++ b/website/docs/usage/v3-4.md
@@ -63,12 +63,12 @@ All CNN pipelines have been extended with whitespace augmentation.
The English CNN pipelines have new word vectors:
-| Package | Model Version | TAG | Parser LAS | NER F |
-| ----------------------------------------------- | ------------- | ---: | ---------: | ----: |
-| [`en_core_news_md`](/models/en#en_core_news_md) | v3.3.0 | 97.3 | 90.1 | 84.6 |
-| [`en_core_news_md`](/models/en#en_core_news_lg) | v3.4.0 | 97.2 | 90.3 | 85.5 |
-| [`en_core_news_lg`](/models/en#en_core_news_md) | v3.3.0 | 97.4 | 90.1 | 85.3 |
-| [`en_core_news_lg`](/models/en#en_core_news_lg) | v3.4.0 | 97.3 | 90.2 | 85.6 |
+| Package | Model Version | TAG | Parser LAS | NER F |
+| --------------------------------------------- | ------------- | ---: | ---------: | ----: |
+| [`en_core_web_md`](/models/en#en_core_web_md) | v3.3.0 | 97.3 | 90.1 | 84.6 |
+| [`en_core_web_md`](/models/en#en_core_web_md) | v3.4.0 | 97.2 | 90.3 | 85.5 |
+| [`en_core_web_lg`](/models/en#en_core_web_lg) | v3.3.0 | 97.4 | 90.1 | 85.3 |
+| [`en_core_web_lg`](/models/en#en_core_web_lg) | v3.4.0 | 97.3 | 90.2 | 85.6 |
## Notes about upgrading from v3.3 {#upgrading}
diff --git a/website/docs/usage/v3.md b/website/docs/usage/v3.md
index 971779ed3..64f93b7c0 100644
--- a/website/docs/usage/v3.md
+++ b/website/docs/usage/v3.md
@@ -15,18 +15,6 @@ menu:
> To help you make the transition from v2.x to v3.0, we've uploaded the old
> website to [**v2.spacy.io**](https://v2.spacy.io/docs).
-
-
-Want to make the transition from spaCy v2 to spaCy v3 as smooth as possible for
-you and your organization? We're now offering commercial **migration support**
-for your spaCy pipelines! We've put a lot of work into making it easy to upgrade
-your existing code and training workflows – but custom projects may always need
-some custom work, especially when it comes to taking advantage of the new
-capabilities.
-[**Details & application →**](https://form.typeform.com/to/vMs2zSjM)
-
-
-
diff --git a/website/meta/languages.json b/website/meta/languages.json
index 0028b4a5f..15158df79 100644
--- a/website/meta/languages.json
+++ b/website/meta/languages.json
@@ -4,12 +4,22 @@
"code": "af",
"name": "Afrikaans"
},
+ {
+ "code": "am",
+ "name": "Amharic",
+ "has_examples": true
+ },
{
"code": "ar",
"name": "Arabic",
"example": "هذه جملة",
"has_examples": true
},
+ {
+ "code": "az",
+ "name": "Azerbaijani",
+ "has_examples": true
+ },
{
"code": "bg",
"name": "Bulgarian",
@@ -65,7 +75,7 @@
{
"code": "dsb",
"name": "Lower Sorbian",
- "has_examples": true
+ "has_examples": true
},
{
"code": "el",
@@ -142,6 +152,11 @@
"code": "ga",
"name": "Irish"
},
+ {
+ "code": "grc",
+ "name": "Ancient Greek",
+ "has_examples": true
+ },
{
"code": "gu",
"name": "Gujarati",
@@ -172,7 +187,7 @@
{
"code": "hsb",
"name": "Upper Sorbian",
- "has_examples": true
+ "has_examples": true
},
{
"code": "hu",
@@ -260,6 +275,10 @@
"example": "Адамга эң кыйыны — күн сайын адам болуу",
"has_examples": true
},
+ {
+ "code": "la",
+ "name": "Latin"
+ },
{
"code": "lb",
"name": "Luxembourgish",
@@ -448,6 +467,11 @@
"example": "นี่คือประโยค",
"has_examples": true
},
+ {
+ "code": "ti",
+ "name": "Tigrinya",
+ "has_examples": true
+ },
{
"code": "tl",
"name": "Tagalog"
@@ -538,6 +562,7 @@
"url": "https://github.com/explosion/spacy-pkuseg"
}
],
+ "example": "这是一个用于示例的句子。",
"has_examples": true
}
],
diff --git a/website/meta/sidebars.json b/website/meta/sidebars.json
index 1b743636c..339e4085b 100644
--- a/website/meta/sidebars.json
+++ b/website/meta/sidebars.json
@@ -12,7 +12,6 @@
{ "text": "New in v3.0", "url": "/usage/v3" },
{ "text": "New in v3.1", "url": "/usage/v3-1" },
{ "text": "New in v3.2", "url": "/usage/v3-2" },
- { "text": "New in v3.2", "url": "/usage/v3-2" },
{ "text": "New in v3.3", "url": "/usage/v3-3" },
{ "text": "New in v3.4", "url": "/usage/v3-4" }
]
@@ -46,7 +45,7 @@
{ "text": "v2.x Documentation", "url": "https://v2.spacy.io" },
{
"text": "Custom Solutions",
- "url": "https://explosion.ai/spacy-tailored-pipelines"
+ "url": "https://explosion.ai/custom-solutions"
}
]
}
@@ -95,6 +94,7 @@
"label": "Pipeline",
"items": [
{ "text": "AttributeRuler", "url": "/api/attributeruler" },
+ { "text": "CoreferenceResolver", "url": "/api/coref" },
{ "text": "DependencyParser", "url": "/api/dependencyparser" },
{ "text": "EditTreeLemmatizer", "url": "/api/edittreelemmatizer" },
{ "text": "EntityLinker", "url": "/api/entitylinker" },
@@ -105,6 +105,7 @@
{ "text": "SentenceRecognizer", "url": "/api/sentencerecognizer" },
{ "text": "Sentencizer", "url": "/api/sentencizer" },
{ "text": "SpanCategorizer", "url": "/api/spancategorizer" },
+ { "text": "SpanResolver", "url": "/api/span-resolver" },
{ "text": "SpanRuler", "url": "/api/spanruler" },
{ "text": "Tagger", "url": "/api/tagger" },
{ "text": "TextCategorizer", "url": "/api/textcategorizer" },
diff --git a/website/meta/site.json b/website/meta/site.json
index 360a72178..fa79d3c69 100644
--- a/website/meta/site.json
+++ b/website/meta/site.json
@@ -51,7 +51,7 @@
{ "text": "Online Course", "url": "https://course.spacy.io" },
{
"text": "Custom Solutions",
- "url": "https://explosion.ai/spacy-tailored-pipelines"
+ "url": "https://explosion.ai/custom-solutions"
}
]
},
diff --git a/website/meta/universe.json b/website/meta/universe.json
index 9145855c6..7c2bb98b7 100644
--- a/website/meta/universe.json
+++ b/website/meta/universe.json
@@ -1,5 +1,129 @@
{
"resources": [
+ {
+ "id": "grecy",
+ "title": "greCy",
+ "slogan": "Ancient Greek pipelines for spaCy",
+ "description": "greCy offers state-of-the-art pipelines for ancient Greek NLP. The repository makes language models available in various sizes, some of them containing floret word vectors and a BERT transformer layer.",
+ "github": "jmyerston/greCy",
+ "code_example": [
+ "import spacy",
+ "#After installing the grc_ud_proiel_trf wheel package from the greCy repository",
+ "",
+ "nlp = spacy.load('grc_ud_proiel_trf')",
+ "doc = nlp('δοκῶ μοι περὶ ὧν πυνθάνεσθε οὐκ ἀμελέτητος εἶναι.')",
+ "",
+ "for token in doc:",
+ " print(token.text, token.norm_, token.lemma_, token.pos_, token.tag_)"
+ ],
+ "code_language": "python",
+ "author": "Jacobo Myerston",
+ "author_links": {
+ "twitter": "@jcbmyrstn",
+ "github": "jmyerston",
+ "website": "https://huggingface.co/spaces/Jacobo/syntax"
+ },
+ "category": ["pipeline", "research"],
+ "tags": ["ancient Greek"]
+ },
+ {
+ "id": "spacy-cleaner",
+ "title": "spacy-cleaner",
+ "slogan": "Easily clean text with spaCy!",
+ "description": "**spacy-cleaner** utilises spaCy `Language` models to replace, remove, and \n mutate spaCy tokens. Cleaning actions available are:\n\n* Remove/replace stopwords.\n* Remove/replace punctuation.\n* Remove/replace numbers.\n* Remove/replace emails.\n* Remove/replace URLs.\n* Perform lemmatisation.\n\nSee our [docs](https://ce11an.github.io/spacy-cleaner/) for more information.",
+ "github": "Ce11an/spacy-cleaner",
+ "pip": "spacy-cleaner",
+ "code_example": [
+ "import spacy",
+ "import spacy_cleaner",
+ "from spacy_cleaner.processing import removers, replacers, mutators",
+ "",
+ "model = spacy.load(\"en_core_web_sm\")",
+ "pipeline = spacy_cleaner.Pipeline(",
+ " model,",
+ " removers.remove_stopword_token,",
+ " replacers.replace_punctuation_token,",
+ " mutators.mutate_lemma_token,",
+ ")",
+ "",
+ "texts = [\"Hello, my name is Cellan! I love to swim!\"]",
+ "",
+ "pipeline.clean(texts)",
+ "# ['hello _IS_PUNCT_ Cellan _IS_PUNCT_ love swim _IS_PUNCT_']"
+ ],
+ "code_language": "python",
+ "url": "https://ce11an.github.io/spacy-cleaner/",
+ "image": "https://raw.githubusercontent.com/Ce11an/spacy-cleaner/main/docs/assets/images/spacemen.png",
+ "author": "Cellan Hall",
+ "author_links": {
+ "twitter": "Ce11an",
+ "github": "Ce11an",
+ "website": "https://www.linkedin.com/in/cellan-hall/"
+ },
+ "category": [
+ "extension"
+ ],
+ "tags": [
+ "text-processing"
+ ]
+ },
+ {
+ "id": "Zshot",
+ "title": "Zshot",
+ "slogan": "Zero and Few shot named entity & relationships recognition",
+ "github": "ibm/zshot",
+ "pip": "zshot",
+ "code_example": [
+ "import spacy",
+ "from zshot import PipelineConfig, displacy",
+ "from zshot.linker import LinkerRegen",
+ "from zshot.mentions_extractor import MentionsExtractorSpacy",
+ "from zshot.utils.data_models import Entity",
+ "",
+ "nlp = spacy.load('en_core_web_sm')",
+ "# zero shot definition of entities",
+ "nlp_config = PipelineConfig(",
+ " mentions_extractor=MentionsExtractorSpacy(),",
+ " linker=LinkerRegen(),",
+ " entities=[",
+ " Entity(name='Paris',",
+ " description='Paris is located in northern central France, in a north-bending arc of the river Seine'),",
+ " Entity(name='IBM',",
+ " description='International Business Machines Corporation (IBM) is an American multinational technology corporation headquartered in Armonk, New York'),",
+ " Entity(name='New York', description='New York is a city in U.S. state'),",
+ " Entity(name='Florida', description='southeasternmost U.S. state'),",
+ " Entity(name='American',",
+ " description='American, something of, from, or related to the United States of America, commonly known as the United States or America'),",
+ " Entity(name='Chemical formula',",
+            "          description='In chemistry, a chemical formula is a way of presenting information about the chemical proportions of atoms that constitute a particular chemical compound or molecule'),",
+ " Entity(name='Acetamide',",
+ " description='Acetamide (systematic name: ethanamide) is an organic compound with the formula CH3CONH2. It is the simplest amide derived from acetic acid. It finds some use as a plasticizer and as an industrial solvent.'),",
+ " Entity(name='Armonk',",
+ " description='Armonk is a hamlet and census-designated place (CDP) in the town of North Castle, located in Westchester County, New York, United States.'),",
+ " Entity(name='Acetic Acid',",
+ " description='Acetic acid, systematically named ethanoic acid, is an acidic, colourless liquid and organic compound with the chemical formula CH3COOH'),",
+ " Entity(name='Industrial solvent',",
+ " description='Acetamide (systematic name: ethanamide) is an organic compound with the formula CH3CONH2. It is the simplest amide derived from acetic acid. It finds some use as a plasticizer and as an industrial solvent.'),",
+ " ]",
+ ")",
+ "nlp.add_pipe('zshot', config=nlp_config, last=True)",
+ "",
+ "text = 'International Business Machines Corporation (IBM) is an American multinational technology corporation' \\",
+ " ' headquartered in Armonk, New York, with operations in over 171 countries.'",
+ "",
+ "doc = nlp(text)",
+ "displacy.serve(doc, style='ent')"
+ ],
+ "thumb": "https://ibm.github.io/zshot/img/graph.png",
+ "url": "https://ibm.github.io/zshot/",
+ "author": "IBM Research",
+ "author_links": {
+ "github": "ibm",
+ "twitter": "IBMResearch",
+ "website": "https://research.ibm.com/labs/ireland/"
+ },
+ "category": ["scientific", "models", "research"]
+ },
{
"id": "concepcy",
"title": "concepCy",
@@ -337,37 +461,6 @@
},
"category": ["standalone"]
},
- {
- "id": "spikex",
- "title": "SpikeX - SpaCy Pipes for Knowledge Extraction",
- "slogan": "Use SpikeX to build knowledge extraction tools with almost-zero effort",
- "description": "SpikeX is a collection of pipes ready to be plugged in a spaCy pipeline. It aims to help in building knowledge extraction tools with almost-zero effort.",
- "github": "erre-quadro/spikex",
- "pip": "spikex",
- "code_example": [
- "from spacy import load as spacy_load",
- "from spikex.wikigraph import load as wg_load",
- "from spikex.pipes import WikiPageX",
- "",
- "# load a spacy model and get a doc",
- "nlp = spacy_load('en_core_web_sm')",
- "doc = nlp('An apple a day keeps the doctor away')",
- "# load a WikiGraph",
- "wg = wg_load('simplewiki_core')",
- "# get a WikiPageX and extract all pages",
- "wikipagex = WikiPageX(wg)",
- "doc = wikipagex(doc)",
- "# see all pages extracted from the doc",
- "for span in doc._.wiki_spans:",
- " print(span._.wiki_pages)"
- ],
- "category": ["pipeline", "standalone"],
- "author": "Erre Quadro",
- "author_links": {
- "github": "erre-quadro",
- "website": "https://www.errequadrosrl.com"
- }
- },
{
"id": "spacy-dbpedia-spotlight",
"title": "DBpedia Spotlight for SpaCy",
@@ -433,17 +526,6 @@
"tags": ["sentiment", "textblob"],
"spacy_version": 3
},
- {
- "id": "spacy-ray",
- "title": "spacy-ray",
- "slogan": "Parallel and distributed training with spaCy and Ray",
- "description": "[Ray](https://ray.io/) is a fast and simple framework for building and running **distributed applications**. This very lightweight extension package lets you use Ray for parallel and distributed training with spaCy. If `spacy-ray` is installed in the same environment as spaCy, it will automatically add `spacy ray` commands to your spaCy CLI.",
- "github": "explosion/spacy-ray",
- "pip": "spacy-ray",
- "category": ["training"],
- "author": "Explosion / Anyscale",
- "thumb": "https://i.imgur.com/7so6ZpS.png"
- },
{
"id": "spacy-sentence-bert",
"title": "spaCy - sentence-transformers",
@@ -939,31 +1021,13 @@
"author_links": {
"github": "mholtzscher"
},
- "category": ["pipeline"]
- },
- {
- "id": "spacy-sentence-segmenter",
- "title": "Sentence Segmenter",
- "slogan": "Custom sentence segmentation for spaCy",
- "code_example": [
- "from seg.newline.segmenter import NewLineSegmenter",
- "import spacy",
- "",
- "nlseg = NewLineSegmenter()",
- "nlp = spacy.load('en')",
- "nlp.add_pipe(nlseg.set_sent_starts, name='sentence_segmenter', before='parser')",
- "doc = nlp(my_doc_text)"
- ],
- "author": "tc64",
- "author_links": {
- "github": "tc64"
- },
- "category": ["pipeline"]
+ "category": ["pipeline"],
+ "spacy_version": 2
},
{
"id": "spacy_cld",
"title": "spaCy-CLD",
- "slogan": "Add language detection to your spaCy pipeline using CLD2",
+ "slogan": "Add language detection to your spaCy v2 pipeline using CLD2",
"description": "spaCy-CLD operates on `Doc` and `Span` spaCy objects. When called on a `Doc` or `Span`, the object is given two attributes: `languages` (a list of up to 3 language codes) and `language_scores` (a dictionary mapping language codes to confidence scores between 0 and 1).\n\nspacy-cld is a little extension that wraps the [PYCLD2](https://github.com/aboSamoor/pycld2) Python library, which in turn wraps the [Compact Language Detector 2](https://github.com/CLD2Owners/cld2) C library originally built at Google for the Chromium project. CLD2 uses character n-grams as features and a Naive Bayes classifier to identify 80+ languages from Unicode text strings (or XML/HTML). It can detect up to 3 different languages in a given document, and reports a confidence score (reported in with each language.",
"github": "nickdavidhaynes/spacy-cld",
"pip": "spacy_cld",
@@ -983,7 +1047,8 @@
"author_links": {
"github": "nickdavidhaynes"
},
- "category": ["pipeline"]
+ "category": ["pipeline"],
+ "spacy_version": 2
},
{
"id": "spacy-iwnlp",
@@ -1057,7 +1122,8 @@
"github": "sammous"
},
"category": ["pipeline"],
- "tags": ["pos", "lemmatizer", "french"]
+ "tags": ["pos", "lemmatizer", "french"],
+ "spacy_version": 2
},
{
"id": "lemmy",
@@ -1251,8 +1317,8 @@
},
{
"id": "neuralcoref",
- "slogan": "State-of-the-art coreference resolution based on neural nets and spaCy",
- "description": "This coreference resolution module is based on the super fast [spaCy](https://spacy.io/) parser and uses the neural net scoring model described in [Deep Reinforcement Learning for Mention-Ranking Coreference Models](http://cs.stanford.edu/people/kevclark/resources/clark-manning-emnlp2016-deep.pdf) by Kevin Clark and Christopher D. Manning, EMNLP 2016. Since ✨Neuralcoref v2.0, you can train the coreference resolution system on your own dataset — e.g., another language than English! — **provided you have an annotated dataset**. Note that to use neuralcoref with spaCy > 2.1.0, you'll have to install neuralcoref from source.",
+ "slogan": "State-of-the-art coreference resolution based on neural nets and spaCy v2",
+      "description": "This coreference resolution module is based on the super fast spaCy parser and uses the neural net scoring model described in [Deep Reinforcement Learning for Mention-Ranking Coreference Models](http://cs.stanford.edu/people/kevclark/resources/clark-manning-emnlp2016-deep.pdf) by Kevin Clark and Christopher D. Manning, EMNLP 2016. Since ✨Neuralcoref v2.0, you can train the coreference resolution system on your own dataset — e.g., a language other than English! — **provided you have an annotated dataset**. Note that to use neuralcoref with spaCy > 2.1.0, you'll have to install neuralcoref from source, and spaCy v3+ is not supported.",
"github": "huggingface/neuralcoref",
"thumb": "https://i.imgur.com/j6FO9O6.jpg",
"code_example": [
@@ -1273,7 +1339,8 @@
"github": "huggingface"
},
"category": ["standalone", "conversational", "models"],
- "tags": ["coref"]
+ "tags": ["coref"],
+ "spacy_version": 2
},
{
"id": "neuralcoref-vizualizer",
@@ -1349,7 +1416,7 @@
"import spacy",
"import explacy",
"",
- "nlp = spacy.load('en')",
+ "nlp = spacy.load('en_core_web_sm')",
"explacy.print_parse_info(nlp, 'The salad was surprisingly tasty.')"
],
"author": "Tyler Neylon",
@@ -1386,13 +1453,26 @@
"image": "https://jasonkessler.github.io/2012conventions0.0.2.2.png",
"code_example": [
"import spacy",
- "import scattertext as st",
"",
- "nlp = spacy.load('en')",
- "corpus = st.CorpusFromPandas(convention_df,",
- " category_col='party',",
- " text_col='text',",
- " nlp=nlp).build()"
+ "from scattertext import SampleCorpora, produce_scattertext_explorer",
+ "from scattertext import produce_scattertext_html",
+ "from scattertext.CorpusFromPandas import CorpusFromPandas",
+ "",
+ "nlp = spacy.load('en_core_web_sm')",
+ "convention_df = SampleCorpora.ConventionData2012.get_data()",
+ "corpus = CorpusFromPandas(convention_df,",
+ " category_col='party',",
+ " text_col='text',",
+ " nlp=nlp).build()",
+ "",
+ "html = produce_scattertext_html(corpus,",
+ " category='democrat',",
+ " category_name='Democratic',",
+ " not_category_name='Republican',",
+ " minimum_term_frequency=5,",
+ " width_in_pixels=1000)",
+ "open('./simple.html', 'wb').write(html.encode('utf-8'))",
+ "print('Open ./simple.html in Chrome or Firefox.')"
],
"author": "Jason Kessler",
"author_links": {
@@ -1911,17 +1991,6 @@
},
"category": ["books"]
},
- {
- "type": "education",
- "id": "learning-path-spacy",
- "title": "Learning Path: Mastering spaCy for Natural Language Processing",
- "slogan": "O'Reilly, 2017",
- "description": "spaCy, a fast, user-friendly library for teaching computers to understand text, simplifies NLP techniques, such as speech tagging and syntactic dependencies, so you can easily extract information, attributes, and objects from massive amounts of text to then document, measure, and analyze. This Learning Path is a hands-on introduction to using spaCy to discover insights through natural language processing. While end-to-end natural language processing solutions can be complex, you’ll learn the linguistics, algorithms, and machine learning skills to get the job done.",
- "url": "https://www.safaribooksonline.com/library/view/learning-path-mastering/9781491986653/",
- "thumb": "https://i.imgur.com/9MIgMAc.jpg",
- "author": "Aaron Kramer",
- "category": ["courses"]
- },
{
"type": "education",
"id": "introduction-into-spacy-3",
@@ -2403,20 +2472,20 @@
"import spacy",
"from spacy_wordnet.wordnet_annotator import WordnetAnnotator ",
"",
- "# Load an spacy model (supported models are \"es\" and \"en\") ",
- "nlp = spacy.load('en')",
- "# Spacy 3.x",
- "nlp.add_pipe(\"spacy_wordnet\", after='tagger', config={'lang': nlp.lang})",
- "# Spacy 2.x",
+ "# Load a spaCy model (supported languages are \"es\" and \"en\") ",
+ "nlp = spacy.load('en_core_web_sm')",
+ "# spaCy 3.x",
+ "nlp.add_pipe(\"spacy_wordnet\", after='tagger')",
+ "# spaCy 2.x",
"# nlp.add_pipe(WordnetAnnotator(nlp.lang), after='tagger')",
"token = nlp('prices')[0]",
"",
- "# wordnet object link spacy token with nltk wordnet interface by giving acces to",
+        "# The WordNet object links spaCy tokens with the NLTK WordNet interface, giving access to",
"# synsets and lemmas ",
"token._.wordnet.synsets()",
"token._.wordnet.lemmas()",
"",
- "# And automatically tags with wordnet domains",
+        "# And automatically adds info about WordNet domains",
"token._.wordnet.wordnet_domains()"
],
"author": "recognai",
@@ -3984,7 +4053,48 @@
},
"category": ["pipeline"],
"tags": ["interpretation", "ja"]
+ },
+ {
+ "id": "spacy-partial-tagger",
+ "title": "spaCy - Partial Tagger",
+ "slogan": "Sequence Tagger for Partially Annotated Dataset in spaCy",
+      "description": "This is a library to build a CRF tagger with a partially annotated dataset in spaCy. You can build your own tagger from a dictionary alone.",
+ "github": "doccano/spacy-partial-tagger",
+ "pip": "spacy-partial-tagger",
+ "category": ["pipeline", "training"],
+ "author": "Yasufumi Taniguchi",
+ "author_links": {
+ "github": "yasufumy"
+ }
+ },
+ {
+ "id": "spacy-pythainlp",
+ "title": "spaCy-PyThaiNLP",
+ "slogan": "PyThaiNLP for spaCy",
+ "description": "This package wraps the PyThaiNLP library to add support for Thai to spaCy.",
+ "github": "PyThaiNLP/spaCy-PyThaiNLP",
+ "code_example": [
+ "import spacy",
+ "import spacy_pythainlp.core",
+ "",
+ "nlp = spacy.blank('th')",
+ "nlp.add_pipe('pythainlp')",
+ "doc = nlp('ผมเป็นคนไทย แต่มะลิอยากไปโรงเรียนส่วนผมจะไปไหน ผมอยากไปเที่ยว')",
+ "",
+ "print(list(doc.sents))",
+ "# output: [ผมเป็นคนไทย แต่มะลิอยากไปโรงเรียนส่วนผมจะไปไหน , ผมอยากไปเที่ยว]"
+ ],
+ "code_language": "python",
+ "author": "Wannaphong Phatthiyaphaibun",
+ "author_links": {
+ "twitter": "@wannaphong_p",
+ "github": "wannaphong",
+ "website": "https://iam.wannaphong.com/"
+ },
+ "category": ["pipeline", "research"],
+ "tags": ["Thai"]
}
],
"categories": [
diff --git a/website/src/styles/quickstart.module.sass b/website/src/styles/quickstart.module.sass
index 8ad106a78..d0f9db551 100644
--- a/website/src/styles/quickstart.module.sass
+++ b/website/src/styles/quickstart.module.sass
@@ -149,6 +149,9 @@
& > span
display: block
+ a
+ text-decoration: underline
+
.small
font-size: var(--font-size-code)
line-height: 1.65
diff --git a/website/src/widgets/landing.js b/website/src/widgets/landing.js
index b7ae35f6e..c3aaa8a22 100644
--- a/website/src/widgets/landing.js
+++ b/website/src/widgets/landing.js
@@ -105,13 +105,13 @@ const Landing = ({ data }) => {
-
+
diff --git a/website/src/widgets/quickstart-install.js b/website/src/widgets/quickstart-install.js
index 61c0678dd..28dd14ecc 100644
--- a/website/src/widgets/quickstart-install.js
+++ b/website/src/widgets/quickstart-install.js
@@ -9,7 +9,7 @@ const DEFAULT_PLATFORM = 'x86'
const DEFAULT_MODELS = ['en']
const DEFAULT_OPT = 'efficiency'
const DEFAULT_HARDWARE = 'cpu'
-const DEFAULT_CUDA = 'cuda113'
+const DEFAULT_CUDA = 'cuda-autodetect'
const CUDA = {
'8.0': 'cuda80',
'9.0': 'cuda90',
@@ -17,15 +17,7 @@ const CUDA = {
'9.2': 'cuda92',
'10.0': 'cuda100',
'10.1': 'cuda101',
- '10.2': 'cuda102',
- '11.0': 'cuda110',
- '11.1': 'cuda111',
- '11.2': 'cuda112',
- '11.3': 'cuda113',
- '11.4': 'cuda114',
- '11.5': 'cuda115',
- '11.6': 'cuda116',
- '11.7': 'cuda117',
+ '10.2, 11.0+': 'cuda-autodetect',
}
const LANG_EXTRAS = ['ja'] // only for languages with models
@@ -167,6 +159,9 @@ const QuickstartInstall = ({ id, title }) => {
setters={setters}
showDropdown={showDropdown}
>
+
+ # Note M1 GPU support is experimental, see Thinc issue #792
+
python -m venv .env
@@ -206,7 +201,13 @@ const QuickstartInstall = ({ id, title }) => {
{nightly ? ' --pre' : ''}
conda install -c conda-forge spacy
-
+
+ conda install -c conda-forge cupy
+
+
+ conda install -c conda-forge cupy
+
+
conda install -c conda-forge cupy