Merge branch 'v4' into cleanup/move-legacy-entity-linker

Commit 82f1e20c4a by Paul O'Leary McCann, 2023-01-31 12:25:35 +09:00
150 changed files with 1875 additions and 658 deletions

.gitignore

@@ -10,16 +10,6 @@ spacy/tests/package/setup.cfg
spacy/tests/package/pyproject.toml
spacy/tests/package/requirements.txt
-# Website
-website/.cache/
-website/public/
-website/node_modules
-website/.npm
-website/logs
-*.log
-npm-debug.log*
-quickstart-training-generator.js
# Cython / C extensions
cythonize.json
spacy/*.html

@@ -3,7 +3,7 @@ repos:
rev: 22.3.0
hooks:
- id: black
-language_version: python3.7
+language_version: python3.8
additional_dependencies: ['click==8.0.4']
- repo: https://github.com/pycqa/flake8
rev: 5.0.4

@@ -271,7 +271,7 @@ except: # noqa: E722
### Python conventions
-All Python code must be written **compatible with Python 3.6+**. More detailed
+All Python code must be written **compatible with Python 3.8+**. More detailed
code conventions can be found in the [developer docs](https://github.com/explosion/spaCy/blob/master/extra/DEVELOPER_DOCS/Code%20Conventions.md).
#### I/O and handling paths

@@ -5,7 +5,7 @@ override SPACY_EXTRAS = spacy-lookups-data==1.0.2 jieba spacy-pkuseg==0.0.28 sud
endif
ifndef PYVER
-override PYVER = 3.6
+override PYVER = 3.8
endif
VENV := ./env$(PYVER)

@@ -16,7 +16,7 @@ production-ready [**training system**](https://spacy.io/usage/training) and easy
model packaging, deployment and workflow management. spaCy is commercial
open-source software, released under the [MIT license](https://github.com/explosion/spaCy/blob/master/LICENSE).
-💫 **Version 3.4 out now!**
+💫 **Version 3.5 out now!**
[Check out the release notes here.](https://github.com/explosion/spaCy/releases)
[![Azure Pipelines](https://img.shields.io/azure-devops/build/explosion-ai/public/8/master.svg?logo=azure-pipelines&style=flat-square&label=build)](https://dev.azure.com/explosion-ai/public/_build?definitionId=8)
@@ -105,7 +105,7 @@ For detailed installation instructions, see the
- **Operating system**: macOS / OS X · Linux · Windows (Cygwin, MinGW, Visual
Studio)
-- **Python version**: Python 3.6+ (only 64 bit)
+- **Python version**: Python 3.8+ (only 64 bit)
- **Package managers**: [pip] · [conda] (via `conda-forge`)
[pip]: https://pypi.org/project/spacy/

@@ -11,25 +11,39 @@ trigger:
exclude:
- "website/*"
- "*.md"
+- "*.mdx"
- ".github/workflows/*"
pr:
paths:
exclude:
- "*.md"
+- "*.mdx"
- "website/docs/*"
- "website/src/*"
+- "website/meta/*.tsx"
+- "website/meta/*.mjs"
+- "website/meta/languages.json"
+- "website/meta/site.json"
+- "website/meta/sidebars.json"
+- "website/meta/type-annotations.json"
+- "website/pages/*"
- ".github/workflows/*"
jobs:
-# Perform basic checks for most important errors (syntax etc.) Uses the config
-# defined in .flake8 and overwrites the selected codes.
+# Check formatting and linting. Perform basic checks for most important errors
+# (syntax etc.) Uses the config defined in setup.cfg and overwrites the
+# selected codes.
- job: "Validate"
pool:
vmImage: "ubuntu-latest"
steps:
- task: UsePythonVersion@0
inputs:
-versionSpec: "3.7"
+versionSpec: "3.8"
+- script: |
+pip install black==22.3.0
+python -m black spacy --check
+displayName: "black"
- script: |
pip install flake8==5.0.4
python -m flake8 spacy --count --select=E901,E999,F821,F822,F823,W605 --show-source --statistics
@@ -40,24 +54,6 @@ jobs:
strategy:
matrix:
# We're only running one platform per Python version to speed up builds
-Python36Linux:
-imageName: "ubuntu-20.04"
-python.version: "3.6"
-# Python36Windows:
-# imageName: "windows-latest"
-# python.version: "3.6"
-# Python36Mac:
-# imageName: "macos-latest"
-# python.version: "3.6"
-# Python37Linux:
-# imageName: "ubuntu-20.04"
-# python.version: "3.7"
-Python37Windows:
-imageName: "windows-latest"
-python.version: "3.7"
-# Python37Mac:
-# imageName: "macos-latest"
-# python.version: "3.7"
# Python38Linux:
# imageName: "ubuntu-latest"
# python.version: "3.8"

@@ -1,5 +1,5 @@
# Our libraries
-spacy-legacy>=3.0.12,<3.1.0
+spacy-legacy>=4.0.0.dev0,<4.1.0
spacy-loggers>=1.0.0,<2.0.0
cymem>=2.0.2,<2.1.0
preshed>=3.0.2,<3.1.0
@@ -22,7 +22,6 @@ langcodes>=3.2.0,<4.0.0
# Official Python utilities
setuptools
packaging>=20.0
-typing_extensions>=3.7.4.1,<4.2.0; python_version < "3.8"
# Development dependencies
pre-commit>=2.13.0
cython>=0.25,<3.0
@@ -31,8 +30,7 @@ pytest-timeout>=1.3.0,<2.0.0
mock>=2.0.0,<3.0.0
flake8>=3.8.0,<6.0.0
hypothesis>=3.27.0,<7.0.0
-mypy>=0.990,<0.1000; platform_machine != "aarch64" and python_version >= "3.7"
+mypy>=0.990,<0.1000; platform_machine != "aarch64"
-types-dataclasses>=0.1.3; python_version < "3.7"
types-mock>=0.1.1
types-setuptools>=57.0.0
types-requests

@@ -17,8 +17,6 @@ classifiers =
Operating System :: Microsoft :: Windows
Programming Language :: Cython
Programming Language :: Python :: 3
-Programming Language :: Python :: 3.6
-Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
Programming Language :: Python :: 3.10
@@ -31,10 +29,10 @@ project_urls =
[options]
zip_safe = false
include_package_data = true
-python_requires = >=3.6
+python_requires = >=3.8
install_requires =
# Our libraries
-spacy-legacy>=3.0.12,<3.1.0
+spacy-legacy>=4.0.0.dev0,<4.1.0
spacy-loggers>=1.0.0,<2.0.0
murmurhash>=0.28.0,<1.1.0
cymem>=2.0.2,<2.1.0
@@ -55,7 +53,6 @@ install_requires =
# Official Python utilities
setuptools
packaging>=20.0
-typing_extensions>=3.7.4,<4.2.0; python_version < "3.8"
langcodes>=3.2.0,<4.0.0
[options.entry_points]

@@ -4,6 +4,7 @@ from ._util import app, setup_cli # noqa: F401
# These are the actual functions, NOT the wrapped CLI commands. The CLI commands
# are registered automatically and won't have to be imported here.
+from .benchmark_speed import benchmark_speed_cli # noqa: F401
from .download import download # noqa: F401
from .info import info # noqa: F401
from .package import package # noqa: F401

@@ -1,4 +1,4 @@
-from typing import Dict, Any, Union, List, Optional, Tuple, Iterable
+from typing import Dict, Any, Union, List, Optional, Tuple, Iterable, Literal
from typing import TYPE_CHECKING, overload
import sys
import shutil
@@ -16,7 +16,6 @@ from thinc.util import gpu_is_available
from configparser import InterpolationError
import os
-from ..compat import Literal
from ..schemas import ProjectConfigSchema, validate
from ..util import import_file, run_command, make_tempdir, registry, logger
from ..util import is_compatible_version, SimpleFrozenDict, ENV_VARS
@@ -46,6 +45,7 @@ DEBUG_HELP = """Suite of helpful commands for debugging and profiling. Includes
commands to check and validate your config files, training and evaluation data,
and custom model implementations.
"""
+BENCHMARK_HELP = """Commands for benchmarking pipelines."""
INIT_HELP = """Commands for initializing configs and pipeline packages."""
# Wrappers for Typer's annotations. Initially created to set defaults and to
@@ -54,12 +54,14 @@ Arg = typer.Argument
Opt = typer.Option
app = typer.Typer(name=NAME, help=HELP)
+benchmark_cli = typer.Typer(name="benchmark", help=BENCHMARK_HELP, no_args_is_help=True)
project_cli = typer.Typer(name="project", help=PROJECT_HELP, no_args_is_help=True)
debug_cli = typer.Typer(name="debug", help=DEBUG_HELP, no_args_is_help=True)
init_cli = typer.Typer(name="init", help=INIT_HELP, no_args_is_help=True)
app.add_typer(project_cli)
app.add_typer(debug_cli)
+app.add_typer(benchmark_cli)
app.add_typer(init_cli)

@@ -0,0 +1,174 @@
from typing import Iterable, List, Optional
import random
from itertools import islice
import numpy
from pathlib import Path
import time
from tqdm import tqdm
import typer
from wasabi import msg
from .. import util
from ..language import Language
from ..tokens import Doc
from ..training import Corpus
from ._util import Arg, Opt, benchmark_cli, setup_gpu
@benchmark_cli.command(
"speed",
context_settings={"allow_extra_args": True, "ignore_unknown_options": True},
)
def benchmark_speed_cli(
# fmt: off
ctx: typer.Context,
model: str = Arg(..., help="Model name or path"),
data_path: Path = Arg(..., help="Location of binary evaluation data in .spacy format", exists=True),
batch_size: Optional[int] = Opt(None, "--batch-size", "-b", min=1, help="Override the pipeline batch size"),
no_shuffle: bool = Opt(False, "--no-shuffle", help="Do not shuffle benchmark data"),
use_gpu: int = Opt(-1, "--gpu-id", "-g", help="GPU ID or -1 for CPU"),
n_batches: int = Opt(50, "--batches", help="Minimum number of batches to benchmark", min=30,),
warmup_epochs: int = Opt(3, "--warmup", "-w", min=0, help="Number of iterations over the data for warmup"),
# fmt: on
):
"""
Benchmark a pipeline. Expects a loadable spaCy pipeline and benchmark
data in the binary .spacy format.
"""
setup_gpu(use_gpu=use_gpu, silent=False)
nlp = util.load_model(model)
batch_size = batch_size if batch_size is not None else nlp.batch_size
corpus = Corpus(data_path)
docs = [eg.predicted for eg in corpus(nlp)]
if len(docs) == 0:
msg.fail("Cannot benchmark speed using an empty corpus.", exits=1)
print(f"Warming up for {warmup_epochs} epochs...")
warmup(nlp, docs, warmup_epochs, batch_size)
print()
print(f"Benchmarking {n_batches} batches...")
wps = benchmark(nlp, docs, n_batches, batch_size, not no_shuffle)
print()
print_outliers(wps)
print_mean_with_ci(wps)
# Lowercased, behaves as a context manager function.
class time_context:
"""Register the running time of a context."""
def __enter__(self):
self.start = time.perf_counter()
return self
def __exit__(self, type, value, traceback):
self.elapsed = time.perf_counter() - self.start
class Quartiles:
"""Calculate the q1, q2, q3 quartiles and the inter-quartile range (iqr)
of a sample."""
q1: float
q2: float
q3: float
iqr: float
def __init__(self, sample: numpy.ndarray) -> None:
self.q1 = numpy.quantile(sample, 0.25)
self.q2 = numpy.quantile(sample, 0.5)
self.q3 = numpy.quantile(sample, 0.75)
self.iqr = self.q3 - self.q1
def annotate(
nlp: Language, docs: List[Doc], batch_size: Optional[int]
) -> numpy.ndarray:
docs = nlp.pipe(tqdm(docs, unit="doc"), batch_size=batch_size)
wps = []
while True:
with time_context() as elapsed:
batch_docs = list(
islice(docs, batch_size if batch_size else nlp.batch_size)
)
if len(batch_docs) == 0:
break
n_tokens = count_tokens(batch_docs)
wps.append(n_tokens / elapsed.elapsed)
return numpy.array(wps)
def benchmark(
nlp: Language,
docs: List[Doc],
n_batches: int,
batch_size: int,
shuffle: bool,
) -> numpy.ndarray:
if shuffle:
bench_docs = [
nlp.make_doc(random.choice(docs).text)
for _ in range(n_batches * batch_size)
]
else:
bench_docs = [
nlp.make_doc(docs[i % len(docs)].text)
for i in range(n_batches * batch_size)
]
return annotate(nlp, bench_docs, batch_size)
def bootstrap(x, statistic=numpy.mean, iterations=10000) -> numpy.ndarray:
"""Apply a statistic to repeated random samples of an array."""
return numpy.fromiter(
(
statistic(numpy.random.choice(x, len(x), replace=True))
for _ in range(iterations)
),
numpy.float64,
)
def count_tokens(docs: Iterable[Doc]) -> int:
return sum(len(doc) for doc in docs)
def print_mean_with_ci(sample: numpy.ndarray):
mean = numpy.mean(sample)
bootstrap_means = bootstrap(sample)
bootstrap_means.sort()
# 95% confidence interval
low = bootstrap_means[int(len(bootstrap_means) * 0.025)]
high = bootstrap_means[int(len(bootstrap_means) * 0.975)]
print(f"Mean: {mean:.1f} words/s (95% CI: {low-mean:.1f} +{high-mean:.1f})")
def print_outliers(sample: numpy.ndarray):
quartiles = Quartiles(sample)
n_outliers = numpy.sum(
(sample < (quartiles.q1 - 1.5 * quartiles.iqr))
| (sample > (quartiles.q3 + 1.5 * quartiles.iqr))
)
n_extreme_outliers = numpy.sum(
(sample < (quartiles.q1 - 3.0 * quartiles.iqr))
| (sample > (quartiles.q3 + 3.0 * quartiles.iqr))
)
print(
f"Outliers: {(100 * n_outliers) / len(sample):.1f}%, extreme outliers: {(100 * n_extreme_outliers) / len(sample)}%"
)
def warmup(
nlp: Language, docs: List[Doc], warmup_epochs: int, batch_size: Optional[int]
) -> numpy.ndarray:
docs = warmup_epochs * docs
return annotate(nlp, docs, batch_size)
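
The `speed` subcommand above is registered on the `benchmark` Typer app added in `_util.py`. A minimal sketch of driving it through Typer's CliRunner, the same way the CLI tests later in this diff invoke `benchmark accuracy`; the pipeline name and data path mentioned in the comment are placeholders, not part of this change:

# Sketch only: exercise the new benchmark CLI via Typer's test runner.
# For a real run, replace "--help" with a pipeline and a .spacy file, e.g.
# ["benchmark", "speed", "en_core_web_sm", "dev.spacy", "--batches", "100"].
from typer.testing import CliRunner

from spacy.cli._util import app

result = CliRunner().invoke(app, ["benchmark", "speed", "--help"])
print(result.stdout)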

@@ -1,5 +1,5 @@
from typing import Any, Dict, Iterable, List, Optional, Sequence, Set, Tuple, Union
-from typing import cast, overload
+from typing import Literal, cast, overload
from pathlib import Path
from collections import Counter
import sys
@@ -17,10 +17,10 @@ from ..pipeline import TrainablePipe
from ..pipeline._parser_internals import nonproj
from ..pipeline._parser_internals.nonproj import DELIMITER
from ..pipeline import Morphologizer, SpanCategorizer
+from ..pipeline._edit_tree_internals.edit_trees import EditTrees
from ..morphology import Morphology
from ..language import Language
from ..util import registry, resolve_dot_names
-from ..compat import Literal
from ..vectors import Mode as VectorsMode
from .. import util
@@ -671,6 +671,59 @@ def debug_data(
f"Found {gold_train_data['n_cycles']} projectivized train sentence(s) with cycles"
)
if "trainable_lemmatizer" in factory_names:
msg.divider("Trainable Lemmatizer")
trees_train: Set[str] = gold_train_data["lemmatizer_trees"]
trees_dev: Set[str] = gold_dev_data["lemmatizer_trees"]
# This is necessary context when someone is attempting to interpret whether the
# number of trees exclusively in the dev set is meaningful.
msg.info(f"{len(trees_train)} lemmatizer trees generated from training data")
msg.info(f"{len(trees_dev)} lemmatizer trees generated from dev data")
dev_not_train = trees_dev - trees_train
if len(dev_not_train) != 0:
pct = len(dev_not_train) / len(trees_dev)
msg.info(
f"{len(dev_not_train)} lemmatizer trees ({pct*100:.1f}% of dev trees)"
" were found exclusively in the dev data."
)
else:
# Would we ever expect this case? It seems like it would be pretty rare,
# and we might actually want a warning?
msg.info("All trees in dev data present in training data.")
if gold_train_data["n_low_cardinality_lemmas"] > 0:
n = gold_train_data["n_low_cardinality_lemmas"]
msg.warn(f"{n} training docs with 0 or 1 unique lemmas.")
if gold_dev_data["n_low_cardinality_lemmas"] > 0:
n = gold_dev_data["n_low_cardinality_lemmas"]
msg.warn(f"{n} dev docs with 0 or 1 unique lemmas.")
if gold_train_data["no_lemma_annotations"] > 0:
n = gold_train_data["no_lemma_annotations"]
msg.warn(f"{n} training docs with no lemma annotations.")
else:
msg.good("All training docs have lemma annotations.")
if gold_dev_data["no_lemma_annotations"] > 0:
n = gold_dev_data["no_lemma_annotations"]
msg.warn(f"{n} dev docs with no lemma annotations.")
else:
msg.good("All dev docs have lemma annotations.")
if gold_train_data["partial_lemma_annotations"] > 0:
n = gold_train_data["partial_lemma_annotations"]
msg.info(f"{n} training docs with partial lemma annotations.")
else:
msg.good("All training docs have complete lemma annotations.")
if gold_dev_data["partial_lemma_annotations"] > 0:
n = gold_dev_data["partial_lemma_annotations"]
msg.info(f"{n} dev docs with partial lemma annotations.")
else:
msg.good("All dev docs have complete lemma annotations.")
msg.divider("Summary")
good_counts = msg.counts[MESSAGES.GOOD]
warn_counts = msg.counts[MESSAGES.WARN]
@@ -732,7 +785,13 @@ def _compile_gold(
"n_cats_multilabel": 0,
"n_cats_bad_values": 0,
"texts": set(),
+"lemmatizer_trees": set(),
+"no_lemma_annotations": 0,
+"partial_lemma_annotations": 0,
+"n_low_cardinality_lemmas": 0,
}
+if "trainable_lemmatizer" in factory_names:
+trees = EditTrees(nlp.vocab.strings)
for eg in examples:
gold = eg.reference
doc = eg.predicted
@@ -862,6 +921,25 @@ def _compile_gold(
data["n_nonproj"] += 1
if nonproj.contains_cycle(aligned_heads):
data["n_cycles"] += 1
if "trainable_lemmatizer" in factory_names:
# from EditTreeLemmatizer._labels_from_data
if all(token.lemma == 0 for token in gold):
data["no_lemma_annotations"] += 1
continue
if any(token.lemma == 0 for token in gold):
data["partial_lemma_annotations"] += 1
lemma_set = set()
for token in gold:
if token.lemma != 0:
lemma_set.add(token.lemma)
tree_id = trees.add(token.text, token.lemma_)
tree_str = trees.tree_to_str(tree_id)
data["lemmatizer_trees"].add(tree_str)
# We want to identify cases where lemmas aren't assigned
# or are all assigned the same value, as this would indicate
# an issue since we're expecting a large set of lemmas
if len(lemma_set) < 2 and len(gold) > 1:
data["n_low_cardinality_lemmas"] += 1
return data

@@ -7,12 +7,15 @@ from thinc.api import fix_random_seed
from ..training import Corpus
from ..tokens import Doc
-from ._util import app, Arg, Opt, setup_gpu, import_code
+from ._util import app, Arg, Opt, setup_gpu, import_code, benchmark_cli
from ..scorer import Scorer
from .. import util
from .. import displacy
+@benchmark_cli.command(
+"accuracy",
+)
@app.command("evaluate")
def evaluate_cli(
# fmt: off
@@ -36,7 +39,7 @@ def evaluate_cli(
dependency parses in a HTML file, set as output directory as the
displacy_path argument.
-DOCS: https://spacy.io/api/cli#evaluate
+DOCS: https://spacy.io/api/cli#benchmark-accuracy
"""
import_code(code_path)
evaluate(

@@ -22,19 +22,6 @@ try:
except ImportError:
cupy = None
-if sys.version_info[:2] >= (3, 8): # Python 3.8+
-from typing import Literal, Protocol, runtime_checkable
-else:
-from typing_extensions import Literal, Protocol, runtime_checkable # noqa: F401
-# Important note: The importlib_metadata "backport" includes functionality
-# that's not part of the built-in importlib.metadata. We should treat this
-# import like the built-in and only use what's available there.
-try: # Python 3.8+
-import importlib.metadata as importlib_metadata
-except ImportError:
-from catalogue import _importlib_metadata as importlib_metadata # type: ignore[no-redef] # noqa: F401
from thinc.api import Optimizer # noqa: F401
pickle = pickle

@@ -106,9 +106,7 @@ def serve(
if is_in_jupyter():
warnings.warn(Warnings.W011)
-render(
-docs, style=style, page=page, minify=minify, options=options, manual=manual
-)
+render(docs, style=style, page=page, minify=minify, options=options, manual=manual)
httpd = simple_server.make_server(host, port, app)
print(f"\nUsing the '{style}' visualizer")
print(f"Serving on http://{host}:{port} ...\n")

@@ -1,5 +1,5 @@
+from typing import Literal
import warnings
-from .compat import Literal
class ErrorsWithCodes(type):
@@ -949,8 +949,8 @@ class Errors(metaclass=ErrorsWithCodes):
E1047 = ("`find_threshold()` only supports components with a `scorer` attribute.")
E1048 = ("Got '{unexpected}' as console progress bar type, but expected one of the following: {expected}")
E1049 = ("No available port found for displaCy on host {host}. Please specify an available port "
-"with `displacy.serve(doc, port)`")
+"with `displacy.serve(doc, port=port)`")
-E1050 = ("Port {port} is already in use. Please specify an available port with `displacy.serve(doc, port)` "
+E1050 = ("Port {port} is already in use. Please specify an available port with `displacy.serve(doc, port=port)` "
"or use `auto_switch_port=True` to pick an available port automatically.")
# v4 error strings

@@ -25,7 +25,7 @@ cdef class InMemoryLookupKB(KnowledgeBase):
"""An `InMemoryLookupKB` instance stores unique identifiers for entities and their textual aliases,
to support entity linking of named entities to real-world concepts.
-DOCS: https://spacy.io/api/kb_in_memory
+DOCS: https://spacy.io/api/inmemorylookupkb
"""
def __init__(self, Vocab vocab, entity_vector_length):

@@ -1,4 +1,4 @@
-from typing import Iterator, Optional, Any, Dict, Callable, Iterable
+from typing import Iterator, Optional, Any, Dict, Callable, Iterable, Literal
from typing import Union, Tuple, List, Set, Pattern, Sequence
from typing import NoReturn, TYPE_CHECKING, TypeVar, cast, overload
@@ -22,7 +22,7 @@ from . import ty
from .tokens.underscore import Underscore
from .vocab import Vocab, create_vocab
from .pipe_analysis import validate_attrs, analyze_pipes, print_pipe_analysis
-from .training import Example, validate_examples
+from .training import Example, validate_examples, validate_distillation_examples
from .training.initialize import init_vocab, init_tok2vec
from .scorer import Scorer
from .util import registry, SimpleFrozenList, _pipe, raise_error, _DEFAULT_EMPTY_PIPES
@@ -40,7 +40,6 @@ from .git_info import GIT_VERSION
from . import util
from . import about
from .lookups import load_lookups
-from .compat import Literal
PipeCallable = Callable[[Doc], Doc]
@@ -1018,6 +1017,102 @@ class Language:
raise ValueError(Errors.E005.format(name=name, returned_type=type(doc)))
return doc
def distill(
self,
teacher: "Language",
examples: Iterable[Example],
*,
drop: float = 0.0,
sgd: Optional[Optimizer] = None,
losses: Optional[Dict[str, float]] = None,
component_cfg: Optional[Dict[str, Dict[str, Any]]] = None,
exclude: Iterable[str] = SimpleFrozenList(),
annotates: Iterable[str] = SimpleFrozenList(),
student_to_teacher: Optional[Dict[str, str]] = None,
):
"""Distill the models in a student pipeline from a teacher pipeline.
teacher (Language): Teacher to distill from.
examples (Iterable[Example]): Distillation examples. The reference
(teacher) and predicted (student) docs must have the same number of
tokens and the same orthography.
drop (float): The dropout rate.
sgd (Optional[Optimizer]): An optimizer.
losses (Optional[Dict[str, float]]): Dictionary to update with the loss,
keyed by component.
component_cfg (Optional[Dict[str, Dict[str, Any]]]): Config parameters
for specific pipeline components, keyed by component name.
exclude (Iterable[str]): Names of components that shouldn't be updated.
annotates (Iterable[str]): Names of components that should set
annotations on the predicted examples after updating.
student_to_teacher (Optional[Dict[str, str]]): Map student pipe name to
teacher pipe name, only needed for pipes where the student pipe
name does not match the teacher pipe name.
RETURNS (Dict[str, float]): The updated losses dictionary
DOCS: https://spacy.io/api/language#distill
"""
if student_to_teacher is None:
student_to_teacher = {}
if losses is None:
losses = {}
if isinstance(examples, list) and len(examples) == 0:
return losses
validate_distillation_examples(examples, "Language.distill")
examples = _copy_examples(examples)
if sgd is None:
if self._optimizer is None:
self._optimizer = self.create_optimizer()
sgd = self._optimizer
if component_cfg is None:
component_cfg = {}
pipe_kwargs = {}
for student_name, student_proc in self.pipeline:
component_cfg.setdefault(student_name, {})
pipe_kwargs[student_name] = deepcopy(component_cfg[student_name])
component_cfg[student_name].setdefault("drop", drop)
pipe_kwargs[student_name].setdefault("batch_size", self.batch_size)
teacher_pipes = dict(teacher.pipeline)
for student_name, student_proc in self.pipeline:
if student_name in annotates:
for doc, eg in zip(
_pipe(
(eg.predicted for eg in examples),
proc=student_proc,
name=student_name,
default_error_handler=self.default_error_handler,
kwargs=pipe_kwargs[student_name],
),
examples,
):
eg.predicted = doc
if (
student_name not in exclude
and isinstance(student_proc, ty.DistillableComponent)
and student_proc.is_distillable
):
# A missing teacher pipe is not an error, some student pipes
# do not need a teacher, such as tok2vec layer losses.
teacher_name = (
student_to_teacher[student_name]
if student_name in student_to_teacher
else student_name
)
teacher_pipe = teacher_pipes.get(teacher_name, None)
student_proc.distill(
teacher_pipe,
examples,
sgd=sgd,
losses=losses,
**component_cfg[student_name],
)
return losses
def disable_pipes(self, *names) -> "DisabledPipes":
"""Disable one or more pipeline components. If used as a context
manager, the pipeline will be restored to the initial state at the end
@@ -1243,12 +1338,16 @@ class Language:
self,
get_examples: Optional[Callable[[], Iterable[Example]]] = None,
*,
+labels: Optional[Dict[str, Any]] = None,
sgd: Optional[Optimizer] = None,
) -> Optimizer:
"""Initialize the pipe for training, using data examples if available.
get_examples (Callable[[], Iterable[Example]]): Optional function that
returns gold-standard Example objects.
+labels (Optional[Dict[str, Any]]): Labels to pass to pipe initialization,
+using the names of the pipes as keys. Overrides labels that are in
+the model configuration.
sgd (Optional[Optimizer]): An optimizer to use for updates. If not
provided, will be created using the .create_optimizer() method.
RETURNS (thinc.api.Optimizer): The optimizer.
@@ -1293,6 +1392,8 @@ class Language:
for name, proc in self.pipeline:
if isinstance(proc, ty.InitializableComponent):
p_settings = I["components"].get(name, {})
+if labels is not None and name in labels:
+p_settings["labels"] = labels[name]
p_settings = validate_init_settings(
proc.initialize, p_settings, section="components", name=name
)
@@ -1726,6 +1827,7 @@ class Language:
# using the nlp.config with all defaults.
config = util.copy_config(config)
orig_pipeline = config.pop("components", {})
+orig_distill = config.pop("distill", None)
orig_pretraining = config.pop("pretraining", None)
config["components"] = {}
if auto_fill:
@@ -1734,6 +1836,9 @@ class Language:
filled = config
filled["components"] = orig_pipeline
config["components"] = orig_pipeline
+if orig_distill is not None:
+filled["distill"] = orig_distill
+config["distill"] = orig_distill
if orig_pretraining is not None:
filled["pretraining"] = orig_pretraining
config["pretraining"] = orig_pretraining
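
Taken together, the `language.py` changes above add `Language.distill` and a `labels` argument to `Language.initialize`. A condensed sketch of the intended flow, closely following the `test_distill` test added near the end of this diff; the tagger component and toy data are illustrative, and the API only exists on the v4 branch being merged:

# Sketch based on test_distill; toy data, not a recommended training setup.
import spacy
from spacy.training import Example

TRAIN_DATA = [("I like green eggs", {"tags": ["N", "V", "J", "N"]})]

teacher = spacy.blank("en")
teacher_tagger = teacher.add_pipe("tagger")
examples = [Example.from_dict(teacher.make_doc(t), a) for t, a in TRAIN_DATA]
optimizer = teacher.initialize(get_examples=lambda: examples)
for _ in range(50):
    teacher.update(examples, sgd=optimizer)

student = spacy.blank("en")
student_tagger = student.add_pipe("tagger")
# Seed the student's tagger with the teacher's labels; Language.initialize now
# also accepts a labels dict keyed by component name (see the change above).
student_tagger.initialize(get_examples=lambda: examples, labels=teacher_tagger.label_data)

# Distillation examples carry raw docs only; no gold annotations are needed.
distill_examples = [Example.from_dict(teacher.make_doc(t), {}) for t, _ in TRAIN_DATA]
losses = {}
student.distill(teacher, distill_examples, losses=losses)
print(losses["tagger"])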

@@ -41,7 +41,7 @@ cdef class Lexeme:
"""
self.vocab = vocab
self.orth = orth
-self.c = <LexemeC*><void*>vocab.get_by_orth(vocab.mem, orth)
+self.c = <LexemeC*><void*>vocab.get_by_orth(orth)
if self.c.orth != orth:
raise ValueError(Errors.E071.format(orth=orth, vocab_orth=self.c.orth))

@@ -22,7 +22,7 @@ cpdef bint levenshtein_compare(input_text: str, pattern_text: str, fuzzy: int =
max_edits = fuzzy
else:
# allow at least two edits (to allow at least one transposition) and up
-# to 20% of the pattern string length
+# to 30% of the pattern string length
max_edits = max(2, round(0.3 * len(pattern_text)))
return levenshtein(input_text, pattern_text, max_edits) <= max_edits
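
For context, the 30% budget above is what the `Matcher`'s FUZZY predicate falls back to when no explicit edit count is given. An illustrative sketch (the pattern and token text are made up, and fuzzy matching requires spaCy 3.5+):

# Sketch: FUZZY delegates to levenshtein_compare, so the default budget is
# max(2, round(0.3 * len(pattern_text))) edits.
import spacy
from spacy.matcher import Matcher

nlp = spacy.blank("en")
matcher = Matcher(nlp.vocab)
matcher.add("DEFINITELY", [[{"LOWER": {"FUZZY": "definitely"}}]])

doc = nlp("I will definitly be there")  # the misspelling stays within the budget
print([doc[start:end].text for _, start, end in matcher(doc)])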

@@ -1,12 +1,15 @@
-from typing import Any, List, Dict, Tuple, Optional, Callable, Union
+from typing import Any, List, Dict, Tuple, Optional, Callable, Union, Literal
from typing import Iterator, Iterable, overload
-from ..compat import Literal
from ..vocab import Vocab
from ..tokens import Doc, Span
class Matcher:
-def __init__(self, vocab: Vocab, validate: bool = ...,
-fuzzy_compare: Callable[[str, str, int], bool] = ...) -> None: ...
+def __init__(
+self,
+vocab: Vocab,
+validate: bool = ...,
+fuzzy_compare: Callable[[str, str, int], bool] = ...,
+) -> None: ...
def __reduce__(self) -> Any: ...
def __len__(self) -> int: ...
def __contains__(self, key: str) -> bool: ...

@@ -1,5 +1,5 @@
-from typing import List, Tuple, Union, Optional, Callable, Any, Dict, overload
-from ..compat import Literal
+from typing import List, Tuple, Union, Optional, Callable, Any, Dict, Literal
+from typing import overload
from .matcher import Matcher
from ..vocab import Vocab
from ..tokens import Doc, Span

@@ -1,10 +1,9 @@
-from typing import Optional, List, Tuple, Any
+from typing import Optional, List, Tuple, Any, Literal
from thinc.types import Floats2d
from thinc.api import Model
import warnings
from ...errors import Errors, Warnings
-from ...compat import Literal
from ...util import registry
from ..tb_framework import TransitionModel
from ...tokens.doc import Doc

@@ -5,7 +5,7 @@ from itertools import islice
import numpy as np
import srsly
-from thinc.api import Config, Model
+from thinc.api import Config, Model, SequenceCategoricalCrossentropy, NumpyOps
from thinc.types import ArrayXd, Floats2d, Ints1d
from thinc.legacy import LegacySequenceCategoricalCrossentropy
@@ -22,6 +22,8 @@ from .. import util
ActivationsT = Dict[str, Union[List[Floats2d], List[Ints1d]]]
+# The cutoff value of *top_k* above which an alternative method is used to process guesses.
+TOP_K_GUARDRAIL = 20
default_model_config = """
@@ -125,6 +127,7 @@ class EditTreeLemmatizer(TrainablePipe):
self.cfg: Dict[str, Any] = {"labels": []}
self.scorer = scorer
self.save_activations = save_activations
+self.numpy_ops = NumpyOps()
def get_loss(
self, examples: Iterable[Example], scores: List[Floats2d]
@@ -140,7 +143,7 @@ class EditTreeLemmatizer(TrainablePipe):
for (predicted, gold_lemma) in zip(
eg.predicted, eg.get_aligned("LEMMA", as_string=True)
):
-if gold_lemma is None:
+if gold_lemma is None or gold_lemma == "":
label = -1
else:
tree_id = self.trees.add(predicted.text, gold_lemma)
@@ -165,7 +168,7 @@ class EditTreeLemmatizer(TrainablePipe):
student_scores: Scores representing the student model's predictions.
RETURNS (Tuple[float, float]): The loss and the gradient.
DOCS: https://spacy.io/api/edittreelemmatizer#get_teacher_student_loss
"""
loss_func = LegacySequenceCategoricalCrossentropy(normalize=False)
@@ -175,6 +178,18 @@ class EditTreeLemmatizer(TrainablePipe):
return float(loss), d_scores
def predict(self, docs: Iterable[Doc]) -> ActivationsT:
if self.top_k == 1:
scores2guesses = self._scores2guesses_top_k_equals_1
elif self.top_k <= TOP_K_GUARDRAIL:
scores2guesses = self._scores2guesses_top_k_greater_1
else:
scores2guesses = self._scores2guesses_top_k_guardrail
# The behaviour of *_scores2guesses_top_k_greater_1()* is efficient for values
# of *top_k>1* that are likely to be useful when the edit tree lemmatizer is used
# for its principal purpose of lemmatizing tokens. However, the code could also
# be used for other purposes, and with very large values of *top_k* the method
# becomes inefficient. In such cases, *_scores2guesses_top_k_guardrail()* is used
# instead.
n_docs = len(list(docs))
if not any(len(doc) for doc in docs):
# Handle cases where there are no tokens in any docs.
@@ -189,20 +204,52 @@ class EditTreeLemmatizer(TrainablePipe):
return {"probabilities": scores, "tree_ids": guesses}
scores = self.model.predict(docs)
assert len(scores) == n_docs
-guesses = self._scores2guesses(docs, scores)
+guesses = scores2guesses(docs, scores)
assert len(guesses) == n_docs
return {"probabilities": scores, "tree_ids": guesses}
-def _scores2guesses(self, docs, scores):
+def _scores2guesses_top_k_equals_1(self, docs, scores):
guesses = []
for doc, doc_scores in zip(docs, scores):
-if self.top_k == 1:
-doc_guesses = doc_scores.argmax(axis=1).reshape(-1, 1)
-else:
-doc_guesses = np.argsort(doc_scores)[..., : -self.top_k - 1 : -1]
-if not isinstance(doc_guesses, np.ndarray):
-doc_guesses = doc_guesses.get()
+doc_guesses = doc_scores.argmax(axis=1)
+doc_guesses = self.numpy_ops.asarray(doc_guesses)
+doc_compat_guesses = []
+for i, token in enumerate(doc):
+tree_id = self.cfg["labels"][doc_guesses[i]]
+if self.trees.apply(tree_id, token.text) is not None:
+doc_compat_guesses.append(tree_id)
+else:
+doc_compat_guesses.append(-1)
+guesses.append(np.array(doc_compat_guesses))
+return guesses
def _scores2guesses_top_k_greater_1(self, docs, scores):
guesses = []
top_k = min(self.top_k, len(self.labels))
for doc, doc_scores in zip(docs, scores):
doc_scores = self.numpy_ops.asarray(doc_scores)
doc_compat_guesses = []
for i, token in enumerate(doc):
for _ in range(top_k):
candidate = int(doc_scores[i].argmax())
candidate_tree_id = self.cfg["labels"][candidate]
if self.trees.apply(candidate_tree_id, token.text) is not None:
doc_compat_guesses.append(candidate_tree_id)
break
doc_scores[i, candidate] = np.finfo(np.float32).min
else:
doc_compat_guesses.append(-1)
guesses.append(np.array(doc_compat_guesses))
return guesses
def _scores2guesses_top_k_guardrail(self, docs, scores):
guesses = []
for doc, doc_scores in zip(docs, scores):
doc_guesses = np.argsort(doc_scores)[..., : -self.top_k - 1 : -1]
doc_guesses = self.numpy_ops.asarray(doc_guesses)
doc_compat_guesses = []
for token, candidates in zip(doc, doc_guesses):

@@ -459,7 +459,11 @@ class EntityLinker(TrainablePipe):
docs_ents: List[Ragged] = []
docs_scores: List[Ragged] = []
if not docs:
-return {KNOWLEDGE_BASE_IDS: final_kb_ids, "ents": docs_ents, "scores": docs_scores}
+return {
+KNOWLEDGE_BASE_IDS: final_kb_ids,
+"ents": docs_ents,
+"scores": docs_scores,
+}
if isinstance(docs, Doc):
docs = [docs]
for doc in docs:
@@ -591,7 +595,11 @@ class EntityLinker(TrainablePipe):
method="predict", msg="result variables not of equal length"
)
raise RuntimeError(err)
-return {KNOWLEDGE_BASE_IDS: final_kb_ids, "ents": docs_ents, "scores": docs_scores}
+return {
+KNOWLEDGE_BASE_IDS: final_kb_ids,
+"ents": docs_ents,
+"scores": docs_scores,
+}
def set_annotations(self, docs: Iterable[Doc], activations: ActivationsT) -> None:
"""Modify a batch of documents, using pre-computed scores.

@@ -252,8 +252,11 @@ class EntityRecognizer(Parser):
def labels(self):
# Get the labels from the model by looking at the available moves, e.g.
# B-PERSON, I-PERSON, L-PERSON, U-PERSON
-labels = set(remove_bilu_prefix(move) for move in self.move_names
-if move[0] in ("B", "I", "L", "U"))
+labels = set(
+remove_bilu_prefix(move)
+for move in self.move_names
+if move[0] in ("B", "I", "L", "U")
+)
return tuple(sorted(labels))
def scored_ents(self, beams):

@@ -1,12 +1,11 @@
from typing import List, Dict, Callable, Tuple, Optional, Iterable, Any, cast
-from typing import Union
+from typing import Union, Protocol, runtime_checkable
from thinc.api import Config, Model, get_current_ops, set_dropout_rate, Ops
from thinc.api import Optimizer
from thinc.types import Ragged, Ints2d, Floats2d
import numpy
-from ..compat import Protocol, runtime_checkable
from ..scorer import Scorer
from ..language import Language
from .trainable_pipe import TrainablePipe

@@ -71,8 +71,8 @@ cdef class TrainablePipe(Pipe):
teacher_pipe (Optional[TrainablePipe]): The teacher pipe to learn
from.
examples (Iterable[Example]): Distillation examples. The reference
-and predicted docs must have the same number of tokens and the
-same orthography.
+(teacher) and predicted (student) docs must have the same number of
+tokens and the same orthography.
drop (float): dropout rate.
sgd (Optional[Optimizer]): An optimizer. Will be created via
create_optimizer if not set.

@@ -224,8 +224,8 @@ class Parser(TrainablePipe):
teacher_pipe (Optional[TrainablePipe]): The teacher pipe to learn
from.
examples (Iterable[Example]): Distillation examples. The reference
-and predicted docs must have the same number of tokens and the
-same orthography.
+(teacher) and predicted (student) docs must have the same number of
+tokens and the same orthography.
drop (float): dropout rate.
sgd (Optional[Optimizer]): An optimizer. Will be created via
create_optimizer if not set.

@@ -1,6 +1,5 @@
from typing import Dict, List, Union, Optional, Any, Callable, Type, Tuple
-from typing import Iterable, TypeVar, TYPE_CHECKING
+from typing import Iterable, TypeVar, Literal, TYPE_CHECKING
-from .compat import Literal
from enum import Enum
from pydantic import BaseModel, Field, ValidationError, validator, create_model
from pydantic import StrictStr, StrictInt, StrictFloat, StrictBool, ConstrainedStr
@@ -163,15 +162,33 @@ class TokenPatternString(BaseModel):
IS_SUPERSET: Optional[List[StrictStr]] = Field(None, alias="is_superset")
INTERSECTS: Optional[List[StrictStr]] = Field(None, alias="intersects")
FUZZY: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy")
-FUZZY1: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy1")
-FUZZY2: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy2")
-FUZZY3: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy3")
-FUZZY4: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy4")
-FUZZY5: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy5")
-FUZZY6: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy6")
-FUZZY7: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy7")
-FUZZY8: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy8")
-FUZZY9: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy9")
+FUZZY1: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
+None, alias="fuzzy1"
+)
+FUZZY2: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
+None, alias="fuzzy2"
+)
+FUZZY3: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
+None, alias="fuzzy3"
+)
+FUZZY4: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
+None, alias="fuzzy4"
+)
+FUZZY5: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
+None, alias="fuzzy5"
+)
+FUZZY6: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
+None, alias="fuzzy6"
+)
+FUZZY7: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
+None, alias="fuzzy7"
+)
+FUZZY8: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
+None, alias="fuzzy8"
+)
+FUZZY9: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
+None, alias="fuzzy9"
+)
class Config:
extra = "forbid"
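
The numbered variants reformatted above (`FUZZY1` through `FUZZY9`) cap the edit distance explicitly instead of relying on the default budget. A short sketch, with a made-up pattern and text:

# Sketch: FUZZY{n} allows at most n edits; "kitten" matches (0 edits),
# while "sitting" would need 3 and is rejected.
import spacy
from spacy.matcher import Matcher

nlp = spacy.blank("en")
matcher = Matcher(nlp.vocab)
matcher.add("KITTEN", [[{"LOWER": {"FUZZY2": "kitten"}}]])

doc = nlp("I adopted a kitten, not a sitting cat")
print([doc[start:end].text for _, start, end in matcher(doc)])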

@@ -103,14 +103,15 @@ def test_initialize_from_labels():
}
-def test_no_data():
+@pytest.mark.parametrize("top_k", (1, 5, 30))
+def test_no_data(top_k):
# Test that the lemmatizer provides a nice error when there's no tagging data / labels
TEXTCAT_DATA = [
("I'm so happy.", {"cats": {"POSITIVE": 1.0, "NEGATIVE": 0.0}}),
("I'm so angry", {"cats": {"POSITIVE": 0.0, "NEGATIVE": 1.0}}),
]
nlp = English()
-nlp.add_pipe("trainable_lemmatizer")
+nlp.add_pipe("trainable_lemmatizer", config={"top_k": top_k})
nlp.add_pipe("textcat")
train_examples = []
@@ -121,10 +122,11 @@ def test_no_data():
nlp.initialize(get_examples=lambda: train_examples)
-def test_incomplete_data():
+@pytest.mark.parametrize("top_k", (1, 5, 30))
+def test_incomplete_data(top_k):
# Test that the lemmatizer works with incomplete information
nlp = English()
-lemmatizer = nlp.add_pipe("trainable_lemmatizer")
+lemmatizer = nlp.add_pipe("trainable_lemmatizer", config={"top_k": top_k})
lemmatizer.min_tree_freq = 1
train_examples = []
for t in PARTIAL_DATA:
@@ -141,10 +143,25 @@ def test_incomplete_data():
assert doc[1].lemma_ == "like"
assert doc[2].lemma_ == "blue"
+# Check that incomplete annotations are ignored.
+scores, _ = lemmatizer.model([eg.predicted for eg in train_examples], is_train=True)
+_, dX = lemmatizer.get_loss(train_examples, scores)
+xp = lemmatizer.model.ops.xp
+# Missing annotations.
+assert xp.count_nonzero(dX[0][0]) == 0
+assert xp.count_nonzero(dX[0][3]) == 0
+assert xp.count_nonzero(dX[1][0]) == 0
+assert xp.count_nonzero(dX[1][3]) == 0
+# Misaligned annotations.
+assert xp.count_nonzero(dX[1][1]) == 0
-def test_overfitting_IO():
+@pytest.mark.parametrize("top_k", (1, 5, 30))
+def test_overfitting_IO(top_k):
nlp = English()
-lemmatizer = nlp.add_pipe("trainable_lemmatizer")
+lemmatizer = nlp.add_pipe("trainable_lemmatizer", config={"top_k": top_k})
lemmatizer.min_tree_freq = 1
train_examples = []
for t in TRAIN_DATA:
@@ -177,7 +194,7 @@ def test_overfitting_IO():
# Check model after a {to,from}_bytes roundtrip
nlp_bytes = nlp.to_bytes()
nlp3 = English()
-nlp3.add_pipe("trainable_lemmatizer")
+nlp3.add_pipe("trainable_lemmatizer", config={"top_k": top_k})
nlp3.from_bytes(nlp_bytes)
doc3 = nlp3(test_text)
assert doc3[0].lemma_ == "she"

@@ -618,7 +618,6 @@ def test_string_to_list_intify(value):
assert string_to_list(value, intify=True) == [1, 2, 3]
-@pytest.mark.skip(reason="Temporarily skip for dev version")
def test_download_compatibility():
spec = SpecifierSet("==" + about.__version__)
spec.prereleases = False
@@ -629,7 +628,6 @@ def test_download_compatibility():
assert get_minor_version(about.__version__) == get_minor_version(version)
-@pytest.mark.skip(reason="Temporarily skip for dev version")
def test_validate_compatibility_table():
spec = SpecifierSet("==" + about.__version__)
spec.prereleases = False
@@ -1076,7 +1074,7 @@ def test_cli_find_threshold(capsys):
)
with make_tempdir() as nlp_dir:
nlp.to_disk(nlp_dir)
-res = find_threshold(
+best_threshold, best_score, res = find_threshold(
model=nlp_dir,
data_path=docs_dir / "docs.spacy",
pipe_name="tc_multi",
@@ -1084,10 +1082,10 @@ def test_cli_find_threshold(capsys):
scores_key="cats_macro_f",
silent=True,
)
-assert res[0] != thresholds[0]
+assert best_threshold != thresholds[0]
-assert thresholds[0] < res[0] < thresholds[9]
+assert thresholds[0] < best_threshold < thresholds[9]
-assert res[1] == 1.0
+assert best_score == max(res.values())
-assert res[2][1.0] == 0.0
+assert res[1.0] == 0.0
# Test with spancat.
nlp, _ = init_nlp((("spancat", {}),))
@@ -1209,3 +1207,69 @@ def test_walk_directory():
assert (len(walk_directory(d, suffix="iob"))) == 2
assert (len(walk_directory(d, suffix="conll"))) == 3
assert (len(walk_directory(d, suffix="pdf"))) == 0
def test_debug_data_trainable_lemmatizer_basic():
examples = [
("She likes green eggs", {"lemmas": ["she", "like", "green", "egg"]}),
("Eat blue ham", {"lemmas": ["eat", "blue", "ham"]}),
]
nlp = Language()
train_examples = []
for t in examples:
train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
data = _compile_gold(train_examples, ["trainable_lemmatizer"], nlp, True)
# ref test_edit_tree_lemmatizer::test_initialize_from_labels
# this results in 4 trees
assert len(data["lemmatizer_trees"]) == 4
def test_debug_data_trainable_lemmatizer_partial():
partial_examples = [
# partial annotation
("She likes green eggs", {"lemmas": ["", "like", "green", ""]}),
# misaligned partial annotation
(
"He hates green eggs",
{
"words": ["He", "hat", "es", "green", "eggs"],
"lemmas": ["", "hat", "e", "green", ""],
},
),
]
nlp = Language()
train_examples = []
for t in partial_examples:
train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
data = _compile_gold(train_examples, ["trainable_lemmatizer"], nlp, True)
assert data["partial_lemma_annotations"] == 2
def test_debug_data_trainable_lemmatizer_low_cardinality():
low_cardinality_examples = [
("She likes green eggs", {"lemmas": ["no", "no", "no", "no"]}),
("Eat blue ham", {"lemmas": ["no", "no", "no"]}),
]
nlp = Language()
train_examples = []
for t in low_cardinality_examples:
train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
data = _compile_gold(train_examples, ["trainable_lemmatizer"], nlp, True)
assert data["n_low_cardinality_lemmas"] == 2
def test_debug_data_trainable_lemmatizer_not_annotated():
unannotated_examples = [
("She likes green eggs", {}),
("Eat blue ham", {}),
]
nlp = Language()
train_examples = []
for t in unannotated_examples:
train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
data = _compile_gold(train_examples, ["trainable_lemmatizer"], nlp, True)
assert data["no_lemma_annotations"] == 2

@@ -1,6 +1,7 @@
import os
from pathlib import Path
from typer.testing import CliRunner
+from spacy.tokens import DocBin, Doc
from spacy.cli._util import app
from .util import make_tempdir
@@ -31,3 +32,60 @@ def test_convert_auto_conflict():
assert "All input files must be same type" in result.stdout
out_files = os.listdir(d_out)
assert len(out_files) == 0
def test_benchmark_accuracy_alias():
# Verify that the `evaluate` alias works correctly.
result_benchmark = CliRunner().invoke(app, ["benchmark", "accuracy", "--help"])
result_evaluate = CliRunner().invoke(app, ["evaluate", "--help"])
assert result_benchmark.stdout == result_evaluate.stdout.replace(
"spacy evaluate", "spacy benchmark accuracy"
)
def test_debug_data_trainable_lemmatizer_cli(en_vocab):
train_docs = [
Doc(en_vocab, words=["I", "like", "cats"], lemmas=["I", "like", "cat"]),
Doc(
en_vocab,
words=["Dogs", "are", "great", "too"],
lemmas=["dog", "be", "great", "too"],
),
]
dev_docs = [
Doc(en_vocab, words=["Cats", "are", "cute"], lemmas=["cat", "be", "cute"]),
Doc(en_vocab, words=["Pets", "are", "great"], lemmas=["pet", "be", "great"]),
]
with make_tempdir() as d_in:
train_bin = DocBin(docs=train_docs)
train_bin.to_disk(d_in / "train.spacy")
dev_bin = DocBin(docs=dev_docs)
dev_bin.to_disk(d_in / "dev.spacy")
# `debug data` requires an input pipeline config
CliRunner().invoke(
app,
[
"init",
"config",
f"{d_in}/config.cfg",
"--lang",
"en",
"--pipeline",
"trainable_lemmatizer",
],
)
result_debug_data = CliRunner().invoke(
app,
[
"debug",
"data",
f"{d_in}/config.cfg",
"--paths.train",
f"{d_in}/train.spacy",
"--paths.dev",
f"{d_in}/dev.spacy",
],
)
# Instead of checking specific wording of the output, which may change,
# we'll check that this section of the debug output is present.
assert "= Trainable Lemmatizer =" in result_debug_data.stdout

View File

@ -26,6 +26,12 @@ except ImportError:
pass pass
TAGGER_TRAIN_DATA = [
("I like green eggs", {"tags": ["N", "V", "J", "N"]}),
("Eat blue ham", {"tags": ["V", "J", "N"]}),
]
def evil_component(doc): def evil_component(doc):
if "2" in doc.text: if "2" in doc.text:
raise ValueError("no dice") raise ValueError("no dice")
@ -799,3 +805,66 @@ def test_component_return():
nlp.add_pipe("test_component_bad_pipe") nlp.add_pipe("test_component_bad_pipe")
with pytest.raises(ValueError, match="instead of a Doc"): with pytest.raises(ValueError, match="instead of a Doc"):
nlp("text") nlp("text")
@pytest.mark.slow
@pytest.mark.parametrize("teacher_tagger_name", ["tagger", "teacher_tagger"])
def test_distill(teacher_tagger_name):
teacher = English()
teacher_tagger = teacher.add_pipe("tagger", name=teacher_tagger_name)
train_examples = []
for t in TAGGER_TRAIN_DATA:
train_examples.append(Example.from_dict(teacher.make_doc(t[0]), t[1]))
optimizer = teacher.initialize(get_examples=lambda: train_examples)
for i in range(50):
losses = {}
teacher.update(train_examples, sgd=optimizer, losses=losses)
assert losses[teacher_tagger_name] < 0.00001
student = English()
student_tagger = student.add_pipe("tagger")
student_tagger.min_tree_freq = 1
student_tagger.initialize(
get_examples=lambda: train_examples, labels=teacher_tagger.label_data
)
distill_examples = [
Example.from_dict(teacher.make_doc(t[0]), {}) for t in TAGGER_TRAIN_DATA
]
student_to_teacher = (
None
if teacher_tagger.name == student_tagger.name
else {student_tagger.name: teacher_tagger.name}
)
for i in range(50):
losses = {}
student.distill(
teacher,
distill_examples,
sgd=optimizer,
losses=losses,
student_to_teacher=student_to_teacher,
)
assert losses["tagger"] < 0.00001
test_text = "I like blue eggs"
doc = student(test_text)
assert doc[0].tag_ == "N"
assert doc[1].tag_ == "V"
assert doc[2].tag_ == "J"
assert doc[3].tag_ == "N"
    # Do an extra update to check that `annotates` works, though we can't really
    # validate the results, since the annotations are ephemeral.
student.distill(
teacher,
distill_examples,
sgd=optimizer,
losses=losses,
student_to_teacher=student_to_teacher,
annotates=["tagger"],
)

View File

@ -0,0 +1,78 @@
from typing import IO, Generator, Iterable, List, TextIO, Tuple
from contextlib import contextmanager
from pathlib import Path
import pytest
import tempfile
from spacy.lang.en import English
from spacy.training import Example, PlainTextCorpus
from spacy.util import make_tempdir
# Intentional newlines to check that they are skipped.
PLAIN_TEXT_DOC = """
This is a doc. It contains two sentences.
This is another doc.
A third doc.
"""
PLAIN_TEXT_DOC_TOKENIZED = [
[
"This",
"is",
"a",
"doc",
".",
"It",
"contains",
"two",
"sentences",
".",
],
["This", "is", "another", "doc", "."],
["A", "third", "doc", "."],
]
@pytest.mark.parametrize("min_length", [0, 5])
@pytest.mark.parametrize("max_length", [0, 5])
def test_plain_text_reader(min_length, max_length):
nlp = English()
with _string_to_tmp_file(PLAIN_TEXT_DOC) as file_path:
corpus = PlainTextCorpus(
file_path, min_length=min_length, max_length=max_length
)
check = [
doc
for doc in PLAIN_TEXT_DOC_TOKENIZED
if len(doc) >= min_length and (max_length == 0 or len(doc) <= max_length)
]
reference, predicted = _examples_to_tokens(corpus(nlp))
assert reference == check
assert predicted == check
@contextmanager
def _string_to_tmp_file(s: str) -> Generator[Path, None, None]:
with make_tempdir() as d:
file_path = Path(d) / "string.txt"
with open(file_path, "w", encoding="utf-8") as f:
f.write(s)
yield file_path
def _examples_to_tokens(
examples: Iterable[Example],
) -> Tuple[List[List[str]], List[List[str]]]:
reference = []
predicted = []
for eg in examples:
reference.append([t.text for t in eg.reference])
predicted.append([t.text for t in eg.predicted])
return reference, predicted

View File

@ -37,7 +37,7 @@ cdef class Tokenizer:
bint with_special_cases) except -1 bint with_special_cases) except -1
cdef int _tokenize(self, Doc tokens, str span, hash_t key, cdef int _tokenize(self, Doc tokens, str span, hash_t key,
int* has_special, bint with_special_cases) except -1 int* has_special, bint with_special_cases) except -1
cdef str _split_affixes(self, Pool mem, str string, cdef str _split_affixes(self, str string,
vector[LexemeC*] *prefixes, vector[LexemeC*] *prefixes,
vector[LexemeC*] *suffixes, int* has_special, vector[LexemeC*] *suffixes, int* has_special,
bint with_special_cases) bint with_special_cases)

View File

@ -389,14 +389,14 @@ cdef class Tokenizer:
cdef vector[LexemeC*] suffixes cdef vector[LexemeC*] suffixes
cdef int orig_size cdef int orig_size
orig_size = tokens.length orig_size = tokens.length
span = self._split_affixes(tokens.mem, span, &prefixes, &suffixes, span = self._split_affixes(span, &prefixes, &suffixes,
has_special, with_special_cases) has_special, with_special_cases)
self._attach_tokens(tokens, span, &prefixes, &suffixes, has_special, self._attach_tokens(tokens, span, &prefixes, &suffixes, has_special,
with_special_cases) with_special_cases)
self._save_cached(&tokens.c[orig_size], orig_key, has_special, self._save_cached(&tokens.c[orig_size], orig_key, has_special,
tokens.length - orig_size) tokens.length - orig_size)
cdef str _split_affixes(self, Pool mem, str string, cdef str _split_affixes(self, str string,
vector[const LexemeC*] *prefixes, vector[const LexemeC*] *prefixes,
vector[const LexemeC*] *suffixes, vector[const LexemeC*] *suffixes,
int* has_special, int* has_special,
@ -419,7 +419,7 @@ cdef class Tokenizer:
minus_pre = string[pre_len:] minus_pre = string[pre_len:]
if minus_pre and with_special_cases and self._specials.get(hash_string(minus_pre)) != NULL: if minus_pre and with_special_cases and self._specials.get(hash_string(minus_pre)) != NULL:
string = minus_pre string = minus_pre
prefixes.push_back(self.vocab.get(mem, prefix)) prefixes.push_back(self.vocab.get(prefix))
break break
suf_len = self.find_suffix(string[pre_len:]) suf_len = self.find_suffix(string[pre_len:])
if suf_len != 0: if suf_len != 0:
@ -427,18 +427,18 @@ cdef class Tokenizer:
minus_suf = string[:-suf_len] minus_suf = string[:-suf_len]
if minus_suf and with_special_cases and self._specials.get(hash_string(minus_suf)) != NULL: if minus_suf and with_special_cases and self._specials.get(hash_string(minus_suf)) != NULL:
string = minus_suf string = minus_suf
suffixes.push_back(self.vocab.get(mem, suffix)) suffixes.push_back(self.vocab.get(suffix))
break break
if pre_len and suf_len and (pre_len + suf_len) <= len(string): if pre_len and suf_len and (pre_len + suf_len) <= len(string):
string = string[pre_len:-suf_len] string = string[pre_len:-suf_len]
prefixes.push_back(self.vocab.get(mem, prefix)) prefixes.push_back(self.vocab.get(prefix))
suffixes.push_back(self.vocab.get(mem, suffix)) suffixes.push_back(self.vocab.get(suffix))
elif pre_len: elif pre_len:
string = minus_pre string = minus_pre
prefixes.push_back(self.vocab.get(mem, prefix)) prefixes.push_back(self.vocab.get(prefix))
elif suf_len: elif suf_len:
string = minus_suf string = minus_suf
suffixes.push_back(self.vocab.get(mem, suffix)) suffixes.push_back(self.vocab.get(suffix))
return string return string
cdef int _attach_tokens(self, Doc tokens, str string, cdef int _attach_tokens(self, Doc tokens, str string,
@ -465,11 +465,11 @@ cdef class Tokenizer:
# We're always saying 'no' to spaces here -- the caller will # We're always saying 'no' to spaces here -- the caller will
# fix up the outermost one, with reference to the original. # fix up the outermost one, with reference to the original.
# See Issue #859 # See Issue #859
tokens.push_back(self.vocab.get(tokens.mem, string), False) tokens.push_back(self.vocab.get(string), False)
else: else:
matches = self.find_infix(string) matches = self.find_infix(string)
if not matches: if not matches:
tokens.push_back(self.vocab.get(tokens.mem, string), False) tokens.push_back(self.vocab.get(string), False)
else: else:
# Let's say we have dyn-o-mite-dave - the regex finds the # Let's say we have dyn-o-mite-dave - the regex finds the
# start and end positions of the hyphens # start and end positions of the hyphens
@ -484,7 +484,7 @@ cdef class Tokenizer:
if infix_start != start: if infix_start != start:
span = string[start:infix_start] span = string[start:infix_start]
tokens.push_back(self.vocab.get(tokens.mem, span), False) tokens.push_back(self.vocab.get(span), False)
if infix_start != infix_end: if infix_start != infix_end:
# If infix_start != infix_end, it means the infix # If infix_start != infix_end, it means the infix
@ -492,11 +492,11 @@ cdef class Tokenizer:
# for tokenization in some languages (see # for tokenization in some languages (see
# https://github.com/explosion/spaCy/issues/768) # https://github.com/explosion/spaCy/issues/768)
infix_span = string[infix_start:infix_end] infix_span = string[infix_start:infix_end]
tokens.push_back(self.vocab.get(tokens.mem, infix_span), False) tokens.push_back(self.vocab.get(infix_span), False)
start = infix_end start = infix_end
span = string[start:] span = string[start:]
if span: if span:
tokens.push_back(self.vocab.get(tokens.mem, span), False) tokens.push_back(self.vocab.get(span), False)
cdef vector[const LexemeC*].reverse_iterator it = suffixes.rbegin() cdef vector[const LexemeC*].reverse_iterator it = suffixes.rbegin()
while it != suffixes.rend(): while it != suffixes.rend():
lexeme = deref(it) lexeme = deref(it)

View File

@ -266,12 +266,12 @@ cdef class Doc:
cdef const LexemeC* lexeme cdef const LexemeC* lexeme
for word, has_space in zip(words, spaces): for word, has_space in zip(words, spaces):
if isinstance(word, str): if isinstance(word, str):
lexeme = self.vocab.get(self.mem, word) lexeme = self.vocab.get(word)
elif isinstance(word, bytes): elif isinstance(word, bytes):
raise ValueError(Errors.E028.format(value=word)) raise ValueError(Errors.E028.format(value=word))
else: else:
try: try:
lexeme = self.vocab.get_by_orth(self.mem, word) lexeme = self.vocab.get_by_orth(word)
except TypeError: except TypeError:
raise TypeError(Errors.E1022.format(wtype=type(word))) raise TypeError(Errors.E1022.format(wtype=type(word)))
self.push_back(lexeme, has_space) self.push_back(lexeme, has_space)
@ -1430,7 +1430,7 @@ cdef class Doc:
end = start + attrs[i, 0] end = start + attrs[i, 0]
has_space = attrs[i, 1] has_space = attrs[i, 1]
orth_ = text[start:end] orth_ = text[start:end]
lex = self.vocab.get(self.mem, orth_) lex = self.vocab.get(orth_)
self.push_back(lex, has_space) self.push_back(lex, has_space)
start = end + has_space start = end + has_space
self.from_array(msg["array_head"][2:], attrs[:, 2:]) self.from_array(msg["array_head"][2:], attrs[:, 2:])
@ -1536,7 +1536,7 @@ cdef class Doc:
assert words == reconstructed_words assert words == reconstructed_words
for word, has_space in zip(words, spaces): for word, has_space in zip(words, spaces):
lex = self.vocab.get(self.mem, word) lex = self.vocab.get(word)
self.push_back(lex, has_space) self.push_back(lex, has_space)
# Set remaining token-level attributes via Doc.from_array(). # Set remaining token-level attributes via Doc.from_array().

View File

@ -223,7 +223,7 @@ def _merge(Doc doc, merges):
if doc.vocab.vectors_length > 0: if doc.vocab.vectors_length > 0:
doc.vocab.set_vector(new_orth, span.vector) doc.vocab.set_vector(new_orth, span.vector)
token = tokens[token_index] token = tokens[token_index]
lex = doc.vocab.get(doc.mem, new_orth) lex = doc.vocab.get(new_orth)
token.lex = lex token.lex = lex
# We set trailing space here too # We set trailing space here too
token.spacy = doc.c[spans[token_index].end-1].spacy token.spacy = doc.c[spans[token_index].end-1].spacy
@ -359,7 +359,7 @@ def _split(Doc doc, int token_index, orths, heads, attrs):
cdef int idx_offset = 0 cdef int idx_offset = 0
for i, orth in enumerate(orths): for i, orth in enumerate(orths):
token = &doc.c[token_index + i] token = &doc.c[token_index + i]
lex = doc.vocab.get(doc.mem, orth) lex = doc.vocab.get(orth)
token.lex = lex token.lex = lex
# If lemma is currently set, set default lemma to orth # If lemma is currently set, set default lemma to orth
if token.lemma != 0: if token.lemma != 0:

View File

@ -1,4 +1,4 @@
from .corpus import Corpus, JsonlCorpus # noqa: F401 from .corpus import Corpus, JsonlCorpus, PlainTextCorpus # noqa: F401
from .example import Example, validate_examples, validate_get_examples # noqa: F401 from .example import Example, validate_examples, validate_get_examples # noqa: F401
from .example import validate_distillation_examples # noqa: F401 from .example import validate_distillation_examples # noqa: F401
from .alignment import Alignment # noqa: F401 from .alignment import Alignment # noqa: F401

View File

@ -58,6 +58,28 @@ def read_labels(path: Path, *, require: bool = False):
return srsly.read_json(path) return srsly.read_json(path)
@util.registry.readers("spacy.PlainTextCorpus.v1")
def create_plain_text_reader(
path: Optional[Path],
min_length: int = 0,
max_length: int = 0,
) -> Callable[["Language"], Iterable[Doc]]:
"""Iterate Example objects from a file or directory of plain text
UTF-8 files with one line per doc.
path (Path): The directory or filename to read from.
min_length (int): Minimum document length (in tokens). Shorter documents
will be skipped. Defaults to 0, which indicates no limit.
max_length (int): Maximum document length (in tokens). Longer documents will
be skipped. Defaults to 0, which indicates no limit.
DOCS: https://spacy.io/api/corpus#plaintextcorpus
"""
if path is None:
raise ValueError(Errors.E913)
return PlainTextCorpus(path, min_length=min_length, max_length=max_length)
def walk_corpus(path: Union[str, Path], file_type) -> List[Path]: def walk_corpus(path: Union[str, Path], file_type) -> List[Path]:
path = util.ensure_path(path) path = util.ensure_path(path)
if not path.is_dir() and path.parts[-1].endswith(file_type): if not path.is_dir() and path.parts[-1].endswith(file_type):
@ -257,3 +279,52 @@ class JsonlCorpus:
# We don't *need* an example here, but it seems nice to # We don't *need* an example here, but it seems nice to
# make it match the Corpus signature. # make it match the Corpus signature.
yield Example(doc, Doc(nlp.vocab, words=words, spaces=spaces)) yield Example(doc, Doc(nlp.vocab, words=words, spaces=spaces))
class PlainTextCorpus:
"""Iterate Example objects from a file or directory of plain text
UTF-8 files with one line per doc.
path (Path): The directory or filename to read from.
min_length (int): Minimum document length (in tokens). Shorter documents
will be skipped. Defaults to 0, which indicates no limit.
max_length (int): Maximum document length (in tokens). Longer documents will
be skipped. Defaults to 0, which indicates no limit.
DOCS: https://spacy.io/api/corpus#plaintextcorpus
"""
file_type = "txt"
def __init__(
self,
path: Optional[Union[str, Path]],
*,
min_length: int = 0,
max_length: int = 0,
) -> None:
self.path = util.ensure_path(path)
self.min_length = min_length
self.max_length = max_length
def __call__(self, nlp: "Language") -> Iterator[Example]:
"""Yield examples from the data.
nlp (Language): The current nlp object.
YIELDS (Example): The example objects.
DOCS: https://spacy.io/api/corpus#plaintextcorpus-call
"""
for loc in walk_corpus(self.path, ".txt"):
with open(loc, encoding="utf-8") as f:
for text in f:
text = text.rstrip("\r\n")
if len(text):
doc = nlp.make_doc(text)
if self.min_length >= 1 and len(doc) < self.min_length:
continue
elif self.max_length >= 1 and len(doc) > self.max_length:
continue
# We don't *need* an example here, but it seems nice to
# make it match the Corpus signature.
yield Example(doc, doc.copy())
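As a usage note, here is a minimal sketch of the length filtering described in the docstring above, assuming a hypothetical `raw_text.txt` with one UTF-8 document per line:
```python
import spacy
from spacy.training import PlainTextCorpus

nlp = spacy.blank("en")

# Hypothetical file; documents shorter than min_length or longer than
# max_length (counted in tokens) are skipped, and 0 disables either limit.
corpus = PlainTextCorpus("raw_text.txt", min_length=2, max_length=100)

for example in corpus(nlp):
    print(len(example.reference), example.reference.text)
```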

View File

@ -1,6 +1,5 @@
from typing import TYPE_CHECKING from typing import TYPE_CHECKING, Protocol, runtime_checkable
from typing import Optional, Any, Iterable, Dict, Callable, Sequence, List from typing import Optional, Any, Iterable, Dict, Callable, Sequence, List
from .compat import Protocol, runtime_checkable
from thinc.api import Optimizer, Model from thinc.api import Optimizer, Model
@ -27,6 +26,25 @@ class TrainableComponent(Protocol):
... ...
@runtime_checkable
class DistillableComponent(Protocol):
is_distillable: bool
def distill(
self,
teacher_pipe: Optional[TrainableComponent],
examples: Iterable["Example"],
*,
drop: float = 0.0,
sgd: Optional[Optimizer] = None,
losses: Optional[Dict[str, float]] = None
) -> Dict[str, float]:
...
def finish_update(self, sgd: Optimizer) -> None:
...
@runtime_checkable @runtime_checkable
class InitializableComponent(Protocol): class InitializableComponent(Protocol):
def initialize( def initialize(

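Because the new protocol is `runtime_checkable`, distillable pipes can be detected with a plain `isinstance` check. A small sketch, assuming a pipeline whose trainable components expose `is_distillable` as on this branch:
```python
from spacy import ty
from spacy.lang.en import English

nlp = English()
nlp.add_pipe("tagger")

for name, pipe in nlp.pipeline:
    # Structural check only: any object with is_distillable, distill and
    # finish_update attributes satisfies the runtime protocol.
    if isinstance(pipe, ty.DistillableComponent) and pipe.is_distillable:
        print(f"{name} supports distillation")
```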
View File

@ -4,6 +4,7 @@ from typing import Iterator, Pattern, Generator, TYPE_CHECKING
from types import ModuleType from types import ModuleType
import os import os
import importlib import importlib
import importlib.metadata
import importlib.util import importlib.util
import re import re
from pathlib import Path from pathlib import Path
@ -40,7 +41,7 @@ except ImportError:
from .symbols import ORTH from .symbols import ORTH
from .compat import cupy, CudaStream, is_windows, importlib_metadata from .compat import cupy, CudaStream, is_windows
from .errors import Errors, Warnings from .errors import Errors, Warnings
from . import about from . import about
@ -706,8 +707,8 @@ def get_package_version(name: str) -> Optional[str]:
RETURNS (str / None): The version or None if package not installed. RETURNS (str / None): The version or None if package not installed.
""" """
try: try:
return importlib_metadata.version(name) # type: ignore[attr-defined] return importlib.metadata.version(name) # type: ignore[attr-defined]
except importlib_metadata.PackageNotFoundError: # type: ignore[attr-defined] except importlib.metadata.PackageNotFoundError: # type: ignore[attr-defined]
return None return None
@ -895,7 +896,7 @@ def is_package(name: str) -> bool:
RETURNS (bool): True if installed package, False if not. RETURNS (bool): True if installed package, False if not.
""" """
try: try:
importlib_metadata.distribution(name) # type: ignore[attr-defined] importlib.metadata.distribution(name) # type: ignore[attr-defined]
return True return True
except: # noqa: E722 except: # noqa: E722
return False return False
@ -1718,7 +1719,7 @@ def packages_distributions() -> Dict[str, List[str]]:
it's not available in the builtin importlib.metadata. it's not available in the builtin importlib.metadata.
""" """
pkg_to_dist = defaultdict(list) pkg_to_dist = defaultdict(list)
for dist in importlib_metadata.distributions(): for dist in importlib.metadata.distributions():
for pkg in (dist.read_text("top_level.txt") or "").split(): for pkg in (dist.read_text("top_level.txt") or "").split():
pkg_to_dist[pkg].append(dist.metadata["Name"]) pkg_to_dist[pkg].append(dist.metadata["Name"])
return dict(pkg_to_dist) return dict(pkg_to_dist)

View File

@ -35,12 +35,11 @@ cdef class Vocab:
cdef public object lex_attr_getters cdef public object lex_attr_getters
cdef public object cfg cdef public object cfg
cdef const LexemeC* get(self, Pool mem, str string) except NULL cdef const LexemeC* get(self, str string) except NULL
cdef const LexemeC* get_by_orth(self, Pool mem, attr_t orth) except NULL cdef const LexemeC* get_by_orth(self, attr_t orth) except NULL
cdef const TokenC* make_fused_token(self, substrings) except NULL cdef const TokenC* make_fused_token(self, substrings) except NULL
cdef const LexemeC* _new_lexeme(self, Pool mem, str string) except NULL cdef const LexemeC* _new_lexeme(self, str string) except NULL
cdef int _add_lex_to_vocab(self, hash_t key, const LexemeC* lex) except -1 cdef int _add_lex_to_vocab(self, hash_t key, const LexemeC* lex) except -1
cdef const LexemeC* _new_lexeme(self, Pool mem, str string) except NULL
cdef PreshMap _by_orth cdef PreshMap _by_orth

View File

@ -139,7 +139,7 @@ cdef class Vocab:
self.lex_attr_getters[flag_id] = flag_getter self.lex_attr_getters[flag_id] = flag_getter
return flag_id return flag_id
cdef const LexemeC* get(self, Pool mem, str string) except NULL: cdef const LexemeC* get(self, str string) except NULL:
"""Get a pointer to a `LexemeC` from the lexicon, creating a new """Get a pointer to a `LexemeC` from the lexicon, creating a new
`Lexeme` if necessary using memory acquired from the given pool. If the `Lexeme` if necessary using memory acquired from the given pool. If the
pool is the lexicon's own memory, the lexeme is saved in the lexicon. pool is the lexicon's own memory, the lexeme is saved in the lexicon.
@ -157,9 +157,9 @@ cdef class Vocab:
orth=key, orth_id=string)) orth=key, orth_id=string))
return lex return lex
else: else:
return self._new_lexeme(mem, string) return self._new_lexeme(string)
cdef const LexemeC* get_by_orth(self, Pool mem, attr_t orth) except NULL: cdef const LexemeC* get_by_orth(self, attr_t orth) except NULL:
"""Get a pointer to a `LexemeC` from the lexicon, creating a new """Get a pointer to a `LexemeC` from the lexicon, creating a new
`Lexeme` if necessary using memory acquired from the given pool. If the `Lexeme` if necessary using memory acquired from the given pool. If the
pool is the lexicon's own memory, the lexeme is saved in the lexicon. pool is the lexicon's own memory, the lexeme is saved in the lexicon.
@ -171,21 +171,10 @@ cdef class Vocab:
if lex != NULL: if lex != NULL:
return lex return lex
else: else:
return self._new_lexeme(mem, self.strings[orth]) return self._new_lexeme(self.strings[orth])
cdef const LexemeC* _new_lexeme(self, Pool mem, str string) except NULL: cdef const LexemeC* _new_lexeme(self, str string) except NULL:
# I think this heuristic is bad, and the Vocab should always lex = <LexemeC*>self.mem.alloc(1, sizeof(LexemeC))
# own the lexemes. It avoids weird bugs this way, as it's how the thing
# was originally supposed to work. The best solution to the growing
# memory use is to periodically reset the vocab, which is an action
# that should be up to the user to do (so we don't need to keep track
# of the doc ownership).
# TODO: Change the C API so that the mem isn't passed in here.
mem = self.mem
#if len(string) < 3 or self.length < 10000:
# mem = self.mem
cdef bint is_oov = mem is not self.mem
lex = <LexemeC*>mem.alloc(1, sizeof(LexemeC))
lex.orth = self.strings.add(string) lex.orth = self.strings.add(string)
lex.length = len(string) lex.length = len(string)
if self.vectors is not None: if self.vectors is not None:
@ -199,8 +188,7 @@ cdef class Vocab:
value = self.strings.add(value) value = self.strings.add(value)
if value is not None: if value is not None:
Lexeme.set_struct_attr(lex, attr, value) Lexeme.set_struct_attr(lex, attr, value)
if not is_oov: self._add_lex_to_vocab(lex.orth, lex)
self._add_lex_to_vocab(lex.orth, lex)
if lex == NULL: if lex == NULL:
raise ValueError(Errors.E085.format(string=string)) raise ValueError(Errors.E085.format(string=string))
return lex return lex
@ -271,7 +259,7 @@ cdef class Vocab:
props = intify_attrs(props, strings_map=self.strings) props = intify_attrs(props, strings_map=self.strings)
token = &tokens[i] token = &tokens[i]
# Set the special tokens up to have arbitrary attributes # Set the special tokens up to have arbitrary attributes
lex = <LexemeC*>self.get_by_orth(self.mem, props[ORTH]) lex = <LexemeC*>self.get_by_orth(props[ORTH])
token.lex = lex token.lex = lex
for attr_id, value in props.items(): for attr_id, value in props.items():
Token.set_struct_attr(token, attr_id, value) Token.set_struct_attr(token, attr_id, value)

9
website/.dockerignore Normal file
View File

@ -0,0 +1,9 @@
.cache/
.next/
public/
node_modules
.npm
logs
*.log
npm-debug.log*
quickstart-training-generator.js

4
website/.gitignore vendored
View File

@ -1,5 +1,7 @@
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files. # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
quickstart-training-generator.js
# dependencies # dependencies
/node_modules /node_modules
/.pnp /.pnp
@ -41,4 +43,4 @@ next-env.d.ts
public/robots.txt public/robots.txt
public/sitemap* public/sitemap*
public/sw.js* public/sw.js*
public/workbox* public/workbox*

View File

@ -1,16 +1,14 @@
FROM node:11.15.0 FROM node:18
WORKDIR /spacy-io USER node
RUN npm install -g gatsby-cli@2.7.4
COPY package.json .
COPY package-lock.json .
RUN npm install
# This is so the installed node_modules will be up one directory # This is so the installed node_modules will be up one directory
# from where a user mounts files, so that they don't accidentally mount # from where a user mounts files, so that they don't accidentally mount
# their own node_modules from a different build # their own node_modules from a different build
# https://nodejs.org/api/modules.html#modules_loading_from_node_modules_folders # https://nodejs.org/api/modules.html#modules_loading_from_node_modules_folders
WORKDIR /spacy-io/website/ WORKDIR /home/node
COPY --chown=node package.json .
COPY --chown=node package-lock.json .
RUN npm install
WORKDIR /home/node/website/

View File

@ -41,33 +41,27 @@ If you'd like to do this, **be sure you do _not_ include your local
`node_modules` folder**, since there are some dependencies that need to be built `node_modules` folder**, since there are some dependencies that need to be built
for the image system. Rename it before using. for the image system. Rename it before using.
```bash First build the Docker image. This only needs to be done on the first run
docker run -it \ or when changes are made to `Dockerfile` or the website dependencies:
-v $(pwd):/spacy-io/website \
-p 8000:8000 \
ghcr.io/explosion/spacy-io \
gatsby develop -H 0.0.0.0
```
This will allow you to access the built website at http://0.0.0.0:8000/ in your
browser, and still edit code in your editor while having the site reflect those
changes.
**Note**: If you're working on a Mac with an M1 processor, you might see
segfault errors from `qemu` if you use the default image. To fix this use the
`arm64` tagged image in the `docker run` command
(ghcr.io/explosion/spacy-io:arm64).
### Building the Docker image
If you'd like to build the image locally, you can do so like this:
```bash ```bash
docker build -t spacy-io . docker build -t spacy-io .
``` ```
This will take some time, so if you want to use the prebuilt image you'll save a You can then build and run the website with:
bit of time.
```bash
docker run -it \
--rm \
-v $(pwd):/home/node/website \
-p 3000:3000 \
spacy-io \
npm run dev -- -H 0.0.0.0
```
This will allow you to access the built website at http://0.0.0.0:3000/ in your
browser, and still edit code in your editor while having the site reflect those
changes.
## Project structure ## Project structure

View File

@ -12,6 +12,7 @@ menu:
- ['train', 'train'] - ['train', 'train']
- ['pretrain', 'pretrain'] - ['pretrain', 'pretrain']
- ['evaluate', 'evaluate'] - ['evaluate', 'evaluate']
- ['benchmark', 'benchmark']
- ['apply', 'apply'] - ['apply', 'apply']
- ['find-threshold', 'find-threshold'] - ['find-threshold', 'find-threshold']
- ['assemble', 'assemble'] - ['assemble', 'assemble']
@ -269,10 +270,10 @@ $ python -m spacy convert [input_file] [output_dir] [--converter] [--file-type]
| `--file-type`, `-t` | Type of file to create. Either `spacy` (default) for binary [`DocBin`](/api/docbin) data or `json` for v2.x JSON format. ~~str (option)~~ | | `--file-type`, `-t` | Type of file to create. Either `spacy` (default) for binary [`DocBin`](/api/docbin) data or `json` for v2.x JSON format. ~~str (option)~~ |
| `--n-sents`, `-n` | Number of sentences per document. Supported for: `conll`, `conllu`, `iob`, `ner` ~~int (option)~~ | | `--n-sents`, `-n` | Number of sentences per document. Supported for: `conll`, `conllu`, `iob`, `ner` ~~int (option)~~ |
| `--seg-sents`, `-s` | Segment sentences. Supported for: `conll`, `ner` ~~bool (flag)~~ | | `--seg-sents`, `-s` | Segment sentences. Supported for: `conll`, `ner` ~~bool (flag)~~ |
| `--base`, `-b`, `--model` | Trained spaCy pipeline for sentence segmentation to use as base (for `--seg-sents`). ~~Optional[str](option)~~ | | `--base`, `-b`, `--model` | Trained spaCy pipeline for sentence segmentation to use as base (for `--seg-sents`). ~~Optional[str] (option)~~ |
| `--morphology`, `-m` | Enable appending morphology to tags. Supported for: `conllu` ~~bool (flag)~~ | | `--morphology`, `-m` | Enable appending morphology to tags. Supported for: `conllu` ~~bool (flag)~~ |
| `--merge-subtokens`, `-T` | Merge CoNLL-U subtokens ~~bool (flag)~~ | | `--merge-subtokens`, `-T` | Merge CoNLL-U subtokens ~~bool (flag)~~ |
| `--ner-map`, `-nm` | NER tag mapping (as JSON-encoded dict of entity types). Supported for: `conllu` ~~Optional[Path](option)~~ | | `--ner-map`, `-nm` | NER tag mapping (as JSON-encoded dict of entity types). Supported for: `conllu` ~~Optional[Path] (option)~~ |
| `--lang`, `-l` | Language code (if tokenizer required). ~~Optional[str] \(option)~~ | | `--lang`, `-l` | Language code (if tokenizer required). ~~Optional[str] \(option)~~ |
| `--concatenate`, `-C` | Concatenate output to a single file ~~bool (flag)~~ | | `--concatenate`, `-C` | Concatenate output to a single file ~~bool (flag)~~ |
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | | `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
@ -1135,8 +1136,19 @@ $ python -m spacy pretrain [config_path] [output_dir] [--code] [--resume-path] [
## evaluate {id="evaluate",version="2",tag="command"} ## evaluate {id="evaluate",version="2",tag="command"}
Evaluate a trained pipeline. Expects a loadable spaCy pipeline (package name or The `evaluate` subcommand is superseded by
path) and evaluation data in the [`spacy benchmark accuracy`](#benchmark-accuracy). `evaluate` is provided as an
alias to `benchmark accuracy` for compatibility.
## benchmark {id="benchmark", version="3.5"}
The `spacy benchmark` CLI includes commands for benchmarking the accuracy and
speed of your spaCy pipelines.
### accuracy {id="benchmark-accuracy", version="3.5", tag="command"}
Evaluate the accuracy of a trained pipeline. Expects a loadable spaCy pipeline
(package name or path) and evaluation data in the
[binary `.spacy` format](/api/data-formats#binary-training). The [binary `.spacy` format](/api/data-formats#binary-training). The
`--gold-preproc` option sets up the evaluation examples with gold-standard `--gold-preproc` option sets up the evaluation examples with gold-standard
sentences and tokens for the predictions. Gold preprocessing helps the sentences and tokens for the predictions. Gold preprocessing helps the
@ -1147,7 +1159,7 @@ skew. To render a sample of dependency parses in a HTML file using the
`--displacy-path` argument. `--displacy-path` argument.
```bash ```bash
$ python -m spacy evaluate [model] [data_path] [--output] [--code] [--gold-preproc] [--gpu-id] [--displacy-path] [--displacy-limit] $ python -m spacy benchmark accuracy [model] [data_path] [--output] [--code] [--gold-preproc] [--gpu-id] [--displacy-path] [--displacy-limit]
``` ```
| Name | Description | | Name | Description |
@ -1163,6 +1175,29 @@ $ python -m spacy evaluate [model] [data_path] [--output] [--code] [--gold-prepr
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | | `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
| **CREATES** | Training results and optional metrics and visualizations. | | **CREATES** | Training results and optional metrics and visualizations. |
### speed {id="benchmark-speed", version="3.5", tag="command"}
Benchmark the speed of a trained pipeline with a 95% confidence interval.
Expects a loadable spaCy pipeline (package name or path) and benchmark data in
the [binary `.spacy` format](/api/data-formats#binary-training). The pipeline is
warmed up before any measurements are taken.
```cli
$ python -m spacy benchmark speed [model] [data_path] [--batch_size] [--no-shuffle] [--gpu-id] [--batches] [--warmup]
```
| Name | Description |
| -------------------- | -------------------------------------------------------------------------------------------------------- |
| `model` | Pipeline to benchmark the speed of. Can be a package or a path to a data directory. ~~str (positional)~~ |
| `data_path` | Location of benchmark data in spaCy's [binary format](/api/data-formats#training). ~~Path (positional)~~ |
| `--batch-size`, `-b` | Set the batch size. If not set, the pipeline's batch size is used. ~~Optional[int] \(option)~~ |
| `--no-shuffle` | Do not shuffle documents in the benchmark data. ~~bool (flag)~~ |
| `--gpu-id`, `-g` | GPU to use, if any. Defaults to `-1` for CPU. ~~int (option)~~ |
| `--batches` | Number of batches to benchmark on. Defaults to `50`. ~~Optional[int] \(option)~~ |
| `--warmup`, `-w`     | Iterations over the benchmark data for warmup. Defaults to `3`. ~~Optional[int] \(option)~~               |
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
| **PRINTS** | Pipeline speed in words per second with a 95% confidence interval. |
## apply {id="apply", version="3.5", tag="command"} ## apply {id="apply", version="3.5", tag="command"}
Applies a trained pipeline to data and stores the resulting annotated documents Applies a trained pipeline to data and stores the resulting annotated documents
@ -1176,24 +1211,23 @@ input formats are:
When a directory is provided it is traversed recursively to collect all files. When a directory is provided it is traversed recursively to collect all files.
```cli ```bash
$ python -m spacy apply [model] [data-path] [output-file] [--code] [--text-key] [--force-overwrite] [--gpu-id] [--batch-size] [--n-process] $ python -m spacy apply [model] [data-path] [output-file] [--code] [--text-key] [--force-overwrite] [--gpu-id] [--batch-size] [--n-process]
``` ```
| Name | Description | | Name | Description |
| ----------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | | ------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `model` | Pipeline to apply to the data. Can be a package or a path to a data directory. ~~str (positional)~~ | | `model` | Pipeline to apply to the data. Can be a package or a path to a data directory. ~~str (positional)~~ |
| `data_path` | Location of data to be evaluated in spaCy's [binary format](/api/data-formats#training), jsonl, or plain text. ~~Path (positional)~~ | | `data_path` | Location of data to be evaluated in spaCy's [binary format](/api/data-formats#training), jsonl, or plain text. ~~Path (positional)~~ |
| `output-file`, `-o` | Output `DocBin` path. ~~str (positional)~~ | | `output-file`, `-o` | Output `DocBin` path. ~~str (positional)~~ |
| `--code`, `-c` <Tag variant="new">3</Tag> | Path to Python file with additional code to be imported. Allows [registering custom functions](/usage/training#custom-functions) for new architectures. ~~Optional[Path] \(option)~~ | | `--code`, `-c` | Path to Python file with additional code to be imported. Allows [registering custom functions](/usage/training#custom-functions) for new architectures. ~~Optional[Path] \(option)~~ |
| `--text-key`, `-tk` | The key for `.jsonl` files to use to grab the texts from. Defaults to `text`. ~~Optional[str] \(option)~~ | | `--text-key`, `-tk` | The key for `.jsonl` files to use to grab the texts from. Defaults to `text`. ~~Optional[str] \(option)~~ |
| `--force-overwrite`, `-F` | If the provided `output-file` already exists, then force `apply` to overwrite it. If this is `False` (default) then quits with a warning instead. ~~bool (flag)~~ | | `--force-overwrite`, `-F` | If the provided `output-file` already exists, then force `apply` to overwrite it. If this is `False` (default) then quits with a warning instead. ~~bool (flag)~~ |
| `--gpu-id`, `-g` | GPU to use, if any. Defaults to `-1` for CPU. ~~int (option)~~ | | `--gpu-id`, `-g` | GPU to use, if any. Defaults to `-1` for CPU. ~~int (option)~~ |
| `--batch-size`, `-b` | Batch size to use for prediction. Defaults to `1`. ~~int (option)~~ | | `--batch-size`, `-b` | Batch size to use for prediction. Defaults to `1`. ~~int (option)~~ |
| `--n-process`, `-n` | Number of processes to use for prediction. Defaults to `1`. ~~int (option)~~ | | `--n-process`, `-n` | Number of processes to use for prediction. Defaults to `1`. ~~int (option)~~ |
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | | `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
| **CREATES** | A `DocBin` with the annotations from the `model` for all the files found in `data-path`. | | **CREATES** | A `DocBin` with the annotations from the `model` for all the files found in `data-path`. |
## find-threshold {id="find-threshold",version="3.5",tag="command"} ## find-threshold {id="find-threshold",version="3.5",tag="command"}

View File

@ -175,3 +175,68 @@ Yield examples from the data.
| ---------- | -------------------------------------- | | ---------- | -------------------------------------- |
| `nlp` | The current `nlp` object. ~~Language~~ | | `nlp` | The current `nlp` object. ~~Language~~ |
| **YIELDS** | The examples. ~~Example~~ | | **YIELDS** | The examples. ~~Example~~ |
## PlainTextCorpus {id="plaintextcorpus",tag="class",version="3.5.1"}
Iterate over documents from a plain text file. Can be used to read the raw text
corpus for language model
[pretraining](/usage/embeddings-transformers#pretraining). The expected file
format is:
- UTF-8 encoding
- One document per line
- Blank lines are ignored.
```text {title="Example"}
Can I ask where you work now and what you do, and if you enjoy it?
They may just pull out of the Seattle market completely, at least until they have autonomous vehicles.
My cynical view on this is that it will never be free to the public. Reason: what would be the draw of joining the military? Right now their selling point is free Healthcare and Education. Ironically both are run horribly and most, that I've talked to, come out wishing they never went in.
```
### PlainTextCorpus.\_\_init\_\_ {id="plaintextcorpus-init",tag="method"}
Initialize the reader.
> #### Example
>
> ```python
> from spacy.training import PlainTextCorpus
>
> corpus = PlainTextCorpus("./data/docs.txt")
> ```
>
> ```ini
> ### Example config
> [corpora.pretrain]
> @readers = "spacy.PlainTextCorpus.v1"
> path = "corpus/raw_text.txt"
> min_length = 0
> max_length = 0
> ```
| Name | Description |
| -------------- | -------------------------------------------------------------------------------------------------------------------------- |
| `path`         | The directory or filename to read from. Expects newline-delimited documents in UTF-8 format. ~~Union[str, Path]~~          |
| _keyword-only_ | |
| `min_length` | Minimum document length (in tokens). Shorter documents will be skipped. Defaults to `0`, which indicates no limit. ~~int~~ |
| `max_length` | Maximum document length (in tokens). Longer documents will be skipped. Defaults to `0`, which indicates no limit. ~~int~~ |
### PlainTextCorpus.\_\_call\_\_ {id="plaintextcorpus-call",tag="method"}
Yield examples from the data.
> #### Example
>
> ```python
> from spacy.training import PlainTextCorpus
> import spacy
>
> corpus = PlainTextCorpus("./docs.txt")
> nlp = spacy.blank("en")
> data = corpus(nlp)
> ```
| Name | Description |
| ---------- | -------------------------------------- |
| `nlp` | The current `nlp` object. ~~Language~~ |
| **YIELDS** | The examples. ~~Example~~ |
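The yielded examples can then be consumed like the output of any other corpus reader. A short sketch extending the example above; the reference and predicted docs contain the same tokenized text:

```python
import spacy
from spacy.training import PlainTextCorpus

corpus = PlainTextCorpus("./docs.txt")
nlp = spacy.blank("en")

for example in corpus(nlp):
    # One Example per non-blank line in the file.
    print([token.text for token in example.reference])
```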

View File

@ -163,14 +163,13 @@ vocabulary.
> #### Example > #### Example
> >
> ```python > ```python
> lexeme = vocab.get(vocab.mem, "hello") > lexeme = vocab.get("hello")
> ``` > ```
| Name | Description | | Name | Description |
| ----------- | ---------------------------------------------------------------------------------------------------------- | | ----------- | ------------------------------------------------- |
| `mem` | A memory pool. Allocated memory will be freed once the `Vocab` object is garbage collected. ~~cymem.Pool~~ | | `string` | The string of the word to look up. ~~str~~ |
| `string` | The string of the word to look up. ~~str~~ | | **RETURNS** | The lexeme in the vocabulary. ~~const LexemeC\*~~ |
| **RETURNS** | The lexeme in the vocabulary. ~~const LexemeC\*~~ |
### Vocab.get_by_orth {id="vocab_get_by_orth",tag="method"} ### Vocab.get_by_orth {id="vocab_get_by_orth",tag="method"}
@ -183,11 +182,10 @@ vocabulary.
> lexeme = vocab.get_by_orth(doc[0].lex.norm) > lexeme = vocab.get_by_orth(doc[0].lex.norm)
> ``` > ```
| Name | Description | | Name | Description |
| ----------- | ---------------------------------------------------------------------------------------------------------- | | ----------- | ------------------------------------------------------ |
| `mem` | A memory pool. Allocated memory will be freed once the `Vocab` object is garbage collected. ~~cymem.Pool~~ | | `orth` | ID of the verbatim text content. ~~attr_t (uint64_t)~~ |
| `orth` | ID of the verbatim text content. ~~attr_t (uint64_t)~~ | | **RETURNS** | The lexeme in the vocabulary. ~~const LexemeC\*~~ |
| **RETURNS** | The lexeme in the vocabulary. ~~const LexemeC\*~~ |
## StringStore {id="stringstore",tag="cdef class",source="spacy/strings.pxd"} ## StringStore {id="stringstore",tag="cdef class",source="spacy/strings.pxd"}

View File

@ -154,15 +154,15 @@ This feature is experimental.
> losses = student.distill(teacher_pipe, examples, sgd=optimizer) > losses = student.distill(teacher_pipe, examples, sgd=optimizer)
> ``` > ```
| Name | Description | | Name | Description |
| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | | -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ | | `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ |
| `examples` | Distillation examples. The reference and predicted docs must have the same number of tokens and the same orthography. ~~Iterable[Example]~~ | | `examples` | A batch of [`Example`](/api/example) distillation examples. The reference (teacher) and predicted (student) docs must have the same number of tokens and orthography. ~~Iterable[Example]~~ |
| _keyword-only_ | | | _keyword-only_ | |
| `drop` | Dropout rate. ~~float~~ | | `drop` | Dropout rate. ~~float~~ |
| `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ | | `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ |
| `losses` | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ | | `losses` | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ |
| **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ | | **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ |
## DependencyParser.pipe {id="pipe",tag="method"} ## DependencyParser.pipe {id="pipe",tag="method"}

View File

@ -138,15 +138,15 @@ This feature is experimental.
> losses = student.distill(teacher_pipe, examples, sgd=optimizer) > losses = student.distill(teacher_pipe, examples, sgd=optimizer)
> ``` > ```
| Name | Description | | Name | Description |
| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | | -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ | | `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ |
| `examples` | Distillation examples. The reference and predicted docs must have the same number of tokens and the same orthography. ~~Iterable[Example]~~ | | `examples` | A batch of [`Example`](/api/example) distillation examples. The reference (teacher) and predicted (student) docs must have the same number of tokens and orthography. ~~Iterable[Example]~~ |
| _keyword-only_ | | | _keyword-only_ | |
| `drop` | Dropout rate. ~~float~~ | | `drop` | Dropout rate. ~~float~~ |
| `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ | | `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ |
| `losses` | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ | | `losses` | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ |
| **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ | | **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ |
## EditTreeLemmatizer.pipe {id="pipe",tag="method"} ## EditTreeLemmatizer.pipe {id="pipe",tag="method"}

View File

@ -15,7 +15,7 @@ world". It requires a `KnowledgeBase`, as well as a function to generate
plausible candidates from that `KnowledgeBase` given a certain textual mention, plausible candidates from that `KnowledgeBase` given a certain textual mention,
and a machine learning model to pick the right candidate, given the local and a machine learning model to pick the right candidate, given the local
context of the mention. `EntityLinker` defaults to using the context of the mention. `EntityLinker` defaults to using the
[`InMemoryLookupKB`](/api/kb_in_memory) implementation. [`InMemoryLookupKB`](/api/inmemorylookupkb) implementation.
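As a rough sketch of how these pieces fit together, a knowledge base can be attached to the component via `set_kb`; the entity and alias below are purely illustrative:

```python
import spacy
from spacy.kb import InMemoryLookupKB

nlp = spacy.blank("en")
entity_linker = nlp.add_pipe("entity_linker")

def create_kb(vocab):
    # Toy knowledge base with a single entity and alias, for illustration only.
    kb = InMemoryLookupKB(vocab, entity_vector_length=3)
    kb.add_entity(entity="Q2146908", freq=12, entity_vector=[1.0, 2.0, 3.0])
    kb.add_alias(alias="Russ Cochran", entities=["Q2146908"], probabilities=[1.0])
    return kb

entity_linker.set_kb(create_kb)
```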
## Assigned Attributes {id="assigned-attributes"} ## Assigned Attributes {id="assigned-attributes"}

View File

@ -150,15 +150,15 @@ This feature is experimental.
> losses = student.distill(teacher_pipe, examples, sgd=optimizer) > losses = student.distill(teacher_pipe, examples, sgd=optimizer)
> ``` > ```
| Name | Description | | Name | Description |
| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | | -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ | | `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ |
| `examples` | Distillation examples. The reference and predicted docs must have the same number of tokens and the same orthography. ~~Iterable[Example]~~ | | `examples` | A batch of [`Example`](/api/example) distillation examples. The reference (teacher) and predicted (student) docs must have the same number of tokens and orthography. ~~Iterable[Example]~~ |
| _keyword-only_ | | | _keyword-only_ | |
| `drop` | Dropout rate. ~~float~~ | | `drop` | Dropout rate. ~~float~~ |
| `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ | | `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ |
| `losses` | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ | | `losses` | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ |
| **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ | | **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ |
## EntityRecognizer.pipe {id="pipe",tag="method"} ## EntityRecognizer.pipe {id="pipe",tag="method"}

View File

@ -43,7 +43,7 @@ The length of the fixed-size entity vectors in the knowledge base.
Add an entity to the knowledge base, specifying its corpus frequency and entity Add an entity to the knowledge base, specifying its corpus frequency and entity
vector, which should be of length vector, which should be of length
[`entity_vector_length`](/api/kb_in_memory#entity_vector_length). [`entity_vector_length`](/api/inmemorylookupkb#entity_vector_length).
> #### Example > #### Example
> >
@ -79,8 +79,9 @@ frequency and entity vector for each entity.
Add an alias or mention to the knowledge base, specifying its potential KB Add an alias or mention to the knowledge base, specifying its potential KB
identifiers and their prior probabilities. The entity identifiers should refer identifiers and their prior probabilities. The entity identifiers should refer
to entities previously added with [`add_entity`](/api/kb_in_memory#add_entity) to entities previously added with
or [`set_entities`](/api/kb_in_memory#set_entities). The sum of the prior [`add_entity`](/api/inmemorylookupkb#add_entity) or
[`set_entities`](/api/inmemorylookupkb#set_entities). The sum of the prior
probabilities should not exceed 1. Note that an empty string can not be used as probabilities should not exceed 1. Note that an empty string can not be used as
alias. alias.
@ -156,7 +157,7 @@ Get a list of all aliases in the knowledge base.
Given a certain textual mention as input, retrieve a list of candidate entities Given a certain textual mention as input, retrieve a list of candidate entities
of type [`Candidate`](/api/kb#candidate). Wraps of type [`Candidate`](/api/kb#candidate). Wraps
[`get_alias_candidates()`](/api/kb_in_memory#get_alias_candidates). [`get_alias_candidates()`](/api/inmemorylookupkb#get_alias_candidates).
> #### Example > #### Example
> >
@ -174,7 +175,7 @@ of type [`Candidate`](/api/kb#candidate). Wraps
## InMemoryLookupKB.get_candidates_batch {id="get_candidates_batch",tag="method"} ## InMemoryLookupKB.get_candidates_batch {id="get_candidates_batch",tag="method"}
Same as [`get_candidates()`](/api/kb_in_memory#get_candidates), but for an Same as [`get_candidates()`](/api/inmemorylookupkb#get_candidates), but for an
arbitrary number of mentions. The [`EntityLinker`](/api/entitylinker) component arbitrary number of mentions. The [`EntityLinker`](/api/entitylinker) component
will call `get_candidates_batch()` instead of `get_candidates()`, if the config will call `get_candidates_batch()` instead of `get_candidates()`, if the config
parameter `candidates_batch_size` is greater or equal than 1. parameter `candidates_batch_size` is greater or equal than 1.
@ -231,7 +232,7 @@ Given a certain entity ID, retrieve its pretrained entity vector.
## InMemoryLookupKB.get_vectors {id="get_vectors",tag="method"} ## InMemoryLookupKB.get_vectors {id="get_vectors",tag="method"}
Same as [`get_vector()`](/api/kb_in_memory#get_vector), but for an arbitrary Same as [`get_vector()`](/api/inmemorylookupkb#get_vector), but for an arbitrary
number of entity IDs. number of entity IDs.
The default implementation of `get_vectors()` executes `get_vector()` in a loop. The default implementation of `get_vectors()` executes `get_vector()` in a loop.

View File

@ -21,8 +21,8 @@ functions called by the [`EntityLinker`](/api/entitylinker) component.
<Infobox variant="warning"> <Infobox variant="warning">
This class was not abstract up to spaCy version 3.5. The `KnowledgeBase` This class was not abstract up to spaCy version 3.5. The `KnowledgeBase`
implementation up to that point is available as `InMemoryLookupKB` from 3.5 implementation up to that point is available as
onwards. [`InMemoryLookupKB`](/api/inmemorylookupkb) from 3.5 onwards.
</Infobox> </Infobox>
@ -110,14 +110,15 @@ to you.
</Infobox> </Infobox>
From spaCy 3.5 on `KnowledgeBase` is an abstract class (with From spaCy 3.5 on `KnowledgeBase` is an abstract class (with
[`InMemoryLookupKB`](/api/kb_in_memory) being a drop-in replacement) to allow [`InMemoryLookupKB`](/api/inmemorylookupkb) being a drop-in replacement) to
more flexibility in customizing knowledge bases. Some of its methods were moved allow more flexibility in customizing knowledge bases. Some of its methods were
to [`InMemoryLookupKB`](/api/kb_in_memory) during this refactoring, one of those moved to [`InMemoryLookupKB`](/api/inmemorylookupkb) during this refactoring,
being `get_alias_candidates()`. This method is now available as one of those being `get_alias_candidates()`. This method is now available as
[`InMemoryLookupKB.get_alias_candidates()`](/api/kb_in_memory#get_alias_candidates). [`InMemoryLookupKB.get_alias_candidates()`](/api/inmemorylookupkb#get_alias_candidates).
Note: [`InMemoryLookupKB.get_candidates()`](/api/kb_in_memory#get_candidates) Note:
[`InMemoryLookupKB.get_candidates()`](/api/inmemorylookupkb#get_candidates)
defaults to defaults to
[`InMemoryLookupKB.get_alias_candidates()`](/api/kb_in_memory#get_alias_candidates). [`InMemoryLookupKB.get_alias_candidates()`](/api/inmemorylookupkb#get_alias_candidates).
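A minimal sketch of the migrated call, assuming the 3.5+ API; the entity ID and alias are invented for illustration:

```python
from spacy.kb import InMemoryLookupKB
from spacy.vocab import Vocab

kb = InMemoryLookupKB(Vocab(), entity_vector_length=3)
kb.add_entity(entity="Q42", freq=32, entity_vector=[1.0, 0.0, 0.0])
kb.add_alias(alias="Douglas", entities=["Q42"], probabilities=[0.9])

# Formerly KnowledgeBase.get_alias_candidates(); now only on InMemoryLookupKB.
candidates = kb.get_alias_candidates("Douglas")
print([c.entity_ for c in candidates])
```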
## KnowledgeBase.get_vector {id="get_vector",tag="method"} ## KnowledgeBase.get_vector {id="get_vector",tag="method"}

View File

@ -333,6 +333,34 @@ and custom registered functions if needed. See the
| `component_cfg` | Optional dictionary of keyword arguments for components, keyed by component names. Defaults to `None`. ~~Optional[Dict[str, Dict[str, Any]]]~~ | | `component_cfg` | Optional dictionary of keyword arguments for components, keyed by component names. Defaults to `None`. ~~Optional[Dict[str, Dict[str, Any]]]~~ |
| **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ | | **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ |
## Language.distill {id="distill",tag="method,experimental",version="4"}
Distill the models in a student pipeline from a teacher pipeline.
> #### Example
>
> ```python
>
> teacher = spacy.load("en_core_web_lg")
> student = English()
> student.add_pipe("tagger")
> student.distill(teacher, examples, sgd=optimizer)
> ```
| Name | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `teacher` | The teacher pipeline to distill from. ~~Language~~ |
| `examples` | A batch of [`Example`](/api/example) distillation examples. The reference (teacher) and predicted (student) docs must have the same number of tokens and orthography. ~~Iterable[Example]~~ |
| _keyword-only_ | |
| `drop` | The dropout rate. ~~float~~ |
| `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ |
| `losses` | Dictionary to update with the loss, keyed by pipeline component. ~~Optional[Dict[str, float]]~~ |
| `component_cfg` | Optional dictionary of keyword arguments for components, keyed by component names. Defaults to `None`. ~~Optional[Dict[str, Dict[str, Any]]]~~ |
| `exclude` | Names of components that shouldn't be updated. Defaults to `[]`. ~~Iterable[str]~~ |
| `annotates`          | Names of components that should set annotations on the predicted examples after updating. Defaults to `[]`. ~~Iterable[str]~~                                                                 |
| `student_to_teacher` | Map student component names to teacher component names, only necessary when the names differ. Defaults to `None`. ~~Optional[Dict[str, str]]~~ |
| **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ |
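A minimal sketch of how these arguments fit together (assuming `texts` is a list of raw strings, that the student has been initialized with appropriate labels, and that student and teacher component names match, so no `student_to_teacher` mapping is needed):
```python
import spacy
from spacy.training import Example
from spacy.util import minibatch

teacher = spacy.load("en_core_web_lg")
student = spacy.blank("en")
student.add_pipe("tagger")
student.initialize()
optimizer = student.create_optimizer()

losses = {}
for batch in minibatch(texts, size=8):
    # Reference (teacher) and predicted (student) docs must share tokenization
    examples = [Example(student.make_doc(text), teacher(text)) for text in batch]
    student.distill(teacher, examples, sgd=optimizer, losses=losses)
```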
## Language.rehearse {id="rehearse",tag="method,experimental",version="3"} ## Language.rehearse {id="rehearse",tag="method,experimental",version="3"}
Perform a "rehearsal" update from a batch of data. Rehearsal updates teach the Perform a "rehearsal" update from a batch of data. Rehearsal updates teach the


@ -144,15 +144,15 @@ This feature is experimental.
> losses = student.distill(teacher_pipe, examples, sgd=optimizer) > losses = student.distill(teacher_pipe, examples, sgd=optimizer)
> ``` > ```
| Name | Description | | Name | Description |
| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | | -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ | | `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ |
| `examples` | Distillation examples. The reference and predicted docs must have the same number of tokens and the same orthography. ~~Iterable[Example]~~ | | `examples` | A batch of [`Example`](/api/example) distillation examples. The reference (teacher) and predicted (student) docs must have the same number of tokens and orthography. ~~Iterable[Example]~~ |
| _keyword-only_ | | | _keyword-only_ | |
| `drop` | Dropout rate. ~~float~~ | | `drop` | Dropout rate. ~~float~~ |
| `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ | | `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ |
| `losses` | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ | | `losses` | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ |
| **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ | | **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ |
## Morphologizer.pipe {id="pipe",tag="method"} ## Morphologizer.pipe {id="pipe",tag="method"}


@ -257,15 +257,15 @@ This feature is experimental.
> losses = student.distill(teacher_pipe, examples, sgd=optimizer) > losses = student.distill(teacher_pipe, examples, sgd=optimizer)
> ``` > ```
| Name | Description | | Name | Description |
| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | | -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ | | `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ |
| `examples` | Distillation examples. The reference and predicted docs must have the same number of tokens and the same orthography. ~~Iterable[Example]~~ | | `examples` | A batch of [`Example`](/api/example) distillation examples. The reference (teacher) and predicted (student) docs must have the same number of tokens and orthography. ~~Iterable[Example]~~ |
| _keyword-only_ | | | _keyword-only_ | |
| `drop` | Dropout rate. ~~float~~ | | `drop` | Dropout rate. ~~float~~ |
| `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ | | `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ |
| `losses` | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ | | `losses` | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ |
| **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ | | **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ |
## TrainablePipe.rehearse {id="rehearse",tag="method,experimental",version="3"} ## TrainablePipe.rehearse {id="rehearse",tag="method,experimental",version="3"}


@ -129,15 +129,15 @@ This feature is experimental.
> losses = student.distill(teacher_pipe, examples, sgd=optimizer) > losses = student.distill(teacher_pipe, examples, sgd=optimizer)
> ``` > ```
| Name | Description | | Name | Description |
| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | | -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ | | `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ |
| `examples` | Distillation examples. The reference and predicted docs must have the same number of tokens and the same orthography. ~~Iterable[Example]~~ | | `examples` | A batch of [`Example`](/api/example) distillation examples. The reference (teacher) and predicted (student) docs must have the same number of tokens and orthography. ~~Iterable[Example]~~ |
| _keyword-only_ | | | _keyword-only_ | |
| `drop` | Dropout rate. ~~float~~ | | `drop` | Dropout rate. ~~float~~ |
| `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ | | `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ |
| `losses` | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ | | `losses` | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ |
| **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ | | **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ |
## SentenceRecognizer.pipe {id="pipe",tag="method"} ## SentenceRecognizer.pipe {id="pipe",tag="method"}


@ -128,15 +128,15 @@ This feature is experimental.
> losses = student.distill(teacher_pipe, examples, sgd=optimizer) > losses = student.distill(teacher_pipe, examples, sgd=optimizer)
> ``` > ```
| Name | Description | | Name | Description |
| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | | -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ | | `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ |
| `examples` | Distillation examples. The reference and predicted docs must have the same number of tokens and the same orthography. ~~Iterable[Example]~~ | | `examples` | A batch of [`Example`](/api/example) distillation examples. The reference (teacher) and predicted (student) docs must have the same number of tokens and orthography. ~~Iterable[Example]~~ |
| _keyword-only_ | | | _keyword-only_ | |
| `drop` | Dropout rate. ~~float~~ | | `drop` | Dropout rate. ~~float~~ |
| `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ | | `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ |
| `losses` | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ | | `losses` | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ |
| **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ | | **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ |
## Tagger.pipe {id="pipe",tag="method"} ## Tagger.pipe {id="pipe",tag="method"}


@ -236,17 +236,17 @@ browser. Will run a simple web server.
> displacy.serve([doc1, doc2], style="dep") > displacy.serve([doc1, doc2], style="dep")
> ``` > ```
| Name | Description | | Name | Description |
| ------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | | ----------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `docs` | Document(s) or span(s) to visualize. ~~Union[Iterable[Union[Doc, Span]], Doc, Span]~~ | | `docs` | Document(s) or span(s) to visualize. ~~Union[Iterable[Union[Doc, Span]], Doc, Span]~~ |
| `style` | Visualization style, `"dep"`, `"ent"` or `"span"` <Tag variant="new">3.3</Tag>. Defaults to `"dep"`. ~~str~~ | | `style` <Tag variant="new">3.3</Tag> | Visualization style, `"dep"`, `"ent"` or `"span"`. Defaults to `"dep"`. ~~str~~ |
| `page` | Render markup as full HTML page. Defaults to `True`. ~~bool~~ | | `page` | Render markup as full HTML page. Defaults to `True`. ~~bool~~ |
| `minify` | Minify HTML markup. Defaults to `False`. ~~bool~~ | | `minify` | Minify HTML markup. Defaults to `False`. ~~bool~~ |
| `options` | [Visualizer-specific options](#displacy_options), e.g. colors. ~~Dict[str, Any]~~ | | `options` | [Visualizer-specific options](#displacy_options), e.g. colors. ~~Dict[str, Any]~~ |
| `manual` | Don't parse `Doc` and instead expect a dict or list of dicts. [See here](/usage/visualizers#manual-usage) for formats and examples. Defaults to `False`. ~~bool~~ | | `manual` | Don't parse `Doc` and instead expect a dict or list of dicts. [See here](/usage/visualizers#manual-usage) for formats and examples. Defaults to `False`. ~~bool~~ |
| `port` | Port to serve visualization. Defaults to `5000`. ~~int~~ | | `port` | Port to serve visualization. Defaults to `5000`. ~~int~~ |
| `host` | Host to serve visualization. Defaults to `"0.0.0.0"`. ~~str~~ | | `host` | Host to serve visualization. Defaults to `"0.0.0.0"`. ~~str~~ |
| `auto_select_port` | If `True`, automatically switch to a different port if the specified port is already in use. Defaults to `False`. ~~bool~~ | | `auto_select_port` <Tag variant="new">3.5</Tag> | If `True`, automatically switch to a different port if the specified port is already in use. Defaults to `False`. ~~bool~~ |
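For example (a small sketch, assuming a processed `doc`), the port handling added in v3.5 can be combined with the other arguments like this:
```python
import spacy
from spacy import displacy

nlp = spacy.load("en_core_web_sm")
doc = nlp("This is a sentence about Berlin.")
# Falls back to another free port if 5000 is already in use
displacy.serve(doc, style="dep", port=5000, auto_select_port=True)
```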
### displacy.render {id="displacy.render",tag="method",version="2"} ### displacy.render {id="displacy.render",tag="method",version="2"}


@ -81,7 +81,7 @@ operates on a `Doc` and gives you access to the matched tokens **in context**.
| ------------------------------------------------ | -------------------------------------------------------------------------------------------------- | | ------------------------------------------------ | -------------------------------------------------------------------------------------------------- |
| [`Corpus`](/api/corpus) | Class for managing annotated corpora for training and evaluation data. | | [`Corpus`](/api/corpus) | Class for managing annotated corpora for training and evaluation data. |
| [`KnowledgeBase`](/api/kb) | Abstract base class for storage and retrieval of data for entity linking. | | [`KnowledgeBase`](/api/kb) | Abstract base class for storage and retrieval of data for entity linking. |
| [`InMemoryLookupKB`](/api/kb_in_memory) | Implementation of `KnowledgeBase` storing all data in memory. | | [`InMemoryLookupKB`](/api/inmemorylookupkb) | Implementation of `KnowledgeBase` storing all data in memory. |
| [`Candidate`](/api/kb#candidate) | Object associating a textual mention with a specific entity contained in a `KnowledgeBase`. | | [`Candidate`](/api/kb#candidate) | Object associating a textual mention with a specific entity contained in a `KnowledgeBase`. |
| [`Lookups`](/api/lookups) | Container for convenient access to large lookup tables and dictionaries. | | [`Lookups`](/api/lookups) | Container for convenient access to large lookup tables and dictionaries. |
| [`MorphAnalysis`](/api/morphology#morphanalysis) | A morphological analysis. | | [`MorphAnalysis`](/api/morphology#morphanalysis) | A morphological analysis. |


@ -134,6 +134,7 @@ useful for your purpose. Here are some important considerations to keep in mind:
<Image <Image
src="/images/sense2vec.jpg" src="/images/sense2vec.jpg"
href="https://github.com/explosion/sense2vec" href="https://github.com/explosion/sense2vec"
alt="sense2vec Screenshot"
/> />
[`sense2vec`](https://github.com/explosion/sense2vec) is a library developed by [`sense2vec`](https://github.com/explosion/sense2vec) is a library developed by


@ -20,7 +20,7 @@ menu:
## Installation instructions {id="installation"} ## Installation instructions {id="installation"}
spaCy is compatible with **64-bit CPython 3.6+** and runs on **Unix/Linux**, spaCy is compatible with **64-bit CPython 3.8+** and runs on **Unix/Linux**,
**macOS/OS X** and **Windows**. The latest spaCy releases are available over **macOS/OS X** and **Windows**. The latest spaCy releases are available over
[pip](https://pypi.python.org/pypi/spacy) and [pip](https://pypi.python.org/pypi/spacy) and
[conda](https://anaconda.org/conda-forge/spacy). [conda](https://anaconda.org/conda-forge/spacy).
@ -290,7 +290,7 @@ You can configure the build process with the following environment variables:
| Variable | Description | | Variable | Description |
| -------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | -------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `SPACY_EXTRAS` | Additional Python packages to install alongside spaCy with optional version specifications. Should be a string that can be passed to `pip install`. See [`Makefile`](%%GITHUB_SPACY/Makefile) for defaults. | | `SPACY_EXTRAS` | Additional Python packages to install alongside spaCy with optional version specifications. Should be a string that can be passed to `pip install`. See [`Makefile`](%%GITHUB_SPACY/Makefile) for defaults. |
| `PYVER` | The Python version to build against. This version needs to be available on your build and runtime machines. Defaults to `3.6`. | | `PYVER` | The Python version to build against. This version needs to be available on your build and runtime machines. Defaults to `3.8`. |
| `WHEELHOUSE` | Directory to store the wheel files during compilation. Defaults to `./wheelhouse`. | | `WHEELHOUSE` | Directory to store the wheel files during compilation. Defaults to `./wheelhouse`. |
### Run tests {id="run-tests"} ### Run tests {id="run-tests"}


@ -113,6 +113,7 @@ code.
<Image <Image
src="/images/thinc_mypy.jpg" src="/images/thinc_mypy.jpg"
href="https://thinc.ai/docs/usage-type-checking#linting" href="https://thinc.ai/docs/usage-type-checking#linting"
alt="Screenshot of Thinc type checking in VSCode with mypy"
/> />
</Accordion> </Accordion>


@ -943,7 +943,7 @@ full embedded visualizer, as well as individual components.
> $ pip install spacy-streamlit --pre > $ pip install spacy-streamlit --pre
> ``` > ```
![](/images/spacy-streamlit.png) ![Screenshot of the spacy-streamlit package in Streamlit](/images/spacy-streamlit.png)
Using [`spacy-streamlit`](https://github.com/explosion/spacy-streamlit), your Using [`spacy-streamlit`](https://github.com/explosion/spacy-streamlit), your
projects can easily define their own scripts that spin up an interactive projects can easily define their own scripts that spin up an interactive


@ -384,14 +384,14 @@ the more specific attributes `FUZZY1`..`FUZZY9` you can specify the maximum
allowed edit distance directly. allowed edit distance directly.
```python
# Match lowercase with fuzzy matching (allows 3 edits)
pattern = [{"LOWER": {"FUZZY": "definitely"}}]
# Match custom attribute values with fuzzy matching (allows 3 edits)
pattern = [{"_": {"country": {"FUZZY": "Kyrgyzstan"}}}]
# Match with exact Levenshtein edit distance limits (allows 4 edits)
pattern = [{"_": {"country": {"FUZZY4": "Kyrgyzstan"}}}]
```
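As a brief usage sketch (the misspelling `"definately"` is just an illustration), such a pattern is added to the [`Matcher`](/api/matcher) like any other token pattern:
```python
import spacy
from spacy.matcher import Matcher

nlp = spacy.blank("en")
matcher = Matcher(nlp.vocab)
matcher.add("DEFINITELY", [[{"LOWER": {"FUZZY": "definitely"}}]])

doc = nlp("I will definately be there.")
for match_id, start, end in matcher(doc):
    print(doc[start:end].text)  # "definately" is within the allowed edit distance
```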
#### Regex and fuzzy matching with lists {id="regex-fuzzy-lists", version="3.5"} #### Regex and fuzzy matching with lists {id="regex-fuzzy-lists", version="3.5"}


@ -304,6 +304,28 @@ installed in the same environment that's it.
| `spacy_lookups` | Group of entry points for custom [`Lookups`](/api/lookups), including lemmatizer data. Used by spaCy's [`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data) package. | | `spacy_lookups` | Group of entry points for custom [`Lookups`](/api/lookups), including lemmatizer data. Used by spaCy's [`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data) package. |
| [`spacy_displacy_colors`](#entry-points-displacy) | Group of entry points of custom label colors for the [displaCy visualizer](/usage/visualizers#ent). The key name doesn't matter, but it should point to a dict of labels and color values. Useful for custom models that predict different entity types. | | [`spacy_displacy_colors`](#entry-points-displacy) | Group of entry points of custom label colors for the [displaCy visualizer](/usage/visualizers#ent). The key name doesn't matter, but it should point to a dict of labels and color values. Useful for custom models that predict different entity types. |
### Loading probability tables into existing models
You can load a probability table from [spacy-lookups-data](https://github.com/explosion/spacy-lookups-data) into an existing spaCy model like `en_core_web_sm`.
```python
# Requirements: pip install spacy-lookups-data
import spacy
from spacy.lookups import load_lookups
nlp = spacy.load("en_core_web_sm")
lookups = load_lookups("en", ["lexeme_prob"])
nlp.vocab.lookups.add_table("lexeme_prob", lookups.get_table("lexeme_prob"))
```
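Once the table has been added, the values are exposed through the usual lexeme attributes, continuing the snippet above:
```python
lexeme = nlp.vocab["the"]
print(lexeme.prob)  # smoothed log probability from the lexeme_prob table
```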
When training a model from scratch, you can also specify probability tables in the `config.cfg`.
```ini {title="config.cfg (excerpt)"}
[initialize.lookups]
@misc = "spacy.LookupsDataLoader.v1"
lang = ${nlp.lang}
tables = ["lexeme_prob"]
```
### Custom components via entry points {id="entry-points-components"} ### Custom components via entry points {id="entry-points-components"}
When you load a pipeline, spaCy will generally use its `config.cfg` to set up When you load a pipeline, spaCy will generally use its `config.cfg` to set up
@ -684,10 +706,15 @@ If your pipeline includes
[custom components](/usage/processing-pipelines#custom-components), model [custom components](/usage/processing-pipelines#custom-components), model
architectures or other [code](/usage/training#custom-code), those functions need architectures or other [code](/usage/training#custom-code), those functions need
to be registered **before** your pipeline is loaded. Otherwise, spaCy won't know to be registered **before** your pipeline is loaded. Otherwise, spaCy won't know
how to create the objects referenced in the config. If you're loading your own
pipeline in Python, you can make custom components available just by importing
the code that defines them before calling
[`spacy.load`](/api/top-level#spacy.load). This is also how the `--code`
argument to CLI commands works.
With the [`spacy package`](/api/cli#package) command, you can provide one or
more paths to Python files containing custom registered functions using the
`--code` argument.
> #### \_\_init\_\_.py (excerpt) > #### \_\_init\_\_.py (excerpt)
> >


@ -567,7 +567,10 @@ If you would like to use the spaCy logo on your site, please get in touch and
ask us first. However, if you want to show support and tell others that your ask us first. However, if you want to show support and tell others that your
project is using spaCy, you can grab one of our **spaCy badges** here: project is using spaCy, you can grab one of our **spaCy badges** here:
<img src={`https://img.shields.io/badge/built%20with-spaCy-09a3d5.svg`} /> <img
src={`https://img.shields.io/badge/built%20with-spaCy-09a3d5.svg`}
alt="Built with spaCy"
/>
```markdown ```markdown
[![Built with spaCy](https://img.shields.io/badge/built%20with-spaCy-09a3d5.svg)](https://spacy.io) [![Built with spaCy](https://img.shields.io/badge/built%20with-spaCy-09a3d5.svg)](https://spacy.io)
@ -575,8 +578,9 @@ project is using spaCy, you can grab one of our **spaCy badges** here:
<img <img
src={`https://img.shields.io/badge/made%20with%20❤%20and-spaCy-09a3d5.svg`} src={`https://img.shields.io/badge/made%20with%20❤%20and-spaCy-09a3d5.svg`}
alt="Made with love and spaCy"
/> />
```markdown ```markdown
[![Built with spaCy](https://img.shields.io/badge/made%20with%20❤%20and-spaCy-09a3d5.svg)](https://spacy.io) [![Made with love and spaCy](https://img.shields.io/badge/made%20with%20❤%20and-spaCy-09a3d5.svg)](https://spacy.io)
``` ```

website/docs/usage/v3-5.mdx (new file, 215 lines)

@ -0,0 +1,215 @@
---
title: What's New in v3.5
teaser: New features and how to upgrade
menu:
- ['New Features', 'features']
- ['Upgrading Notes', 'upgrading']
---
## New features {id="features",hidden="true"}
spaCy v3.5 introduces three new CLI commands, `apply`, `benchmark` and
`find-threshold`, adds fuzzy matching, provides improvements to our entity
linking functionality, and includes a range of language updates and bug fixes.
### New CLI commands {id="cli"}
#### apply CLI
The [`apply` CLI](/api/cli#apply) can be used to apply a pipeline to one or more
`.txt`, `.jsonl` or `.spacy` input files, saving the annotated docs in a single
`.spacy` file.
```bash
$ spacy apply en_core_web_sm my_texts/ output.spacy
```
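The output is a serialized [`DocBin`](/api/docbin), so the annotated docs can be loaded back in Python afterwards (a small sketch, assuming `output.spacy` was produced by the command above):
```python
import spacy
from spacy.tokens import DocBin

nlp = spacy.load("en_core_web_sm")  # provides the vocab for deserialization
docs = list(DocBin().from_disk("output.spacy").get_docs(nlp.vocab))
print(len(docs), docs[0].ents)
```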
#### benchmark CLI
The [`benchmark` CLI](/api/cli#benchmark) has been added to extend the existing
`evaluate` functionality with a wider range of profiling subcommands.
The `benchmark accuracy` CLI is introduced as an alias for `evaluate`. The new
`benchmark speed` CLI performs warmup rounds before measuring the speed in words
per second on batches of randomly shuffled documents from the provided data.
```bash
$ spacy benchmark speed my_pipeline data.spacy
```
The output is the mean performance using batches (`nlp.pipe`) with a 95%
confidence interval, e.g., profiling `en_core_web_sm` on CPU:
```none
Outliers: 2.0%, extreme outliers: 0.0%
Mean: 18904.1 words/s (95% CI: -256.9 +244.1)
```
#### find-threshold CLI
The [`find-threshold` CLI](/api/cli#find-threshold) runs a series of trials
across threshold values from `0.0` to `1.0` and identifies the best threshold
for the provided score metric.
The following command runs 20 trials for the `spancat` component in
`my_pipeline`, recording the `spans_sc_f` score for each value of the threshold
`[components.spancat.threshold]` from `0.0` to `1.0`:
```bash
$ spacy find-threshold my_pipeline data.spacy spancat threshold spans_sc_f --n_trials 20
```
The `find-threshold` CLI can be used with `textcat_multilabel`, `spancat` and
custom components with thresholds that are applied while predicting or scoring.
### Fuzzy matching {id="fuzzy"}
New `FUZZY` operators support [fuzzy matching](/usage/rule-based-matching#fuzzy)
with the `Matcher`. By default, the `FUZZY` operator allows a Levenshtein edit
distance of 2 and up to 30% of the pattern string length. `FUZZY1`..`FUZZY9` can
be used to specify the exact number of allowed edits.
```python
# Match lowercase with fuzzy matching (allows up to 3 edits)
pattern = [{"LOWER": {"FUZZY": "definitely"}}]
# Match custom attribute values with fuzzy matching (allows up to 3 edits)
pattern = [{"_": {"country": {"FUZZY": "Kyrgyzstan"}}}]
# Match with exact Levenshtein edit distance limits (allows up to 4 edits)
pattern = [{"_": {"country": {"FUZZY4": "Kyrgyzstan"}}}]
```
Note that `FUZZY` uses Levenshtein edit distance rather than Damerau-Levenshtein
edit distance, so a transposition like `teh` for `the` counts as two edits, one
insertion and one deletion.
If you'd prefer an alternate fuzzy matching algorithm, you can provide your own
custom method to the `Matcher`, or set it as a config option for the entity
ruler and span ruler.
### FUZZY and REGEX with lists {id="fuzzy-regex-lists"}
The `FUZZY` and `REGEX` operators are also now supported for lists with `IN` and
`NOT_IN`:
```python
pattern = [{"TEXT": {"FUZZY": {"IN": ["awesome", "cool", "wonderful"]}}}]
pattern = [{"TEXT": {"REGEX": {"NOT_IN": ["^awe(some)?$", "^wonder(ful)?"]}}}]
```
### Entity linking generalization {id="el"}
The knowledge base used for entity linking is now easier to customize and has a
new default implementation [`InMemoryLookupKB`](/api/inmemorylookupkb).
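For example, a custom knowledge base can now be implemented by subclassing and overriding only the candidate generation you care about (a hypothetical sketch, not an official recipe):
```python
from spacy.kb import InMemoryLookupKB


class LowercaseLookupKB(InMemoryLookupKB):
    """Hypothetical KB that normalizes mentions to lowercase before lookup."""

    def get_candidates(self, mention):
        return self.get_alias_candidates(mention.text.lower())
```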
### Additional features and improvements {id="additional-features-and-improvements"}
- Language updates:
  - Extended support for Slovenian
  - Fixed lookup fallback for French and Catalan lemmatizers
  - Switch Russian and Ukrainian lemmatizers to `pymorphy3`
  - Support for editorial punctuation in Ancient Greek
  - Update to Russian tokenizer exceptions
  - Small fix for Dutch stop words
- Allow up to `typer` v0.7.x, `mypy` 0.990 and `typing_extensions` v4.4.x.
- New `spacy.ConsoleLogger.v3` with expanded progress
[tracking](/api/top-level#ConsoleLogger).
- Improved scoring behavior for `textcat` with `spacy.textcat_scorer.v2` and
`spacy.textcat_multilabel_scorer.v2`.
- Updates so that downstream components can train properly on a frozen `tok2vec`
or `transformer` layer.
- Allow interpolation of variables in directory names in projects.
- Support for local file system [remotes](/usage/projects#remote) for projects.
- Improve UX around `displacy.serve` when the default port is in use.
- Optional `before_update` callback that is invoked at the start of each
[training step](/api/data-formats#config-training).
- Improve performance of `SpanGroup` and fix typing issues for `SpanGroup` and
`Span` objects.
- Patch a
[security vulnerability](https://github.com/advisories/GHSA-gw9q-c7gh-j9vm) in
extracting tar files.
- Add equality definition for `Vectors`.
- Ensure `Vocab.to_disk` respects the exclude setting for `lookups` and
`vectors`.
- Correctly handle missing annotations in the edit tree lemmatizer.
### Trained pipeline updates {id="pipelines"}
- The CNN pipelines add `IS_SPACE` as a `tok2vec` feature for `tagger` and
`morphologizer` components to improve tagging of non-whitespace vs. whitespace
tokens.
- The transformer pipelines require `spacy-transformers` v1.2, which uses the
exact alignment from `tokenizers` for fast tokenizers instead of the heuristic
alignment from `spacy-alignments`. For all trained pipelines except
`ja_core_news_trf`, the alignments between spaCy tokens and transformer tokens
may be slightly different. More details about the `spacy-transformers` changes
in the
[v1.2.0 release notes](https://github.com/explosion/spacy-transformers/releases/tag/v1.2.0).
## Notes about upgrading from v3.4 {id="upgrading"}
### Validation of textcat values {id="textcat-validation"}
An error is now raised when unsupported values are given as input to train a
`textcat` or `textcat_multilabel` model - ensure that values are `0.0` or `1.0`
as explained in the [docs](/api/textcategorizer#assigned-attributes).
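For example, the category annotations in your training data should look like this (a minimal sketch with hypothetical labels):
```python
import spacy
from spacy.training import Example

nlp = spacy.blank("en")
doc = nlp.make_doc("This is great")
# Values must be exactly 0.0 or 1.0 - soft or out-of-range values now raise an error
example = Example.from_dict(doc, {"cats": {"POSITIVE": 1.0, "NEGATIVE": 0.0}})
```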
### Updated scorers for tokenization and textcat {id="scores"}
We fixed a bug that inflated the `token_acc` scores in v3.0-v3.4. The reported
`token_acc` will drop from v3.4 to v3.5, but if `token_p/r/f` stay the same,
your tokenization performance has not changed from v3.4.
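To compare the two, you can inspect both sets of scores returned by [`Language.evaluate`](/api/language#evaluate) (a quick sketch, assuming `dev_examples` is a list of `Example` objects and `my_pipeline` is a placeholder name):
```python
import spacy

nlp = spacy.load("my_pipeline")
scores = nlp.evaluate(dev_examples)
print(scores["token_acc"], scores["token_p"], scores["token_r"], scores["token_f"])
```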
For new `textcat` or `textcat_multilabel` configs, the new default `v2` scorers:
- ignore `threshold` for `textcat`, so the reported `cats_p/r/f` may increase
slightly in v3.5 even though the underlying predictions are unchanged
- report the performance of only the **final** `textcat` or `textcat_multilabel`
component in the pipeline by default
- allow custom scorers to be used to score multiple `textcat` and
`textcat_multilabel` components with `Scorer.score_cats` by restricting the
evaluation to the component's provided labels
### Pipeline package version compatibility {id="version-compat"}
> #### Using legacy implementations
>
> In spaCy v3, you'll still be able to load and reference legacy implementations
> via [`spacy-legacy`](https://github.com/explosion/spacy-legacy), even if the
> components or architectures change and newer versions are available in the
> core library.
When you're loading a pipeline package trained with an earlier version of spaCy
v3, you will see a warning telling you that the pipeline may be incompatible.
This doesn't necessarily have to be true, but we recommend running your
pipelines against your test suite or evaluation data to make sure there are no
unexpected results.
If you're using one of the [trained pipelines](/models) we provide, you should
run [`spacy download`](/api/cli#download) to update to the latest version. To
see an overview of all installed packages and their compatibility, you can run
[`spacy validate`](/api/cli#validate).
If you've trained your own custom pipeline and you've confirmed that it's still
working as expected, you can update the spaCy version requirements in the
[`meta.json`](/api/data-formats#meta):
```diff
- "spacy_version": ">=3.4.0,<3.5.0",
+ "spacy_version": ">=3.4.0,<3.6.0",
```
### Updating v3.4 configs
To update a config from spaCy v3.4 with the new v3.5 settings, run
[`init fill-config`](/api/cli#init-fill-config):
```cli
$ python -m spacy init fill-config config-v3.4.cfg config-v3.5.cfg
```
In many cases ([`spacy train`](/api/cli#train),
[`spacy.load`](/api/top-level#spacy.load)), the new defaults will be filled in
automatically, but you'll need to fill in the new settings to run
[`debug config`](/api/cli#debug) and [`debug data`](/api/cli#debug-data).


@ -437,6 +437,6 @@ Alternatively, if you're using [Streamlit](https://streamlit.io), check out the
helps you integrate spaCy visualizations into your apps. It includes a full helps you integrate spaCy visualizations into your apps. It includes a full
embedded visualizer, as well as individual components. embedded visualizer, as well as individual components.
![](/images/spacy-streamlit.png) ![Screenshot of the spacy-streamlit package in Streamlit](/images/spacy-streamlit.png)
</Grid> </Grid>


@ -13,7 +13,8 @@
{ "text": "New in v3.1", "url": "/usage/v3-1" }, { "text": "New in v3.1", "url": "/usage/v3-1" },
{ "text": "New in v3.2", "url": "/usage/v3-2" }, { "text": "New in v3.2", "url": "/usage/v3-2" },
{ "text": "New in v3.3", "url": "/usage/v3-3" }, { "text": "New in v3.3", "url": "/usage/v3-3" },
{ "text": "New in v3.4", "url": "/usage/v3-4" } { "text": "New in v3.4", "url": "/usage/v3-4" },
{ "text": "New in v3.5", "url": "/usage/v3-5" }
] ]
}, },
{ {
@ -129,6 +130,7 @@
"items": [ "items": [
{ "text": "Attributes", "url": "/api/attributes" }, { "text": "Attributes", "url": "/api/attributes" },
{ "text": "Corpus", "url": "/api/corpus" }, { "text": "Corpus", "url": "/api/corpus" },
{ "text": "InMemoryLookupKB", "url": "/api/inmemorylookupkb" },
{ "text": "KnowledgeBase", "url": "/api/kb" }, { "text": "KnowledgeBase", "url": "/api/kb" },
{ "text": "Lookups", "url": "/api/lookups" }, { "text": "Lookups", "url": "/api/lookups" },
{ "text": "MorphAnalysis", "url": "/api/morphology#morphanalysis" }, { "text": "MorphAnalysis", "url": "/api/morphology#morphanalysis" },


@ -27,7 +27,7 @@
"indexName": "spacy" "indexName": "spacy"
}, },
"binderUrl": "explosion/spacy-io-binder", "binderUrl": "explosion/spacy-io-binder",
"binderVersion": "3.4", "binderVersion": "3.5",
"sections": [ "sections": [
{ "id": "usage", "title": "Usage Documentation", "theme": "blue" }, { "id": "usage", "title": "Usage Documentation", "theme": "blue" },
{ "id": "models", "title": "Models Documentation", "theme": "blue" }, { "id": "models", "title": "Models Documentation", "theme": "blue" },


@ -2381,7 +2381,7 @@
"author": "Nikita Kitaev", "author": "Nikita Kitaev",
"author_links": { "author_links": {
"github": "nikitakit", "github": "nikitakit",
"website": " http://kitaev.io" "website": "http://kitaev.io"
}, },
"category": ["research", "pipeline"] "category": ["research", "pipeline"]
}, },


@ -17,7 +17,7 @@ export default function App({ Component, pageProps }: AppProps) {
<link rel="manifest" href="/manifest.webmanifest" /> <link rel="manifest" href="/manifest.webmanifest" />
<meta <meta
name="viewport" name="viewport"
content="width=device-width, initial-scale=1.0, minimum-scale=1 maximum-scale=1.0, user-scalable=0, shrink-to-fit=no, viewport-fit=cover" content="width=device-width, initial-scale=1.0, minimum-scale=1, maximum-scale=5.0, shrink-to-fit=no, viewport-fit=cover"
/> />
<meta name="theme-color" content="#09a3d5" /> <meta name="theme-color" content="#09a3d5" />
<link rel="apple-touch-icon" sizes="192x192" href="/icons/icon-192x192.png" /> <link rel="apple-touch-icon" sizes="192x192" href="/icons/icon-192x192.png" />


@ -13,7 +13,7 @@ import {
LandingBanner, LandingBanner,
} from '../src/components/landing' } from '../src/components/landing'
import { H2 } from '../src/components/typography' import { H2 } from '../src/components/typography'
import { InlineCode } from '../src/components/code' import { InlineCode } from '../src/components/inlineCode'
import { Ul, Li } from '../src/components/list' import { Ul, Li } from '../src/components/list'
import Button from '../src/components/button' import Button from '../src/components/button'
import Link from '../src/components/link' import Link from '../src/components/link'
@ -89,8 +89,8 @@ const Landing = () => {
</LandingCard> </LandingCard>
<LandingCard title="Awesome ecosystem" url="/usage/projects" button="Read more"> <LandingCard title="Awesome ecosystem" url="/usage/projects" button="Read more">
Since its release in 2015, spaCy has become an industry standard with a huge
ecosystem. Choose from a variety of plugins, integrate with your machine
learning stack and build custom components and workflows.
</LandingCard> </LandingCard>
</LandingGrid> </LandingGrid>
@ -162,7 +162,7 @@ const Landing = () => {
small small
> >
<p> <p>
<Link to="https://prodi.gy" hidden> <Link to="https://prodi.gy" noLinkLayout>
<ImageFill <ImageFill
image={prodigyImage} image={prodigyImage}
alt="Prodigy: Radically efficient machine teaching" alt="Prodigy: Radically efficient machine teaching"
@ -206,7 +206,10 @@ const Landing = () => {
<LandingGrid cols={2}> <LandingGrid cols={2}>
<LandingCol> <LandingCol>
<Link to="/usage/projects" hidden> <Link to="/usage/projects" hidden>
<ImageFill image={projectsImage} /> <ImageFill
image={projectsImage}
alt="Illustration of project workflow and commands"
/>
</Link> </Link>
<br /> <br />
<br /> <br />


@ -33,7 +33,7 @@ export default function Accordion({ title, id, expanded = false, spaced = false,
<Link <Link
to={`#${id}`} to={`#${id}`}
className={classes.anchor} className={classes.anchor}
hidden noLinkLayout
onClick={(event) => event.stopPropagation()} onClick={(event) => event.stopPropagation()}
> >
&para; &para;


@ -1,6 +1,7 @@
import React from 'react' import React from 'react'
import PropTypes from 'prop-types' import PropTypes from 'prop-types'
import classNames from 'classnames' import classNames from 'classnames'
import ImageNext from 'next/image'
import Link from './link' import Link from './link'
import { H5 } from './typography' import { H5 } from './typography'
@ -10,7 +11,7 @@ export default function Card({ title, to, image, header, small, onClick, childre
return ( return (
<div className={classNames(classes.root, { [classes.small]: !!small })}> <div className={classNames(classes.root, { [classes.small]: !!small })}>
{header && ( {header && (
<Link to={to} onClick={onClick} hidden> <Link to={to} onClick={onClick} noLinkLayout>
{header} {header}
</Link> </Link>
)} )}
@ -18,18 +19,17 @@ export default function Card({ title, to, image, header, small, onClick, childre
<H5 className={classes.title}> <H5 className={classes.title}>
{image && ( {image && (
<div className={classes.image}> <div className={classes.image}>
{/* eslint-disable-next-line @next/next/no-img-element */} <ImageNext src={image} height={35} width={35} alt={`${title} Logo`} />
<img src={image} width={35} alt="" />
</div> </div>
)} )}
{title && ( {title && (
<Link to={to} onClick={onClick} hidden> <Link to={to} onClick={onClick} noLinkLayout>
{title} {title}
</Link> </Link>
)} )}
</H5> </H5>
)} )}
<Link to={to} onClick={onClick} hidden> <Link to={to} onClick={onClick} noLinkLayout>
{children} {children}
</Link> </Link>
</div> </div>


@ -14,96 +14,16 @@ import 'prismjs/components/prism-markdown.min.js'
import 'prismjs/components/prism-python.min.js' import 'prismjs/components/prism-python.min.js'
import 'prismjs/components/prism-yaml.min.js' import 'prismjs/components/prism-yaml.min.js'
import CUSTOM_TYPES from '../../meta/type-annotations.json' import { isString } from './util'
import { isString, htmlToReact } from './util'
import Link, { OptionalLink } from './link' import Link, { OptionalLink } from './link'
import GitHubCode from './github' import GitHubCode from './github'
import Juniper from './juniper'
import classes from '../styles/code.module.sass' import classes from '../styles/code.module.sass'
import siteMetadata from '../../meta/site.json' import siteMetadata from '../../meta/site.json'
import { binderBranch } from '../../meta/dynamicMeta.mjs' import { binderBranch } from '../../meta/dynamicMeta.mjs'
import dynamic from 'next/dynamic'
const WRAP_THRESHOLD = 30
const CLI_GROUPS = ['init', 'debug', 'project', 'ray', 'huggingface-hub'] const CLI_GROUPS = ['init', 'debug', 'project', 'ray', 'huggingface-hub']
const CodeBlock = (props) => (
<Pre>
<Code {...props} />
</Pre>
)
export default CodeBlock
export const Pre = (props) => {
return <pre className={classes['pre']}>{props.children}</pre>
}
export const InlineCode = ({ wrap = false, className, children, ...props }) => {
const codeClassNames = classNames(classes['inline-code'], className, {
[classes['wrap']]: wrap || (isString(children) && children.length >= WRAP_THRESHOLD),
})
return (
<code className={codeClassNames} {...props}>
{children}
</code>
)
}
InlineCode.propTypes = {
wrap: PropTypes.bool,
className: PropTypes.string,
children: PropTypes.node,
}
function linkType(el, showLink = true) {
if (!isString(el) || !el.length) return el
const elStr = el.trim()
if (!elStr) return el
const typeUrl = CUSTOM_TYPES[elStr]
const url = typeUrl == true ? DEFAULT_TYPE_URL : typeUrl
const ws = el[0] == ' '
return url && showLink ? (
<Fragment>
{ws && ' '}
<Link to={url} hideIcon>
{elStr}
</Link>
</Fragment>
) : (
el
)
}
export const TypeAnnotation = ({ lang = 'python', link = true, children }) => {
// Hacky, but we're temporarily replacing a dot to prevent it from being split during highlighting
const TMP_DOT = '۔'
const code = Array.isArray(children) ? children.join('') : children || ''
const [rawText, meta] = code.split(/(?= \(.+\)$)/)
const rawStr = rawText.replace(/\./g, TMP_DOT)
const rawHtml =
lang === 'none' || !code ? code : Prism.highlight(rawStr, Prism.languages[lang], lang)
const html = rawHtml.replace(new RegExp(TMP_DOT, 'g'), '.').replace(/\n/g, ' ')
const result = htmlToReact(html)
const elements = Array.isArray(result) ? result : [result]
const annotClassNames = classNames(
'type-annotation',
`language-${lang}`,
classes['inline-code'],
classes['type-annotation'],
{
[classes['wrap']]: code.length >= WRAP_THRESHOLD,
}
)
return (
<span className={annotClassNames} role="code" aria-label="Type annotation">
{elements.map((el, i) => (
<Fragment key={i}>{linkType(el, !!link)}</Fragment>
))}
{meta && <span className={classes['type-annotation-meta']}>{meta}</span>}
</span>
)
}
const splitLines = (children) => { const splitLines = (children) => {
const listChildrenPerLine = [] const listChildrenPerLine = []
@ -235,7 +155,7 @@ const handlePromot = ({ lineFlat, prompt }) => {
<Fragment key={j}> <Fragment key={j}>
{j !== 0 && ' '} {j !== 0 && ' '}
<span className={itemClassNames}> <span className={itemClassNames}>
<OptionalLink hidden hideIcon to={url}> <OptionalLink noLinkLayout hideIcon to={url}>
{text} {text}
</OptionalLink> </OptionalLink>
</span> </span>
@ -288,7 +208,7 @@ const addLineHighlight = (children, highlight) => {
}) })
} }
export const CodeHighlighted = ({ children, highlight, lang }) => { const CodeHighlighted = ({ children, highlight, lang }) => {
const [html, setHtml] = useState() const [html, setHtml] = useState()
useEffect( useEffect(
@ -305,7 +225,7 @@ export const CodeHighlighted = ({ children, highlight, lang }) => {
return <>{html}</> return <>{html}</>
} }
export class Code extends React.Component { export default class Code extends React.Component {
static defaultProps = { static defaultProps = {
lang: 'none', lang: 'none',
executable: null, executable: null,
@ -354,6 +274,8 @@ export class Code extends React.Component {
} }
} }
const JuniperDynamic = dynamic(() => import('./juniper'))
const JuniperWrapper = ({ title, lang, children }) => { const JuniperWrapper = ({ title, lang, children }) => {
const { binderUrl, binderVersion } = siteMetadata const { binderUrl, binderVersion } = siteMetadata
const juniperTitle = title || 'Editable Code' const juniperTitle = title || 'Editable Code'
@ -363,13 +285,13 @@ const JuniperWrapper = ({ title, lang, children }) => {
{juniperTitle} {juniperTitle}
<span className={classes['juniper-meta']}> <span className={classes['juniper-meta']}>
spaCy v{binderVersion} &middot; Python 3 &middot; via{' '} spaCy v{binderVersion} &middot; Python 3 &middot; via{' '}
<Link to="https://mybinder.org/" hidden> <Link to="https://mybinder.org/" noLinkLayout>
Binder Binder
</Link> </Link>
</span> </span>
</h4> </h4>
<Juniper <JuniperDynamic
repo={binderUrl} repo={binderUrl}
branch={binderBranch} branch={binderBranch}
lang={lang} lang={lang}
@ -381,7 +303,7 @@ const JuniperWrapper = ({ title, lang, children }) => {
}} }}
> >
{children} {children}
</Juniper> </JuniperDynamic>
</div> </div>
) )
} }


@ -0,0 +1,14 @@
import React from 'react'
import Code from './codeDynamic'
import classes from '../styles/code.module.sass'
export const Pre = (props) => {
return <pre className={classes['pre']}>{props.children}</pre>
}
const CodeBlock = (props) => (
<Pre>
<Code {...props} />
</Pre>
)
export default CodeBlock


@ -0,0 +1,5 @@
import dynamic from 'next/dynamic'
export default dynamic(() => import('./code'), {
loading: () => <div style={{ color: 'white', padding: '1rem' }}>Loading...</div>,
})


@ -14,7 +14,7 @@ export function copyToClipboard(ref, callback) {
} }
} }
export default function CopyInput({ text, prefix }) { export default function CopyInput({ text, description, prefix }) {
const isClient = typeof window !== 'undefined' const isClient = typeof window !== 'undefined'
const [supportsCopy, setSupportsCopy] = useState(false) const [supportsCopy, setSupportsCopy] = useState(false)
@ -41,6 +41,7 @@ export default function CopyInput({ text, prefix }) {
defaultValue={text} defaultValue={text}
rows={1} rows={1}
onClick={selectText} onClick={selectText}
aria-label={description}
/> />
{supportsCopy && ( {supportsCopy && (
<button title="Copy to clipboard" onClick={onClick}> <button title="Copy to clipboard" onClick={onClick}>


@ -5,8 +5,8 @@ import ImageNext from 'next/image'
import Link from './link' import Link from './link'
import Button from './button' import Button from './button'
import { InlineCode } from './code' import { InlineCode } from './inlineCode'
import { MarkdownToReact } from './util' import MarkdownToReact from './markdownToReactDynamic'
import classes from '../styles/embed.module.sass' import classes from '../styles/embed.module.sass'
@ -88,10 +88,16 @@ const Image = ({ src, alt, title, href, ...props }) => {
const markdownComponents = { code: InlineCode, p: Fragment, a: Link } const markdownComponents = { code: InlineCode, p: Fragment, a: Link }
return ( return (
<figure className="gatsby-resp-image-figure"> <figure className="gatsby-resp-image-figure">
<Link className={linkClassNames} href={href ?? src} hidden forceExternal> {href ? (
{/* eslint-disable-next-line @next/next/no-img-element */} <Link className={linkClassNames} href={href} noLinkLayout forceExternal>
{/* eslint-disable-next-line @next/next/no-img-element */}
<img className={classes.image} src={src} alt={alt} width={650} height="auto" />
</Link>
) : (
/* eslint-disable-next-line @next/next/no-img-element */
<img className={classes.image} src={src} alt={alt} width={650} height="auto" /> <img className={classes.image} src={src} alt={alt} width={650} height="auto" />
</Link> )}
{title && ( {title && (
<figcaption className="gatsby-resp-image-figcaption"> <figcaption className="gatsby-resp-image-figcaption">
<MarkdownToReact markdown={title} /> <MarkdownToReact markdown={title} />
@ -104,7 +110,7 @@ const Image = ({ src, alt, title, href, ...props }) => {
const ImageFill = ({ image, ...props }) => { const ImageFill = ({ image, ...props }) => {
return ( return (
<span <span
class={classes['figure-fill']} className={classes['figure-fill']}
style={{ paddingBottom: `${(image.height / image.width) * 100}%` }} style={{ paddingBottom: `${(image.height / image.width) * 100}%` }}
> >
<ImageNext src={image.src} {...props} fill /> <ImageNext src={image.src} {...props} fill />


@ -21,7 +21,7 @@ export default function Footer({ wide = false }) {
<li className={classes.label}>{label}</li> <li className={classes.label}>{label}</li>
{items.map(({ text, url }, j) => ( {items.map(({ text, url }, j) => (
<li key={j}> <li key={j}>
<Link to={url} hidden> <Link to={url} noLinkLayout>
{text} {text}
</Link> </Link>
</li> </li>
@ -42,14 +42,14 @@ export default function Footer({ wide = false }) {
<div className={classNames(classes.content, classes.copy)}> <div className={classNames(classes.content, classes.copy)}>
<span> <span>
&copy; 2016-{new Date().getFullYear()}{' '} &copy; 2016-{new Date().getFullYear()}{' '}
<Link to={companyUrl} hidden> <Link to={companyUrl} noLinkLayout>
{company} {company}
</Link> </Link>
</span> </span>
<Link to={companyUrl} aria-label={company} hidden className={classes.logo}> <Link to={companyUrl} aria-label={company} noLinkLayout className={classes.logo}>
<SVG src={explosionLogo.src} width={45} height={45} /> <SVG src={explosionLogo.src} width={45} height={45} />
</Link> </Link>
<Link to={`${companyUrl}/legal`} hidden> <Link to={`${companyUrl}/legal`} noLinkLayout>
Legal / Imprint Legal / Imprint
</Link> </Link>
</div> </div>


@ -5,7 +5,7 @@ import classNames from 'classnames'
import Icon from './icon' import Icon from './icon'
import Link from './link' import Link from './link'
import classes from '../styles/code.module.sass' import classes from '../styles/code.module.sass'
import { Code } from './code' import Code from './codeDynamic'
const defaultErrorMsg = `Can't fetch code example from GitHub :( const defaultErrorMsg = `Can't fetch code example from GitHub :(
@ -42,7 +42,7 @@ const GitHubCode = ({ url, lang, errorMsg = defaultErrorMsg, className }) => {
return ( return (
<> <>
<header className={classes.header}> <header className={classes.header}>
<Link to={url} hidden> <Link to={url} noLinkLayout>
<Icon name="github" width={16} inline /> <Icon name="github" width={16} inline />
<code <code
className={classNames(classes['inline-code'], classes['inline-code-dark'])} className={classNames(classes['inline-code'], classes['inline-code-dark'])}


@ -0,0 +1,12 @@
import { Parser as HtmlToReactParser } from 'html-to-react'
const htmlToReactParser = new HtmlToReactParser()
/**
* Convert raw HTML to React elements
* @param {string} html - The HTML markup to convert.
* @returns {Node} - The converted React elements.
*/
export default function HtmlToReact(props) {
return htmlToReactParser.parse(props.children)
}


@ -0,0 +1,23 @@
import React from 'react'
import PropTypes from 'prop-types'
import classNames from 'classnames'
import { isString } from './util'
import classes from '../styles/code.module.sass'
const WRAP_THRESHOLD = 30
export const InlineCode = ({ wrap = false, className, children, ...props }) => {
const codeClassNames = classNames(classes['inline-code'], className, {
[classes['wrap']]: wrap || (isString(children) && children.length >= WRAP_THRESHOLD),
})
return (
<code className={codeClassNames} {...props}>
{children}
</code>
)
}
InlineCode.propTypes = {
wrap: PropTypes.bool,
className: PropTypes.string,
children: PropTypes.node,
}


@ -12,17 +12,17 @@ const spacyTheme = createTheme({
theme: 'dark', theme: 'dark',
settings: { settings: {
background: 'var(--color-front)', background: 'var(--color-front)',
foreground: 'var(--color-subtle)', foreground: 'var(--color-subtle-on-dark)',
caret: 'var(--color-theme-dark)', caret: 'var(--color-theme-dark)',
selection: 'var(--color-theme)', selection: 'var(--color-theme-dark)',
selectionMatch: 'var(--color-theme)', selectionMatch: 'var(--color-theme-dark)',
gutterBackground: 'var(--color-front)', gutterBackground: 'var(--color-front)',
gutterForeground: 'var(--color-subtle)', gutterForeground: 'var(--color-subtle-on-dark)',
fontFamily: 'var(--font-code)', fontFamily: 'var(--font-code)',
}, },
styles: [ styles: [
{ tag: t.comment, color: 'var(--syntax-comment)' }, { tag: t.comment, color: 'var(--syntax-comment)' },
{ tag: t.variableName, color: 'var(--color-subtle)' }, { tag: t.variableName, color: 'var(--color-subtle-on-dark)' },
{ tag: [t.string, t.special(t.brace)], color: '#fff' }, { tag: [t.string, t.special(t.brace)], color: '#fff' },
{ tag: t.number, color: 'var(--syntax-number)' }, { tag: t.number, color: 'var(--syntax-number)' },
{ tag: t.string, color: 'var(--syntax-selector)' }, { tag: t.string, color: 'var(--syntax-selector)' },


@ -1,17 +1,17 @@
import React from 'react' import React from 'react'
import classNames from 'classnames' import classNames from 'classnames'
import patternDefault from '../images/pattern_blue.jpg' import patternDefault from '../images/pattern_blue.png'
import patternNightly from '../images/pattern_nightly.jpg' import patternNightly from '../images/pattern_nightly.png'
import patternLegacy from '../images/pattern_legacy.jpg' import patternLegacy from '../images/pattern_legacy.png'
import overlayDefault from '../images/pattern_landing.jpg' import overlayDefault from '../images/pattern_landing.png'
import overlayNightly from '../images/pattern_landing_nightly.jpg' import overlayNightly from '../images/pattern_landing_nightly.png'
import overlayLegacy from '../images/pattern_landing_legacy.jpg' import overlayLegacy from '../images/pattern_landing_legacy.png'
import Grid from './grid' import Grid from './grid'
import { Content } from './main' import { Content } from './main'
import Button from './button' import Button from './button'
import CodeBlock from './code' import CodeBlock from './codeBlock'
import { H1, H2, H3 } from './typography' import { H1, H2, H3 } from './typography'
import Link from './link' import Link from './link'
import classes from '../styles/landing.module.sass' import classes from '../styles/landing.module.sass'
@ -110,6 +110,7 @@ export const LandingBanner = ({
}) })
const style = { const style = {
'--color-theme': background, '--color-theme': background,
'--color-theme-dark': background,
'--color-back': color, '--color-back': color,
backgroundImage: backgroundImage ? `url(${backgroundImage})` : null, backgroundImage: backgroundImage ? `url(${backgroundImage})` : null,
} }
@ -124,7 +125,7 @@ export const LandingBanner = ({
<span className={classes['label']}>{label}</span> <span className={classes['label']}>{label}</span>
</div> </div>
)} )}
<Link to={to} hidden> <Link to={to} noLinkLayout>
{title} {title}
</Link> </Link>
</Heading> </Heading>


@ -26,7 +26,7 @@ export default function Link({
to, to,
href, href,
onClick, onClick,
hidden = false, noLinkLayout = false,
hideIcon = false, hideIcon = false,
ws = false, ws = false,
forceExternal = false, forceExternal = false,
@ -36,10 +36,10 @@ export default function Link({
const dest = to || href const dest = to || href
const external = forceExternal || /(http(s?)):\/\//gi.test(dest) const external = forceExternal || /(http(s?)):\/\//gi.test(dest)
const icon = getIcon(dest) const icon = getIcon(dest)
const withIcon = !hidden && !hideIcon && !!icon && !isImage(children) const withIcon = !noLinkLayout && !hideIcon && !!icon && !isImage(children)
const sourceWithText = withIcon && isString(children) const sourceWithText = withIcon && isString(children)
const linkClassNames = classNames(classes.root, className, { const linkClassNames = classNames(classes.root, className, {
[classes.hidden]: hidden, [classes['no-link-layout']]: noLinkLayout,
[classes.nowrap]: (withIcon && !sourceWithText) || icon === 'network', [classes.nowrap]: (withIcon && !sourceWithText) || icon === 'network',
[classes['with-icon']]: withIcon, [classes['with-icon']]: withIcon,
}) })
@ -97,7 +97,7 @@ Link.propTypes = {
to: PropTypes.string, to: PropTypes.string,
href: PropTypes.string, href: PropTypes.string,
onClick: PropTypes.func, onClick: PropTypes.func,
hidden: PropTypes.bool, noLinkLayout: PropTypes.bool,
hideIcon: PropTypes.bool, hideIcon: PropTypes.bool,
ws: PropTypes.bool, ws: PropTypes.bool,
className: PropTypes.string, className: PropTypes.string,

View File

@ -2,11 +2,11 @@ import React from 'react'
import PropTypes from 'prop-types' import PropTypes from 'prop-types'
import classNames from 'classnames' import classNames from 'classnames'
import patternBlue from '../images/pattern_blue.jpg' import patternBlue from '../images/pattern_blue.png'
import patternGreen from '../images/pattern_green.jpg' import patternGreen from '../images/pattern_green.png'
import patternPurple from '../images/pattern_purple.jpg' import patternPurple from '../images/pattern_purple.png'
import patternNightly from '../images/pattern_nightly.jpg' import patternNightly from '../images/pattern_nightly.png'
import patternLegacy from '../images/pattern_legacy.jpg' import patternLegacy from '../images/pattern_legacy.png'
import classes from '../styles/main.module.sass' import classes from '../styles/main.module.sass'
const patterns = { const patterns = {


@@ -0,0 +1,32 @@
+import React, { useEffect, useState } from 'react'
+import { serialize } from 'next-mdx-remote/serialize'
+import { MDXRemote } from 'next-mdx-remote'
+import remarkPlugins from '../../plugins/index.mjs'
+
+/**
+ * Convert raw Markdown to React
+ * @param {String} markdown - The Markdown markup to convert.
+ * @param {Object} [remarkReactComponents] - Optional React components to use
+ * for HTML elements.
+ * @returns {Node} - The converted React elements.
+ */
+export default function MarkdownToReact({ markdown }) {
+    const [mdx, setMdx] = useState(null)
+
+    useEffect(() => {
+        const getMdx = async () => {
+            setMdx(
+                await serialize(markdown, {
+                    parseFrontmatter: false,
+                    mdxOptions: {
+                        remarkPlugins,
+                    },
+                })
+            )
+        }
+
+        getMdx()
+    }, [markdown])
+
+    return mdx ? <MDXRemote {...mdx} /> : <></>
+}


@@ -0,0 +1,5 @@
+import dynamic from 'next/dynamic'
+
+export default dynamic(() => import('./markdownToReact'), {
+    loading: () => <p>Loading...</p>,
+})
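
Reviewer note: a sketch of how the dynamically loaded renderer might be consumed from a page or component; the import path, component name and markdown string are assumptions for illustration, not part of this diff.

    // Hypothetical consumer of the dynamic wrapper above (import path assumed)
    import MarkdownToReact from '../components/dynamicMarkdownToReact'

    export default function ExampleNote() {
        // Renders the 'Loading...' fallback until next-mdx-remote has been
        // loaded and the markdown has been serialized client-side.
        return <MarkdownToReact markdown={'Install with **pip install spacy**.'} />
    }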


@@ -30,7 +30,7 @@ const NavigationDropdown = ({ items = [], section }) => {
 export default function Navigation({ title, items = [], section, search, alert, children }) {
     const logo = (
-        <Link to="/" aria-label={title} hidden>
+        <Link to="/" aria-label={title} noLinkLayout>
             <h1 className={classes.title}>{title}</h1>
             <SVG src={logoSpacy.src} className={classes.logo} width={300} height={96} />
         </Link>
@@ -57,7 +57,7 @@ export default function Navigation({ title, items = [], section, search, alert,
                 })
                 return (
                     <li key={i} className={itemClassNames}>
-                        <Link to={url} tabIndex={isActive ? '-1' : null} hidden>
+                        <Link to={url} tabIndex={isActive ? '-1' : null} noLinkLayout>
                             {text}
                         </Link>
                     </li>


@@ -251,7 +251,12 @@ const Quickstart = ({
                 </menu>
             </pre>
             {showCopy && (
-                <textarea ref={copyAreaRef} className={classes['copy-area']} rows={1} />
+                <textarea
+                    ref={copyAreaRef}
+                    className={classes['copy-area']}
+                    rows={1}
+                    aria-label={`Interactive code example for ${title}`}
+                />
             )}
         </div>
     </Container>


@@ -9,15 +9,15 @@ import classes from '../styles/readnext.module.sass'
 export default function ReadNext({ title, to }) {
     return (
-        <div className={classes.root}>
-            <Link to={to} hidden>
+        <Link to={to} noLinkLayout className={classes.root}>
+            <span>
                 <Label>Read next</Label>
                 {title}
-            </Link>
-            <Link to={to} hidden className={classes.icon} aria-hidden="true">
-                <Icon name="arrowright" />
-            </Link>
-        </div>
+            </span>
+            <span className={classes.icon}>
+                <Icon name="arrowright" aria-hidden="true" />
+            </span>
+        </Link>
     )
 }


@@ -9,6 +9,8 @@ import socialImageLegacy from '../images/social_legacy.jpg'
 import siteMetadata from '../../meta/site.json'
 import Head from 'next/head'
+import { siteUrl } from '../../meta/dynamicMeta.mjs'
 
 function getPageTitle(title, sitename, slogan, sectionTitle, nightly, legacy) {
     if (sectionTitle && title) {
         const suffix = nightly ? ' (nightly)' : legacy ? ' (legacy)' : ''
@@ -25,7 +27,7 @@ function getImage(section, nightly, legacy) {
     if (legacy) return socialImageLegacy
     if (section === 'api') return socialImageApi
     if (section === 'universe') return socialImageUniverse
-    return socialImageDefault
+    return `${siteUrl}${socialImageDefault.src}`
 }
 
 export default function SEO({
@@ -46,7 +48,7 @@ export default function SEO({
         nightly,
         legacy
     )
-    const socialImage = getImage(section, nightly, legacy).src
+    const socialImage = getImage(section, nightly, legacy)
     const meta = [
         {
             name: 'description',
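
Reviewer note: a sketch of what the getImage change now produces, i.e. an absolute URL that social crawlers can resolve; siteUrl and the image import come from the diff above, but the concrete values below are assumptions for illustration.

    // Illustrative values only
    const siteUrl = 'https://spacy.io' // assumed value exported by meta/dynamicMeta.mjs
    const socialImageDefault = { src: '/images/social_default.jpg' } // assumed shape of the image import

    // Previously the imported image object was returned and '.src' appended by the
    // caller; now getImage returns the full absolute URL directly:
    const socialImage = `${siteUrl}${socialImageDefault.src}`
    // -> 'https://spacy.io/images/social_default.jpg'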

Some files were not shown because too many files have changed in this diff.