Mirror of https://github.com/explosion/spaCy.git (synced 2025-07-30 01:50:03 +03:00)

Commit 82f1e20c4a: Merge branch 'v4' into cleanup/move-legacy-entity-linker

.gitignore (vendored) | 10
@@ -10,16 +10,6 @@ spacy/tests/package/setup.cfg
 spacy/tests/package/pyproject.toml
 spacy/tests/package/requirements.txt
 
-# Website
-website/.cache/
-website/public/
-website/node_modules
-website/.npm
-website/logs
-*.log
-npm-debug.log*
-quickstart-training-generator.js
-
 # Cython / C extensions
 cythonize.json
 spacy/*.html
@@ -3,7 +3,7 @@ repos:
     rev: 22.3.0
     hooks:
       - id: black
-        language_version: python3.7
+        language_version: python3.8
         additional_dependencies: ['click==8.0.4']
   - repo: https://github.com/pycqa/flake8
     rev: 5.0.4
@@ -271,7 +271,7 @@ except:  # noqa: E722
 
 ### Python conventions
 
-All Python code must be written **compatible with Python 3.6+**. More detailed
+All Python code must be written **compatible with Python 3.8+**. More detailed
 code conventions can be found in the [developer docs](https://github.com/explosion/spaCy/blob/master/extra/DEVELOPER_DOCS/Code%20Conventions.md).
 
 #### I/O and handling paths
Makefile | 2

@@ -5,7 +5,7 @@ override SPACY_EXTRAS = spacy-lookups-data==1.0.2 jieba spacy-pkuseg==0.0.28 sud
 endif
 
 ifndef PYVER
-override PYVER = 3.6
+override PYVER = 3.8
 endif
 
 VENV := ./env$(PYVER)
@@ -16,7 +16,7 @@ production-ready [**training system**](https://spacy.io/usage/training) and easy
 model packaging, deployment and workflow management. spaCy is commercial
 open-source software, released under the [MIT license](https://github.com/explosion/spaCy/blob/master/LICENSE).
 
-💫 **Version 3.4 out now!**
+💫 **Version 3.5 out now!**
 [Check out the release notes here.](https://github.com/explosion/spaCy/releases)
 
 [](https://dev.azure.com/explosion-ai/public/_build?definitionId=8)

@@ -105,7 +105,7 @@ For detailed installation instructions, see the
 
 - **Operating system**: macOS / OS X · Linux · Windows (Cygwin, MinGW, Visual
   Studio)
-- **Python version**: Python 3.6+ (only 64 bit)
+- **Python version**: Python 3.8+ (only 64 bit)
 - **Package managers**: [pip] · [conda] (via `conda-forge`)
 
 [pip]: https://pypi.org/project/spacy/
@@ -11,25 +11,39 @@ trigger:
   exclude:
     - "website/*"
     - "*.md"
+    - "*.mdx"
     - ".github/workflows/*"
 pr:
   paths:
     exclude:
       - "*.md"
+      - "*.mdx"
      - "website/docs/*"
      - "website/src/*"
+      - "website/meta/*.tsx"
+      - "website/meta/*.mjs"
+      - "website/meta/languages.json"
+      - "website/meta/site.json"
+      - "website/meta/sidebars.json"
+      - "website/meta/type-annotations.json"
+      - "website/pages/*"
      - ".github/workflows/*"
 
 jobs:
-  # Perform basic checks for most important errors (syntax etc.) Uses the config
-  # defined in .flake8 and overwrites the selected codes.
+  # Check formatting and linting. Perform basic checks for most important errors
+  # (syntax etc.) Uses the config defined in setup.cfg and overwrites the
+  # selected codes.
   - job: "Validate"
     pool:
       vmImage: "ubuntu-latest"
     steps:
       - task: UsePythonVersion@0
         inputs:
-          versionSpec: "3.7"
+          versionSpec: "3.8"
+      - script: |
+          pip install black==22.3.0
+          python -m black spacy --check
+        displayName: "black"
       - script: |
           pip install flake8==5.0.4
           python -m flake8 spacy --count --select=E901,E999,F821,F822,F823,W605 --show-source --statistics

@@ -40,24 +54,6 @@ jobs:
   strategy:
     matrix:
       # We're only running one platform per Python version to speed up builds
-      Python36Linux:
-        imageName: "ubuntu-20.04"
-        python.version: "3.6"
-      # Python36Windows:
-      #   imageName: "windows-latest"
-      #   python.version: "3.6"
-      # Python36Mac:
-      #   imageName: "macos-latest"
-      #   python.version: "3.6"
-      # Python37Linux:
-      #   imageName: "ubuntu-20.04"
-      #   python.version: "3.7"
-      Python37Windows:
-        imageName: "windows-latest"
-        python.version: "3.7"
-      # Python37Mac:
-      #   imageName: "macos-latest"
-      #   python.version: "3.7"
       # Python38Linux:
       #   imageName: "ubuntu-latest"
       #   python.version: "3.8"
@@ -1,5 +1,5 @@
 # Our libraries
-spacy-legacy>=3.0.12,<3.1.0
+spacy-legacy>=4.0.0.dev0,<4.1.0
 spacy-loggers>=1.0.0,<2.0.0
 cymem>=2.0.2,<2.1.0
 preshed>=3.0.2,<3.1.0

@@ -22,7 +22,6 @@ langcodes>=3.2.0,<4.0.0
 # Official Python utilities
 setuptools
 packaging>=20.0
-typing_extensions>=3.7.4.1,<4.2.0; python_version < "3.8"
 # Development dependencies
 pre-commit>=2.13.0
 cython>=0.25,<3.0

@@ -31,8 +30,7 @@ pytest-timeout>=1.3.0,<2.0.0
 mock>=2.0.0,<3.0.0
 flake8>=3.8.0,<6.0.0
 hypothesis>=3.27.0,<7.0.0
-mypy>=0.990,<0.1000; platform_machine != "aarch64" and python_version >= "3.7"
-types-dataclasses>=0.1.3; python_version < "3.7"
+mypy>=0.990,<0.1000; platform_machine != "aarch64"
 types-mock>=0.1.1
 types-setuptools>=57.0.0
 types-requests
@@ -17,8 +17,6 @@ classifiers =
     Operating System :: Microsoft :: Windows
     Programming Language :: Cython
     Programming Language :: Python :: 3
-    Programming Language :: Python :: 3.6
-    Programming Language :: Python :: 3.7
     Programming Language :: Python :: 3.8
     Programming Language :: Python :: 3.9
     Programming Language :: Python :: 3.10

@@ -31,10 +29,10 @@ project_urls =
 [options]
 zip_safe = false
 include_package_data = true
-python_requires = >=3.6
+python_requires = >=3.8
 install_requires =
     # Our libraries
-    spacy-legacy>=3.0.12,<3.1.0
+    spacy-legacy>=4.0.0.dev0,<4.1.0
     spacy-loggers>=1.0.0,<2.0.0
     murmurhash>=0.28.0,<1.1.0
     cymem>=2.0.2,<2.1.0

@@ -55,7 +53,6 @@ install_requires =
     # Official Python utilities
     setuptools
     packaging>=20.0
-    typing_extensions>=3.7.4,<4.2.0; python_version < "3.8"
     langcodes>=3.2.0,<4.0.0
 
 [options.entry_points]
@@ -4,6 +4,7 @@ from ._util import app, setup_cli  # noqa: F401
 
 # These are the actual functions, NOT the wrapped CLI commands. The CLI commands
 # are registered automatically and won't have to be imported here.
+from .benchmark_speed import benchmark_speed_cli  # noqa: F401
 from .download import download  # noqa: F401
 from .info import info  # noqa: F401
 from .package import package  # noqa: F401

@@ -1,4 +1,4 @@
-from typing import Dict, Any, Union, List, Optional, Tuple, Iterable
+from typing import Dict, Any, Union, List, Optional, Tuple, Iterable, Literal
 from typing import TYPE_CHECKING, overload
 import sys
 import shutil

@@ -16,7 +16,6 @@ from thinc.util import gpu_is_available
 from configparser import InterpolationError
 import os
 
-from ..compat import Literal
 from ..schemas import ProjectConfigSchema, validate
 from ..util import import_file, run_command, make_tempdir, registry, logger
 from ..util import is_compatible_version, SimpleFrozenDict, ENV_VARS

@@ -46,6 +45,7 @@ DEBUG_HELP = """Suite of helpful commands for debugging and profiling. Includes
 commands to check and validate your config files, training and evaluation data,
 and custom model implementations.
 """
+BENCHMARK_HELP = """Commands for benchmarking pipelines."""
 INIT_HELP = """Commands for initializing configs and pipeline packages."""
 
 # Wrappers for Typer's annotations. Initially created to set defaults and to

@@ -54,12 +54,14 @@ Arg = typer.Argument
 Opt = typer.Option
 
 app = typer.Typer(name=NAME, help=HELP)
+benchmark_cli = typer.Typer(name="benchmark", help=BENCHMARK_HELP, no_args_is_help=True)
 project_cli = typer.Typer(name="project", help=PROJECT_HELP, no_args_is_help=True)
 debug_cli = typer.Typer(name="debug", help=DEBUG_HELP, no_args_is_help=True)
 init_cli = typer.Typer(name="init", help=INIT_HELP, no_args_is_help=True)
 
 app.add_typer(project_cli)
 app.add_typer(debug_cli)
+app.add_typer(benchmark_cli)
 app.add_typer(init_cli)
 
 
spacy/cli/benchmark_speed.py | 174 (new file)

@@ -0,0 +1,174 @@
from typing import Iterable, List, Optional
import random
from itertools import islice
import numpy
from pathlib import Path
import time
from tqdm import tqdm
import typer
from wasabi import msg

from .. import util
from ..language import Language
from ..tokens import Doc
from ..training import Corpus
from ._util import Arg, Opt, benchmark_cli, setup_gpu


@benchmark_cli.command(
    "speed",
    context_settings={"allow_extra_args": True, "ignore_unknown_options": True},
)
def benchmark_speed_cli(
    # fmt: off
    ctx: typer.Context,
    model: str = Arg(..., help="Model name or path"),
    data_path: Path = Arg(..., help="Location of binary evaluation data in .spacy format", exists=True),
    batch_size: Optional[int] = Opt(None, "--batch-size", "-b", min=1, help="Override the pipeline batch size"),
    no_shuffle: bool = Opt(False, "--no-shuffle", help="Do not shuffle benchmark data"),
    use_gpu: int = Opt(-1, "--gpu-id", "-g", help="GPU ID or -1 for CPU"),
    n_batches: int = Opt(50, "--batches", help="Minimum number of batches to benchmark", min=30,),
    warmup_epochs: int = Opt(3, "--warmup", "-w", min=0, help="Number of iterations over the data for warmup"),
    # fmt: on
):
    """
    Benchmark a pipeline. Expects a loadable spaCy pipeline and benchmark
    data in the binary .spacy format.
    """
    setup_gpu(use_gpu=use_gpu, silent=False)

    nlp = util.load_model(model)
    batch_size = batch_size if batch_size is not None else nlp.batch_size
    corpus = Corpus(data_path)
    docs = [eg.predicted for eg in corpus(nlp)]

    if len(docs) == 0:
        msg.fail("Cannot benchmark speed using an empty corpus.", exits=1)

    print(f"Warming up for {warmup_epochs} epochs...")
    warmup(nlp, docs, warmup_epochs, batch_size)

    print()
    print(f"Benchmarking {n_batches} batches...")
    wps = benchmark(nlp, docs, n_batches, batch_size, not no_shuffle)

    print()
    print_outliers(wps)
    print_mean_with_ci(wps)


# Lowercased, behaves as a context manager function.
class time_context:
    """Register the running time of a context."""

    def __enter__(self):
        self.start = time.perf_counter()
        return self

    def __exit__(self, type, value, traceback):
        self.elapsed = time.perf_counter() - self.start


class Quartiles:
    """Calculate the q1, q2, q3 quartiles and the inter-quartile range (iqr)
    of a sample."""

    q1: float
    q2: float
    q3: float
    iqr: float

    def __init__(self, sample: numpy.ndarray) -> None:
        self.q1 = numpy.quantile(sample, 0.25)
        self.q2 = numpy.quantile(sample, 0.5)
        self.q3 = numpy.quantile(sample, 0.75)
        self.iqr = self.q3 - self.q1


def annotate(
    nlp: Language, docs: List[Doc], batch_size: Optional[int]
) -> numpy.ndarray:
    docs = nlp.pipe(tqdm(docs, unit="doc"), batch_size=batch_size)
    wps = []
    while True:
        with time_context() as elapsed:
            batch_docs = list(
                islice(docs, batch_size if batch_size else nlp.batch_size)
            )
        if len(batch_docs) == 0:
            break
        n_tokens = count_tokens(batch_docs)
        wps.append(n_tokens / elapsed.elapsed)

    return numpy.array(wps)


def benchmark(
    nlp: Language,
    docs: List[Doc],
    n_batches: int,
    batch_size: int,
    shuffle: bool,
) -> numpy.ndarray:
    if shuffle:
        bench_docs = [
            nlp.make_doc(random.choice(docs).text)
            for _ in range(n_batches * batch_size)
        ]
    else:
        bench_docs = [
            nlp.make_doc(docs[i % len(docs)].text)
            for i in range(n_batches * batch_size)
        ]

    return annotate(nlp, bench_docs, batch_size)


def bootstrap(x, statistic=numpy.mean, iterations=10000) -> numpy.ndarray:
    """Apply a statistic to repeated random samples of an array."""
    return numpy.fromiter(
        (
            statistic(numpy.random.choice(x, len(x), replace=True))
            for _ in range(iterations)
        ),
        numpy.float64,
    )


def count_tokens(docs: Iterable[Doc]) -> int:
    return sum(len(doc) for doc in docs)


def print_mean_with_ci(sample: numpy.ndarray):
    mean = numpy.mean(sample)
    bootstrap_means = bootstrap(sample)
    bootstrap_means.sort()

    # 95% confidence interval
    low = bootstrap_means[int(len(bootstrap_means) * 0.025)]
    high = bootstrap_means[int(len(bootstrap_means) * 0.975)]

    print(f"Mean: {mean:.1f} words/s (95% CI: {low-mean:.1f} +{high-mean:.1f})")


def print_outliers(sample: numpy.ndarray):
    quartiles = Quartiles(sample)

    n_outliers = numpy.sum(
        (sample < (quartiles.q1 - 1.5 * quartiles.iqr))
        | (sample > (quartiles.q3 + 1.5 * quartiles.iqr))
    )
    n_extreme_outliers = numpy.sum(
        (sample < (quartiles.q1 - 3.0 * quartiles.iqr))
        | (sample > (quartiles.q3 + 3.0 * quartiles.iqr))
    )
    print(
        f"Outliers: {(100 * n_outliers) / len(sample):.1f}%, extreme outliers: {(100 * n_extreme_outliers) / len(sample)}%"
    )


def warmup(
    nlp: Language, docs: List[Doc], warmup_epochs: int, batch_size: Optional[int]
) -> numpy.ndarray:
    docs = warmup_epochs * docs
    return annotate(nlp, docs, batch_size)
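
Note: the mean-with-confidence-interval reporting in print_mean_with_ci() above is a plain percentile bootstrap. A minimal standalone sketch of the same computation, with made-up per-batch speed measurements:

    import numpy

    # Made-up per-batch words-per-second measurements.
    wps = numpy.array([9800.0, 10100.0, 9950.0, 10200.0, 9700.0, 10050.0])

    # Resample with replacement and take the mean of each resample,
    # mirroring bootstrap() above.
    means = numpy.fromiter(
        (
            numpy.mean(numpy.random.choice(wps, len(wps), replace=True))
            for _ in range(10000)
        ),
        numpy.float64,
    )
    means.sort()

    # 95% percentile interval, as in print_mean_with_ci().
    low = means[int(len(means) * 0.025)]
    high = means[int(len(means) * 0.975)]
    print(f"Mean: {wps.mean():.1f} words/s "
          f"(95% CI: {low - wps.mean():.1f} +{high - wps.mean():.1f})")
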
@@ -1,5 +1,5 @@
 from typing import Any, Dict, Iterable, List, Optional, Sequence, Set, Tuple, Union
-from typing import cast, overload
+from typing import Literal, cast, overload
 from pathlib import Path
 from collections import Counter
 import sys

@@ -17,10 +17,10 @@ from ..pipeline import TrainablePipe
 from ..pipeline._parser_internals import nonproj
 from ..pipeline._parser_internals.nonproj import DELIMITER
 from ..pipeline import Morphologizer, SpanCategorizer
+from ..pipeline._edit_tree_internals.edit_trees import EditTrees
 from ..morphology import Morphology
 from ..language import Language
 from ..util import registry, resolve_dot_names
-from ..compat import Literal
 from ..vectors import Mode as VectorsMode
 from .. import util
 

@@ -671,6 +671,59 @@ debug_data(
             f"Found {gold_train_data['n_cycles']} projectivized train sentence(s) with cycles"
         )
 
+    if "trainable_lemmatizer" in factory_names:
+        msg.divider("Trainable Lemmatizer")
+        trees_train: Set[str] = gold_train_data["lemmatizer_trees"]
+        trees_dev: Set[str] = gold_dev_data["lemmatizer_trees"]
+        # This is necessary context when someone is attempting to interpret whether the
+        # number of trees exclusively in the dev set is meaningful.
+        msg.info(f"{len(trees_train)} lemmatizer trees generated from training data")
+        msg.info(f"{len(trees_dev)} lemmatizer trees generated from dev data")
+        dev_not_train = trees_dev - trees_train
+
+        if len(dev_not_train) != 0:
+            pct = len(dev_not_train) / len(trees_dev)
+            msg.info(
+                f"{len(dev_not_train)} lemmatizer trees ({pct*100:.1f}% of dev trees)"
+                " were found exclusively in the dev data."
+            )
+        else:
+            # Would we ever expect this case? It seems like it would be pretty rare,
+            # and we might actually want a warning?
+            msg.info("All trees in dev data present in training data.")
+
+        if gold_train_data["n_low_cardinality_lemmas"] > 0:
+            n = gold_train_data["n_low_cardinality_lemmas"]
+            msg.warn(f"{n} training docs with 0 or 1 unique lemmas.")
+
+        if gold_dev_data["n_low_cardinality_lemmas"] > 0:
+            n = gold_dev_data["n_low_cardinality_lemmas"]
+            msg.warn(f"{n} dev docs with 0 or 1 unique lemmas.")
+
+        if gold_train_data["no_lemma_annotations"] > 0:
+            n = gold_train_data["no_lemma_annotations"]
+            msg.warn(f"{n} training docs with no lemma annotations.")
+        else:
+            msg.good("All training docs have lemma annotations.")
+
+        if gold_dev_data["no_lemma_annotations"] > 0:
+            n = gold_dev_data["no_lemma_annotations"]
+            msg.warn(f"{n} dev docs with no lemma annotations.")
+        else:
+            msg.good("All dev docs have lemma annotations.")
+
+        if gold_train_data["partial_lemma_annotations"] > 0:
+            n = gold_train_data["partial_lemma_annotations"]
+            msg.info(f"{n} training docs with partial lemma annotations.")
+        else:
+            msg.good("All training docs have complete lemma annotations.")
+
+        if gold_dev_data["partial_lemma_annotations"] > 0:
+            n = gold_dev_data["partial_lemma_annotations"]
+            msg.info(f"{n} dev docs with partial lemma annotations.")
+        else:
+            msg.good("All dev docs have complete lemma annotations.")
+
     msg.divider("Summary")
     good_counts = msg.counts[MESSAGES.GOOD]
     warn_counts = msg.counts[MESSAGES.WARN]

@@ -732,7 +785,13 @@ def _compile_gold(
         "n_cats_multilabel": 0,
         "n_cats_bad_values": 0,
         "texts": set(),
+        "lemmatizer_trees": set(),
+        "no_lemma_annotations": 0,
+        "partial_lemma_annotations": 0,
+        "n_low_cardinality_lemmas": 0,
     }
+    if "trainable_lemmatizer" in factory_names:
+        trees = EditTrees(nlp.vocab.strings)
     for eg in examples:
         gold = eg.reference
         doc = eg.predicted

@@ -862,6 +921,25 @@ def _compile_gold(
                 data["n_nonproj"] += 1
             if nonproj.contains_cycle(aligned_heads):
                 data["n_cycles"] += 1
+        if "trainable_lemmatizer" in factory_names:
+            # from EditTreeLemmatizer._labels_from_data
+            if all(token.lemma == 0 for token in gold):
+                data["no_lemma_annotations"] += 1
+                continue
+            if any(token.lemma == 0 for token in gold):
+                data["partial_lemma_annotations"] += 1
+            lemma_set = set()
+            for token in gold:
+                if token.lemma != 0:
+                    lemma_set.add(token.lemma)
+                    tree_id = trees.add(token.text, token.lemma_)
+                    tree_str = trees.tree_to_str(tree_id)
+                    data["lemmatizer_trees"].add(tree_str)
+            # We want to identify cases where lemmas aren't assigned
+            # or are all assigned the same value, as this would indicate
+            # an issue since we're expecting a large set of lemmas
+            if len(lemma_set) < 2 and len(gold) > 1:
+                data["n_low_cardinality_lemmas"] += 1
     return data
 
 
@@ -7,12 +7,15 @@ from thinc.api import fix_random_seed
 
 from ..training import Corpus
 from ..tokens import Doc
-from ._util import app, Arg, Opt, setup_gpu, import_code
+from ._util import app, Arg, Opt, setup_gpu, import_code, benchmark_cli
 from ..scorer import Scorer
 from .. import util
 from .. import displacy
 
 
+@benchmark_cli.command(
+    "accuracy",
+)
 @app.command("evaluate")
 def evaluate_cli(
     # fmt: off

@@ -36,7 +39,7 @@ def evaluate_cli(
     dependency parses in a HTML file, set as output directory as the
     displacy_path argument.
 
-    DOCS: https://spacy.io/api/cli#evaluate
+    DOCS: https://spacy.io/api/cli#benchmark-accuracy
     """
     import_code(code_path)
     evaluate(
@@ -22,19 +22,6 @@ try:
 except ImportError:
     cupy = None
 
-if sys.version_info[:2] >= (3, 8):  # Python 3.8+
-    from typing import Literal, Protocol, runtime_checkable
-else:
-    from typing_extensions import Literal, Protocol, runtime_checkable  # noqa: F401
-
-# Important note: The importlib_metadata "backport" includes functionality
-# that's not part of the built-in importlib.metadata. We should treat this
-# import like the built-in and only use what's available there.
-try:  # Python 3.8+
-    import importlib.metadata as importlib_metadata
-except ImportError:
-    from catalogue import _importlib_metadata as importlib_metadata  # type: ignore[no-redef]  # noqa: F401
-
 from thinc.api import Optimizer  # noqa: F401
 
 pickle = pickle
@@ -106,9 +106,7 @@ def serve(
 
     if is_in_jupyter():
         warnings.warn(Warnings.W011)
-    render(
-        docs, style=style, page=page, minify=minify, options=options, manual=manual
-    )
+    render(docs, style=style, page=page, minify=minify, options=options, manual=manual)
     httpd = simple_server.make_server(host, port, app)
     print(f"\nUsing the '{style}' visualizer")
     print(f"Serving on http://{host}:{port} ...\n")
@@ -1,5 +1,5 @@
+from typing import Literal
 import warnings
-from .compat import Literal
 
 
 class ErrorsWithCodes(type):

@@ -949,8 +949,8 @@ class Errors(metaclass=ErrorsWithCodes):
     E1047 = ("`find_threshold()` only supports components with a `scorer` attribute.")
     E1048 = ("Got '{unexpected}' as console progress bar type, but expected one of the following: {expected}")
     E1049 = ("No available port found for displaCy on host {host}. Please specify an available port "
             "with `displacy.serve(doc, port)`")
-    E1050 = ("Port {port} is already in use. Please specify an available port with `displacy.serve(doc, port)` "
-             "with `displacy.serve(doc, port=port)`")
+    E1050 = ("Port {port} is already in use. Please specify an available port with `displacy.serve(doc, port=port)` "
+             "or use `auto_switch_port=True` to pick an available port automatically.")
 
     # v4 error strings
@@ -25,7 +25,7 @@ cdef class InMemoryLookupKB(KnowledgeBase):
     """An `InMemoryLookupKB` instance stores unique identifiers for entities and their textual aliases,
     to support entity linking of named entities to real-world concepts.
 
-    DOCS: https://spacy.io/api/kb_in_memory
+    DOCS: https://spacy.io/api/inmemorylookupkb
     """
 
     def __init__(self, Vocab vocab, entity_vector_length):
@@ -1,4 +1,4 @@
-from typing import Iterator, Optional, Any, Dict, Callable, Iterable
+from typing import Iterator, Optional, Any, Dict, Callable, Iterable, Literal
 from typing import Union, Tuple, List, Set, Pattern, Sequence
 from typing import NoReturn, TYPE_CHECKING, TypeVar, cast, overload
 

@@ -22,7 +22,7 @@ from . import ty
 from .tokens.underscore import Underscore
 from .vocab import Vocab, create_vocab
 from .pipe_analysis import validate_attrs, analyze_pipes, print_pipe_analysis
-from .training import Example, validate_examples
+from .training import Example, validate_examples, validate_distillation_examples
 from .training.initialize import init_vocab, init_tok2vec
 from .scorer import Scorer
 from .util import registry, SimpleFrozenList, _pipe, raise_error, _DEFAULT_EMPTY_PIPES

@@ -40,7 +40,6 @@ from .git_info import GIT_VERSION
 from . import util
 from . import about
 from .lookups import load_lookups
-from .compat import Literal
 
 
 PipeCallable = Callable[[Doc], Doc]
@@ -1018,6 +1017,102 @@ class Language:
             raise ValueError(Errors.E005.format(name=name, returned_type=type(doc)))
         return doc
 
+    def distill(
+        self,
+        teacher: "Language",
+        examples: Iterable[Example],
+        *,
+        drop: float = 0.0,
+        sgd: Optional[Optimizer] = None,
+        losses: Optional[Dict[str, float]] = None,
+        component_cfg: Optional[Dict[str, Dict[str, Any]]] = None,
+        exclude: Iterable[str] = SimpleFrozenList(),
+        annotates: Iterable[str] = SimpleFrozenList(),
+        student_to_teacher: Optional[Dict[str, str]] = None,
+    ):
+        """Distill the models in a student pipeline from a teacher pipeline.
+        teacher (Language): Teacher to distill from.
+        examples (Iterable[Example]): Distillation examples. The reference
+            (teacher) and predicted (student) docs must have the same number of
+            tokens and the same orthography.
+        drop (float): The dropout rate.
+        sgd (Optional[Optimizer]): An optimizer.
+        losses (Optional(Dict[str, float])): Dictionary to update with the loss,
+            keyed by component.
+        component_cfg (Optional[Dict[str, Dict[str, Any]]]): Config parameters
+            for specific pipeline components, keyed by component name.
+        exclude (Iterable[str]): Names of components that shouldn't be updated.
+        annotates (Iterable[str]): Names of components that should set
+            annotations on the predicted examples after updating.
+        student_to_teacher (Optional[Dict[str, str]]): Map student pipe name to
+            teacher pipe name, only needed for pipes where the student pipe
+            name does not match the teacher pipe name.
+        RETURNS (Dict[str, float]): The updated losses dictionary
+
+        DOCS: https://spacy.io/api/language#distill
+        """
+        if student_to_teacher is None:
+            student_to_teacher = {}
+        if losses is None:
+            losses = {}
+        if isinstance(examples, list) and len(examples) == 0:
+            return losses
+
+        validate_distillation_examples(examples, "Language.distill")
+        examples = _copy_examples(examples)
+
+        if sgd is None:
+            if self._optimizer is None:
+                self._optimizer = self.create_optimizer()
+            sgd = self._optimizer
+
+        if component_cfg is None:
+            component_cfg = {}
+        pipe_kwargs = {}
+        for student_name, student_proc in self.pipeline:
+            component_cfg.setdefault(student_name, {})
+            pipe_kwargs[student_name] = deepcopy(component_cfg[student_name])
+            component_cfg[student_name].setdefault("drop", drop)
+            pipe_kwargs[student_name].setdefault("batch_size", self.batch_size)
+
+        teacher_pipes = dict(teacher.pipeline)
+        for student_name, student_proc in self.pipeline:
+            if student_name in annotates:
+                for doc, eg in zip(
+                    _pipe(
+                        (eg.predicted for eg in examples),
+                        proc=student_proc,
+                        name=student_name,
+                        default_error_handler=self.default_error_handler,
+                        kwargs=pipe_kwargs[student_name],
+                    ),
+                    examples,
+                ):
+                    eg.predicted = doc
+
+            if (
+                student_name not in exclude
+                and isinstance(student_proc, ty.DistillableComponent)
+                and student_proc.is_distillable
+            ):
+                # A missing teacher pipe is not an error, some student pipes
+                # do not need a teacher, such as tok2vec layer losses.
+                teacher_name = (
+                    student_to_teacher[student_name]
+                    if student_name in student_to_teacher
+                    else student_name
+                )
+                teacher_pipe = teacher_pipes.get(teacher_name, None)
+                student_proc.distill(
+                    teacher_pipe,
+                    examples,
+                    sgd=sgd,
+                    losses=losses,
+                    **component_cfg[student_name],
+                )
+
+        return losses
+
     def disable_pipes(self, *names) -> "DisabledPipes":
         """Disable one or more pipeline components. If used as a context
         manager, the pipeline will be restored to the initial state at the end
|
|||
self,
|
||||
get_examples: Optional[Callable[[], Iterable[Example]]] = None,
|
||||
*,
|
||||
labels: Optional[Dict[str, Any]] = None,
|
||||
sgd: Optional[Optimizer] = None,
|
||||
) -> Optimizer:
|
||||
"""Initialize the pipe for training, using data examples if available.
|
||||
|
||||
get_examples (Callable[[], Iterable[Example]]): Optional function that
|
||||
returns gold-standard Example objects.
|
||||
labels (Optional[Dict[str, Any]]): Labels to pass to pipe initialization,
|
||||
using the names of the pipes as keys. Overrides labels that are in
|
||||
the model configuration.
|
||||
sgd (Optional[Optimizer]): An optimizer to use for updates. If not
|
||||
provided, will be created using the .create_optimizer() method.
|
||||
RETURNS (thinc.api.Optimizer): The optimizer.
|
||||
|
@ -1293,6 +1392,8 @@ class Language:
|
|||
for name, proc in self.pipeline:
|
||||
if isinstance(proc, ty.InitializableComponent):
|
||||
p_settings = I["components"].get(name, {})
|
||||
if labels is not None and name in labels:
|
||||
p_settings["labels"] = labels[name]
|
||||
p_settings = validate_init_settings(
|
||||
proc.initialize, p_settings, section="components", name=name
|
||||
)
|
||||
|
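
Note: the new labels argument lets callers override labels per pipe at initialization time, keyed by pipe name, as the docstring above describes. A small hypothetical sketch (source_nlp and examples are assumed to exist):

    # Copy label data from another pipeline into the "tagger" pipe.
    nlp.initialize(
        get_examples=lambda: examples,
        labels={"tagger": source_nlp.get_pipe("tagger").label_data},
    )
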
@@ -1726,6 +1827,7 @@ class Language:
         # using the nlp.config with all defaults.
         config = util.copy_config(config)
         orig_pipeline = config.pop("components", {})
+        orig_distill = config.pop("distill", None)
         orig_pretraining = config.pop("pretraining", None)
         config["components"] = {}
         if auto_fill:

@@ -1734,6 +1836,9 @@ class Language:
             filled = config
         filled["components"] = orig_pipeline
         config["components"] = orig_pipeline
+        if orig_distill is not None:
+            filled["distill"] = orig_distill
+            config["distill"] = orig_distill
         if orig_pretraining is not None:
             filled["pretraining"] = orig_pretraining
             config["pretraining"] = orig_pretraining
@@ -41,7 +41,7 @@ cdef class Lexeme:
         """
         self.vocab = vocab
         self.orth = orth
-        self.c = <LexemeC*><void*>vocab.get_by_orth(vocab.mem, orth)
+        self.c = <LexemeC*><void*>vocab.get_by_orth(orth)
         if self.c.orth != orth:
             raise ValueError(Errors.E071.format(orth=orth, vocab_orth=self.c.orth))
 
@@ -22,7 +22,7 @@ cpdef bint levenshtein_compare(input_text: str, pattern_text: str, fuzzy: int =
         max_edits = fuzzy
     else:
         # allow at least two edits (to allow at least one transposition) and up
-        # to 20% of the pattern string length
+        # to 30% of the pattern string length
         max_edits = max(2, round(0.3 * len(pattern_text)))
     return levenshtein(input_text, pattern_text, max_edits) <= max_edits
 
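
Note: with the default fuzzy=-1, the budget computed above is max(2, round(0.3 * len(pattern_text))) edits; for a 10-character pattern that is max(2, 3) = 3. A sketch of how this might surface through the Matcher's FUZZY predicate (see the schema changes further down; the example text is made up):

    import spacy
    from spacy.matcher import Matcher

    nlp = spacy.blank("en")
    matcher = Matcher(nlp.vocab)
    # Default budget for "definitely" (10 chars): up to 3 edits.
    matcher.add("MOD", [[{"LOWER": {"FUZZY": "definitely"}}]])

    doc = nlp("I definitily agree")
    for match_id, start, end in matcher(doc):
        print(doc[start:end].text)  # "definitily" (1 substitution away)
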
@@ -1,12 +1,15 @@
-from typing import Any, List, Dict, Tuple, Optional, Callable, Union
+from typing import Any, List, Dict, Tuple, Optional, Callable, Union, Literal
 from typing import Iterator, Iterable, overload
-from ..compat import Literal
 from ..vocab import Vocab
 from ..tokens import Doc, Span
 
 class Matcher:
-    def __init__(self, vocab: Vocab, validate: bool = ...,
-                 fuzzy_compare: Callable[[str, str, int], bool] = ...) -> None: ...
+    def __init__(
+        self,
+        vocab: Vocab,
+        validate: bool = ...,
+        fuzzy_compare: Callable[[str, str, int], bool] = ...,
+    ) -> None: ...
     def __reduce__(self) -> Any: ...
     def __len__(self) -> int: ...
     def __contains__(self, key: str) -> bool: ...

@@ -1,5 +1,5 @@
-from typing import List, Tuple, Union, Optional, Callable, Any, Dict, overload
-from ..compat import Literal
+from typing import List, Tuple, Union, Optional, Callable, Any, Dict, Literal
+from typing import overload
 from .matcher import Matcher
 from ..vocab import Vocab
 from ..tokens import Doc, Span
@@ -1,10 +1,9 @@
-from typing import Optional, List, Tuple, Any
+from typing import Optional, List, Tuple, Any, Literal
 from thinc.types import Floats2d
 from thinc.api import Model
 import warnings
 
 from ...errors import Errors, Warnings
-from ...compat import Literal
 from ...util import registry
 from ..tb_framework import TransitionModel
 from ...tokens.doc import Doc
@@ -5,7 +5,7 @@ from itertools import islice
 import numpy as np
 
 import srsly
-from thinc.api import Config, Model
+from thinc.api import Config, Model, SequenceCategoricalCrossentropy, NumpyOps
 from thinc.types import ArrayXd, Floats2d, Ints1d
 from thinc.legacy import LegacySequenceCategoricalCrossentropy
 

@@ -22,6 +22,8 @@ from .. import util
 
 
 ActivationsT = Dict[str, Union[List[Floats2d], List[Ints1d]]]
+# The cutoff value of *top_k* above which an alternative method is used to process guesses.
+TOP_K_GUARDRAIL = 20
 
 
 default_model_config = """

@@ -125,6 +127,7 @@ class EditTreeLemmatizer(TrainablePipe):
         self.cfg: Dict[str, Any] = {"labels": []}
         self.scorer = scorer
         self.save_activations = save_activations
+        self.numpy_ops = NumpyOps()
 
     def get_loss(
         self, examples: Iterable[Example], scores: List[Floats2d]

@@ -140,7 +143,7 @@ class EditTreeLemmatizer(TrainablePipe):
             for (predicted, gold_lemma) in zip(
                 eg.predicted, eg.get_aligned("LEMMA", as_string=True)
             ):
-                if gold_lemma is None:
+                if gold_lemma is None or gold_lemma == "":
                     label = -1
                 else:
                     tree_id = self.trees.add(predicted.text, gold_lemma)

@@ -165,7 +168,7 @@ class EditTreeLemmatizer(TrainablePipe):
         student_scores: Scores representing the student model's predictions.
 
         RETURNS (Tuple[float, float]): The loss and the gradient.
 
         DOCS: https://spacy.io/api/edittreelemmatizer#get_teacher_student_loss
         """
         loss_func = LegacySequenceCategoricalCrossentropy(normalize=False)

@@ -175,6 +178,18 @@ class EditTreeLemmatizer(TrainablePipe):
         return float(loss), d_scores
 
     def predict(self, docs: Iterable[Doc]) -> ActivationsT:
+        if self.top_k == 1:
+            scores2guesses = self._scores2guesses_top_k_equals_1
+        elif self.top_k <= TOP_K_GUARDRAIL:
+            scores2guesses = self._scores2guesses_top_k_greater_1
+        else:
+            scores2guesses = self._scores2guesses_top_k_guardrail
+        # The behaviour of *_scores2guesses_top_k_greater_1()* is efficient for values
+        # of *top_k>1* that are likely to be useful when the edit tree lemmatizer is used
+        # for its principal purpose of lemmatizing tokens. However, the code could also
+        # be used for other purposes, and with very large values of *top_k* the method
+        # becomes inefficient. In such cases, *_scores2guesses_top_k_guardrail()* is used
+        # instead.
         n_docs = len(list(docs))
         if not any(len(doc) for doc in docs):
             # Handle cases where there are no tokens in any docs.
@@ -189,20 +204,52 @@ class EditTreeLemmatizer(TrainablePipe):
             return {"probabilities": scores, "tree_ids": guesses}
         scores = self.model.predict(docs)
         assert len(scores) == n_docs
-        guesses = self._scores2guesses(docs, scores)
+        guesses = scores2guesses(docs, scores)
         assert len(guesses) == n_docs
         return {"probabilities": scores, "tree_ids": guesses}
 
-    def _scores2guesses(self, docs, scores):
+    def _scores2guesses_top_k_equals_1(self, docs, scores):
         guesses = []
         for doc, doc_scores in zip(docs, scores):
-            if self.top_k == 1:
-                doc_guesses = doc_scores.argmax(axis=1).reshape(-1, 1)
-            else:
-                doc_guesses = np.argsort(doc_scores)[..., : -self.top_k - 1 : -1]
+            doc_guesses = doc_scores.argmax(axis=1)
+            doc_guesses = self.numpy_ops.asarray(doc_guesses)
 
-            if not isinstance(doc_guesses, np.ndarray):
-                doc_guesses = doc_guesses.get()
+            doc_compat_guesses = []
+            for i, token in enumerate(doc):
+                tree_id = self.cfg["labels"][doc_guesses[i]]
+                if self.trees.apply(tree_id, token.text) is not None:
+                    doc_compat_guesses.append(tree_id)
+                else:
+                    doc_compat_guesses.append(-1)
+            guesses.append(np.array(doc_compat_guesses))
+
+        return guesses
+
+    def _scores2guesses_top_k_greater_1(self, docs, scores):
+        guesses = []
+        top_k = min(self.top_k, len(self.labels))
+        for doc, doc_scores in zip(docs, scores):
+            doc_scores = self.numpy_ops.asarray(doc_scores)
+            doc_compat_guesses = []
+            for i, token in enumerate(doc):
+                for _ in range(top_k):
+                    candidate = int(doc_scores[i].argmax())
+                    candidate_tree_id = self.cfg["labels"][candidate]
+                    if self.trees.apply(candidate_tree_id, token.text) is not None:
+                        doc_compat_guesses.append(candidate_tree_id)
+                        break
+                    doc_scores[i, candidate] = np.finfo(np.float32).min
+                else:
+                    doc_compat_guesses.append(-1)
+            guesses.append(np.array(doc_compat_guesses))
+
+        return guesses
+
+    def _scores2guesses_top_k_guardrail(self, docs, scores):
+        guesses = []
+        for doc, doc_scores in zip(docs, scores):
+            doc_guesses = np.argsort(doc_scores)[..., : -self.top_k - 1 : -1]
+            doc_guesses = self.numpy_ops.asarray(doc_guesses)
 
             doc_compat_guesses = []
             for token, candidates in zip(doc, doc_guesses):
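
Note: which of the three guess methods above runs is decided purely by the component's top_k setting (1, up to TOP_K_GUARDRAIL, or above it). The setting comes from the component config, exactly as the tests later in this diff configure it:

    from spacy.lang.en import English

    nlp = English()
    # top_k > 1 lets the lemmatizer fall back to the next-best edit tree
    # when the best-scoring tree does not apply to the token's text.
    lemmatizer = nlp.add_pipe("trainable_lemmatizer", config={"top_k": 5})
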
@@ -459,7 +459,11 @@ class EntityLinker(TrainablePipe):
         docs_ents: List[Ragged] = []
         docs_scores: List[Ragged] = []
         if not docs:
-            return {KNOWLEDGE_BASE_IDS: final_kb_ids, "ents": docs_ents, "scores": docs_scores}
+            return {
+                KNOWLEDGE_BASE_IDS: final_kb_ids,
+                "ents": docs_ents,
+                "scores": docs_scores,
+            }
         if isinstance(docs, Doc):
             docs = [docs]
         for doc in docs:

@@ -591,7 +595,11 @@ class EntityLinker(TrainablePipe):
                 method="predict", msg="result variables not of equal length"
             )
             raise RuntimeError(err)
-        return {KNOWLEDGE_BASE_IDS: final_kb_ids, "ents": docs_ents, "scores": docs_scores}
+        return {
+            KNOWLEDGE_BASE_IDS: final_kb_ids,
+            "ents": docs_ents,
+            "scores": docs_scores,
+        }
 
     def set_annotations(self, docs: Iterable[Doc], activations: ActivationsT) -> None:
         """Modify a batch of documents, using pre-computed scores.
@@ -252,8 +252,11 @@ class EntityRecognizer(Parser):
     def labels(self):
         # Get the labels from the model by looking at the available moves, e.g.
         # B-PERSON, I-PERSON, L-PERSON, U-PERSON
-        labels = set(remove_bilu_prefix(move) for move in self.move_names
-                     if move[0] in ("B", "I", "L", "U"))
+        labels = set(
+            remove_bilu_prefix(move)
+            for move in self.move_names
+            if move[0] in ("B", "I", "L", "U")
+        )
         return tuple(sorted(labels))
 
     def scored_ents(self, beams):
@@ -1,12 +1,11 @@
 from typing import List, Dict, Callable, Tuple, Optional, Iterable, Any, cast
-from typing import Union
+from typing import Union, Protocol, runtime_checkable
 from thinc.api import Config, Model, get_current_ops, set_dropout_rate, Ops
 from thinc.api import Optimizer
 from thinc.types import Ragged, Ints2d, Floats2d
 
 import numpy
 
-from ..compat import Protocol, runtime_checkable
 from ..scorer import Scorer
 from ..language import Language
 from .trainable_pipe import TrainablePipe
@@ -71,8 +71,8 @@ cdef class TrainablePipe(Pipe):
         teacher_pipe (Optional[TrainablePipe]): The teacher pipe to learn
             from.
         examples (Iterable[Example]): Distillation examples. The reference
-            and predicted docs must have the same number of tokens and the
-            same orthography.
+            (teacher) and predicted (student) docs must have the same number of
+            tokens and the same orthography.
         drop (float): dropout rate.
         sgd (Optional[Optimizer]): An optimizer. Will be created via
             create_optimizer if not set.

@@ -224,8 +224,8 @@ class Parser(TrainablePipe):
         teacher_pipe (Optional[TrainablePipe]): The teacher pipe to learn
             from.
         examples (Iterable[Example]): Distillation examples. The reference
-            and predicted docs must have the same number of tokens and the
-            same orthography.
+            (teacher) and predicted (student) docs must have the same number of
+            tokens and the same orthography.
         drop (float): dropout rate.
         sgd (Optional[Optimizer]): An optimizer. Will be created via
             create_optimizer if not set.
@@ -1,6 +1,5 @@
 from typing import Dict, List, Union, Optional, Any, Callable, Type, Tuple
-from typing import Iterable, TypeVar, TYPE_CHECKING
-from .compat import Literal
+from typing import Iterable, TypeVar, Literal, TYPE_CHECKING
 from enum import Enum
 from pydantic import BaseModel, Field, ValidationError, validator, create_model
 from pydantic import StrictStr, StrictInt, StrictFloat, StrictBool, ConstrainedStr
@@ -163,15 +162,33 @@ class TokenPatternString(BaseModel):
     IS_SUPERSET: Optional[List[StrictStr]] = Field(None, alias="is_superset")
     INTERSECTS: Optional[List[StrictStr]] = Field(None, alias="intersects")
     FUZZY: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy")
-    FUZZY1: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy1")
-    FUZZY2: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy2")
-    FUZZY3: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy3")
-    FUZZY4: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy4")
-    FUZZY5: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy5")
-    FUZZY6: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy6")
-    FUZZY7: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy7")
-    FUZZY8: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy8")
-    FUZZY9: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy9")
+    FUZZY1: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
+        None, alias="fuzzy1"
+    )
+    FUZZY2: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
+        None, alias="fuzzy2"
+    )
+    FUZZY3: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
+        None, alias="fuzzy3"
+    )
+    FUZZY4: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
+        None, alias="fuzzy4"
+    )
+    FUZZY5: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
+        None, alias="fuzzy5"
+    )
+    FUZZY6: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
+        None, alias="fuzzy6"
+    )
+    FUZZY7: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
+        None, alias="fuzzy7"
+    )
+    FUZZY8: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
+        None, alias="fuzzy8"
+    )
+    FUZZY9: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
+        None, alias="fuzzy9"
+    )
 
     class Config:
         extra = "forbid"
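
Note: FUZZY uses the default edit budget described earlier, while FUZZY1 through FUZZY9 pin the maximum edit distance explicitly. A small hypothetical sketch (text and pattern are made up):

    import spacy
    from spacy.matcher import Matcher

    nlp = spacy.blank("en")
    matcher = Matcher(nlp.vocab)
    # FUZZY2 allows at most 2 edits from "spacy", regardless of pattern length.
    matcher.add("LIB", [[{"LOWER": {"FUZZY2": "spacy"}}]])

    doc = nlp("I love spacey")
    print([doc[start:end].text for _, start, end in matcher(doc)])  # ["spacey"]
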
@@ -103,14 +103,15 @@ def test_initialize_from_labels():
     }
 
 
-def test_no_data():
+@pytest.mark.parametrize("top_k", (1, 5, 30))
+def test_no_data(top_k):
     # Test that the lemmatizer provides a nice error when there's no tagging data / labels
     TEXTCAT_DATA = [
         ("I'm so happy.", {"cats": {"POSITIVE": 1.0, "NEGATIVE": 0.0}}),
         ("I'm so angry", {"cats": {"POSITIVE": 0.0, "NEGATIVE": 1.0}}),
     ]
     nlp = English()
-    nlp.add_pipe("trainable_lemmatizer")
+    nlp.add_pipe("trainable_lemmatizer", config={"top_k": top_k})
     nlp.add_pipe("textcat")
 
     train_examples = []

@@ -121,10 +122,11 @@ def test_no_data():
         nlp.initialize(get_examples=lambda: train_examples)
 
 
-def test_incomplete_data():
+@pytest.mark.parametrize("top_k", (1, 5, 30))
+def test_incomplete_data(top_k):
     # Test that the lemmatizer works with incomplete information
     nlp = English()
-    lemmatizer = nlp.add_pipe("trainable_lemmatizer")
+    lemmatizer = nlp.add_pipe("trainable_lemmatizer", config={"top_k": top_k})
     lemmatizer.min_tree_freq = 1
     train_examples = []
     for t in PARTIAL_DATA:

@@ -141,10 +143,25 @@ def test_incomplete_data(top_k):
     assert doc[1].lemma_ == "like"
     assert doc[2].lemma_ == "blue"
 
+    # Check that incomplete annotations are ignored.
+    scores, _ = lemmatizer.model([eg.predicted for eg in train_examples], is_train=True)
+    _, dX = lemmatizer.get_loss(train_examples, scores)
+    xp = lemmatizer.model.ops.xp
+
+    # Missing annotations.
+    assert xp.count_nonzero(dX[0][0]) == 0
+    assert xp.count_nonzero(dX[0][3]) == 0
+    assert xp.count_nonzero(dX[1][0]) == 0
+    assert xp.count_nonzero(dX[1][3]) == 0
+
+    # Misaligned annotations.
+    assert xp.count_nonzero(dX[1][1]) == 0
 
-def test_overfitting_IO():
+
+@pytest.mark.parametrize("top_k", (1, 5, 30))
+def test_overfitting_IO(top_k):
     nlp = English()
-    lemmatizer = nlp.add_pipe("trainable_lemmatizer")
+    lemmatizer = nlp.add_pipe("trainable_lemmatizer", config={"top_k": top_k})
     lemmatizer.min_tree_freq = 1
     train_examples = []
     for t in TRAIN_DATA:

@@ -177,7 +194,7 @@ def test_overfitting_IO(top_k):
     # Check model after a {to,from}_bytes roundtrip
     nlp_bytes = nlp.to_bytes()
     nlp3 = English()
-    nlp3.add_pipe("trainable_lemmatizer")
+    nlp3.add_pipe("trainable_lemmatizer", config={"top_k": top_k})
     nlp3.from_bytes(nlp_bytes)
     doc3 = nlp3(test_text)
     assert doc3[0].lemma_ == "she"
@@ -618,7 +618,6 @@ def test_string_to_list_intify(value):
     assert string_to_list(value, intify=True) == [1, 2, 3]
 
 
-@pytest.mark.skip(reason="Temporarily skip for dev version")
 def test_download_compatibility():
     spec = SpecifierSet("==" + about.__version__)
     spec.prereleases = False

@@ -629,7 +628,6 @@ def test_download_compatibility():
     assert get_minor_version(about.__version__) == get_minor_version(version)
 
 
-@pytest.mark.skip(reason="Temporarily skip for dev version")
 def test_validate_compatibility_table():
     spec = SpecifierSet("==" + about.__version__)
     spec.prereleases = False
@@ -1076,7 +1074,7 @@ def test_cli_find_threshold(capsys):
     )
     with make_tempdir() as nlp_dir:
         nlp.to_disk(nlp_dir)
-        res = find_threshold(
+        best_threshold, best_score, res = find_threshold(
             model=nlp_dir,
             data_path=docs_dir / "docs.spacy",
             pipe_name="tc_multi",

@@ -1084,10 +1082,10 @@ def test_cli_find_threshold(capsys):
             scores_key="cats_macro_f",
             silent=True,
         )
-        assert res[0] != thresholds[0]
-        assert thresholds[0] < res[0] < thresholds[9]
-        assert res[1] == 1.0
-        assert res[2][1.0] == 0.0
+        assert best_threshold != thresholds[0]
+        assert thresholds[0] < best_threshold < thresholds[9]
+        assert best_score == max(res.values())
+        assert res[1.0] == 0.0
 
         # Test with spancat.
         nlp, _ = init_nlp((("spancat", {}),))
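
Note: find_threshold() returns a (best_threshold, best_score, scores) triple, where scores maps each candidate threshold to its score; the updated test unpacks it directly instead of indexing positionally. A hypothetical calling sketch (paths and names are illustrative, and threshold_key is assumed from the test's elided middle lines):

    from spacy.cli.find_threshold import find_threshold

    best_threshold, best_score, scores = find_threshold(
        model="./my_textcat_model",
        data_path="./dev.spacy",
        pipe_name="textcat_multilabel",
        threshold_key="threshold",
        scores_key="cats_macro_f",
        silent=True,
    )
    assert best_score == max(scores.values())
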
@@ -1209,3 +1207,69 @@ def test_walk_directory():
     assert (len(walk_directory(d, suffix="iob"))) == 2
     assert (len(walk_directory(d, suffix="conll"))) == 3
     assert (len(walk_directory(d, suffix="pdf"))) == 0
+
+
+def test_debug_data_trainable_lemmatizer_basic():
+    examples = [
+        ("She likes green eggs", {"lemmas": ["she", "like", "green", "egg"]}),
+        ("Eat blue ham", {"lemmas": ["eat", "blue", "ham"]}),
+    ]
+    nlp = Language()
+    train_examples = []
+    for t in examples:
+        train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
+
+    data = _compile_gold(train_examples, ["trainable_lemmatizer"], nlp, True)
+    # ref test_edit_tree_lemmatizer::test_initialize_from_labels
+    # this results in 4 trees
+    assert len(data["lemmatizer_trees"]) == 4
+
+
+def test_debug_data_trainable_lemmatizer_partial():
+    partial_examples = [
+        # partial annotation
+        ("She likes green eggs", {"lemmas": ["", "like", "green", ""]}),
+        # misaligned partial annotation
+        (
+            "He hates green eggs",
+            {
+                "words": ["He", "hat", "es", "green", "eggs"],
+                "lemmas": ["", "hat", "e", "green", ""],
+            },
+        ),
+    ]
+    nlp = Language()
+    train_examples = []
+    for t in partial_examples:
+        train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
+
+    data = _compile_gold(train_examples, ["trainable_lemmatizer"], nlp, True)
+    assert data["partial_lemma_annotations"] == 2
+
+
+def test_debug_data_trainable_lemmatizer_low_cardinality():
+    low_cardinality_examples = [
+        ("She likes green eggs", {"lemmas": ["no", "no", "no", "no"]}),
+        ("Eat blue ham", {"lemmas": ["no", "no", "no"]}),
+    ]
+    nlp = Language()
+    train_examples = []
+    for t in low_cardinality_examples:
+        train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
+
+    data = _compile_gold(train_examples, ["trainable_lemmatizer"], nlp, True)
+    assert data["n_low_cardinality_lemmas"] == 2
+
+
+def test_debug_data_trainable_lemmatizer_not_annotated():
+    unannotated_examples = [
+        ("She likes green eggs", {}),
+        ("Eat blue ham", {}),
+    ]
+    nlp = Language()
+    train_examples = []
+    for t in unannotated_examples:
+        train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
+
+    data = _compile_gold(train_examples, ["trainable_lemmatizer"], nlp, True)
+    assert data["no_lemma_annotations"] == 2
@@ -1,6 +1,7 @@
 import os
 from pathlib import Path
 from typer.testing import CliRunner
+from spacy.tokens import DocBin, Doc
 
 from spacy.cli._util import app
 from .util import make_tempdir

@@ -31,3 +32,60 @@ def test_convert_auto_conflict():
         assert "All input files must be same type" in result.stdout
         out_files = os.listdir(d_out)
         assert len(out_files) == 0
+
+
+def test_benchmark_accuracy_alias():
+    # Verify that the `evaluate` alias works correctly.
+    result_benchmark = CliRunner().invoke(app, ["benchmark", "accuracy", "--help"])
+    result_evaluate = CliRunner().invoke(app, ["evaluate", "--help"])
+    assert result_benchmark.stdout == result_evaluate.stdout.replace(
+        "spacy evaluate", "spacy benchmark accuracy"
+    )
+
+
+def test_debug_data_trainable_lemmatizer_cli(en_vocab):
+    train_docs = [
+        Doc(en_vocab, words=["I", "like", "cats"], lemmas=["I", "like", "cat"]),
+        Doc(
+            en_vocab,
+            words=["Dogs", "are", "great", "too"],
+            lemmas=["dog", "be", "great", "too"],
+        ),
+    ]
+    dev_docs = [
+        Doc(en_vocab, words=["Cats", "are", "cute"], lemmas=["cat", "be", "cute"]),
+        Doc(en_vocab, words=["Pets", "are", "great"], lemmas=["pet", "be", "great"]),
+    ]
+    with make_tempdir() as d_in:
+        train_bin = DocBin(docs=train_docs)
+        train_bin.to_disk(d_in / "train.spacy")
+        dev_bin = DocBin(docs=dev_docs)
+        dev_bin.to_disk(d_in / "dev.spacy")
+        # `debug data` requires an input pipeline config
+        CliRunner().invoke(
+            app,
+            [
+                "init",
+                "config",
+                f"{d_in}/config.cfg",
+                "--lang",
+                "en",
+                "--pipeline",
+                "trainable_lemmatizer",
+            ],
+        )
+        result_debug_data = CliRunner().invoke(
+            app,
+            [
+                "debug",
+                "data",
+                f"{d_in}/config.cfg",
+                "--paths.train",
+                f"{d_in}/train.spacy",
+                "--paths.dev",
+                f"{d_in}/dev.spacy",
+            ],
+        )
+        # Instead of checking specific wording of the output, which may change,
+        # we'll check that this section of the debug output is present.
+        assert "= Trainable Lemmatizer =" in result_debug_data.stdout
@@ -26,6 +26,12 @@ except ImportError:
     pass
 
 
+TAGGER_TRAIN_DATA = [
+    ("I like green eggs", {"tags": ["N", "V", "J", "N"]}),
+    ("Eat blue ham", {"tags": ["V", "J", "N"]}),
+]
+
+
 def evil_component(doc):
     if "2" in doc.text:
         raise ValueError("no dice")

@@ -799,3 +805,66 @@ def test_component_return():
     nlp.add_pipe("test_component_bad_pipe")
     with pytest.raises(ValueError, match="instead of a Doc"):
         nlp("text")
+
+
+@pytest.mark.slow
+@pytest.mark.parametrize("teacher_tagger_name", ["tagger", "teacher_tagger"])
+def test_distill(teacher_tagger_name):
+    teacher = English()
+    teacher_tagger = teacher.add_pipe("tagger", name=teacher_tagger_name)
+    train_examples = []
+    for t in TAGGER_TRAIN_DATA:
+        train_examples.append(Example.from_dict(teacher.make_doc(t[0]), t[1]))
+
+    optimizer = teacher.initialize(get_examples=lambda: train_examples)
+
+    for i in range(50):
+        losses = {}
+        teacher.update(train_examples, sgd=optimizer, losses=losses)
+    assert losses[teacher_tagger_name] < 0.00001
+
+    student = English()
+    student_tagger = student.add_pipe("tagger")
+    student_tagger.min_tree_freq = 1
+    student_tagger.initialize(
+        get_examples=lambda: train_examples, labels=teacher_tagger.label_data
+    )
+
+    distill_examples = [
+        Example.from_dict(teacher.make_doc(t[0]), {}) for t in TAGGER_TRAIN_DATA
+    ]
+
+    student_to_teacher = (
+        None
+        if teacher_tagger.name == student_tagger.name
+        else {student_tagger.name: teacher_tagger.name}
+    )
+
+    for i in range(50):
+        losses = {}
+        student.distill(
+            teacher,
+            distill_examples,
+            sgd=optimizer,
+            losses=losses,
+            student_to_teacher=student_to_teacher,
+        )
+    assert losses["tagger"] < 0.00001
+
+    test_text = "I like blue eggs"
+    doc = student(test_text)
+    assert doc[0].tag_ == "N"
+    assert doc[1].tag_ == "V"
+    assert doc[2].tag_ == "J"
+    assert doc[3].tag_ == "N"
+
+    # Do an extra update to check if annotates works, though we can't really
+    # validate the results, since the annotations are ephemeral.
+    student.distill(
+        teacher,
+        distill_examples,
+        sgd=optimizer,
+        losses=losses,
+        student_to_teacher=student_to_teacher,
+        annotates=["tagger"],
+    )
spacy/tests/training/test_corpus.py | 78 (new file)

@@ -0,0 +1,78 @@
from typing import IO, Generator, Iterable, List, TextIO, Tuple
from contextlib import contextmanager
from pathlib import Path
import pytest
import tempfile

from spacy.lang.en import English
from spacy.training import Example, PlainTextCorpus
from spacy.util import make_tempdir

# Intentional newlines to check that they are skipped.
PLAIN_TEXT_DOC = """

This is a doc. It contains two sentences.
This is another doc.

A third doc.

"""

PLAIN_TEXT_DOC_TOKENIZED = [
    [
        "This",
        "is",
        "a",
        "doc",
        ".",
        "It",
        "contains",
        "two",
        "sentences",
        ".",
    ],
    ["This", "is", "another", "doc", "."],
    ["A", "third", "doc", "."],
]


@pytest.mark.parametrize("min_length", [0, 5])
@pytest.mark.parametrize("max_length", [0, 5])
def test_plain_text_reader(min_length, max_length):
    nlp = English()
    with _string_to_tmp_file(PLAIN_TEXT_DOC) as file_path:
        corpus = PlainTextCorpus(
            file_path, min_length=min_length, max_length=max_length
        )

        check = [
            doc
            for doc in PLAIN_TEXT_DOC_TOKENIZED
            if len(doc) >= min_length and (max_length == 0 or len(doc) <= max_length)
        ]
        reference, predicted = _examples_to_tokens(corpus(nlp))

        assert reference == check
        assert predicted == check


@contextmanager
def _string_to_tmp_file(s: str) -> Generator[Path, None, None]:
    with make_tempdir() as d:
        file_path = Path(d) / "string.txt"
        with open(file_path, "w", encoding="utf-8") as f:
            f.write(s)
        yield file_path


def _examples_to_tokens(
    examples: Iterable[Example],
) -> Tuple[List[List[str]], List[List[str]]]:
    reference = []
    predicted = []

    for eg in examples:
        reference.append([t.text for t in eg.reference])
        predicted.append([t.text for t in eg.predicted])

    return reference, predicted
@@ -37,7 +37,7 @@ cdef class Tokenizer:
                            bint with_special_cases) except -1
    cdef int _tokenize(self, Doc tokens, str span, hash_t key,
                       int* has_special, bint with_special_cases) except -1
    cdef str _split_affixes(self, Pool mem, str string,
    cdef str _split_affixes(self, str string,
                            vector[LexemeC*] *prefixes,
                            vector[LexemeC*] *suffixes, int* has_special,
                            bint with_special_cases)

@@ -389,14 +389,14 @@ cdef class Tokenizer:
        cdef vector[LexemeC*] suffixes
        cdef int orig_size
        orig_size = tokens.length
        span = self._split_affixes(tokens.mem, span, &prefixes, &suffixes,
        span = self._split_affixes(span, &prefixes, &suffixes,
                                   has_special, with_special_cases)
        self._attach_tokens(tokens, span, &prefixes, &suffixes, has_special,
                            with_special_cases)
        self._save_cached(&tokens.c[orig_size], orig_key, has_special,
                          tokens.length - orig_size)

    cdef str _split_affixes(self, Pool mem, str string,
    cdef str _split_affixes(self, str string,
                            vector[const LexemeC*] *prefixes,
                            vector[const LexemeC*] *suffixes,
                            int* has_special,

@@ -419,7 +419,7 @@ cdef class Tokenizer:
                minus_pre = string[pre_len:]
                if minus_pre and with_special_cases and self._specials.get(hash_string(minus_pre)) != NULL:
                    string = minus_pre
                    prefixes.push_back(self.vocab.get(mem, prefix))
                    prefixes.push_back(self.vocab.get(prefix))
                    break
            suf_len = self.find_suffix(string[pre_len:])
            if suf_len != 0:

@@ -427,18 +427,18 @@ cdef class Tokenizer:
                minus_suf = string[:-suf_len]
                if minus_suf and with_special_cases and self._specials.get(hash_string(minus_suf)) != NULL:
                    string = minus_suf
                    suffixes.push_back(self.vocab.get(mem, suffix))
                    suffixes.push_back(self.vocab.get(suffix))
                    break
            if pre_len and suf_len and (pre_len + suf_len) <= len(string):
                string = string[pre_len:-suf_len]
                prefixes.push_back(self.vocab.get(mem, prefix))
                suffixes.push_back(self.vocab.get(mem, suffix))
                prefixes.push_back(self.vocab.get(prefix))
                suffixes.push_back(self.vocab.get(suffix))
            elif pre_len:
                string = minus_pre
                prefixes.push_back(self.vocab.get(mem, prefix))
                prefixes.push_back(self.vocab.get(prefix))
            elif suf_len:
                string = minus_suf
                suffixes.push_back(self.vocab.get(mem, suffix))
                suffixes.push_back(self.vocab.get(suffix))
        return string

    cdef int _attach_tokens(self, Doc tokens, str string,

@@ -465,11 +465,11 @@ cdef class Tokenizer:
            # We're always saying 'no' to spaces here -- the caller will
            # fix up the outermost one, with reference to the original.
            # See Issue #859
            tokens.push_back(self.vocab.get(tokens.mem, string), False)
            tokens.push_back(self.vocab.get(string), False)
        else:
            matches = self.find_infix(string)
            if not matches:
                tokens.push_back(self.vocab.get(tokens.mem, string), False)
                tokens.push_back(self.vocab.get(string), False)
            else:
                # Let's say we have dyn-o-mite-dave - the regex finds the
                # start and end positions of the hyphens

@@ -484,7 +484,7 @@ cdef class Tokenizer:

                    if infix_start != start:
                        span = string[start:infix_start]
                        tokens.push_back(self.vocab.get(tokens.mem, span), False)
                        tokens.push_back(self.vocab.get(span), False)

                    if infix_start != infix_end:
                        # If infix_start != infix_end, it means the infix

@@ -492,11 +492,11 @@ cdef class Tokenizer:
                        # for tokenization in some languages (see
                        # https://github.com/explosion/spaCy/issues/768)
                        infix_span = string[infix_start:infix_end]
                        tokens.push_back(self.vocab.get(tokens.mem, infix_span), False)
                        tokens.push_back(self.vocab.get(infix_span), False)
                        start = infix_end
                span = string[start:]
                if span:
                    tokens.push_back(self.vocab.get(tokens.mem, span), False)
                    tokens.push_back(self.vocab.get(span), False)
        cdef vector[const LexemeC*].reverse_iterator it = suffixes.rbegin()
        while it != suffixes.rend():
            lexeme = deref(it)

@@ -266,12 +266,12 @@ cdef class Doc:
        cdef const LexemeC* lexeme
        for word, has_space in zip(words, spaces):
            if isinstance(word, str):
                lexeme = self.vocab.get(self.mem, word)
                lexeme = self.vocab.get(word)
            elif isinstance(word, bytes):
                raise ValueError(Errors.E028.format(value=word))
            else:
                try:
                    lexeme = self.vocab.get_by_orth(self.mem, word)
                    lexeme = self.vocab.get_by_orth(word)
                except TypeError:
                    raise TypeError(Errors.E1022.format(wtype=type(word)))
            self.push_back(lexeme, has_space)

@@ -1430,7 +1430,7 @@ cdef class Doc:
            end = start + attrs[i, 0]
            has_space = attrs[i, 1]
            orth_ = text[start:end]
            lex = self.vocab.get(self.mem, orth_)
            lex = self.vocab.get(orth_)
            self.push_back(lex, has_space)
            start = end + has_space
        self.from_array(msg["array_head"][2:], attrs[:, 2:])

@@ -1536,7 +1536,7 @@ cdef class Doc:
        assert words == reconstructed_words

        for word, has_space in zip(words, spaces):
            lex = self.vocab.get(self.mem, word)
            lex = self.vocab.get(word)
            self.push_back(lex, has_space)

        # Set remaining token-level attributes via Doc.from_array().

@@ -223,7 +223,7 @@ def _merge(Doc doc, merges):
        if doc.vocab.vectors_length > 0:
            doc.vocab.set_vector(new_orth, span.vector)
        token = tokens[token_index]
        lex = doc.vocab.get(doc.mem, new_orth)
        lex = doc.vocab.get(new_orth)
        token.lex = lex
        # We set trailing space here too
        token.spacy = doc.c[spans[token_index].end-1].spacy

@@ -359,7 +359,7 @@ def _split(Doc doc, int token_index, orths, heads, attrs):
    cdef int idx_offset = 0
    for i, orth in enumerate(orths):
        token = &doc.c[token_index + i]
        lex = doc.vocab.get(doc.mem, orth)
        lex = doc.vocab.get(orth)
        token.lex = lex
        # If lemma is currently set, set default lemma to orth
        if token.lemma != 0:

@@ -1,4 +1,4 @@
from .corpus import Corpus, JsonlCorpus  # noqa: F401
from .corpus import Corpus, JsonlCorpus, PlainTextCorpus  # noqa: F401
from .example import Example, validate_examples, validate_get_examples  # noqa: F401
from .example import validate_distillation_examples  # noqa: F401
from .alignment import Alignment  # noqa: F401

@@ -58,6 +58,28 @@ def read_labels(path: Path, *, require: bool = False):
    return srsly.read_json(path)


@util.registry.readers("spacy.PlainTextCorpus.v1")
def create_plain_text_reader(
    path: Optional[Path],
    min_length: int = 0,
    max_length: int = 0,
) -> Callable[["Language"], Iterable[Doc]]:
    """Iterate Example objects from a file or directory of plain text
    UTF-8 files with one line per doc.

    path (Path): The directory or filename to read from.
    min_length (int): Minimum document length (in tokens). Shorter documents
        will be skipped. Defaults to 0, which indicates no limit.
    max_length (int): Maximum document length (in tokens). Longer documents
        will be skipped. Defaults to 0, which indicates no limit.

    DOCS: https://spacy.io/api/corpus#plaintextcorpus
    """
    if path is None:
        raise ValueError(Errors.E913)
    return PlainTextCorpus(path, min_length=min_length, max_length=max_length)


def walk_corpus(path: Union[str, Path], file_type) -> List[Path]:
    path = util.ensure_path(path)
    if not path.is_dir() and path.parts[-1].endswith(file_type):
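Registering the factory under `spacy.PlainTextCorpus.v1` is what makes it addressable by name from training configs. As a rough sketch of what the config system does when resolving the reader (the corpus path below is illustrative):

```python
from spacy import util

# Look up the registered reader factory by name, as the config system would.
make_corpus = util.registry.readers.get("spacy.PlainTextCorpus.v1")
corpus = make_corpus(path="corpus/raw_text.txt", min_length=0, max_length=0)
```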
@@ -257,3 +279,52 @@ class JsonlCorpus:
                # We don't *need* an example here, but it seems nice to
                # make it match the Corpus signature.
                yield Example(doc, Doc(nlp.vocab, words=words, spaces=spaces))


class PlainTextCorpus:
    """Iterate Example objects from a file or directory of plain text
    UTF-8 files with one line per doc.

    path (Path): The directory or filename to read from.
    min_length (int): Minimum document length (in tokens). Shorter documents
        will be skipped. Defaults to 0, which indicates no limit.
    max_length (int): Maximum document length (in tokens). Longer documents
        will be skipped. Defaults to 0, which indicates no limit.

    DOCS: https://spacy.io/api/corpus#plaintextcorpus
    """

    file_type = "txt"

    def __init__(
        self,
        path: Optional[Union[str, Path]],
        *,
        min_length: int = 0,
        max_length: int = 0,
    ) -> None:
        self.path = util.ensure_path(path)
        self.min_length = min_length
        self.max_length = max_length

    def __call__(self, nlp: "Language") -> Iterator[Example]:
        """Yield examples from the data.

        nlp (Language): The current nlp object.
        YIELDS (Example): The example objects.

        DOCS: https://spacy.io/api/corpus#plaintextcorpus-call
        """
        for loc in walk_corpus(self.path, ".txt"):
            with open(loc, encoding="utf-8") as f:
                for text in f:
                    text = text.rstrip("\r\n")
                    if len(text):
                        doc = nlp.make_doc(text)
                        if self.min_length >= 1 and len(doc) < self.min_length:
                            continue
                        elif self.max_length >= 1 and len(doc) > self.max_length:
                            continue
                        # We don't *need* an example here, but it seems nice to
                        # make it match the Corpus signature.
                        yield Example(doc, doc.copy())

22
spacy/ty.py
22
spacy/ty.py
|
@ -1,6 +1,5 @@
|
|||
from typing import TYPE_CHECKING
|
||||
from typing import TYPE_CHECKING, Protocol, runtime_checkable
|
||||
from typing import Optional, Any, Iterable, Dict, Callable, Sequence, List
|
||||
from .compat import Protocol, runtime_checkable
|
||||
|
||||
from thinc.api import Optimizer, Model
|
||||
|
||||
|
@ -27,6 +26,25 @@ class TrainableComponent(Protocol):
|
|||
...
|
||||
|
||||
|
||||
@runtime_checkable
|
||||
class DistillableComponent(Protocol):
|
||||
is_distillable: bool
|
||||
|
||||
def distill(
|
||||
self,
|
||||
teacher_pipe: Optional[TrainableComponent],
|
||||
examples: Iterable["Example"],
|
||||
*,
|
||||
drop: float = 0.0,
|
||||
sgd: Optional[Optimizer] = None,
|
||||
losses: Optional[Dict[str, float]] = None
|
||||
) -> Dict[str, float]:
|
||||
...
|
||||
|
||||
def finish_update(self, sgd: Optimizer) -> None:
|
||||
...
|
||||
|
||||
|
||||
@runtime_checkable
|
||||
class InitializableComponent(Protocol):
|
||||
def initialize(
|
||||
|
|
|
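Because the protocol is decorated with `@runtime_checkable`, callers can probe a pipe for distillation support with a plain `isinstance` check. A minimal sketch under that assumption (the blank pipeline and component choice are illustrative; `isinstance` only verifies that the expected attributes and methods exist, not their signatures):

```python
import spacy
from spacy.ty import DistillableComponent

nlp = spacy.blank("en")  # illustrative student pipeline
nlp.add_pipe("tagger")

for name, pipe in nlp.pipeline:
    # Structural check: does this pipe expose is_distillable, distill()
    # and finish_update() as the protocol requires?
    if isinstance(pipe, DistillableComponent):
        print(f"{name} supports distillation")
```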
@@ -4,6 +4,7 @@ from typing import Iterator, Pattern, Generator, TYPE_CHECKING
from types import ModuleType
import os
import importlib
import importlib.metadata
import importlib.util
import re
from pathlib import Path

@@ -40,7 +41,7 @@ except ImportError:


from .symbols import ORTH
from .compat import cupy, CudaStream, is_windows, importlib_metadata
from .compat import cupy, CudaStream, is_windows
from .errors import Errors, Warnings
from . import about


@@ -706,8 +707,8 @@ def get_package_version(name: str) -> Optional[str]:
    RETURNS (str / None): The version or None if package not installed.
    """
    try:
        return importlib_metadata.version(name)  # type: ignore[attr-defined]
    except importlib_metadata.PackageNotFoundError:  # type: ignore[attr-defined]
        return importlib.metadata.version(name)  # type: ignore[attr-defined]
    except importlib.metadata.PackageNotFoundError:  # type: ignore[attr-defined]
        return None


@@ -895,7 +896,7 @@ def is_package(name: str) -> bool:
    RETURNS (bool): True if installed package, False if not.
    """
    try:
        importlib_metadata.distribution(name)  # type: ignore[attr-defined]
        importlib.metadata.distribution(name)  # type: ignore[attr-defined]
        return True
    except:  # noqa: E722
        return False

@@ -1718,7 +1719,7 @@ def packages_distributions() -> Dict[str, List[str]]:
    it's not available in the builtin importlib.metadata.
    """
    pkg_to_dist = defaultdict(list)
    for dist in importlib_metadata.distributions():
    for dist in importlib.metadata.distributions():
        for pkg in (dist.read_text("top_level.txt") or "").split():
            pkg_to_dist[pkg].append(dist.metadata["Name"])
    return dict(pkg_to_dist)
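With the `importlib_metadata` backport dropped, these helpers now run entirely on the standard library (available from Python 3.8). A small illustrative sketch; the printed values depend on your environment:

```python
from spacy.util import get_package_version, is_package

# Both helpers are backed by the stdlib importlib.metadata on Python 3.8+.
print(is_package("spacy"))           # True if spaCy is installed as a package
print(get_package_version("spacy"))  # e.g. "3.5.0"; None if not installed
```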
@@ -35,12 +35,11 @@ cdef class Vocab:
    cdef public object lex_attr_getters
    cdef public object cfg

    cdef const LexemeC* get(self, Pool mem, str string) except NULL
    cdef const LexemeC* get_by_orth(self, Pool mem, attr_t orth) except NULL
    cdef const LexemeC* get(self, str string) except NULL
    cdef const LexemeC* get_by_orth(self, attr_t orth) except NULL
    cdef const TokenC* make_fused_token(self, substrings) except NULL

    cdef const LexemeC* _new_lexeme(self, Pool mem, str string) except NULL
    cdef const LexemeC* _new_lexeme(self, str string) except NULL
    cdef int _add_lex_to_vocab(self, hash_t key, const LexemeC* lex) except -1
    cdef const LexemeC* _new_lexeme(self, Pool mem, str string) except NULL

    cdef PreshMap _by_orth

@@ -139,7 +139,7 @@ cdef class Vocab:
        self.lex_attr_getters[flag_id] = flag_getter
        return flag_id

    cdef const LexemeC* get(self, Pool mem, str string) except NULL:
    cdef const LexemeC* get(self, str string) except NULL:
        """Get a pointer to a `LexemeC` from the lexicon, creating a new
        `Lexeme` if necessary using memory acquired from the given pool. If the
        pool is the lexicon's own memory, the lexeme is saved in the lexicon.

@@ -157,9 +157,9 @@ cdef class Vocab:
                                              orth=key, orth_id=string))
            return lex
        else:
            return self._new_lexeme(mem, string)
            return self._new_lexeme(string)

    cdef const LexemeC* get_by_orth(self, Pool mem, attr_t orth) except NULL:
    cdef const LexemeC* get_by_orth(self, attr_t orth) except NULL:
        """Get a pointer to a `LexemeC` from the lexicon, creating a new
        `Lexeme` if necessary using memory acquired from the given pool. If the
        pool is the lexicon's own memory, the lexeme is saved in the lexicon.

@@ -171,21 +171,10 @@ cdef class Vocab:
        if lex != NULL:
            return lex
        else:
            return self._new_lexeme(mem, self.strings[orth])
            return self._new_lexeme(self.strings[orth])

    cdef const LexemeC* _new_lexeme(self, Pool mem, str string) except NULL:
        # I think this heuristic is bad, and the Vocab should always
        # own the lexemes. It avoids weird bugs this way, as it's how the thing
        # was originally supposed to work. The best solution to the growing
        # memory use is to periodically reset the vocab, which is an action
        # that should be up to the user to do (so we don't need to keep track
        # of the doc ownership).
        # TODO: Change the C API so that the mem isn't passed in here.
        mem = self.mem
        #if len(string) < 3 or self.length < 10000:
        #    mem = self.mem
        cdef bint is_oov = mem is not self.mem
        lex = <LexemeC*>mem.alloc(1, sizeof(LexemeC))
    cdef const LexemeC* _new_lexeme(self, str string) except NULL:
        lex = <LexemeC*>self.mem.alloc(1, sizeof(LexemeC))
        lex.orth = self.strings.add(string)
        lex.length = len(string)
        if self.vectors is not None:

@@ -199,8 +188,7 @@ cdef class Vocab:
                value = self.strings.add(value)
            if value is not None:
                Lexeme.set_struct_attr(lex, attr, value)
        if not is_oov:
            self._add_lex_to_vocab(lex.orth, lex)
        self._add_lex_to_vocab(lex.orth, lex)
        if lex == NULL:
            raise ValueError(Errors.E085.format(string=string))
        return lex

@@ -271,7 +259,7 @@ cdef class Vocab:
            props = intify_attrs(props, strings_map=self.strings)
            token = &tokens[i]
            # Set the special tokens up to have arbitrary attributes
            lex = <LexemeC*>self.get_by_orth(self.mem, props[ORTH])
            lex = <LexemeC*>self.get_by_orth(props[ORTH])
            token.lex = lex
            for attr_id, value in props.items():
                Token.set_struct_attr(token, attr_id, value)

9  website/.dockerignore  (new file)

@@ -0,0 +1,9 @@
.cache/
.next/
public/
node_modules
.npm
logs
*.log
npm-debug.log*
quickstart-training-generator.js
4  website/.gitignore  (vendored)

@@ -1,5 +1,7 @@
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.

quickstart-training-generator.js

# dependencies
/node_modules
/.pnp

@@ -41,4 +43,4 @@ next-env.d.ts
public/robots.txt
public/sitemap*
public/sw.js*
public/workbox*
public/workbox*

@@ -1,16 +1,14 @@
FROM node:11.15.0
FROM node:18

WORKDIR /spacy-io

RUN npm install -g gatsby-cli@2.7.4

COPY package.json .
COPY package-lock.json .

RUN npm install
USER node

# This is so the installed node_modules will be up one directory
# from where a user mounts files, so that they don't accidentally mount
# their own node_modules from a different build
# https://nodejs.org/api/modules.html#modules_loading_from_node_modules_folders
WORKDIR /spacy-io/website/
WORKDIR /home/node
COPY --chown=node package.json .
COPY --chown=node package-lock.json .
RUN npm install

WORKDIR /home/node/website/

@@ -41,33 +41,27 @@ If you'd like to do this, **be sure you do _not_ include your local
`node_modules` folder**, since there are some dependencies that need to be built
for the image system. Rename it before using.

```bash
docker run -it \
  -v $(pwd):/spacy-io/website \
  -p 8000:8000 \
  ghcr.io/explosion/spacy-io \
  gatsby develop -H 0.0.0.0
```

This will allow you to access the built website at http://0.0.0.0:8000/ in your
browser, and still edit code in your editor while having the site reflect those
changes.

**Note**: If you're working on a Mac with an M1 processor, you might see
segfault errors from `qemu` if you use the default image. To fix this use the
`arm64` tagged image in the `docker run` command
(ghcr.io/explosion/spacy-io:arm64).

### Building the Docker image

If you'd like to build the image locally, you can do so like this:
First build the Docker image. This only needs to be done on the first run
or when changes are made to `Dockerfile` or the website dependencies:

```bash
docker build -t spacy-io .
```

This will take some time, so if you want to use the prebuilt image you'll save a
bit of time.
You can then build and run the website with:

```bash
docker run -it \
  --rm \
  -v $(pwd):/home/node/website \
  -p 3000:3000 \
  spacy-io \
  npm run dev -- -H 0.0.0.0
```

This will allow you to access the built website at http://0.0.0.0:3000/ in your
browser, and still edit code in your editor while having the site reflect those
changes.

## Project structure

@@ -12,6 +12,7 @@ menu:
  - ['train', 'train']
  - ['pretrain', 'pretrain']
  - ['evaluate', 'evaluate']
  - ['benchmark', 'benchmark']
  - ['apply', 'apply']
  - ['find-threshold', 'find-threshold']
  - ['assemble', 'assemble']

@@ -269,10 +270,10 @@ $ python -m spacy convert [input_file] [output_dir] [--converter] [--file-type]
| `--file-type`, `-t` | Type of file to create. Either `spacy` (default) for binary [`DocBin`](/api/docbin) data or `json` for v2.x JSON format. ~~str (option)~~ |
| `--n-sents`, `-n` | Number of sentences per document. Supported for: `conll`, `conllu`, `iob`, `ner` ~~int (option)~~ |
| `--seg-sents`, `-s` | Segment sentences. Supported for: `conll`, `ner` ~~bool (flag)~~ |
| `--base`, `-b`, `--model` | Trained spaCy pipeline for sentence segmentation to use as base (for `--seg-sents`). ~~Optional[str](option)~~ |
| `--base`, `-b`, `--model` | Trained spaCy pipeline for sentence segmentation to use as base (for `--seg-sents`). ~~Optional[str] (option)~~ |
| `--morphology`, `-m` | Enable appending morphology to tags. Supported for: `conllu` ~~bool (flag)~~ |
| `--merge-subtokens`, `-T` | Merge CoNLL-U subtokens ~~bool (flag)~~ |
| `--ner-map`, `-nm` | NER tag mapping (as JSON-encoded dict of entity types). Supported for: `conllu` ~~Optional[Path](option)~~ |
| `--ner-map`, `-nm` | NER tag mapping (as JSON-encoded dict of entity types). Supported for: `conllu` ~~Optional[Path] (option)~~ |
| `--lang`, `-l` | Language code (if tokenizer required). ~~Optional[str] \(option)~~ |
| `--concatenate`, `-C` | Concatenate output to a single file ~~bool (flag)~~ |
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |

@@ -1135,8 +1136,19 @@ $ python -m spacy pretrain [config_path] [output_dir] [--code] [--resume-path] [

## evaluate {id="evaluate",version="2",tag="command"}

Evaluate a trained pipeline. Expects a loadable spaCy pipeline (package name or
path) and evaluation data in the
The `evaluate` subcommand is superseded by
[`spacy benchmark accuracy`](#benchmark-accuracy). `evaluate` is provided as an
alias to `benchmark accuracy` for compatibility.

## benchmark {id="benchmark", version="3.5"}

The `spacy benchmark` CLI includes commands for benchmarking the accuracy and
speed of your spaCy pipelines.

### accuracy {id="benchmark-accuracy", version="3.5", tag="command"}

Evaluate the accuracy of a trained pipeline. Expects a loadable spaCy pipeline
(package name or path) and evaluation data in the
[binary `.spacy` format](/api/data-formats#binary-training). The
`--gold-preproc` option sets up the evaluation examples with gold-standard
sentences and tokens for the predictions. Gold preprocessing helps the

@@ -1147,7 +1159,7 @@ skew. To render a sample of dependency parses in a HTML file using the
`--displacy-path` argument.

```bash
$ python -m spacy evaluate [model] [data_path] [--output] [--code] [--gold-preproc] [--gpu-id] [--displacy-path] [--displacy-limit]
$ python -m spacy benchmark accuracy [model] [data_path] [--output] [--code] [--gold-preproc] [--gpu-id] [--displacy-path] [--displacy-limit]
```

| Name | Description |

@@ -1163,6 +1175,29 @@ $ python -m spacy evaluate [model] [data_path] [--output] [--code] [--gold-prepr
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
| **CREATES** | Training results and optional metrics and visualizations. |

### speed {id="benchmark-speed", version="3.5", tag="command"}

Benchmark the speed of a trained pipeline with a 95% confidence interval.
Expects a loadable spaCy pipeline (package name or path) and benchmark data in
the [binary `.spacy` format](/api/data-formats#binary-training). The pipeline is
warmed up before any measurements are taken.

```cli
$ python -m spacy benchmark speed [model] [data_path] [--batch_size] [--no-shuffle] [--gpu-id] [--batches] [--warmup]
```

| Name | Description |
| -------------------- | ---------------------------------------------------------------------------------------------------------- |
| `model` | Pipeline to benchmark the speed of. Can be a package or a path to a data directory. ~~str (positional)~~ |
| `data_path` | Location of benchmark data in spaCy's [binary format](/api/data-formats#training). ~~Path (positional)~~ |
| `--batch-size`, `-b` | Set the batch size. If not set, the pipeline's batch size is used. ~~Optional[int] \(option)~~ |
| `--no-shuffle` | Do not shuffle documents in the benchmark data. ~~bool (flag)~~ |
| `--gpu-id`, `-g` | GPU to use, if any. Defaults to `-1` for CPU. ~~int (option)~~ |
| `--batches` | Number of batches to benchmark on. Defaults to `50`. ~~Optional[int] \(option)~~ |
| `--warmup`, `-w` | Iterations over the benchmark data for warmup. Defaults to `3`. ~~Optional[int] \(option)~~ |
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
| **PRINTS** | Pipeline speed in words per second with a 95% confidence interval. |

## apply {id="apply", version="3.5", tag="command"}

Applies a trained pipeline to data and stores the resulting annotated documents

@@ -1176,24 +1211,23 @@ input formats are:

When a directory is provided it is traversed recursively to collect all files.

```cli
```bash
$ python -m spacy apply [model] [data-path] [output-file] [--code] [--text-key] [--force-overwrite] [--gpu-id] [--batch-size] [--n-process]
```

| Name | Description |
| ----------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `model` | Pipeline to apply to the data. Can be a package or a path to a data directory. ~~str (positional)~~ |
| `data_path` | Location of data to be evaluated in spaCy's [binary format](/api/data-formats#training), jsonl, or plain text. ~~Path (positional)~~ |
| `output-file`, `-o` | Output `DocBin` path. ~~str (positional)~~ |
| `--code`, `-c` <Tag variant="new">3</Tag> | Path to Python file with additional code to be imported. Allows [registering custom functions](/usage/training#custom-functions) for new architectures. ~~Optional[Path] \(option)~~ |
| `--text-key`, `-tk` | The key for `.jsonl` files to use to grab the texts from. Defaults to `text`. ~~Optional[str] \(option)~~ |
| `--force-overwrite`, `-F` | If the provided `output-file` already exists, then force `apply` to overwrite it. If this is `False` (default) then quits with a warning instead. ~~bool (flag)~~ |
| `--gpu-id`, `-g` | GPU to use, if any. Defaults to `-1` for CPU. ~~int (option)~~ |
| `--batch-size`, `-b` | Batch size to use for prediction. Defaults to `1`. ~~int (option)~~ |
| `--n-process`, `-n` | Number of processes to use for prediction. Defaults to `1`. ~~int (option)~~ |
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
| **CREATES** | A `DocBin` with the annotations from the `model` for all the files found in `data-path`. |

| Name | Description |
| ------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `model` | Pipeline to apply to the data. Can be a package or a path to a data directory. ~~str (positional)~~ |
| `data_path` | Location of data to be evaluated in spaCy's [binary format](/api/data-formats#training), jsonl, or plain text. ~~Path (positional)~~ |
| `output-file`, `-o` | Output `DocBin` path. ~~str (positional)~~ |
| `--code`, `-c` | Path to Python file with additional code to be imported. Allows [registering custom functions](/usage/training#custom-functions) for new architectures. ~~Optional[Path] \(option)~~ |
| `--text-key`, `-tk` | The key for `.jsonl` files to use to grab the texts from. Defaults to `text`. ~~Optional[str] \(option)~~ |
| `--force-overwrite`, `-F` | If the provided `output-file` already exists, then force `apply` to overwrite it. If this is `False` (default) then quits with a warning instead. ~~bool (flag)~~ |
| `--gpu-id`, `-g` | GPU to use, if any. Defaults to `-1` for CPU. ~~int (option)~~ |
| `--batch-size`, `-b` | Batch size to use for prediction. Defaults to `1`. ~~int (option)~~ |
| `--n-process`, `-n` | Number of processes to use for prediction. Defaults to `1`. ~~int (option)~~ |
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
| **CREATES** | A `DocBin` with the annotations from the `model` for all the files found in `data-path`. |

## find-threshold {id="find-threshold",version="3.5",tag="command"}

@@ -175,3 +175,68 @@ Yield examples from the data.
| ---------- | -------------------------------------- |
| `nlp` | The current `nlp` object. ~~Language~~ |
| **YIELDS** | The examples. ~~Example~~ |

## PlainTextCorpus {id="plaintextcorpus",tag="class",version="3.5.1"}

Iterate over documents from a plain text file. Can be used to read the raw text
corpus for language model
[pretraining](/usage/embeddings-transformers#pretraining). The expected file
format is:

- UTF-8 encoding
- One document per line
- Blank lines are ignored.

```text {title="Example"}
Can I ask where you work now and what you do, and if you enjoy it?
They may just pull out of the Seattle market completely, at least until they have autonomous vehicles.
My cynical view on this is that it will never be free to the public. Reason: what would be the draw of joining the military? Right now their selling point is free Healthcare and Education. Ironically both are run horribly and most, that I've talked to, come out wishing they never went in.
```

### PlainTextCorpus.\_\_init\_\_ {id="plaintextcorpus-init",tag="method"}

Initialize the reader.

> #### Example
>
> ```python
> from spacy.training import PlainTextCorpus
>
> corpus = PlainTextCorpus("./data/docs.txt")
> ```
>
> ```ini
> ### Example config
> [corpora.pretrain]
> @readers = "spacy.PlainTextCorpus.v1"
> path = "corpus/raw_text.txt"
> min_length = 0
> max_length = 0
> ```

| Name | Description |
| -------------- | ---------------------------------------------------------------------------------------------------------------------------- |
| `path` | The directory or filename to read from. Expects newline-delimited documents in UTF-8 format. ~~Union[str, Path]~~ |
| _keyword-only_ | |
| `min_length` | Minimum document length (in tokens). Shorter documents will be skipped. Defaults to `0`, which indicates no limit. ~~int~~ |
| `max_length` | Maximum document length (in tokens). Longer documents will be skipped. Defaults to `0`, which indicates no limit. ~~int~~ |

### PlainTextCorpus.\_\_call\_\_ {id="plaintextcorpus-call",tag="method"}

Yield examples from the data.

> #### Example
>
> ```python
> from spacy.training import PlainTextCorpus
> import spacy
>
> corpus = PlainTextCorpus("./docs.txt")
> nlp = spacy.blank("en")
> data = corpus(nlp)
> ```

| Name | Description |
| ---------- | -------------------------------------- |
| `nlp` | The current `nlp` object. ~~Language~~ |
| **YIELDS** | The examples. ~~Example~~ |

|
|||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> lexeme = vocab.get(vocab.mem, "hello")
|
||||
> lexeme = vocab.get("hello")
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| ----------- | ---------------------------------------------------------------------------------------------------------- |
|
||||
| `mem` | A memory pool. Allocated memory will be freed once the `Vocab` object is garbage collected. ~~cymem.Pool~~ |
|
||||
| `string` | The string of the word to look up. ~~str~~ |
|
||||
| **RETURNS** | The lexeme in the vocabulary. ~~const LexemeC\*~~ |
|
||||
| Name | Description |
|
||||
| ----------- | ------------------------------------------------- |
|
||||
| `string` | The string of the word to look up. ~~str~~ |
|
||||
| **RETURNS** | The lexeme in the vocabulary. ~~const LexemeC\*~~ |
|
||||
|
||||
### Vocab.get_by_orth {id="vocab_get_by_orth",tag="method"}
|
||||
|
||||
|
@ -183,11 +182,10 @@ vocabulary.
|
|||
> lexeme = vocab.get_by_orth(doc[0].lex.norm)
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| ----------- | ---------------------------------------------------------------------------------------------------------- |
|
||||
| `mem` | A memory pool. Allocated memory will be freed once the `Vocab` object is garbage collected. ~~cymem.Pool~~ |
|
||||
| `orth` | ID of the verbatim text content. ~~attr_t (uint64_t)~~ |
|
||||
| **RETURNS** | The lexeme in the vocabulary. ~~const LexemeC\*~~ |
|
||||
| Name | Description |
|
||||
| ----------- | ------------------------------------------------------ |
|
||||
| `orth` | ID of the verbatim text content. ~~attr_t (uint64_t)~~ |
|
||||
| **RETURNS** | The lexeme in the vocabulary. ~~const LexemeC\*~~ |
|
||||
|
||||
## StringStore {id="stringstore",tag="cdef class",source="spacy/strings.pxd"}
|
||||
|
||||
|
|
|
@@ -154,15 +154,15 @@ This feature is experimental.
> losses = student.distill(teacher_pipe, examples, sgd=optimizer)
> ```

| Name | Description |
| -------------- | ---------------------------------------------------------------------------------------------------------------------------------------- |
| `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ |
| `examples` | Distillation examples. The reference and predicted docs must have the same number of tokens and the same orthography. ~~Iterable[Example]~~ |
| _keyword-only_ | |
| `drop` | Dropout rate. ~~float~~ |
| `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ |
| `losses` | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ |
| **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ |
| Name | Description |
| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ |
| `examples` | A batch of [`Example`](/api/example) distillation examples. The reference (teacher) and predicted (student) docs must have the same number of tokens and orthography. ~~Iterable[Example]~~ |
| _keyword-only_ | |
| `drop` | Dropout rate. ~~float~~ |
| `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ |
| `losses` | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ |
| **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ |

## DependencyParser.pipe {id="pipe",tag="method"}

@@ -138,15 +138,15 @@ This feature is experimental.
> losses = student.distill(teacher_pipe, examples, sgd=optimizer)
> ```

| Name | Description |
| -------------- | ---------------------------------------------------------------------------------------------------------------------------------------- |
| `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ |
| `examples` | Distillation examples. The reference and predicted docs must have the same number of tokens and the same orthography. ~~Iterable[Example]~~ |
| _keyword-only_ | |
| `drop` | Dropout rate. ~~float~~ |
| `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ |
| `losses` | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ |
| **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ |
| Name | Description |
| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ |
| `examples` | A batch of [`Example`](/api/example) distillation examples. The reference (teacher) and predicted (student) docs must have the same number of tokens and orthography. ~~Iterable[Example]~~ |
| _keyword-only_ | |
| `drop` | Dropout rate. ~~float~~ |
| `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ |
| `losses` | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ |
| **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ |

## EditTreeLemmatizer.pipe {id="pipe",tag="method"}

@@ -15,7 +15,7 @@ world". It requires a `KnowledgeBase`, as well as a function to generate
plausible candidates from that `KnowledgeBase` given a certain textual mention,
and a machine learning model to pick the right candidate, given the local
context of the mention. `EntityLinker` defaults to using the
[`InMemoryLookupKB`](/api/kb_in_memory) implementation.
[`InMemoryLookupKB`](/api/inmemorylookupkb) implementation.

## Assigned Attributes {id="assigned-attributes"}

@@ -150,15 +150,15 @@ This feature is experimental.
> losses = student.distill(teacher_pipe, examples, sgd=optimizer)
> ```

| Name | Description |
| -------------- | ---------------------------------------------------------------------------------------------------------------------------------------- |
| `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ |
| `examples` | Distillation examples. The reference and predicted docs must have the same number of tokens and the same orthography. ~~Iterable[Example]~~ |
| _keyword-only_ | |
| `drop` | Dropout rate. ~~float~~ |
| `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ |
| `losses` | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ |
| **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ |
| Name | Description |
| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ |
| `examples` | A batch of [`Example`](/api/example) distillation examples. The reference (teacher) and predicted (student) docs must have the same number of tokens and orthography. ~~Iterable[Example]~~ |
| _keyword-only_ | |
| `drop` | Dropout rate. ~~float~~ |
| `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ |
| `losses` | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ |
| **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ |

## EntityRecognizer.pipe {id="pipe",tag="method"}

@@ -43,7 +43,7 @@ The length of the fixed-size entity vectors in the knowledge base.

Add an entity to the knowledge base, specifying its corpus frequency and entity
vector, which should be of length
[`entity_vector_length`](/api/kb_in_memory#entity_vector_length).
[`entity_vector_length`](/api/inmemorylookupkb#entity_vector_length).

> #### Example
>

@@ -79,8 +79,9 @@ frequency and entity vector for each entity.

Add an alias or mention to the knowledge base, specifying its potential KB
identifiers and their prior probabilities. The entity identifiers should refer
to entities previously added with [`add_entity`](/api/kb_in_memory#add_entity)
or [`set_entities`](/api/kb_in_memory#set_entities). The sum of the prior
to entities previously added with
[`add_entity`](/api/inmemorylookupkb#add_entity) or
[`set_entities`](/api/inmemorylookupkb#set_entities). The sum of the prior
probabilities should not exceed 1. Note that an empty string cannot be used as
an alias.


@@ -156,7 +157,7 @@ Get a list of all aliases in the knowledge base.

Given a certain textual mention as input, retrieve a list of candidate entities
of type [`Candidate`](/api/kb#candidate). Wraps
[`get_alias_candidates()`](/api/kb_in_memory#get_alias_candidates).
[`get_alias_candidates()`](/api/inmemorylookupkb#get_alias_candidates).

> #### Example
>

@@ -174,7 +175,7 @@ of type [`Candidate`](/api/kb#candidate). Wraps

## InMemoryLookupKB.get_candidates_batch {id="get_candidates_batch",tag="method"}

Same as [`get_candidates()`](/api/kb_in_memory#get_candidates), but for an
Same as [`get_candidates()`](/api/inmemorylookupkb#get_candidates), but for an
arbitrary number of mentions. The [`EntityLinker`](/api/entitylinker) component
will call `get_candidates_batch()` instead of `get_candidates()` if the config
parameter `candidates_batch_size` is greater than or equal to 1.

@@ -231,7 +232,7 @@ Given a certain entity ID, retrieve its pretrained entity vector.

## InMemoryLookupKB.get_vectors {id="get_vectors",tag="method"}

Same as [`get_vector()`](/api/kb_in_memory#get_vector), but for an arbitrary
Same as [`get_vector()`](/api/inmemorylookupkb#get_vector), but for an arbitrary
number of entity IDs.

The default implementation of `get_vectors()` executes `get_vector()` in a loop.

@@ -21,8 +21,8 @@ functions called by the [`EntityLinker`](/api/entitylinker) component.
<Infobox variant="warning">

This class was not abstract up to spaCy version 3.5. The `KnowledgeBase`
implementation up to that point is available as `InMemoryLookupKB` from 3.5
onwards.
implementation up to that point is available as
[`InMemoryLookupKB`](/api/inmemorylookupkb) from 3.5 onwards.

</Infobox>


@@ -110,14 +110,15 @@ to you.
</Infobox>

From spaCy 3.5 on `KnowledgeBase` is an abstract class (with
[`InMemoryLookupKB`](/api/kb_in_memory) being a drop-in replacement) to allow
more flexibility in customizing knowledge bases. Some of its methods were moved
to [`InMemoryLookupKB`](/api/kb_in_memory) during this refactoring, one of those
being `get_alias_candidates()`. This method is now available as
[`InMemoryLookupKB.get_alias_candidates()`](/api/kb_in_memory#get_alias_candidates).
Note: [`InMemoryLookupKB.get_candidates()`](/api/kb_in_memory#get_candidates)
[`InMemoryLookupKB`](/api/inmemorylookupkb) being a drop-in replacement) to
allow more flexibility in customizing knowledge bases. Some of its methods were
moved to [`InMemoryLookupKB`](/api/inmemorylookupkb) during this refactoring,
one of those being `get_alias_candidates()`. This method is now available as
[`InMemoryLookupKB.get_alias_candidates()`](/api/inmemorylookupkb#get_alias_candidates).
Note:
[`InMemoryLookupKB.get_candidates()`](/api/inmemorylookupkb#get_candidates)
defaults to
[`InMemoryLookupKB.get_alias_candidates()`](/api/kb_in_memory#get_alias_candidates).
[`InMemoryLookupKB.get_alias_candidates()`](/api/inmemorylookupkb#get_alias_candidates).

## KnowledgeBase.get_vector {id="get_vector",tag="method"}

@@ -333,6 +333,34 @@ and custom registered functions if needed. See the
| `component_cfg` | Optional dictionary of keyword arguments for components, keyed by component names. Defaults to `None`. ~~Optional[Dict[str, Dict[str, Any]]]~~ |
| **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ |

## Language.distill {id="distill",tag="method,experimental",version="4"}

Distill the models in a student pipeline from a teacher pipeline.

> #### Example
>
> ```python
> teacher = spacy.load("en_core_web_lg")
> student = English()
> student.add_pipe("tagger")
> student.distill(teacher, examples, sgd=optimizer)
> ```

| Name | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `teacher` | The teacher pipeline to distill from. ~~Language~~ |
| `examples` | A batch of [`Example`](/api/example) distillation examples. The reference (teacher) and predicted (student) docs must have the same number of tokens and orthography. ~~Iterable[Example]~~ |
| _keyword-only_ | |
| `drop` | The dropout rate. ~~float~~ |
| `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ |
| `losses` | Dictionary to update with the loss, keyed by pipeline component. ~~Optional[Dict[str, float]]~~ |
| `component_cfg` | Optional dictionary of keyword arguments for components, keyed by component names. Defaults to `None`. ~~Optional[Dict[str, Dict[str, Any]]]~~ |
| `exclude` | Names of components that shouldn't be updated. Defaults to `[]`. ~~Iterable[str]~~ |
| `annotates` | Names of components that should set annotations on the predicted examples after updating. Defaults to `[]`. ~~Iterable[str]~~ |
| `student_to_teacher` | Map student component names to teacher component names; only necessary when the names differ. Defaults to `None`. ~~Optional[Dict[str, str]]~~ |
| **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ |
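When the student and teacher components are named differently, `student_to_teacher` states which teacher pipe supervises which student pipe. A minimal sketch, with names mirroring the test added earlier in this commit (`teacher_tagger` is illustrative):

```python
# Student pipe "tagger" learns from teacher pipe "teacher_tagger".
losses = student.distill(
    teacher,
    examples,
    sgd=optimizer,
    student_to_teacher={"tagger": "teacher_tagger"},
)
```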

## Language.rehearse {id="rehearse",tag="method,experimental",version="3"}

Perform a "rehearsal" update from a batch of data. Rehearsal updates teach the

@@ -144,15 +144,15 @@ This feature is experimental.
> losses = student.distill(teacher_pipe, examples, sgd=optimizer)
> ```

| Name | Description |
| -------------- | ---------------------------------------------------------------------------------------------------------------------------------------- |
| `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ |
| `examples` | Distillation examples. The reference and predicted docs must have the same number of tokens and the same orthography. ~~Iterable[Example]~~ |
| _keyword-only_ | |
| `drop` | Dropout rate. ~~float~~ |
| `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ |
| `losses` | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ |
| **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ |
| Name | Description |
| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ |
| `examples` | A batch of [`Example`](/api/example) distillation examples. The reference (teacher) and predicted (student) docs must have the same number of tokens and orthography. ~~Iterable[Example]~~ |
| _keyword-only_ | |
| `drop` | Dropout rate. ~~float~~ |
| `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ |
| `losses` | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ |
| **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ |

## Morphologizer.pipe {id="pipe",tag="method"}

@@ -257,15 +257,15 @@ This feature is experimental.
> losses = student.distill(teacher_pipe, examples, sgd=optimizer)
> ```

| Name | Description |
| -------------- | ---------------------------------------------------------------------------------------------------------------------------------------- |
| `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ |
| `examples` | Distillation examples. The reference and predicted docs must have the same number of tokens and the same orthography. ~~Iterable[Example]~~ |
| _keyword-only_ | |
| `drop` | Dropout rate. ~~float~~ |
| `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ |
| `losses` | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ |
| **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ |
| Name | Description |
| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ |
| `examples` | A batch of [`Example`](/api/example) distillation examples. The reference (teacher) and predicted (student) docs must have the same number of tokens and orthography. ~~Iterable[Example]~~ |
| _keyword-only_ | |
| `drop` | Dropout rate. ~~float~~ |
| `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ |
| `losses` | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ |
| **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ |

## TrainablePipe.rehearse {id="rehearse",tag="method,experimental",version="3"}

@@ -129,15 +129,15 @@ This feature is experimental.
> losses = student.distill(teacher_pipe, examples, sgd=optimizer)
> ```

| Name | Description |
| -------------- | ---------------------------------------------------------------------------------------------------------------------------------------- |
| `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ |
| `examples` | Distillation examples. The reference and predicted docs must have the same number of tokens and the same orthography. ~~Iterable[Example]~~ |
| _keyword-only_ | |
| `drop` | Dropout rate. ~~float~~ |
| `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ |
| `losses` | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ |
| **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ |
| Name | Description |
| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ |
| `examples` | A batch of [`Example`](/api/example) distillation examples. The reference (teacher) and predicted (student) docs must have the same number of tokens and orthography. ~~Iterable[Example]~~ |
| _keyword-only_ | |
| `drop` | Dropout rate. ~~float~~ |
| `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ |
| `losses` | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ |
| **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ |

## SentenceRecognizer.pipe {id="pipe",tag="method"}

@@ -128,15 +128,15 @@ This feature is experimental.
> losses = student.distill(teacher_pipe, examples, sgd=optimizer)
> ```

-| Name           | Description |
-| -------------- | ----------- |
-| `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ |
-| `examples`     | Distillation examples. The reference and predicted docs must have the same number of tokens and the same orthography. ~~Iterable[Example]~~ |
-| _keyword-only_ | |
-| `drop`         | Dropout rate. ~~float~~ |
-| `sgd`          | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ |
-| `losses`       | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ |
-| **RETURNS**    | The updated `losses` dictionary. ~~Dict[str, float]~~ |
+| Name           | Description |
+| -------------- | ----------- |
+| `teacher_pipe` | The teacher pipe to learn from. ~~Optional[TrainablePipe]~~ |
+| `examples`     | A batch of [`Example`](/api/example) distillation examples. The reference (teacher) and predicted (student) docs must have the same number of tokens and orthography. ~~Iterable[Example]~~ |
+| _keyword-only_ | |
+| `drop`         | Dropout rate. ~~float~~ |
+| `sgd`          | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ |
+| `losses`       | Optional record of the loss during distillation. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ |
+| **RETURNS**    | The updated `losses` dictionary. ~~Dict[str, float]~~ |
## Tagger.pipe {id="pipe",tag="method"}

@@ -236,17 +236,17 @@ browser. Will run a simple web server.
> displacy.serve([doc1, doc2], style="dep")
> ```

-| Name               | Description |
-| ------------------ | ----------- |
-| `docs`             | Document(s) or span(s) to visualize. ~~Union[Iterable[Union[Doc, Span]], Doc, Span]~~ |
-| `style`            | Visualization style, `"dep"`, `"ent"` or `"span"` <Tag variant="new">3.3</Tag>. Defaults to `"dep"`. ~~str~~ |
-| `page`             | Render markup as full HTML page. Defaults to `True`. ~~bool~~ |
-| `minify`           | Minify HTML markup. Defaults to `False`. ~~bool~~ |
-| `options`          | [Visualizer-specific options](#displacy_options), e.g. colors. ~~Dict[str, Any]~~ |
-| `manual`           | Don't parse `Doc` and instead expect a dict or list of dicts. [See here](/usage/visualizers#manual-usage) for formats and examples. Defaults to `False`. ~~bool~~ |
-| `port`             | Port to serve visualization. Defaults to `5000`. ~~int~~ |
-| `host`             | Host to serve visualization. Defaults to `"0.0.0.0"`. ~~str~~ |
-| `auto_select_port` | If `True`, automatically switch to a different port if the specified port is already in use. Defaults to `False`. ~~bool~~ |
+| Name                                            | Description |
+| ----------------------------------------------- | ----------- |
+| `docs`                                          | Document(s) or span(s) to visualize. ~~Union[Iterable[Union[Doc, Span]], Doc, Span]~~ |
+| `style` <Tag variant="new">3.3</Tag>            | Visualization style, `"dep"`, `"ent"` or `"span"`. Defaults to `"dep"`. ~~str~~ |
+| `page`                                          | Render markup as full HTML page. Defaults to `True`. ~~bool~~ |
+| `minify`                                        | Minify HTML markup. Defaults to `False`. ~~bool~~ |
+| `options`                                       | [Visualizer-specific options](#displacy_options), e.g. colors. ~~Dict[str, Any]~~ |
+| `manual`                                        | Don't parse `Doc` and instead expect a dict or list of dicts. [See here](/usage/visualizers#manual-usage) for formats and examples. Defaults to `False`. ~~bool~~ |
+| `port`                                          | Port to serve visualization. Defaults to `5000`. ~~int~~ |
+| `host`                                          | Host to serve visualization. Defaults to `"0.0.0.0"`. ~~str~~ |
+| `auto_select_port` <Tag variant="new">3.5</Tag> | If `True`, automatically switch to a different port if the specified port is already in use. Defaults to `False`. ~~bool~~ |
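A quick illustration of the parameters tabled above — a minimal sketch that serves two docs and lets displaCy fall back to a free port if `5000` is taken:

```python
import spacy
from spacy import displacy

nlp = spacy.load("en_core_web_sm")
doc1 = nlp("This is a sentence.")
doc2 = nlp("And this is another one.")

# auto_select_port (new in v3.5) switches to a different port
# if the requested one is already in use
displacy.serve([doc1, doc2], style="dep", port=5000, auto_select_port=True)
```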
### displacy.render {id="displacy.render",tag="method",version="2"}

@@ -81,7 +81,7 @@ operates on a `Doc` and gives you access to the matched tokens **in context**.
| ------------------------------------------------ | -------------------------------------------------------------------------------------------------- |
| [`Corpus`](/api/corpus)                          | Class for managing annotated corpora for training and evaluation data.                              |
| [`KnowledgeBase`](/api/kb)                       | Abstract base class for storage and retrieval of data for entity linking.                           |
-| [`InMemoryLookupKB`](/api/kb_in_memory)          | Implementation of `KnowledgeBase` storing all data in memory.                                       |
+| [`InMemoryLookupKB`](/api/inmemorylookupkb)      | Implementation of `KnowledgeBase` storing all data in memory.                                       |
| [`Candidate`](/api/kb#candidate)                 | Object associating a textual mention with a specific entity contained in a `KnowledgeBase`.         |
| [`Lookups`](/api/lookups)                        | Container for convenient access to large lookup tables and dictionaries.                            |
| [`MorphAnalysis`](/api/morphology#morphanalysis) | A morphological analysis.                                                                            |
@@ -134,6 +134,7 @@ useful for your purpose. Here are some important considerations to keep in mind:
<Image
  src="/images/sense2vec.jpg"
  href="https://github.com/explosion/sense2vec"
+  alt="sense2vec Screenshot"
/>

[`sense2vec`](https://github.com/explosion/sense2vec) is a library developed by
@@ -20,7 +20,7 @@ menu:

## Installation instructions {id="installation"}

-spaCy is compatible with **64-bit CPython 3.6+** and runs on **Unix/Linux**,
+spaCy is compatible with **64-bit CPython 3.8+** and runs on **Unix/Linux**,
**macOS/OS X** and **Windows**. The latest spaCy releases are available over
[pip](https://pypi.python.org/pypi/spacy) and
[conda](https://anaconda.org/conda-forge/spacy).
@@ -290,7 +290,7 @@ You can configure the build process with the following environment variables:
| Variable       | Description |
| -------------- | ----------- |
| `SPACY_EXTRAS` | Additional Python packages to install alongside spaCy with optional version specifications. Should be a string that can be passed to `pip install`. See [`Makefile`](%%GITHUB_SPACY/Makefile) for defaults. |
-| `PYVER`        | The Python version to build against. This version needs to be available on your build and runtime machines. Defaults to `3.6`. |
+| `PYVER`        | The Python version to build against. This version needs to be available on your build and runtime machines. Defaults to `3.8`. |
| `WHEELHOUSE`   | Directory to store the wheel files during compilation. Defaults to `./wheelhouse`. |

### Run tests {id="run-tests"}

@@ -113,6 +113,7 @@ code.
<Image
  src="/images/thinc_mypy.jpg"
  href="https://thinc.ai/docs/usage-type-checking#linting"
+  alt="Screenshot of Thinc type checking in VSCode with mypy"
/>

</Accordion>
@@ -943,7 +943,7 @@ full embedded visualizer, as well as individual components.
> $ pip install spacy-streamlit --pre
> ```

-![spacy-streamlit](/images/spacy-streamlit.png)
+![Screenshot of the spacy-streamlit package in Streamlit](/images/spacy-streamlit.png)

Using [`spacy-streamlit`](https://github.com/explosion/spacy-streamlit), your
projects can easily define their own scripts that spin up an interactive
@@ -384,14 +384,14 @@ the more specific attributes `FUZZY1`..`FUZZY9` you can specify the maximum
allowed edit distance directly.

```python
-# Match lowercase with fuzzy matching (allows 2 edits)
+# Match lowercase with fuzzy matching (allows 3 edits)
pattern = [{"LOWER": {"FUZZY": "definitely"}}]

-# Match custom attribute values with fuzzy matching (allows 2 edits)
+# Match custom attribute values with fuzzy matching (allows 3 edits)
pattern = [{"_": {"country": {"FUZZY": "Kyrgyzstan"}}}]

-# Match with exact Levenshtein edit distance limits (allows 3 edits)
-pattern = [{"_": {"country": {"FUZZY3": "Kyrgyzstan"}}}]
+# Match with exact Levenshtein edit distance limits (allows 4 edits)
+pattern = [{"_": {"country": {"FUZZY4": "Kyrgyzstan"}}}]
```

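Putting a fuzzy pattern to work — a minimal sketch with the `Matcher`; the misspelling below is one edit away from the pattern string, well within the default allowance:

```python
import spacy
from spacy.matcher import Matcher

nlp = spacy.blank("en")
matcher = Matcher(nlp.vocab)
# FUZZY tolerates small misspellings of the pattern string
matcher.add("DEFINITELY", [[{"LOWER": {"FUZZY": "definitely"}}]])

doc = nlp("I deffinitely think so.")
for match_id, start, end in matcher(doc):
    print(doc[start:end].text)  # "deffinitely"
```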
#### Regex and fuzzy matching with lists {id="regex-fuzzy-lists", version="3.5"}

@@ -304,6 +304,28 @@ installed in the same environment – that's it.
| `spacy_lookups` | Group of entry points for custom [`Lookups`](/api/lookups), including lemmatizer data. Used by spaCy's [`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data) package. |
| [`spacy_displacy_colors`](#entry-points-displacy) | Group of entry points of custom label colors for the [displaCy visualizer](/usage/visualizers#ent). The key name doesn't matter, but it should point to a dict of labels and color values. Useful for custom models that predict different entity types. |

+### Loading probability tables into existing models
+
+You can load a probability table from [spacy-lookups-data](https://github.com/explosion/spacy-lookups-data) into an existing spaCy model like `en_core_web_sm`.
+
+```python
+# Requirements: pip install spacy-lookups-data
+import spacy
+from spacy.lookups import load_lookups
+nlp = spacy.load("en_core_web_sm")
+lookups = load_lookups("en", ["lexeme_prob"])
+nlp.vocab.lookups.add_table("lexeme_prob", lookups.get_table("lexeme_prob"))
+```
+
+When training a model from scratch you can also specify probability tables in the `config.cfg`.
+
+```ini {title="config.cfg (excerpt)"}
+[initialize.lookups]
+@misc = "spacy.LookupsDataLoader.v1"
+lang = ${nlp.lang}
+tables = ["lexeme_prob"]
+```

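Once the table is registered, lexical probabilities become available on lexemes. A quick check (the word is illustrative, and the out-of-table default of `-20.0` is an assumption about spaCy's defaults rather than something stated in this diff):

```python
# Continuing from the snippet above: Lexeme.prob reads from the
# "lexeme_prob" lookups table (a log probability; words missing from
# the table typically fall back to -20.0).
print(nlp.vocab["the"].prob)
```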
### Custom components via entry points {id="entry-points-components"}

When you load a pipeline, spaCy will generally use its `config.cfg` to set up
@@ -684,10 +706,15 @@ If your pipeline includes
[custom components](/usage/processing-pipelines#custom-components), model
architectures or other [code](/usage/training#custom-code), those functions need
to be registered **before** your pipeline is loaded. Otherwise, spaCy won't know
-how to create the objects referenced in the config. The
-[`spacy package`](/api/cli#package) command lets you provide one or more paths
-to Python files containing custom registered functions using the `--code`
-argument.
+how to create the objects referenced in the config. If you're loading your own
+pipeline in Python, you can make custom components available just by importing
+the code that defines them before calling
+[`spacy.load`](/api/top-level#spacy.load). This is also how the `--code`
+argument to CLI commands works.
+
+With the [`spacy package`](/api/cli#package) command, you can provide one or
+more paths to Python files containing custom registered functions using the
+`--code` argument.
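In code, that import-before-load pattern looks like this (module and path names are placeholders):

```python
# my_custom_components is a hypothetical module whose import side effects
# register the custom factories/architectures with spaCy's registries.
import my_custom_components  # noqa: F401

import spacy

# Now spaCy can resolve the registered names referenced in config.cfg
nlp = spacy.load("/path/to/my_pipeline")
```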
> #### \_\_init\_\_.py (excerpt)
>
@@ -567,7 +567,10 @@ If you would like to use the spaCy logo on your site, please get in touch and
ask us first. However, if you want to show support and tell others that your
project is using spaCy, you can grab one of our **spaCy badges** here:

-<img src={`https://img.shields.io/badge/built%20with-spaCy-09a3d5.svg`} />
+<img
+    src={`https://img.shields.io/badge/built%20with-spaCy-09a3d5.svg`}
+    alt="Built with spaCy"
+/>

```markdown
[![Built with spaCy](https://img.shields.io/badge/built%20with-spaCy-09a3d5.svg)](https://spacy.io)

@@ -575,8 +578,9 @@ project is using spaCy, you can grab one of our **spaCy badges** here:
<img
    src={`https://img.shields.io/badge/made%20with%20❤%20and-spaCy-09a3d5.svg`}
+    alt="Made with love and spaCy"
/>

```markdown
-[![Built with spaCy](https://img.shields.io/badge/made%20with%20❤%20and-spaCy-09a3d5.svg)](https://spacy.io)
+[![Made with love and spaCy](https://img.shields.io/badge/made%20with%20❤%20and-spaCy-09a3d5.svg)](https://spacy.io)
```
215
website/docs/usage/v3-5.mdx
Normal file

@@ -0,0 +1,215 @@
---
title: What's New in v3.5
teaser: New features and how to upgrade
menu:
  - ['New Features', 'features']
  - ['Upgrading Notes', 'upgrading']
---

## New features {id="features",hidden="true"}

spaCy v3.5 introduces three new CLI commands, `apply`, `benchmark` and
`find-threshold`, adds fuzzy matching, provides improvements to our entity
linking functionality, and includes a range of language updates and bug fixes.

### New CLI commands {id="cli"}

#### apply CLI

The [`apply` CLI](/api/cli#apply) can be used to apply a pipeline to one or more
`.txt`, `.jsonl` or `.spacy` input files, saving the annotated docs in a single
`.spacy` file.

```bash
$ spacy apply en_core_web_sm my_texts/ output.spacy
```

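The resulting `.spacy` file is a serialized `DocBin`; a minimal sketch of reading the annotations back, with the file names following the example above:

```python
import spacy
from spacy.tokens import DocBin

# The vocab used for deserialization; any pipeline with a compatible
# vocab works here.
nlp = spacy.load("en_core_web_sm")

doc_bin = DocBin().from_disk("output.spacy")
for doc in doc_bin.get_docs(nlp.vocab):
    print(doc.text, [(ent.text, ent.label_) for ent in doc.ents])
```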
#### benchmark CLI

The [`benchmark` CLI](/api/cli#benchmark) has been added to extend the existing
`evaluate` functionality with a wider range of profiling subcommands.

The `benchmark accuracy` CLI is introduced as an alias for `evaluate`. The new
`benchmark speed` CLI performs warmup rounds before measuring the speed in words
per second on batches of randomly shuffled documents from the provided data.

```bash
$ spacy benchmark speed my_pipeline data.spacy
```

The output is the mean performance using batches (`nlp.pipe`) with a 95%
confidence interval, e.g., profiling `en_core_web_sm` on CPU:

```none
Outliers: 2.0%, extreme outliers: 0.0%
Mean: 18904.1 words/s (95% CI: -256.9 +244.1)
```

#### find-threshold CLI

The [`find-threshold` CLI](/api/cli#find-threshold) runs a series of trials
across threshold values from `0.0` to `1.0` and identifies the best threshold
for the provided score metric.

The following command runs 20 trials for the `spancat` component in
`my_pipeline`, recording the `spans_sc_f` score for each value of the threshold
`[components.spancat.threshold]` from `0.0` to `1.0`:

```bash
$ spacy find-threshold my_pipeline data.spacy spancat threshold spans_sc_f --n_trials 20
```

The `find-threshold` CLI can be used with `textcat_multilabel`, `spancat` and
custom components with thresholds that are applied while predicting or scoring.

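Once you've found a good value, one way to apply it is a config override when loading the pipeline — the `0.55` below is a made-up result, not output from a real run:

```python
import spacy

# Override the spancat threshold reported by `spacy find-threshold`
nlp = spacy.load(
    "my_pipeline",
    config={"components": {"spancat": {"threshold": 0.55}}},
)
```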
### Fuzzy matching {id="fuzzy"}

New `FUZZY` operators support [fuzzy matching](/usage/rule-based-matching#fuzzy)
with the `Matcher`. By default, the `FUZZY` operator allows a Levenshtein edit
distance of 2 and up to 30% of the pattern string length. `FUZZY1`..`FUZZY9` can
be used to specify the exact number of allowed edits.

```python
# Match lowercase with fuzzy matching (allows up to 3 edits)
pattern = [{"LOWER": {"FUZZY": "definitely"}}]

# Match custom attribute values with fuzzy matching (allows up to 3 edits)
pattern = [{"_": {"country": {"FUZZY": "Kyrgyzstan"}}}]

# Match with exact Levenshtein edit distance limits (allows up to 4 edits)
pattern = [{"_": {"country": {"FUZZY4": "Kyrgyzstan"}}}]
```

Note that `FUZZY` uses Levenshtein edit distance rather than Damerau-Levenshtein
edit distance, so a transposition like `teh` for `the` counts as two edits, one
insertion and one deletion.

If you'd prefer an alternate fuzzy matching algorithm, you can provide your own
custom method to the `Matcher` or as a config option for an entity ruler and
span ruler.

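A sketch of plugging in a custom comparison. The `fuzzy_compare` keyword and its `(input_text, pattern_text, fuzzy)` signature follow the v3.5 `Matcher` API as we understand it — treat the details as an assumption and check the API docs:

```python
import spacy
from spacy.matcher import Matcher

def forgiving_compare(input_text: str, pattern_text: str, fuzzy: int) -> bool:
    # Toy replacement for the default Levenshtein comparison:
    # accept any candidate that shares the pattern's first three characters.
    return input_text[:3] == pattern_text[:3]

nlp = spacy.blank("en")
matcher = Matcher(nlp.vocab, fuzzy_compare=forgiving_compare)
matcher.add("COUNTRY", [[{"LOWER": {"FUZZY": "kyrgyzstan"}}]])
```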
### FUZZY and REGEX with lists {id="fuzzy-regex-lists"}

The `FUZZY` and `REGEX` operators are also now supported for lists with `IN` and
`NOT_IN`:

```python
pattern = [{"TEXT": {"FUZZY": {"IN": ["awesome", "cool", "wonderful"]}}}]
pattern = [{"TEXT": {"REGEX": {"NOT_IN": ["^awe(some)?$", "^wonder(ful)?"]}}}]
```

### Entity linking generalization {id="el"}

The knowledge base used for entity linking is now easier to customize and has a
new default implementation [`InMemoryLookupKB`](/api/inmemorylookupkb).

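A minimal sketch of building a small in-memory KB — the entity ID, frequency and toy vector below are invented for illustration:

```python
import spacy
from spacy.kb import InMemoryLookupKB

nlp = spacy.blank("en")
kb = InMemoryLookupKB(vocab=nlp.vocab, entity_vector_length=3)

# Hypothetical entity with a toy 3-dimensional vector
kb.add_entity(entity="Q42", freq=12, entity_vector=[0.1, 0.2, 0.3])
# "Douglas Adams" always resolves to Q42 in this toy KB
kb.add_alias(alias="Douglas Adams", entities=["Q42"], probabilities=[1.0])
```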
### Additional features and improvements {id="additional-features-and-improvements"}

- Language updates:
  - Extended support for Slovenian
  - Fixed lookup fallback for French and Catalan lemmatizers
  - Switch Russian and Ukrainian lemmatizers to `pymorphy3`
  - Support for editorial punctuation in Ancient Greek
  - Update to Russian tokenizer exceptions
  - Small fix for Dutch stop words
- Allow up to `typer` v0.7.x, `mypy` 0.990 and `typing_extensions` v4.4.x.
- New `spacy.ConsoleLogger.v3` with expanded progress
  [tracking](/api/top-level#ConsoleLogger).
- Improved scoring behavior for `textcat` with `spacy.textcat_scorer.v2` and
  `spacy.textcat_multilabel_scorer.v2`.
- Updates so that downstream components can train properly on a frozen `tok2vec`
  or `transformer` layer.
- Allow interpolation of variables in directory names in projects.
- Support for local file system [remotes](/usage/projects#remote) for projects.
- Improve UX around `displacy.serve` when the default port is in use.
- Optional `before_update` callback that is invoked at the start of each
  [training step](/api/data-formats#config-training).
- Improve performance of `SpanGroup` and fix typing issues for `SpanGroup` and
  `Span` objects.
- Patch a
  [security vulnerability](https://github.com/advisories/GHSA-gw9q-c7gh-j9vm) in
  extracting tar files.
- Add equality definition for `Vectors`.
- Ensure `Vocab.to_disk` respects the exclude setting for `lookups` and
  `vectors`.
- Correctly handle missing annotations in the edit tree lemmatizer.

### Trained pipeline updates {id="pipelines"}

- The CNN pipelines add `IS_SPACE` as a `tok2vec` feature for `tagger` and
  `morphologizer` components to improve tagging of non-whitespace vs. whitespace
  tokens.
- The transformer pipelines require `spacy-transformers` v1.2, which uses the
  exact alignment from `tokenizers` for fast tokenizers instead of the heuristic
  alignment from `spacy-alignments`. For all trained pipelines except
  `ja_core_news_trf`, the alignments between spaCy tokens and transformer tokens
  may be slightly different. More details about the `spacy-transformers` changes
  in the
  [v1.2.0 release notes](https://github.com/explosion/spacy-transformers/releases/tag/v1.2.0).

## Notes about upgrading from v3.4 {id="upgrading"}

### Validation of textcat values {id="textcat-validation"}

An error is now raised when unsupported values are given as input to train a
`textcat` or `textcat_multilabel` model - ensure that values are `0.0` or `1.0`
as explained in the [docs](/api/textcategorizer#assigned-attributes).

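Concretely, category values in your training data should look like this (the labels are invented for illustration):

```python
# Valid: all category values are exactly 0.0 or 1.0
doc.cats = {"POSITIVE": 1.0, "NEGATIVE": 0.0}

# Invalid in v3.5+: fractional values like these now raise an error
# doc.cats = {"POSITIVE": 0.7, "NEGATIVE": 0.3}
```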
### Updated scorers for tokenization and textcat {id="scores"}

We fixed a bug that inflated the `token_acc` scores in v3.0-v3.4. The reported
`token_acc` will drop from v3.4 to v3.5, but if `token_p/r/f` stay the same,
your tokenization performance has not changed from v3.4.

For new `textcat` or `textcat_multilabel` configs, the new default `v2` scorers:

- ignore `threshold` for `textcat`, so the reported `cats_p/r/f` may increase
  slightly in v3.5 even though the underlying predictions are unchanged
- report the performance of only the **final** `textcat` or `textcat_multilabel`
  component in the pipeline by default
- allow custom scorers to be used to score multiple `textcat` and
  `textcat_multilabel` components with `Scorer.score_cats` by restricting the
  evaluation to the component's provided labels

### Pipeline package version compatibility {id="version-compat"}

> #### Using legacy implementations
>
> In spaCy v3, you'll still be able to load and reference legacy implementations
> via [`spacy-legacy`](https://github.com/explosion/spacy-legacy), even if the
> components or architectures change and newer versions are available in the
> core library.

When you're loading a pipeline package trained with an earlier version of spaCy
v3, you will see a warning telling you that the pipeline may be incompatible.
This doesn't necessarily have to be true, but we recommend running your
pipelines against your test suite or evaluation data to make sure there are no
unexpected results.

If you're using one of the [trained pipelines](/models) we provide, you should
run [`spacy download`](/api/cli#download) to update to the latest version. To
see an overview of all installed packages and their compatibility, you can run
[`spacy validate`](/api/cli#validate).

If you've trained your own custom pipeline and you've confirmed that it's still
working as expected, you can update the spaCy version requirements in the
[`meta.json`](/api/data-formats#meta):

```diff
- "spacy_version": ">=3.4.0,<3.5.0",
+ "spacy_version": ">=3.4.0,<3.6.0",
```

### Updating v3.4 configs

To update a config from spaCy v3.4 with the new v3.5 settings, run
[`init fill-config`](/api/cli#init-fill-config):

```cli
$ python -m spacy init fill-config config-v3.4.cfg config-v3.5.cfg
```

In many cases ([`spacy train`](/api/cli#train),
[`spacy.load`](/api/top-level#spacy.load)), the new defaults will be filled in
automatically, but you'll need to fill in the new settings to run
[`debug config`](/api/cli#debug) and [`debug data`](/api/cli#debug-data).

@@ -437,6 +437,6 @@ Alternatively, if you're using [Streamlit](https://streamlit.io), check out the
helps you integrate spaCy visualizations into your apps. It includes a full
embedded visualizer, as well as individual components.

-![spacy-streamlit](/images/spacy-streamlit.png)
+![Screenshot of the spacy-streamlit package in Streamlit](/images/spacy-streamlit.png)

</Grid>
@@ -13,7 +13,8 @@
{ "text": "New in v3.1", "url": "/usage/v3-1" },
{ "text": "New in v3.2", "url": "/usage/v3-2" },
{ "text": "New in v3.3", "url": "/usage/v3-3" },
-{ "text": "New in v3.4", "url": "/usage/v3-4" }
+{ "text": "New in v3.4", "url": "/usage/v3-4" },
+{ "text": "New in v3.5", "url": "/usage/v3-5" }
]
},
{

@@ -129,6 +130,7 @@
"items": [
{ "text": "Attributes", "url": "/api/attributes" },
{ "text": "Corpus", "url": "/api/corpus" },
+{ "text": "InMemoryLookupKB", "url": "/api/inmemorylookupkb" },
{ "text": "KnowledgeBase", "url": "/api/kb" },
{ "text": "Lookups", "url": "/api/lookups" },
{ "text": "MorphAnalysis", "url": "/api/morphology#morphanalysis" },
@@ -27,7 +27,7 @@
"indexName": "spacy"
},
"binderUrl": "explosion/spacy-io-binder",
-"binderVersion": "3.4",
+"binderVersion": "3.5",
"sections": [
{ "id": "usage", "title": "Usage Documentation", "theme": "blue" },
{ "id": "models", "title": "Models Documentation", "theme": "blue" },
@@ -2381,7 +2381,7 @@
"author": "Nikita Kitaev",
"author_links": {
    "github": "nikitakit",
-    "website": " http://kitaev.io"
+    "website": "http://kitaev.io"
},
"category": ["research", "pipeline"]
},
@@ -17,7 +17,7 @@ export default function App({ Component, pageProps }: AppProps) {
<link rel="manifest" href="/manifest.webmanifest" />
<meta
    name="viewport"
-    content="width=device-width, initial-scale=1.0, minimum-scale=1 maximum-scale=1.0, user-scalable=0, shrink-to-fit=no, viewport-fit=cover"
+    content="width=device-width, initial-scale=1.0, minimum-scale=1, maximum-scale=5.0, shrink-to-fit=no, viewport-fit=cover"
/>
<meta name="theme-color" content="#09a3d5" />
<link rel="apple-touch-icon" sizes="192x192" href="/icons/icon-192x192.png" />
@@ -13,7 +13,7 @@ import {
    LandingBanner,
} from '../src/components/landing'
import { H2 } from '../src/components/typography'
-import { InlineCode } from '../src/components/code'
+import { InlineCode } from '../src/components/inlineCode'
import { Ul, Li } from '../src/components/list'
import Button from '../src/components/button'
import Link from '../src/components/link'
@@ -89,8 +89,8 @@ const Landing = () => {
    </LandingCard>

    <LandingCard title="Awesome ecosystem" url="/usage/projects" button="Read more">
-        In the five years since its release, spaCy has become an industry standard with
-        a huge ecosystem. Choose from a variety of plugins, integrate with your machine
+        Since its release in 2015, spaCy has become an industry standard with a huge
+        ecosystem. Choose from a variety of plugins, integrate with your machine
        learning stack and build custom components and workflows.
    </LandingCard>
</LandingGrid>
@@ -162,7 +162,7 @@ const Landing = () => {
    small
>
    <p>
-        <Link to="https://prodi.gy" hidden>
+        <Link to="https://prodi.gy" noLinkLayout>
            <ImageFill
                image={prodigyImage}
                alt="Prodigy: Radically efficient machine teaching"
@@ -206,7 +206,10 @@ const Landing = () => {
<LandingGrid cols={2}>
    <LandingCol>
        <Link to="/usage/projects" hidden>
-            <ImageFill image={projectsImage} />
+            <ImageFill
+                image={projectsImage}
+                alt="Illustration of project workflow and commands"
+            />
        </Link>
        <br />
        <br />
@@ -33,7 +33,7 @@ export default function Accordion({ title, id, expanded = false, spaced = false,
<Link
    to={`#${id}`}
    className={classes.anchor}
-    hidden
+    noLinkLayout
    onClick={(event) => event.stopPropagation()}
>
    ¶
@@ -1,6 +1,7 @@
import React from 'react'
import PropTypes from 'prop-types'
import classNames from 'classnames'
+import ImageNext from 'next/image'

import Link from './link'
import { H5 } from './typography'

@@ -10,7 +11,7 @@ export default function Card({ title, to, image, header, small, onClick, childre
return (
    <div className={classNames(classes.root, { [classes.small]: !!small })}>
        {header && (
-            <Link to={to} onClick={onClick} hidden>
+            <Link to={to} onClick={onClick} noLinkLayout>
                {header}
            </Link>
        )}

@@ -18,18 +19,17 @@ export default function Card({ title, to, image, header, small, onClick, childre
<H5 className={classes.title}>
    {image && (
        <div className={classes.image}>
-            {/* eslint-disable-next-line @next/next/no-img-element */}
-            <img src={image} width={35} alt="" />
+            <ImageNext src={image} height={35} width={35} alt={`${title} Logo`} />
        </div>
    )}
    {title && (
-        <Link to={to} onClick={onClick} hidden>
+        <Link to={to} onClick={onClick} noLinkLayout>
            {title}
        </Link>
    )}
</H5>
)}
-<Link to={to} onClick={onClick} hidden>
+<Link to={to} onClick={onClick} noLinkLayout>
    {children}
</Link>
</div>
@@ -14,96 +14,16 @@ import 'prismjs/components/prism-markdown.min.js'
import 'prismjs/components/prism-python.min.js'
import 'prismjs/components/prism-yaml.min.js'

-import CUSTOM_TYPES from '../../meta/type-annotations.json'
-import { isString, htmlToReact } from './util'
+import { isString } from './util'
import Link, { OptionalLink } from './link'
import GitHubCode from './github'
-import Juniper from './juniper'
import classes from '../styles/code.module.sass'
import siteMetadata from '../../meta/site.json'
import { binderBranch } from '../../meta/dynamicMeta.mjs'
+import dynamic from 'next/dynamic'

-const WRAP_THRESHOLD = 30
const CLI_GROUPS = ['init', 'debug', 'project', 'ray', 'huggingface-hub']

-const CodeBlock = (props) => (
-    <Pre>
-        <Code {...props} />
-    </Pre>
-)
-
-export default CodeBlock
-
-export const Pre = (props) => {
-    return <pre className={classes['pre']}>{props.children}</pre>
-}
-
-export const InlineCode = ({ wrap = false, className, children, ...props }) => {
-    const codeClassNames = classNames(classes['inline-code'], className, {
-        [classes['wrap']]: wrap || (isString(children) && children.length >= WRAP_THRESHOLD),
-    })
-    return (
-        <code className={codeClassNames} {...props}>
-            {children}
-        </code>
-    )
-}
-
-InlineCode.propTypes = {
-    wrap: PropTypes.bool,
-    className: PropTypes.string,
-    children: PropTypes.node,
-}
-
-function linkType(el, showLink = true) {
-    if (!isString(el) || !el.length) return el
-    const elStr = el.trim()
-    if (!elStr) return el
-    const typeUrl = CUSTOM_TYPES[elStr]
-    const url = typeUrl == true ? DEFAULT_TYPE_URL : typeUrl
-    const ws = el[0] == ' '
-    return url && showLink ? (
-        <Fragment>
-            {ws && ' '}
-            <Link to={url} hideIcon>
-                {elStr}
-            </Link>
-        </Fragment>
-    ) : (
-        el
-    )
-}
-
-export const TypeAnnotation = ({ lang = 'python', link = true, children }) => {
-    // Hacky, but we're temporarily replacing a dot to prevent it from being split during highlighting
-    const TMP_DOT = '۔'
-    const code = Array.isArray(children) ? children.join('') : children || ''
-    const [rawText, meta] = code.split(/(?= \(.+\)$)/)
-    const rawStr = rawText.replace(/\./g, TMP_DOT)
-    const rawHtml =
-        lang === 'none' || !code ? code : Prism.highlight(rawStr, Prism.languages[lang], lang)
-    const html = rawHtml.replace(new RegExp(TMP_DOT, 'g'), '.').replace(/\n/g, ' ')
-    const result = htmlToReact(html)
-    const elements = Array.isArray(result) ? result : [result]
-    const annotClassNames = classNames(
-        'type-annotation',
-        `language-${lang}`,
-        classes['inline-code'],
-        classes['type-annotation'],
-        {
-            [classes['wrap']]: code.length >= WRAP_THRESHOLD,
-        }
-    )
-    return (
-        <span className={annotClassNames} role="code" aria-label="Type annotation">
-            {elements.map((el, i) => (
-                <Fragment key={i}>{linkType(el, !!link)}</Fragment>
-            ))}
-            {meta && <span className={classes['type-annotation-meta']}>{meta}</span>}
-        </span>
-    )
-}

const splitLines = (children) => {
    const listChildrenPerLine = []
@@ -235,7 +155,7 @@ const handlePromot = ({ lineFlat, prompt }) => {
<Fragment key={j}>
    {j !== 0 && ' '}
    <span className={itemClassNames}>
-        <OptionalLink hidden hideIcon to={url}>
+        <OptionalLink noLinkLayout hideIcon to={url}>
            {text}
        </OptionalLink>
    </span>
@@ -288,7 +208,7 @@ const addLineHighlight = (children, highlight) => {
    })
}

-export const CodeHighlighted = ({ children, highlight, lang }) => {
+const CodeHighlighted = ({ children, highlight, lang }) => {
    const [html, setHtml] = useState()

    useEffect(
@@ -305,7 +225,7 @@ export const CodeHighlighted = ({ children, highlight, lang }) => {
    return <>{html}</>
}

-export class Code extends React.Component {
+export default class Code extends React.Component {
    static defaultProps = {
        lang: 'none',
        executable: null,
@@ -354,6 +274,8 @@ export class Code extends React.Component {
    }
}

+const JuniperDynamic = dynamic(() => import('./juniper'))
+
const JuniperWrapper = ({ title, lang, children }) => {
    const { binderUrl, binderVersion } = siteMetadata
    const juniperTitle = title || 'Editable Code'
@@ -363,13 +285,13 @@ const JuniperWrapper = ({ title, lang, children }) => {
    {juniperTitle}
    <span className={classes['juniper-meta']}>
        spaCy v{binderVersion} · Python 3 · via{' '}
-        <Link to="https://mybinder.org/" hidden>
+        <Link to="https://mybinder.org/" noLinkLayout>
            Binder
        </Link>
    </span>
</h4>

-<Juniper
+<JuniperDynamic
    repo={binderUrl}
    branch={binderBranch}
    lang={lang}

@@ -381,7 +303,7 @@ const JuniperWrapper = ({ title, lang, children }) => {
    }}
>
    {children}
-</Juniper>
+</JuniperDynamic>
</div>
)
}
14
website/src/components/codeBlock.js
Normal file

@@ -0,0 +1,14 @@
import React from 'react'
import Code from './codeDynamic'
import classes from '../styles/code.module.sass'

export const Pre = (props) => {
    return <pre className={classes['pre']}>{props.children}</pre>
}

const CodeBlock = (props) => (
    <Pre>
        <Code {...props} />
    </Pre>
)
export default CodeBlock
5
website/src/components/codeDynamic.js
Normal file

@@ -0,0 +1,5 @@
import dynamic from 'next/dynamic'

export default dynamic(() => import('./code'), {
    loading: () => <div style={{ color: 'white', padding: '1rem' }}>Loading...</div>,
})
|
@ -14,7 +14,7 @@ export function copyToClipboard(ref, callback) {
|
|||
}
|
||||
}
|
||||
|
||||
export default function CopyInput({ text, prefix }) {
|
||||
export default function CopyInput({ text, description, prefix }) {
|
||||
const isClient = typeof window !== 'undefined'
|
||||
const [supportsCopy, setSupportsCopy] = useState(false)
|
||||
|
||||
|
@ -41,6 +41,7 @@ export default function CopyInput({ text, prefix }) {
|
|||
defaultValue={text}
|
||||
rows={1}
|
||||
onClick={selectText}
|
||||
aria-label={description}
|
||||
/>
|
||||
{supportsCopy && (
|
||||
<button title="Copy to clipboard" onClick={onClick}>
|
||||
|
|
|
@ -5,8 +5,8 @@ import ImageNext from 'next/image'
|
|||
|
||||
import Link from './link'
|
||||
import Button from './button'
|
||||
import { InlineCode } from './code'
|
||||
import { MarkdownToReact } from './util'
|
||||
import { InlineCode } from './inlineCode'
|
||||
import MarkdownToReact from './markdownToReactDynamic'
|
||||
|
||||
import classes from '../styles/embed.module.sass'
|
||||
|
||||
|
@ -88,10 +88,16 @@ const Image = ({ src, alt, title, href, ...props }) => {
|
|||
const markdownComponents = { code: InlineCode, p: Fragment, a: Link }
|
||||
return (
|
||||
<figure className="gatsby-resp-image-figure">
|
||||
<Link className={linkClassNames} href={href ?? src} hidden forceExternal>
|
||||
{/* eslint-disable-next-line @next/next/no-img-element */}
|
||||
{href ? (
|
||||
<Link className={linkClassNames} href={href} noLinkLayout forceExternal>
|
||||
{/* eslint-disable-next-line @next/next/no-img-element */}
|
||||
<img className={classes.image} src={src} alt={alt} width={650} height="auto" />
|
||||
</Link>
|
||||
) : (
|
||||
/* eslint-disable-next-line @next/next/no-img-element */
|
||||
<img className={classes.image} src={src} alt={alt} width={650} height="auto" />
|
||||
</Link>
|
||||
)}
|
||||
|
||||
{title && (
|
||||
<figcaption className="gatsby-resp-image-figcaption">
|
||||
<MarkdownToReact markdown={title} />
|
||||
|
@ -104,7 +110,7 @@ const Image = ({ src, alt, title, href, ...props }) => {
|
|||
const ImageFill = ({ image, ...props }) => {
|
||||
return (
|
||||
<span
|
||||
class={classes['figure-fill']}
|
||||
className={classes['figure-fill']}
|
||||
style={{ paddingBottom: `${(image.height / image.width) * 100}%` }}
|
||||
>
|
||||
<ImageNext src={image.src} {...props} fill />
|
||||
|
|
|
@ -21,7 +21,7 @@ export default function Footer({ wide = false }) {
|
|||
<li className={classes.label}>{label}</li>
|
||||
{items.map(({ text, url }, j) => (
|
||||
<li key={j}>
|
||||
<Link to={url} hidden>
|
||||
<Link to={url} noLinkLayout>
|
||||
{text}
|
||||
</Link>
|
||||
</li>
|
||||
|
@ -42,14 +42,14 @@ export default function Footer({ wide = false }) {
|
|||
<div className={classNames(classes.content, classes.copy)}>
|
||||
<span>
|
||||
© 2016-{new Date().getFullYear()}{' '}
|
||||
<Link to={companyUrl} hidden>
|
||||
<Link to={companyUrl} noLinkLayout>
|
||||
{company}
|
||||
</Link>
|
||||
</span>
|
||||
<Link to={companyUrl} aria-label={company} hidden className={classes.logo}>
|
||||
<Link to={companyUrl} aria-label={company} noLinkLayout className={classes.logo}>
|
||||
<SVG src={explosionLogo.src} width={45} height={45} />
|
||||
</Link>
|
||||
<Link to={`${companyUrl}/legal`} hidden>
|
||||
<Link to={`${companyUrl}/legal`} noLinkLayout>
|
||||
Legal / Imprint
|
||||
</Link>
|
||||
</div>
|
||||
|
|
|
@ -5,7 +5,7 @@ import classNames from 'classnames'
|
|||
import Icon from './icon'
|
||||
import Link from './link'
|
||||
import classes from '../styles/code.module.sass'
|
||||
import { Code } from './code'
|
||||
import Code from './codeDynamic'
|
||||
|
||||
const defaultErrorMsg = `Can't fetch code example from GitHub :(
|
||||
|
||||
|
@ -42,7 +42,7 @@ const GitHubCode = ({ url, lang, errorMsg = defaultErrorMsg, className }) => {
|
|||
return (
|
||||
<>
|
||||
<header className={classes.header}>
|
||||
<Link to={url} hidden>
|
||||
<Link to={url} noLinkLayout>
|
||||
<Icon name="github" width={16} inline />
|
||||
<code
|
||||
className={classNames(classes['inline-code'], classes['inline-code-dark'])}
|
||||
|
|
12
website/src/components/htmlToReact.js
Normal file

@@ -0,0 +1,12 @@
import { Parser as HtmlToReactParser } from 'html-to-react'

const htmlToReactParser = new HtmlToReactParser()
/**
 * Convert raw HTML to React elements
 * @param {string} html - The HTML markup to convert.
 * @returns {Node} - The converted React elements.
 */
export default function HtmlToReact(props) {
    return htmlToReactParser.parse(props.children)
}
23
website/src/components/inlineCode.js
Normal file

@@ -0,0 +1,23 @@
import React from 'react'
import PropTypes from 'prop-types'
import classNames from 'classnames'
import { isString } from './util'
import classes from '../styles/code.module.sass'

const WRAP_THRESHOLD = 30

export const InlineCode = ({ wrap = false, className, children, ...props }) => {
    const codeClassNames = classNames(classes['inline-code'], className, {
        [classes['wrap']]: wrap || (isString(children) && children.length >= WRAP_THRESHOLD),
    })
    return (
        <code className={codeClassNames} {...props}>
            {children}
        </code>
    )
}
InlineCode.propTypes = {
    wrap: PropTypes.bool,
    className: PropTypes.string,
    children: PropTypes.node,
}
|
@ -12,17 +12,17 @@ const spacyTheme = createTheme({
|
|||
theme: 'dark',
|
||||
settings: {
|
||||
background: 'var(--color-front)',
|
||||
foreground: 'var(--color-subtle)',
|
||||
foreground: 'var(--color-subtle-on-dark)',
|
||||
caret: 'var(--color-theme-dark)',
|
||||
selection: 'var(--color-theme)',
|
||||
selectionMatch: 'var(--color-theme)',
|
||||
selection: 'var(--color-theme-dark)',
|
||||
selectionMatch: 'var(--color-theme-dark)',
|
||||
gutterBackground: 'var(--color-front)',
|
||||
gutterForeground: 'var(--color-subtle)',
|
||||
gutterForeground: 'var(--color-subtle-on-dark)',
|
||||
fontFamily: 'var(--font-code)',
|
||||
},
|
||||
styles: [
|
||||
{ tag: t.comment, color: 'var(--syntax-comment)' },
|
||||
{ tag: t.variableName, color: 'var(--color-subtle)' },
|
||||
{ tag: t.variableName, color: 'var(--color-subtle-on-dark)' },
|
||||
{ tag: [t.string, t.special(t.brace)], color: '#fff' },
|
||||
{ tag: t.number, color: 'var(--syntax-number)' },
|
||||
{ tag: t.string, color: 'var(--syntax-selector)' },
|
||||
|
|
|
@ -1,17 +1,17 @@
|
|||
import React from 'react'
|
||||
import classNames from 'classnames'
|
||||
|
||||
import patternDefault from '../images/pattern_blue.jpg'
|
||||
import patternNightly from '../images/pattern_nightly.jpg'
|
||||
import patternLegacy from '../images/pattern_legacy.jpg'
|
||||
import overlayDefault from '../images/pattern_landing.jpg'
|
||||
import overlayNightly from '../images/pattern_landing_nightly.jpg'
|
||||
import overlayLegacy from '../images/pattern_landing_legacy.jpg'
|
||||
import patternDefault from '../images/pattern_blue.png'
|
||||
import patternNightly from '../images/pattern_nightly.png'
|
||||
import patternLegacy from '../images/pattern_legacy.png'
|
||||
import overlayDefault from '../images/pattern_landing.png'
|
||||
import overlayNightly from '../images/pattern_landing_nightly.png'
|
||||
import overlayLegacy from '../images/pattern_landing_legacy.png'
|
||||
|
||||
import Grid from './grid'
|
||||
import { Content } from './main'
|
||||
import Button from './button'
|
||||
import CodeBlock from './code'
|
||||
import CodeBlock from './codeBlock'
|
||||
import { H1, H2, H3 } from './typography'
|
||||
import Link from './link'
|
||||
import classes from '../styles/landing.module.sass'
|
||||
|
@@ -110,6 +110,7 @@ export const LandingBanner = ({
})
const style = {
    '--color-theme': background,
+    '--color-theme-dark': background,
    '--color-back': color,
    backgroundImage: backgroundImage ? `url(${backgroundImage})` : null,
}
@@ -124,7 +125,7 @@ export const LandingBanner = ({
            <span className={classes['label']}>{label}</span>
        </div>
    )}
-    <Link to={to} hidden>
+    <Link to={to} noLinkLayout>
        {title}
    </Link>
</Heading>
|
@ -26,7 +26,7 @@ export default function Link({
|
|||
to,
|
||||
href,
|
||||
onClick,
|
||||
hidden = false,
|
||||
noLinkLayout = false,
|
||||
hideIcon = false,
|
||||
ws = false,
|
||||
forceExternal = false,
|
||||
|
@ -36,10 +36,10 @@ export default function Link({
|
|||
const dest = to || href
|
||||
const external = forceExternal || /(http(s?)):\/\//gi.test(dest)
|
||||
const icon = getIcon(dest)
|
||||
const withIcon = !hidden && !hideIcon && !!icon && !isImage(children)
|
||||
const withIcon = !noLinkLayout && !hideIcon && !!icon && !isImage(children)
|
||||
const sourceWithText = withIcon && isString(children)
|
||||
const linkClassNames = classNames(classes.root, className, {
|
||||
[classes.hidden]: hidden,
|
||||
[classes['no-link-layout']]: noLinkLayout,
|
||||
[classes.nowrap]: (withIcon && !sourceWithText) || icon === 'network',
|
||||
[classes['with-icon']]: withIcon,
|
||||
})
|
||||
|
@ -97,7 +97,7 @@ Link.propTypes = {
|
|||
to: PropTypes.string,
|
||||
href: PropTypes.string,
|
||||
onClick: PropTypes.func,
|
||||
hidden: PropTypes.bool,
|
||||
noLinkLayout: PropTypes.bool,
|
||||
hideIcon: PropTypes.bool,
|
||||
ws: PropTypes.bool,
|
||||
className: PropTypes.string,
|
||||
|
|
|
@ -2,11 +2,11 @@ import React from 'react'
|
|||
import PropTypes from 'prop-types'
|
||||
import classNames from 'classnames'
|
||||
|
||||
import patternBlue from '../images/pattern_blue.jpg'
|
||||
import patternGreen from '../images/pattern_green.jpg'
|
||||
import patternPurple from '../images/pattern_purple.jpg'
|
||||
import patternNightly from '../images/pattern_nightly.jpg'
|
||||
import patternLegacy from '../images/pattern_legacy.jpg'
|
||||
import patternBlue from '../images/pattern_blue.png'
|
||||
import patternGreen from '../images/pattern_green.png'
|
||||
import patternPurple from '../images/pattern_purple.png'
|
||||
import patternNightly from '../images/pattern_nightly.png'
|
||||
import patternLegacy from '../images/pattern_legacy.png'
|
||||
import classes from '../styles/main.module.sass'
|
||||
|
||||
const patterns = {
|
||||
|
|
32
website/src/components/markdownToReact.js
Normal file

@@ -0,0 +1,32 @@
import React, { useEffect, useState } from 'react'
import { serialize } from 'next-mdx-remote/serialize'
import { MDXRemote } from 'next-mdx-remote'
import remarkPlugins from '../../plugins/index.mjs'

/**
 * Convert raw Markdown to React
 * @param {String} markdown - The Markdown markup to convert.
 * @param {Object} [remarkReactComponents] - Optional React components to use
 * for HTML elements.
 * @returns {Node} - The converted React elements.
 */
export default function MarkdownToReact({ markdown }) {
    const [mdx, setMdx] = useState(null)

    useEffect(() => {
        const getMdx = async () => {
            setMdx(
                await serialize(markdown, {
                    parseFrontmatter: false,
                    mdxOptions: {
                        remarkPlugins,
                    },
                })
            )
        }

        getMdx()
    }, [markdown])

    return mdx ? <MDXRemote {...mdx} /> : <></>
}
5
website/src/components/markdownToReactDynamic.js
Normal file

@@ -0,0 +1,5 @@
import dynamic from 'next/dynamic'

export default dynamic(() => import('./markdownToReact'), {
    loading: () => <p>Loading...</p>,
})
|
@ -30,7 +30,7 @@ const NavigationDropdown = ({ items = [], section }) => {
|
|||
|
||||
export default function Navigation({ title, items = [], section, search, alert, children }) {
|
||||
const logo = (
|
||||
<Link to="/" aria-label={title} hidden>
|
||||
<Link to="/" aria-label={title} noLinkLayout>
|
||||
<h1 className={classes.title}>{title}</h1>
|
||||
<SVG src={logoSpacy.src} className={classes.logo} width={300} height={96} />
|
||||
</Link>
|
||||
|
@ -57,7 +57,7 @@ export default function Navigation({ title, items = [], section, search, alert,
|
|||
})
|
||||
return (
|
||||
<li key={i} className={itemClassNames}>
|
||||
<Link to={url} tabIndex={isActive ? '-1' : null} hidden>
|
||||
<Link to={url} tabIndex={isActive ? '-1' : null} noLinkLayout>
|
||||
{text}
|
||||
</Link>
|
||||
</li>
|
||||
|
|
|
@ -251,7 +251,12 @@ const Quickstart = ({
|
|||
</menu>
|
||||
</pre>
|
||||
{showCopy && (
|
||||
<textarea ref={copyAreaRef} className={classes['copy-area']} rows={1} />
|
||||
<textarea
|
||||
ref={copyAreaRef}
|
||||
className={classes['copy-area']}
|
||||
rows={1}
|
||||
aria-label={`Interactive code example for ${title}`}
|
||||
/>
|
||||
)}
|
||||
</div>
|
||||
</Container>
|
||||
|
|
|
@ -9,15 +9,15 @@ import classes from '../styles/readnext.module.sass'
|
|||
|
||||
export default function ReadNext({ title, to }) {
|
||||
return (
|
||||
<div className={classes.root}>
|
||||
<Link to={to} hidden>
|
||||
<Link to={to} noLinkLayout className={classes.root}>
|
||||
<span>
|
||||
<Label>Read next</Label>
|
||||
{title}
|
||||
</Link>
|
||||
<Link to={to} hidden className={classes.icon} aria-hidden="true">
|
||||
<Icon name="arrowright" />
|
||||
</Link>
|
||||
</div>
|
||||
</span>
|
||||
<span className={classes.icon}>
|
||||
<Icon name="arrowright" aria-hidden="true" />
|
||||
</span>
|
||||
</Link>
|
||||
)
|
||||
}
|
||||
|
||||
|
|
|
@ -9,6 +9,8 @@ import socialImageLegacy from '../images/social_legacy.jpg'
|
|||
import siteMetadata from '../../meta/site.json'
|
||||
import Head from 'next/head'
|
||||
|
||||
import { siteUrl } from '../../meta/dynamicMeta.mjs'
|
||||
|
||||
function getPageTitle(title, sitename, slogan, sectionTitle, nightly, legacy) {
|
||||
if (sectionTitle && title) {
|
||||
const suffix = nightly ? ' (nightly)' : legacy ? ' (legacy)' : ''
|
||||
|
@ -25,7 +27,7 @@ function getImage(section, nightly, legacy) {
|
|||
if (legacy) return socialImageLegacy
|
||||
if (section === 'api') return socialImageApi
|
||||
if (section === 'universe') return socialImageUniverse
|
||||
return socialImageDefault
|
||||
return `${siteUrl}${socialImageDefault.src}`
|
||||
}
|
||||
|
||||
export default function SEO({
|
||||
|
@ -46,7 +48,7 @@ export default function SEO({
|
|||
nightly,
|
||||
legacy
|
||||
)
|
||||
const socialImage = getImage(section, nightly, legacy).src
|
||||
const socialImage = getImage(section, nightly, legacy)
|
||||
const meta = [
|
||||
{
|
||||
name: 'description',
|
||||
|
|