Merge pull request #12192 from adrianeboyd/chore/update-v4-from-master-5
Update v4 from master, format, update CI
.gitignore (vendored)
@@ -10,16 +10,6 @@ spacy/tests/package/setup.cfg
 spacy/tests/package/pyproject.toml
 spacy/tests/package/requirements.txt
 
-# Website
-website/.cache/
-website/public/
-website/node_modules
-website/.npm
-website/logs
-*.log
-npm-debug.log*
-quickstart-training-generator.js
-
 # Cython / C extensions
 cythonize.json
 spacy/*.html
README.md
@@ -16,7 +16,7 @@ production-ready [**training system**](https://spacy.io/usage/training) and easy
 model packaging, deployment and workflow management. spaCy is commercial
 open-source software, released under the [MIT license](https://github.com/explosion/spaCy/blob/master/LICENSE).
 
-💫 **Version 3.4 out now!**
+💫 **Version 3.5 out now!**
 [Check out the release notes here.](https://github.com/explosion/spaCy/releases)
 
 [![Azure Pipelines](https://img.shields.io/azure-devops/build/explosion-ai/public/8/master.svg?logo=azure-pipelines&style=flat-square&label=build)](https://dev.azure.com/explosion-ai/public/_build?definitionId=8)
azure-pipelines.yml
@@ -11,18 +11,28 @@ trigger:
    exclude:
      - "website/*"
      - "*.md"
+     - "*.mdx"
      - ".github/workflows/*"
 pr:
   paths:
     exclude:
       - "*.md"
+      - "*.mdx"
       - "website/docs/*"
       - "website/src/*"
+      - "website/meta/*.tsx"
+      - "website/meta/*.mjs"
+      - "website/meta/languages.json"
+      - "website/meta/site.json"
+      - "website/meta/sidebars.json"
+      - "website/meta/type-annotations.json"
+      - "website/pages/*"
       - ".github/workflows/*"
 
 jobs:
-  # Perform basic checks for most important errors (syntax etc.) Uses the config
-  # defined in .flake8 and overwrites the selected codes.
+  # Check formatting and linting. Perform basic checks for most important errors
+  # (syntax etc.) Uses the config defined in setup.cfg and overwrites the
+  # selected codes.
   - job: "Validate"
     pool:
       vmImage: "ubuntu-latest"
@@ -30,6 +40,10 @@ jobs:
      - task: UsePythonVersion@0
        inputs:
          versionSpec: "3.7"
+     - script: |
+         pip install black==22.3.0
+         python -m black spacy --check
+       displayName: "black"
      - script: |
          pip install flake8==5.0.4
          python -m flake8 spacy --count --select=E901,E999,F821,F822,F823,W605 --show-source --statistics
requirements.txt
@@ -22,7 +22,7 @@ langcodes>=3.2.0,<4.0.0
 # Official Python utilities
 setuptools
 packaging>=20.0
-typing_extensions>=3.7.4.1,<4.2.0; python_version < "3.8"
+typing_extensions>=3.7.4.1,<4.5.0; python_version < "3.8"
 # Development dependencies
 pre-commit>=2.13.0
 cython>=0.25,<3.0
setup.cfg
@@ -55,7 +55,7 @@ install_requires =
     # Official Python utilities
     setuptools
     packaging>=20.0
-    typing_extensions>=3.7.4,<4.2.0; python_version < "3.8"
+    typing_extensions>=3.7.4.1,<4.5.0; python_version < "3.8"
     langcodes>=3.2.0,<4.0.0
 
 [options.entry_points]
spacy/cli/__init__.py
@@ -4,6 +4,7 @@ from ._util import app, setup_cli  # noqa: F401
 
 # These are the actual functions, NOT the wrapped CLI commands. The CLI commands
 # are registered automatically and won't have to be imported here.
+from .benchmark_speed import benchmark_speed_cli  # noqa: F401
 from .download import download  # noqa: F401
 from .info import info  # noqa: F401
 from .package import package  # noqa: F401
spacy/cli/_util.py
@@ -46,6 +46,7 @@ DEBUG_HELP = """Suite of helpful commands for debugging and profiling. Includes
 commands to check and validate your config files, training and evaluation data,
 and custom model implementations.
 """
+BENCHMARK_HELP = """Commands for benchmarking pipelines."""
 INIT_HELP = """Commands for initializing configs and pipeline packages."""
 
 # Wrappers for Typer's annotations. Initially created to set defaults and to
@@ -54,12 +55,14 @@ Arg = typer.Argument
 Opt = typer.Option
 
 app = typer.Typer(name=NAME, help=HELP)
+benchmark_cli = typer.Typer(name="benchmark", help=BENCHMARK_HELP, no_args_is_help=True)
 project_cli = typer.Typer(name="project", help=PROJECT_HELP, no_args_is_help=True)
 debug_cli = typer.Typer(name="debug", help=DEBUG_HELP, no_args_is_help=True)
 init_cli = typer.Typer(name="init", help=INIT_HELP, no_args_is_help=True)
 
 app.add_typer(project_cli)
 app.add_typer(debug_cli)
+app.add_typer(benchmark_cli)
 app.add_typer(init_cli)
 
 
spacy/cli/benchmark_speed.py (new file, 174 lines)
@@ -0,0 +1,174 @@
+from typing import Iterable, List, Optional
+import random
+from itertools import islice
+import numpy
+from pathlib import Path
+import time
+from tqdm import tqdm
+import typer
+from wasabi import msg
+
+from .. import util
+from ..language import Language
+from ..tokens import Doc
+from ..training import Corpus
+from ._util import Arg, Opt, benchmark_cli, setup_gpu
+
+
+@benchmark_cli.command(
+    "speed",
+    context_settings={"allow_extra_args": True, "ignore_unknown_options": True},
+)
+def benchmark_speed_cli(
+    # fmt: off
+    ctx: typer.Context,
+    model: str = Arg(..., help="Model name or path"),
+    data_path: Path = Arg(..., help="Location of binary evaluation data in .spacy format", exists=True),
+    batch_size: Optional[int] = Opt(None, "--batch-size", "-b", min=1, help="Override the pipeline batch size"),
+    no_shuffle: bool = Opt(False, "--no-shuffle", help="Do not shuffle benchmark data"),
+    use_gpu: int = Opt(-1, "--gpu-id", "-g", help="GPU ID or -1 for CPU"),
+    n_batches: int = Opt(50, "--batches", help="Minimum number of batches to benchmark", min=30,),
+    warmup_epochs: int = Opt(3, "--warmup", "-w", min=0, help="Number of iterations over the data for warmup"),
+    # fmt: on
+):
+    """
+    Benchmark a pipeline. Expects a loadable spaCy pipeline and benchmark
+    data in the binary .spacy format.
+    """
+    setup_gpu(use_gpu=use_gpu, silent=False)
+
+    nlp = util.load_model(model)
+    batch_size = batch_size if batch_size is not None else nlp.batch_size
+    corpus = Corpus(data_path)
+    docs = [eg.predicted for eg in corpus(nlp)]
+
+    if len(docs) == 0:
+        msg.fail("Cannot benchmark speed using an empty corpus.", exits=1)
+
+    print(f"Warming up for {warmup_epochs} epochs...")
+    warmup(nlp, docs, warmup_epochs, batch_size)
+
+    print()
+    print(f"Benchmarking {n_batches} batches...")
+    wps = benchmark(nlp, docs, n_batches, batch_size, not no_shuffle)
+
+    print()
+    print_outliers(wps)
+    print_mean_with_ci(wps)
+
+
+# Lowercased, behaves as a context manager function.
+class time_context:
+    """Register the running time of a context."""
+
+    def __enter__(self):
+        self.start = time.perf_counter()
+        return self
+
+    def __exit__(self, type, value, traceback):
+        self.elapsed = time.perf_counter() - self.start
+
+
+class Quartiles:
+    """Calculate the q1, q2, q3 quartiles and the inter-quartile range (iqr)
+    of a sample."""
+
+    q1: float
+    q2: float
+    q3: float
+    iqr: float
+
+    def __init__(self, sample: numpy.ndarray) -> None:
+        self.q1 = numpy.quantile(sample, 0.25)
+        self.q2 = numpy.quantile(sample, 0.5)
+        self.q3 = numpy.quantile(sample, 0.75)
+        self.iqr = self.q3 - self.q1
+
+
+def annotate(
+    nlp: Language, docs: List[Doc], batch_size: Optional[int]
+) -> numpy.ndarray:
+    docs = nlp.pipe(tqdm(docs, unit="doc"), batch_size=batch_size)
+    wps = []
+    while True:
+        with time_context() as elapsed:
+            batch_docs = list(
+                islice(docs, batch_size if batch_size else nlp.batch_size)
+            )
+        if len(batch_docs) == 0:
+            break
+        n_tokens = count_tokens(batch_docs)
+        wps.append(n_tokens / elapsed.elapsed)
+
+    return numpy.array(wps)
+
+
+def benchmark(
+    nlp: Language,
+    docs: List[Doc],
+    n_batches: int,
+    batch_size: int,
+    shuffle: bool,
+) -> numpy.ndarray:
+    if shuffle:
+        bench_docs = [
+            nlp.make_doc(random.choice(docs).text)
+            for _ in range(n_batches * batch_size)
+        ]
+    else:
+        bench_docs = [
+            nlp.make_doc(docs[i % len(docs)].text)
+            for i in range(n_batches * batch_size)
+        ]
+
+    return annotate(nlp, bench_docs, batch_size)
+
+
+def bootstrap(x, statistic=numpy.mean, iterations=10000) -> numpy.ndarray:
+    """Apply a statistic to repeated random samples of an array."""
+    return numpy.fromiter(
+        (
+            statistic(numpy.random.choice(x, len(x), replace=True))
+            for _ in range(iterations)
+        ),
+        numpy.float64,
+    )
+
+
+def count_tokens(docs: Iterable[Doc]) -> int:
+    return sum(len(doc) for doc in docs)
+
+
+def print_mean_with_ci(sample: numpy.ndarray):
+    mean = numpy.mean(sample)
+    bootstrap_means = bootstrap(sample)
+    bootstrap_means.sort()
+
+    # 95% confidence interval
+    low = bootstrap_means[int(len(bootstrap_means) * 0.025)]
+    high = bootstrap_means[int(len(bootstrap_means) * 0.975)]
+
+    print(f"Mean: {mean:.1f} words/s (95% CI: {low-mean:.1f} +{high-mean:.1f})")
+
+
+def print_outliers(sample: numpy.ndarray):
+    quartiles = Quartiles(sample)
+
+    n_outliers = numpy.sum(
+        (sample < (quartiles.q1 - 1.5 * quartiles.iqr))
+        | (sample > (quartiles.q3 + 1.5 * quartiles.iqr))
+    )
+    n_extreme_outliers = numpy.sum(
+        (sample < (quartiles.q1 - 3.0 * quartiles.iqr))
+        | (sample > (quartiles.q3 + 3.0 * quartiles.iqr))
+    )
+    print(
+        f"Outliers: {(100 * n_outliers) / len(sample):.1f}%, extreme outliers: {(100 * n_extreme_outliers) / len(sample)}%"
+    )
+
+
+def warmup(
+    nlp: Language, docs: List[Doc], warmup_epochs: int, batch_size: Optional[int]
+) -> numpy.ndarray:
+    docs = warmup_epochs * docs
+    return annotate(nlp, docs, batch_size)
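Since the command is registered on `benchmark_cli`, it is reachable as `spacy benchmark speed`. A minimal sketch of exercising it in-process, borrowing the `CliRunner` pattern the tests later in this PR use (the model name and data path are illustrative placeholders):

```python
# Invoke the new benchmark command through the Typer app without shelling out.
# "en_core_web_sm" and "dev.spacy" are illustrative; any loadable pipeline and
# any .spacy file will do.
from typer.testing import CliRunner

from spacy.cli._util import app

result = CliRunner().invoke(
    app,
    ["benchmark", "speed", "en_core_web_sm", "dev.spacy", "--batches", "50"],
)
# Prints the mean words/s with a bootstrapped 95% CI, plus outlier percentages.
print(result.stdout)
```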
spacy/cli/debug_data.py
@@ -17,6 +17,7 @@ from ..pipeline import TrainablePipe
 from ..pipeline._parser_internals import nonproj
 from ..pipeline._parser_internals.nonproj import DELIMITER
 from ..pipeline import Morphologizer, SpanCategorizer
+from ..pipeline._edit_tree_internals.edit_trees import EditTrees
 from ..morphology import Morphology
 from ..language import Language
 from ..util import registry, resolve_dot_names
@@ -671,6 +672,59 @@ def debug_data(
            f"Found {gold_train_data['n_cycles']} projectivized train sentence(s) with cycles"
        )
 
+    if "trainable_lemmatizer" in factory_names:
+        msg.divider("Trainable Lemmatizer")
+        trees_train: Set[str] = gold_train_data["lemmatizer_trees"]
+        trees_dev: Set[str] = gold_dev_data["lemmatizer_trees"]
+        # This is necessary context when someone is attempting to interpret whether the
+        # number of trees exclusively in the dev set is meaningful.
+        msg.info(f"{len(trees_train)} lemmatizer trees generated from training data")
+        msg.info(f"{len(trees_dev)} lemmatizer trees generated from dev data")
+        dev_not_train = trees_dev - trees_train
+
+        if len(dev_not_train) != 0:
+            pct = len(dev_not_train) / len(trees_dev)
+            msg.info(
+                f"{len(dev_not_train)} lemmatizer trees ({pct*100:.1f}% of dev trees)"
+                " were found exclusively in the dev data."
+            )
+        else:
+            # Would we ever expect this case? It seems like it would be pretty rare,
+            # and we might actually want a warning?
+            msg.info("All trees in dev data present in training data.")
+
+        if gold_train_data["n_low_cardinality_lemmas"] > 0:
+            n = gold_train_data["n_low_cardinality_lemmas"]
+            msg.warn(f"{n} training docs with 0 or 1 unique lemmas.")
+
+        if gold_dev_data["n_low_cardinality_lemmas"] > 0:
+            n = gold_dev_data["n_low_cardinality_lemmas"]
+            msg.warn(f"{n} dev docs with 0 or 1 unique lemmas.")
+
+        if gold_train_data["no_lemma_annotations"] > 0:
+            n = gold_train_data["no_lemma_annotations"]
+            msg.warn(f"{n} training docs with no lemma annotations.")
+        else:
+            msg.good("All training docs have lemma annotations.")
+
+        if gold_dev_data["no_lemma_annotations"] > 0:
+            n = gold_dev_data["no_lemma_annotations"]
+            msg.warn(f"{n} dev docs with no lemma annotations.")
+        else:
+            msg.good("All dev docs have lemma annotations.")
+
+        if gold_train_data["partial_lemma_annotations"] > 0:
+            n = gold_train_data["partial_lemma_annotations"]
+            msg.info(f"{n} training docs with partial lemma annotations.")
+        else:
+            msg.good("All training docs have complete lemma annotations.")
+
+        if gold_dev_data["partial_lemma_annotations"] > 0:
+            n = gold_dev_data["partial_lemma_annotations"]
+            msg.info(f"{n} dev docs with partial lemma annotations.")
+        else:
+            msg.good("All dev docs have complete lemma annotations.")
+
     msg.divider("Summary")
     good_counts = msg.counts[MESSAGES.GOOD]
     warn_counts = msg.counts[MESSAGES.WARN]
@@ -732,7 +786,13 @@ def _compile_gold(
        "n_cats_multilabel": 0,
        "n_cats_bad_values": 0,
        "texts": set(),
+       "lemmatizer_trees": set(),
+       "no_lemma_annotations": 0,
+       "partial_lemma_annotations": 0,
+       "n_low_cardinality_lemmas": 0,
    }
+    if "trainable_lemmatizer" in factory_names:
+        trees = EditTrees(nlp.vocab.strings)
    for eg in examples:
        gold = eg.reference
        doc = eg.predicted
@@ -862,6 +922,25 @@ def _compile_gold(
                data["n_nonproj"] += 1
            if nonproj.contains_cycle(aligned_heads):
                data["n_cycles"] += 1
+        if "trainable_lemmatizer" in factory_names:
+            # from EditTreeLemmatizer._labels_from_data
+            if all(token.lemma == 0 for token in gold):
+                data["no_lemma_annotations"] += 1
+                continue
+            if any(token.lemma == 0 for token in gold):
+                data["partial_lemma_annotations"] += 1
+            lemma_set = set()
+            for token in gold:
+                if token.lemma != 0:
+                    lemma_set.add(token.lemma)
+                    tree_id = trees.add(token.text, token.lemma_)
+                    tree_str = trees.tree_to_str(tree_id)
+                    data["lemmatizer_trees"].add(tree_str)
+            # We want to identify cases where lemmas aren't assigned
+            # or are all assigned the same value, as this would indicate
+            # an issue since we're expecting a large set of lemmas
+            if len(lemma_set) < 2 and len(gold) > 1:
+                data["n_low_cardinality_lemmas"] += 1
    return data
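The checks above hinge on `EditTrees`: every gold `(form, lemma)` pair is reduced to an edit tree, and a tree that occurs only in the dev set can never be predicted by a lemmatizer trained on the training set. A small sketch of that bookkeeping, using the same calls as `_compile_gold`:

```python
# Derive an edit tree from a (form, lemma) pair, as _compile_gold does above.
from spacy.lang.en import English
from spacy.pipeline._edit_tree_internals.edit_trees import EditTrees

nlp = English()
trees = EditTrees(nlp.vocab.strings)
tree_id = trees.add("eggs", "egg")     # edit tree transforming the form into its lemma
tree_str = trees.tree_to_str(tree_id)  # string form, collected into per-split sets
print(tree_str)
```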
spacy/cli/evaluate.py
@@ -7,12 +7,15 @@ from thinc.api import fix_random_seed
 
 from ..training import Corpus
 from ..tokens import Doc
-from ._util import app, Arg, Opt, setup_gpu, import_code
+from ._util import app, Arg, Opt, setup_gpu, import_code, benchmark_cli
 from ..scorer import Scorer
 from .. import util
 from .. import displacy
 
 
+@benchmark_cli.command(
+    "accuracy",
+)
 @app.command("evaluate")
 def evaluate_cli(
     # fmt: off
@@ -36,7 +39,7 @@ def evaluate_cli(
     dependency parses in a HTML file, set as output directory as the
     displacy_path argument.
 
-    DOCS: https://spacy.io/api/cli#evaluate
+    DOCS: https://spacy.io/api/cli#benchmark-accuracy
     """
     import_code(code_path)
     evaluate(
spacy/displacy/__init__.py
@@ -106,9 +106,7 @@ def serve(
 
     if is_in_jupyter():
         warnings.warn(Warnings.W011)
-    render(
-        docs, style=style, page=page, minify=minify, options=options, manual=manual
-    )
+    render(docs, style=style, page=page, minify=minify, options=options, manual=manual)
     httpd = simple_server.make_server(host, port, app)
     print(f"\nUsing the '{style}' visualizer")
     print(f"Serving on http://{host}:{port} ...\n")
spacy/errors.py
@@ -949,8 +949,8 @@ class Errors(metaclass=ErrorsWithCodes):
     E1047 = ("`find_threshold()` only supports components with a `scorer` attribute.")
     E1048 = ("Got '{unexpected}' as console progress bar type, but expected one of the following: {expected}")
     E1049 = ("No available port found for displaCy on host {host}. Please specify an available port "
-             "with `displacy.serve(doc, port)`")
-    E1050 = ("Port {port} is already in use. Please specify an available port with `displacy.serve(doc, port)` "
+             "with `displacy.serve(doc, port=port)`")
+    E1050 = ("Port {port} is already in use. Please specify an available port with `displacy.serve(doc, port=port)` "
              "or use `auto_switch_port=True` to pick an available port automatically.")
 
     # v4 error strings
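Both messages now show `port` as a keyword argument, matching how `displacy.serve` is actually called. A sketch of the two remedies the errors suggest (assumes a loadable pipeline such as `en_core_web_sm`):

```python
import spacy
from spacy import displacy

nlp = spacy.load("en_core_web_sm")  # illustrative pipeline
doc = nlp("She works in Berlin.")

# Remedy 1: pass a known-free port explicitly as a keyword argument.
displacy.serve(doc, style="dep", port=5001)
# Remedy 2: let displaCy pick the next available port automatically.
# displacy.serve(doc, style="dep", auto_switch_port=True)
```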
spacy/kb/kb_in_memory.pyx
@@ -25,7 +25,7 @@ cdef class InMemoryLookupKB(KnowledgeBase):
     """An `InMemoryLookupKB` instance stores unique identifiers for entities and their textual aliases,
     to support entity linking of named entities to real-world concepts.
 
-    DOCS: https://spacy.io/api/kb_in_memory
+    DOCS: https://spacy.io/api/inmemorylookupkb
     """
 
     def __init__(self, Vocab vocab, entity_vector_length):
spacy/matcher/levenshtein.pyx
@@ -22,7 +22,7 @@ cpdef bint levenshtein_compare(input_text: str, pattern_text: str, fuzzy: int =
         max_edits = fuzzy
     else:
         # allow at least two edits (to allow at least one transposition) and up
-        # to 20% of the pattern string length
+        # to 30% of the pattern string length
         max_edits = max(2, round(0.3 * len(pattern_text)))
     return levenshtein(input_text, pattern_text, max_edits) <= max_edits
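`levenshtein_compare` is the default `fuzzy_compare` hook for the `Matcher` (see the updated `matcher.pyi` stub below), so by default a `FUZZY` pattern tolerates at least two edits and up to 30% of the pattern length. A small sketch of fuzzy token matching:

```python
# Fuzzy token matching, resolved through levenshtein_compare's default budget.
import spacy
from spacy.matcher import Matcher

nlp = spacy.blank("en")
matcher = Matcher(nlp.vocab)
matcher.add("EXAMPLE", [[{"LOWER": {"FUZZY": "definitely"}}]])

doc = nlp("This is definately fine.")
for match_id, start, end in matcher(doc):
    print(doc[start:end].text)  # "definately" falls within the edit budget
```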
spacy/matcher/matcher.pyi
@@ -5,8 +5,12 @@ from ..vocab import Vocab
 from ..tokens import Doc, Span
 
 class Matcher:
-    def __init__(self, vocab: Vocab, validate: bool = ...,
-                 fuzzy_compare: Callable[[str, str, int], bool] = ...) -> None: ...
+    def __init__(
+        self,
+        vocab: Vocab,
+        validate: bool = ...,
+        fuzzy_compare: Callable[[str, str, int], bool] = ...,
+    ) -> None: ...
     def __reduce__(self) -> Any: ...
     def __len__(self) -> int: ...
     def __contains__(self, key: str) -> bool: ...
spacy/pipeline/edit_tree_lemmatizer.py
@@ -5,7 +5,7 @@ from itertools import islice
 import numpy as np
 
 import srsly
-from thinc.api import Config, Model
+from thinc.api import Config, Model, SequenceCategoricalCrossentropy, NumpyOps
 from thinc.types import ArrayXd, Floats2d, Ints1d
 from thinc.legacy import LegacySequenceCategoricalCrossentropy
 
@@ -22,6 +22,8 @@ from .. import util
 
 
 ActivationsT = Dict[str, Union[List[Floats2d], List[Ints1d]]]
+# The cutoff value of *top_k* above which an alternative method is used to process guesses.
+TOP_K_GUARDRAIL = 20
 
 
 default_model_config = """
@@ -125,6 +127,7 @@ class EditTreeLemmatizer(TrainablePipe):
         self.cfg: Dict[str, Any] = {"labels": []}
         self.scorer = scorer
         self.save_activations = save_activations
+        self.numpy_ops = NumpyOps()
 
     def get_loss(
         self, examples: Iterable[Example], scores: List[Floats2d]
@@ -140,7 +143,7 @@ class EditTreeLemmatizer(TrainablePipe):
             for (predicted, gold_lemma) in zip(
                 eg.predicted, eg.get_aligned("LEMMA", as_string=True)
             ):
-                if gold_lemma is None:
+                if gold_lemma is None or gold_lemma == "":
                     label = -1
                 else:
                     tree_id = self.trees.add(predicted.text, gold_lemma)
@@ -175,6 +178,18 @@ class EditTreeLemmatizer(TrainablePipe):
         return float(loss), d_scores
 
     def predict(self, docs: Iterable[Doc]) -> ActivationsT:
+        if self.top_k == 1:
+            scores2guesses = self._scores2guesses_top_k_equals_1
+        elif self.top_k <= TOP_K_GUARDRAIL:
+            scores2guesses = self._scores2guesses_top_k_greater_1
+        else:
+            scores2guesses = self._scores2guesses_top_k_guardrail
+        # The behaviour of *_scores2guesses_top_k_greater_1()* is efficient for values
+        # of *top_k>1* that are likely to be useful when the edit tree lemmatizer is used
+        # for its principal purpose of lemmatizing tokens. However, the code could also
+        # be used for other purposes, and with very large values of *top_k* the method
+        # becomes inefficient. In such cases, *_scores2guesses_top_k_guardrail()* is used
+        # instead.
         n_docs = len(list(docs))
         if not any(len(doc) for doc in docs):
             # Handle cases where there are no tokens in any docs.
@@ -189,20 +204,52 @@ class EditTreeLemmatizer(TrainablePipe):
             return {"probabilities": scores, "tree_ids": guesses}
         scores = self.model.predict(docs)
         assert len(scores) == n_docs
-        guesses = self._scores2guesses(docs, scores)
+        guesses = scores2guesses(docs, scores)
         assert len(guesses) == n_docs
         return {"probabilities": scores, "tree_ids": guesses}
 
-    def _scores2guesses(self, docs, scores):
+    def _scores2guesses_top_k_equals_1(self, docs, scores):
         guesses = []
         for doc, doc_scores in zip(docs, scores):
-            if self.top_k == 1:
-                doc_guesses = doc_scores.argmax(axis=1).reshape(-1, 1)
-            else:
-                doc_guesses = np.argsort(doc_scores)[..., : -self.top_k - 1 : -1]
-
-            if not isinstance(doc_guesses, np.ndarray):
-                doc_guesses = doc_guesses.get()
+            doc_guesses = doc_scores.argmax(axis=1)
+            doc_guesses = self.numpy_ops.asarray(doc_guesses)
+
+            doc_compat_guesses = []
+            for i, token in enumerate(doc):
+                tree_id = self.cfg["labels"][doc_guesses[i]]
+                if self.trees.apply(tree_id, token.text) is not None:
+                    doc_compat_guesses.append(tree_id)
+                else:
+                    doc_compat_guesses.append(-1)
+            guesses.append(np.array(doc_compat_guesses))
+
+        return guesses
+
+    def _scores2guesses_top_k_greater_1(self, docs, scores):
+        guesses = []
+        top_k = min(self.top_k, len(self.labels))
+        for doc, doc_scores in zip(docs, scores):
+            doc_scores = self.numpy_ops.asarray(doc_scores)
+            doc_compat_guesses = []
+            for i, token in enumerate(doc):
+                for _ in range(top_k):
+                    candidate = int(doc_scores[i].argmax())
+                    candidate_tree_id = self.cfg["labels"][candidate]
+                    if self.trees.apply(candidate_tree_id, token.text) is not None:
+                        doc_compat_guesses.append(candidate_tree_id)
+                        break
+                    doc_scores[i, candidate] = np.finfo(np.float32).min
+                else:
+                    doc_compat_guesses.append(-1)
+            guesses.append(np.array(doc_compat_guesses))
+
+        return guesses
+
+    def _scores2guesses_top_k_guardrail(self, docs, scores):
+        guesses = []
+        for doc, doc_scores in zip(docs, scores):
+            doc_guesses = np.argsort(doc_scores)[..., : -self.top_k - 1 : -1]
+            doc_guesses = self.numpy_ops.asarray(doc_guesses)
 
             doc_compat_guesses = []
             for token, candidates in zip(doc, doc_guesses):
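Which `_scores2guesses_*` variant runs is decided purely by `top_k`, so the setting can be exercised directly from the pipe config, as the parametrized tests later in this PR do:

```python
# Select the prediction strategy via top_k:
#   1    -> argmax fast path (_scores2guesses_top_k_equals_1)
#   2-20 -> iterative candidate search (_scores2guesses_top_k_greater_1)
#   > 20 -> argsort guardrail (_scores2guesses_top_k_guardrail), TOP_K_GUARDRAIL = 20
from spacy.lang.en import English

nlp = English()
nlp.add_pipe("trainable_lemmatizer", config={"top_k": 5})
```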
spacy/pipeline/entity_linker.py
@@ -453,7 +453,11 @@ class EntityLinker(TrainablePipe):
         docs_ents: List[Ragged] = []
         docs_scores: List[Ragged] = []
         if not docs:
-            return {KNOWLEDGE_BASE_IDS: final_kb_ids, "ents": docs_ents, "scores": docs_scores}
+            return {
+                KNOWLEDGE_BASE_IDS: final_kb_ids,
+                "ents": docs_ents,
+                "scores": docs_scores,
+            }
         if isinstance(docs, Doc):
             docs = [docs]
         for doc in docs:
@@ -585,7 +589,11 @@ class EntityLinker(TrainablePipe):
                 method="predict", msg="result variables not of equal length"
             )
             raise RuntimeError(err)
-        return {KNOWLEDGE_BASE_IDS: final_kb_ids, "ents": docs_ents, "scores": docs_scores}
+        return {
+            KNOWLEDGE_BASE_IDS: final_kb_ids,
+            "ents": docs_ents,
+            "scores": docs_scores,
+        }
 
     def set_annotations(self, docs: Iterable[Doc], activations: ActivationsT) -> None:
         """Modify a batch of documents, using pre-computed scores.
spacy/pipeline/ner.py
@@ -252,8 +252,11 @@ class EntityRecognizer(Parser):
     def labels(self):
         # Get the labels from the model by looking at the available moves, e.g.
         # B-PERSON, I-PERSON, L-PERSON, U-PERSON
-        labels = set(remove_bilu_prefix(move) for move in self.move_names
-                     if move[0] in ("B", "I", "L", "U"))
+        labels = set(
+            remove_bilu_prefix(move)
+            for move in self.move_names
+            if move[0] in ("B", "I", "L", "U")
+        )
         return tuple(sorted(labels))
 
     def scored_ents(self, beams):
spacy/schemas.py
@@ -163,15 +163,33 @@ class TokenPatternString(BaseModel):
     IS_SUPERSET: Optional[List[StrictStr]] = Field(None, alias="is_superset")
     INTERSECTS: Optional[List[StrictStr]] = Field(None, alias="intersects")
     FUZZY: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy")
-    FUZZY1: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy1")
-    FUZZY2: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy2")
-    FUZZY3: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy3")
-    FUZZY4: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy4")
-    FUZZY5: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy5")
-    FUZZY6: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy6")
-    FUZZY7: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy7")
-    FUZZY8: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy8")
-    FUZZY9: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy9")
+    FUZZY1: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
+        None, alias="fuzzy1"
+    )
+    FUZZY2: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
+        None, alias="fuzzy2"
+    )
+    FUZZY3: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
+        None, alias="fuzzy3"
+    )
+    FUZZY4: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
+        None, alias="fuzzy4"
+    )
+    FUZZY5: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
+        None, alias="fuzzy5"
+    )
+    FUZZY6: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
+        None, alias="fuzzy6"
+    )
+    FUZZY7: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
+        None, alias="fuzzy7"
+    )
+    FUZZY8: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
+        None, alias="fuzzy8"
+    )
+    FUZZY9: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
+        None, alias="fuzzy9"
+    )
 
     class Config:
         extra = "forbid"
spacy/tests/pipeline/test_edit_tree_lemmatizer.py
@@ -103,14 +103,15 @@ def test_initialize_from_labels():
     }
 
 
-def test_no_data():
+@pytest.mark.parametrize("top_k", (1, 5, 30))
+def test_no_data(top_k):
     # Test that the lemmatizer provides a nice error when there's no tagging data / labels
     TEXTCAT_DATA = [
         ("I'm so happy.", {"cats": {"POSITIVE": 1.0, "NEGATIVE": 0.0}}),
         ("I'm so angry", {"cats": {"POSITIVE": 0.0, "NEGATIVE": 1.0}}),
     ]
     nlp = English()
-    nlp.add_pipe("trainable_lemmatizer")
+    nlp.add_pipe("trainable_lemmatizer", config={"top_k": top_k})
     nlp.add_pipe("textcat")
 
     train_examples = []
@@ -121,10 +122,11 @@ def test_no_data():
         nlp.initialize(get_examples=lambda: train_examples)
 
 
-def test_incomplete_data():
+@pytest.mark.parametrize("top_k", (1, 5, 30))
+def test_incomplete_data(top_k):
     # Test that the lemmatizer works with incomplete information
     nlp = English()
-    lemmatizer = nlp.add_pipe("trainable_lemmatizer")
+    lemmatizer = nlp.add_pipe("trainable_lemmatizer", config={"top_k": top_k})
     lemmatizer.min_tree_freq = 1
     train_examples = []
     for t in PARTIAL_DATA:
@@ -141,10 +143,25 @@ def test_incomplete_data():
     assert doc[1].lemma_ == "like"
     assert doc[2].lemma_ == "blue"
 
+    # Check that incomplete annotations are ignored.
+    scores, _ = lemmatizer.model([eg.predicted for eg in train_examples], is_train=True)
+    _, dX = lemmatizer.get_loss(train_examples, scores)
+    xp = lemmatizer.model.ops.xp
 
-def test_overfitting_IO():
+    # Missing annotations.
+    assert xp.count_nonzero(dX[0][0]) == 0
+    assert xp.count_nonzero(dX[0][3]) == 0
+    assert xp.count_nonzero(dX[1][0]) == 0
+    assert xp.count_nonzero(dX[1][3]) == 0
+
+    # Misaligned annotations.
+    assert xp.count_nonzero(dX[1][1]) == 0
+
+
+@pytest.mark.parametrize("top_k", (1, 5, 30))
+def test_overfitting_IO(top_k):
     nlp = English()
-    lemmatizer = nlp.add_pipe("trainable_lemmatizer")
+    lemmatizer = nlp.add_pipe("trainable_lemmatizer", config={"top_k": top_k})
     lemmatizer.min_tree_freq = 1
     train_examples = []
     for t in TRAIN_DATA:
@@ -177,7 +194,7 @@ def test_overfitting_IO():
     # Check model after a {to,from}_bytes roundtrip
     nlp_bytes = nlp.to_bytes()
     nlp3 = English()
-    nlp3.add_pipe("trainable_lemmatizer")
+    nlp3.add_pipe("trainable_lemmatizer", config={"top_k": top_k})
     nlp3.from_bytes(nlp_bytes)
     doc3 = nlp3(test_text)
     assert doc3[0].lemma_ == "she"
spacy/tests/test_cli.py
@@ -618,7 +618,6 @@ def test_string_to_list_intify(value):
     assert string_to_list(value, intify=True) == [1, 2, 3]
 
 
-@pytest.mark.skip(reason="Temporarily skip for dev version")
 def test_download_compatibility():
     spec = SpecifierSet("==" + about.__version__)
     spec.prereleases = False
@@ -629,7 +628,6 @@ def test_download_compatibility():
     assert get_minor_version(about.__version__) == get_minor_version(version)
 
 
-@pytest.mark.skip(reason="Temporarily skip for dev version")
 def test_validate_compatibility_table():
     spec = SpecifierSet("==" + about.__version__)
     spec.prereleases = False
@@ -1076,7 +1074,7 @@ def test_cli_find_threshold(capsys):
        )
        with make_tempdir() as nlp_dir:
            nlp.to_disk(nlp_dir)
-           res = find_threshold(
+           best_threshold, best_score, res = find_threshold(
                model=nlp_dir,
                data_path=docs_dir / "docs.spacy",
                pipe_name="tc_multi",
@@ -1084,10 +1082,10 @@ def test_cli_find_threshold(capsys):
                scores_key="cats_macro_f",
                silent=True,
            )
-           assert res[0] != thresholds[0]
-           assert thresholds[0] < res[0] < thresholds[9]
-           assert res[1] == 1.0
-           assert res[2][1.0] == 0.0
+           assert best_threshold != thresholds[0]
+           assert thresholds[0] < best_threshold < thresholds[9]
+           assert best_score == max(res.values())
+           assert res[1.0] == 0.0
 
        # Test with spancat.
        nlp, _ = init_nlp((("spancat", {}),))
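The rewritten assertions follow `find_threshold`'s new return shape: an unpackable `(best_threshold, best_score, scores)` tuple, where `scores` maps each candidate threshold to its score. A sketch of consuming it (argument values are illustrative placeholders patterned on the test above):

```python
from spacy.cli.find_threshold import find_threshold

best_threshold, best_score, scores = find_threshold(
    model="my_textcat_model",        # illustrative model path
    data_path="docs.spacy",          # illustrative data path
    pipe_name="tc_multi",
    threshold_key="threshold",
    scores_key="cats_macro_f",
    silent=True,
)
# The best score is simply the maximum over all evaluated thresholds.
assert best_score == max(scores.values())
```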
@@ -1209,3 +1207,69 @@ def test_walk_directory():
     assert (len(walk_directory(d, suffix="iob"))) == 2
     assert (len(walk_directory(d, suffix="conll"))) == 3
     assert (len(walk_directory(d, suffix="pdf"))) == 0
+
+
+def test_debug_data_trainable_lemmatizer_basic():
+    examples = [
+        ("She likes green eggs", {"lemmas": ["she", "like", "green", "egg"]}),
+        ("Eat blue ham", {"lemmas": ["eat", "blue", "ham"]}),
+    ]
+    nlp = Language()
+    train_examples = []
+    for t in examples:
+        train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
+
+    data = _compile_gold(train_examples, ["trainable_lemmatizer"], nlp, True)
+    # ref test_edit_tree_lemmatizer::test_initialize_from_labels
+    # this results in 4 trees
+    assert len(data["lemmatizer_trees"]) == 4
+
+
+def test_debug_data_trainable_lemmatizer_partial():
+    partial_examples = [
+        # partial annotation
+        ("She likes green eggs", {"lemmas": ["", "like", "green", ""]}),
+        # misaligned partial annotation
+        (
+            "He hates green eggs",
+            {
+                "words": ["He", "hat", "es", "green", "eggs"],
+                "lemmas": ["", "hat", "e", "green", ""],
+            },
+        ),
+    ]
+    nlp = Language()
+    train_examples = []
+    for t in partial_examples:
+        train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
+
+    data = _compile_gold(train_examples, ["trainable_lemmatizer"], nlp, True)
+    assert data["partial_lemma_annotations"] == 2
+
+
+def test_debug_data_trainable_lemmatizer_low_cardinality():
+    low_cardinality_examples = [
+        ("She likes green eggs", {"lemmas": ["no", "no", "no", "no"]}),
+        ("Eat blue ham", {"lemmas": ["no", "no", "no"]}),
+    ]
+    nlp = Language()
+    train_examples = []
+    for t in low_cardinality_examples:
+        train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
+
+    data = _compile_gold(train_examples, ["trainable_lemmatizer"], nlp, True)
+    assert data["n_low_cardinality_lemmas"] == 2
+
+
+def test_debug_data_trainable_lemmatizer_not_annotated():
+    unannotated_examples = [
+        ("She likes green eggs", {}),
+        ("Eat blue ham", {}),
+    ]
+    nlp = Language()
+    train_examples = []
+    for t in unannotated_examples:
+        train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
+
+    data = _compile_gold(train_examples, ["trainable_lemmatizer"], nlp, True)
+    assert data["no_lemma_annotations"] == 2
spacy/tests/test_cli_app.py
@@ -1,6 +1,7 @@
 import os
 from pathlib import Path
 from typer.testing import CliRunner
+from spacy.tokens import DocBin, Doc
 
 from spacy.cli._util import app
 from .util import make_tempdir
@@ -31,3 +32,60 @@ def test_convert_auto_conflict():
     assert "All input files must be same type" in result.stdout
     out_files = os.listdir(d_out)
     assert len(out_files) == 0
+
+
+def test_benchmark_accuracy_alias():
+    # Verify that the `evaluate` alias works correctly.
+    result_benchmark = CliRunner().invoke(app, ["benchmark", "accuracy", "--help"])
+    result_evaluate = CliRunner().invoke(app, ["evaluate", "--help"])
+    assert result_benchmark.stdout == result_evaluate.stdout.replace(
+        "spacy evaluate", "spacy benchmark accuracy"
+    )
+
+
+def test_debug_data_trainable_lemmatizer_cli(en_vocab):
+    train_docs = [
+        Doc(en_vocab, words=["I", "like", "cats"], lemmas=["I", "like", "cat"]),
+        Doc(
+            en_vocab,
+            words=["Dogs", "are", "great", "too"],
+            lemmas=["dog", "be", "great", "too"],
+        ),
+    ]
+    dev_docs = [
+        Doc(en_vocab, words=["Cats", "are", "cute"], lemmas=["cat", "be", "cute"]),
+        Doc(en_vocab, words=["Pets", "are", "great"], lemmas=["pet", "be", "great"]),
+    ]
+    with make_tempdir() as d_in:
+        train_bin = DocBin(docs=train_docs)
+        train_bin.to_disk(d_in / "train.spacy")
+        dev_bin = DocBin(docs=dev_docs)
+        dev_bin.to_disk(d_in / "dev.spacy")
+        # `debug data` requires an input pipeline config
+        CliRunner().invoke(
+            app,
+            [
+                "init",
+                "config",
+                f"{d_in}/config.cfg",
+                "--lang",
+                "en",
+                "--pipeline",
+                "trainable_lemmatizer",
+            ],
+        )
+        result_debug_data = CliRunner().invoke(
+            app,
+            [
+                "debug",
+                "data",
+                f"{d_in}/config.cfg",
+                "--paths.train",
+                f"{d_in}/train.spacy",
+                "--paths.dev",
+                f"{d_in}/dev.spacy",
+            ],
+        )
+        # Instead of checking specific wording of the output, which may change,
+        # we'll check that this section of the debug output is present.
+        assert "= Trainable Lemmatizer =" in result_debug_data.stdout
spacy/tests/training/test_corpus.py (new file, 78 lines)
@@ -0,0 +1,78 @@
+from typing import IO, Generator, Iterable, List, TextIO, Tuple
+from contextlib import contextmanager
+from pathlib import Path
+import pytest
+import tempfile
+
+from spacy.lang.en import English
+from spacy.training import Example, PlainTextCorpus
+from spacy.util import make_tempdir
+
+# Intentional newlines to check that they are skipped.
+PLAIN_TEXT_DOC = """
+
+This is a doc. It contains two sentences.
+This is another doc.
+
+A third doc.
+
+"""
+
+PLAIN_TEXT_DOC_TOKENIZED = [
+    [
+        "This",
+        "is",
+        "a",
+        "doc",
+        ".",
+        "It",
+        "contains",
+        "two",
+        "sentences",
+        ".",
+    ],
+    ["This", "is", "another", "doc", "."],
+    ["A", "third", "doc", "."],
+]
+
+
+@pytest.mark.parametrize("min_length", [0, 5])
+@pytest.mark.parametrize("max_length", [0, 5])
+def test_plain_text_reader(min_length, max_length):
+    nlp = English()
+    with _string_to_tmp_file(PLAIN_TEXT_DOC) as file_path:
+        corpus = PlainTextCorpus(
+            file_path, min_length=min_length, max_length=max_length
+        )
+
+        check = [
+            doc
+            for doc in PLAIN_TEXT_DOC_TOKENIZED
+            if len(doc) >= min_length and (max_length == 0 or len(doc) <= max_length)
+        ]
+        reference, predicted = _examples_to_tokens(corpus(nlp))
+
+        assert reference == check
+        assert predicted == check
+
+
+@contextmanager
+def _string_to_tmp_file(s: str) -> Generator[Path, None, None]:
+    with make_tempdir() as d:
+        file_path = Path(d) / "string.txt"
+        with open(file_path, "w", encoding="utf-8") as f:
+            f.write(s)
+        yield file_path
+
+
+def _examples_to_tokens(
+    examples: Iterable[Example],
+) -> Tuple[List[List[str]], List[List[str]]]:
+    reference = []
+    predicted = []
+
+    for eg in examples:
+        reference.append([t.text for t in eg.reference])
+        predicted.append([t.text for t in eg.predicted])
+
+    return reference, predicted
spacy/training/__init__.py
@@ -1,4 +1,4 @@
-from .corpus import Corpus, JsonlCorpus  # noqa: F401
+from .corpus import Corpus, JsonlCorpus, PlainTextCorpus  # noqa: F401
 from .example import Example, validate_examples, validate_get_examples  # noqa: F401
 from .example import validate_distillation_examples  # noqa: F401
 from .alignment import Alignment  # noqa: F401
spacy/training/corpus.py
@@ -58,6 +58,28 @@ def read_labels(path: Path, *, require: bool = False):
     return srsly.read_json(path)
 
 
+@util.registry.readers("spacy.PlainTextCorpus.v1")
+def create_plain_text_reader(
+    path: Optional[Path],
+    min_length: int = 0,
+    max_length: int = 0,
+) -> Callable[["Language"], Iterable[Doc]]:
+    """Iterate Example objects from a file or directory of plain text
+    UTF-8 files with one line per doc.
+
+    path (Path): The directory or filename to read from.
+    min_length (int): Minimum document length (in tokens). Shorter documents
+        will be skipped. Defaults to 0, which indicates no limit.
+    max_length (int): Maximum document length (in tokens). Longer documents will
+        be skipped. Defaults to 0, which indicates no limit.
+
+    DOCS: https://spacy.io/api/corpus#plaintextcorpus
+    """
+    if path is None:
+        raise ValueError(Errors.E913)
+    return PlainTextCorpus(path, min_length=min_length, max_length=max_length)
+
+
 def walk_corpus(path: Union[str, Path], file_type) -> List[Path]:
     path = util.ensure_path(path)
     if not path.is_dir() and path.parts[-1].endswith(file_type):
@@ -257,3 +279,52 @@ class JsonlCorpus:
             # We don't *need* an example here, but it seems nice to
             # make it match the Corpus signature.
             yield Example(doc, Doc(nlp.vocab, words=words, spaces=spaces))
+
+
+class PlainTextCorpus:
+    """Iterate Example objects from a file or directory of plain text
+    UTF-8 files with one line per doc.
+
+    path (Path): The directory or filename to read from.
+    min_length (int): Minimum document length (in tokens). Shorter documents
+        will be skipped. Defaults to 0, which indicates no limit.
+    max_length (int): Maximum document length (in tokens). Longer documents will
+        be skipped. Defaults to 0, which indicates no limit.
+
+    DOCS: https://spacy.io/api/corpus#plaintextcorpus
+    """
+
+    file_type = "txt"
+
+    def __init__(
+        self,
+        path: Optional[Union[str, Path]],
+        *,
+        min_length: int = 0,
+        max_length: int = 0,
+    ) -> None:
+        self.path = util.ensure_path(path)
+        self.min_length = min_length
+        self.max_length = max_length
+
+    def __call__(self, nlp: "Language") -> Iterator[Example]:
+        """Yield examples from the data.
+
+        nlp (Language): The current nlp object.
+        YIELDS (Example): The example objects.
+
+        DOCS: https://spacy.io/api/corpus#plaintextcorpus-call
+        """
+        for loc in walk_corpus(self.path, ".txt"):
+            with open(loc, encoding="utf-8") as f:
+                for text in f:
+                    text = text.rstrip("\r\n")
+                    if len(text):
+                        doc = nlp.make_doc(text)
+                        if self.min_length >= 1 and len(doc) < self.min_length:
+                            continue
+                        elif self.max_length >= 1 and len(doc) > self.max_length:
+                            continue
+                        # We don't *need* an example here, but it seems nice to
+                        # make it match the Corpus signature.
+                        yield Example(doc, doc.copy())
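The new corpus can be used directly from Python, or resolved through the `spacy.PlainTextCorpus.v1` reader registered above when referenced from a training config. A minimal sketch (the corpus path is illustrative):

```python
# Read one doc per line from a UTF-8 text file; blank lines are skipped.
from spacy.lang.en import English
from spacy.training import PlainTextCorpus

nlp = English()
corpus = PlainTextCorpus("corpus.txt", min_length=0, max_length=0)  # illustrative path
for example in corpus(nlp):
    print([token.text for token in example.reference])
```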
website/.dockerignore (new file, 9 lines)
@@ -0,0 +1,9 @@
+.cache/
+.next/
+public/
+node_modules
+.npm
+logs
+*.log
+npm-debug.log*
+quickstart-training-generator.js
website/.gitignore (vendored)
@@ -1,5 +1,7 @@
 # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
+
+quickstart-training-generator.js
 
 # dependencies
 /node_modules
 /.pnp
website/Dockerfile
@@ -1,16 +1,14 @@
-FROM node:11.15.0
+FROM node:18
 
-WORKDIR /spacy-io
-
-RUN npm install -g gatsby-cli@2.7.4
-
-COPY package.json .
-COPY package-lock.json .
-
-RUN npm install
+USER node
 
 # This is so the installed node_modules will be up one directory
 # from where a user mounts files, so that they don't accidentally mount
 # their own node_modules from a different build
 # https://nodejs.org/api/modules.html#modules_loading_from_node_modules_folders
-WORKDIR /spacy-io/website/
+WORKDIR /home/node
+COPY --chown=node package.json .
+COPY --chown=node package-lock.json .
+RUN npm install
+
+WORKDIR /home/node/website/
website/README.md
@@ -41,33 +41,27 @@ If you'd like to do this, **be sure you do _not_ include your local
 `node_modules` folder**, since there are some dependencies that need to be built
 for the image system. Rename it before using.
 
-```bash
-docker run -it \
-  -v $(pwd):/spacy-io/website \
-  -p 8000:8000 \
-  ghcr.io/explosion/spacy-io \
-  gatsby develop -H 0.0.0.0
-```
-
-This will allow you to access the built website at http://0.0.0.0:8000/ in your
-browser, and still edit code in your editor while having the site reflect those
-changes.
-
-**Note**: If you're working on a Mac with an M1 processor, you might see
-segfault errors from `qemu` if you use the default image. To fix this use the
-`arm64` tagged image in the `docker run` command
-(ghcr.io/explosion/spacy-io:arm64).
-
-### Building the Docker image
-
-If you'd like to build the image locally, you can do so like this:
+First build the Docker image. This only needs to be done on the first run
+or when changes are made to `Dockerfile` or the website dependencies:
 
 ```bash
 docker build -t spacy-io .
 ```
 
-This will take some time, so if you want to use the prebuilt image you'll save a
-bit of time.
+You can then build and run the website with:
+
+```bash
+docker run -it \
+  --rm \
+  -v $(pwd):/home/node/website \
+  -p 3000:3000 \
+  spacy-io \
+  npm run dev -- -H 0.0.0.0
+```
+
+This will allow you to access the built website at http://0.0.0.0:3000/ in your
+browser, and still edit code in your editor while having the site reflect those
+changes.
 
 ## Project structure
website/docs/api/cli.mdx

@ -12,6 +12,7 @@ menu:
  - ['train', 'train']
  - ['pretrain', 'pretrain']
  - ['evaluate', 'evaluate']
  - ['benchmark', 'benchmark']
  - ['apply', 'apply']
  - ['find-threshold', 'find-threshold']
  - ['assemble', 'assemble']
@ -1135,8 +1136,19 @@ $ python -m spacy pretrain [config_path] [output_dir] [--code] [--resume-path] [

## evaluate {id="evaluate",version="2",tag="command"}

Evaluate a trained pipeline. Expects a loadable spaCy pipeline (package name or
path) and evaluation data in the
The `evaluate` subcommand is superseded by
[`spacy benchmark accuracy`](#benchmark-accuracy). `evaluate` is provided as an
alias to `benchmark accuracy` for compatibility.

## benchmark {id="benchmark", version="3.5"}

The `spacy benchmark` CLI includes commands for benchmarking the accuracy and
speed of your spaCy pipelines.

### accuracy {id="benchmark-accuracy", version="3.5", tag="command"}

Evaluate the accuracy of a trained pipeline. Expects a loadable spaCy pipeline
(package name or path) and evaluation data in the
[binary `.spacy` format](/api/data-formats#binary-training). The
`--gold-preproc` option sets up the evaluation examples with gold-standard
sentences and tokens for the predictions. Gold preprocessing helps the

@ -1147,7 +1159,7 @@ skew. To render a sample of dependency parses in an HTML file using the
`--displacy-path` argument.

```bash
$ python -m spacy evaluate [model] [data_path] [--output] [--code] [--gold-preproc] [--gpu-id] [--displacy-path] [--displacy-limit]
$ python -m spacy benchmark accuracy [model] [data_path] [--output] [--code] [--gold-preproc] [--gpu-id] [--displacy-path] [--displacy-limit]
```

| Name | Description |

@ -1163,6 +1175,29 @@ $ python -m spacy evaluate [model] [data_path] [--output] [--code] [--gold-preproc]
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
| **CREATES**    | Training results and optional metrics and visualizations.  |
### speed {id="benchmark-speed", version="3.5", tag="command"}

Benchmark the speed of a trained pipeline with a 95% confidence interval.
Expects a loadable spaCy pipeline (package name or path) and benchmark data in
the [binary `.spacy` format](/api/data-formats#binary-training). The pipeline is
warmed up before any measurements are taken.

```cli
$ python -m spacy benchmark speed [model] [data_path] [--batch-size] [--no-shuffle] [--gpu-id] [--batches] [--warmup]
```

| Name                 | Description |
| -------------------- | -------------------------------------------------------------------------------------------------------- |
| `model`              | Pipeline to benchmark the speed of. Can be a package or a path to a data directory. ~~str (positional)~~  |
| `data_path`          | Location of benchmark data in spaCy's [binary format](/api/data-formats#training). ~~Path (positional)~~  |
| `--batch-size`, `-b` | Set the batch size. If not set, the pipeline's batch size is used. ~~Optional[int] \(option)~~            |
| `--no-shuffle`       | Do not shuffle documents in the benchmark data. ~~bool (flag)~~                                           |
| `--gpu-id`, `-g`     | GPU to use, if any. Defaults to `-1` for CPU. ~~int (option)~~                                            |
| `--batches`          | Number of batches to benchmark on. Defaults to `50`. ~~Optional[int] \(option)~~                          |
| `--warmup`, `-w`     | Iterations over the benchmark data for warmup. Defaults to `3`. ~~Optional[int] \(option)~~               |
| `--help`, `-h`       | Show help message and available arguments. ~~bool (flag)~~                                                |
| **PRINTS**           | Pipeline speed in words per second with a 95% confidence interval.                                        |
## apply {id="apply", version="3.5", tag="command"}
|
## apply {id="apply", version="3.5", tag="command"}
|
||||||
|
|
||||||
Applies a trained pipeline to data and stores the resulting annotated documents
|
Applies a trained pipeline to data and stores the resulting annotated documents
|
||||||
|
@ -1176,16 +1211,16 @@ input formats are:
|
||||||
|
|
||||||
When a directory is provided it is traversed recursively to collect all files.
|
When a directory is provided it is traversed recursively to collect all files.
|
||||||
|
|
||||||
```cli
|
```bash
|
||||||
$ python -m spacy apply [model] [data-path] [output-file] [--code] [--text-key] [--force-overwrite] [--gpu-id] [--batch-size] [--n-process]
|
$ python -m spacy apply [model] [data-path] [output-file] [--code] [--text-key] [--force-overwrite] [--gpu-id] [--batch-size] [--n-process]
|
||||||
```
|
```
|
||||||
|
|
||||||
| Name | Description |
|
| Name | Description |
|
||||||
| ----------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
| ------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||||
| `model` | Pipeline to apply to the data. Can be a package or a path to a data directory. ~~str (positional)~~ |
|
| `model` | Pipeline to apply to the data. Can be a package or a path to a data directory. ~~str (positional)~~ |
|
||||||
| `data_path` | Location of data to be evaluated in spaCy's [binary format](/api/data-formats#training), jsonl, or plain text. ~~Path (positional)~~ |
|
| `data_path` | Location of data to be evaluated in spaCy's [binary format](/api/data-formats#training), jsonl, or plain text. ~~Path (positional)~~ |
|
||||||
| `output-file`, `-o` | Output `DocBin` path. ~~str (positional)~~ |
|
| `output-file`, `-o` | Output `DocBin` path. ~~str (positional)~~ |
|
||||||
| `--code`, `-c` <Tag variant="new">3</Tag> | Path to Python file with additional code to be imported. Allows [registering custom functions](/usage/training#custom-functions) for new architectures. ~~Optional[Path] \(option)~~ |
|
| `--code`, `-c` | Path to Python file with additional code to be imported. Allows [registering custom functions](/usage/training#custom-functions) for new architectures. ~~Optional[Path] \(option)~~ |
|
||||||
| `--text-key`, `-tk` | The key for `.jsonl` files to use to grab the texts from. Defaults to `text`. ~~Optional[str] \(option)~~ |
|
| `--text-key`, `-tk` | The key for `.jsonl` files to use to grab the texts from. Defaults to `text`. ~~Optional[str] \(option)~~ |
|
||||||
| `--force-overwrite`, `-F` | If the provided `output-file` already exists, then force `apply` to overwrite it. If this is `False` (default) then quits with a warning instead. ~~bool (flag)~~ |
|
| `--force-overwrite`, `-F` | If the provided `output-file` already exists, then force `apply` to overwrite it. If this is `False` (default) then quits with a warning instead. ~~bool (flag)~~ |
|
||||||
| `--gpu-id`, `-g` | GPU to use, if any. Defaults to `-1` for CPU. ~~int (option)~~ |
|
| `--gpu-id`, `-g` | GPU to use, if any. Defaults to `-1` for CPU. ~~int (option)~~ |
|
||||||
|
@ -1194,7 +1229,6 @@ $ python -m spacy apply [model] [data-path] [output-file] [--code] [--text-key]
|
||||||
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
|
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
|
||||||
| **CREATES** | A `DocBin` with the annotations from the `model` for all the files found in `data-path`. |
|
| **CREATES** | A `DocBin` with the annotations from the `model` for all the files found in `data-path`. |
|
||||||
|
|
||||||
|
|
||||||
## find-threshold {id="find-threshold",version="3.5",tag="command"}
|
## find-threshold {id="find-threshold",version="3.5",tag="command"}
|
||||||
|
|
||||||
Runs prediction trials for a trained model with varying tresholds to maximize
|
Runs prediction trials for a trained model with varying tresholds to maximize
|
||||||
|
|
|
website/docs/api/corpus.mdx

@ -175,3 +175,68 @@ Yield examples from the data.
| ---------- | -------------------------------------- |
| `nlp`      | The current `nlp` object. ~~Language~~ |
| **YIELDS** | The examples. ~~Example~~              |

## PlainTextCorpus {id="plaintextcorpus",tag="class",version="3.5.1"}

Iterate over documents from a plain text file. Can be used to read the raw text
corpus for language model
[pretraining](/usage/embeddings-transformers#pretraining). The expected file
format is:

- UTF-8 encoding
- One document per line
- Blank lines are ignored.

```text {title="Example"}
Can I ask where you work now and what you do, and if you enjoy it?
They may just pull out of the Seattle market completely, at least until they have autonomous vehicles.
My cynical view on this is that it will never be free to the public. Reason: what would be the draw of joining the military? Right now their selling point is free Healthcare and Education. Ironically both are run horribly and most, that I've talked to, come out wishing they never went in.
```

### PlainTextCorpus.\_\_init\_\_ {id="plaintextcorpus-init",tag="method"}

Initialize the reader.

> #### Example
>
> ```python
> from spacy.training import PlainTextCorpus
>
> corpus = PlainTextCorpus("./data/docs.txt")
> ```
>
> ```ini
> ### Example config
> [corpora.pretrain]
> @readers = "spacy.PlainTextCorpus.v1"
> path = "corpus/raw_text.txt"
> min_length = 0
> max_length = 0
> ```

| Name           | Description |
| -------------- | -------------------------------------------------------------------------------------------------------------------------- |
| `path`         | The directory or filename to read from. Expects newline-delimited documents in UTF-8 format. ~~Union[str, Path]~~           |
| _keyword-only_ |                                                                                                                              |
| `min_length`   | Minimum document length (in tokens). Shorter documents will be skipped. Defaults to `0`, which indicates no limit. ~~int~~  |
| `max_length`   | Maximum document length (in tokens). Longer documents will be skipped. Defaults to `0`, which indicates no limit. ~~int~~   |

### PlainTextCorpus.\_\_call\_\_ {id="plaintextcorpus-call",tag="method"}

Yield examples from the data.

> #### Example
>
> ```python
> from spacy.training import PlainTextCorpus
> import spacy
>
> corpus = PlainTextCorpus("./docs.txt")
> nlp = spacy.blank("en")
> data = corpus(nlp)
> ```

| Name       | Description |
| ---------- | -------------------------------------- |
| `nlp`      | The current `nlp` object. ~~Language~~ |
| **YIELDS** | The examples. ~~Example~~              |
website/docs/api/entitylinker.mdx

@ -15,7 +15,7 @@ world". It requires a `KnowledgeBase`, as well as a function to generate
plausible candidates from that `KnowledgeBase` given a certain textual mention,
and a machine learning model to pick the right candidate, given the local
context of the mention. `EntityLinker` defaults to using the
[`InMemoryLookupKB`](/api/kb_in_memory) implementation.
[`InMemoryLookupKB`](/api/inmemorylookupkb) implementation.

## Assigned Attributes {id="assigned-attributes"}
@ -43,7 +43,7 @@ The length of the fixed-size entity vectors in the knowledge base.

Add an entity to the knowledge base, specifying its corpus frequency and entity
vector, which should be of length
[`entity_vector_length`](/api/kb_in_memory#entity_vector_length).
[`entity_vector_length`](/api/inmemorylookupkb#entity_vector_length).

> #### Example
>

@ -79,8 +79,9 @@ frequency and entity vector for each entity.

Add an alias or mention to the knowledge base, specifying its potential KB
identifiers and their prior probabilities. The entity identifiers should refer
to entities previously added with [`add_entity`](/api/kb_in_memory#add_entity)
or [`set_entities`](/api/kb_in_memory#set_entities). The sum of the prior
to entities previously added with
[`add_entity`](/api/inmemorylookupkb#add_entity) or
[`set_entities`](/api/inmemorylookupkb#set_entities). The sum of the prior
probabilities should not exceed 1. Note that an empty string cannot be used as
an alias.

@ -156,7 +157,7 @@ Get a list of all aliases in the knowledge base.

Given a certain textual mention as input, retrieve a list of candidate entities
of type [`Candidate`](/api/kb#candidate). Wraps
[`get_alias_candidates()`](/api/kb_in_memory#get_alias_candidates).
[`get_alias_candidates()`](/api/inmemorylookupkb#get_alias_candidates).

> #### Example
>

@ -174,7 +175,7 @@ of type [`Candidate`](/api/kb#candidate). Wraps

## InMemoryLookupKB.get_candidates_batch {id="get_candidates_batch",tag="method"}

Same as [`get_candidates()`](/api/kb_in_memory#get_candidates), but for an
Same as [`get_candidates()`](/api/inmemorylookupkb#get_candidates), but for an
arbitrary number of mentions. The [`EntityLinker`](/api/entitylinker) component
will call `get_candidates_batch()` instead of `get_candidates()`, if the config
parameter `candidates_batch_size` is greater than or equal to 1.

@ -231,7 +232,7 @@ Given a certain entity ID, retrieve its pretrained entity vector.

## InMemoryLookupKB.get_vectors {id="get_vectors",tag="method"}

Same as [`get_vector()`](/api/kb_in_memory#get_vector), but for an arbitrary
Same as [`get_vector()`](/api/inmemorylookupkb#get_vector), but for an arbitrary
number of entity IDs.

The default implementation of `get_vectors()` executes `get_vector()` in a loop.
|
@ -21,8 +21,8 @@ functions called by the [`EntityLinker`](/api/entitylinker) component.
|
||||||
<Infobox variant="warning">
|
<Infobox variant="warning">
|
||||||
|
|
||||||
This class was not abstract up to spaCy version 3.5. The `KnowledgeBase`
|
This class was not abstract up to spaCy version 3.5. The `KnowledgeBase`
|
||||||
implementation up to that point is available as `InMemoryLookupKB` from 3.5
|
implementation up to that point is available as
|
||||||
onwards.
|
[`InMemoryLookupKB`](/api/inmemorylookupkb) from 3.5 onwards.
|
||||||
|
|
||||||
</Infobox>
|
</Infobox>
|
||||||
|
|
||||||
|
@ -110,14 +110,15 @@ to you.
|
||||||
</Infobox>
|
</Infobox>
|
||||||
|
|
||||||
From spaCy 3.5 on `KnowledgeBase` is an abstract class (with
|
From spaCy 3.5 on `KnowledgeBase` is an abstract class (with
|
||||||
[`InMemoryLookupKB`](/api/kb_in_memory) being a drop-in replacement) to allow
|
[`InMemoryLookupKB`](/api/inmemorylookupkb) being a drop-in replacement) to
|
||||||
more flexibility in customizing knowledge bases. Some of its methods were moved
|
allow more flexibility in customizing knowledge bases. Some of its methods were
|
||||||
to [`InMemoryLookupKB`](/api/kb_in_memory) during this refactoring, one of those
|
moved to [`InMemoryLookupKB`](/api/inmemorylookupkb) during this refactoring,
|
||||||
being `get_alias_candidates()`. This method is now available as
|
one of those being `get_alias_candidates()`. This method is now available as
|
||||||
[`InMemoryLookupKB.get_alias_candidates()`](/api/kb_in_memory#get_alias_candidates).
|
[`InMemoryLookupKB.get_alias_candidates()`](/api/inmemorylookupkb#get_alias_candidates).
|
||||||
Note: [`InMemoryLookupKB.get_candidates()`](/api/kb_in_memory#get_candidates)
|
Note:
|
||||||
|
[`InMemoryLookupKB.get_candidates()`](/api/inmemorylookupkb#get_candidates)
|
||||||
defaults to
|
defaults to
|
||||||
[`InMemoryLookupKB.get_alias_candidates()`](/api/kb_in_memory#get_alias_candidates).
|
[`InMemoryLookupKB.get_alias_candidates()`](/api/inmemorylookupkb#get_alias_candidates).
|
||||||
|
|
||||||
## KnowledgeBase.get_vector {id="get_vector",tag="method"}
|
## KnowledgeBase.get_vector {id="get_vector",tag="method"}
|
||||||
|
|
||||||
|
|
|
@ -237,16 +237,16 @@ browser. Will run a simple web server.

> ```

| Name | Description |
| ----------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `docs`                                           | Document(s) or span(s) to visualize. ~~Union[Iterable[Union[Doc, Span]], Doc, Span]~~ |
| `style`                                          | Visualization style, `"dep"`, `"ent"` or `"span"` <Tag variant="new">3.3</Tag>. Defaults to `"dep"`. ~~str~~ |
| `style` <Tag variant="new">3.3</Tag>             | Visualization style, `"dep"`, `"ent"` or `"span"`. Defaults to `"dep"`. ~~str~~ |
| `page`                                           | Render markup as full HTML page. Defaults to `True`. ~~bool~~ |
| `minify`                                         | Minify HTML markup. Defaults to `False`. ~~bool~~ |
| `options`                                        | [Visualizer-specific options](#displacy_options), e.g. colors. ~~Dict[str, Any]~~ |
| `manual`                                         | Don't parse `Doc` and instead expect a dict or list of dicts. [See here](/usage/visualizers#manual-usage) for formats and examples. Defaults to `False`. ~~bool~~ |
| `port`                                           | Port to serve visualization. Defaults to `5000`. ~~int~~ |
| `host`                                           | Host to serve visualization. Defaults to `"0.0.0.0"`. ~~str~~ |
| `auto_select_port`                               | If `True`, automatically switch to a different port if the specified port is already in use. Defaults to `False`. ~~bool~~ |
| `auto_select_port` <Tag variant="new">3.5</Tag>  | If `True`, automatically switch to a different port if the specified port is already in use. Defaults to `False`. ~~bool~~ |
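A minimal sketch of the `auto_select_port` option tagged above (assumes a pipeline such as `en_core_web_sm` is installed; any loadable pipeline works):

```python
import spacy
from spacy import displacy

nlp = spacy.load("en_core_web_sm")
doc = nlp("This is a sentence.")
# If port 5000 is already in use, displaCy switches to a free port
# instead of failing with an error
displacy.serve(doc, style="dep", auto_select_port=True)
```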
### displacy.render {id="displacy.render",tag="method",version="2"}
|
@ -81,7 +81,7 @@ operates on a `Doc` and gives you access to the matched tokens **in context**.
|
||||||
| ------------------------------------------------ | -------------------------------------------------------------------------------------------------- |
|
| ------------------------------------------------ | -------------------------------------------------------------------------------------------------- |
|
||||||
| [`Corpus`](/api/corpus) | Class for managing annotated corpora for training and evaluation data. |
|
| [`Corpus`](/api/corpus) | Class for managing annotated corpora for training and evaluation data. |
|
||||||
| [`KnowledgeBase`](/api/kb) | Abstract base class for storage and retrieval of data for entity linking. |
|
| [`KnowledgeBase`](/api/kb) | Abstract base class for storage and retrieval of data for entity linking. |
|
||||||
| [`InMemoryLookupKB`](/api/kb_in_memory) | Implementation of `KnowledgeBase` storing all data in memory. |
|
| [`InMemoryLookupKB`](/api/inmemorylookupkb) | Implementation of `KnowledgeBase` storing all data in memory. |
|
||||||
| [`Candidate`](/api/kb#candidate) | Object associating a textual mention with a specific entity contained in a `KnowledgeBase`. |
|
| [`Candidate`](/api/kb#candidate) | Object associating a textual mention with a specific entity contained in a `KnowledgeBase`. |
|
||||||
| [`Lookups`](/api/lookups) | Container for convenient access to large lookup tables and dictionaries. |
|
| [`Lookups`](/api/lookups) | Container for convenient access to large lookup tables and dictionaries. |
|
||||||
| [`MorphAnalysis`](/api/morphology#morphanalysis) | A morphological analysis. |
|
| [`MorphAnalysis`](/api/morphology#morphanalysis) | A morphological analysis. |
|
||||||
|
|
|
@ -134,6 +134,7 @@ useful for your purpose. Here are some important considerations to keep in mind:

<Image
  src="/images/sense2vec.jpg"
  href="https://github.com/explosion/sense2vec"
  alt="sense2vec Screenshot"
/>

[`sense2vec`](https://github.com/explosion/sense2vec) is a library developed by

@ -113,6 +113,7 @@ code.

<Image
  src="/images/thinc_mypy.jpg"
  href="https://thinc.ai/docs/usage-type-checking#linting"
  alt="Screenshot of Thinc type checking in VSCode with mypy"
/>

</Accordion>
|
@ -943,7 +943,7 @@ full embedded visualizer, as well as individual components.
|
||||||
> $ pip install spacy-streamlit --pre
|
> $ pip install spacy-streamlit --pre
|
||||||
> ```
|
> ```
|
||||||
|
|
||||||
![](/images/spacy-streamlit.png)
|
![Screenshot of the spacy-streamlit package in Streamlit](/images/spacy-streamlit.png)
|
||||||
|
|
||||||
Using [`spacy-streamlit`](https://github.com/explosion/spacy-streamlit), your
|
Using [`spacy-streamlit`](https://github.com/explosion/spacy-streamlit), your
|
||||||
projects can easily define their own scripts that spin up an interactive
|
projects can easily define their own scripts that spin up an interactive
|
||||||
|
|
|
@ -384,14 +384,14 @@ the more specific attributes `FUZZY1`..`FUZZY9` you can specify the maximum
|
||||||
allowed edit distance directly.
|
allowed edit distance directly.
|
||||||
|
|
||||||
```python
|
```python
|
||||||
# Match lowercase with fuzzy matching (allows 2 edits)
|
# Match lowercase with fuzzy matching (allows 3 edits)
|
||||||
pattern = [{"LOWER": {"FUZZY": "definitely"}}]
|
pattern = [{"LOWER": {"FUZZY": "definitely"}}]
|
||||||
|
|
||||||
# Match custom attribute values with fuzzy matching (allows 2 edits)
|
# Match custom attribute values with fuzzy matching (allows 3 edits)
|
||||||
pattern = [{"_": {"country": {"FUZZY": "Kyrgyzstan"}}}]
|
pattern = [{"_": {"country": {"FUZZY": "Kyrgyzstan"}}}]
|
||||||
|
|
||||||
# Match with exact Levenshtein edit distance limits (allows 3 edits)
|
# Match with exact Levenshtein edit distance limits (allows 4 edits)
|
||||||
pattern = [{"_": {"country": {"FUZZY3": "Kyrgyzstan"}}}]
|
pattern = [{"_": {"country": {"FUZZY4": "Kyrgyzstan"}}}]
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Regex and fuzzy matching with lists {id="regex-fuzzy-lists", version="3.5"}
|
#### Regex and fuzzy matching with lists {id="regex-fuzzy-lists", version="3.5"}
|
||||||
|
|
|
@ -304,6 +304,28 @@ installed in the same environment – that's it.

| `spacy_lookups` | Group of entry points for custom [`Lookups`](/api/lookups), including lemmatizer data. Used by spaCy's [`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data) package. |
| [`spacy_displacy_colors`](#entry-points-displacy) | Group of entry points of custom label colors for the [displaCy visualizer](/usage/visualizers#ent). The key name doesn't matter, but it should point to a dict of labels and color values. Useful for custom models that predict different entity types. |

### Loading probability tables into existing models

You can load a probability table from [spacy-lookups-data](https://github.com/explosion/spacy-lookups-data) into an existing spaCy model like `en_core_web_sm`.

```python
# Requirements: pip install spacy-lookups-data
import spacy
from spacy.lookups import load_lookups

nlp = spacy.load("en_core_web_sm")
lookups = load_lookups("en", ["lexeme_prob"])
nlp.vocab.lookups.add_table("lexeme_prob", lookups.get_table("lexeme_prob"))
```
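Once the table is registered, lexeme probabilities should be exposed via `Lexeme.prob`. A quick sanity check, assuming the setup above (the word choice is an arbitrary example):

```python
# "the" is an arbitrary frequent word; if the table loaded correctly, this
# should print a log-probability above the default unknown value (-20.0)
print(nlp.vocab["the"].prob)
```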
When training a model from scratch you can also specify probability tables in the `config.cfg`.

```ini {title="config.cfg (excerpt)"}
[initialize.lookups]
@misc = "spacy.LookupsDataLoader.v1"
lang = ${nlp.lang}
tables = ["lexeme_prob"]
```

### Custom components via entry points {id="entry-points-components"}

When you load a pipeline, spaCy will generally use its `config.cfg` to set up
@ -684,10 +706,15 @@ If your pipeline includes

[custom components](/usage/processing-pipelines#custom-components), model
architectures or other [code](/usage/training#custom-code), those functions need
to be registered **before** your pipeline is loaded. Otherwise, spaCy won't know
how to create the objects referenced in the config. The
[`spacy package`](/api/cli#package) command lets you provide one or more paths
to Python files containing custom registered functions using the `--code`
argument.
how to create the objects referenced in the config. If you're loading your own
pipeline in Python, you can make custom components available just by importing
the code that defines them before calling
[`spacy.load`](/api/top-level#spacy.load). This is also how the `--code`
argument to CLI commands works.
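A minimal sketch of that import-before-load pattern (`my_components` and the pipeline path are hypothetical; the module is assumed to register factories via `@Language.factory` or `@Language.component`):

```python
import spacy
import my_components  # noqa: F401 - hypothetical module; registers factories as a side effect

# The pipeline's config can now resolve the custom factory names
nlp = spacy.load("./my_custom_pipeline")
```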
With the [`spacy package`](/api/cli#package) command, you can provide one or
more paths to Python files containing custom registered functions using the
`--code` argument.

> #### \_\_init\_\_.py (excerpt)
>
@ -567,7 +567,10 @@ If you would like to use the spaCy logo on your site, please get in touch and

ask us first. However, if you want to show support and tell others that your
project is using spaCy, you can grab one of our **spaCy badges** here:

<img src={`https://img.shields.io/badge/built%20with-spaCy-09a3d5.svg`} />
<img
  src={`https://img.shields.io/badge/built%20with-spaCy-09a3d5.svg`}
  alt="Built with spaCy"
/>

```markdown
[![Built with spaCy](https://img.shields.io/badge/built%20with-spaCy-09a3d5.svg)](https://spacy.io)

@ -575,8 +578,9 @@ project is using spaCy, you can grab one of our **spaCy badges** here:

<img
  src={`https://img.shields.io/badge/made%20with%20❤%20and-spaCy-09a3d5.svg`}
  alt="Made with love and spaCy"
/>

```markdown
[![Built with spaCy](https://img.shields.io/badge/made%20with%20❤%20and-spaCy-09a3d5.svg)](https://spacy.io)
[![Made with love and spaCy](https://img.shields.io/badge/made%20with%20❤%20and-spaCy-09a3d5.svg)](https://spacy.io)
```
215
website/docs/usage/v3-5.mdx
Normal file

@ -0,0 +1,215 @@
---
title: What's New in v3.5
teaser: New features and how to upgrade
menu:
  - ['New Features', 'features']
  - ['Upgrading Notes', 'upgrading']
---

## New features {id="features",hidden="true"}

spaCy v3.5 introduces three new CLI commands, `apply`, `benchmark` and
`find-threshold`, adds fuzzy matching, provides improvements to our entity
linking functionality, and includes a range of language updates and bug fixes.

### New CLI commands {id="cli"}

#### apply CLI

The [`apply` CLI](/api/cli#apply) can be used to apply a pipeline to one or more
`.txt`, `.jsonl` or `.spacy` input files, saving the annotated docs in a single
`.spacy` file.

```bash
$ spacy apply en_core_web_sm my_texts/ output.spacy
```
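The output is a standard `DocBin`. A minimal sketch of reading it back, assuming the `output.spacy` produced above:

```python
import spacy
from spacy.tokens import DocBin

nlp = spacy.blank("en")  # any pipeline with a compatible vocab works
doc_bin = DocBin().from_disk("output.spacy")
for doc in doc_bin.get_docs(nlp.vocab):
    print(doc.text[:60])
```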
#### benchmark CLI

The [`benchmark` CLI](/api/cli#benchmark) has been added to extend the existing
`evaluate` functionality with a wider range of profiling subcommands.

The `benchmark accuracy` CLI is introduced as an alias for `evaluate`. The new
`benchmark speed` CLI performs warmup rounds before measuring the speed in words
per second on batches of randomly shuffled documents from the provided data.

```bash
$ spacy benchmark speed my_pipeline data.spacy
```

The output is the mean performance using batches (`nlp.pipe`) with a 95%
confidence interval, e.g., profiling `en_core_web_sm` on CPU:

```none
Outliers: 2.0%, extreme outliers: 0.0%
Mean: 18904.1 words/s (95% CI: -256.9 +244.1)
```
#### find-threshold CLI

The [`find-threshold` CLI](/api/cli#find-threshold) runs a series of trials
across threshold values from `0.0` to `1.0` and identifies the best threshold
for the provided score metric.

The following command runs 20 trials for the `spancat` component in
`my_pipeline`, recording the `spans_sc_f` score for each value of the threshold
`[components.spancat.threshold]` from `0.0` to `1.0`:

```bash
$ spacy find-threshold my_pipeline data.spacy spancat threshold spans_sc_f --n_trials 20
```

The `find-threshold` CLI can be used with `textcat_multilabel`, `spancat` and
custom components with thresholds that are applied while predicting or scoring.
### Fuzzy matching {id="fuzzy"}

New `FUZZY` operators support [fuzzy matching](/usage/rule-based-matching#fuzzy)
with the `Matcher`. By default, the `FUZZY` operator allows a Levenshtein edit
distance of at least 2 and up to 30% of the pattern string length.
`FUZZY1`..`FUZZY9` can be used to specify the exact number of allowed edits.

```python
# Match lowercase with fuzzy matching (allows up to 3 edits)
pattern = [{"LOWER": {"FUZZY": "definitely"}}]

# Match custom attribute values with fuzzy matching (allows up to 3 edits)
pattern = [{"_": {"country": {"FUZZY": "Kyrgyzstan"}}}]

# Match with exact Levenshtein edit distance limits (allows up to 4 edits)
pattern = [{"_": {"country": {"FUZZY4": "Kyrgyzstan"}}}]
```

Note that `FUZZY` uses Levenshtein edit distance rather than Damerau-Levenshtein
edit distance, so a transposition like `teh` for `the` counts as two edits, one
insertion and one deletion.

If you'd prefer an alternate fuzzy matching algorithm, you can provide your own
custom method to the `Matcher` or as a config option for an entity ruler and
span ruler.

### FUZZY and REGEX with lists {id="fuzzy-regex-lists"}

The `FUZZY` and `REGEX` operators are also now supported for lists with `IN` and
`NOT_IN`:

```python
pattern = [{"TEXT": {"FUZZY": {"IN": ["awesome", "cool", "wonderful"]}}}]
pattern = [{"TEXT": {"REGEX": {"NOT_IN": ["^awe(some)?$", "^wonder(ful)?"]}}}]
```
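An end-to-end sketch of a fuzzy pattern in the `Matcher` (the blank pipeline and sample text are arbitrary):

```python
import spacy
from spacy.matcher import Matcher

nlp = spacy.blank("en")
matcher = Matcher(nlp.vocab)
matcher.add("DEFINITELY", [[{"LOWER": {"FUZZY": "definitely"}}]])

doc = nlp("I will definitly be there!")
for match_id, start, end in matcher(doc):
    # "definitly" is one deletion away from "definitely", within the default limit
    print(doc[start:end].text)
```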
### Entity linking generalization {id="el"}

The knowledge base used for entity linking is now easier to customize and has a
new default implementation [`InMemoryLookupKB`](/api/inmemorylookupkb).
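A minimal sketch of constructing the default KB directly (the entity ID, vector length, frequency and alias are illustrative placeholders):

```python
import spacy
from spacy.kb import InMemoryLookupKB

nlp = spacy.blank("en")
kb = InMemoryLookupKB(vocab=nlp.vocab, entity_vector_length=64)
kb.add_entity(entity="Q42", freq=12, entity_vector=[0.0] * 64)
kb.add_alias(alias="Douglas Adams", entities=["Q42"], probabilities=[1.0])
print(kb.get_alias_candidates("Douglas Adams"))
```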
### Additional features and improvements {id="additional-features-and-improvements"}

- Language updates:
  - Extended support for Slovenian
  - Fixed lookup fallback for French and Catalan lemmatizers
  - Switch Russian and Ukrainian lemmatizers to `pymorphy3`
  - Support for editorial punctuation in Ancient Greek
  - Update to Russian tokenizer exceptions
  - Small fix for Dutch stop words
- Allow up to `typer` v0.7.x, `mypy` 0.990 and `typing_extensions` v4.4.x.
- New `spacy.ConsoleLogger.v3` with expanded progress
  [tracking](/api/top-level#ConsoleLogger).
- Improved scoring behavior for `textcat` with `spacy.textcat_scorer.v2` and
  `spacy.textcat_multilabel_scorer.v2`.
- Updates so that downstream components can train properly on a frozen `tok2vec`
  or `transformer` layer.
- Allow interpolation of variables in directory names in projects.
- Support for local file system [remotes](/usage/projects#remote) for projects.
- Improve UX around `displacy.serve` when the default port is in use.
- Optional `before_update` callback that is invoked at the start of each
  [training step](/api/data-formats#config-training).
- Improve performance of `SpanGroup` and fix typing issues for `SpanGroup` and
  `Span` objects.
- Patch a
  [security vulnerability](https://github.com/advisories/GHSA-gw9q-c7gh-j9vm) in
  extracting tar files.
- Add equality definition for `Vectors`.
- Ensure `Vocab.to_disk` respects the exclude setting for `lookups` and
  `vectors`.
- Correctly handle missing annotations in the edit tree lemmatizer.

### Trained pipeline updates {id="pipelines"}

- The CNN pipelines add `IS_SPACE` as a `tok2vec` feature for `tagger` and
  `morphologizer` components to improve tagging of non-whitespace vs. whitespace
  tokens.
- The transformer pipelines require `spacy-transformers` v1.2, which uses the
  exact alignment from `tokenizers` for fast tokenizers instead of the heuristic
  alignment from `spacy-alignments`. For all trained pipelines except
  `ja_core_news_trf`, the alignments between spaCy tokens and transformer tokens
  may be slightly different. More details about the `spacy-transformers` changes
  in the
  [v1.2.0 release notes](https://github.com/explosion/spacy-transformers/releases/tag/v1.2.0).

## Notes about upgrading from v3.4 {id="upgrading"}

### Validation of textcat values {id="textcat-validation"}

An error is now raised when unsupported values are given as input to train a
`textcat` or `textcat_multilabel` model - ensure that values are `0.0` or `1.0`
as explained in the [docs](/api/textcategorizer#assigned-attributes).

### Updated scorers for tokenization and textcat {id="scores"}

We fixed a bug that inflated the `token_acc` scores in v3.0-v3.4. The reported
`token_acc` will drop from v3.4 to v3.5, but if `token_p/r/f` stay the same,
your tokenization performance has not changed from v3.4.

For new `textcat` or `textcat_multilabel` configs, the new default `v2` scorers:

- ignore `threshold` for `textcat`, so the reported `cats_p/r/f` may increase
  slightly in v3.5 even though the underlying predictions are unchanged
- report the performance of only the **final** `textcat` or `textcat_multilabel`
  component in the pipeline by default
- allow custom scorers to be used to score multiple `textcat` and
  `textcat_multilabel` components with `Scorer.score_cats` by restricting the
  evaluation to the component's provided labels

### Pipeline package version compatibility {id="version-compat"}

> #### Using legacy implementations
>
> In spaCy v3, you'll still be able to load and reference legacy implementations
> via [`spacy-legacy`](https://github.com/explosion/spacy-legacy), even if the
> components or architectures change and newer versions are available in the
> core library.

When you're loading a pipeline package trained with an earlier version of spaCy
v3, you will see a warning telling you that the pipeline may be incompatible.
This doesn't necessarily have to be true, but we recommend running your
pipelines against your test suite or evaluation data to make sure there are no
unexpected results.

If you're using one of the [trained pipelines](/models) we provide, you should
run [`spacy download`](/api/cli#download) to update to the latest version. To
see an overview of all installed packages and their compatibility, you can run
[`spacy validate`](/api/cli#validate).

If you've trained your own custom pipeline and you've confirmed that it's still
working as expected, you can update the spaCy version requirements in the
[`meta.json`](/api/data-formats#meta):

```diff
- "spacy_version": ">=3.4.0,<3.5.0",
+ "spacy_version": ">=3.4.0,<3.6.0",
```

### Updating v3.4 configs

To update a config from spaCy v3.4 with the new v3.5 settings, run
[`init fill-config`](/api/cli#init-fill-config):

```cli
$ python -m spacy init fill-config config-v3.4.cfg config-v3.5.cfg
```

In many cases ([`spacy train`](/api/cli#train),
[`spacy.load`](/api/top-level#spacy.load)), the new defaults will be filled in
automatically, but you'll need to fill in the new settings to run
[`debug config`](/api/cli#debug) and [`debug data`](/api/cli#debug-data).
@ -437,6 +437,6 @@ Alternatively, if you're using [Streamlit](https://streamlit.io), check out the

helps you integrate spaCy visualizations into your apps. It includes a full
embedded visualizer, as well as individual components.

![](/images/spacy-streamlit.png)
![Screenshot of the spacy-streamlit package in Streamlit](/images/spacy-streamlit.png)

</Grid>
website/meta/sidebars.json

@ -13,7 +13,8 @@
{ "text": "New in v3.1", "url": "/usage/v3-1" },
{ "text": "New in v3.2", "url": "/usage/v3-2" },
{ "text": "New in v3.3", "url": "/usage/v3-3" },
{ "text": "New in v3.4", "url": "/usage/v3-4" }
{ "text": "New in v3.4", "url": "/usage/v3-4" },
{ "text": "New in v3.5", "url": "/usage/v3-5" }
]
},
{

@ -129,6 +130,7 @@
"items": [
{ "text": "Attributes", "url": "/api/attributes" },
{ "text": "Corpus", "url": "/api/corpus" },
{ "text": "InMemoryLookupKB", "url": "/api/inmemorylookupkb" },
{ "text": "KnowledgeBase", "url": "/api/kb" },
{ "text": "Lookups", "url": "/api/lookups" },
{ "text": "MorphAnalysis", "url": "/api/morphology#morphanalysis" },
website/meta/site.json

@ -27,7 +27,7 @@
  "indexName": "spacy"
},
"binderUrl": "explosion/spacy-io-binder",
"binderVersion": "3.4",
"binderVersion": "3.5",
"sections": [
{ "id": "usage", "title": "Usage Documentation", "theme": "blue" },
{ "id": "models", "title": "Models Documentation", "theme": "blue" },
@ -17,7 +17,7 @@ export default function App({ Component, pageProps }: AppProps) {

<link rel="manifest" href="/manifest.webmanifest" />
<meta
    name="viewport"
    content="width=device-width, initial-scale=1.0, minimum-scale=1 maximum-scale=1.0, user-scalable=0, shrink-to-fit=no, viewport-fit=cover"
    content="width=device-width, initial-scale=1.0, minimum-scale=1, maximum-scale=5.0, shrink-to-fit=no, viewport-fit=cover"
/>
<meta name="theme-color" content="#09a3d5" />
<link rel="apple-touch-icon" sizes="192x192" href="/icons/icon-192x192.png" />
@ -13,7 +13,7 @@ import {

    LandingBanner,
} from '../src/components/landing'
import { H2 } from '../src/components/typography'
import { InlineCode } from '../src/components/code'
import { InlineCode } from '../src/components/inlineCode'
import { Ul, Li } from '../src/components/list'
import Button from '../src/components/button'
import Link from '../src/components/link'

@ -89,8 +89,8 @@ const Landing = () => {

</LandingCard>

<LandingCard title="Awesome ecosystem" url="/usage/projects" button="Read more">
    In the five years since its release, spaCy has become an industry standard with
    a huge ecosystem. Choose from a variety of plugins, integrate with your machine
    Since its release in 2015, spaCy has become an industry standard with a huge
    ecosystem. Choose from a variety of plugins, integrate with your machine
    learning stack and build custom components and workflows.
</LandingCard>
</LandingGrid>

@ -162,7 +162,7 @@ const Landing = () => {

    small
>
    <p>
        <Link to="https://prodi.gy" hidden>
        <Link to="https://prodi.gy" noLinkLayout>
            <ImageFill
                image={prodigyImage}
                alt="Prodigy: Radically efficient machine teaching"

@ -206,7 +206,10 @@ const Landing = () => {

<LandingGrid cols={2}>
    <LandingCol>
        <Link to="/usage/projects" hidden>
            <ImageFill image={projectsImage} />
            <ImageFill
                image={projectsImage}
                alt="Illustration of project workflow and commands"
            />
        </Link>
        <br />
        <br />
@ -33,7 +33,7 @@ export default function Accordion({ title, id, expanded = false, spaced = false,

<Link
    to={`#${id}`}
    className={classes.anchor}
    hidden
    noLinkLayout
    onClick={(event) => event.stopPropagation()}
>
    ¶
@ -1,6 +1,7 @@
import React from 'react'
import PropTypes from 'prop-types'
import classNames from 'classnames'
import ImageNext from 'next/image'

import Link from './link'
import { H5 } from './typography'

@ -10,7 +11,7 @@ export default function Card({ title, to, image, header, small, onClick, childre

return (
    <div className={classNames(classes.root, { [classes.small]: !!small })}>
        {header && (
            <Link to={to} onClick={onClick} hidden>
            <Link to={to} onClick={onClick} noLinkLayout>
                {header}
            </Link>
        )}

@ -18,18 +19,17 @@ export default function Card({ title, to, image, header, small, onClick, childre

        <H5 className={classes.title}>
            {image && (
                <div className={classes.image}>
                    {/* eslint-disable-next-line @next/next/no-img-element */}
                    <img src={image} width={35} alt="" />
                    <ImageNext src={image} height={35} width={35} alt={`${title} Logo`} />
                </div>
            )}
            {title && (
                <Link to={to} onClick={onClick} hidden>
                <Link to={to} onClick={onClick} noLinkLayout>
                    {title}
                </Link>
            )}
        </H5>
    )}
    <Link to={to} onClick={onClick} hidden>
    <Link to={to} onClick={onClick} noLinkLayout>
        {children}
    </Link>
</div>
@ -14,96 +14,16 @@ import 'prismjs/components/prism-markdown.min.js'
|
||||||
import 'prismjs/components/prism-python.min.js'
|
import 'prismjs/components/prism-python.min.js'
|
||||||
 import 'prismjs/components/prism-yaml.min.js'

-import CUSTOM_TYPES from '../../meta/type-annotations.json'
-import { isString, htmlToReact } from './util'
+import { isString } from './util'
 import Link, { OptionalLink } from './link'
 import GitHubCode from './github'
-import Juniper from './juniper'
 import classes from '../styles/code.module.sass'
 import siteMetadata from '../../meta/site.json'
 import { binderBranch } from '../../meta/dynamicMeta.mjs'
+import dynamic from 'next/dynamic'

-const WRAP_THRESHOLD = 30
 const CLI_GROUPS = ['init', 'debug', 'project', 'ray', 'huggingface-hub']

-const CodeBlock = (props) => (
-    <Pre>
-        <Code {...props} />
-    </Pre>
-)
-
-export default CodeBlock
-
-export const Pre = (props) => {
-    return <pre className={classes['pre']}>{props.children}</pre>
-}
-
-export const InlineCode = ({ wrap = false, className, children, ...props }) => {
-    const codeClassNames = classNames(classes['inline-code'], className, {
-        [classes['wrap']]: wrap || (isString(children) && children.length >= WRAP_THRESHOLD),
-    })
-    return (
-        <code className={codeClassNames} {...props}>
-            {children}
-        </code>
-    )
-}
-
-InlineCode.propTypes = {
-    wrap: PropTypes.bool,
-    className: PropTypes.string,
-    children: PropTypes.node,
-}
-
-function linkType(el, showLink = true) {
-    if (!isString(el) || !el.length) return el
-    const elStr = el.trim()
-    if (!elStr) return el
-    const typeUrl = CUSTOM_TYPES[elStr]
-    const url = typeUrl == true ? DEFAULT_TYPE_URL : typeUrl
-    const ws = el[0] == ' '
-    return url && showLink ? (
-        <Fragment>
-            {ws && ' '}
-            <Link to={url} hideIcon>
-                {elStr}
-            </Link>
-        </Fragment>
-    ) : (
-        el
-    )
-}
-
-export const TypeAnnotation = ({ lang = 'python', link = true, children }) => {
-    // Hacky, but we're temporarily replacing a dot to prevent it from being split during highlighting
-    const TMP_DOT = '۔'
-    const code = Array.isArray(children) ? children.join('') : children || ''
-    const [rawText, meta] = code.split(/(?= \(.+\)$)/)
-    const rawStr = rawText.replace(/\./g, TMP_DOT)
-    const rawHtml =
-        lang === 'none' || !code ? code : Prism.highlight(rawStr, Prism.languages[lang], lang)
-    const html = rawHtml.replace(new RegExp(TMP_DOT, 'g'), '.').replace(/\n/g, ' ')
-    const result = htmlToReact(html)
-    const elements = Array.isArray(result) ? result : [result]
-    const annotClassNames = classNames(
-        'type-annotation',
-        `language-${lang}`,
-        classes['inline-code'],
-        classes['type-annotation'],
-        {
-            [classes['wrap']]: code.length >= WRAP_THRESHOLD,
-        }
-    )
-    return (
-        <span className={annotClassNames} role="code" aria-label="Type annotation">
-            {elements.map((el, i) => (
-                <Fragment key={i}>{linkType(el, !!link)}</Fragment>
-            ))}
-            {meta && <span className={classes['type-annotation-meta']}>{meta}</span>}
-        </span>
-    )
-}

 const splitLines = (children) => {
     const listChildrenPerLine = []

@@ -235,7 +155,7 @@ const handlePromot = ({ lineFlat, prompt }) => {
                 <Fragment key={j}>
                     {j !== 0 && ' '}
                     <span className={itemClassNames}>
-                        <OptionalLink hidden hideIcon to={url}>
+                        <OptionalLink noLinkLayout hideIcon to={url}>
                             {text}
                         </OptionalLink>
                     </span>

@@ -288,7 +208,7 @@ const addLineHighlight = (children, highlight) => {
     })
 }

-export const CodeHighlighted = ({ children, highlight, lang }) => {
+const CodeHighlighted = ({ children, highlight, lang }) => {
     const [html, setHtml] = useState()

     useEffect(

@@ -305,7 +225,7 @@ export const CodeHighlighted = ({ children, highlight, lang }) => {
     return <>{html}</>
 }

-export class Code extends React.Component {
+export default class Code extends React.Component {
     static defaultProps = {
         lang: 'none',
         executable: null,

@@ -354,6 +274,8 @@ export class Code extends React.Component {
     }
 }

+const JuniperDynamic = dynamic(() => import('./juniper'))
+
 const JuniperWrapper = ({ title, lang, children }) => {
     const { binderUrl, binderVersion } = siteMetadata
     const juniperTitle = title || 'Editable Code'

@@ -363,13 +285,13 @@ const JuniperWrapper = ({ title, lang, children }) => {
                 {juniperTitle}
                 <span className={classes['juniper-meta']}>
                     spaCy v{binderVersion} · Python 3 · via{' '}
-                    <Link to="https://mybinder.org/" hidden>
+                    <Link to="https://mybinder.org/" noLinkLayout>
                         Binder
                     </Link>
                 </span>
             </h4>

-            <Juniper
+            <JuniperDynamic
                 repo={binderUrl}
                 branch={binderBranch}
                 lang={lang}

@@ -381,7 +303,7 @@ const JuniperWrapper = ({ title, lang, children }) => {
                 }}
             >
                 {children}
-            </Juniper>
+            </JuniperDynamic>
         </div>
     )
 }
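The net effect of the hunks above is that `code.js` no longer imports `Juniper` statically: the editor is wrapped in `next/dynamic`, so its chunk is only fetched when an executable block actually renders. A minimal sketch of that pattern, with `./heavyWidget` as a hypothetical stand-in module:

```jsx
import dynamic from 'next/dynamic'

// The import() only runs, and the chunk only downloads, when the
// component first renders. './heavyWidget' is a made-up module name.
const HeavyWidget = dynamic(() => import('./heavyWidget'), {
    loading: () => <span>Loading...</span>, // optional placeholder
    ssr: false, // skip server rendering if the module touches window
})

export default function Page() {
    return <HeavyWidget />
}
```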
website/src/components/codeBlock.js (new file)
@@ -0,0 +1,14 @@
+import React from 'react'
+import Code from './codeDynamic'
+import classes from '../styles/code.module.sass'
+
+export const Pre = (props) => {
+    return <pre className={classes['pre']}>{props.children}</pre>
+}
+
+const CodeBlock = (props) => (
+    <Pre>
+        <Code {...props} />
+    </Pre>
+)
+export default CodeBlock
website/src/components/codeDynamic.js (new file)
@@ -0,0 +1,5 @@
+import dynamic from 'next/dynamic'
+
+export default dynamic(() => import('./code'), {
+    loading: () => <div style={{ color: 'white', padding: '1rem' }}>Loading...</div>,
+})
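Call sites then swap the import path and keep their JSX unchanged; a sketch of a consumer, with the snippet contents made up:

```jsx
import Code from './codeDynamic'

// Shows the Loading... placeholder until the real <Code> chunk arrives.
const Example = () => <Code lang="python">{'import spacy'}</Code>
```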
website/src/components/copy.js
@@ -14,7 +14,7 @@ export function copyToClipboard(ref, callback) {
     }
 }

-export default function CopyInput({ text, prefix }) {
+export default function CopyInput({ text, description, prefix }) {
     const isClient = typeof window !== 'undefined'
     const [supportsCopy, setSupportsCopy] = useState(false)

@@ -41,6 +41,7 @@ export default function CopyInput({ text, prefix }) {
             defaultValue={text}
             rows={1}
             onClick={selectText}
+            aria-label={description}
         />
         {supportsCopy && (
             <button title="Copy to clipboard" onClick={onClick}>
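The new `description` prop only feeds the textarea's `aria-label`, so existing call sites keep working while accessible ones can pass a human-readable label. An illustrative call, values made up:

```jsx
<CopyInput text="python -m spacy download en_core_web_sm" description="Download command" />
```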
website/src/components/embed.js
@@ -5,8 +5,8 @@ import ImageNext from 'next/image'

 import Link from './link'
 import Button from './button'
-import { InlineCode } from './code'
-import { MarkdownToReact } from './util'
+import { InlineCode } from './inlineCode'
+import MarkdownToReact from './markdownToReactDynamic'

 import classes from '../styles/embed.module.sass'

@@ -88,10 +88,16 @@ const Image = ({ src, alt, title, href, ...props }) => {
     const markdownComponents = { code: InlineCode, p: Fragment, a: Link }
     return (
         <figure className="gatsby-resp-image-figure">
-            <Link className={linkClassNames} href={href ?? src} hidden forceExternal>
+            {href ? (
+                <Link className={linkClassNames} href={href} noLinkLayout forceExternal>
                     {/* eslint-disable-next-line @next/next/no-img-element */}
                     <img className={classes.image} src={src} alt={alt} width={650} height="auto" />
                 </Link>
+            ) : (
+                /* eslint-disable-next-line @next/next/no-img-element */
+                <img className={classes.image} src={src} alt={alt} width={650} height="auto" />
+            )}
+
             {title && (
                 <figcaption className="gatsby-resp-image-figcaption">
                     <MarkdownToReact markdown={title} />

@@ -104,7 +110,7 @@ const Image = ({ src, alt, title, href, ...props }) => {
 const ImageFill = ({ image, ...props }) => {
     return (
         <span
-            class={classes['figure-fill']}
+            className={classes['figure-fill']}
             style={{ paddingBottom: `${(image.height / image.width) * 100}%` }}
         >
             <ImageNext src={image.src} {...props} fill />
website/src/components/footer.js
@@ -21,7 +21,7 @@ export default function Footer({ wide = false }) {
                 <li className={classes.label}>{label}</li>
                 {items.map(({ text, url }, j) => (
                     <li key={j}>
-                        <Link to={url} hidden>
+                        <Link to={url} noLinkLayout>
                             {text}
                         </Link>
                     </li>

@@ -42,14 +42,14 @@ export default function Footer({ wide = false }) {
             <div className={classNames(classes.content, classes.copy)}>
                 <span>
                     © 2016-{new Date().getFullYear()}{' '}
-                    <Link to={companyUrl} hidden>
+                    <Link to={companyUrl} noLinkLayout>
                         {company}
                     </Link>
                 </span>
-                <Link to={companyUrl} aria-label={company} hidden className={classes.logo}>
+                <Link to={companyUrl} aria-label={company} noLinkLayout className={classes.logo}>
                     <SVG src={explosionLogo.src} width={45} height={45} />
                 </Link>
-                <Link to={`${companyUrl}/legal`} hidden>
+                <Link to={`${companyUrl}/legal`} noLinkLayout>
                     Legal / Imprint
                 </Link>
             </div>
website/src/components/github.js
@@ -5,7 +5,7 @@ import classNames from 'classnames'
 import Icon from './icon'
 import Link from './link'
 import classes from '../styles/code.module.sass'
-import { Code } from './code'
+import Code from './codeDynamic'

 const defaultErrorMsg = `Can't fetch code example from GitHub :(

@@ -42,7 +42,7 @@ const GitHubCode = ({ url, lang, errorMsg = defaultErrorMsg, className }) => {
     return (
         <>
             <header className={classes.header}>
-                <Link to={url} hidden>
+                <Link to={url} noLinkLayout>
                     <Icon name="github" width={16} inline />
                     <code
                         className={classNames(classes['inline-code'], classes['inline-code-dark'])}
website/src/components/htmlToReact.js (new file)
@@ -0,0 +1,12 @@
+import { Parser as HtmlToReactParser } from 'html-to-react'
+
+const htmlToReactParser = new HtmlToReactParser()
+/**
+ * Convert raw HTML to React elements
+ * @param {string} html - The HTML markup to convert.
+ * @returns {Node} - The converted React elements.
+ */
+
+export default function HtmlToReact(props) {
+    return htmlToReactParser.parse(props.children)
+}
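The parser instance is created once at module level and reused across renders. A hypothetical call site, where the children must be a raw HTML string:

```jsx
import HtmlToReact from './htmlToReact'

// The string children are parsed into React elements at render time.
const Example = () => <HtmlToReact>{'<em>parsed</em> at render time'}</HtmlToReact>
```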
website/src/components/inlineCode.js (new file)
@@ -0,0 +1,23 @@
+import React from 'react'
+import PropTypes from 'prop-types'
+import classNames from 'classnames'
+import { isString } from './util'
+import classes from '../styles/code.module.sass'
+
+const WRAP_THRESHOLD = 30
+
+export const InlineCode = ({ wrap = false, className, children, ...props }) => {
+    const codeClassNames = classNames(classes['inline-code'], className, {
+        [classes['wrap']]: wrap || (isString(children) && children.length >= WRAP_THRESHOLD),
+    })
+    return (
+        <code className={codeClassNames} {...props}>
+            {children}
+        </code>
+    )
+}
+InlineCode.propTypes = {
+    wrap: PropTypes.bool,
+    className: PropTypes.string,
+    children: PropTypes.node,
+}
@@ -12,17 +12,17 @@ const spacyTheme = createTheme({
     theme: 'dark',
     settings: {
         background: 'var(--color-front)',
-        foreground: 'var(--color-subtle)',
+        foreground: 'var(--color-subtle-on-dark)',
         caret: 'var(--color-theme-dark)',
-        selection: 'var(--color-theme)',
-        selectionMatch: 'var(--color-theme)',
+        selection: 'var(--color-theme-dark)',
+        selectionMatch: 'var(--color-theme-dark)',
         gutterBackground: 'var(--color-front)',
-        gutterForeground: 'var(--color-subtle)',
+        gutterForeground: 'var(--color-subtle-on-dark)',
         fontFamily: 'var(--font-code)',
     },
     styles: [
         { tag: t.comment, color: 'var(--syntax-comment)' },
-        { tag: t.variableName, color: 'var(--color-subtle)' },
+        { tag: t.variableName, color: 'var(--color-subtle-on-dark)' },
         { tag: [t.string, t.special(t.brace)], color: '#fff' },
         { tag: t.number, color: 'var(--syntax-number)' },
         { tag: t.string, color: 'var(--syntax-selector)' },
website/src/components/landing.js
@@ -1,17 +1,17 @@
 import React from 'react'
 import classNames from 'classnames'

-import patternDefault from '../images/pattern_blue.jpg'
-import patternNightly from '../images/pattern_nightly.jpg'
-import patternLegacy from '../images/pattern_legacy.jpg'
-import overlayDefault from '../images/pattern_landing.jpg'
-import overlayNightly from '../images/pattern_landing_nightly.jpg'
-import overlayLegacy from '../images/pattern_landing_legacy.jpg'
+import patternDefault from '../images/pattern_blue.png'
+import patternNightly from '../images/pattern_nightly.png'
+import patternLegacy from '../images/pattern_legacy.png'
+import overlayDefault from '../images/pattern_landing.png'
+import overlayNightly from '../images/pattern_landing_nightly.png'
+import overlayLegacy from '../images/pattern_landing_legacy.png'

 import Grid from './grid'
 import { Content } from './main'
 import Button from './button'
-import CodeBlock from './code'
+import CodeBlock from './codeBlock'
 import { H1, H2, H3 } from './typography'
 import Link from './link'
 import classes from '../styles/landing.module.sass'

@@ -110,6 +110,7 @@ export const LandingBanner = ({
     })
     const style = {
         '--color-theme': background,
+        '--color-theme-dark': background,
         '--color-back': color,
         backgroundImage: backgroundImage ? `url(${backgroundImage})` : null,
     }

@@ -124,7 +125,7 @@ export const LandingBanner = ({
                     <span className={classes['label']}>{label}</span>
                 </div>
             )}
-            <Link to={to} hidden>
+            <Link to={to} noLinkLayout>
                 {title}
             </Link>
         </Heading>
website/src/components/link.js
@@ -26,7 +26,7 @@ export default function Link({
     to,
     href,
     onClick,
-    hidden = false,
+    noLinkLayout = false,
     hideIcon = false,
     ws = false,
     forceExternal = false,

@@ -36,10 +36,10 @@ export default function Link({
     const dest = to || href
     const external = forceExternal || /(http(s?)):\/\//gi.test(dest)
     const icon = getIcon(dest)
-    const withIcon = !hidden && !hideIcon && !!icon && !isImage(children)
+    const withIcon = !noLinkLayout && !hideIcon && !!icon && !isImage(children)
     const sourceWithText = withIcon && isString(children)
     const linkClassNames = classNames(classes.root, className, {
-        [classes.hidden]: hidden,
+        [classes['no-link-layout']]: noLinkLayout,
         [classes.nowrap]: (withIcon && !sourceWithText) || icon === 'network',
         [classes['with-icon']]: withIcon,
     })

@@ -97,7 +97,7 @@ Link.propTypes = {
     to: PropTypes.string,
     href: PropTypes.string,
     onClick: PropTypes.func,
-    hidden: PropTypes.bool,
+    noLinkLayout: PropTypes.bool,
     hideIcon: PropTypes.bool,
     ws: PropTypes.bool,
     className: PropTypes.string,
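The rename is mechanical but reads much better at call sites: `noLinkLayout` says "render this link without the usual link styling and source icon", where `hidden` wrongly suggested the link was not rendered at all and collided with the global HTML `hidden` attribute. An illustrative call, destination made up:

```jsx
<Link to="/usage/spacy-101" noLinkLayout>
    Looks like plain text, still navigates
</Link>
```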
website/src/components/main.js
@@ -2,11 +2,11 @@ import React from 'react'
 import PropTypes from 'prop-types'
 import classNames from 'classnames'

-import patternBlue from '../images/pattern_blue.jpg'
-import patternGreen from '../images/pattern_green.jpg'
-import patternPurple from '../images/pattern_purple.jpg'
-import patternNightly from '../images/pattern_nightly.jpg'
-import patternLegacy from '../images/pattern_legacy.jpg'
+import patternBlue from '../images/pattern_blue.png'
+import patternGreen from '../images/pattern_green.png'
+import patternPurple from '../images/pattern_purple.png'
+import patternNightly from '../images/pattern_nightly.png'
+import patternLegacy from '../images/pattern_legacy.png'
 import classes from '../styles/main.module.sass'

 const patterns = {
website/src/components/markdownToReact.js (new file)
@@ -0,0 +1,32 @@
+import React, { useEffect, useState } from 'react'
+import { serialize } from 'next-mdx-remote/serialize'
+import { MDXRemote } from 'next-mdx-remote'
+import remarkPlugins from '../../plugins/index.mjs'
+
+/**
+ * Convert raw Markdown to React
+ * @param {String} markdown - The Markdown markup to convert.
+ * @param {Object} [remarkReactComponents] - Optional React components to use
+ * for HTML elements.
+ * @returns {Node} - The converted React elements.
+ */
+export default function MarkdownToReact({ markdown }) {
+    const [mdx, setMdx] = useState(null)
+
+    useEffect(() => {
+        const getMdx = async () => {
+            setMdx(
+                await serialize(markdown, {
+                    parseFrontmatter: false,
+                    mdxOptions: {
+                        remarkPlugins,
+                    },
+                })
+            )
+        }
+
+        getMdx()
+    }, [markdown])
+
+    return mdx ? <MDXRemote {...mdx} /> : <></>
+}
website/src/components/markdownToReactDynamic.js (new file)
@@ -0,0 +1,5 @@
+import dynamic from 'next/dynamic'
+
+export default dynamic(() => import('./markdownToReact'), {
+    loading: () => <p>Loading...</p>,
+})
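Because `serialize` is asynchronous, `MarkdownToReact` renders nothing on the first pass and fills in once the MDX is compiled in the effect; consumers import the dynamic wrapper so `next-mdx-remote` stays out of the main bundle. A hypothetical call site:

```jsx
import MarkdownToReact from './markdownToReactDynamic'

// Any Markdown string works; it is compiled to MDX client-side.
const Caption = () => <MarkdownToReact markdown="A **bold** caption with `code`." />
```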
website/src/components/navigation.js
@@ -30,7 +30,7 @@ const NavigationDropdown = ({ items = [], section }) => {

 export default function Navigation({ title, items = [], section, search, alert, children }) {
     const logo = (
-        <Link to="/" aria-label={title} hidden>
+        <Link to="/" aria-label={title} noLinkLayout>
             <h1 className={classes.title}>{title}</h1>
             <SVG src={logoSpacy.src} className={classes.logo} width={300} height={96} />
         </Link>

@@ -57,7 +57,7 @@ export default function Navigation({ title, items = [], section, search, alert,
         })
         return (
             <li key={i} className={itemClassNames}>
-                <Link to={url} tabIndex={isActive ? '-1' : null} hidden>
+                <Link to={url} tabIndex={isActive ? '-1' : null} noLinkLayout>
                     {text}
                 </Link>
             </li>
website/src/components/quickstart.js
@@ -251,7 +251,12 @@ const Quickstart = ({
                 </menu>
             </pre>
             {showCopy && (
-                <textarea ref={copyAreaRef} className={classes['copy-area']} rows={1} />
+                <textarea
+                    ref={copyAreaRef}
+                    className={classes['copy-area']}
+                    rows={1}
+                    aria-label={`Interactive code example for ${title}`}
+                />
             )}
         </div>
     </Container>
website/src/components/readnext.js
@@ -9,15 +9,15 @@ import classes from '../styles/readnext.module.sass'

 export default function ReadNext({ title, to }) {
     return (
-        <div className={classes.root}>
-            <Link to={to} hidden>
+        <Link to={to} noLinkLayout className={classes.root}>
+            <span>
                 <Label>Read next</Label>
                 {title}
+            </span>
+            <span className={classes.icon}>
+                <Icon name="arrowright" aria-hidden="true" />
+            </span>
         </Link>
-            <Link to={to} hidden className={classes.icon} aria-hidden="true">
-                <Icon name="arrowright" />
-            </Link>
-        </div>
     )
 }
website/src/components/seo.js
@@ -9,6 +9,8 @@ import socialImageLegacy from '../images/social_legacy.jpg'
 import siteMetadata from '../../meta/site.json'
 import Head from 'next/head'

+import { siteUrl } from '../../meta/dynamicMeta.mjs'
+
 function getPageTitle(title, sitename, slogan, sectionTitle, nightly, legacy) {
     if (sectionTitle && title) {
         const suffix = nightly ? ' (nightly)' : legacy ? ' (legacy)' : ''

@@ -25,7 +27,7 @@ function getImage(section, nightly, legacy) {
     if (legacy) return socialImageLegacy
     if (section === 'api') return socialImageApi
     if (section === 'universe') return socialImageUniverse
-    return socialImageDefault
+    return `${siteUrl}${socialImageDefault.src}`
 }

 export default function SEO({

@@ -46,7 +48,7 @@ export default function SEO({
         nightly,
         legacy
     )
-    const socialImage = getImage(section, nightly, legacy).src
+    const socialImage = getImage(section, nightly, legacy)
     const meta = [
         {
             name: 'description',
website/src/components/title.js
@@ -5,7 +5,7 @@ import classNames from 'classnames'
 import Button from './button'
 import Tag from './tag'
 import { OptionalLink } from './link'
-import { InlineCode } from './code'
+import { InlineCode } from './inlineCode'
 import { H1, Label, InlineList, Help } from './typography'
 import Icon from './icon'

@@ -51,8 +51,7 @@ export default function Title({

             {image && (
                 <div className={classes.image}>
-                    {/* eslint-disable-next-line @next/next/no-img-element */}
-                    <img src={image} width={100} height={100} alt="" />
+                    <Image src={image} width={100} height={100} alt={`${title} Logo`} />
                 </div>
             )}
         </div>
website/src/components/typeAnnotation.js (new file)
@@ -0,0 +1,51 @@
+import React from 'react'
+import classNames from 'classnames'
+import CUSTOM_TYPES from '../../meta/type-annotations.json'
+import Link from './link'
+import classes from '../styles/code.module.sass'
+
+export const WRAP_THRESHOLD = 30
+
+const specialCharacterList = ['[', ']', ',', ', ']
+
+const highlight = (element) =>
+    specialCharacterList.includes(element) ? (
+        <span className={classes['cli-arg-subtle']}>{element}</span>
+    ) : (
+        element
+    )
+
+function linkType(el, showLink = true, key) {
+    if (!el.length) return el
+    const elStr = el.trim()
+    if (!elStr) return el
+    const typeUrl = CUSTOM_TYPES[elStr]
+    const url = typeUrl == true ? DEFAULT_TYPE_URL : typeUrl
+    return url && showLink ? (
+        <Link to={url} hideIcon key={key}>
+            {elStr}
+        </Link>
+    ) : (
+        highlight(el)
+    )
+}
+
+export const TypeAnnotation = ({ lang = 'python', link = true, children }) => {
+    const code = Array.isArray(children) ? children.join('') : children || ''
+    const [rawText, meta] = code.split(/(?= \(.+\)$)/)
+    const annotClassNames = classNames(
+        'type-annotation',
+        `language-${lang}`,
+        classes['inline-code'],
+        classes['type-annotation'],
+        {
+            [classes['wrap']]: code.length >= WRAP_THRESHOLD,
+        }
+    )
+    return (
+        <span className={annotClassNames} role="code" aria-label="Type annotation">
+            {rawText.split(/(\[|\]|,)/).map((el, i) => linkType(el, !!link, i))}
+            {meta && <span className={classes['type-annotation-meta']}>{meta}</span>}
+        </span>
+    )
+}
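Unlike the Prism-based version it replaces, the new `TypeAnnotation` tokenizes with a plain split on brackets and commas, links any token found in `type-annotations.json`, and dims the separators. A hypothetical call:

```jsx
// 'Optional[List[str]]'.split(/(\[|\]|,)/) yields
// ['Optional', '[', 'List', '[', 'str', ']', '', ']', ''];
// empty tokens pass through linkType untouched, names found in
// type-annotations.json become links, brackets get the subtle style.
<TypeAnnotation>Optional[List[str]]</TypeAnnotation>
```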
website/src/components/util.js
@@ -1,12 +1,6 @@
-import React, { Fragment, useEffect, useState } from 'react'
-import { Parser as HtmlToReactParser } from 'html-to-react'
+import React, { Fragment } from 'react'
 import siteMetadata from '../../meta/site.json'
 import { domain } from '../../meta/dynamicMeta.mjs'
-import remarkPlugins from '../../plugins/index.mjs'
-import { serialize } from 'next-mdx-remote/serialize'
-import { MDXRemote } from 'next-mdx-remote'

-const htmlToReactParser = new HtmlToReactParser()
-
 const isNightly = siteMetadata.nightlyBranches.includes(domain)
 export const DEFAULT_BRANCH = isNightly ? 'develop' : 'master'

@@ -70,43 +64,6 @@ export function isEmptyObj(obj) {
     return Object.entries(obj).length === 0 && obj.constructor === Object
 }

-/**
- * Convert raw HTML to React elements
- * @param {string} html - The HTML markup to convert.
- * @returns {Node} - The converted React elements.
- */
-export function htmlToReact(html) {
-    return htmlToReactParser.parse(html)
-}
-
-/**
- * Convert raw Markdown to React
- * @param {String} markdown - The Markdown markup to convert.
- * @param {Object} [remarkReactComponents] - Optional React components to use
- * for HTML elements.
- * @returns {Node} - The converted React elements.
- */
-export function MarkdownToReact({ markdown }) {
-    const [mdx, setMdx] = useState(null)
-
-    useEffect(() => {
-        const getMdx = async () => {
-            setMdx(
-                await serialize(markdown, {
-                    parseFrontmatter: false,
-                    mdxOptions: {
-                        remarkPlugins,
-                    },
-                })
-            )
-        }
-
-        getMdx()
-    }, [markdown])
-
-    return mdx ? <MDXRemote {...mdx} /> : <></>
-}
-
 /**
  * Join an array of nodes with a given string delimiter, like Array.join for React
  * @param {Array} arr - The elements to join.
@@ -1,3 +1,3 @@
-<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 500 500" width="200" height="200">
-    <path fill="currentColor" d="M111.7 74.9 … z" />
+<svg height="100" viewBox="0 0 100 100" width="100" xmlns="http://www.w3.org/2000/svg">
+    <path d="M22.8 11.9 … z"/>
 </svg>
[SVG path data omitted: redrawn logo artwork. Size: 2.4 KiB before, 1.4 KiB after]

BIN  website/src/images/pattern_blue.png             (new file: 48 KiB · before: 114 KiB)
BIN  website/src/images/pattern_green.png            (new file: 48 KiB · before: 119 KiB)
BIN  website/src/images/pattern_landing.png          (new file: 7.8 KiB · before: 101 KiB)
BIN  website/src/images/pattern_landing_legacy.png   (new file: 7.9 KiB · before: 86 KiB)
BIN  website/src/images/pattern_landing_nightly.png  (new file: 7.9 KiB · before: 126 KiB)
BIN  website/src/images/pattern_legacy.png           (new file: 48 KiB · before: 106 KiB)
BIN  website/src/images/pattern_nightly.png          (new file: 48 KiB · before: 157 KiB)
BIN  website/src/images/pattern_purple.png           (new file: 48 KiB · before: 112 KiB)
@@ -1,7 +1,10 @@
 import Link from './components/link'
 import Section, { Hr } from './components/section'
 import { Table, Tr, Th, Tx, Td } from './components/table'
-import CodeBlock, { Pre, Code, InlineCode, TypeAnnotation } from './components/code'
+import Code from './components/codeDynamic'
+import { TypeAnnotation } from './components/typeAnnotation'
+import { InlineCode } from './components/inlineCode'
+import CodeBlock, { Pre } from './components/codeBlock'
 import { Ol, Ul, Li } from './components/list'
 import { H2, H3, H4, H5, P, Abbr, Help, Label } from './components/typography'
 import Accordion from './components/accordion'
@@ -31,7 +31,7 @@
     width: $width
     height: $width
     flex: 0 0 $width
-    background: var(--color-theme)
+    background: var(--color-theme-dark)
     color: var(--color-back)
     border-radius: 50%
     padding: 0.35rem

@@ -10,7 +10,7 @@
     padding: 1rem
     box-shadow: var(--box-shadow)
     border-top: 2px solid
-    color: var(--color-theme)
+    color: var(--color-theme-dark)

 .warning
     --alert-bg: var(--color-yellow-light)

@@ -77,7 +77,7 @@ $border-radius: 6px
     padding: 1.5rem 2.5rem 2.5rem 2rem

     a, a:hover
-        color: var(--color-subtle)
+        color: var(--color-subtle-on-dark)

     & > *:last-child
         margin-bottom: 0

@@ -2,7 +2,7 @@
     display: inline-block
     padding: 0.65rem 1.1rem 0.825rem
     margin-bottom: 1px
-    border: 2px solid var(--color-theme)
+    border: 2px solid var(--color-theme-dark)
     border-radius: 2em
     text-align: center
     transition: background-color, color 0.25s ease

@@ -18,7 +18,7 @@
         padding: 0.8em 1.1em 1em

 .primary
-    background: var(--color-theme)
+    background: var(--color-theme-dark)
     color: var(--color-back)

     &:hover

@@ -27,7 +27,7 @@

 .secondary
     background: var(--color-back)
-    color: var(--color-theme)
+    color: var(--color-theme-dark)

     &:hover
         color: var(--color-theme-dark)

@@ -1,7 +1,7 @@
 .pre
     position: relative
     background: var(--color-front)
-    color: var(--color-subtle)
+    color: var(--color-subtle-on-dark)
     border-radius: var(--border-radius)
     overflow: auto
     width: 100%

@@ -152,7 +152,7 @@

 .juniper-button
     transition: background-color 0.15s ease
-    background: var(--color-theme)
+    background: var(--color-theme-dark)
     margin: 0.5rem 0 1rem 2rem

     &:hover

@@ -182,8 +182,8 @@
     color: inherit !important

 .cli-arg-highlight
-    background: var(--color-theme)
-    border-color: var(--color-theme)
+    background: var(--color-theme-dark)
+    border-color: var(--color-theme-dark)
     color: var(--color-back) !important

 .cli-arg-subtle

@@ -32,7 +32,7 @@
 .copy
     border-top: 1px dotted var(--color-subtle)
     font-size: var(--font-size-xs)
-    color: var(--color-subtle-dark)
+    color: var(--color-front-dark)
     text-align: center
     width: 100%

@@ -42,4 +42,4 @@
         vertical-align: middle

         &:hover
-            color: var(--color-theme)
+            color: var(--color-theme-dark)

@@ -31,7 +31,7 @@

 .title
     font-weight: bold
-    color: var(--color-theme)
+    color: var(--color-theme-dark)
     display: block
     margin-bottom: var(--spacing-xs)
     font-size: var(--font-size-md)

@@ -41,7 +41,7 @@
         color: inherit

 .icon
-    color: var(--color-theme)
+    color: var(--color-theme-dark)
     vertical-align: baseline
     position: relative
     bottom: -2px

@@ -2,22 +2,25 @@

 .header
     background: var(--color-theme)
-    padding-top: calc(var(--height-nav) * 1.5)
+    padding-top: var(--height-nav)
     width: 100%
     text-align: center
+    --header-top-margin: 27px

 .header-wrapper
     background: var(--color-theme)
-    background-position: top center
+    background-position: center var(--header-top-margin)
     background-repeat: repeat
     width: 100%
+    background-size: 799px 643px

 .header-content
-    background: transparent
-    background-position: center -138px
+    background-position: center calc(-138px + var(--header-top-margin))
     background-repeat: no-repeat
     width: 100%
-    min-height: 573px
+    min-height: calc(573px + var(--header-top-margin))
+    background-size: 1444px 573px
+    padding-top: var(--header-top-margin)

 .title
     font: normal 600 7rem/#{1} var(--font-secondary)

@@ -65,9 +65,10 @@
     --color-dark: hsl(214, 15%, 32%)
     --color-dark-secondary: hsl(214, 14%, 22%)
     --color-subtle-opaque: hsla(0, 0%, 96%, 0.56)
-    --color-subtle: hsl(0, 0%, 87%)
-    --color-subtle-light: hsl(0, 0%, 96%)
-    --color-subtle-dark: hsl(162, 5%, 60%)
+    --color-subtle: hsla(0, 0%, 0%, 0.13)
+    --color-subtle-light: hsla(0, 0%, 0%, 0.04)
+    --color-subtle-dark: hsla(162, 5%, 0%, 0.55)
+    --color-subtle-on-dark: hsla(0, 0%, 100%, 0.87)

     --color-green-medium: hsl(108, 66%, 63%)
     --color-green-transparent: hsla(108, 66%, 63%, 0.12)

@@ -301,13 +302,13 @@ p
     margin-bottom: 0

 a:focus
-    outline: 1px dotted var(--color-theme)
+    outline: 1px dotted var(--color-theme-dark)

 body [id]:target
     padding-top: calc(var(--height-nav) * 1.25) !important

 ::selection
-    background: var(--color-theme)
+    background: var(--color-theme-dark)
     color: var(--color-back)
     text-shadow: none

@@ -387,7 +388,7 @@ body [id]:target

 [class*="language-bash"] .token
     &.function
-        color: var(--color-subtle)
+        color: var(--color-subtle-on-dark)

     &.operator, &.variable
         color: var(--syntax-comment)

@@ -397,7 +398,7 @@ body [id]:target
         color: var(--syntax-comment)

     .token
-        color: var(--color-subtle)
+        color: var(--color-subtle-on-dark)

 .gatsby-highlight-code-line
     background-color: var(--color-dark-secondary)

@@ -524,7 +525,7 @@ body [id]:target
     display: block
     font: bold var(--font-size-lg)/var(--line-height-md) var(--font-secondary)
     text-transform: uppercase
-    color: var(--color-theme)
+    color: var(--color-theme-dark)

 .algolia-autocomplete .algolia-docsearch-suggestion--subcategory-column
     color: var(--color-dark)

@@ -1,13 +1,13 @@
 .root
-    color: var(--color-theme)
+    color: var(--color-theme-dark)
     border-bottom: 1px solid
     transition: color 0.2s ease
     cursor: pointer

     &:hover
-        color: var(--color-theme-dark)
+        color: var(--color-front)

-.hidden
+.no-link-layout
     border: none
     color: inherit