mirror of https://github.com/explosion/spaCy.git
synced 2025-04-19 16:41:59 +03:00

Merge branch 'explosion:master' into master

This commit is contained in: commit c8b8c54d65

.github/azure-steps.yml (vendored): 44 lines changed
@@ -52,17 +52,17 @@ steps:
       python -W error -c "import spacy"
     displayName: "Test import"

-  - script: |
-      python -m spacy download ca_core_news_sm
-      python -m spacy download ca_core_news_md
-      python -c "import spacy; nlp=spacy.load('ca_core_news_sm'); doc=nlp('test')"
-    displayName: 'Test download CLI'
-    condition: eq(variables['python_version'], '3.8')
-
-  - script: |
-      python -W error -c "import ca_core_news_sm; nlp = ca_core_news_sm.load(); doc=nlp('test')"
-    displayName: 'Test no warnings on load (#11713)'
-    condition: eq(variables['python_version'], '3.8')
+  # - script: |
+  #     python -m spacy download ca_core_news_sm
+  #     python -m spacy download ca_core_news_md
+  #     python -c "import spacy; nlp=spacy.load('ca_core_news_sm'); doc=nlp('test')"
+  #   displayName: 'Test download CLI'
+  #   condition: eq(variables['python_version'], '3.8')
+  #
+  # - script: |
+  #     python -W error -c "import ca_core_news_sm; nlp = ca_core_news_sm.load(); doc=nlp('test')"
+  #   displayName: 'Test no warnings on load (#11713)'
+  #   condition: eq(variables['python_version'], '3.8')

   - script: |
       python -m spacy convert extra/example_data/ner_example_data/ner-token-per-line-conll2003.json .

@@ -86,17 +86,17 @@ steps:
     displayName: 'Test train CLI'
     condition: eq(variables['python_version'], '3.8')

-  - script: |
-      python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_sm'}; config.to_disk('ner_source_sm.cfg')"
-      PYTHONWARNINGS="error,ignore::DeprecationWarning" python -m spacy assemble ner_source_sm.cfg output_dir
-    displayName: 'Test assemble CLI'
-    condition: eq(variables['python_version'], '3.8')
-
-  - script: |
-      python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_md'}; config.to_disk('ner_source_md.cfg')"
-      python -m spacy assemble ner_source_md.cfg output_dir 2>&1 | grep -q W113
-    displayName: 'Test assemble CLI vectors warning'
-    condition: eq(variables['python_version'], '3.8')
+  # - script: |
+  #     python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_sm'}; config.to_disk('ner_source_sm.cfg')"
+  #     PYTHONWARNINGS="error,ignore::DeprecationWarning" python -m spacy assemble ner_source_sm.cfg output_dir
+  #   displayName: 'Test assemble CLI'
+  #   condition: eq(variables['python_version'], '3.8')
+  #
+  # - script: |
+  #     python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_md'}; config.to_disk('ner_source_md.cfg')"
+  #     python -m spacy assemble ner_source_md.cfg output_dir 2>&1 | grep -q W113
+  #   displayName: 'Test assemble CLI vectors warning'
+  #   condition: eq(variables['python_version'], '3.8')

   - script: |
       python -m pip install -U -r requirements.txt

.pre-commit-config.yaml

@@ -5,7 +5,7 @@ repos:
     - id: black
       language_version: python3.7
       additional_dependencies: ['click==8.0.4']
-  - repo: https://gitlab.com/pycqa/flake8
+  - repo: https://github.com/pycqa/flake8
     rev: 5.0.4
     hooks:
     - id: flake8

requirements.txt

@@ -10,7 +10,8 @@ wasabi>=0.9.1,<1.1.0
 srsly>=2.4.3,<3.0.0
 catalogue>=2.0.6,<2.1.0
 typer>=0.3.0,<0.8.0
-pathy>=0.3.5
+pathy>=0.10.0
+smart-open>=5.2.1,<7.0.0
 # Third party dependencies
 numpy>=1.15.0
 requests>=2.13.0,<3.0.0

@@ -30,7 +31,7 @@ pytest-timeout>=1.3.0,<2.0.0
 mock>=2.0.0,<3.0.0
 flake8>=3.8.0,<6.0.0
 hypothesis>=3.27.0,<7.0.0
-mypy>=0.980,<0.990; platform_machine != "aarch64" and python_version >= "3.7"
+mypy>=0.990,<0.1000; platform_machine != "aarch64" and python_version >= "3.7"
 types-dataclasses>=0.1.3; python_version < "3.7"
 types-mock>=0.1.1
 types-setuptools>=57.0.0

setup.cfg

@@ -52,7 +52,8 @@ install_requires =
     catalogue>=2.0.6,<2.1.0
     # Third-party dependencies
     typer>=0.3.0,<0.8.0
-    pathy>=0.3.5
+    pathy>=0.10.0
+    smart-open>=5.2.1,<7.0.0
     tqdm>=4.38.0,<5.0.0
     numpy>=1.15.0
     requests>=2.13.0,<3.0.0

spacy/about.py

@@ -1,6 +1,6 @@
 # fmt: off
 __title__ = "spacy"
-__version__ = "3.4.2"
+__version__ = "3.5.0"
 __download_url__ = "https://github.com/explosion/spacy-models/releases/download"
 __compatibility__ = "https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json"
 __projects__ = "https://github.com/explosion/projects"

spacy/cli/__init__.py

@@ -27,6 +27,7 @@ from .project.dvc import project_update_dvc  # noqa: F401
 from .project.push import project_push  # noqa: F401
 from .project.pull import project_pull  # noqa: F401
 from .project.document import project_document  # noqa: F401
+from .find_threshold import find_threshold  # noqa: F401


 @app.command("link", no_args_is_help=True, deprecated=True, hidden=True)

spacy/cli/_util.py

@@ -23,7 +23,7 @@ from ..util import is_compatible_version, SimpleFrozenDict, ENV_VARS
 from .. import about

 if TYPE_CHECKING:
-    from pathy import Pathy  # noqa: F401
+    from pathy import FluidPath  # noqa: F401


 SDIST_SUFFIX = ".tar.gz"

@@ -331,7 +331,7 @@ def import_code(code_path: Optional[Union[Path, str]]) -> None:
         msg.fail(f"Couldn't load Python code: {code_path}", e, exits=1)


-def upload_file(src: Path, dest: Union[str, "Pathy"]) -> None:
+def upload_file(src: Path, dest: Union[str, "FluidPath"]) -> None:
     """Upload a file.

     src (Path): The source path.

@@ -339,13 +339,20 @@ def upload_file(src: Path, dest: Union[str, "Pathy"]) -> None:
     """
     import smart_open

+    # Create parent directories for local paths
+    if isinstance(dest, Path):
+        if not dest.parent.exists():
+            dest.parent.mkdir(parents=True)
+
     dest = str(dest)
     with smart_open.open(dest, mode="wb") as output_file:
         with src.open(mode="rb") as input_file:
             output_file.write(input_file.read())


-def download_file(src: Union[str, "Pathy"], dest: Path, *, force: bool = False) -> None:
+def download_file(
+    src: Union[str, "FluidPath"], dest: Path, *, force: bool = False
+) -> None:
     """Download a file using smart_open.

     url (str): The URL of the file.

@@ -358,7 +365,7 @@ def download_file(src: Union[str, "Pathy"], dest: Path, *, force: bool = False)
     if dest.exists() and not force:
         return None
     src = str(src)
-    with smart_open.open(src, mode="rb", ignore_ext=True) as input_file:
+    with smart_open.open(src, mode="rb", compression="disable") as input_file:
         with dest.open(mode="wb") as output_file:
             shutil.copyfileobj(input_file, output_file)

@@ -368,7 +375,7 @@ def ensure_pathy(path):
     slow and annoying Google Cloud warning)."""
    from pathy import Pathy  # noqa: F811

-    return Pathy(path)
+    return Pathy.fluid(path)


 def git_checkout(

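The two API swaps above track upstream changes: smart_open 6.x removed the deprecated `ignore_ext` flag in favor of an explicit `compression` argument, and pathy 0.10 moved construction to the `Pathy.fluid()` classmethod. A minimal sketch of the new calls, with a made-up bucket and file name:

    import smart_open
    from pathy import Pathy

    # compression="disable" turns transparent (de)compression off, so the
    # bytes of a .tar.gz arrive as-is (the old behavior of ignore_ext=True)
    with smart_open.open("s3://example-bucket/model.tar.gz", mode="rb", compression="disable") as f:
        data = f.read()

    # Pathy.fluid() returns a cloud path for URLs and a plain pathlib.Path
    # for local paths, which is why the type hints now use "FluidPath"
    remote = Pathy.fluid("gs://example-bucket/assets")
    local = Pathy.fluid("/tmp/assets")
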
spacy/cli/debug_data.py

@@ -13,6 +13,7 @@ from ._util import import_code, debug_cli, _format_number
 from ..training import Example, remove_bilu_prefix
 from ..training.initialize import get_sourced_components
 from ..schemas import ConfigSchemaTraining
+from ..pipeline import TrainablePipe
 from ..pipeline._parser_internals import nonproj
 from ..pipeline._parser_internals.nonproj import DELIMITER
 from ..pipeline import Morphologizer, SpanCategorizer

@@ -934,6 +935,7 @@ def _get_labels_from_model(nlp: Language, factory_name: str) -> Set[str]:
     labels: Set[str] = set()
     for pipe_name in pipe_names:
         pipe = nlp.get_pipe(pipe_name)
+        assert isinstance(pipe, TrainablePipe)
         labels.update(pipe.labels)
     return labels

spacy/cli/find_threshold.py (new file, 233 lines)

@@ -0,0 +1,233 @@
import functools
import operator
from pathlib import Path
import logging
from typing import Optional, Tuple, Any, Dict, List

import numpy
import wasabi.tables

from ..pipeline import TextCategorizer, MultiLabel_TextCategorizer
from ..errors import Errors
from ..training import Corpus
from ._util import app, Arg, Opt, import_code, setup_gpu
from .. import util

_DEFAULTS = {
    "n_trials": 11,
    "use_gpu": -1,
    "gold_preproc": False,
}


@app.command(
    "find-threshold",
    context_settings={"allow_extra_args": False, "ignore_unknown_options": True},
)
def find_threshold_cli(
    # fmt: off
    model: str = Arg(..., help="Model name or path"),
    data_path: Path = Arg(..., help="Location of binary evaluation data in .spacy format", exists=True),
    pipe_name: str = Arg(..., help="Name of pipe to examine thresholds for"),
    threshold_key: str = Arg(..., help="Key of threshold attribute in component's configuration"),
    scores_key: str = Arg(..., help="Metric to optimize"),
    n_trials: int = Opt(_DEFAULTS["n_trials"], "--n_trials", "-n", help="Number of trials to determine optimal thresholds"),
    code_path: Optional[Path] = Opt(None, "--code", "-c", help="Path to Python file with additional code (registered functions) to be imported"),
    use_gpu: int = Opt(_DEFAULTS["use_gpu"], "--gpu-id", "-g", help="GPU ID or -1 for CPU"),
    gold_preproc: bool = Opt(_DEFAULTS["gold_preproc"], "--gold-preproc", "-G", help="Use gold preprocessing"),
    verbose: bool = Opt(False, "--silent", "-V", "-VV", help="Display more information for debugging purposes"),
    # fmt: on
):
    """
    Runs prediction trials for a trained model with varying thresholds to maximize
    the specified metric. The search space for the threshold is traversed linearly
    from 0 to 1 in `n_trials` steps. Results are displayed in a table on `stdout`
    (the corresponding API call to `spacy.cli.find_threshold.find_threshold()`
    returns all results).

    This is applicable only for components whose predictions are influenced by
    thresholds - e.g. `textcat_multilabel` and `spancat`, but not `textcat`. Note
    that the full path to the corresponding threshold attribute in the config has to
    be provided.

    DOCS: https://spacy.io/api/cli#find-threshold
    """

    util.logger.setLevel(logging.DEBUG if verbose else logging.INFO)
    import_code(code_path)
    find_threshold(
        model=model,
        data_path=data_path,
        pipe_name=pipe_name,
        threshold_key=threshold_key,
        scores_key=scores_key,
        n_trials=n_trials,
        use_gpu=use_gpu,
        gold_preproc=gold_preproc,
        silent=False,
    )


def find_threshold(
    model: str,
    data_path: Path,
    pipe_name: str,
    threshold_key: str,
    scores_key: str,
    *,
    n_trials: int = _DEFAULTS["n_trials"],  # type: ignore
    use_gpu: int = _DEFAULTS["use_gpu"],  # type: ignore
    gold_preproc: bool = _DEFAULTS["gold_preproc"],  # type: ignore
    silent: bool = True,
) -> Tuple[float, float, Dict[float, float]]:
    """
    Runs prediction trials for models with varying thresholds to maximize the specified metric.
    model (Union[str, Path]): Pipeline to evaluate. Can be a package or a path to a data directory.
    data_path (Path): Path to file with DocBin with docs to use for threshold search.
    pipe_name (str): Name of pipe to examine thresholds for.
    threshold_key (str): Key of threshold attribute in component's configuration.
    scores_key (str): Name of the score metric to optimize.
    n_trials (int): Number of trials to determine optimal thresholds.
    use_gpu (int): GPU ID or -1 for CPU.
    gold_preproc (bool): Whether to use gold preprocessing. Gold preprocessing helps the annotations align to the
        tokenization, and may result in sequences of more consistent length. However, it may reduce runtime accuracy due
        to train/test skew.
    silent (bool): Whether to print non-error-related output to stdout.
    RETURNS (Tuple[float, float, Dict[float, float]]): Best found threshold, the corresponding score, scores for all
        evaluated thresholds.
    """

    setup_gpu(use_gpu, silent=silent)
    data_path = util.ensure_path(data_path)
    if not data_path.exists():
        wasabi.msg.fail("Evaluation data not found", data_path, exits=1)
    nlp = util.load_model(model)

    if pipe_name not in nlp.component_names:
        raise AttributeError(
            Errors.E001.format(name=pipe_name, opts=nlp.component_names)
        )
    pipe = nlp.get_pipe(pipe_name)
    if not hasattr(pipe, "scorer"):
        raise AttributeError(Errors.E1047)

    if type(pipe) == TextCategorizer:
        wasabi.msg.warn(
            "The `textcat` component doesn't use a threshold as it's not applicable to the concept of "
            "exclusive classes. All thresholds will yield the same results."
        )

    if not silent:
        wasabi.msg.info(
            title=f"Optimizing for {scores_key} for component '{pipe_name}' with {n_trials} "
            f"trials."
        )

    # Load evaluation corpus.
    corpus = Corpus(data_path, gold_preproc=gold_preproc)
    dev_dataset = list(corpus(nlp))
    config_keys = threshold_key.split(".")

    def set_nested_item(
        config: Dict[str, Any], keys: List[str], value: float
    ) -> Dict[str, Any]:
        """Set item in nested dictionary. Adapted from https://stackoverflow.com/a/54138200.
        config (Dict[str, Any]): Configuration dictionary.
        keys (List[Any]): Path to value to set.
        value (float): Value to set.
        RETURNS (Dict[str, Any]): Updated dictionary.
        """
        functools.reduce(operator.getitem, keys[:-1], config)[keys[-1]] = value
        return config

    def filter_config(
        config: Dict[str, Any], keys: List[str], full_key: str
    ) -> Dict[str, Any]:
        """Filters provided config dictionary so that only the specified keys path remains.
        config (Dict[str, Any]): Configuration dictionary.
        keys (List[Any]): Path to value to set.
        full_key (str): Full user-specified key.
        RETURNS (Dict[str, Any]): Filtered dictionary.
        """
        if keys[0] not in config:
            wasabi.msg.fail(
                title=f"Failed to look up `{full_key}` in config: sub-key {[keys[0]]} not found.",
                text=f"Make sure you specified {[keys[0]]} correctly. The following sub-keys are available instead: "
                f"{list(config.keys())}",
                exits=1,
            )
        return {
            keys[0]: filter_config(config[keys[0]], keys[1:], full_key)
            if len(keys) > 1
            else config[keys[0]]
        }

    # Evaluate with varying threshold values.
    scores: Dict[float, float] = {}
    config_keys_full = ["components", pipe_name, *config_keys]
    table_col_widths = (10, 10)
    thresholds = numpy.linspace(0, 1, n_trials)
    print(wasabi.tables.row(["Threshold", f"{scores_key}"], widths=table_col_widths))
    for threshold in thresholds:
        # Reload pipeline with overrides specifying the new threshold.
        nlp = util.load_model(
            model,
            config=set_nested_item(
                filter_config(
                    nlp.config, config_keys_full, ".".join(config_keys_full)
                ).copy(),
                config_keys_full,
                threshold,
            ),
        )
        if hasattr(pipe, "cfg"):
            setattr(
                nlp.get_pipe(pipe_name),
                "cfg",
                set_nested_item(getattr(pipe, "cfg"), config_keys, threshold),
            )

        eval_scores = nlp.evaluate(dev_dataset)
        if scores_key not in eval_scores:
            wasabi.msg.fail(
                title=f"Failed to look up score `{scores_key}` in evaluation results.",
                text=f"Make sure you specified the correct value for `scores_key`. The following scores are "
                f"available: {list(eval_scores.keys())}",
                exits=1,
            )
        scores[threshold] = eval_scores[scores_key]

        if not isinstance(scores[threshold], (float, int)):
            wasabi.msg.fail(
                f"Returned score for key '{scores_key}' is not numeric. Threshold optimization only works for numeric "
                f"scores.",
                exits=1,
            )
        print(
            wasabi.row(
                [round(threshold, 3), round(scores[threshold], 3)],
                widths=table_col_widths,
            )
        )

    best_threshold = max(scores.keys(), key=(lambda key: scores[key]))

    # If all scores are identical, emit warning.
    if len(set(scores.values())) == 1:
        wasabi.msg.warn(
            title="All scores are identical. Verify that all settings are correct.",
            text=""
            if (
                not isinstance(pipe, MultiLabel_TextCategorizer)
                or scores_key in ("cats_macro_f", "cats_micro_f")
            )
            else "Use `cats_macro_f` or `cats_micro_f` when optimizing the threshold for `textcat_multilabel`.",
        )

    else:
        if not silent:
            print(
                f"\nBest threshold: {round(best_threshold, ndigits=4)} with {scores_key} value of {scores[best_threshold]}."
            )

    return best_threshold, scores[best_threshold], scores

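Taken together, the new command samples `n_trials` thresholds from `numpy.linspace(0, 1, n_trials)`, re-evaluates the pipeline at each, and returns the best threshold, its score and the full score table. A minimal sketch of the Python entry point, with hypothetical paths:

    from pathlib import Path
    from spacy.cli.find_threshold import find_threshold

    # Hypothetical paths; any pipeline with a textcat_multilabel component
    # plus a DocBin of annotated dev docs would do.
    best, best_score, all_scores = find_threshold(
        model="training/model-best",
        data_path=Path("corpus/dev.spacy"),
        pipe_name="textcat_multilabel",
        threshold_key="threshold",
        scores_key="cats_macro_f",
        n_trials=11,
    )
    print(best, best_score)
    print(sorted(all_scores)[:3])  # 0.0, 0.1, 0.2 from numpy.linspace(0, 1, 11)
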
spacy/cli/project/assets.py

@@ -189,7 +189,11 @@ def convert_asset_url(url: str) -> str:
     RETURNS (str): The converted URL.
     """
     # If the asset URL is a regular GitHub URL it's likely a mistake
-    if re.match(r"(http(s?)):\/\/github.com", url) and "releases/download" not in url:
+    if (
+        re.match(r"(http(s?)):\/\/github.com", url)
+        and "releases/download" not in url
+        and "/raw/" not in url
+    ):
         converted = url.replace("github.com", "raw.githubusercontent.com")
         converted = re.sub(r"/(tree|blob)/", "/", converted)
         msg.warn(

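Sketched standalone with hypothetical URLs, the extra `"/raw/" not in url` guard keeps already-raw links untouched:

    import re

    def convert(url: str) -> str:
        # mirrors the updated condition above (standalone illustration)
        if (
            re.match(r"(http(s?)):\/\/github.com", url)
            and "releases/download" not in url
            and "/raw/" not in url
        ):
            url = url.replace("github.com", "raw.githubusercontent.com")
            url = re.sub(r"/(tree|blob)/", "/", url)
        return url

    print(convert("https://github.com/org/repo/blob/main/data.json"))
    # -> https://raw.githubusercontent.com/org/repo/main/data.json
    print(convert("https://github.com/org/repo/raw/main/data.json"))
    # unchanged: /raw/ URLs already serve raw content
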
spacy/cli/project/remote_storage.py

@@ -5,15 +5,17 @@ import hashlib
 import urllib.parse
 import tarfile
 from pathlib import Path
 from wasabi import msg

-from .._util import get_hash, get_checksum, download_file, ensure_pathy
-from ...util import make_tempdir, get_minor_version, ENV_VARS, check_bool_env_var
+from .._util import get_hash, get_checksum, upload_file, download_file
+from .._util import ensure_pathy, make_tempdir
+from ...util import get_minor_version, ENV_VARS, check_bool_env_var
 from ...git_info import GIT_VERSION
 from ... import about
 from ...errors import Errors

 if TYPE_CHECKING:
-    from pathy import Pathy  # noqa: F401
+    from pathy import FluidPath  # noqa: F401


 class RemoteStorage:

@@ -28,7 +30,7 @@ class RemoteStorage:
         self.url = ensure_pathy(url)
         self.compression = compression

-    def push(self, path: Path, command_hash: str, content_hash: str) -> "Pathy":
+    def push(self, path: Path, command_hash: str, content_hash: str) -> "FluidPath":
         """Compress a file or directory within a project and upload it to a remote
         storage. If an object exists at the full URL, nothing is done.

@@ -49,9 +51,7 @@ class RemoteStorage:
             mode_string = f"w:{self.compression}" if self.compression else "w"
             with tarfile.open(tar_loc, mode=mode_string) as tar_file:
                 tar_file.add(str(loc), arcname=str(path))
-            with tar_loc.open(mode="rb") as input_file:
-                with url.open(mode="wb") as output_file:
-                    output_file.write(input_file.read())
+            upload_file(tar_loc, url)
         return url

     def pull(

@@ -60,7 +60,7 @@ class RemoteStorage:
         *,
         command_hash: Optional[str] = None,
         content_hash: Optional[str] = None,
-    ) -> Optional["Pathy"]:
+    ) -> Optional["FluidPath"]:
         """Retrieve a file from the remote cache. If the file already exists,
         nothing is done.

@@ -110,25 +110,37 @@ class RemoteStorage:
         *,
         command_hash: Optional[str] = None,
         content_hash: Optional[str] = None,
-    ) -> Optional["Pathy"]:
+    ) -> Optional["FluidPath"]:
         """Find the best matching version of a file within the storage,
         or `None` if no match can be found. If both the creation and content hash
         are specified, only exact matches will be returned. Otherwise, the most
         recent matching file is preferred.
         """
         name = self.encode_name(str(path))
+        urls = []
         if command_hash is not None and content_hash is not None:
-            url = self.make_url(path, command_hash, content_hash)
+            url = self.url / name / command_hash / content_hash
             urls = [url] if url.exists() else []
         elif command_hash is not None:
-            urls = list((self.url / name / command_hash).iterdir())
+            if (self.url / name / command_hash).exists():
+                urls = list((self.url / name / command_hash).iterdir())
         else:
-            urls = list((self.url / name).iterdir())
-            if content_hash is not None:
-                urls = [url for url in urls if url.parts[-1] == content_hash]
+            if (self.url / name).exists():
+                for sub_dir in (self.url / name).iterdir():
+                    urls.extend(sub_dir.iterdir())
+                if content_hash is not None:
+                    urls = [url for url in urls if url.parts[-1] == content_hash]
+        if len(urls) >= 2:
+            try:
+                urls.sort(key=lambda x: x.stat().last_modified)  # type: ignore
+            except Exception:
+                msg.warn(
+                    "Unable to sort remote files by last modified. The file(s) "
+                    "pulled from the cache may not be the most recent."
+                )
         return urls[-1] if urls else None

-    def make_url(self, path: Path, command_hash: str, content_hash: str) -> "Pathy":
+    def make_url(self, path: Path, command_hash: str, content_hash: str) -> "FluidPath":
         """Construct a URL from a subpath, a creation hash and a content hash."""
         return self.url / self.encode_name(str(path)) / command_hash / content_hash

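Remote objects end up under `{url}/{encoded name}/{command_hash}/{content_hash}` (see `make_url`), and `find()`/`pull()` now return `None` instead of failing when nothing matching was ever pushed. A usage sketch with made-up hashes and bucket:

    from pathlib import Path
    from spacy.cli.project.remote_storage import RemoteStorage

    project_root = Path.cwd()
    storage = RemoteStorage(project_root, "s3://example-bucket/cache")  # hypothetical bucket
    storage.push("training/model-best", "cmd123", "content456")
    # exact match on both hashes
    storage.pull("training/model-best", command_hash="cmd123", content_hash="content456")
    # most recent match for the command; returns None (instead of raising)
    # if nothing has been pushed yet
    storage.pull("training/model-best", command_hash="cmd123")
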
spacy/cli/templates/quickstart_training_recommendations.yml

@@ -37,6 +37,15 @@ bn:
     accuracy:
       name: sagorsarker/bangla-bert-base
       size_factor: 3
+ca:
+  word_vectors: null
+  transformer:
+    efficiency:
+      name: projecte-aina/roberta-base-ca-v2
+      size_factor: 3
+    accuracy:
+      name: projecte-aina/roberta-base-ca-v2
+      size_factor: 3
 da:
   word_vectors: da_core_news_lg
   transformer:

spacy/default_config.cfg

@@ -90,6 +90,8 @@ dev_corpus = "corpora.dev"
 train_corpus = "corpora.train"
 # Optional callback before nlp object is saved to disk after training
 before_to_disk = null
+# Optional callback that is invoked at the start of each training step
+before_update = null

 [training.logger]
 @loggers = "spacy.ConsoleLogger.v1"

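A sketch of wiring up the new `before_update` hook via a registered callback; the callback name here is hypothetical, and the signature follows the `Callable[["Language", Dict[str, Any]], None]` type added to `ConfigSchemaTraining` below:

    from typing import Any, Dict
    import spacy
    from spacy.language import Language

    @spacy.registry.callbacks("my_before_update.v1")  # hypothetical name
    def create_before_update():
        def before_update(nlp: Language, info: Dict[str, Any]) -> None:
            # invoked at the start of each training step; `info` is assumed
            # here to carry progress details such as the current step
            print("step info:", info)
        return before_update

    # and in the training config:
    # [training.before_update]
    # @callbacks = "my_before_update.v1"
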
spacy/displacy/__init__.py

@@ -228,12 +228,13 @@ def parse_spans(doc: Doc, options: Dict[str, Any] = {}) -> Dict[str, Any]:
             "kb_id": span.kb_id_ if span.kb_id_ else "",
             "kb_url": kb_url_template.format(span.kb_id_) if kb_url_template else "#",
         }
-        for span in doc.spans[spans_key]
+        for span in doc.spans.get(spans_key, [])
     ]
     tokens = [token.text for token in doc]

     if not spans:
-        warnings.warn(Warnings.W117.format(spans_key=spans_key))
+        keys = list(doc.spans.keys())
+        warnings.warn(Warnings.W117.format(spans_key=spans_key, keys=keys))
     title = doc.user_data.get("title", None) if hasattr(doc, "user_data") else None
     settings = get_doc_settings(doc)
     return {

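The effect of the two changes, sketched: rendering a Doc whose default spans key is unset now degrades to an empty visualization plus warning W117 (which lists the keys that do exist) instead of raising a KeyError:

    import spacy
    from spacy import displacy

    nlp = spacy.blank("en")
    doc = nlp("Welcome to the Bank of China")
    doc.spans["custom"] = [doc[3:6]]  # only a non-default key is set
    # default spans_key is "sc": previously a KeyError, now W117 with
    # "Available keys: ['custom']" appended to the message
    html = displacy.render(doc, style="span")
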
spacy/errors.py

@@ -199,7 +199,7 @@ class Warnings(metaclass=ErrorsWithCodes):
     W117 = ("No spans to visualize found in Doc object with spans_key: '{spans_key}'. If this is "
             "surprising to you, make sure the Doc was processed using a model "
             "that supports span categorization, and check the `doc.spans[spans_key]` "
-            "property manually if necessary.")
+            "property manually if necessary.\n\nAvailable keys: {keys}")
     W118 = ("Term '{term}' not found in glossary. It may however be explained in documentation "
             "for the corpora used to train the language. Please check "
             "`nlp.meta[\"sources\"]` for any relevant links.")

@@ -544,6 +544,8 @@ class Errors(metaclass=ErrorsWithCodes):
             "during training, make sure to include it in 'annotating components'")

     # New errors added in v3.x
+    E851 = ("The 'textcat' component labels should only have values of 0 or 1, "
+            "but found value of '{val}'.")
     E852 = ("The tar file pulled from the remote attempted an unsafe path "
             "traversal.")
     E853 = ("Unsupported component factory name '{name}'. The character '.' is "

@@ -954,6 +956,7 @@ class Errors(metaclass=ErrorsWithCodes):
             "sure it's overwritten on the subclass.")
     E1046 = ("{cls_name} is an abstract class and cannot be instantiated. If you are looking for spaCy's default "
             "knowledge base, use `InMemoryLookupKB`.")
+    E1047 = ("`find_threshold()` only supports components with a `scorer` attribute.")


 # Deprecated model shortcuts, only used in errors and warnings

spacy/lang/ru/lemmatizer.py

@@ -28,34 +28,39 @@ class RussianLemmatizer(Lemmatizer):
                 from pymorphy2 import MorphAnalyzer
             except ImportError:
                 raise ImportError(
-                    "The Russian lemmatizer mode 'pymorphy2' requires the "
-                    "pymorphy2 library. Install it with: pip install pymorphy2"
+                    "The lemmatizer mode 'pymorphy2' requires the "
+                    "pymorphy2 library and dictionaries. Install them with: "
+                    "pip install pymorphy2"
+                    "# for Ukrainian dictionaries:"
+                    "pip install pymorphy2-dicts-uk"
                 ) from None
             if getattr(self, "_morph", None) is None:
-                self._morph = MorphAnalyzer()
-        elif mode == "pymorphy3":
+                self._morph = MorphAnalyzer(lang="ru")
+        elif mode in {"pymorphy3", "pymorphy3_lookup"}:
             try:
                 from pymorphy3 import MorphAnalyzer
             except ImportError:
                 raise ImportError(
-                    "The Russian lemmatizer mode 'pymorphy3' requires the "
-                    "pymorphy3 library. Install it with: pip install pymorphy3"
+                    "The lemmatizer mode 'pymorphy3' requires the "
+                    "pymorphy3 library and dictionaries. Install them with: "
+                    "pip install pymorphy3"
+                    "# for Ukrainian dictionaries:"
+                    "pip install pymorphy3-dicts-uk"
                 ) from None
             if getattr(self, "_morph", None) is None:
-                self._morph = MorphAnalyzer()
+                self._morph = MorphAnalyzer(lang="ru")
         super().__init__(
             vocab, model, name, mode=mode, overwrite=overwrite, scorer=scorer
         )

-    def pymorphy2_lemmatize(self, token: Token) -> List[str]:
+    def _pymorphy_lemmatize(self, token: Token) -> List[str]:
         string = token.text
         univ_pos = token.pos_
         morphology = token.morph.to_dict()
         if univ_pos == "PUNCT":
             return [PUNCT_RULES.get(string, string)]
         if univ_pos not in ("ADJ", "DET", "NOUN", "NUM", "PRON", "PROPN", "VERB"):
             # Skip unchangeable pos
-            return [string.lower()]
+            return self._pymorphy_lookup_lemmatize(token)
         analyses = self._morph.parse(string)
         filtered_analyses = []
         for analysis in analyses:

@@ -63,8 +68,10 @@ class RussianLemmatizer(Lemmatizer):
                 # Skip suggested parse variant for unknown word for pymorphy
                 continue
             analysis_pos, _ = oc2ud(str(analysis.tag))
-            if analysis_pos == univ_pos or (
-                analysis_pos in ("NOUN", "PROPN") and univ_pos in ("NOUN", "PROPN")
+            if (
+                analysis_pos == univ_pos
+                or (analysis_pos in ("NOUN", "PROPN") and univ_pos in ("NOUN", "PROPN"))
+                or ((analysis_pos == "PRON") and (univ_pos == "DET"))
             ):
                 filtered_analyses.append(analysis)
         if not len(filtered_analyses):

@@ -107,15 +114,27 @@ class RussianLemmatizer(Lemmatizer):
             dict.fromkeys([analysis.normal_form for analysis in filtered_analyses])
         )

-    def pymorphy2_lookup_lemmatize(self, token: Token) -> List[str]:
+    def _pymorphy_lookup_lemmatize(self, token: Token) -> List[str]:
         string = token.text
         analyses = self._morph.parse(string)
-        if len(analyses) == 1:
-            return [analyses[0].normal_form]
+        # often multiple forms would derive from the same normal form
+        # thus check _unique_ normal forms
+        normal_forms = set([an.normal_form for an in analyses])
+        if len(normal_forms) == 1:
+            return [next(iter(normal_forms))]
         return [string]

+    def pymorphy2_lemmatize(self, token: Token) -> List[str]:
+        return self._pymorphy_lemmatize(token)
+
+    def pymorphy2_lookup_lemmatize(self, token: Token) -> List[str]:
+        return self._pymorphy_lookup_lemmatize(token)
+
     def pymorphy3_lemmatize(self, token: Token) -> List[str]:
-        return self.pymorphy2_lemmatize(token)
+        return self._pymorphy_lemmatize(token)
+
+    def pymorphy3_lookup_lemmatize(self, token: Token) -> List[str]:
+        return self._pymorphy_lookup_lemmatize(token)


 def oc2ud(oc_tag: str) -> Tuple[str, Dict[str, str]]:

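With the lookup fallback above, a word whose analyses all share one normal form now gets that form, and the `*_lookup` modes work with pymorphy3. A sketch, assuming pymorphy3 and its Russian dictionaries are installed (this mirrors the tests added further down):

    import spacy

    nlp = spacy.blank("ru")
    nlp.add_pipe("lemmatizer", config={"mode": "pymorphy3_lookup"})
    nlp.initialize()
    doc = nlp("бременем")
    print([t.lemma_ for t in doc])  # ["бремя"]: all analyses share one normal form
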
spacy/lang/ru/tokenizer_exceptions.py

@@ -61,6 +61,11 @@ for abbr in [
     {ORTH: "2к23", NORM: "2023"},
     {ORTH: "2к24", NORM: "2024"},
     {ORTH: "2к25", NORM: "2025"},
+    {ORTH: "2к26", NORM: "2026"},
+    {ORTH: "2к27", NORM: "2027"},
+    {ORTH: "2к28", NORM: "2028"},
+    {ORTH: "2к29", NORM: "2029"},
+    {ORTH: "2к30", NORM: "2030"},
 ]:
     _exc[abbr[ORTH]] = [abbr]

@@ -268,8 +273,8 @@ for abbr in [
     {ORTH: "з-ка", NORM: "заимка"},
     {ORTH: "п-к", NORM: "починок"},
     {ORTH: "киш.", NORM: "кишлак"},
-    {ORTH: "п. ст. ", NORM: "поселок станция"},
-    {ORTH: "п. ж/д ст. ", NORM: "поселок при железнодорожной станции"},
+    {ORTH: "п. ст.", NORM: "поселок станция"},
+    {ORTH: "п. ж/д ст.", NORM: "поселок при железнодорожной станции"},
     {ORTH: "ж/д бл-ст", NORM: "железнодорожный блокпост"},
     {ORTH: "ж/д б-ка", NORM: "железнодорожная будка"},
     {ORTH: "ж/д в-ка", NORM: "железнодорожная ветка"},

@@ -280,12 +285,12 @@ for abbr in [
     {ORTH: "ж/д п.п.", NORM: "железнодорожный путевой пост"},
     {ORTH: "ж/д о.п.", NORM: "железнодорожный остановочный пункт"},
     {ORTH: "ж/д рзд.", NORM: "железнодорожный разъезд"},
-    {ORTH: "ж/д ст. ", NORM: "железнодорожная станция"},
+    {ORTH: "ж/д ст.", NORM: "железнодорожная станция"},
     {ORTH: "м-ко", NORM: "местечко"},
     {ORTH: "д.", NORM: "деревня"},
     {ORTH: "с.", NORM: "село"},
     {ORTH: "сл.", NORM: "слобода"},
-    {ORTH: "ст. ", NORM: "станция"},
+    {ORTH: "ст.", NORM: "станция"},
     {ORTH: "ст-ца", NORM: "станица"},
     {ORTH: "у.", NORM: "улус"},
     {ORTH: "х.", NORM: "хутор"},

@@ -388,8 +393,9 @@ for abbr in [
     {ORTH: "прим.", NORM: "примечание"},
     {ORTH: "прим.ред.", NORM: "примечание редакции"},
     {ORTH: "см. также", NORM: "смотри также"},
-    {ORTH: "кв.м.", NORM: "квадрантный метр"},
-    {ORTH: "м2", NORM: "квадрантный метр"},
+    {ORTH: "см.", NORM: "смотри"},
+    {ORTH: "кв.м.", NORM: "квадратный метр"},
+    {ORTH: "м2", NORM: "квадратный метр"},
     {ORTH: "б/у", NORM: "бывший в употреблении"},
     {ORTH: "сокр.", NORM: "сокращение"},
     {ORTH: "чел.", NORM: "человек"},

spacy/lang/uk/lemmatizer.py

@@ -29,7 +29,7 @@ class UkrainianLemmatizer(RussianLemmatizer):
                 ) from None
             if getattr(self, "_morph", None) is None:
                 self._morph = MorphAnalyzer(lang="uk")
-        elif mode == "pymorphy3":
+        elif mode in {"pymorphy3", "pymorphy3_lookup"}:
             try:
                 from pymorphy3 import MorphAnalyzer
             except ImportError:

spacy/language.py

@@ -43,8 +43,7 @@ from .lookups import load_lookups
 from .compat import Literal


-if TYPE_CHECKING:
-    from .pipeline import Pipe  # noqa: F401
+PipeCallable = Callable[[Doc], Doc]


 # This is the base config will all settings (training etc.)

@@ -181,7 +180,7 @@ class Language:
         self.vocab: Vocab = vocab
         if self.lang is None:
             self.lang = self.vocab.lang
-        self._components: List[Tuple[str, "Pipe"]] = []
+        self._components: List[Tuple[str, PipeCallable]] = []
         self._disabled: Set[str] = set()
         self.max_length = max_length
         # Create the default tokenizer from the default config

@@ -303,7 +302,7 @@ class Language:
         return SimpleFrozenList(names)

     @property
-    def components(self) -> List[Tuple[str, "Pipe"]]:
+    def components(self) -> List[Tuple[str, PipeCallable]]:
         """Get all (name, component) tuples in the pipeline, including the
         currently disabled components.
         """

@@ -322,12 +321,12 @@ class Language:
         return SimpleFrozenList(names, error=Errors.E926.format(attr="component_names"))

     @property
-    def pipeline(self) -> List[Tuple[str, "Pipe"]]:
+    def pipeline(self) -> List[Tuple[str, PipeCallable]]:
         """The processing pipeline consisting of (name, component) tuples. The
         components are called on the Doc in order as it passes through the
         pipeline.

-        RETURNS (List[Tuple[str, Pipe]]): The pipeline.
+        RETURNS (List[Tuple[str, Callable[[Doc], Doc]]]): The pipeline.
         """
         pipes = [(n, p) for n, p in self._components if n not in self._disabled]
         return SimpleFrozenList(pipes, error=Errors.E926.format(attr="pipeline"))

@@ -527,7 +526,7 @@ class Language:
         assigns: Iterable[str] = SimpleFrozenList(),
         requires: Iterable[str] = SimpleFrozenList(),
         retokenizes: bool = False,
-        func: Optional["Pipe"] = None,
+        func: Optional[PipeCallable] = None,
     ) -> Callable[..., Any]:
         """Register a new pipeline component. Can be used for stateless function
         components that don't require a separate factory. Can be used as a

@@ -542,7 +541,7 @@ class Language:
             e.g. "token.ent_id". Used for pipeline analysis.
         retokenizes (bool): Whether the component changes the tokenization.
             Used for pipeline analysis.
-        func (Optional[Callable]): Factory function if not used as a decorator.
+        func (Optional[Callable[[Doc], Doc]]): Factory function if not used as a decorator.

         DOCS: https://spacy.io/api/language#component
         """

@@ -553,11 +552,11 @@ class Language:
             raise ValueError(Errors.E853.format(name=name))
         component_name = name if name is not None else util.get_object_name(func)

-        def add_component(component_func: "Pipe") -> Callable:
+        def add_component(component_func: PipeCallable) -> Callable:
             if isinstance(func, type):  # function is a class
                 raise ValueError(Errors.E965.format(name=component_name))

-            def factory_func(nlp, name: str) -> "Pipe":
+            def factory_func(nlp, name: str) -> PipeCallable:
                 return component_func

             internal_name = cls.get_factory_name(name)

@@ -607,7 +606,7 @@ class Language:
         print_pipe_analysis(analysis, keys=keys)
         return analysis

-    def get_pipe(self, name: str) -> "Pipe":
+    def get_pipe(self, name: str) -> PipeCallable:
         """Get a pipeline component for a given component name.

         name (str): Name of pipeline component to get.

@@ -628,7 +627,7 @@ class Language:
         config: Dict[str, Any] = SimpleFrozenDict(),
         raw_config: Optional[Config] = None,
         validate: bool = True,
-    ) -> "Pipe":
+    ) -> PipeCallable:
         """Create a pipeline component. Mostly used internally. To create and
         add a component to the pipeline, you can use nlp.add_pipe.

@@ -640,7 +639,7 @@ class Language:
         raw_config (Optional[Config]): Internals: the non-interpolated config.
         validate (bool): Whether to validate the component config against the
             arguments and types expected by the factory.
-        RETURNS (Pipe): The pipeline component.
+        RETURNS (Callable[[Doc], Doc]): The pipeline component.

         DOCS: https://spacy.io/api/language#create_pipe
         """

@@ -695,24 +694,18 @@ class Language:

     def create_pipe_from_source(
         self, source_name: str, source: "Language", *, name: str
-    ) -> Tuple["Pipe", str]:
+    ) -> Tuple[PipeCallable, str]:
         """Create a pipeline component by copying it from an existing model.

         source_name (str): Name of the component in the source pipeline.
         source (Language): The source nlp object to copy from.
         name (str): Optional alternative name to use in current pipeline.
-        RETURNS (Tuple[Callable, str]): The component and its factory name.
+        RETURNS (Tuple[Callable[[Doc], Doc], str]): The component and its factory name.
         """
         # Check source type
         if not isinstance(source, Language):
             raise ValueError(Errors.E945.format(name=source_name, source=type(source)))
-        # Check vectors, with faster checks first
-        if (
-            self.vocab.vectors.shape != source.vocab.vectors.shape
-            or self.vocab.vectors.key2row != source.vocab.vectors.key2row
-            or self.vocab.vectors.to_bytes(exclude=["strings"])
-            != source.vocab.vectors.to_bytes(exclude=["strings"])
-        ):
+        if self.vocab.vectors != source.vocab.vectors:
             warnings.warn(Warnings.W113.format(name=source_name))
         if source_name not in source.component_names:
             raise KeyError(

@@ -746,7 +739,7 @@ class Language:
         config: Dict[str, Any] = SimpleFrozenDict(),
         raw_config: Optional[Config] = None,
         validate: bool = True,
-    ) -> "Pipe":
+    ) -> PipeCallable:
         """Add a component to the processing pipeline. Valid components are
         callables that take a `Doc` object, modify it and return it. Only one
         of before/after/first/last can be set. Default behaviour is "last".

@@ -769,7 +762,7 @@ class Language:
         raw_config (Optional[Config]): Internals: the non-interpolated config.
         validate (bool): Whether to validate the component config against the
             arguments and types expected by the factory.
-        RETURNS (Pipe): The pipeline component.
+        RETURNS (Callable[[Doc], Doc]): The pipeline component.

         DOCS: https://spacy.io/api/language#add_pipe
         """

@@ -790,14 +783,6 @@ class Language:
                 factory_name, source, name=name
             )
         else:
-            if not self.has_factory(factory_name):
-                err = Errors.E002.format(
-                    name=factory_name,
-                    opts=", ".join(self.factory_names),
-                    method="add_pipe",
-                    lang=util.get_object_name(self),
-                    lang_code=self.lang,
-                )
             pipe_component = self.create_pipe(
                 factory_name,
                 name=name,

@@ -883,7 +868,7 @@ class Language:
         *,
         config: Dict[str, Any] = SimpleFrozenDict(),
         validate: bool = True,
-    ) -> "Pipe":
+    ) -> PipeCallable:
         """Replace a component in the pipeline.

         name (str): Name of the component to replace.

@@ -892,7 +877,7 @@ class Language:
             component. Will be merged with default config, if available.
         validate (bool): Whether to validate the component config against the
             arguments and types expected by the factory.
-        RETURNS (Pipe): The new pipeline component.
+        RETURNS (Callable[[Doc], Doc]): The new pipeline component.

         DOCS: https://spacy.io/api/language#replace_pipe
         """

@@ -944,11 +929,11 @@ class Language:
         init_cfg = self._config["initialize"]["components"].pop(old_name)
         self._config["initialize"]["components"][new_name] = init_cfg

-    def remove_pipe(self, name: str) -> Tuple[str, "Pipe"]:
+    def remove_pipe(self, name: str) -> Tuple[str, PipeCallable]:
         """Remove a component from the pipeline.

         name (str): Name of the component to remove.
-        RETURNS (tuple): A `(name, component)` tuple of the removed component.
+        RETURNS (Tuple[str, Callable[[Doc], Doc]]): A `(name, component)` tuple of the removed component.

         DOCS: https://spacy.io/api/language#remove_pipe
         """

@@ -1363,15 +1348,15 @@ class Language:

     def set_error_handler(
         self,
-        error_handler: Callable[[str, "Pipe", List[Doc], Exception], NoReturn],
+        error_handler: Callable[[str, PipeCallable, List[Doc], Exception], NoReturn],
     ):
-        """Set an error handler object for all the components in the pipeline that implement
-        a set_error_handler function.
+        """Set an error handler object for all the components in the pipeline
+        that implement a set_error_handler function.

-        error_handler (Callable[[str, Pipe, List[Doc], Exception], NoReturn]):
-            Function that deals with a failing batch of documents. This callable function should take in
-            the component's name, the component itself, the offending batch of documents, and the exception
-            that was thrown.
+        error_handler (Callable[[str, Callable[[Doc], Doc], List[Doc], Exception], NoReturn]):
+            Function that deals with a failing batch of documents. This callable
+            function should take in the component's name, the component itself,
+            the offending batch of documents, and the exception that was thrown.
         DOCS: https://spacy.io/api/language#set_error_handler
         """
         self.default_error_handler = error_handler

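The new `PipeCallable = Callable[[Doc], Doc]` alias makes the accepted contract explicit: anything callable on a Doc qualifies as a pipeline component. For instance (hypothetical component name):

    from spacy.language import Language
    from spacy.tokens import Doc

    @Language.component("my_noop")  # hypothetical name
    def my_noop(doc: Doc) -> Doc:
        # any Callable[[Doc], Doc] satisfies the PipeCallable alias
        return doc
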
@@ -1,4 +1,4 @@
-# cython: infer_types=True, cython: profile=True
+# cython: infer_types=True, profile=True
 from typing import List, Iterable

 from libcpp.vector cimport vector

@@ -1,7 +1,7 @@
-from typing import List, Dict, Callable, Tuple, Optional, Iterable, Any, cast
+from typing import List, Dict, Callable, Tuple, Optional, Iterable, Any
 from thinc.api import Config, Model, get_current_ops, set_dropout_rate, Ops
 from thinc.api import Optimizer
-from thinc.types import Ragged, Ints2d, Floats2d, Ints1d
+from thinc.types import Ragged, Ints2d, Floats2d

 import numpy

spacy/pipeline/textcat.py

@@ -87,7 +87,6 @@ subword_features = true
         "cats_macro_f": None,
         "cats_macro_auc": None,
         "cats_f_per_type": None,
-        "cats_macro_auc_per_type": None,
     },
 )
 def make_textcat(

@@ -401,5 +400,9 @@ class TextCategorizer(TrainablePipe):
     def _validate_categories(self, examples: Iterable[Example]):
         """Check whether the provided examples all have single-label cats annotations."""
         for ex in examples:
-            if list(ex.reference.cats.values()).count(1.0) > 1:
+            vals = list(ex.reference.cats.values())
+            if vals.count(1.0) > 1:
                 raise ValueError(Errors.E895.format(value=ex.reference.cats))
+            for val in vals:
+                if not (val == 1.0 or val == 0.0):
+                    raise ValueError(Errors.E851.format(val=val))

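The added check turns out-of-range annotation scores into a hard error (E851). A sketch of what now fails, mirroring the test added further down:

    from spacy.lang.en import English
    from spacy.training import Example

    nlp = English()
    nlp.add_pipe("textcat")
    doc = nlp.make_doc("some text")
    eg = Example.from_dict(doc, {"cats": {"POS": 0.5, "NEG": 0.5}})
    # 0.5 is neither 0.0 nor 1.0, so initialization raises ValueError (E851)
    nlp.initialize(get_examples=lambda: [eg])
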
spacy/pipeline/textcat_multilabel.py

@@ -87,7 +87,6 @@ subword_features = true
         "cats_macro_f": None,
         "cats_macro_auc": None,
         "cats_f_per_type": None,
-        "cats_macro_auc_per_type": None,
     },
 )
 def make_multilabel_textcat(

@@ -192,6 +191,8 @@ class MultiLabel_TextCategorizer(TextCategorizer):
         for label in labels:
             self.add_label(label)
         subbatch = list(islice(get_examples(), 10))
+        self._validate_categories(subbatch)
+
         doc_sample = [eg.reference for eg in subbatch]
         label_sample, _ = self._examples_to_truth(subbatch)
         self._require_labels()

@@ -202,4 +203,8 @@ class MultiLabel_TextCategorizer(TextCategorizer):
     def _validate_categories(self, examples: Iterable[Example]):
         """This component allows any type of single- or multi-label annotations.
         This method overwrites the more strict one from 'textcat'."""
-        pass
+        # check that annotation values are valid
+        for ex in examples:
+            for val in ex.reference.cats.values():
+                if not (val == 1.0 or val == 0.0):
+                    raise ValueError(Errors.E851.format(val=val))

spacy/schemas.py

@@ -329,6 +329,7 @@ class ConfigSchemaTraining(BaseModel):
     frozen_components: List[str] = Field(..., title="Pipeline components that shouldn't be updated during training")
     annotating_components: List[str] = Field(..., title="Pipeline components that should set annotations during training")
     before_to_disk: Optional[Callable[["Language"], "Language"]] = Field(..., title="Optional callback to modify nlp object after training, before it's saved to disk")
+    before_update: Optional[Callable[["Language", Dict[str, Any]], None]] = Field(..., title="Optional callback that is invoked at the start of each training step")
     # fmt: on

     class Config:

spacy/tests/conftest.py

@@ -337,17 +337,17 @@ def ru_tokenizer():
     return get_lang_class("ru")().tokenizer


-@pytest.fixture
+@pytest.fixture(scope="session")
 def ru_lemmatizer():
     pytest.importorskip("pymorphy3")
     return get_lang_class("ru")().add_pipe("lemmatizer")


-@pytest.fixture
+@pytest.fixture(scope="session")
 def ru_lookup_lemmatizer():
-    pytest.importorskip("pymorphy2")
+    pytest.importorskip("pymorphy3")
     return get_lang_class("ru")().add_pipe(
-        "lemmatizer", config={"mode": "pymorphy2_lookup"}
+        "lemmatizer", config={"mode": "pymorphy3_lookup"}
     )

@@ -423,19 +423,19 @@ def uk_tokenizer():
     return get_lang_class("uk")().tokenizer


-@pytest.fixture
+@pytest.fixture(scope="session")
 def uk_lemmatizer():
     pytest.importorskip("pymorphy3")
     pytest.importorskip("pymorphy3_dicts_uk")
     return get_lang_class("uk")().add_pipe("lemmatizer")


-@pytest.fixture
+@pytest.fixture(scope="session")
 def uk_lookup_lemmatizer():
-    pytest.importorskip("pymorphy2")
-    pytest.importorskip("pymorphy2_dicts_uk")
+    pytest.importorskip("pymorphy3")
+    pytest.importorskip("pymorphy3_dicts_uk")
     return get_lang_class("uk")().add_pipe(
-        "lemmatizer", config={"mode": "pymorphy2_lookup"}
+        "lemmatizer", config={"mode": "pymorphy3_lookup"}
     )

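Session scoping means each lemmatizer fixture (and its pymorphy analyzer) is built once per test run rather than once per test. The pytest semantics, sketched:

    import pytest

    @pytest.fixture(scope="session")
    def expensive_resource():
        # constructed once for the whole test session and shared across
        # tests, instead of once per test function (the default scope)
        return object()
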
spacy/tests/lang/ru/test_lemmatizer.py

@@ -81,6 +81,7 @@ def test_ru_lemmatizer_punct(ru_lemmatizer):


 def test_ru_doc_lookup_lemmatization(ru_lookup_lemmatizer):
+    assert ru_lookup_lemmatizer.mode == "pymorphy3_lookup"
     words = ["мама", "мыла", "раму"]
     pos = ["NOUN", "VERB", "NOUN"]
     morphs = [

@@ -92,3 +93,17 @@ def test_ru_doc_lookup_lemmatization(ru_lookup_lemmatizer):
     doc = ru_lookup_lemmatizer(doc)
     lemmas = [token.lemma_ for token in doc]
     assert lemmas == ["мама", "мыла", "раму"]
+
+
+@pytest.mark.parametrize(
+    "word,lemma",
+    (
+        ("бременем", "бремя"),
+        ("будешь", "быть"),
+        ("какая-то", "какой-то"),
+    ),
+)
+def test_ru_lookup_lemmatizer(ru_lookup_lemmatizer, word, lemma):
+    assert ru_lookup_lemmatizer.mode == "pymorphy3_lookup"
+    doc = Doc(ru_lookup_lemmatizer.vocab, words=[word])
+    assert ru_lookup_lemmatizer(doc)[0].lemma_ == lemma

spacy/tests/lang/uk/test_lemmatizer.py

@@ -8,12 +8,20 @@ pytestmark = pytest.mark.filterwarnings("ignore::DeprecationWarning")
 def test_uk_lemmatizer(uk_lemmatizer):
     """Check that the default uk lemmatizer runs."""
     doc = Doc(uk_lemmatizer.vocab, words=["a", "b", "c"])
+    assert uk_lemmatizer.mode == "pymorphy3"
     uk_lemmatizer(doc)
     assert [token.lemma for token in doc]


-def test_uk_lookup_lemmatizer(uk_lookup_lemmatizer):
-    """Check that the lookup uk lemmatizer runs."""
-    doc = Doc(uk_lookup_lemmatizer.vocab, words=["a", "b", "c"])
-    uk_lookup_lemmatizer(doc)
-    assert [token.lemma for token in doc]
+@pytest.mark.parametrize(
+    "word,lemma",
+    (
+        ("якийсь", "якийсь"),
+        ("розповідають", "розповідати"),
+        ("розповіси", "розповісти"),
+    ),
+)
+def test_uk_lookup_lemmatizer(uk_lookup_lemmatizer, word, lemma):
+    assert uk_lookup_lemmatizer.mode == "pymorphy3_lookup"
+    doc = Doc(uk_lookup_lemmatizer.vocab, words=[word])
+    assert uk_lookup_lemmatizer(doc)[0].lemma_ == lemma

spacy/tests/pipeline/test_textcat.py

@@ -360,6 +360,30 @@ def test_label_types(name):
     nlp.initialize()


+@pytest.mark.parametrize(
+    "name,get_examples",
+    [
+        ("textcat", make_get_examples_single_label),
+        ("textcat_multilabel", make_get_examples_multi_label),
+    ],
+)
+def test_invalid_label_value(name, get_examples):
+    nlp = Language()
+    textcat = nlp.add_pipe(name)
+    example_getter = get_examples(nlp)
+
+    def invalid_examples():
+        # make one example with an invalid score
+        examples = example_getter()
+        ref = examples[0].reference
+        key = list(ref.cats.keys())[0]
+        ref.cats[key] = 2.0
+        return examples
+
+    with pytest.raises(ValueError):
+        nlp.initialize(get_examples=invalid_examples)
+
+
 @pytest.mark.parametrize("name", ["textcat", "textcat_multilabel"])
 def test_no_label(name):
     nlp = Language()

@@ -814,8 +838,8 @@ def test_textcat_loss(multi_label: bool, expected_loss: float):
         textcat = nlp.add_pipe("textcat_multilabel")
     else:
         textcat = nlp.add_pipe("textcat")
-    textcat.initialize(lambda: train_examples)
+    assert isinstance(textcat, TextCategorizer)
+    textcat.initialize(lambda: train_examples)
     scores = textcat.model.ops.asarray(
         [[0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 1.0, 1.0]], dtype="f"  # type: ignore
     )

@ -1,9 +1,11 @@
|
|||
import os
|
||||
import math
|
||||
from collections import Counter
|
||||
from typing import Tuple, List, Dict, Any
|
||||
import pkg_resources
|
||||
from random import sample
|
||||
from typing import Counter
|
||||
import time
|
||||
|
||||
import numpy
|
||||
import pytest
|
||||
import srsly
|
||||
from click import NoSuchOption
|
||||
|
@ -16,6 +18,7 @@ from spacy.cli._util import is_subpath_of, load_project_config
|
|||
from spacy.cli._util import parse_config_overrides, string_to_list
|
||||
from spacy.cli._util import substitute_project_variables
|
||||
from spacy.cli._util import validate_project_commands
|
||||
from spacy.cli._util import upload_file, download_file
|
||||
from spacy.cli.debug_data import _compile_gold, _get_labels_from_model
|
||||
from spacy.cli.debug_data import _get_labels_from_spancat
|
||||
from spacy.cli.debug_data import _get_distribution, _get_kl_divergence
|
||||
|
@ -26,13 +29,15 @@ from spacy.cli.download import get_compatibility, get_version
|
|||
from spacy.cli.init_config import RECOMMENDATIONS, init_config, fill_config
|
||||
from spacy.cli.package import get_third_party_dependencies
|
||||
from spacy.cli.package import _is_permitted_package_name
|
||||
from spacy.cli.project.remote_storage import RemoteStorage
|
||||
from spacy.cli.project.run import _check_requirements
|
||||
from spacy.cli.validate import get_model_pkgs
|
||||
from spacy.cli.find_threshold import find_threshold
|
||||
from spacy.lang.en import English
|
||||
from spacy.lang.nl import Dutch
|
||||
from spacy.language import Language
|
||||
from spacy.schemas import ProjectConfigSchema, RecommendationSchema, validate
|
||||
from spacy.tokens import Doc
|
||||
from spacy.tokens import Doc, DocBin
|
||||
from spacy.tokens.span import Span
|
||||
from spacy.training import Example, docs_to_json, offsets_to_biluo_tags
|
||||
from spacy.training.converters import conll_ner_to_docs, conllu_to_docs
|
||||
|
@ -591,6 +596,7 @@ def test_string_to_list_intify(value):
|
|||
assert string_to_list(value, intify=True) == [1, 2, 3]
|
||||
|
||||
|
||||
@pytest.mark.skip(reason="Temporarily skip for dev version")
|
||||
def test_download_compatibility():
|
||||
spec = SpecifierSet("==" + about.__version__)
|
||||
spec.prereleases = False
|
||||
|
@ -601,6 +607,7 @@ def test_download_compatibility():
|
|||
assert get_minor_version(about.__version__) == get_minor_version(version)
|
||||
|
||||
|
||||
@pytest.mark.skip(reason="Temporarily skip for dev version")
|
||||
def test_validate_compatibility_table():
|
    spec = SpecifierSet("==" + about.__version__)
    spec.prereleases = False

@@ -859,6 +866,176 @@ def test_span_length_freq_dist_output_must_be_correct():
    assert list(span_freqs.keys()) == [3, 1, 4, 5, 2]


def test_local_remote_storage():
    with make_tempdir() as d:
        filename = "a.txt"

        content_hashes = ("aaaa", "cccc", "bbbb")
        for i, content_hash in enumerate(content_hashes):
            # make sure that each subsequent file has a later timestamp
            if i > 0:
                time.sleep(1)
            content = f"{content_hash} content"
            loc_file = d / "root" / filename
            if not loc_file.parent.exists():
                loc_file.parent.mkdir(parents=True)
            with loc_file.open(mode="w") as file_:
                file_.write(content)

            # push first version to remote storage
            remote = RemoteStorage(d / "root", str(d / "remote"))
            remote.push(filename, "aaaa", content_hash)

            # retrieve with full hashes
            loc_file.unlink()
            remote.pull(filename, command_hash="aaaa", content_hash=content_hash)
            with loc_file.open(mode="r") as file_:
                assert file_.read() == content

            # retrieve with command hash
            loc_file.unlink()
            remote.pull(filename, command_hash="aaaa")
            with loc_file.open(mode="r") as file_:
                assert file_.read() == content

            # retrieve with content hash
            loc_file.unlink()
            remote.pull(filename, content_hash=content_hash)
            with loc_file.open(mode="r") as file_:
                assert file_.read() == content

            # retrieve with no hashes
            loc_file.unlink()
            remote.pull(filename)
            with loc_file.open(mode="r") as file_:
                assert file_.read() == content


def test_local_remote_storage_pull_missing():
    # pulling from a non-existent remote pulls nothing gracefully
    with make_tempdir() as d:
        filename = "a.txt"
        remote = RemoteStorage(d / "root", str(d / "remote"))
        assert remote.pull(filename, command_hash="aaaa") is None
        assert remote.pull(filename) is None


def test_cli_find_threshold(capsys):
    thresholds = numpy.linspace(0, 1, 10)

    def make_examples(nlp: Language) -> List[Example]:
        docs: List[Example] = []

        for t in [
            (
                "I am angry and confused in the Bank of America.",
                {
                    "cats": {"ANGRY": 1.0, "CONFUSED": 1.0, "HAPPY": 0.0},
                    "spans": {"sc": [(31, 46, "ORG")]},
                },
            ),
            (
                "I am confused but happy in New York.",
                {
                    "cats": {"ANGRY": 0.0, "CONFUSED": 1.0, "HAPPY": 1.0},
                    "spans": {"sc": [(27, 35, "GPE")]},
                },
            ),
        ]:
            doc = nlp.make_doc(t[0])
            docs.append(Example.from_dict(doc, t[1]))

        return docs

    def init_nlp(
        components: Tuple[Tuple[str, Dict[str, Any]], ...] = ()
    ) -> Tuple[Language, List[Example]]:
        new_nlp = English()
        new_nlp.add_pipe(  # type: ignore
            factory_name="textcat_multilabel",
            name="tc_multi",
            config={"threshold": 0.9},
        )

        # Append additional components to pipeline.
        for cfn, comp_config in components:
            new_nlp.add_pipe(cfn, config=comp_config)

        new_examples = make_examples(new_nlp)
        new_nlp.initialize(get_examples=lambda: new_examples)
        for i in range(5):
            new_nlp.update(new_examples)

        return new_nlp, new_examples

    with make_tempdir() as docs_dir:
        # Check whether find_threshold() identifies lowest threshold above 0 as (first) ideal threshold, as this matches
        # the current model behavior with the examples above. This can break once the model behavior changes and serves
        # mostly as a smoke test.
        nlp, examples = init_nlp()
        DocBin(docs=[example.reference for example in examples]).to_disk(
            docs_dir / "docs.spacy"
        )
        with make_tempdir() as nlp_dir:
            nlp.to_disk(nlp_dir)
            res = find_threshold(
                model=nlp_dir,
                data_path=docs_dir / "docs.spacy",
                pipe_name="tc_multi",
                threshold_key="threshold",
                scores_key="cats_macro_f",
                silent=True,
            )
            assert res[0] != thresholds[0]
            assert thresholds[0] < res[0] < thresholds[9]
            assert res[1] == 1.0
            assert res[2][1.0] == 0.0

        # Test with spancat.
        nlp, _ = init_nlp((("spancat", {}),))
        with make_tempdir() as nlp_dir:
            nlp.to_disk(nlp_dir)
            res = find_threshold(
                model=nlp_dir,
                data_path=docs_dir / "docs.spacy",
                pipe_name="spancat",
                threshold_key="threshold",
                scores_key="spans_sc_f",
                silent=True,
            )
            assert res[0] != thresholds[0]
            assert thresholds[0] < res[0] < thresholds[8]
            assert res[1] >= 0.6
            assert res[2][1.0] == 0.0

        # Having multiple textcat_multilabel components should work, since the name has to be specified.
        nlp, _ = init_nlp((("textcat_multilabel", {}),))
        with make_tempdir() as nlp_dir:
            nlp.to_disk(nlp_dir)
            assert find_threshold(
                model=nlp_dir,
                data_path=docs_dir / "docs.spacy",
                pipe_name="tc_multi",
                threshold_key="threshold",
                scores_key="cats_macro_f",
                silent=True,
            )

        # Specifying the name of a non-existent pipe should fail.
        nlp, _ = init_nlp()
        with make_tempdir() as nlp_dir:
            nlp.to_disk(nlp_dir)
            with pytest.raises(AttributeError):
                find_threshold(
                    model=nlp_dir,
                    data_path=docs_dir / "docs.spacy",
                    pipe_name="_",
                    threshold_key="threshold",
                    scores_key="cats_macro_f",
                    silent=True,
                )


@pytest.mark.parametrize(
    "reqs,output",
    [

@@ -896,3 +1073,18 @@ def test_project_check_requirements(reqs, output):
        pkg_resources.require("spacyunknowndoesnotexist12345")
    except pkg_resources.DistributionNotFound:
        assert output == _check_requirements([req.strip() for req in reqs.split("\n")])


def test_upload_download_local_file():
    with make_tempdir() as d1, make_tempdir() as d2:
        filename = "f.txt"
        content = "content"
        local_file = d1 / filename
        remote_file = d2 / filename
        with local_file.open(mode="w") as file_:
            file_.write(content)
        upload_file(local_file, remote_file)
        local_file.unlink()
        download_file(remote_file, local_file)
        with local_file.open(mode="r") as file_:
            assert file_.read() == content

@@ -203,6 +203,16 @@ def test_displacy_parse_spans_different_spans_key(en_vocab):
    ]


def test_displacy_parse_empty_spans_key(en_vocab):
    """Test that having an unset spans key doesn't raise an error"""
    doc = Doc(en_vocab, words=["Welcome", "to", "the", "Bank", "of", "China"])
    doc.spans["custom"] = [Span(doc, 3, 6, "BANK")]
    with pytest.warns(UserWarning, match="W117"):
        spans = displacy.parse_spans(doc)

    assert isinstance(spans, dict)


def test_displacy_parse_ents(en_vocab):
    """Test that named entities on a Doc are converted into displaCy's format."""
    doc = Doc(en_vocab, words=["But", "Google", "is", "starting", "from", "behind"])

@@ -2,6 +2,7 @@ import random

import numpy
import pytest
import spacy
import srsly
from spacy.lang.en import English
from spacy.tokens import Doc, DocBin

@@ -11,9 +12,10 @@ from spacy.training import offsets_to_biluo_tags
from spacy.training.alignment_array import AlignmentArray
from spacy.training.align import get_alignments
from spacy.training.converters import json_to_docs
from spacy.training.loop import train_while_improving
from spacy.util import get_words_and_spaces, load_model_from_path, minibatch
from spacy.util import load_config_from_str
from thinc.api import compounding
from thinc.api import compounding, Adam

from ..util import make_tempdir

@@ -1112,3 +1114,39 @@ def test_retokenized_docs(doc):
        retokenizer.merge(doc1[0:2])
        retokenizer.merge(doc1[5:7])
    assert example.get_aligned("ORTH", as_string=True) == expected2


def test_training_before_update(doc):
    def before_update(nlp, args):
        assert args["step"] == 0
        assert args["epoch"] == 1

        # Raise an error here as the rest of the loop
        # will not run to completion due to uninitialized
        # models.
        raise ValueError("ran_before_update")

    def generate_batch():
        yield 1, [Example(doc, doc)]

    nlp = spacy.blank("en")
    nlp.add_pipe("tagger")
    optimizer = Adam()
    generator = train_while_improving(
        nlp,
        optimizer,
        generate_batch(),
        lambda: None,
        dropout=0.1,
        eval_frequency=100,
        accumulate_gradient=10,
        patience=10,
        max_steps=100,
        exclude=[],
        annotating_components=[],
        before_update=before_update,
    )

    with pytest.raises(ValueError, match="ran_before_update"):
        for _ in generator:
            pass

@@ -626,3 +626,23 @@ def test_floret_vectors(floret_vectors_vec_str, floret_vectors_hashvec_str):
            OPS.to_numpy(vocab_r[word].vector),
            decimal=6,
        )


def test_equality():
    vectors1 = Vectors(shape=(10, 10))
    vectors2 = Vectors(shape=(10, 8))

    assert vectors1 != vectors2

    vectors2 = Vectors(shape=(10, 10))
    assert vectors1 == vectors2

    vectors1.add("hello", row=2)
    assert vectors1 != vectors2

    vectors2.add("hello", row=2)
    assert vectors1 == vectors2

    vectors1.resize((5, 9))
    vectors2.resize((5, 9))
    assert vectors1 == vectors2

@@ -1,8 +1,13 @@
import os

import pytest
from spacy.attrs import IS_ALPHA, LEMMA, ORTH
from spacy.lang.en import English
from spacy.parts_of_speech import NOUN, VERB
from spacy.vocab import Vocab

from ..util import make_tempdir


@pytest.mark.issue(1868)
def test_issue1868():

@@ -59,3 +64,19 @@ def test_vocab_api_contains(en_vocab, text):
def test_vocab_writing_system(en_vocab):
    assert en_vocab.writing_system["direction"] == "ltr"
    assert en_vocab.writing_system["has_case"] is True


def test_to_disk():
    nlp = English()
    with make_tempdir() as d:
        nlp.vocab.to_disk(d)
        assert "vectors" in os.listdir(d)
        assert "lookups.bin" in os.listdir(d)


def test_to_disk_exclude():
    nlp = English()
    with make_tempdir() as d:
        nlp.vocab.to_disk(d, exclude=("vectors", "lookups"))
        assert "vectors" not in os.listdir(d)
        assert "lookups.bin" not in os.listdir(d)

@@ -59,6 +59,7 @@ def train(
    batcher = T["batcher"]
    train_logger = T["logger"]
    before_to_disk = create_before_to_disk_callback(T["before_to_disk"])
    before_update = T["before_update"]

    # Helper function to save checkpoints. This is a closure for convenience,
    # to avoid passing in all the args all the time.

@@ -89,6 +90,7 @@ def train(
        eval_frequency=T["eval_frequency"],
        exclude=frozen_components,
        annotating_components=annotating_components,
        before_update=before_update,
    )
    clean_output_dir(output_path)
    stdout.write(msg.info(f"Pipeline: {nlp.pipe_names}") + "\n")

@@ -150,6 +152,7 @@ def train_while_improving(
    max_steps: int,
    exclude: List[str],
    annotating_components: List[str],
    before_update: Optional[Callable[["Language", Dict[str, Any]], None]],
):
    """Train until an evaluation stops improving. Works as a generator,
    with each iteration yielding a tuple `(batch, info, is_best_checkpoint)`,

@@ -198,6 +201,9 @@ def train_while_improving(
    words_seen = 0
    start_time = timer()
    for step, (epoch, batch) in enumerate(train_data):
        if before_update:
            before_update_args = {"step": step, "epoch": epoch}
            before_update(nlp, before_update_args)
        dropout = next(dropouts)  # type: ignore
        for subbatch in subdivide_batch(batch, accumulate_gradient):
            nlp.update(

@@ -51,8 +51,7 @@ from . import about

if TYPE_CHECKING:
    # This lets us add type hints for mypy etc. without causing circular imports
    from .language import Language  # noqa: F401
    from .pipeline import Pipe  # noqa: F401
    from .language import Language, PipeCallable  # noqa: F401
    from .tokens import Doc, Span  # noqa: F401
    from .vocab import Vocab  # noqa: F401

@@ -1642,9 +1641,9 @@ def check_bool_env_var(env_var: str) -> bool:

def _pipe(
    docs: Iterable["Doc"],
    proc: "Pipe",
    proc: "PipeCallable",
    name: str,
    default_error_handler: Callable[[str, "Pipe", List["Doc"], Exception], NoReturn],
    default_error_handler: Callable[[str, "PipeCallable", List["Doc"], Exception], NoReturn],
    kwargs: Mapping[str, Any],
) -> Iterator["Doc"]:
    if hasattr(proc, "pipe"):

@@ -243,6 +243,15 @@ cdef class Vectors:
        else:
            return key in self.key2row

    def __eq__(self, other):
        # Check for equality, with faster checks first
        return (
            self.shape == other.shape
            and self.key2row == other.key2row
            and self.to_bytes(exclude=["strings"])
            == other.to_bytes(exclude=["strings"])
        )

    def resize(self, shape, inplace=False):
        """Resize the underlying vectors array. If inplace=True, the memory
        is reallocated. This may cause other references to the data to become

@@ -468,9 +468,9 @@ cdef class Vocab:
        setters = ["strings", "vectors"]
        if "strings" not in exclude:
            self.strings.to_disk(path / "strings.json")
        if "vectors" not in "exclude":
        if "vectors" not in exclude:
            self.vectors.to_disk(path, exclude=["strings"])
        if "lookups" not in "exclude":
        if "lookups" not in exclude:
            self.lookups.to_disk(path)

    def from_disk(self, path, *, exclude=tuple()):

@@ -1,531 +1,11 @@
<Comment>

# spacy.io website and docs

![](https://user-images.githubusercontent.com/13643239/60794940-4dea8600-a15c-11e9-8b0d-5b0e5728ae84.png)

_This page contains the documentation and styleguide for the spaCy website. Its
rendered version is available at https://spacy.io/styleguide._
The styleguide for the spaCy website is available at
[spacy.io/styleguide](https://spacy.io/styleguide).

---

</Comment>

The [spacy.io](https://spacy.io) website is implemented using
[Gatsby](https://www.gatsbyjs.org) with
[Remark](https://github.com/remarkjs/remark) and [MDX](https://mdxjs.com/). This
allows authoring content in **straightforward Markdown** without the usual
limitations. Standard elements can be overwritten with powerful
[React](http://reactjs.org/) components and wherever Markdown syntax isn't
enough, JSX components can be used.

> #### Contributing to the site
>
> The docs can always use another example or more detail, and they should always
> be up to date and not misleading. We always appreciate a
> [pull request](https://github.com/explosion/spaCy/pulls). To quickly find the
> correct file to edit, simply click on the "Suggest edits" button at the bottom
> of a page.
>
> For more details on editing the site locally, see the installation
> instructions and markdown reference below.

## Logo {#logo source="website/src/images/logo.svg"}

import { Logos } from 'widgets/styleguide'

If you would like to use the spaCy logo on your site, please get in touch and
ask us first. However, if you want to show support and tell others that your
project is using spaCy, you can grab one of our
[spaCy badges](/usage/spacy-101#faq-project-with-spacy).

<Logos />

## Colors {#colors}

import { Colors, Patterns } from 'widgets/styleguide'

<Colors />

### Patterns

<Patterns />

## Typography {#typography}

import { H1, H2, H3, H4, H5, Label, InlineList, Comment } from
'components/typography'

> #### Markdown
>
> ```markdown_
> ## Headline 2
> ## Headline 2 {#some_id}
> ## Headline 2 {#some_id tag="method"}
> ```
>
> #### JSX
>
> ```jsx
> <H2>Headline 2</H2>
> <H2 id="some_id">Headline 2</H2>
> <H2 id="some_id" tag="method">Headline 2</H2>
> ```

Headlines are set in
[HK Grotesk](http://cargocollective.com/hanken/HK-Grotesk-Open-Source-Font) by
Hanken Design. All other body text and code uses the best-matching default
system font to provide a "native" reading experience. All code uses the
[JetBrains Mono](https://www.jetbrains.com/lp/mono/) typeface by JetBrains.

<Infobox title="Important note" variant="warning">

Level 2 headings are automatically wrapped in `<section>` elements at compile
time, using a custom
[Markdown transformer](https://github.com/explosion/spaCy/tree/master/website/plugins/remark-wrap-section.js).
This makes it easier to highlight the section that's currently in the viewport
in the sidebar menu.

</Infobox>

<div>
<H1>Headline 1</H1>
<H2>Headline 2</H2>
<H3>Headline 3</H3>
<H4>Headline 4</H4>
<H5>Headline 5</H5>
<Label>Label</Label>
</div>

---

The following optional attributes can be set on the headline to modify it. For
example, to add a tag for the documented type or mark features that have been
introduced in a specific version or require statistical models to be loaded.
Tags are also available as standalone `<Tag />` components.

| Argument | Example                    | Result                                    |
| -------- | -------------------------- | ----------------------------------------- |
| `tag`    | `{tag="method"}`           | <Tag>method</Tag>                         |
| `new`    | `{new="3"}`                | <Tag variant="new">3</Tag>                |
| `model`  | `{model="tagger, parser"}` | <Tag variant="model">tagger, parser</Tag> |
| `hidden` | `{hidden="true"}`          |                                           |

## Elements {#elements}

### Links {#links}

> #### Markdown
>
> ```markdown
> [I am a link](https://spacy.io)
> ```
>
> #### JSX
>
> ```jsx
> <Link to="https://spacy.io">I am a link</Link>
> ```

Special link styles are used depending on the link URL.

- [I am a regular external link](https://explosion.ai)
- [I am a link to the documentation](/api/doc)
- [I am a link to an architecture](/api/architectures#HashEmbedCNN)
- [I am a link to a model](/models/en#en_core_web_sm)
- [I am a link to GitHub](https://github.com/explosion/spaCy)

### Abbreviations {#abbr}

import { Abbr } from 'components/typography'

> #### JSX
>
> ```jsx
> <Abbr title="Explanation">Abbreviation</Abbr>
> ```

Some text with <Abbr title="Explanation here">an abbreviation</Abbr>. On small
screens, I collapse and the explanation text is displayed next to the
abbreviation.

### Tags {#tags}

import Tag from 'components/tag'

> ```jsx
> <Tag>method</Tag>
> <Tag variant="new">4</Tag>
> <Tag variant="model">tagger, parser</Tag>
> ```

Tags can be used together with headlines, or next to properties across the
documentation, and combined with tooltips to provide additional information. An
optional `variant` argument can be used for special tags. `variant="new"` makes
the tag take a version number to mark new features. Using the component,
visibility of this tag can later be toggled once the feature isn't considered
new anymore. Setting `variant="model"` takes a description of model capabilities
and can be used to mark features that require a respective model to be
installed.

<InlineList>

<Tag>method</Tag> <Tag variant="new">4</Tag> <Tag variant="model">tagger,
parser</Tag>

</InlineList>

### Buttons {#buttons}

import Button from 'components/button'

> ```jsx
> <Button to="#" variant="primary">Primary small</Button>
> <Button to="#" variant="secondary">Secondary small</Button>
> ```

Link buttons come in two variants, `primary` and `secondary` and two sizes, with
an optional `large` size modifier. Since they're mostly used as enhanced links,
the buttons are implemented as styled links instead of native button elements.

<InlineList><Button to="#" variant="primary">Primary small</Button>
<Button to="#" variant="secondary">Secondary small</Button></InlineList>

<br />

<InlineList><Button to="#" variant="primary" large>Primary large</Button>
<Button to="#" variant="secondary" large>Secondary large</Button></InlineList>

## Components

### Table {#table}

> #### Markdown
>
> ```markdown_
> | Header 1 | Header 2 |
> | -------- | -------- |
> | Column 1 | Column 2 |
> ```
>
> #### JSX
>
> ```markup
> <Table>
> <Tr><Th>Header 1</Th><Th>Header 2</Th></Tr></thead>
> <Tr><Td>Column 1</Td><Td>Column 2</Td></Tr>
> </Table>
> ```

Tables are used to present data and API documentation. Certain keywords can be
used to mark a footer row with a distinct style, for example to visualize the
return values of a documented function.

| Header 1    | Header 2 | Header 3 | Header 4 |
| ----------- | -------- | :------: | -------: |
| Column 1    | Column 2 | Column 3 | Column 4 |
| Column 1    | Column 2 | Column 3 | Column 4 |
| Column 1    | Column 2 | Column 3 | Column 4 |
| Column 1    | Column 2 | Column 3 | Column 4 |
| **RETURNS** | Column 2 | Column 3 | Column 4 |

Tables also support optional "divider" rows that are typically used to denote
keyword-only arguments in API documentation. To turn a row into a dividing
headline, it should only include content in its first cell, and its value should
be italicized:

> #### Markdown
>
> ```markdown_
> | Header 1 | Header 2 | Header 3 |
> | -------- | -------- | -------- |
> | Column 1 | Column 2 | Column 3 |
> | _Hello_  |          |          |
> | Column 1 | Column 2 | Column 3 |
> ```

| Header 1 | Header 2 | Header 3 |
| -------- | -------- | -------- |
| Column 1 | Column 2 | Column 3 |
| _Hello_  |          |          |
| Column 1 | Column 2 | Column 3 |

### Type Annotations {#type-annotations}

> #### Markdown
>
> ```markdown_
> ~~Model[List[Doc], Floats2d]~~
> ```
>
> #### JSX
>
> ```markup
> <TypeAnnotation>Model[List[Doc], Floats2d]</Typeannotation>
> ```

Type annotations are special inline code blocks that are used to describe Python
types in the [type hints](https://docs.python.org/3/library/typing.html) format.
The special component will split the type, apply syntax highlighting and link
all types that specify links in `meta/type-annotations.json`. Types can link to
internal or external documentation pages. To make it easy to represent the type
annotations in Markdown, the rendering "hijacks" the `~~` tags that would
typically be converted to a `<del>` element – but in this case, text surrounded
by `~~` becomes a type annotation.

- ~~Dict[str, List[Union[Doc, Span]]]~~
- ~~Model[List[Doc], List[numpy.ndarray]]~~

Type annotations support a special visual style in tables and will render as a
separate row, under the cell text. This allows the API docs to display complex
types without taking up too much space in the cell. The type annotation should
always be the **last element** in the row.

> #### Markdown
>
> ```markdown_
> | Header 1 | Header 2               |
> | -------- | ---------------------- |
> | Column 1 | Column 2 ~~List[Doc]~~ |
> ```

| Name | Description |
| ----------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `vocab` | The shared vocabulary. ~~Vocab~~ |
| `model` | The Thinc [`Model`](https://thinc.ai/docs/api-model) wrapping the transformer. ~~Model[List[Doc], FullTransformerBatch]~~ |
| `set_extra_annotations` | Function that takes a batch of `Doc` objects and transformer outputs and can set additional annotations on the `Doc`. ~~Callable[[List[Doc], FullTransformerBatch], None]~~ |

### List {#list}

> #### Markdown
>
> ```markdown_
> 1. One
> 2. Two
> ```
>
> #### JSX
>
> ```markup
> <Ol>
> <Li>One</Li>
> <Li>Two</Li>
> </Ol>
> ```

Lists are available as bulleted and numbered. Markdown lists are transformed
automatically.

- I am a bulleted list
- I have nice bullets
- Lorem ipsum dolor
- consectetur adipiscing elit

1. I am an ordered list
2. I have nice numbers
3. Lorem ipsum dolor
4. consectetur adipiscing elit

### Aside {#aside}

> #### Markdown
>
> ```markdown_
> > #### Aside title
> > This is aside text.
> ```
>
> #### JSX
>
> ```jsx
> <Aside title="Aside title">This is aside text.</Aside>
> ```

Asides can be used to display additional notes and content in the right-hand
column. Asides can contain text, code and other elements if needed. Visually,
asides are moved to the side on the X-axis, and displayed at the same level they
were inserted. On small screens, they collapse and are rendered in their
original position, in between the text.

To make them easier to use in Markdown, paragraphs formatted as blockquotes will
turn into asides by default. Level 4 headlines (with a leading `####`) will
become aside titles.

### Code Block {#code-block}

> #### Markdown
>
> ````markdown_
> ```python
> ### This is a title
> import spacy
> ```
> ````
>
> #### JSX
>
> ```jsx
> <CodeBlock title="This is a title" lang="python">
> import spacy
> </CodeBlock>
> ```

Code blocks use the [Prism](http://prismjs.com/) syntax highlighter with a
custom theme. The language can be set individually on each block, and defaults
to raw text with no highlighting. An optional label can be added as the first
line with the prefix `####` (Python-like) or `///` (JavaScript-like). Blocks
without a language render the indented block as plain text and preserve
whitespace.

```python
### Using spaCy
import spacy
nlp = spacy.load("en_core_web_sm")
doc = nlp("This is a sentence.")
for token in doc:
    print(token.text, token.pos_)
```

Code blocks can also specify an optional range of line numbers to highlight by
adding `{highlight="..."}` to the headline. Acceptable ranges are spans like
`5-7`, but also `5-7,10` or `5-7,10,13-14`.

> #### Markdown
>
> ````markdown_
> ```python
> ### This is a title {highlight="1-2"}
> import spacy
> nlp = spacy.load("en_core_web_sm")
> ```
> ````

```python
### Using the matcher {highlight="5-7"}
import spacy
from spacy.matcher import Matcher

nlp = spacy.load('en_core_web_sm')
matcher = Matcher(nlp.vocab)
pattern = [{"LOWER": "hello"}, {"IS_PUNCT": True}, {"LOWER": "world"}]
matcher.add("HelloWorld", None, pattern)
doc = nlp("Hello, world! Hello world!")
matches = matcher(doc)
```

Adding `{executable="true"}` to the title turns the code into an executable
block, powered by [Binder](https://mybinder.org) and
[Juniper](https://github.com/ines/juniper). If JavaScript is disabled, the
interactive widget defaults to a regular code block.

> #### Markdown
>
> ````markdown_
> ```python
> ### {executable="true"}
> import spacy
> nlp = spacy.load("en_core_web_sm")
> ```
> ````

```python
### {executable="true"}
import spacy
nlp = spacy.load("en_core_web_sm")
doc = nlp("This is a sentence.")
for token in doc:
    print(token.text, token.pos_)
```

If a code block only contains a URL to a GitHub file, the raw file contents are
embedded automatically and syntax highlighting is applied. The link to the
original file is shown at the top of the widget.

> #### Markdown
>
> ````markdown_
> ```python
> https://github.com/...
> ```
> ````
>
> #### JSX
>
> ```jsx
> <GitHubCode url="https://github.com/..." lang="python" />
> ```

```python
https://github.com/explosion/spaCy/tree/master/spacy/language.py
```

### Infobox {#infobox}

import Infobox from 'components/infobox'

> #### JSX
>
> ```jsx
> <Infobox title="Information">Regular infobox</Infobox>
> <Infobox title="Important note" variant="warning">This is a warning.</Infobox>
> <Infobox title="Be careful!" variant="danger">This is dangerous.</Infobox>
> ```

Infoboxes can be used to add notes, updates, warnings or additional information
to a page or section. Semantically, they're implemented and interpreted as an
`aside` element. Infoboxes can take an optional `title` argument, as well as an
optional `variant` (either `"warning"` or `"danger"`).

<Infobox title="This is an infobox">

If needed, an infobox can contain regular text, `inline code`, lists and other
blocks.

</Infobox>

<Infobox title="This is a warning" variant="warning">

If needed, an infobox can contain regular text, `inline code`, lists and other
blocks.

</Infobox>

<Infobox title="This is dangerous" variant="danger">

If needed, an infobox can contain regular text, `inline code`, lists and other
blocks.

</Infobox>

### Accordion {#accordion}

import Accordion from 'components/accordion'

> #### JSX
>
> ```jsx
> <Accordion title="This is an accordion">
> Accordion content goes here.
> </Accordion>
> ```

Accordions are collapsible sections that are mostly used for lengthy tables,
like the tag and label annotation schemes for different languages. They all need
to be presented – but chances are the user doesn't actually care about _all_ of
them, especially not at the same time. So it's fairly reasonable to hide them
behind a click. This particular implementation was inspired by the amazing
[Inclusive Components blog](https://inclusive-components.design/collapsible-sections/).

<Accordion title="This is an accordion">

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Quisque enim ante,
pretium a orci eget, varius dignissim augue. Nam eu dictum mauris, id tincidunt
nisi. Integer commodo pellentesque tincidunt. Nam at turpis finibus tortor
gravida sodales tincidunt sit amet est. Nullam euismod arcu in tortor auctor,
sit amet dignissim justo congue.

</Accordion>

## Setup and installation {#setup}
## Setup and installation

Before running the setup, make sure your versions of
[Node](https://nodejs.org/en/) and [npm](https://www.npmjs.com/) are up to date.

@@ -554,14 +34,14 @@ extensions for your code editor. The
[`.prettierrc`](https://github.com/explosion/spaCy/tree/master/website/.prettierrc)
file in the root defines the settings used in this codebase.

## Building & developing the site with Docker {#docker}
Sometimes it's hard to get a local environment working due to rapid updates to node dependencies,
so it may be easier to use docker for building the docs.
## Building & developing the site with Docker

If you'd like to do this,
**be sure you do *not* include your local `node_modules` folder**,
since there are some dependencies that need to be built for the image system.
Rename it before using.
Sometimes it's hard to get a local environment working due to rapid updates to
node dependencies, so it may be easier to use docker for building the docs.

If you'd like to do this, **be sure you do _not_ include your local
`node_modules` folder**, since there are some dependencies that need to be built
for the image system. Rename it before using.

```bash
docker run -it \

@@ -571,16 +51,16 @@ docker run -it \
  gatsby develop -H 0.0.0.0
```

This will allow you to access the built website at http://0.0.0.0:8000/
in your browser, and still edit code in your editor while having the site
reflect those changes.
This will allow you to access the built website at http://0.0.0.0:8000/ in your
browser, and still edit code in your editor while having the site reflect those
changes.

**Note**: If you're working on a Mac with an M1 processor,
you might see segfault errors from `qemu` if you use the default image.
To fix this use the `arm64` tagged image in the `docker run` command
**Note**: If you're working on a Mac with an M1 processor, you might see
segfault errors from `qemu` if you use the default image. To fix this use the
`arm64` tagged image in the `docker run` command
(ghcr.io/explosion/spacy-io:arm64).

### Building the Docker image {#docker-build}
### Building the Docker image

If you'd like to build the image locally, you can do so like this:

@@ -588,67 +68,21 @@ If you'd like to build the image locally, you can do so like this:
docker build -t spacy-io .
```

This will take some time, so if you want to use the prebuilt image you'll save a bit of time.
This will take some time, so if you want to use the prebuilt image you'll save a
bit of time.

## Markdown reference {#markdown}

All page content and page meta lives in the `.md` files in the `/docs`
directory. The frontmatter block at the top of each file defines the page title
and other settings like the sidebar menu.

````markdown
---
title: Page title
---

## Headline starting a section {#some_id}

This is a regular paragraph with a [link](https://spacy.io) and **bold text**.

> #### This is an aside title
>
> This is aside text.

### Subheadline

| Header 1 | Header 2 |
| -------- | -------- |
| Column 1 | Column 2 |

```python
### Code block title {highlight="2-3"}
import spacy
nlp = spacy.load("en_core_web_sm")
doc = nlp("Hello world")
```

<Infobox title="Important note" variant="warning">

This is content in the infobox.

</Infobox>
````

In addition to the native markdown elements, you can use the components
[`<Infobox />`][infobox], [`<Accordion />`][accordion], [`<Abbr />`][abbr] and
[`<Tag />`][tag] via their JSX syntax.

[infobox]: https://spacy.io/styleguide#infobox
[accordion]: https://spacy.io/styleguide#accordion
[abbr]: https://spacy.io/styleguide#abbr
[tag]: https://spacy.io/styleguide#tag

## Project structure {#structure}
## Project structure

```yaml
### Directory structure
├── docs                 # the actual markdown content
├── meta                 # JSON-formatted site metadata
|   ├── languages.json   # supported languages and statistical models
|   ├── sidebars.json    # sidebar navigations for different sections
|   ├── site.json        # general site metadata
|   ├── type-annotations.json # Type annotations
|   └── universe.json    # data for the spaCy universe section
├── public               # compiled site
├── setup                # Jinja setup
├── src                  # source
|   ├── components       # React components
|   ├── fonts            # webfonts

@@ -661,54 +95,10 @@ In addition to the native markdown elements, you can use the components
|   |   ├── models.js    # layout template for model pages
|   |   └── universe.js  # layout templates for universe
|   └── widgets          # non-reusable components with content, e.g. changelog
├── .eslintrc.json       # ESLint config file
├── .prettierrc          # Prettier config file
├── gatsby-browser.js    # browser-specific hooks for Gatsby
├── gatsby-config.js     # Gatsby configuration
├── gatsby-node.js       # Node-specific hooks for Gatsby
└── package.json         # package settings and dependencies
```

## Editorial {#editorial}

- "spaCy" should always be spelled with a lowercase "s" and a capital "C",
  unless it specifically refers to the Python package or Python import `spacy`
  (in which case it should be formatted as code).
  - ✅ spaCy is a library for advanced NLP in Python.
  - ❌ Spacy is a library for advanced NLP in Python.
  - ✅ First, you need to install the `spacy` package from pip.
- Mentions of code, like function names, classes, variable names etc. in inline
  text should be formatted as `code`.
  - ✅ "Calling the `nlp` object on a text returns a `Doc`."
- Objects that have pages in the [API docs](/api) should be linked – for
  example, [`Doc`](/api/doc) or [`Language.to_disk`](/api/language#to_disk). The
  mentions should still be formatted as code within the link. Links pointing to
  the API docs will automatically receive a little icon. However, if a paragraph
  includes many references to the API, the links can easily get messy. In that
  case, we typically only link the first mention of an object and not any
  subsequent ones.
  - ✅ The [`Span`](/api/span) and [`Token`](/api/token) objects are views of a
    [`Doc`](/api/doc). [`Span.as_doc`](/api/span#as_doc) creates a `Doc` object
    from a `Span`.
  - ❌ The [`Span`](/api/span) and [`Token`](/api/token) objects are views of a
    [`Doc`](/api/doc). [`Span.as_doc`](/api/span#as_doc) creates a
    [`Doc`](/api/doc) object from a [`Span`](/api/span).

* Other things we format as code are: references to trained pipeline packages
  like `en_core_web_sm` or file names like `code.py` or `meta.json`.

  - ✅ After training, the `config.cfg` is saved to disk.

* [Type annotations](#type-annotations) are a special type of code formatting,
  expressed by wrapping the text in `~~` instead of backticks. The result looks
  like this: ~~List[Doc]~~. All references to known types will be linked
  automatically.

  - ✅ The model has the input type ~~List[Doc]~~ and it outputs a
    ~~List[Array2d]~~.

* We try to keep links meaningful but short.
  - ✅ For details, see the usage guide on
    [training with custom code](/usage/training#custom-code).
  - ❌ For details, see
    [the usage guide on training with custom code](/usage/training#custom-code).
  - ❌ For details, see the usage guide on training with custom code
    [here](/usage/training#custom-code).

@@ -12,6 +12,7 @@ menu:
  - ['train', 'train']
  - ['pretrain', 'pretrain']
  - ['evaluate', 'evaluate']
  - ['find-threshold', 'find-threshold']
  - ['assemble', 'assemble']
  - ['package', 'package']
  - ['project', 'project']

@@ -1161,6 +1162,46 @@ $ python -m spacy evaluate [model] [data_path] [--output] [--code] [--gold-preproc]
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
| **CREATES**    | Training results and optional metrics and visualizations.  |

## find-threshold {#find-threshold new="3.5" tag="command"}

Runs prediction trials for a trained model with varying thresholds to maximize
the specified metric. The search space for the threshold is traversed linearly
from 0 to 1 in `n_trials` steps. Results are displayed in a table on `stdout`
(the corresponding API call to `spacy.cli.find_threshold.find_threshold()`
returns all results).

This is applicable only for components whose predictions are influenced by
thresholds - e.g. `textcat_multilabel` and `spancat`, but not `textcat`. Note
that the full path to the corresponding threshold attribute in the config has to
be provided.

> #### Examples
>
> ```cli
> # For textcat_multilabel:
> $ python -m spacy find-threshold my_nlp data.spacy textcat_multilabel threshold cats_macro_f
> ```
>
> ```cli
> # For spancat:
> $ python -m spacy find-threshold my_nlp data.spacy spancat threshold spans_sc_f
> ```

| Name                    | Description                                                                                                                                                                            |
| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `model`                 | Pipeline to evaluate. Can be a package or a path to a data directory. ~~str (positional)~~                                                                                             |
| `data_path`             | Path to a file with a `DocBin` containing the docs to use for the threshold search. ~~Path (positional)~~                                                                              |
| `pipe_name`             | Name of pipe to examine thresholds for. ~~str (positional)~~                                                                                                                           |
| `threshold_key`         | Key of threshold attribute in component's configuration. ~~str (positional)~~                                                                                                          |
| `scores_key`            | Name of the score metric to optimize. ~~str (positional)~~                                                                                                                             |
| `--n_trials`, `-n`      | Number of trials to determine optimal thresholds. ~~int (option)~~                                                                                                                     |
| `--code`, `-c`          | Path to Python file with additional code to be imported. Allows [registering custom functions](/usage/training#custom-functions) for new architectures. ~~Optional[Path] \(option)~~  |
| `--gpu-id`, `-g`        | GPU to use, if any. Defaults to `-1` for CPU. ~~int (option)~~                                                                                                                         |
| `--gold-preproc`, `-G`  | Use gold preprocessing. ~~bool (flag)~~                                                                                                                                                |
| `--silent`, `-V`, `-VV` | Display more information for debugging purposes. ~~bool (flag)~~                                                                                                                       |
| `--help`, `-h`          | Show help message and available arguments. ~~bool (flag)~~                                                                                                                             |

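For reference, a minimal sketch of the equivalent API call (the file paths below
are hypothetical; the call signature and the returned tuple of best threshold,
best score and per-threshold scores match the test usage added in this commit):

```python
from pathlib import Path

from spacy.cli.find_threshold import find_threshold

# Returns (best threshold, best score, scores per threshold).
best_threshold, best_score, scores = find_threshold(
    model="training/model-best",         # hypothetical pipeline directory
    data_path=Path("corpus/dev.spacy"),  # hypothetical DocBin with dev docs
    pipe_name="textcat_multilabel",
    threshold_key="threshold",
    scores_key="cats_macro_f",
    silent=True,
)
print(best_threshold, best_score)
```
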
## assemble {#assemble tag="command"}

Assemble a pipeline from a config file without additional training. Expects a

@@ -1350,12 +1391,13 @@ If the contents are different, the new version of the file is uploaded. Deleting
obsolete files is left up to you.

Remotes can be defined in the `remotes` section of the
[`project.yml`](/usage/projects#project-yml). Under the hood, spaCy uses the
[`smart-open`](https://github.com/RaRe-Technologies/smart_open) library to
communicate with the remote storages, so you can use any protocol that
`smart-open` supports, including [S3](https://aws.amazon.com/s3/),
[Google Cloud Storage](https://cloud.google.com/storage), SSH and more, although
you may need to install extra dependencies to use certain protocols.
[`project.yml`](/usage/projects#project-yml). Under the hood, spaCy uses
[`Pathy`](https://github.com/justindujardin/pathy) to communicate with the
remote storages, so you can use any protocol that `Pathy` supports, including
[S3](https://aws.amazon.com/s3/),
[Google Cloud Storage](https://cloud.google.com/storage), and the local
filesystem, although you may need to install extra dependencies to use certain
protocols.

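For example, a `remotes` section might look like this (the bucket name and
local path are placeholders):

```yaml
remotes:
  default: 's3://my-spacy-bucket'
  local: '/mnt/scratch/spacy-cache'
```
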
```cli
$ python -m spacy project push [remote] [project_dir]

@@ -1394,12 +1436,13 @@ outputs, so if you change the config back, you'll be able to fetch back the
result.

Remotes can be defined in the `remotes` section of the
[`project.yml`](/usage/projects#project-yml). Under the hood, spaCy uses the
[`smart-open`](https://github.com/RaRe-Technologies/smart_open) library to
communicate with the remote storages, so you can use any protocol that
`smart-open` supports, including [S3](https://aws.amazon.com/s3/),
[Google Cloud Storage](https://cloud.google.com/storage), SSH and more, although
you may need to install extra dependencies to use certain protocols.
[`project.yml`](/usage/projects#project-yml). Under the hood, spaCy uses
[`Pathy`](https://github.com/justindujardin/pathy) to communicate with the
remote storages, so you can use any protocol that `Pathy` supports, including
[S3](https://aws.amazon.com/s3/),
[Google Cloud Storage](https://cloud.google.com/storage), and the local
filesystem, although you may need to install extra dependencies to use certain
protocols.

```cli
$ python -m spacy project pull [remote] [project_dir]

@@ -186,6 +186,7 @@ process that are used when you run [`spacy train`](/api/cli#train).
| `accumulate_gradient` | Whether to divide the batch up into substeps. Defaults to `1`. ~~int~~ |
| `batcher` | Callable that takes an iterator of [`Doc`](/api/doc) objects and yields batches of `Doc`s. Defaults to [`batch_by_words`](/api/top-level#batch_by_words). ~~Callable[[Iterator[Doc], Iterator[List[Doc]]]]~~ |
| `before_to_disk` | Optional callback to modify `nlp` object right before it is saved to disk during and after training. Can be used to remove or reset config values or disable components. Defaults to `null`. ~~Optional[Callable[[Language], Language]]~~ |
| `before_update` | Optional callback that is invoked at the start of each training step with the `nlp` object and a `Dict` containing the following entries: `step`, `epoch`. Can be used to make deferred changes to components. Defaults to `null`. ~~Optional[Callable[[Language, Dict[str, Any]], None]]~~ |
| `dev_corpus` | Dot notation of the config location defining the dev corpus. Defaults to `corpora.dev`. ~~str~~ |
| `dropout` | The dropout rate. Defaults to `0.1`. ~~float~~ |
| `eval_frequency` | How often to evaluate during training (steps). Defaults to `200`. ~~int~~ |

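For illustration, a minimal sketch of such a `before_update` callback supplied
through the callbacks registry (the registry name
`"customize_before_update.v1"` is a hypothetical example; the `args` dict
carries the `step` and `epoch` entries described above):

```python
from typing import Any, Dict

import spacy
from spacy.language import Language


@spacy.registry.callbacks("customize_before_update.v1")
def create_before_update():
    def before_update(nlp: Language, args: Dict[str, Any]) -> None:
        # Invoked at the start of every training step with the current
        # step and epoch; useful for deferred changes to components.
        if args["step"] == 0:
            print(f"Starting epoch {args['epoch']}")

    return before_update
```
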
@@ -1004,6 +1004,54 @@ This method was previously available as `spacy.gold.spans_from_biluo_tags`.
| `tags` | A sequence of [BILUO](/usage/linguistic-features#accessing-ner) tags with each tag describing one token. Each tag string will be of the form of either `""`, `"O"` or `"{action}-{label}"`, where action is one of `"B"`, `"I"`, `"L"`, `"U"`. ~~List[str]~~ |
| **RETURNS** | A sequence of `Span` objects with added entity labels. ~~List[Span]~~ |

### training.biluo_to_iob {#biluo_to_iob tag="function"}

Convert a sequence of [BILUO](/usage/linguistic-features#accessing-ner) tags to
[IOB](/usage/linguistic-features#accessing-ner) tags. This is useful if you want
to use the BILUO tags with a model that only supports IOB tags.

> #### Example
>
> ```python
> from spacy.training import biluo_to_iob
>
> tags = ["O", "O", "B-LOC", "I-LOC", "L-LOC", "O"]
> iob_tags = biluo_to_iob(tags)
> assert iob_tags == ["O", "O", "B-LOC", "I-LOC", "I-LOC", "O"]
> ```

| Name        | Description                                                                             |
| ----------- | --------------------------------------------------------------------------------------- |
| `tags`      | A sequence of [BILUO](/usage/linguistic-features#accessing-ner) tags. ~~Iterable[str]~~ |
| **RETURNS** | A list of [IOB](/usage/linguistic-features#accessing-ner) tags. ~~List[str]~~           |

### training.iob_to_biluo {#iob_to_biluo tag="function"}

Convert a sequence of [IOB](/usage/linguistic-features#accessing-ner) tags to
[BILUO](/usage/linguistic-features#accessing-ner) tags. This is useful if you
want to use the IOB tags with a model that only supports BILUO tags.

<Infobox title="Changed in v3.0" variant="warning" id="iob_to_biluo">

This method was previously available as `spacy.gold.iob_to_biluo`.

</Infobox>

> #### Example
>
> ```python
> from spacy.training import iob_to_biluo
>
> tags = ["O", "O", "B-LOC", "I-LOC", "O"]
> biluo_tags = iob_to_biluo(tags)
> assert biluo_tags == ["O", "O", "B-LOC", "L-LOC", "O"]
> ```

| Name        | Description                                                                            |
| ----------- | -------------------------------------------------------------------------------------- |
| `tags`      | A sequence of [IOB](/usage/linguistic-features#accessing-ner) tags. ~~Iterable[str]~~  |
| **RETURNS** | A list of [BILUO](/usage/linguistic-features#accessing-ner) tags. ~~List[str]~~        |

## Utility functions {#util source="spacy/util.py"}

spaCy comes with a small collection of utility functions located in

@@ -50,7 +50,7 @@ modified later.
| _keyword-only_ | |
| `strings` | The string store. A new string store is created if one is not provided. Defaults to `None`. ~~Optional[StringStore]~~ |
| `shape` | Size of the table as `(n_entries, n_columns)`, the number of entries and number of columns. Not required if you're initializing the object with `data` and `keys`. ~~Tuple[int, int]~~ |
| `data` | The vector data. ~~numpy.ndarray[ndim=1, dtype=float32]~~ |
| `data` | The vector data. ~~numpy.ndarray[ndim=2, dtype=float32]~~ |
| `keys` | A sequence of keys aligned with the data. ~~Iterable[Union[str, int]]~~ |
| `name` | A name to identify the vectors table. ~~str~~ |
| `mode` <Tag variant="new">3.2</Tag> | Vectors mode: `"default"` or [`"floret"`](https://github.com/explosion/floret) (default: `"default"`). ~~str~~ |

@@ -308,14 +308,14 @@ Load state from a binary string.
> assert type(PERSON) == int
> ```

| Name                                           | Description                                                                                                                                                              |
| ---------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `strings`                                      | A table managing the string-to-int mapping. ~~StringStore~~                                                                                                              |
| `vectors`                                      | A table associating word IDs to word vectors. ~~Vectors~~                                                                                                                |
| `vectors_length`                               | Number of dimensions for each word vector. ~~int~~                                                                                                                       |
| `lookups`                                      | The available lookup tables in this vocab. ~~Lookups~~                                                                                                                   |
| `writing_system`                               | A dict with information about the language's writing system. ~~Dict[str, Any]~~                                                                                         |
| `get_noun_chunks` <Tag variant="new">3.0</Tag> | A function that yields base noun phrases used for [`Doc.noun_chunks`](/ap/doc#noun_chunks). ~~Optional[Callable[[Union[Doc, Span], Iterator[Tuple[int, int, int]]]]]~~   |
| Name                                           | Description                                                                                                                                                               |
| ---------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `strings`                                      | A table managing the string-to-int mapping. ~~StringStore~~                                                                                                               |
| `vectors`                                      | A table associating word IDs to word vectors. ~~Vectors~~                                                                                                                 |
| `vectors_length`                               | Number of dimensions for each word vector. ~~int~~                                                                                                                        |
| `lookups`                                      | The available lookup tables in this vocab. ~~Lookups~~                                                                                                                    |
| `writing_system`                               | A dict with information about the language's writing system. ~~Dict[str, Any]~~                                                                                           |
| `get_noun_chunks` <Tag variant="new">3.0</Tag> | A function that yields base noun phrases used for [`Doc.noun_chunks`](/api/doc#noun_chunks). ~~Optional[Callable[[Union[Doc, Span], Iterator[Tuple[int, int, int]]]]]~~   |

## Serialization fields {#serialization-fields}

@@ -8,9 +8,7 @@ menu:
  - ['Typography', 'typography']
  - ['Elements', 'elements']
  - ['Components', 'components']
  - ['Setup & Installation', 'setup']
  - ['Markdown Reference', 'markdown']
  - ['Project Structure', 'structure']
  - ['Editorial', 'editorial']
sidebar:
  - label: Styleguide

@@ -25,6 +23,610 @@ sidebar:
        url: https://github.com/explosion/spaCy/blob/master/CONTRIBUTING.md
---

import Readme from 'README.md'
The [spacy.io](https://spacy.io) website is implemented using
[Gatsby](https://www.gatsbyjs.org) with
[Remark](https://github.com/remarkjs/remark) and [MDX](https://mdxjs.com/). This
allows authoring content in **straightforward Markdown** without the usual
limitations. Standard elements can be overwritten with powerful
[React](http://reactjs.org/) components and wherever Markdown syntax isn't
enough, JSX components can be used.

<Readme />
> #### Contributing to the site
>
> The docs can always use another example or more detail, and they should always
> be up to date and not misleading. We always appreciate a
> [pull request](https://github.com/explosion/spaCy/pulls). To quickly find the
> correct file to edit, simply click on the "Suggest edits" button at the bottom
> of a page.
>
> For more details on editing the site locally, see the installation
> instructions and markdown reference below.

## Logo {#logo source="website/src/images/logo.svg"}

import { Logos } from 'widgets/styleguide'

If you would like to use the spaCy logo on your site, please get in touch and
ask us first. However, if you want to show support and tell others that your
project is using spaCy, you can grab one of our
[spaCy badges](/usage/spacy-101#faq-project-with-spacy).

<Logos />

## Colors {#colors}

import { Colors, Patterns } from 'widgets/styleguide'

<Colors />

### Patterns

<Patterns />

## Typography {#typography}

import { H1, H2, H3, H4, H5, Label, InlineList, Comment } from
'components/typography'

> #### Markdown
>
> ```markdown_
> ## Headline 2
> ## Headline 2 {#some_id}
> ## Headline 2 {#some_id tag="method"}
> ```
>
> #### JSX
>
> ```jsx
> <H2>Headline 2</H2>
> <H2 id="some_id">Headline 2</H2>
> <H2 id="some_id" tag="method">Headline 2</H2>
> ```

Headlines are set in
[HK Grotesk](http://cargocollective.com/hanken/HK-Grotesk-Open-Source-Font) by
Hanken Design. All other body text and code uses the best-matching default
system font to provide a "native" reading experience. All code uses the
[JetBrains Mono](https://www.jetbrains.com/lp/mono/) typeface by JetBrains.

<Infobox title="Important note" variant="warning">

Level 2 headings are automatically wrapped in `<section>` elements at compile
time, using a custom
[Markdown transformer](https://github.com/explosion/spaCy/tree/master/website/plugins/remark-wrap-section.js).
|
||||
in the sidebar menu.
|
||||
|
||||
</Infobox>
|
||||
|
||||
<div>
|
||||
<H1>Headline 1</H1>
|
||||
<H2>Headline 2</H2>
|
||||
<H3>Headline 3</H3>
|
||||
<H4>Headline 4</H4>
|
||||
<H5>Headline 5</H5>
|
||||
<Label>Label</Label>
|
||||
</div>
|
||||
|
||||
---
|
||||
|
||||
The following optional attributes can be set on the headline to modify it. For
|
||||
example, to add a tag for the documented type or mark features that have been
|
||||
introduced in a specific version or require statistical models to be loaded.
|
||||
Tags are also available as standalone `<Tag />` components.
|
||||
|
||||
| Argument | Example | Result |
|
||||
| -------- | -------------------------- | ----------------------------------------- |
|
||||
| `tag` | `{tag="method"}` | <Tag>method</Tag> |
|
||||
| `new` | `{new="3"}` | <Tag variant="new">3</Tag> |
|
||||
| `model` | `{model="tagger, parser"}` | <Tag variant="model">tagger, parser</Tag> |
|
||||
| `hidden` | `{hidden="true"}` | |
|
||||
|
||||
## Elements {#elements}
|
||||
|
||||
### Links {#links}
|
||||
|
||||
> #### Markdown
|
||||
>
|
||||
> ```markdown
|
||||
> [I am a link](https://spacy.io)
|
||||
> ```
|
||||
>
|
||||
> #### JSX
|
||||
>
|
||||
> ```jsx
|
||||
> <Link to="https://spacy.io">I am a link</Link>
|
||||
> ```
|
||||
|
||||
Special link styles are used depending on the link URL.
|
||||
|
||||
- [I am a regular external link](https://explosion.ai)
|
||||
- [I am a link to the documentation](/api/doc)
|
||||
- [I am a link to an architecture](/api/architectures#HashEmbedCNN)
|
||||
- [I am a link to a model](/models/en#en_core_web_sm)
|
||||
- [I am a link to GitHub](https://github.com/explosion/spaCy)

### Abbreviations {#abbr}

import { Abbr } from 'components/typography'

> #### JSX
>
> ```jsx
> <Abbr title="Explanation">Abbreviation</Abbr>
> ```

Some text with <Abbr title="Explanation here">an abbreviation</Abbr>. On small
screens, I collapse and the explanation text is displayed next to the
abbreviation.

### Tags {#tags}

import Tag from 'components/tag'

> ```jsx
> <Tag>method</Tag>
> <Tag variant="new">4</Tag>
> <Tag variant="model">tagger, parser</Tag>
> ```

Tags can be used together with headlines, or next to properties across the
documentation, and combined with tooltips to provide additional information. An
optional `variant` argument can be used for special tags. `variant="new"` makes
the tag take a version number to mark new features. Using the component,
visibility of this tag can later be toggled once the feature isn't considered
new anymore. Setting `variant="model"` takes a description of model
capabilities and can be used to mark features that require a respective model
to be installed.

<InlineList>

<Tag>method</Tag> <Tag variant="new">4</Tag> <Tag variant="model">tagger,
parser</Tag>

</InlineList>

### Buttons {#buttons}

import Button from 'components/button'

> ```jsx
> <Button to="#" variant="primary">Primary small</Button>
> <Button to="#" variant="secondary">Secondary small</Button>
> ```

Link buttons come in two variants, `primary` and `secondary`, and two sizes,
with an optional `large` size modifier. Since they're mostly used as enhanced
links, the buttons are implemented as styled links instead of native button
elements.

<InlineList><Button to="#" variant="primary">Primary small</Button>
<Button to="#" variant="secondary">Secondary small</Button></InlineList>

<br />

<InlineList><Button to="#" variant="primary" large>Primary large</Button>
<Button to="#" variant="secondary" large>Secondary large</Button></InlineList>

## Components {#components}

### Table {#table}

> #### Markdown
>
> ```markdown_
> | Header 1 | Header 2 |
> | -------- | -------- |
> | Column 1 | Column 2 |
> ```
>
> #### JSX
>
> ```markup
> <Table>
> <Tr><Th>Header 1</Th><Th>Header 2</Th></Tr>
> <Tr><Td>Column 1</Td><Td>Column 2</Td></Tr>
> </Table>
> ```

Tables are used to present data and API documentation. Certain keywords can be
used to mark a footer row with a distinct style, for example to visualize the
return values of a documented function.

| Header 1    | Header 2 | Header 3 | Header 4 |
| ----------- | -------- | :------: | -------: |
| Column 1    | Column 2 | Column 3 | Column 4 |
| Column 1    | Column 2 | Column 3 | Column 4 |
| Column 1    | Column 2 | Column 3 | Column 4 |
| Column 1    | Column 2 | Column 3 | Column 4 |
| **RETURNS** | Column 2 | Column 3 | Column 4 |
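
In Markdown, such a footer row is simply a regular row whose first cell
contains a bold keyword like `**RETURNS**`, as in the example above. A minimal
sketch:

```markdown_
| Name        | Description          |
| ----------- | -------------------- |
| `text`      | The input text.      |
| **RETURNS** | The processed `Doc`. |
```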

Tables also support optional "divider" rows that are typically used to denote
keyword-only arguments in API documentation. To turn a row into a dividing
headline, it should only include content in its first cell, and its value
should be italicized:

> #### Markdown
>
> ```markdown_
> | Header 1 | Header 2 | Header 3 |
> | -------- | -------- | -------- |
> | Column 1 | Column 2 | Column 3 |
> | _Hello_  |          |          |
> | Column 1 | Column 2 | Column 3 |
> ```

| Header 1 | Header 2 | Header 3 |
| -------- | -------- | -------- |
| Column 1 | Column 2 | Column 3 |
| _Hello_  |          |          |
| Column 1 | Column 2 | Column 3 |

### Type Annotations {#type-annotations}

> #### Markdown
>
> ```markdown_
> ~~Model[List[Doc], Floats2d]~~
> ```
>
> #### JSX
>
> ```markup
> <TypeAnnotation>Model[List[Doc], Floats2d]</TypeAnnotation>
> ```

Type annotations are special inline code blocks that are used to describe
Python types in the [type hints](https://docs.python.org/3/library/typing.html)
format. The special component will split the type, apply syntax highlighting
and link all types for which links are specified in
`meta/type-annotations.json`. Types can link to internal or external
documentation pages. To make it easy to represent the type annotations in
Markdown, the rendering "hijacks" the `~~` tags that would typically be
converted to a `<del>` element – but in this case, text surrounded by `~~`
becomes a type annotation.

- ~~Dict[str, List[Union[Doc, Span]]]~~
- ~~Model[List[Doc], List[numpy.ndarray]]~~

Type annotations support a special visual style in tables and will render as a
separate row, under the cell text. This allows the API docs to display complex
types without taking up too much space in the cell. The type annotation should
always be the **last element** in the row.

> #### Markdown
>
> ```markdown_
> | Header 1 | Header 2               |
> | -------- | ---------------------- |
> | Column 1 | Column 2 ~~List[Doc]~~ |
> ```

| Name                    | Description                                                                                                                                                                 |
| ----------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `vocab`                 | The shared vocabulary. ~~Vocab~~                                                                                                                                            |
| `model`                 | The Thinc [`Model`](https://thinc.ai/docs/api-model) wrapping the transformer. ~~Model[List[Doc], FullTransformerBatch]~~                                                   |
| `set_extra_annotations` | Function that takes a batch of `Doc` objects and transformer outputs and can set additional annotations on the `Doc`. ~~Callable[[List[Doc], FullTransformerBatch], None]~~ |

### List {#list}

> #### Markdown
>
> ```markdown_
> 1. One
> 2. Two
> ```
>
> #### JSX
>
> ```markup
> <Ol>
> <Li>One</Li>
> <Li>Two</Li>
> </Ol>
> ```

Lists are available as bulleted and numbered. Markdown lists are transformed
automatically.

- I am a bulleted list
- I have nice bullets
- Lorem ipsum dolor
- consectetur adipiscing elit

1. I am an ordered list
2. I have nice numbers
3. Lorem ipsum dolor
4. consectetur adipiscing elit

### Aside {#aside}

> #### Markdown
>
> ```markdown_
> > #### Aside title
> > This is aside text.
> ```
>
> #### JSX
>
> ```jsx
> <Aside title="Aside title">This is aside text.</Aside>
> ```

Asides can be used to display additional notes and content in the right-hand
column. Asides can contain text, code and other elements if needed. Visually,
asides are moved to the side on the X-axis, and displayed at the same level
they were inserted at. On small screens, they collapse and are rendered in
their original position, in between the text.

To make them easier to use in Markdown, paragraphs formatted as blockquotes
will turn into asides by default. Level 4 headlines (with a leading `####`)
will become aside titles.

### Code Block {#code-block}

> #### Markdown
>
> ````markdown_
> ```python
> ### This is a title
> import spacy
> ```
> ````
>
> #### JSX
>
> ```jsx
> <CodeBlock title="This is a title" lang="python">
> import spacy
> </CodeBlock>
> ```

Code blocks use the [Prism](http://prismjs.com/) syntax highlighter with a
custom theme. The language can be set individually on each block, and defaults
to raw text with no highlighting, rendering the indented block as plain text
and preserving whitespace. An optional label can be added as the first line
with the prefix `####` (Python-like) or `///` (JavaScript-like).

```python
### Using spaCy
import spacy
nlp = spacy.load("en_core_web_sm")
doc = nlp("This is a sentence.")
for token in doc:
    print(token.text, token.pos_)
```
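
The `///` prefix works the same way for JavaScript-like languages. A minimal
sketch (assuming a `js` code block):

````markdown_
```js
/// This is a title
console.log("hello")
```
````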

Code blocks can also specify an optional range of line numbers to highlight by
adding `{highlight="..."}` to the headline. Acceptable ranges are spans like
`5-7`, but also `5-7,10` or `5-7,10,13-14`.

> #### Markdown
>
> ````markdown_
> ```python
> ### This is a title {highlight="1-2"}
> import spacy
> nlp = spacy.load("en_core_web_sm")
> ```
> ````

```python
### Using the matcher {highlight="5-7"}
import spacy
from spacy.matcher import Matcher

nlp = spacy.load("en_core_web_sm")
matcher = Matcher(nlp.vocab)
pattern = [{"LOWER": "hello"}, {"IS_PUNCT": True}, {"LOWER": "world"}]
matcher.add("HelloWorld", [pattern])
doc = nlp("Hello, world! Hello world!")
matches = matcher(doc)
```

Adding `{executable="true"}` to the title turns the code into an executable
block, powered by [Binder](https://mybinder.org) and
[Juniper](https://github.com/ines/juniper). If JavaScript is disabled, the
interactive widget defaults to a regular code block.

> #### Markdown
>
> ````markdown_
> ```python
> ### {executable="true"}
> import spacy
> nlp = spacy.load("en_core_web_sm")
> ```
> ````

```python
### {executable="true"}
import spacy
nlp = spacy.load("en_core_web_sm")
doc = nlp("This is a sentence.")
for token in doc:
    print(token.text, token.pos_)
```

If a code block only contains a URL to a GitHub file, the raw file contents are
embedded automatically and syntax highlighting is applied. The link to the
original file is shown at the top of the widget.

> #### Markdown
>
> ````markdown_
> ```python
> https://github.com/...
> ```
> ````
>
> #### JSX
>
> ```jsx
> <GitHubCode url="https://github.com/..." lang="python" />
> ```

```python
https://github.com/explosion/spaCy/tree/master/spacy/language.py
```

### Infobox {#infobox}

import Infobox from 'components/infobox'

> #### JSX
>
> ```jsx
> <Infobox title="Information">Regular infobox</Infobox>
> <Infobox title="Important note" variant="warning">This is a warning.</Infobox>
> <Infobox title="Be careful!" variant="danger">This is dangerous.</Infobox>
> ```

Infoboxes can be used to add notes, updates, warnings or additional information
to a page or section. Semantically, they're implemented and interpreted as an
`aside` element. Infoboxes can take an optional `title` argument, as well as an
optional `variant` (either `"warning"` or `"danger"`).

<Infobox title="This is an infobox">

If needed, an infobox can contain regular text, `inline code`, lists and other
blocks.

</Infobox>

<Infobox title="This is a warning" variant="warning">

If needed, an infobox can contain regular text, `inline code`, lists and other
blocks.

</Infobox>

<Infobox title="This is dangerous" variant="danger">

If needed, an infobox can contain regular text, `inline code`, lists and other
blocks.

</Infobox>

### Accordion {#accordion}

import Accordion from 'components/accordion'

> #### JSX
>
> ```jsx
> <Accordion title="This is an accordion">
> Accordion content goes here.
> </Accordion>
> ```

Accordions are collapsible sections that are mostly used for lengthy tables,
like the tag and label annotation schemes for different languages. They all
need to be presented – but chances are the user doesn't actually care about
_all_ of them, especially not at the same time. So it's fairly reasonable to
hide them behind a click. This particular implementation was inspired by the
amazing
[Inclusive Components blog](https://inclusive-components.design/collapsible-sections/).

<Accordion title="This is an accordion">

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Quisque enim ante,
pretium a orci eget, varius dignissim augue. Nam eu dictum mauris, id tincidunt
nisi. Integer commodo pellentesque tincidunt. Nam at turpis finibus tortor
gravida sodales tincidunt sit amet est. Nullam euismod arcu in tortor auctor,
sit amet dignissim justo congue.

</Accordion>

## Markdown reference {#markdown}

All page content and page meta lives in the `.md` files in the `/docs`
directory. The frontmatter block at the top of each file defines the page title
and other settings like the sidebar menu.

````markdown
---
title: Page title
---

## Headline starting a section {#some_id}

This is a regular paragraph with a [link](https://spacy.io) and **bold text**.

> #### This is an aside title
>
> This is aside text.

### Subheadline

| Header 1 | Header 2 |
| -------- | -------- |
| Column 1 | Column 2 |

```python
### Code block title {highlight="2-3"}
import spacy
nlp = spacy.load("en_core_web_sm")
doc = nlp("Hello world")
```

<Infobox title="Important note" variant="warning">

This is content in the infobox.

</Infobox>
````

In addition to the native markdown elements, you can use the components
[`<Infobox />`][infobox], [`<Accordion />`][accordion], [`<Abbr />`][abbr] and
[`<Tag />`][tag] via their JSX syntax.

[infobox]: https://spacy.io/styleguide#infobox
[accordion]: https://spacy.io/styleguide#accordion
[abbr]: https://spacy.io/styleguide#abbr
[tag]: https://spacy.io/styleguide#tag
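
For example, the inline components can be dropped straight into running
Markdown text (a quick sketch):

```markdown_
Some text with <Abbr title="Named Entity Recognition">NER</Abbr> and a
standalone tag: <Tag variant="new">3</Tag>.
```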

## Editorial {#editorial}

- "spaCy" should always be spelled with a lowercase "s" and a capital "C",
  unless it specifically refers to the Python package or Python import `spacy`
  (in which case it should be formatted as code).
  - ✅ spaCy is a library for advanced NLP in Python.
  - ❌ Spacy is a library for advanced NLP in Python.
  - ✅ First, you need to install the `spacy` package from pip.
- Mentions of code, like function names, classes, variable names etc. in inline
  text should be formatted as `code`.
  - ✅ "Calling the `nlp` object on a text returns a `Doc`."
- Objects that have pages in the [API docs](/api) should be linked – for
  example, [`Doc`](/api/doc) or [`Language.to_disk`](/api/language#to_disk).
  The mentions should still be formatted as code within the link. Links
  pointing to the API docs will automatically receive a little icon. However,
  if a paragraph includes many references to the API, the links can easily get
  messy. In that case, we typically only link the first mention of an object
  and not any subsequent ones.
  - ✅ The [`Span`](/api/span) and [`Token`](/api/token) objects are views of
    a [`Doc`](/api/doc). [`Span.as_doc`](/api/span#as_doc) creates a `Doc`
    object from a `Span`.
  - ❌ The [`Span`](/api/span) and [`Token`](/api/token) objects are views of
    a [`Doc`](/api/doc). [`Span.as_doc`](/api/span#as_doc) creates a
    [`Doc`](/api/doc) object from a [`Span`](/api/span).

* Other things we format as code are: references to trained pipeline packages
  like `en_core_web_sm` or file names like `code.py` or `meta.json`.

  - ✅ After training, the `config.cfg` is saved to disk.

* [Type annotations](#type-annotations) are a special type of code formatting,
  expressed by wrapping the text in `~~` instead of backticks. The result
  looks like this: ~~List[Doc]~~. All references to known types will be linked
  automatically.

  - ✅ The model has the input type ~~List[Doc]~~ and it outputs a
    ~~List[Array2d]~~.

* We try to keep links meaningful but short.
  - ✅ For details, see the usage guide on
    [training with custom code](/usage/training#custom-code).
  - ❌ For details, see
    [the usage guide on training with custom code](/usage/training#custom-code).
  - ❌ For details, see the usage guide on training with custom code
    [here](/usage/training#custom-code).

@@ -259,9 +259,9 @@ pipelines.

> This can be used in a project command like so:
>
> ```yaml
> - name: 'echo-path'
>   script:
>     - 'echo ${env.ENV_PATH}'
> ```

| Section | Description |

@@ -643,12 +643,13 @@ locally.

You can list one or more remotes in the `remotes` section of your
[`project.yml`](#project-yml) by mapping a string name to the URL of the
storage. Under the hood, spaCy uses
[`Pathy`](https://github.com/justindujardin/pathy) to communicate with the
remote storages, so you can use any protocol that `Pathy` supports, including
[S3](https://aws.amazon.com/s3/),
[Google Cloud Storage](https://cloud.google.com/storage), and the local
filesystem, although you may need to install extra dependencies to use certain
protocols.

> #### Example
>
@@ -661,7 +662,6 @@ you may need to install extra dependencies to use certain protocols.
> remotes:
>   default: 's3://my-spacy-bucket'
>   local: '/mnt/scratch/cache'
>   stuff: 'ssh://myserver.example.com/whatever'
> ```

<Infobox title="How it works" emoji="💡">

@@ -562,6 +562,7 @@

      "url": "https://github.com/explosion/spacy-pkuseg"
    }
  ],
  "example": "这是一个用于示例的句子。",
  "has_examples": true
}
],

@@ -461,37 +461,6 @@

  },
  "category": ["standalone"]
},
{
  "id": "spikex",
  "title": "SpikeX - SpaCy Pipes for Knowledge Extraction",
  "slogan": "Use SpikeX to build knowledge extraction tools with almost-zero effort",
  "description": "SpikeX is a collection of pipes ready to be plugged in a spaCy pipeline. It aims to help in building knowledge extraction tools with almost-zero effort.",
  "github": "erre-quadro/spikex",
  "pip": "spikex",
  "code_example": [
    "from spacy import load as spacy_load",
    "from spikex.wikigraph import load as wg_load",
    "from spikex.pipes import WikiPageX",
    "",
    "# load a spacy model and get a doc",
    "nlp = spacy_load('en_core_web_sm')",
    "doc = nlp('An apple a day keeps the doctor away')",
    "# load a WikiGraph",
    "wg = wg_load('simplewiki_core')",
    "# get a WikiPageX and extract all pages",
    "wikipagex = WikiPageX(wg)",
    "doc = wikipagex(doc)",
    "# see all pages extracted from the doc",
    "for span in doc._.wiki_spans:",
    " print(span._.wiki_pages)"
  ],
  "category": ["pipeline", "standalone"],
  "author": "Erre Quadro",
  "author_links": {
    "github": "erre-quadro",
    "website": "https://www.errequadrosrl.com"
  }
},
{
  "id": "spacy-dbpedia-spotlight",
  "title": "DBpedia Spotlight for SpaCy",

@@ -2024,17 +1993,6 @@

  },
  "category": ["books"]
},
{
  "type": "education",
  "id": "learning-path-spacy",
  "title": "Learning Path: Mastering spaCy for Natural Language Processing",
  "slogan": "O'Reilly, 2017",
  "description": "spaCy, a fast, user-friendly library for teaching computers to understand text, simplifies NLP techniques, such as speech tagging and syntactic dependencies, so you can easily extract information, attributes, and objects from massive amounts of text to then document, measure, and analyze. This Learning Path is a hands-on introduction to using spaCy to discover insights through natural language processing. While end-to-end natural language processing solutions can be complex, you’ll learn the linguistics, algorithms, and machine learning skills to get the job done.",
  "url": "https://www.safaribooksonline.com/library/view/learning-path-mastering/9781491986653/",
  "thumb": "https://i.imgur.com/9MIgMAc.jpg",
  "author": "Aaron Kramer",
  "category": ["courses"]
},
{
  "type": "education",
  "id": "introduction-into-spacy-3",