Mirror of https://github.com/explosion/spaCy.git (synced 2025-08-05 04:40:20 +03:00)

Commit 920835e21e: Merge branch 'explosion:master' into rapidfuzz
.github/ISSUE_TEMPLATE/01_bugs.md

@@ -10,7 +10,7 @@ about: Use this template if you came across a bug or unexpected behaviour differ
 <!-- Include a code example or the steps that led to the problem. Please try to be as specific as possible. -->

 ## Your Environment
-<!-- Include details of your environment. If you're using spaCy 1.7+, you can also type `python -m spacy info --markdown` and copy-paste the result here.-->
+<!-- Include details of your environment. You can also type `python -m spacy info --markdown` and copy-paste the result here.-->
 * Operating System:
 * Python Version Used:
 * spaCy Version Used:
.github/azure-steps.yml

@@ -27,7 +27,7 @@ steps:

  - script: python -m mypy spacy
    displayName: 'Run mypy'
-    condition: ne(variables['python_version'], '3.10')
+    condition: ne(variables['python_version'], '3.6')

  - task: DeleteFiles@1
    inputs:
.pre-commit-config.yaml

@@ -6,7 +6,7 @@ repos:
        language_version: python3.7
        additional_dependencies: ['click==8.0.4']
  - repo: https://gitlab.com/pycqa/flake8
-    rev: 3.9.2
+    rev: 5.0.4
    hooks:
      - id: flake8
        args:
azure-pipelines.yml

@@ -31,7 +31,7 @@ jobs:
      inputs:
        versionSpec: "3.7"
    - script: |
-        pip install flake8==3.9.2
+        pip install flake8==5.0.4
        python -m flake8 spacy --count --select=E901,E999,F821,F822,F823,W605 --show-source --statistics
      displayName: "flake8"

requirements.txt

@@ -15,7 +15,7 @@ pathy>=0.3.5
 numpy>=1.15.0
 requests>=2.13.0,<3.0.0
 tqdm>=4.38.0,<5.0.0
-pydantic>=1.7.4,!=1.8,!=1.8.1,<1.10.0
+pydantic>=1.7.4,!=1.8,!=1.8.1,<1.11.0
 jinja2
 langcodes>=3.2.0,<4.0.0
 # Official Python utilities
@@ -28,11 +28,12 @@ cython>=0.25,<3.0
 pytest>=5.2.0,!=7.1.0
 pytest-timeout>=1.3.0,<2.0.0
 mock>=2.0.0,<3.0.0
-flake8>=3.8.0,<3.10.0
+flake8>=3.8.0,<6.0.0
 hypothesis>=3.27.0,<7.0.0
-mypy>=0.910,<0.970; platform_machine!='aarch64'
+mypy>=0.980,<0.990; platform_machine != "aarch64" and python_version >= "3.7"
 types-dataclasses>=0.1.3; python_version < "3.7"
 types-mock>=0.1.1
-types-setuptools>=57.0.0
 types-requests
+types-setuptools>=57.0.0
 black>=22.0,<23.0
setup.cfg

@@ -56,7 +56,7 @@ install_requires =
     tqdm>=4.38.0,<5.0.0
     numpy>=1.15.0
     requests>=2.13.0,<3.0.0
-    pydantic>=1.7.4,!=1.8,!=1.8.1,<1.10.0
+    pydantic>=1.7.4,!=1.8,!=1.8.1,<1.11.0
     jinja2
     # Official Python utilities
     setuptools
spacy/__init__.py

@@ -31,9 +31,9 @@ def load(
     name: Union[str, Path],
     *,
     vocab: Union[Vocab, bool] = True,
-    disable: Union[str, Iterable[str]] = util.SimpleFrozenList(),
-    enable: Union[str, Iterable[str]] = util.SimpleFrozenList(),
-    exclude: Union[str, Iterable[str]] = util.SimpleFrozenList(),
+    disable: Union[str, Iterable[str]] = util._DEFAULT_EMPTY_PIPES,
+    enable: Union[str, Iterable[str]] = util._DEFAULT_EMPTY_PIPES,
+    exclude: Union[str, Iterable[str]] = util._DEFAULT_EMPTY_PIPES,
     config: Union[Dict[str, Any], Config] = util.SimpleFrozenDict(),
 ) -> Language:
     """Load a spaCy model from an installed package or a local path.
spacy/cli/_util.py

@@ -573,3 +573,12 @@ def setup_gpu(use_gpu: int, silent=None) -> None:
         local_msg.info("Using CPU")
         if gpu_is_available():
             local_msg.info("To switch to GPU 0, use the option: --gpu-id 0")
+
+
+def _format_number(number: Union[int, float], ndigits: int = 2) -> str:
+    """Formats a number (float or int) rounding to `ndigits`, without truncating trailing 0s,
+    as happens with `round(number, ndigits)`"""
+    if isinstance(number, float):
+        return f"{number:.{ndigits}f}"
+    else:
+        return str(number)
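
For illustration, a minimal standalone sketch of how this helper differs from plain `round` (a hypothetical snippet, not part of the diff):

```python
# Keeps trailing zeros that round() drops in the string form.
def _format_number(number, ndigits: int = 2) -> str:
    if isinstance(number, float):
        return f"{number:.{ndigits}f}"
    return str(number)

print(round(0.5, 2))        # 0.5  (trailing zero lost when printed)
print(_format_number(0.5))  # 0.50
print(_format_number(3))    # 3
```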
spacy/cli/debug_data.py

@@ -9,7 +9,7 @@ import typer
 import math

 from ._util import app, Arg, Opt, show_validation_error, parse_config_overrides
-from ._util import import_code, debug_cli
+from ._util import import_code, debug_cli, _format_number
 from ..training import Example, remove_bilu_prefix
 from ..training.initialize import get_sourced_components
 from ..schemas import ConfigSchemaTraining
@@ -989,7 +989,8 @@ def _get_kl_divergence(p: Counter, q: Counter) -> float:
 def _format_span_row(span_data: List[Dict], labels: List[str]) -> List[Any]:
     """Compile into one list for easier reporting"""
     d = {
-        label: [label] + list(round(d[label], 2) for d in span_data) for label in labels
+        label: [label] + list(_format_number(d[label]) for d in span_data)
+        for label in labels
     }
     return list(d.values())

@@ -1004,6 +1005,10 @@ def _get_span_characteristics(
         label: _gmean(l)
         for label, l in compiled_gold["spans_length"][spans_key].items()
     }
+    spans_per_type = {
+        label: len(spans)
+        for label, spans in compiled_gold["spans_per_type"][spans_key].items()
+    }
     min_lengths = [min(l) for l in compiled_gold["spans_length"][spans_key].values()]
     max_lengths = [max(l) for l in compiled_gold["spans_length"][spans_key].values()]

@@ -1031,6 +1036,7 @@ def _get_span_characteristics(
     return {
         "sd": span_distinctiveness,
         "bd": sb_distinctiveness,
+        "spans_per_type": spans_per_type,
         "lengths": span_length,
         "min_length": min(min_lengths),
         "max_length": max(max_lengths),
@@ -1045,12 +1051,15 @@ def _get_span_characteristics(

 def _print_span_characteristics(span_characteristics: Dict[str, Any]):
     """Print all span characteristics into a table"""
-    headers = ("Span Type", "Length", "SD", "BD")
+    headers = ("Span Type", "Length", "SD", "BD", "N")
+    # Wasabi has this at 30 by default, but we might have some long labels
+    max_col = max(30, max(len(label) for label in span_characteristics["labels"]))
     # Prepare table data with all span characteristics
     table_data = [
         span_characteristics["lengths"],
         span_characteristics["sd"],
         span_characteristics["bd"],
+        span_characteristics["spans_per_type"],
     ]
     table = _format_span_row(
         span_data=table_data, labels=span_characteristics["labels"]
@@ -1061,8 +1070,18 @@ def _print_span_characteristics(span_characteristics: Dict[str, Any]):
         span_characteristics["avg_sd"],
         span_characteristics["avg_bd"],
     ]
-    footer = ["Wgt. Average"] + [str(round(f, 2)) for f in footer_data]
-    msg.table(table, footer=footer, header=headers, divider=True)
+    footer = (
+        ["Wgt. Average"] + ["{:.2f}".format(round(f, 2)) for f in footer_data] + ["-"]
+    )
+    msg.table(
+        table,
+        footer=footer,
+        header=headers,
+        divider=True,
+        aligns=["l"] + ["r"] * (len(footer_data) + 1),
+        max_col=max_col,
+    )


 def _get_spans_length_freq_dist(
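
As an illustration of the call shape used above, a toy `wasabi` table with the same keyword arguments (the data values here are made up):

```python
from wasabi import msg

header = ("Span Type", "Length", "SD", "BD", "N")
data = [["PERSON", "1.89", "0.93", "0.86", "42"]]
footer = ["Wgt. Average", "1.89", "0.93", "0.86", "-"]
# Left-align the label column, right-align the numeric columns.
msg.table(data, footer=footer, header=header, divider=True,
          aligns=["l"] + ["r"] * 4, max_col=30)
```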
spacy/cli/package.py

@@ -299,8 +299,8 @@ def get_meta(
     }
     nlp = util.load_model_from_path(Path(model_path))
     meta.update(nlp.meta)
-    meta.update(existing_meta)
     meta["spacy_version"] = util.get_minor_version_range(about.__version__)
+    meta.update(existing_meta)
     meta["vectors"] = {
         "width": nlp.vocab.vectors_length,
         "vectors": len(nlp.vocab.vectors),
spacy/cli/project/run.py

@@ -1,5 +1,8 @@
-from typing import Optional, List, Dict, Sequence, Any, Iterable
+from typing import Optional, List, Dict, Sequence, Any, Iterable, Tuple
+import os.path
 from pathlib import Path

+import pkg_resources
 from wasabi import msg
 from wasabi.util import locale_escape
 import sys
@@ -71,6 +74,12 @@ def project_run(
     commands = {cmd["name"]: cmd for cmd in config.get("commands", [])}
     workflows = config.get("workflows", {})
     validate_subcommand(list(commands.keys()), list(workflows.keys()), subcommand)
+
+    req_path = project_dir / "requirements.txt"
+    if config.get("check_requirements", True) and os.path.exists(req_path):
+        with req_path.open() as requirements_file:
+            _check_requirements([req.replace("\n", "") for req in requirements_file])
+
     if subcommand in workflows:
         msg.info(f"Running workflow '{subcommand}'")
         for cmd in workflows[subcommand]:
@@ -310,3 +319,32 @@ def get_fileinfo(project_dir: Path, paths: List[str]) -> List[Dict[str, Optional
         md5 = get_checksum(file_path) if file_path.exists() else None
         data.append({"path": path, "md5": md5})
     return data
+
+
+def _check_requirements(requirements: List[str]) -> Tuple[bool, bool]:
+    """Checks whether requirements are installed and free of version conflicts.
+    requirements (List[str]): List of requirements.
+    RETURNS (Tuple[bool, bool]): Whether (1) any packages couldn't be imported, (2) any packages with version conflicts
+        exist.
+    """
+
+    failed_pkgs_msgs: List[str] = []
+    conflicting_pkgs_msgs: List[str] = []
+
+    for req in requirements:
+        try:
+            pkg_resources.require(req)
+        except pkg_resources.DistributionNotFound as dnf:
+            failed_pkgs_msgs.append(dnf.report())
+        except pkg_resources.VersionConflict as vc:
+            conflicting_pkgs_msgs.append(vc.report())
+
+    if len(failed_pkgs_msgs) or len(conflicting_pkgs_msgs):
+        msg.warn(
+            title="Missing requirements or requirement conflicts detected. Make sure your Python environment is set up "
+            "correctly and you installed all requirements specified in your project's requirements.txt: "
+        )
+        for pgk_msg in failed_pkgs_msgs + conflicting_pkgs_msgs:
+            msg.text(pgk_msg)
+
+    return len(failed_pkgs_msgs) > 0, len(conflicting_pkgs_msgs) > 0
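
A minimal sketch of the `pkg_resources` calls this helper relies on (the requirement strings below are made up):

```python
import pkg_resources

for req in ["spacy>=3.0.0", "not-a-real-package==1.0"]:
    try:
        pkg_resources.require(req)  # raises if the package is missing or conflicting
        print(f"satisfied: {req}")
    except pkg_resources.DistributionNotFound as dnf:
        print(f"missing:   {dnf.report()}")
    except pkg_resources.VersionConflict as vc:
        print(f"conflict:  {vc.report()}")
```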
spacy/errors.py

@@ -212,6 +212,8 @@ class Warnings(metaclass=ErrorsWithCodes):
     W121 = ("Attempting to trace non-existent method '{method}' in pipe '{pipe}'")
     W122 = ("Couldn't trace method '{method}' in pipe '{pipe}'. This can happen if the pipe class "
             "is a Cython extension type.")
+    W123 = ("Argument {arg} with value {arg_value} is used instead of {config_value} as specified in the config. Be "
+            "aware that this might affect other components in your pipeline.")


 class Errors(metaclass=ErrorsWithCodes):
@@ -937,8 +939,9 @@ class Errors(metaclass=ErrorsWithCodes):
     E1040 = ("Doc.from_json requires all tokens to have the same attributes. "
              "Some tokens do not contain annotation for: {partial_attrs}")
     E1041 = ("Expected a string, Doc, or bytes as input, but got: {type}")
-    E1042 = ("Function was called with `{arg1}`={arg1_values} and "
-             "`{arg2}`={arg2_values} but these arguments are conflicting.")
+    E1042 = ("`enable={enable}` and `disable={disable}` are inconsistent with each other.\nIf you only passed "
+             "one of `enable` or `disable`, the other argument is specified in your pipeline's configuration.\nIn that "
+             "case pass an empty list for the previously not specified argument to avoid this error.")
     E1043 = ("Expected None or a value in range [{range_start}, {range_end}] for entity linker threshold, but got "
              "{value}.")
spacy/lang/ru/lemmatizer.py

@@ -23,7 +23,7 @@ class RussianLemmatizer(Lemmatizer):
         overwrite: bool = False,
         scorer: Optional[Callable] = lemmatizer_score,
     ) -> None:
-        if mode == "pymorphy2":
+        if mode in {"pymorphy2", "pymorphy2_lookup"}:
             try:
                 from pymorphy2 import MorphAnalyzer
             except ImportError:
spacy/lang/uk/lemmatizer.py

@@ -18,7 +18,7 @@ class UkrainianLemmatizer(RussianLemmatizer):
         overwrite: bool = False,
         scorer: Optional[Callable] = lemmatizer_score,
     ) -> None:
-        if mode == "pymorphy2":
+        if mode in {"pymorphy2", "pymorphy2_lookup"}:
             try:
                 from pymorphy2 import MorphAnalyzer
             except ImportError:
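
The lookup mode is selected through the lemmatizer config, mirroring the test fixtures added further down in this diff. A small sketch (assumes `pymorphy2` is installed, plus `pymorphy2-dicts-uk` for Ukrainian):

```python
import spacy

nlp = spacy.blank("uk")
lemmatizer = nlp.add_pipe("lemmatizer", config={"mode": "pymorphy2_lookup"})
doc = nlp.make_doc("Привіт світ")
doc = lemmatizer(doc)
print([token.lemma_ for token in doc])
```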
spacy/language.py

@@ -1,4 +1,4 @@
-from typing import Iterator, Optional, Any, Dict, Callable, Iterable, Collection
+from typing import Iterator, Optional, Any, Dict, Callable, Iterable
 from typing import Union, Tuple, List, Set, Pattern, Sequence
 from typing import NoReturn, TYPE_CHECKING, TypeVar, cast, overload

@@ -10,6 +10,7 @@ from contextlib import contextmanager
 from copy import deepcopy
 from pathlib import Path
+import warnings

 from thinc.api import get_current_ops, Config, CupyOps, Optimizer
 import srsly
 import multiprocessing as mp
@@ -24,7 +25,7 @@ from .pipe_analysis import validate_attrs, analyze_pipes, print_pipe_analysis
 from .training import Example, validate_examples
 from .training.initialize import init_vocab, init_tok2vec
 from .scorer import Scorer
-from .util import registry, SimpleFrozenList, _pipe, raise_error
+from .util import registry, SimpleFrozenList, _pipe, raise_error, _DEFAULT_EMPTY_PIPES
 from .util import SimpleFrozenDict, combine_score_weights, CONFIG_SECTION_ORDER
 from .util import warn_if_jupyter_cupy
 from .lang.tokenizer_exceptions import URL_MATCH, BASE_EXCEPTIONS
@@ -1698,9 +1699,9 @@ class Language:
         config: Union[Dict[str, Any], Config] = {},
         *,
         vocab: Union[Vocab, bool] = True,
-        disable: Union[str, Iterable[str]] = SimpleFrozenList(),
-        enable: Union[str, Iterable[str]] = SimpleFrozenList(),
-        exclude: Union[str, Iterable[str]] = SimpleFrozenList(),
+        disable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
+        enable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
+        exclude: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
         meta: Dict[str, Any] = SimpleFrozenDict(),
         auto_fill: bool = True,
         validate: bool = True,
@@ -1727,12 +1728,6 @@ class Language:

         DOCS: https://spacy.io/api/language#from_config
         """
-        if isinstance(disable, str):
-            disable = [disable]
-        if isinstance(enable, str):
-            enable = [enable]
-        if isinstance(exclude, str):
-            exclude = [exclude]
         if auto_fill:
             config = Config(
                 cls.default_config, section_order=CONFIG_SECTION_ORDER
@@ -1877,9 +1872,38 @@ class Language:
                 nlp.vocab.from_bytes(vocab_b)

+        # Resolve disabled/enabled settings.
+        if isinstance(disable, str):
+            disable = [disable]
+        if isinstance(enable, str):
+            enable = [enable]
+        if isinstance(exclude, str):
+            exclude = [exclude]
+
+        def fetch_pipes_status(value: Iterable[str], key: str) -> Iterable[str]:
+            """Fetch value for `enable` or `disable` w.r.t. the specified config and passed arguments passed to
+            .load(). If both arguments and config specified values for this field, the passed arguments take precedence
+            and a warning is printed.
+            value (Iterable[str]): Passed value for `enable` or `disable`.
+            key (str): Key for field in config (either "enabled" or "disabled").
+            RETURN (Iterable[str]):
+            """
+            # We assume that no argument was passed if the value is the specified default value.
+            if id(value) == id(_DEFAULT_EMPTY_PIPES):
+                return config["nlp"].get(key, [])
+            else:
+                if len(config["nlp"].get(key, [])):
+                    warnings.warn(
+                        Warnings.W123.format(
+                            arg=key[:-1],
+                            arg_value=value,
+                            config_value=config["nlp"][key],
+                        )
+                    )
+                return value
+
         disabled_pipes = cls._resolve_component_status(
-            [*config["nlp"]["disabled"], *disable],
-            [*config["nlp"].get("enabled", []), *enable],
+            fetch_pipes_status(disable, "disabled"),
+            fetch_pipes_status(enable, "enabled"),
             config["nlp"]["pipeline"],
         )
         nlp._disabled = set(p for p in disabled_pipes if p not in exclude)
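
In practice the precedence rule is the one exercised by the `test_enable_disable_conflict_with_config` test added further down: explicitly passed arguments win over the config and emit `W123`. A sketch (the path is a placeholder):

```python
import spacy

# The pipeline's config contains {"nlp": {"disabled": ["senter"]}}.
# Passing an explicit disable=[] overrides that setting and emits warning W123.
nlp = spacy.load(
    "/path/to/pipeline",
    enable=["tagger"],
    disable=[],
    config={"nlp": {"disabled": ["senter"]}},
)
```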
@@ -2064,14 +2088,7 @@ class Language:
                 pipe_name for pipe_name in pipe_names if pipe_name not in enable
             ]
             if disable and disable != to_disable:
-                raise ValueError(
-                    Errors.E1042.format(
-                        arg1="enable",
-                        arg2="disable",
-                        arg1_values=enable,
-                        arg2_values=disable,
-                    )
-                )
+                raise ValueError(Errors.E1042.format(enable=enable, disable=disable))

         return tuple(to_disable)

spacy/pipeline/edit_tree_lemmatizer.py

@@ -1,7 +1,6 @@
 from typing import cast, Any, Callable, Dict, Iterable, List, Optional
-from typing import Sequence, Tuple, Union
+from typing import Tuple
 from collections import Counter
 from copy import deepcopy
 from itertools import islice
 import numpy as np

@@ -149,9 +148,7 @@ class EditTreeLemmatizer(TrainablePipe):
         if not any(len(doc) for doc in docs):
             # Handle cases where there are no tokens in any docs.
             n_labels = len(self.cfg["labels"])
-            guesses: List[Ints2d] = [
-                self.model.ops.alloc((0, n_labels), dtype="i") for doc in docs
-            ]
+            guesses: List[Ints2d] = [self.model.ops.alloc2i(0, n_labels) for _ in docs]
             assert len(guesses) == n_docs
             return guesses
         scores = self.model.predict(docs)
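
For reference, a small sketch of the typed Thinc allocator used here (assumes a Thinc 8.x version where `Ops.alloc2i` is available):

```python
from thinc.api import NumpyOps

ops = NumpyOps()
# A (0, 5) integer array: the typed helper replaces ops.alloc((0, 5), dtype="i").
guesses = ops.alloc2i(0, 5)
print(guesses.shape, guesses.dtype)
```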
spacy/pipeline/entityruler.py

@@ -1,6 +1,5 @@
-import warnings
 from typing import Optional, Union, List, Dict, Tuple, Iterable, Any, Callable, Sequence
 from typing import cast
 import warnings
 from collections import defaultdict
 from pathlib import Path
 import srsly
@@ -317,7 +316,7 @@ class EntityRuler(Pipe):
             phrase_pattern["id"] = ent_id
             phrase_patterns.append(phrase_pattern)
         for entry in token_patterns + phrase_patterns:  # type: ignore[operator]
-            label = entry["label"]
+            label = entry["label"]  # type: ignore
             if "id" in entry:
                 ent_label = label
                 label = self._create_label(label, entry["id"])
spacy/pipeline/spancat.py

@@ -133,6 +133,9 @@ def make_spancat(
     spans_key (str): Key of the doc.spans dict to save the spans under. During
         initialization and training, the component will look for spans on the
         reference document under the same key.
+    scorer (Optional[Callable]): The scoring method. Defaults to
+        Scorer.score_spans for the Doc.spans[spans_key] with overlapping
+        spans allowed.
     threshold (float): Minimum probability to consider a prediction positive.
         Spans with a positive prediction will be saved on the Doc. Defaults to
         0.5.
spacy/pipeline/textcat_multilabel.py

@@ -96,8 +96,8 @@ def make_multilabel_textcat(
     model: Model[List[Doc], List[Floats2d]],
     threshold: float,
     scorer: Optional[Callable],
-) -> "TextCategorizer":
-    """Create a TextCategorizer component. The text categorizer predicts categories
+) -> "MultiLabel_TextCategorizer":
+    """Create a MultiLabel_TextCategorizer component. The text categorizer predicts categories
     over a whole document. It can learn one or more labels, and the labels are considered
     to be non-mutually exclusive, which means that there can be zero or more labels
     per doc).
@@ -105,6 +105,7 @@ def make_multilabel_textcat(
     model (Model[List[Doc], List[Floats2d]]): A model instance that predicts
         scores for each category.
     threshold (float): Cutoff to consider a prediction "positive".
+    scorer (Optional[Callable]): The scoring method.
     """
     return MultiLabel_TextCategorizer(
         nlp.vocab, model, name, threshold=threshold, scorer=scorer
@@ -147,6 +148,7 @@ class MultiLabel_TextCategorizer(TextCategorizer):
         name (str): The component instance name, used to add entries to the
             losses during training.
         threshold (float): Cutoff to consider a prediction "positive".
+        scorer (Optional[Callable]): The scoring method.

         DOCS: https://spacy.io/api/textcategorizer#init
         """
spacy/schemas.py

@@ -187,12 +187,12 @@ class TokenPatternNumber(BaseModel):
     IS_SUBSET: Optional[List[StrictInt]] = Field(None, alias="is_subset")
     IS_SUPERSET: Optional[List[StrictInt]] = Field(None, alias="is_superset")
     INTERSECTS: Optional[List[StrictInt]] = Field(None, alias="intersects")
-    EQ: Union[StrictInt, StrictFloat] = Field(None, alias="==")
-    NEQ: Union[StrictInt, StrictFloat] = Field(None, alias="!=")
-    GEQ: Union[StrictInt, StrictFloat] = Field(None, alias=">=")
-    LEQ: Union[StrictInt, StrictFloat] = Field(None, alias="<=")
-    GT: Union[StrictInt, StrictFloat] = Field(None, alias=">")
-    LT: Union[StrictInt, StrictFloat] = Field(None, alias="<")
+    EQ: Optional[Union[StrictInt, StrictFloat]] = Field(None, alias="==")
+    NEQ: Optional[Union[StrictInt, StrictFloat]] = Field(None, alias="!=")
+    GEQ: Optional[Union[StrictInt, StrictFloat]] = Field(None, alias=">=")
+    LEQ: Optional[Union[StrictInt, StrictFloat]] = Field(None, alias="<=")
+    GT: Optional[Union[StrictInt, StrictFloat]] = Field(None, alias=">")
+    LT: Optional[Union[StrictInt, StrictFloat]] = Field(None, alias="<")

     class Config:
         extra = "forbid"
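
These aliases back the numeric operators in token patterns. A small, hypothetical `Matcher` example that exercises one of them:

```python
import spacy
from spacy.matcher import Matcher

nlp = spacy.blank("en")
matcher = Matcher(nlp.vocab)
# Match any token whose length is at least 10 characters (the ">=" alias above).
matcher.add("LONG_WORD", [[{"LENGTH": {">=": 10}}]])
doc = nlp("a sesquipedalian counterexample")
print([doc[start:end].text for _, start, end in matcher(doc)])
```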
@@ -436,7 +436,7 @@ class ProjectConfigAssetURL(BaseModel):
     # fmt: off
     dest: StrictStr = Field(..., title="Destination of downloaded asset")
     url: Optional[StrictStr] = Field(None, title="URL of asset")
-    checksum: str = Field(None, title="MD5 hash of file", regex=r"([a-fA-F\d]{32})")
+    checksum: Optional[str] = Field(None, title="MD5 hash of file", regex=r"([a-fA-F\d]{32})")
     description: StrictStr = Field("", title="Description of asset")
     # fmt: on

@@ -444,7 +444,7 @@ class ProjectConfigAssetURL(BaseModel):
 class ProjectConfigAssetGit(BaseModel):
     # fmt: off
     git: ProjectConfigAssetGitItem = Field(..., title="Git repo information")
-    checksum: str = Field(None, title="MD5 hash of file", regex=r"([a-fA-F\d]{32})")
+    checksum: Optional[str] = Field(None, title="MD5 hash of file", regex=r"([a-fA-F\d]{32})")
     description: Optional[StrictStr] = Field(None, title="Description of asset")
     # fmt: on

@@ -514,9 +514,9 @@ class DocJSONSchema(BaseModel):
         None, title="Indices of sentences' start and end indices"
     )
     text: StrictStr = Field(..., title="Document text")
-    spans: Dict[StrictStr, List[Dict[StrictStr, Union[StrictStr, StrictInt]]]] = Field(
-        None, title="Span information - end/start indices, label, KB ID"
-    )
+    spans: Optional[
+        Dict[StrictStr, List[Dict[StrictStr, Union[StrictStr, StrictInt]]]]
+    ] = Field(None, title="Span information - end/start indices, label, KB ID")
     tokens: List[Dict[StrictStr, Union[StrictStr, StrictInt]]] = Field(
         ..., title="Token information - ID, start, annotations"
     )
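
This schema validates the dictionaries produced and consumed by `Doc.to_json` and `Doc.from_json`. A minimal round-trip for orientation (not taken from the diff):

```python
import spacy
from spacy.tokens import Doc

nlp = spacy.blank("en")
doc = nlp("Round trip example")
data = doc.to_json()           # dict matching DocJSONSchema; "spans" may be absent
doc2 = Doc(nlp.vocab).from_json(data)
print([t.text for t in doc2])
```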
spacy/tests/conftest.py

@@ -343,6 +343,14 @@ def ru_lemmatizer():
     return get_lang_class("ru")().add_pipe("lemmatizer")


+@pytest.fixture
+def ru_lookup_lemmatizer():
+    pytest.importorskip("pymorphy2")
+    return get_lang_class("ru")().add_pipe(
+        "lemmatizer", config={"mode": "pymorphy2_lookup"}
+    )
+
+
 @pytest.fixture(scope="session")
 def sa_tokenizer():
     return get_lang_class("sa")().tokenizer
@@ -422,6 +430,15 @@ def uk_lemmatizer():
     return get_lang_class("uk")().add_pipe("lemmatizer")


+@pytest.fixture
+def uk_lookup_lemmatizer():
+    pytest.importorskip("pymorphy2")
+    pytest.importorskip("pymorphy2_dicts_uk")
+    return get_lang_class("uk")().add_pipe(
+        "lemmatizer", config={"mode": "pymorphy2_lookup"}
+    )
+
+
 @pytest.fixture(scope="session")
 def ur_tokenizer():
     return get_lang_class("ur")().tokenizer
spacy/tests/doc/test_doc_api.py

@@ -82,6 +82,21 @@ def test_issue2396(en_vocab):
     assert (span.get_lca_matrix() == matrix).all()


+@pytest.mark.issue(11499)
+def test_init_args_unmodified(en_vocab):
+    words = ["A", "sentence"]
+    ents = ["B-TYPE1", ""]
+    sent_starts = [True, False]
+    Doc(
+        vocab=en_vocab,
+        words=words,
+        ents=ents,
+        sent_starts=sent_starts,
+    )
+    assert ents == ["B-TYPE1", ""]
+    assert sent_starts == [True, False]
+
+
 @pytest.mark.parametrize("text", ["-0.23", "+123,456", "±1"])
 @pytest.mark.parametrize("lang_cls", [English, MultiLanguage])
 @pytest.mark.issue(2782)
spacy/tests/lang/ru/test_lemmatizer.py

@@ -78,3 +78,17 @@ def test_ru_lemmatizer_punct(ru_lemmatizer):
     assert ru_lemmatizer.pymorphy2_lemmatize(doc[0]) == ['"']
     doc = Doc(ru_lemmatizer.vocab, words=["»"], pos=["PUNCT"])
     assert ru_lemmatizer.pymorphy2_lemmatize(doc[0]) == ['"']
+
+
+def test_ru_doc_lookup_lemmatization(ru_lookup_lemmatizer):
+    words = ["мама", "мыла", "раму"]
+    pos = ["NOUN", "VERB", "NOUN"]
+    morphs = [
+        "Animacy=Anim|Case=Nom|Gender=Fem|Number=Sing",
+        "Aspect=Imp|Gender=Fem|Mood=Ind|Number=Sing|Tense=Past|VerbForm=Fin|Voice=Act",
+        "Animacy=Anim|Case=Acc|Gender=Fem|Number=Sing",
+    ]
+    doc = Doc(ru_lookup_lemmatizer.vocab, words=words, pos=pos, morphs=morphs)
+    doc = ru_lookup_lemmatizer(doc)
+    lemmas = [token.lemma_ for token in doc]
+    assert lemmas == ["мама", "мыла", "раму"]
spacy/tests/lang/uk/test_lemmatizer.py

@@ -9,3 +9,11 @@ def test_uk_lemmatizer(uk_lemmatizer):
     """Check that the default uk lemmatizer runs."""
     doc = Doc(uk_lemmatizer.vocab, words=["a", "b", "c"])
     uk_lemmatizer(doc)
     assert [token.lemma for token in doc]
+
+
+def test_uk_lookup_lemmatizer(uk_lookup_lemmatizer):
+    """Check that the lookup uk lemmatizer runs."""
+    doc = Doc(uk_lookup_lemmatizer.vocab, words=["a", "b", "c"])
+    uk_lookup_lemmatizer(doc)
+    assert [token.lemma for token in doc]
spacy/tests/matcher/test_levenshtein.py

@@ -28,8 +28,16 @@ from spacy.matcher import levenshtein
         (4, "いあうう", "ううああ"),
         (3, "いあいい", "ういああ"),
        (3, "いいああ", "ううあう"),
-        (166,"TCTGGGCACGGATTCGTCAGATTCCATGTCCATATTTGAGGCTCTTGCAGGCAAAATTTGGGCATGTGAACTCCTTATAGTCCCCGTGC","ATATGGATTGGGGGCATTCAAAGATACGGTTTCCCTTTCTTCAGTTTCGCGCGGCGCACGTCCGGGTGCGAGCCAGTTCGTCTTACTCACATTGTCGACTTCACGAATCGCGCATGATGTGCTTAGCCTGTACTTACGAACGAACTTTCGGTCCAAATACATTCTATCAACACCGAGGTATCCGTGCCACACGCCGAAGCTCGACCGTGTTCGTTGAGAGGTGGAAATGGTAAAAGATGAACATAGTC"),
-        (111,"GGTTCGGCCGAATTCATAGAGCGTGGTAGTCGACGGTATCCCGCCTGGTAGGGGCCCCTTCTACCTAGCGGAAGTTTGTCAGTACTCTATAACACGAGGGCCTCTCACACCCTAGATCGTCCAGCCACTCGAAGATCGCAGCACCCTTACAGAAAGGCATTAATGTTTCTCCTAGCACTTGTGCAATGGTGAAGGAGTGATG","CGTAACACTTCGCGCTACTGGGCTGCAACGTCTTGGGCATACATGCAAGATTATCTAATGCAAGCTTGAGCCCCGCTTGCGGAATTTCCCTAATCGGGGTCCCTTCCTGTTACGATAAGGACGCGTGCACT"),
+        (
+            166,
+            "TCTGGGCACGGATTCGTCAGATTCCATGTCCATATTTGAGGCTCTTGCAGGCAAAATTTGGGCATGTGAACTCCTTATAGTCCCCGTGC",
+            "ATATGGATTGGGGGCATTCAAAGATACGGTTTCCCTTTCTTCAGTTTCGCGCGGCGCACGTCCGGGTGCGAGCCAGTTCGTCTTACTCACATTGTCGACTTCACGAATCGCGCATGATGTGCTTAGCCTGTACTTACGAACGAACTTTCGGTCCAAATACATTCTATCAACACCGAGGTATCCGTGCCACACGCCGAAGCTCGACCGTGTTCGTTGAGAGGTGGAAATGGTAAAAGATGAACATAGTC",
+        ),
+        (
+            111,
+            "GGTTCGGCCGAATTCATAGAGCGTGGTAGTCGACGGTATCCCGCCTGGTAGGGGCCCCTTCTACCTAGCGGAAGTTTGTCAGTACTCTATAACACGAGGGCCTCTCACACCCTAGATCGTCCAGCCACTCGAAGATCGCAGCACCCTTACAGAAAGGCATTAATGTTTCTCCTAGCACTTGTGCAATGGTGAAGGAGTGATG",
+            "CGTAACACTTCGCGCTACTGGGCTGCAACGTCTTGGGCATACATGCAAGATTATCTAATGCAAGCTTGAGCCCCGCTTGCGGAATTTCCCTAATCGGGGTCCCTTCCTGTTACGATAAGGACGCGTGCACT",
+        ),
     ],
 )
 def test_levenshtein(dist, a, b):
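
The helper under test is importable directly. A quick illustration using one of the pairs from the table above:

```python
from spacy.matcher import levenshtein

# Edit distance between the two strings from the first test case above.
print(levenshtein("いあうう", "ううああ"))  # 4
```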
spacy/tests/test_language.py

@@ -605,10 +605,35 @@ def test_update_with_annotates():
         assert results[component] == ""


-def test_load_disable_enable() -> None:
-    """
-    Tests spacy.load() with dis-/enabling components.
-    """
+@pytest.mark.issue(11443)
+def test_enable_disable_conflict_with_config():
+    """Test conflict between enable/disable w.r.t. `nlp.disabled` set in the config."""
+    nlp = English()
+    nlp.add_pipe("tagger")
+    nlp.add_pipe("senter")
+    nlp.add_pipe("sentencizer")
+
+    with make_tempdir() as tmp_dir:
+        nlp.to_disk(tmp_dir)
+        # Expected to fail, as config and arguments conflict.
+        with pytest.raises(ValueError):
+            spacy.load(
+                tmp_dir, enable=["tagger"], config={"nlp": {"disabled": ["senter"]}}
+            )
+        # Expected to succeed without warning due to the lack of a conflicting config option.
+        spacy.load(tmp_dir, enable=["tagger"])
+        # Expected to succeed with a warning, as disable=[] should override the config setting.
+        with pytest.warns(UserWarning):
+            spacy.load(
+                tmp_dir,
+                enable=["tagger"],
+                disable=[],
+                config={"nlp": {"disabled": ["senter"]}},
+            )
+
+
+def test_load_disable_enable():
+    """Tests spacy.load() with dis-/enabling components."""

     base_nlp = English()
     for pipe in ("sentencizer", "tagger", "parser"):
spacy/tests/serialize/test_serialize_pipeline.py

@@ -404,10 +404,11 @@ def test_serialize_pipeline_disable_enable():
     assert nlp3.component_names == ["ner", "tagger"]
     with make_tempdir() as d:
         nlp3.to_disk(d)
-        nlp4 = spacy.load(d, disable=["ner"])
-        assert nlp4.pipe_names == []
+        with pytest.warns(UserWarning):
+            nlp4 = spacy.load(d, disable=["ner"])
+        assert nlp4.pipe_names == ["tagger"]
         assert nlp4.component_names == ["ner", "tagger"]
-        assert nlp4.disabled == ["ner", "tagger"]
+        assert nlp4.disabled == ["ner"]
     with make_tempdir() as d:
         nlp.to_disk(d)
         nlp5 = spacy.load(d, exclude=["tagger"])
spacy/tests/training/test_augmenters.py

@@ -31,7 +31,7 @@ def doc(nlp):
     words = ["Sarah", "'s", "sister", "flew", "to", "Silicon", "Valley", "via", "London", "."]
     tags = ["NNP", "POS", "NN", "VBD", "IN", "NNP", "NNP", "IN", "NNP", "."]
     pos = ["PROPN", "PART", "NOUN", "VERB", "ADP", "PROPN", "PROPN", "ADP", "PROPN", "PUNCT"]
-    ents = ["B-PERSON", "I-PERSON", "O", "O", "O", "B-LOC", "I-LOC", "O", "B-GPE", "O"]
+    ents = ["B-PERSON", "I-PERSON", "O", "", "O", "B-LOC", "I-LOC", "O", "B-GPE", "O"]
     cats = {"TRAVEL": 1.0, "BAKING": 0.0}
     # fmt: on
     doc = Doc(nlp.vocab, words=words, tags=tags, pos=pos, ents=ents)
@@ -106,6 +106,7 @@ def test_lowercase_augmenter(nlp, doc):
     assert [(e.start, e.end, e.label) for e in eg.reference.ents] == ents
     for ref_ent, orig_ent in zip(eg.reference.ents, doc.ents):
         assert ref_ent.text == orig_ent.text.lower()
+    assert [t.ent_iob for t in doc] == [t.ent_iob for t in eg.reference]
     assert [t.pos_ for t in eg.reference] == [t.pos_ for t in doc]

     # check that augmentation works when lowercasing leads to different
@@ -166,7 +167,7 @@ def test_make_whitespace_variant(nlp):
     lemmas = ["they", "fly", "to", "New", "York", "City", ".", "\n", "then", "they", "drive", "to", "Washington", ",", "D.C."]
     heads = [1, 1, 1, 4, 5, 2, 1, 10, 10, 10, 10, 10, 11, 12, 12]
     deps = ["nsubj", "ROOT", "prep", "compound", "compound", "pobj", "punct", "dep", "advmod", "nsubj", "ROOT", "prep", "pobj", "punct", "appos"]
-    ents = ["O", "O", "O", "B-GPE", "I-GPE", "I-GPE", "O", "O", "O", "O", "O", "O", "B-GPE", "O", "B-GPE"]
+    ents = ["O", "", "O", "B-GPE", "I-GPE", "I-GPE", "O", "O", "O", "O", "O", "O", "B-GPE", "O", "B-GPE"]
     # fmt: on
     doc = Doc(
         nlp.vocab,
@@ -215,6 +216,8 @@ def test_make_whitespace_variant(nlp):
             assert mod_ex2.reference[j].head.i == j - 1
     # entities are well-formed
     assert len(doc.ents) == len(mod_ex.reference.ents)
+    # there is one token with missing entity information
+    assert any(t.ent_iob == 0 for t in mod_ex.reference)
     for ent in mod_ex.reference.ents:
         assert not ent[0].is_space
         assert not ent[-1].is_space
spacy/tokens/doc.pyi

@@ -72,7 +72,7 @@ class Doc:
         lemmas: Optional[List[str]] = ...,
         heads: Optional[List[int]] = ...,
         deps: Optional[List[str]] = ...,
-        sent_starts: Optional[List[Union[bool, None]]] = ...,
+        sent_starts: Optional[List[Union[bool, int, None]]] = ...,
         ents: Optional[List[str]] = ...,
     ) -> None: ...
     @property
spacy/tokens/doc.pyx

@@ -217,9 +217,9 @@ cdef class Doc:
             head in the doc. Defaults to None.
         deps (Optional[List[str]]): A list of unicode strings, of the same
             length as words, to assign as token.dep. Defaults to None.
-        sent_starts (Optional[List[Union[bool, None]]]): A list of values, of
-            the same length as words, to assign as token.is_sent_start. Will be
-            overridden by heads if heads is provided. Defaults to None.
+        sent_starts (Optional[List[Union[bool, int, None]]]): A list of values,
+            of the same length as words, to assign as token.is_sent_start. Will
+            be overridden by heads if heads is provided. Defaults to None.
         ents (Optional[List[str]]): A list of unicode strings, of the same
             length as words, as IOB tags to assign as token.ent_iob and
             token.ent_type. Defaults to None.
@@ -285,6 +285,7 @@ cdef class Doc:
             heads = [0] * len(deps)
         if heads and not deps:
             raise ValueError(Errors.E1017)
+        sent_starts = list(sent_starts) if sent_starts is not None else None
         if sent_starts is not None:
             for i in range(len(sent_starts)):
                 if sent_starts[i] is True:
@@ -300,12 +301,11 @@ cdef class Doc:
         ent_iobs = None
         ent_types = None
         if ents is not None:
+            ents = [ent if ent != "" else None for ent in ents]
             iob_strings = Token.iob_strings()
             # make valid IOB2 out of IOB1 or IOB2
             for i, ent in enumerate(ents):
-                if ent is "":
-                    ents[i] = None
-                elif ent is not None and not isinstance(ent, str):
+                if ent is not None and not isinstance(ent, str):
                     raise ValueError(Errors.E177.format(tag=ent))
                 if i < len(ents) - 1:
                     # OI -> OB
spacy/training/augment.py

@@ -6,7 +6,7 @@ from functools import partial

 from ..util import registry
 from .example import Example
-from .iob_utils import split_bilu_label
+from .iob_utils import split_bilu_label, _doc_to_biluo_tags_with_partial

 if TYPE_CHECKING:
     from ..language import Language  # noqa: F401
@@ -62,6 +62,9 @@ def combined_augmenter(
     if orth_variants and random.random() < orth_level:
         raw_text = example.text
         orig_dict = example.to_dict()
+        orig_dict["doc_annotation"]["entities"] = _doc_to_biluo_tags_with_partial(
+            example.reference
+        )
         variant_text, variant_token_annot = make_orth_variants(
             nlp,
             raw_text,
@@ -128,6 +131,9 @@ def lower_casing_augmenter(

 def make_lowercase_variant(nlp: "Language", example: Example):
     example_dict = example.to_dict()
+    example_dict["doc_annotation"]["entities"] = _doc_to_biluo_tags_with_partial(
+        example.reference
+    )
     doc = nlp.make_doc(example.text.lower())
     example_dict["token_annotation"]["ORTH"] = [t.lower_ for t in example.reference]
     return example.from_dict(doc, example_dict)
@@ -146,6 +152,9 @@ def orth_variants_augmenter(
     else:
         raw_text = example.text
         orig_dict = example.to_dict()
+        orig_dict["doc_annotation"]["entities"] = _doc_to_biluo_tags_with_partial(
+            example.reference
+        )
         variant_text, variant_token_annot = make_orth_variants(
             nlp,
             raw_text,
@@ -248,6 +257,9 @@ def make_whitespace_variant(
     RETURNS (Example): Example with one additional space token.
     """
     example_dict = example.to_dict()
+    example_dict["doc_annotation"]["entities"] = _doc_to_biluo_tags_with_partial(
+        example.reference
+    )
     doc_dict = example_dict.get("doc_annotation", {})
     token_dict = example_dict.get("token_annotation", {})
     # returned unmodified if:
spacy/training/iob_utils.py

@@ -60,6 +60,14 @@ def doc_to_biluo_tags(doc: Doc, missing: str = "O"):
     )


+def _doc_to_biluo_tags_with_partial(doc: Doc) -> List[str]:
+    ents = doc_to_biluo_tags(doc, missing="-")
+    for i, token in enumerate(doc):
+        if token.ent_iob == 2:
+            ents[i] = "O"
+    return ents
+
+
 def offsets_to_biluo_tags(
     doc: Doc, entities: Iterable[Tuple[int, int, Union[str, int]]], missing: str = "O"
 ) -> List[str]:
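
A small sketch of the distinction this helper preserves, namely missing annotation ("-") versus an explicit "O" (assumes the public `doc_to_biluo_tags` re-exported from `spacy.training`):

```python
import spacy
from spacy.tokens import Doc
from spacy.training import doc_to_biluo_tags

nlp = spacy.blank("en")
# "" marks the second token as having *missing* entity annotation (ent_iob == 0).
doc = Doc(nlp.vocab, words=["A", "sentence"], ents=["B-TYPE1", ""])
print(doc_to_biluo_tags(doc, missing="-"))  # ['U-TYPE1', '-']
```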
spacy/util.py

@@ -67,7 +67,6 @@ LEXEME_NORM_LANGS = ["cs", "da", "de", "el", "en", "id", "lb", "mk", "pt", "ru",
 CONFIG_SECTION_ORDER = ["paths", "variables", "system", "nlp", "components", "corpora", "training", "pretraining", "initialize"]
 # fmt: on
-

 logger = logging.getLogger("spacy")
 logger_stream_handler = logging.StreamHandler()
 logger_stream_handler.setFormatter(
@@ -394,13 +393,17 @@ def get_module_path(module: ModuleType) -> Path:
     return file_path.parent


+# Default value for passed enable/disable values.
+_DEFAULT_EMPTY_PIPES = SimpleFrozenList()
+
+
 def load_model(
     name: Union[str, Path],
     *,
     vocab: Union["Vocab", bool] = True,
-    disable: Union[str, Iterable[str]] = SimpleFrozenList(),
-    enable: Union[str, Iterable[str]] = SimpleFrozenList(),
-    exclude: Union[str, Iterable[str]] = SimpleFrozenList(),
+    disable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
+    enable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
+    exclude: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
     config: Union[Dict[str, Any], Config] = SimpleFrozenDict(),
 ) -> "Language":
     """Load a model from a package or data path.
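
The shared module-level default acts as a sentinel: an identity check distinguishes "argument not passed" from "explicitly passed an empty list". A minimal standalone sketch of that pattern (not spaCy code):

```python
_DEFAULT_EMPTY: tuple = ()  # one shared object used as the default

def load(disable=_DEFAULT_EMPTY, config_disabled=("senter",)):
    if disable is _DEFAULT_EMPTY:
        # Caller did not pass the argument: fall back to the config value.
        return list(config_disabled)
    # Caller passed something explicitly (even an empty list): it wins.
    return list(disable)

print(load())            # ['senter']  (config value used)
print(load(disable=[]))  # []          (explicit empty list overrides config)
```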
@@ -470,9 +473,9 @@ def load_model_from_path(
     *,
     meta: Optional[Dict[str, Any]] = None,
     vocab: Union["Vocab", bool] = True,
-    disable: Union[str, Iterable[str]] = SimpleFrozenList(),
-    enable: Union[str, Iterable[str]] = SimpleFrozenList(),
-    exclude: Union[str, Iterable[str]] = SimpleFrozenList(),
+    disable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
+    enable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
+    exclude: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
     config: Union[Dict[str, Any], Config] = SimpleFrozenDict(),
 ) -> "Language":
     """Load a model from a data directory path. Creates Language class with
@@ -516,9 +519,9 @@ def load_model_from_config(
     *,
     meta: Dict[str, Any] = SimpleFrozenDict(),
     vocab: Union["Vocab", bool] = True,
-    disable: Union[str, Iterable[str]] = SimpleFrozenList(),
-    enable: Union[str, Iterable[str]] = SimpleFrozenList(),
-    exclude: Union[str, Iterable[str]] = SimpleFrozenList(),
+    disable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
+    enable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
+    exclude: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
     auto_fill: bool = False,
     validate: bool = True,
 ) -> "Language":
website/docs/api/architectures.md

@@ -11,6 +11,7 @@ menu:
   - ['Text Classification', 'textcat']
   - ['Span Classification', 'spancat']
   - ['Entity Linking', 'entitylinker']
+  - ['Coreference', 'coref-architectures']
 ---

 A **model architecture** is a function that wires up a
@@ -587,8 +588,8 @@ consists of either two or three subnetworks:
   run once for each batch.
 - **lower**: Construct a feature-specific vector for each `(token, feature)`
   pair. This is also run once for each batch. Constructing the state
-  representation is then a matter of summing the component features and
-  applying the non-linearity.
+  representation is then a matter of summing the component features and applying
+  the non-linearity.
 - **upper** (optional): A feed-forward network that predicts scores from the
   state representation. If not present, the output from the lower model is used
   as action scores directly.
@@ -628,8 +629,8 @@ same signature, but the `use_upper` argument was `True` by default.
 > ```

 Build a tagger model, using a provided token-to-vector component. The tagger
-model adds a linear layer with softmax activation to predict scores given
-the token vectors.
+model adds a linear layer with softmax activation to predict scores given the
+token vectors.

 | Name        | Description |
 | ----------- | ----------- |
@@ -920,5 +921,84 @@ A function that reads an existing `KnowledgeBase` from file.
 A function that takes as input a [`KnowledgeBase`](/api/kb) and a
 [`Span`](/api/span) object denoting a named entity, and returns a list of
 plausible [`Candidate`](/api/kb/#candidate) objects. The default
-`CandidateGenerator` uses the text of a mention to find its potential
-aliases in the `KnowledgeBase`. Note that this function is case-dependent.
+`CandidateGenerator` uses the text of a mention to find its potential aliases in
+the `KnowledgeBase`. Note that this function is case-dependent.
+
+## Coreference {#coref-architectures tag="experimental"}
+
+A [`CoreferenceResolver`](/api/coref) component identifies tokens that refer to
+the same entity. A [`SpanResolver`](/api/span-resolver) component infers spans
+from single tokens. Together these components can be used to reproduce
+traditional coreference models. You can also omit the `SpanResolver` if working
+with only token-level clusters is acceptable.
+
+### spacy-experimental.Coref.v1 {#Coref tag="experimental"}
+
+> #### Example Config
+>
+> ```ini
+>
+> [model]
+> @architectures = "spacy-experimental.Coref.v1"
+> distance_embedding_size = 20
+> dropout = 0.3
+> hidden_size = 1024
+> depth = 2
+> antecedent_limit = 50
+> antecedent_batch_size = 512
+>
+> [model.tok2vec]
+> @architectures = "spacy-transformers.TransformerListener.v1"
+> grad_factor = 1.0
+> upstream = "transformer"
+> pooling = {"@layers":"reduce_mean.v1"}
+> ```
+
+The `Coref` model architecture is a Thinc `Model`.
+
+| Name                      | Description |
+| ------------------------- | ----------- |
+| `tok2vec`                 | The [`tok2vec`](#tok2vec) layer of the model. ~~Model~~ |
+| `distance_embedding_size` | A representation of the distance between candidates. ~~int~~ |
+| `dropout`                 | The dropout to use internally. Unlike some Thinc models, this has separate dropout for the internal PyTorch layers. ~~float~~ |
+| `hidden_size`             | Size of the main internal layers. ~~int~~ |
+| `depth`                   | Depth of the internal network. ~~int~~ |
+| `antecedent_limit`        | How many candidate antecedents to keep after rough scoring. This has a significant effect on memory usage. Typical values would be 50 to 200, or higher for very long documents. ~~int~~ |
+| `antecedent_batch_size`   | Internal batch size. ~~int~~ |
+| **CREATES**               | The model using the architecture. ~~Model[List[Doc], Floats2d]~~ |
+
+### spacy-experimental.SpanResolver.v1 {#SpanResolver tag="experimental"}
+
+> #### Example Config
+>
+> ```ini
+>
+> [model]
+> @architectures = "spacy-experimental.SpanResolver.v1"
+> hidden_size = 1024
+> distance_embedding_size = 64
+> conv_channels = 4
+> window_size = 1
+> max_distance = 128
+> prefix = "coref_head_clusters"
+>
+> [model.tok2vec]
+> @architectures = "spacy-transformers.TransformerListener.v1"
+> grad_factor = 1.0
+> upstream = "transformer"
+> pooling = {"@layers":"reduce_mean.v1"}
+> ```
+
+The `SpanResolver` model architecture is a Thinc `Model`. Note that
+`MentionClusters` is `List[List[Tuple[int, int]]]`.
+
+| Name                      | Description |
+| ------------------------- | ----------- |
+| `tok2vec`                 | The [`tok2vec`](#tok2vec) layer of the model. ~~Model~~ |
+| `hidden_size`             | Size of the main internal layers. ~~int~~ |
+| `distance_embedding_size` | A representation of the distance between two candidates. ~~int~~ |
+| `conv_channels`           | The number of channels in the internal CNN. ~~int~~ |
+| `window_size`             | The number of neighboring tokens to consider in the internal CNN. `1` means consider one token on each side. ~~int~~ |
+| `max_distance`            | The longest possible length of a predicted span. ~~int~~ |
+| `prefix`                  | The prefix that indicates spans to use for input data. ~~string~~ |
+| **CREATES**               | The model using the architecture. ~~Model[List[Doc], List[MentionClusters]]~~ |
353
website/docs/api/coref.md
Normal file
353
website/docs/api/coref.md
Normal file
|
@ -0,0 +1,353 @@
|
|||
---
|
||||
title: CoreferenceResolver
|
||||
tag: class,experimental
|
||||
source: spacy-experimental/coref/coref_component.py
|
||||
teaser: 'Pipeline component for word-level coreference resolution'
|
||||
api_base_class: /api/pipe
|
||||
api_string_name: coref
|
||||
api_trainable: true
|
||||
---
|
||||
|
||||
> #### Installation
|
||||
>
|
||||
> ```bash
|
||||
> $ pip install -U spacy-experimental
|
||||
> ```
|
||||
|
||||
<Infobox title="Important note" variant="warning">
|
||||
|
||||
This component is not yet integrated into spaCy core, and is available via the
|
||||
extension package
|
||||
[`spacy-experimental`](https://github.com/explosion/spacy-experimental) starting
|
||||
in version 0.6.0. It exposes the component via
|
||||
[entry points](/usage/saving-loading/#entry-points), so if you have the package
|
||||
installed, using `factory = "experimental_coref"` in your
|
||||
[training config](/usage/training#config) or
|
||||
`nlp.add_pipe("experimental_coref")` will work out-of-the-box.
|
||||
|
||||
</Infobox>
|
||||
|
||||
A `CoreferenceResolver` component groups tokens into clusters that refer to the
|
||||
same thing. Clusters are represented as SpanGroups that start with a prefix
|
||||
(`coref_clusters` by default).
|
||||
|
||||
A `CoreferenceResolver` component can be paired with a
|
||||
[`SpanResolver`](/api/span-resolver) to expand single tokens to spans.
|
||||
|
||||
## Assigned Attributes {#assigned-attributes}
|
||||
|
||||
Predictions will be saved to `Doc.spans` as a [`SpanGroup`](/api/spangroup). The
|
||||
span key will be a prefix plus a serial number referring to the coreference
|
||||
cluster, starting from zero.
|
||||
|
||||
The span key prefix defaults to `"coref_clusters"`, but can be passed as a
|
||||
parameter.
|
||||
|
||||
| Location | Value |
|
||||
| ------------------------------------------ | ------------------------------------------------------------------------------------------------------- |
|
||||
| `Doc.spans[prefix + "_" + cluster_number]` | One coreference cluster, represented as single-token spans. Cluster numbers start from 1. ~~SpanGroup~~ |
|
||||
|
||||
## Config and implementation {#config}
|
||||
|
||||
The default config is defined by the pipeline component factory and describes
|
||||
how the component should be configured. You can override its settings via the
|
||||
`config` argument on [`nlp.add_pipe`](/api/language#add_pipe) or in your
|
||||
[`config.cfg` for training](/usage/training#config). See the
|
||||
[model architectures](/api/architectures#coref-architectures) documentation for
|
||||
details on the architectures and their arguments and hyperparameters.
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> from spacy_experimental.coref.coref_component import DEFAULT_COREF_MODEL
|
||||
> from spacy_experimental.coref.coref_util import DEFAULT_CLUSTER_PREFIX
|
||||
> config={
|
||||
> "model": DEFAULT_COREF_MODEL,
|
||||
> "span_cluster_prefix": DEFAULT_CLUSTER_PREFIX,
|
||||
> },
|
||||
> nlp.add_pipe("experimental_coref", config=config)
|
||||
> ```
|
||||
|
||||
| Setting | Description |
|
||||
| --------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `model` | The [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. Defaults to [Coref](/api/architectures#Coref). ~~Model~~ |
|
||||
| `span_cluster_prefix` | The prefix for the keys for clusters saved to `doc.spans`. Defaults to `coref_clusters`. ~~str~~ |
|
||||
|
||||
## CoreferenceResolver.\_\_init\_\_ {#init tag="method"}
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> # Construction via add_pipe with default model
|
||||
> coref = nlp.add_pipe("experimental_coref")
|
||||
>
|
||||
> # Construction via add_pipe with custom model
|
||||
> config = {"model": {"@architectures": "my_coref.v1"}}
|
||||
> coref = nlp.add_pipe("experimental_coref", config=config)
|
||||
>
|
||||
> # Construction from class
|
||||
> from spacy_experimental.coref.coref_component import CoreferenceResolver
|
||||
> coref = CoreferenceResolver(nlp.vocab, model)
|
||||
> ```
|
||||
|
||||
Create a new pipeline instance. In your application, you would normally use a
|
||||
shortcut for this and instantiate the component using its string name and
|
||||
[`nlp.add_pipe`](/api/language#add_pipe).
|
||||
|
||||
| Name | Description |
|
||||
| --------------------- | --------------------------------------------------------------------------------------------------- |
|
||||
| `vocab` | The shared vocabulary. ~~Vocab~~ |
|
||||
| `model` | The [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. ~~Model~~ |
|
||||
| `name` | String name of the component instance. Used to add entries to the `losses` during training. ~~str~~ |
|
||||
| _keyword-only_ | |
|
||||
| `span_cluster_prefix` | The prefix for the key for saving clusters of spans. ~~bool~~ |
|
||||
|
||||
## CoreferenceResolver.\_\_call\_\_ {#call tag="method"}
|
||||
|
||||
Apply the pipe to one document. The document is modified in place and returned.
|
||||
This usually happens under the hood when the `nlp` object is called on a text
|
||||
and all pipeline components are applied to the `Doc` in order. Both
|
||||
[`__call__`](/api/coref#call) and [`pipe`](/api/coref#pipe) delegate to the
|
||||
[`predict`](/api/coref#predict) and
|
||||
[`set_annotations`](/api/coref#set_annotations) methods.
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> doc = nlp("This is a sentence.")
|
||||
> coref = nlp.add_pipe("experimental_coref")
|
||||
> # This usually happens under the hood
|
||||
> processed = coref(doc)
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| ----------- | -------------------------------- |
|
||||
| `doc` | The document to process. ~~Doc~~ |
|
||||
| **RETURNS** | The processed document. ~~Doc~~ |
|
||||
|
||||
## CoreferenceResolver.pipe {#pipe tag="method"}
|
||||
|
||||
Apply the pipe to a stream of documents. This usually happens under the hood
|
||||
when the `nlp` object is called on a text and all pipeline components are
|
||||
applied to the `Doc` in order. Both [`__call__`](/api/coref#call) and
|
||||
[`pipe`](/api/coref#pipe) delegate to the [`predict`](/api/coref#predict) and
|
||||
[`set_annotations`](/api/coref#set_annotations) methods.
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> coref = nlp.add_pipe("experimental_coref")
|
||||
> for doc in coref.pipe(docs, batch_size=50):
|
||||
> pass
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| -------------- | ------------------------------------------------------------- |
|
||||
| `stream` | A stream of documents. ~~Iterable[Doc]~~ |
|
||||
| _keyword-only_ | |
|
||||
| `batch_size` | The number of documents to buffer. Defaults to `128`. ~~int~~ |
|
||||
| **YIELDS** | The processed documents in order. ~~Doc~~ |
|
||||
|
||||
## CoreferenceResolver.initialize {#initialize tag="method"}
|
||||
|
||||
Initialize the component for training. `get_examples` should be a function that
|
||||
returns an iterable of [`Example`](/api/example) objects. **At least one example
|
||||
should be supplied.** The data examples are used to **initialize the model** of
|
||||
the component and can either be the full training data or a representative
|
||||
sample. Initialization includes validating the network,
|
||||
[inferring missing shapes](https://thinc.ai/docs/usage-models#validation) and
|
||||
setting up the label scheme based on the data. This method is typically called
|
||||
by [`Language.initialize`](/api/language#initialize).
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> coref = nlp.add_pipe("experimental_coref")
|
||||
> coref.initialize(lambda: examples, nlp=nlp)
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| -------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `get_examples` | Function that returns gold-standard annotations in the form of [`Example`](/api/example) objects. Must contain at least one `Example`. ~~Callable[[], Iterable[Example]]~~ |
|
||||
| _keyword-only_ | |
|
||||
| `nlp` | The current `nlp` object. Defaults to `None`. ~~Optional[Language]~~ |
|
||||
|
||||
## CoreferenceResolver.predict {#predict tag="method"}
|
||||
|
||||
Apply the component's model to a batch of [`Doc`](/api/doc) objects, without
|
||||
modifying them. Clusters are returned as a list of `MentionClusters`, one for
|
||||
each input `Doc`. A `MentionClusters` instance is just a list of lists of pairs
|
||||
of `int`s, where each item corresponds to a cluster, and the `int`s correspond
|
||||
to token indices.
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> coref = nlp.add_pipe("experimental_coref")
|
||||
> clusters = coref.predict([doc1, doc2])
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| ----------- | ---------------------------------------------------------------------------- |
|
||||
| `docs` | The documents to predict. ~~Iterable[Doc]~~ |
|
||||
| **RETURNS** | The predicted coreference clusters for the `docs`. ~~List[MentionClusters]~~ |
|
||||
|
||||
## CoreferenceResolver.set_annotations {#set_annotations tag="method"}
|
||||
|
||||
Modify a batch of documents, saving coreference clusters in `Doc.spans`.
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> coref = nlp.add_pipe("experimental_coref")
|
||||
> clusters = coref.predict([doc1, doc2])
|
||||
> coref.set_annotations([doc1, doc2], clusters)
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| ---------- | ---------------------------------------------------------------------------- |
|
||||
| `docs` | The documents to modify. ~~Iterable[Doc]~~ |
|
||||
| `clusters` | The predicted coreference clusters for the `docs`. ~~List[MentionClusters]~~ |
|
||||
|
||||
## CoreferenceResolver.update {#update tag="method"}
|
||||
|
||||
Learn from a batch of [`Example`](/api/example) objects. Delegates to
|
||||
[`predict`](/api/coref#predict).
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> coref = nlp.add_pipe("experimental_coref")
|
||||
> optimizer = nlp.initialize()
|
||||
> losses = coref.update(examples, sgd=optimizer)
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| -------------- | ------------------------------------------------------------------------------------------------------------------------ |
|
||||
| `examples` | A batch of [`Example`](/api/example) objects to learn from. ~~Iterable[Example]~~ |
|
||||
| _keyword-only_ | |
|
||||
| `drop` | The dropout rate. ~~float~~ |
|
||||
| `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ |
|
||||
| `losses` | Optional record of the loss during training. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ |
|
||||
| **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ |
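
In practice the update loop is driven by [`spacy train`](/api/cli#train), but
as a rough sketch of how `update` fits into a manual loop (assuming `examples`
is a list of `Example` objects):

```python
import random
from spacy.util import minibatch

coref = nlp.add_pipe("experimental_coref")
optimizer = nlp.initialize(lambda: examples)
for epoch in range(10):
    random.shuffle(examples)
    losses = {}
    for batch in minibatch(examples, size=8):
        coref.update(batch, sgd=optimizer, losses=losses)
    # "experimental_coref" is the default component name used as the losses key
    print(epoch, losses.get("experimental_coref"))
```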
|
||||
|
||||
## CoreferenceResolver.create_optimizer {#create_optimizer tag="method"}
|
||||
|
||||
Create an optimizer for the pipeline component.
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> coref = nlp.add_pipe("experimental_coref")
|
||||
> optimizer = coref.create_optimizer()
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| ----------- | ---------------------------- |
|
||||
| **RETURNS** | The optimizer. ~~Optimizer~~ |
|
||||
|
||||
## CoreferenceResolver.use_params {#use_params tag="method, contextmanager"}
|
||||
|
||||
Modify the pipe's model, to use the given parameter values. At the end of the
|
||||
context, the original parameters are restored.
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> coref = nlp.add_pipe("experimental_coref")
|
||||
> with coref.use_params(optimizer.averages):
|
||||
> coref.to_disk("/best_model")
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| -------- | -------------------------------------------------- |
|
||||
| `params` | The parameter values to use in the model. ~~dict~~ |
|
||||
|
||||
## CoreferenceResolver.to_disk {#to_disk tag="method"}
|
||||
|
||||
Serialize the pipe to disk.
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> coref = nlp.add_pipe("experimental_coref")
|
||||
> coref.to_disk("/path/to/coref")
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| `path` | A path to a directory, which will be created if it doesn't exist. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ |
|
||||
| _keyword-only_ | |
|
||||
| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ |
|
||||
|
||||
## CoreferenceResolver.from_disk {#from_disk tag="method"}
|
||||
|
||||
Load the pipe from disk. Modifies the object in place and returns it.
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> coref = nlp.add_pipe("experimental_coref")
|
||||
> coref.from_disk("/path/to/coref")
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| -------------- | ----------------------------------------------------------------------------------------------- |
|
||||
| `path` | A path to a directory. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ |
|
||||
| _keyword-only_ | |
|
||||
| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ |
|
||||
| **RETURNS** | The modified `CoreferenceResolver` object. ~~CoreferenceResolver~~ |
|
||||
|
||||
## CoreferenceResolver.to_bytes {#to_bytes tag="method"}
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> coref = nlp.add_pipe("experimental_coref")
|
||||
> coref_bytes = coref.to_bytes()
|
||||
> ```
|
||||
|
||||
Serialize the pipe to a bytestring.
|
||||
|
||||
| Name | Description |
|
||||
| -------------- | ------------------------------------------------------------------------------------------- |
|
||||
| _keyword-only_ | |
|
||||
| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ |
|
||||
| **RETURNS** | The serialized form of the `CoreferenceResolver` object. ~~bytes~~ |
|
||||
|
||||
## CoreferenceResolver.from_bytes {#from_bytes tag="method"}
|
||||
|
||||
Load the pipe from a bytestring. Modifies the object in place and returns it.
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> coref_bytes = coref.to_bytes()
|
||||
> coref = nlp.add_pipe("experimental_coref")
|
||||
> coref.from_bytes(coref_bytes)
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| -------------- | ------------------------------------------------------------------------------------------- |
|
||||
| `bytes_data` | The data to load from. ~~bytes~~ |
|
||||
| _keyword-only_ | |
|
||||
| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ |
|
||||
| **RETURNS** | The `CoreferenceResolver` object. ~~CoreferenceResolver~~ |
|
||||
|
||||
## Serialization fields {#serialization-fields}
|
||||
|
||||
During serialization, spaCy will export several data fields used to restore
|
||||
different aspects of the object. If needed, you can exclude them from
|
||||
serialization by passing in the string names via the `exclude` argument.
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> data = coref.to_disk("/path", exclude=["vocab"])
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| ------- | -------------------------------------------------------------- |
|
||||
| `vocab` | The shared [`Vocab`](/api/vocab). |
|
||||
| `cfg` | The config file. You usually don't want to exclude this. |
|
||||
| `model` | The binary model data. You usually don't want to exclude this. |
|
|
@ -31,21 +31,21 @@ Construct a `Doc` object. The most common way to get a `Doc` object is via the
|
|||
> doc = Doc(nlp.vocab, words=words, spaces=spaces)
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| ---------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `vocab` | A storage container for lexical types. ~~Vocab~~ |
|
||||
| `words` | A list of strings or integer hash values to add to the document as words. ~~Optional[List[Union[str,int]]]~~ |
|
||||
| `spaces` | A list of boolean values indicating whether each word has a subsequent space. Must have the same length as `words`, if specified. Defaults to a sequence of `True`. ~~Optional[List[bool]]~~ |
|
||||
| _keyword-only_ | |
|
||||
| `user_data` | Optional extra data to attach to the Doc. ~~Dict~~ |
|
||||
| `tags` <Tag variant="new">3</Tag> | A list of strings, of the same length as `words`, to assign as `token.tag` for each word. Defaults to `None`. ~~Optional[List[str]]~~ |
|
||||
| `pos` <Tag variant="new">3</Tag> | A list of strings, of the same length as `words`, to assign as `token.pos` for each word. Defaults to `None`. ~~Optional[List[str]]~~ |
|
||||
| `morphs` <Tag variant="new">3</Tag> | A list of strings, of the same length as `words`, to assign as `token.morph` for each word. Defaults to `None`. ~~Optional[List[str]]~~ |
|
||||
| `lemmas` <Tag variant="new">3</Tag> | A list of strings, of the same length as `words`, to assign as `token.lemma` for each word. Defaults to `None`. ~~Optional[List[str]]~~ |
|
||||
| `heads` <Tag variant="new">3</Tag> | A list of values, of the same length as `words`, to assign as the head for each word. Head indices are the absolute position of the head in the `Doc`. Defaults to `None`. ~~Optional[List[int]]~~ |
|
||||
| `deps` <Tag variant="new">3</Tag> | A list of strings, of the same length as `words`, to assign as `token.dep` for each word. Defaults to `None`. ~~Optional[List[str]]~~ |
|
||||
| `sent_starts` <Tag variant="new">3</Tag> | A list of values, of the same length as `words`, to assign as `token.is_sent_start`. Will be overridden by heads if `heads` is provided. Defaults to `None`. ~~Optional[List[Optional[bool]]]~~ |
|
||||
| `ents` <Tag variant="new">3</Tag> | A list of strings, of the same length of `words`, to assign the token-based IOB tag. Defaults to `None`. ~~Optional[List[str]]~~ |
|
||||
| Name | Description |
|
||||
| ---------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `vocab` | A storage container for lexical types. ~~Vocab~~ |
|
||||
| `words` | A list of strings or integer hash values to add to the document as words. ~~Optional[List[Union[str,int]]]~~ |
|
||||
| `spaces` | A list of boolean values indicating whether each word has a subsequent space. Must have the same length as `words`, if specified. Defaults to a sequence of `True`. ~~Optional[List[bool]]~~ |
|
||||
| _keyword-only_ | |
|
||||
| `user_data` | Optional extra data to attach to the Doc. ~~Dict~~ |
|
||||
| `tags` <Tag variant="new">3</Tag> | A list of strings, of the same length as `words`, to assign as `token.tag` for each word. Defaults to `None`. ~~Optional[List[str]]~~ |
|
||||
| `pos` <Tag variant="new">3</Tag> | A list of strings, of the same length as `words`, to assign as `token.pos` for each word. Defaults to `None`. ~~Optional[List[str]]~~ |
|
||||
| `morphs` <Tag variant="new">3</Tag> | A list of strings, of the same length as `words`, to assign as `token.morph` for each word. Defaults to `None`. ~~Optional[List[str]]~~ |
|
||||
| `lemmas` <Tag variant="new">3</Tag> | A list of strings, of the same length as `words`, to assign as `token.lemma` for each word. Defaults to `None`. ~~Optional[List[str]]~~ |
|
||||
| `heads` <Tag variant="new">3</Tag> | A list of values, of the same length as `words`, to assign as the head for each word. Head indices are the absolute position of the head in the `Doc`. Defaults to `None`. ~~Optional[List[int]]~~ |
|
||||
| `deps` <Tag variant="new">3</Tag> | A list of strings, of the same length as `words`, to assign as `token.dep` for each word. Defaults to `None`. ~~Optional[List[str]]~~ |
|
||||
| `sent_starts` <Tag variant="new">3</Tag> | A list of values, of the same length as `words`, to assign as `token.is_sent_start`. Will be overridden by heads if `heads` is provided. Defaults to `None`. ~~Optional[List[Union[bool, int, None]]]~~ |
|
||||
| `ents` <Tag variant="new">3</Tag> | A list of strings, of the same length of `words`, to assign the token-based IOB tag. Defaults to `None`. ~~Optional[List[str]]~~ |
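
For example, a fully annotated `Doc` can be constructed directly from these
arguments. A minimal sketch using a blank English pipeline:

```python
import spacy
from spacy.tokens import Doc

nlp = spacy.blank("en")
words = ["Hello", "world", ".", "How", "are", "you", "?"]
spaces = [True, False, True, True, True, False, False]
tags = ["UH", "NN", ".", "WRB", "VBP", "PRP", "."]
sent_starts = [True, None, None, True, None, None, None]
doc = Doc(nlp.vocab, words=words, spaces=spaces, tags=tags, sent_starts=sent_starts)
assert doc.text == "Hello world. How are you?"
assert doc[0].tag_ == "UH"
```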
|
||||
|
||||
## Doc.\_\_getitem\_\_ {#getitem tag="method"}
|
||||
|
||||
|
|
|
@ -23,11 +23,13 @@ both documents.
|
|||
> ```python
|
||||
> from spacy.tokens import Doc
|
||||
> from spacy.training import Example
|
||||
>
|
||||
> words = ["hello", "world", "!"]
|
||||
> spaces = [True, False, False]
|
||||
> predicted = Doc(nlp.vocab, words=words, spaces=spaces)
|
||||
> reference = parse_gold_doc(my_data)
|
||||
> pred_words = ["Apply", "some", "sunscreen"]
|
||||
> pred_spaces = [True, True, False]
|
||||
> gold_words = ["Apply", "some", "sun", "screen"]
|
||||
> gold_spaces = [True, True, False, False]
|
||||
> gold_tags = ["VERB", "DET", "NOUN", "NOUN"]
|
||||
> predicted = Doc(nlp.vocab, words=pred_words, spaces=pred_spaces)
|
||||
> reference = Doc(nlp.vocab, words=gold_words, spaces=gold_spaces, tags=gold_tags)
|
||||
> example = Example(predicted, reference)
|
||||
> ```
|
||||
|
||||
|
|
|
@ -164,6 +164,9 @@ examples, see the
|
|||
Apply the pipeline to some text. The text can span multiple sentences, and can
|
||||
contain arbitrary whitespace. Alignment into the original string is preserved.
|
||||
|
||||
Instead of text, a `Doc` can be passed as input, in which case tokenization is
|
||||
skipped, but the rest of the pipeline is run.
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
|
@ -173,7 +176,7 @@ contain arbitrary whitespace. Alignment into the original string is preserved.
|
|||
|
||||
| Name | Description |
|
||||
| --------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `text` | The text to be processed. ~~str~~ |
|
||||
| `text` | The text to be processed, or a Doc. ~~Union[str, Doc]~~ |
|
||||
| _keyword-only_ | |
|
||||
| `disable` | Names of pipeline components to [disable](/usage/processing-pipelines#disabling). ~~List[str]~~ |
|
||||
| `component_cfg` | Optional dictionary of keyword arguments for components, keyed by component names. Defaults to `None`. ~~Optional[Dict[str, Dict[str, Any]]]~~ |
|
||||
|
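As a short sketch of the `Doc` input described above (assuming an `nlp`
pipeline is already loaded), a pre-tokenized `Doc` keeps its tokenization and
only the later components are applied:

```python
from spacy.tokens import Doc

words = ["Hello", ",", "world", "!"]
spaces = [False, True, False, False]
doc = Doc(nlp.vocab, words=words, spaces=spaces)
doc = nlp(doc)  # tokenization is skipped, remaining components run
```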
@ -184,6 +187,9 @@ contain arbitrary whitespace. Alignment into the original string is preserved.
|
|||
Process texts as a stream, and yield `Doc` objects in order. This is usually
|
||||
more efficient than processing texts one-by-one.
|
||||
|
||||
Instead of text, a `Doc` object can be passed as input. In this case
|
||||
tokenization is skipped but the rest of the pipeline is run.
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
|
@ -194,7 +200,7 @@ more efficient than processing texts one-by-one.
|
|||
|
||||
| Name | Description |
|
||||
| ------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `texts` | A sequence of strings. ~~Iterable[str]~~ |
|
||||
| `texts` | A sequence of strings (or `Doc` objects). ~~Iterable[Union[str, Doc]]~~ |
|
||||
| _keyword-only_ | |
|
||||
| `as_tuples` | If set to `True`, inputs should be a sequence of `(text, context)` tuples. Output will then be a sequence of `(doc, context)` tuples. Defaults to `False`. ~~bool~~ |
|
||||
| `batch_size` | The number of texts to buffer. ~~Optional[int]~~ |
|
||||
|
|
|
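Similarly, pre-built `Doc` objects can be streamed through `pipe`. A minimal
sketch (assuming an `nlp` pipeline is already loaded):

```python
from spacy.tokens import Doc

docs = [
    Doc(nlp.vocab, words=["Hello", "world"]),
    Doc(nlp.vocab, words=["Another", "doc"]),
]
# Tokenization is skipped for Doc inputs; the rest of the pipeline runs.
for doc in nlp.pipe(docs, batch_size=32):
    print([token.text for token in doc])
```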
@ -153,3 +153,36 @@ whole pipeline has run.
|
|||
| `attrs` | A dict of the `Doc` attributes and the values to set them to. Defaults to `{"tensor": None, "_.trf_data": None}` to clean up after `tok2vec` and `transformer` components. ~~dict~~ |
|
||||
| `silent` | If `False`, show warnings if attributes aren't found or can't be set. Defaults to `True`. ~~bool~~ |
|
||||
| **RETURNS** | The modified `Doc` with the modified attributes. ~~Doc~~ |
|
||||
|
||||
## span_cleaner {#span_cleaner tag="function,experimental"}
|
||||
|
||||
Remove `SpanGroup`s from `doc.spans` based on a key prefix. This is used to
|
||||
clean up after the [`CoreferenceResolver`](/api/coref) when it's paired with a
|
||||
[`SpanResolver`](/api/span-resolver).
|
||||
|
||||
<Infobox title="Important note" variant="warning">
|
||||
|
||||
This pipeline function is not yet integrated into spaCy core, and is available
|
||||
via the extension package
|
||||
[`spacy-experimental`](https://github.com/explosion/spacy-experimental) starting
|
||||
in version 0.6.0. It exposes the component via
|
||||
[entry points](/usage/saving-loading/#entry-points), so if you have the package
|
||||
installed, using `factory = "span_cleaner"` in your
|
||||
[training config](/usage/training#config) or `nlp.add_pipe("span_cleaner")` will
|
||||
work out-of-the-box.
|
||||
|
||||
</Infobox>
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> config = {"prefix": "coref_head_clusters"}
|
||||
> nlp.add_pipe("span_cleaner", config=config)
|
||||
> doc = nlp("text")
|
||||
> assert "coref_head_clusters_1" not in doc.spans
|
||||
> ```
|
||||
|
||||
| Setting | Description |
|
||||
| ----------- | ------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `prefix` | A prefix to check `SpanGroup` keys for. Any matching groups will be removed. Defaults to `"coref_head_clusters"`. ~~str~~ |
|
||||
| **RETURNS** | The modified `Doc` with any matching spans removed. ~~Doc~~ |
|
||||
|
|
|
@ -270,3 +270,62 @@ Compute micro-PRF and per-entity PRF scores.
|
|||
| Name | Description |
|
||||
| ---------- | ------------------------------------------------------------------------------------------------------------------- |
|
||||
| `examples` | The `Example` objects holding both the predictions and the correct gold-standard annotations. ~~Iterable[Example]~~ |
|
||||
|
||||
## score_coref_clusters {#score_coref_clusters tag="experimental"}
|
||||
|
||||
Return LEA ([Moosavi and Strube, 2016](https://aclanthology.org/P16-1060/)) PRF
|
||||
scores for coreference clusters.
|
||||
|
||||
<Infobox title="Important note" variant="warning">
|
||||
|
||||
Note this scoring function is not yet included in spaCy core - for details, see
|
||||
the [CoreferenceResolver](/api/coref) docs.
|
||||
|
||||
</Infobox>
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> scores = score_coref_clusters(
|
||||
> examples,
|
||||
> span_cluster_prefix="coref_clusters",
|
||||
> )
|
||||
> print(scores["coref_f"])
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| --------------------- | ------------------------------------------------------------------------------------------------------------------- |
|
||||
| `examples` | The `Example` objects holding both the predictions and the correct gold-standard annotations. ~~Iterable[Example]~~ |
|
||||
| _keyword-only_ | |
|
||||
| `span_cluster_prefix` | The prefix used for spans representing coreference clusters. ~~str~~ |
|
||||
| **RETURNS** | A dictionary containing the scores. ~~Dict[str, Optional[float]]~~ |
|
||||
|
||||
## score_span_predictions {#score_span_predictions tag="experimental"}
|
||||
|
||||
Return accuracy for reconstructions of spans from single tokens. Only exactly
|
||||
correct predictions are counted as correct; there is no partial credit for near
|
||||
answers. Used by the [SpanResolver](/api/span-resolver).
|
||||
|
||||
<Infobox title="Important note" variant="warning">
|
||||
|
||||
Note this scoring function is not yet included in spaCy core - for details, see
|
||||
the [SpanResolver](/api/span-resolver) docs.
|
||||
|
||||
</Infobox>
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> scores = score_span_predictions(
|
||||
> examples,
|
||||
> output_prefix="coref_clusters",
|
||||
> )
|
||||
> print(scores["span_coref_clusters_accuracy"])
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| --------------- | ------------------------------------------------------------------------------------------------------------------- |
|
||||
| `examples` | The `Example` objects holding both the predictions and the correct gold-standard annotations. ~~Iterable[Example]~~ |
|
||||
| _keyword-only_ | |
|
||||
| `output_prefix` | The prefix used for spans representing the final predicted spans. ~~str~~ |
|
||||
| **RETURNS** | A dictionary containing the scores. ~~Dict[str, Optional[float]]~~ |
|
||||
|
|
356
website/docs/api/span-resolver.md
Normal file
|
@ -0,0 +1,356 @@
|
|||
---
|
||||
title: SpanResolver
|
||||
tag: class,experimental
|
||||
source: spacy-experimental/coref/span_resolver_component.py
|
||||
teaser: 'Pipeline component for resolving tokens into spans'
|
||||
api_base_class: /api/pipe
|
||||
api_string_name: span_resolver
|
||||
api_trainable: true
|
||||
---
|
||||
|
||||
> #### Installation
|
||||
>
|
||||
> ```bash
|
||||
> $ pip install -U spacy-experimental
|
||||
> ```
|
||||
|
||||
<Infobox title="Important note" variant="warning">
|
||||
|
||||
This component is not yet integrated into spaCy core, and is available via the
|
||||
extension package
|
||||
[`spacy-experimental`](https://github.com/explosion/spacy-experimental) starting
|
||||
in version 0.6.0. It exposes the component via
|
||||
[entry points](/usage/saving-loading/#entry-points), so if you have the package
|
||||
installed, using `factory = "experimental_span_resolver"` in your
|
||||
[training config](/usage/training#config) or
|
||||
`nlp.add_pipe("experimental_span_resolver")` will work out-of-the-box.
|
||||
|
||||
</Infobox>
|
||||
|
||||
A `SpanResolver` component takes in tokens (represented as `Span` objects of
|
||||
length 1) and resolves them into `Span` objects of arbitrary length. The initial
|
||||
use case is as a post-processing step on word-level
|
||||
[coreference resolution](/api/coref). The input and output keys used to store
|
||||
`Span` objects are configurable.
|
||||
|
||||
## Assigned Attributes {#assigned-attributes}
|
||||
|
||||
Predictions will be saved to `Doc.spans` as [`SpanGroup`s](/api/spangroup).
|
||||
|
||||
Input token spans will be read in using an input prefix, by default
|
||||
`"coref_head_clusters"`, and output spans will be saved using an output prefix
|
||||
(default `"coref_clusters"`) plus a serial number starting from one. The
|
||||
prefixes are configurable.
|
||||
|
||||
| Location | Value |
|
||||
| ------------------------------------------------- | ------------------------------------------------------------------------- |
|
||||
| `Doc.spans[output_prefix + "_" + cluster_number]` | One group of predicted spans. Cluster number starts from 1. ~~SpanGroup~~ |
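
A sketch of how the two prefixes fit together when the resolver is paired with
the [`CoreferenceResolver`](/api/coref), assuming trained components and a
coreference component configured to write to the resolver's default input
prefix `coref_head_clusters`:

```python
doc = nlp("Sara went to the shop. She bought bread.")
# Token-level clusters written by the coreference component (assumed prefix):
print([key for key in doc.spans if key.startswith("coref_head_clusters")])
# Full spans reconstructed by the span resolver under the output prefix:
print([key for key in doc.spans if key.startswith("coref_clusters")])
```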
|
||||
|
||||
## Config and implementation {#config}
|
||||
|
||||
The default config is defined by the pipeline component factory and describes
|
||||
how the component should be configured. You can override its settings via the
|
||||
`config` argument on [`nlp.add_pipe`](/api/language#add_pipe) or in your
|
||||
[`config.cfg` for training](/usage/training#config). See the
|
||||
[model architectures](/api/architectures#coref-architectures) documentation for
|
||||
details on the architectures and their arguments and hyperparameters.
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> from spacy_experimental.coref.span_resolver_component import DEFAULT_SPAN_RESOLVER_MODEL
|
||||
> from spacy_experimental.coref.coref_util import DEFAULT_CLUSTER_PREFIX, DEFAULT_CLUSTER_HEAD_PREFIX
|
||||
> config = {
|
||||
> "model": DEFAULT_SPAN_RESOLVER_MODEL,
|
||||
> "input_prefix": DEFAULT_CLUSTER_HEAD_PREFIX,
|
||||
> "output_prefix": DEFAULT_CLUSTER_PREFIX,
|
||||
> }
|
||||
> nlp.add_pipe("experimental_span_resolver", config=config)
|
||||
> ```
|
||||
|
||||
| Setting | Description |
|
||||
| --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| `model` | The [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. Defaults to [SpanResolver](/api/architectures#SpanResolver). ~~Model~~ |
|
||||
| `input_prefix` | The prefix to use for input `SpanGroup`s. Defaults to `coref_head_clusters`. ~~str~~ |
|
||||
| `output_prefix` | The prefix for predicted `SpanGroup`s. Defaults to `coref_clusters`. ~~str~~ |
|
||||
|
||||
## SpanResolver.\_\_init\_\_ {#init tag="method"}
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> # Construction via add_pipe with default model
|
||||
> span_resolver = nlp.add_pipe("experimental_span_resolver")
|
||||
>
|
||||
> # Construction via add_pipe with custom model
|
||||
> config = {"model": {"@architectures": "my_span_resolver.v1"}}
|
||||
> span_resolver = nlp.add_pipe("experimental_span_resolver", config=config)
|
||||
>
|
||||
> # Construction from class
|
||||
> from spacy_experimental.coref.span_resolver_component import SpanResolver
|
||||
> span_resolver = SpanResolver(nlp.vocab, model)
|
||||
> ```
|
||||
|
||||
Create a new pipeline instance. In your application, you would normally use a
|
||||
shortcut for this and instantiate the component using its string name and
|
||||
[`nlp.add_pipe`](/api/language#add_pipe).
|
||||
|
||||
| Name | Description |
|
||||
| --------------- | --------------------------------------------------------------------------------------------------- |
|
||||
| `vocab` | The shared vocabulary. ~~Vocab~~ |
|
||||
| `model` | The [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. ~~Model~~ |
|
||||
| `name` | String name of the component instance. Used to add entries to the `losses` during training. ~~str~~ |
|
||||
| _keyword-only_ | |
|
||||
| `input_prefix` | The prefix to use for input `SpanGroup`s. Defaults to `coref_head_clusters`. ~~str~~ |
|
||||
| `output_prefix` | The prefix for predicted `SpanGroup`s. Defaults to `coref_clusters`. ~~str~~ |
|
||||
|
||||
## SpanResolver.\_\_call\_\_ {#call tag="method"}
|
||||
|
||||
Apply the pipe to one document. The document is modified in place and returned.
|
||||
This usually happens under the hood when the `nlp` object is called on a text
|
||||
and all pipeline components are applied to the `Doc` in order. Both
|
||||
[`__call__`](#call) and [`pipe`](#pipe) delegate to the [`predict`](#predict)
|
||||
and [`set_annotations`](#set_annotations) methods.
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> doc = nlp("This is a sentence.")
|
||||
> span_resolver = nlp.add_pipe("experimental_span_resolver")
|
||||
> # This usually happens under the hood
|
||||
> processed = span_resolver(doc)
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| ----------- | -------------------------------- |
|
||||
| `doc` | The document to process. ~~Doc~~ |
|
||||
| **RETURNS** | The processed document. ~~Doc~~ |
|
||||
|
||||
## SpanResolver.pipe {#pipe tag="method"}
|
||||
|
||||
Apply the pipe to a stream of documents. This usually happens under the hood
|
||||
when the `nlp` object is called on a text and all pipeline components are
|
||||
applied to the `Doc` in order. Both [`__call__`](/api/span-resolver#call) and
|
||||
[`pipe`](/api/span-resolver#pipe) delegate to the
|
||||
[`predict`](/api/span-resolver#predict) and
|
||||
[`set_annotations`](/api/span-resolver#set_annotations) methods.
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> span_resolver = nlp.add_pipe("experimental_span_resolver")
|
||||
> for doc in span_resolver.pipe(docs, batch_size=50):
|
||||
> pass
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| -------------- | ------------------------------------------------------------- |
|
||||
| `stream` | A stream of documents. ~~Iterable[Doc]~~ |
|
||||
| _keyword-only_ | |
|
||||
| `batch_size` | The number of documents to buffer. Defaults to `128`. ~~int~~ |
|
||||
| **YIELDS** | The processed documents in order. ~~Doc~~ |
|
||||
|
||||
## SpanResolver.initialize {#initialize tag="method"}
|
||||
|
||||
Initialize the component for training. `get_examples` should be a function that
|
||||
returns an iterable of [`Example`](/api/example) objects. **At least one example
|
||||
should be supplied.** The data examples are used to **initialize the model** of
|
||||
the component and can either be the full training data or a representative
|
||||
sample. Initialization includes validating the network,
|
||||
[inferring missing shapes](https://thinc.ai/docs/usage-models#validation) and
|
||||
setting up the label scheme based on the data. This method is typically called
|
||||
by [`Language.initialize`](/api/language#initialize).
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> span_resolver = nlp.add_pipe("experimental_span_resolver")
|
||||
> span_resolver.initialize(lambda: examples, nlp=nlp)
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| -------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `get_examples` | Function that returns gold-standard annotations in the form of [`Example`](/api/example) objects. Must contain at least one `Example`. ~~Callable[[], Iterable[Example]]~~ |
|
||||
| _keyword-only_ | |
|
||||
| `nlp` | The current `nlp` object. Defaults to `None`. ~~Optional[Language]~~ |
|
||||
|
||||
## SpanResolver.predict {#predict tag="method"}
|
||||
|
||||
Apply the component's model to a batch of [`Doc`](/api/doc) objects, without
modifying them. Predictions are returned as a list of `MentionClusters`, one
for each input `Doc`. A `MentionClusters` instance is a list of lists of pairs
of `int`s, where each inner list corresponds to one input `SpanGroup` and each
pair of `int`s gives the token indices of a resolved span.
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> span_resolver = nlp.add_pipe("experimental_span_resolver")
|
||||
> spans = span_resolver.predict([doc1, doc2])
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| ----------- | ------------------------------------------------------------- |
|
||||
| `docs` | The documents to predict. ~~Iterable[Doc]~~ |
|
||||
| **RETURNS** | The predicted spans for the `Doc`s. ~~List[MentionClusters]~~ |
|
||||
|
||||
## SpanResolver.set_annotations {#set_annotations tag="method"}
|
||||
|
||||
Modify a batch of documents, saving predictions using the output prefix in
|
||||
`Doc.spans`.
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> span_resolver = nlp.add_pipe("experimental_span_resolver")
|
||||
> spans = span_resolver.predict([doc1, doc2])
|
||||
> span_resolver.set_annotations([doc1, doc2], spans)
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| ------- | ------------------------------------------------------------- |
|
||||
| `docs` | The documents to modify. ~~Iterable[Doc]~~ |
|
||||
| `spans` | The predicted spans for the `docs`. ~~List[MentionClusters]~~ |
|
||||
|
||||
## SpanResolver.update {#update tag="method"}
|
||||
|
||||
Learn from a batch of [`Example`](/api/example) objects. Delegates to
|
||||
[`predict`](/api/span-resolver#predict).
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> span_resolver = nlp.add_pipe("experimental_span_resolver")
|
||||
> optimizer = nlp.initialize()
|
||||
> losses = span_resolver.update(examples, sgd=optimizer)
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| -------------- | ------------------------------------------------------------------------------------------------------------------------ |
|
||||
| `examples` | A batch of [`Example`](/api/example) objects to learn from. ~~Iterable[Example]~~ |
|
||||
| _keyword-only_ | |
|
||||
| `drop` | The dropout rate. ~~float~~ |
|
||||
| `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ |
|
||||
| `losses` | Optional record of the loss during training. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ |
|
||||
| **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ |
|
||||
|
||||
## SpanResolver.create_optimizer {#create_optimizer tag="method"}
|
||||
|
||||
Create an optimizer for the pipeline component.
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> span_resolver = nlp.add_pipe("experimental_span_resolver")
|
||||
> optimizer = span_resolver.create_optimizer()
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| ----------- | ---------------------------- |
|
||||
| **RETURNS** | The optimizer. ~~Optimizer~~ |
|
||||
|
||||
## SpanResolver.use_params {#use_params tag="method, contextmanager"}
|
||||
|
||||
Modify the pipe's model, to use the given parameter values. At the end of the
|
||||
context, the original parameters are restored.
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> span_resolver = nlp.add_pipe("experimental_span_resolver")
|
||||
> with span_resolver.use_params(optimizer.averages):
|
||||
> span_resolver.to_disk("/best_model")
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| -------- | -------------------------------------------------- |
|
||||
| `params` | The parameter values to use in the model. ~~dict~~ |
|
||||
|
||||
## SpanResolver.to_disk {#to_disk tag="method"}
|
||||
|
||||
Serialize the pipe to disk.
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> span_resolver = nlp.add_pipe("experimental_span_resolver")
|
||||
> span_resolver.to_disk("/path/to/span_resolver")
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| `path` | A path to a directory, which will be created if it doesn't exist. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ |
|
||||
| _keyword-only_ | |
|
||||
| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ |
|
||||
|
||||
## SpanResolver.from_disk {#from_disk tag="method"}
|
||||
|
||||
Load the pipe from disk. Modifies the object in place and returns it.
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> span_resolver = nlp.add_pipe("experimental_span_resolver")
|
||||
> span_resolver.from_disk("/path/to/span_resolver")
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| -------------- | ----------------------------------------------------------------------------------------------- |
|
||||
| `path` | A path to a directory. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ |
|
||||
| _keyword-only_ | |
|
||||
| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ |
|
||||
| **RETURNS** | The modified `SpanResolver` object. ~~SpanResolver~~ |
|
||||
|
||||
## SpanResolver.to_bytes {#to_bytes tag="method"}
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> span_resolver = nlp.add_pipe("experimental_span_resolver")
|
||||
> span_resolver_bytes = span_resolver.to_bytes()
|
||||
> ```
|
||||
|
||||
Serialize the pipe to a bytestring.
|
||||
|
||||
| Name | Description |
|
||||
| -------------- | ------------------------------------------------------------------------------------------- |
|
||||
| _keyword-only_ | |
|
||||
| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ |
|
||||
| **RETURNS** | The serialized form of the `SpanResolver` object. ~~bytes~~ |
|
||||
|
||||
## SpanResolver.from_bytes {#from_bytes tag="method"}
|
||||
|
||||
Load the pipe from a bytestring. Modifies the object in place and returns it.
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> span_resolver_bytes = span_resolver.to_bytes()
|
||||
> span_resolver = nlp.add_pipe("experimental_span_resolver")
|
||||
> span_resolver.from_bytes(span_resolver_bytes)
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| -------------- | ------------------------------------------------------------------------------------------- |
|
||||
| `bytes_data` | The data to load from. ~~bytes~~ |
|
||||
| _keyword-only_ | |
|
||||
| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ |
|
||||
| **RETURNS** | The `SpanResolver` object. ~~SpanResolver~~ |
|
||||
|
||||
## Serialization fields {#serialization-fields}
|
||||
|
||||
During serialization, spaCy will export several data fields used to restore
|
||||
different aspects of the object. If needed, you can exclude them from
|
||||
serialization by passing in the string names via the `exclude` argument.
|
||||
|
||||
> #### Example
|
||||
>
|
||||
> ```python
|
||||
> data = span_resolver.to_disk("/path", exclude=["vocab"])
|
||||
> ```
|
||||
|
||||
| Name | Description |
|
||||
| ------- | -------------------------------------------------------------- |
|
||||
| `vocab` | The shared [`Vocab`](/api/vocab). |
|
||||
| `cfg` | The config file. You usually don't want to exclude this. |
|
||||
| `model` | The binary model data. You usually don't want to exclude this. |
|
|
@ -148,6 +148,13 @@ skipped. You can also set `--force` to force re-running a command, or `--dry` to
|
|||
perform a "dry run" and see what would happen (without actually running the
|
||||
script).
|
||||
|
||||
Since spaCy v3.4.2, `spacy project run` checks your installed dependencies to
|
||||
verify that your environment is properly set up and aligns with the project's
|
||||
`requirements.txt`, if there is one. If missing or conflicting dependencies are
|
||||
detected, a corresponding warning is displayed. If you'd like to disable the
|
||||
dependency check, set `check_requirements: false` in your project's
|
||||
`project.yml`.
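
For example, the flag lives at the top level of the project file (a
hypothetical minimal `project.yml`):

```yaml
title: "My pipeline project"
# Disable the requirements.txt dependency check introduced in spaCy v3.4.2
check_requirements: false
```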
|
||||
|
||||
### 4. Run a workflow {#run-workfow}
|
||||
|
||||
> #### project.yml
|
||||
|
@ -226,26 +233,28 @@ pipelines.
|
|||
```yaml
|
||||
%%GITHUB_PROJECTS/pipelines/tagger_parser_ud/project.yml
|
||||
```
|
||||
|
||||
> #### Tip: Overriding variables on the CLI
|
||||
>
|
||||
> If you want to override one or more variables on the CLI and are not already specifying a
|
||||
> project directory, you need to add `.` as a placeholder:
|
||||
> If you want to override one or more variables on the CLI and are not already
|
||||
> specifying a project directory, you need to add `.` as a placeholder:
|
||||
>
|
||||
> ```
|
||||
> python -m spacy project run test . --vars.foo bar
|
||||
> ```
|
||||
|
||||
| Section | Description |
|
||||
| --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| `title` | An optional project title used in `--help` message and [auto-generated docs](#custom-docs). |
|
||||
| `description` | An optional project description used in [auto-generated docs](#custom-docs). |
|
||||
| `vars` | A dictionary of variables that can be referenced in paths, URLs and scripts and overriden on the CLI, just like [`config.cfg` variables](/usage/training#config-interpolation). For example, `${vars.name}` will use the value of the variable `name`. Variables need to be defined in the section `vars`, but can be a nested dict, so you're able to reference `${vars.model.name}`. |
|
||||
| `env` | A dictionary of variables, mapped to the names of environment variables that will be read in when running the project. For example, `${env.name}` will use the value of the environment variable defined as `name`. |
|
||||
| `directories` | An optional list of [directories](#project-files) that should be created in the project for assets, training outputs, metrics etc. spaCy will make sure that these directories always exist. |
|
||||
| `assets` | A list of assets that can be fetched with the [`project assets`](/api/cli#project-assets) command. `url` defines a URL or local path, `dest` is the destination file relative to the project directory, and an optional `checksum` ensures that an error is raised if the file's checksum doesn't match. Instead of `url`, you can also provide a `git` block with the keys `repo`, `branch` and `path`, to download from a Git repo. |
|
||||
| `workflows` | A dictionary of workflow names, mapped to a list of command names, to execute in order. Workflows can be run with the [`project run`](/api/cli#project-run) command. |
|
||||
| `commands` | A list of named commands. A command can define an optional help message (shown in the CLI when the user adds `--help`) and the `script`, a list of commands to run. The `deps` and `outputs` let you define the created file the command depends on and produces, respectively. This lets spaCy determine whether a command needs to be re-run because its dependencies or outputs changed. Commands can be run as part of a workflow, or separately with the [`project run`](/api/cli#project-run) command. |
|
||||
| `spacy_version` | Optional spaCy version range like `>=3.0.0,<3.1.0` that the project is compatible with. If it's loaded with an incompatible version, an error is raised when the project is loaded. |
|
||||
| Section | Description |
|
||||
| --------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| `title` | An optional project title used in `--help` message and [auto-generated docs](#custom-docs). |
|
||||
| `description` | An optional project description used in [auto-generated docs](#custom-docs). |
|
||||
| `vars` | A dictionary of variables that can be referenced in paths, URLs and scripts and overriden on the CLI, just like [`config.cfg` variables](/usage/training#config-interpolation). For example, `${vars.name}` will use the value of the variable `name`. Variables need to be defined in the section `vars`, but can be a nested dict, so you're able to reference `${vars.model.name}`. |
|
||||
| `env` | A dictionary of variables, mapped to the names of environment variables that will be read in when running the project. For example, `${env.name}` will use the value of the environment variable defined as `name`. |
|
||||
| `directories` | An optional list of [directories](#project-files) that should be created in the project for assets, training outputs, metrics etc. spaCy will make sure that these directories always exist. |
|
||||
| `assets` | A list of assets that can be fetched with the [`project assets`](/api/cli#project-assets) command. `url` defines a URL or local path, `dest` is the destination file relative to the project directory, and an optional `checksum` ensures that an error is raised if the file's checksum doesn't match. Instead of `url`, you can also provide a `git` block with the keys `repo`, `branch` and `path`, to download from a Git repo. |
|
||||
| `workflows` | A dictionary of workflow names, mapped to a list of command names, to execute in order. Workflows can be run with the [`project run`](/api/cli#project-run) command. |
|
||||
| `commands` | A list of named commands. A command can define an optional help message (shown in the CLI when the user adds `--help`) and the `script`, a list of commands to run. The `deps` and `outputs` let you define the created file the command depends on and produces, respectively. This lets spaCy determine whether a command needs to be re-run because its dependencies or outputs changed. Commands can be run as part of a workflow, or separately with the [`project run`](/api/cli#project-run) command. |
|
||||
| `spacy_version` | Optional spaCy version range like `>=3.0.0,<3.1.0` that the project is compatible with. If it's loaded with an incompatible version, an error is raised when the project is loaded. |
|
||||
| `check_requirements` <Tag variant="new">3.4.2</Tag> | A flag determining whether to verify that the installed dependencies align with the project's `requirements.txt`. Defaults to `true`. |
|
||||
|
||||
### Data assets {#data-assets}
|
||||
|
||||
|
|
|
@ -65,10 +65,10 @@ The English CNN pipelines have new word vectors:
|
|||
|
||||
| Package | Model Version | TAG | Parser LAS | NER F |
|
||||
| ----------------------------------------------- | ------------- | ---: | ---------: | ----: |
|
||||
| [`en_core_news_md`](/models/en#en_core_news_md) | v3.3.0 | 97.3 | 90.1 | 84.6 |
|
||||
| [`en_core_news_md`](/models/en#en_core_news_lg) | v3.4.0 | 97.2 | 90.3 | 85.5 |
|
||||
| [`en_core_news_lg`](/models/en#en_core_news_md) | v3.3.0 | 97.4 | 90.1 | 85.3 |
|
||||
| [`en_core_news_lg`](/models/en#en_core_news_lg) | v3.4.0 | 97.3 | 90.2 | 85.6 |
|
||||
| [`en_core_web_md`](/models/en#en_core_web_md) | v3.3.0 | 97.3 | 90.1 | 84.6 |
|
||||
| [`en_core_web_md`](/models/en#en_core_web_md) | v3.4.0 | 97.2 | 90.3 | 85.5 |
|
||||
| [`en_core_web_lg`](/models/en#en_core_web_lg) | v3.3.0 | 97.4 | 90.1 | 85.3 |
|
||||
| [`en_core_web_lg`](/models/en#en_core_web_lg) | v3.4.0 | 97.3 | 90.2 | 85.6 |
|
||||
|
||||
## Notes about upgrading from v3.3 {#upgrading}
|
||||
|
||||
|
|
|
@ -12,7 +12,6 @@
|
|||
{ "text": "New in v3.0", "url": "/usage/v3" },
|
||||
{ "text": "New in v3.1", "url": "/usage/v3-1" },
|
||||
{ "text": "New in v3.2", "url": "/usage/v3-2" },
|
||||
{ "text": "New in v3.2", "url": "/usage/v3-2" },
|
||||
{ "text": "New in v3.3", "url": "/usage/v3-3" },
|
||||
{ "text": "New in v3.4", "url": "/usage/v3-4" }
|
||||
]
|
||||
|
@ -95,6 +94,7 @@
|
|||
"label": "Pipeline",
|
||||
"items": [
|
||||
{ "text": "AttributeRuler", "url": "/api/attributeruler" },
|
||||
{ "text": "CoreferenceResolver", "url": "/api/coref" },
|
||||
{ "text": "DependencyParser", "url": "/api/dependencyparser" },
|
||||
{ "text": "EditTreeLemmatizer", "url": "/api/edittreelemmatizer" },
|
||||
{ "text": "EntityLinker", "url": "/api/entitylinker" },
|
||||
|
@ -105,6 +105,7 @@
|
|||
{ "text": "SentenceRecognizer", "url": "/api/sentencerecognizer" },
|
||||
{ "text": "Sentencizer", "url": "/api/sentencizer" },
|
||||
{ "text": "SpanCategorizer", "url": "/api/spancategorizer" },
|
||||
{ "text": "SpanResolver", "url": "/api/span-resolver" },
|
||||
{ "text": "SpanRuler", "url": "/api/spanruler" },
|
||||
{ "text": "Tagger", "url": "/api/tagger" },
|
||||
{ "text": "TextCategorizer", "url": "/api/textcategorizer" },
|
||||
|
|
|
@ -1,5 +1,62 @@
|
|||
{
|
||||
"resources": [
|
||||
{
|
||||
"id": "Zshot",
|
||||
"title": "Zshot",
|
||||
"slogan": "Zero and Few shot named entity & relationships recognition",
|
||||
"github": "ibm/zshot",
|
||||
"pip": "zshot",
|
||||
"code_example": [
|
||||
"import spacy",
|
||||
"from zshot import PipelineConfig, displacy",
|
||||
"from zshot.linker import LinkerRegen",
|
||||
"from zshot.mentions_extractor import MentionsExtractorSpacy",
|
||||
"from zshot.utils.data_models import Entity",
|
||||
"",
|
||||
"nlp = spacy.load('en_core_web_sm')",
|
||||
"# zero shot definition of entities",
|
||||
"nlp_config = PipelineConfig(",
|
||||
" mentions_extractor=MentionsExtractorSpacy(),",
|
||||
" linker=LinkerRegen(),",
|
||||
" entities=[",
|
||||
" Entity(name='Paris',",
|
||||
" description='Paris is located in northern central France, in a north-bending arc of the river Seine'),",
|
||||
" Entity(name='IBM',",
|
||||
" description='International Business Machines Corporation (IBM) is an American multinational technology corporation headquartered in Armonk, New York'),",
|
||||
" Entity(name='New York', description='New York is a city in U.S. state'),",
|
||||
" Entity(name='Florida', description='southeasternmost U.S. state'),",
|
||||
" Entity(name='American',",
|
||||
" description='American, something of, from, or related to the United States of America, commonly known as the United States or America'),",
|
||||
" Entity(name='Chemical formula',",
|
||||
" description='In chemistry, a chemical formula is a way of presenting information about the chemical proportions of atoms that constitute a particular chemical compound or molecul'),",
|
||||
" Entity(name='Acetamide',",
|
||||
" description='Acetamide (systematic name: ethanamide) is an organic compound with the formula CH3CONH2. It is the simplest amide derived from acetic acid. It finds some use as a plasticizer and as an industrial solvent.'),",
|
||||
" Entity(name='Armonk',",
|
||||
" description='Armonk is a hamlet and census-designated place (CDP) in the town of North Castle, located in Westchester County, New York, United States.'),",
|
||||
" Entity(name='Acetic Acid',",
|
||||
" description='Acetic acid, systematically named ethanoic acid, is an acidic, colourless liquid and organic compound with the chemical formula CH3COOH'),",
|
||||
" Entity(name='Industrial solvent',",
|
||||
" description='Acetamide (systematic name: ethanamide) is an organic compound with the formula CH3CONH2. It is the simplest amide derived from acetic acid. It finds some use as a plasticizer and as an industrial solvent.'),",
|
||||
" ]",
|
||||
")",
|
||||
"nlp.add_pipe('zshot', config=nlp_config, last=True)",
|
||||
"",
|
||||
"text = 'International Business Machines Corporation (IBM) is an American multinational technology corporation' \\",
|
||||
" ' headquartered in Armonk, New York, with operations in over 171 countries.'",
|
||||
"",
|
||||
"doc = nlp(text)",
|
||||
"displacy.serve(doc, style='ent')"
|
||||
],
|
||||
"thumb": "https://ibm.github.io/zshot/img/graph.png",
|
||||
"url": "https://ibm.github.io/zshot/",
|
||||
"author": "IBM Research",
|
||||
"author_links": {
|
||||
"github": "ibm",
|
||||
"twitter": "IBMResearch",
|
||||
"website": "https://research.ibm.com/labs/ireland/"
|
||||
},
|
||||
"category": ["scientific", "models", "research"]
|
||||
},
|
||||
{
|
||||
"id": "concepcy",
|
||||
"title": "concepCy",
|
||||
|
@ -2403,20 +2460,20 @@
|
|||
"import spacy",
|
||||
"from spacy_wordnet.wordnet_annotator import WordnetAnnotator ",
|
||||
"",
|
||||
"# Load an spacy model (supported models are \"es\" and \"en\") ",
|
||||
"nlp = spacy.load('en')",
|
||||
"# Spacy 3.x",
|
||||
"nlp.add_pipe(\"spacy_wordnet\", after='tagger', config={'lang': nlp.lang})",
|
||||
"# Spacy 2.x",
|
||||
"# Load a spaCy model (supported languages are \"es\" and \"en\") ",
|
||||
"nlp = spacy.load('en_core_web_sm')",
|
||||
"# spaCy 3.x",
|
||||
"nlp.add_pipe(\"spacy_wordnet\", after='tagger')",
|
||||
"# spaCy 2.x",
|
||||
"# nlp.add_pipe(WordnetAnnotator(nlp.lang), after='tagger')",
|
||||
"token = nlp('prices')[0]",
|
||||
"",
|
||||
"# wordnet object link spacy token with nltk wordnet interface by giving acces to",
|
||||
"# WordNet object links spaCy token with NLTK WordNet interface by giving access to",
|
||||
"# synsets and lemmas ",
|
||||
"token._.wordnet.synsets()",
|
||||
"token._.wordnet.lemmas()",
|
||||
"",
|
||||
"# And automatically tags with wordnet domains",
|
||||
"# And automatically add info about WordNet domains",
|
||||
"token._.wordnet.wordnet_domains()"
|
||||
],
|
||||
"author": "recognai",
|
||||
|
@ -3984,7 +4041,21 @@
|
|||
},
|
||||
"category": ["pipeline"],
|
||||
"tags": ["interpretation", "ja"]
|
||||
},
|
||||
{
|
||||
"id": "spacy-partial-tagger",
|
||||
"title": "spaCy - Partial Tagger",
|
||||
"slogan": "Sequence Tagger for Partially Annotated Dataset in spaCy",
|
||||
"description": "This is a library to build a CRF tagger with a partially annotated dataset in spaCy. You can build your own tagger only from dictionary.",
|
||||
"github": "doccano/spacy-partial-tagger",
|
||||
"pip": "spacy-partial-tagger",
|
||||
"category": ["pipeline", "training"],
|
||||
"author": "Yasufumi Taniguchi",
|
||||
"author_links": {
|
||||
"github": "yasufumy"
|
||||
}
|
||||
}
|
||||
|
||||
],
|
||||
|
||||
"categories": [