Merge branch 'v4' into rename-islandic-and-multi-lang-code

Commit 1092dccd1a by thomashacker, 2023-01-02 11:34:58 +01:00
128 changed files with 3359 additions and 2014 deletions

View File

@@ -52,12 +52,17 @@ steps:
       python -W error -c "import spacy"
     displayName: "Test import"
 
-  - script: |
-      python -m spacy download ca_core_news_sm
-      python -m spacy download ca_core_news_md
-      python -c "import spacy; nlp=spacy.load('ca_core_news_sm'); doc=nlp('test')"
-    displayName: 'Test download CLI'
-    condition: eq(variables['python_version'], '3.8')
+  # - script: |
+  #     python -m spacy download ca_core_news_sm
+  #     python -m spacy download ca_core_news_md
+  #     python -c "import spacy; nlp=spacy.load('ca_core_news_sm'); doc=nlp('test')"
+  #   displayName: 'Test download CLI'
+  #   condition: eq(variables['python_version'], '3.8')
+  #
+  # - script: |
+  #     python -W error -c "import ca_core_news_sm; nlp = ca_core_news_sm.load(); doc=nlp('test')"
+  #   displayName: 'Test no warnings on load (#11713)'
+  #   condition: eq(variables['python_version'], '3.8')
 
   - script: |
       python -m spacy convert extra/example_data/ner_example_data/ner-token-per-line-conll2003.json .
@@ -81,17 +86,17 @@ steps:
     displayName: 'Test train CLI'
     condition: eq(variables['python_version'], '3.8')
 
-  - script: |
-      python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_sm'}; config.to_disk('ner_source_sm.cfg')"
-      PYTHONWARNINGS="error,ignore::DeprecationWarning" python -m spacy assemble ner_source_sm.cfg output_dir
-    displayName: 'Test assemble CLI'
-    condition: eq(variables['python_version'], '3.8')
-
-  - script: |
-      python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_md'}; config.to_disk('ner_source_md.cfg')"
-      python -m spacy assemble ner_source_md.cfg output_dir 2>&1 | grep -q W113
-    displayName: 'Test assemble CLI vectors warning'
-    condition: eq(variables['python_version'], '3.8')
+  # - script: |
+  #     python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_sm'}; config.to_disk('ner_source_sm.cfg')"
+  #     PYTHONWARNINGS="error,ignore::DeprecationWarning" python -m spacy assemble ner_source_sm.cfg output_dir
+  #   displayName: 'Test assemble CLI'
+  #   condition: eq(variables['python_version'], '3.8')
+  #
+  # - script: |
+  #     python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_md'}; config.to_disk('ner_source_md.cfg')"
+  #     python -m spacy assemble ner_source_md.cfg output_dir 2>&1 | grep -q W113
+  #   displayName: 'Test assemble CLI vectors warning'
+  #   condition: eq(variables['python_version'], '3.8')
 
   - script: |
       python -m pip install -U -r requirements.txt
@@ -102,7 +107,7 @@ steps:
     displayName: "Run CPU tests"
 
   - script: |
-      python -m pip install --pre thinc-apple-ops
+      python -m pip install 'spacy[apple]'
      python -m pytest --pyargs spacy
     displayName: "Run CPU tests with thinc-apple-ops"
     condition: and(startsWith(variables['imageName'], 'macos'), eq(variables['python.version'], '3.11'))

View File

@@ -15,11 +15,11 @@ jobs:
   action:
     runs-on: ubuntu-latest
     steps:
-      - uses: dessant/lock-threads@v3
+      - uses: dessant/lock-threads@v4
        with:
          process-only: 'issues'
          issue-inactive-days: '30'
          issue-comment: >
            This thread has been automatically locked since there
            has not been any recent activity after it was closed.
            Please open a new issue for related bugs.

View File

@@ -19,6 +19,8 @@ jobs:
       - uses: actions/checkout@v3
       - uses: actions/setup-python@v4
+        with:
+          python-version: '3.10'
       - name: Install Bernadette app dependency and send an alert
         env:
           SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}

View File

@@ -5,7 +5,7 @@ repos:
       - id: black
         language_version: python3.7
         additional_dependencies: ['click==8.0.4']
-  - repo: https://gitlab.com/pycqa/flake8
+  - repo: https://github.com/pycqa/flake8
     rev: 5.0.4
     hooks:
       - id: flake8

View File

@@ -14,7 +14,7 @@ parsing, **named entity recognition**, **text classification** and more,
 multi-task learning with pretrained **transformers** like BERT, as well as a
 production-ready [**training system**](https://spacy.io/usage/training) and easy
 model packaging, deployment and workflow management. spaCy is commercial
-open-source software, released under the MIT license.
+open-source software, released under the [MIT license](https://github.com/explosion/spaCy/blob/master/LICENSE).
 
 💫 **Version 3.4 out now!**
 [Check out the release notes here.](https://github.com/explosion/spaCy/releases)
@@ -46,6 +46,7 @@ open-source software, released under the MIT license.
 | 🛠 **[Changelog]** | Changes and version history. |
 | 💝 **[Contribute]** | How to contribute to the spaCy project and code base. |
 | <a href="https://explosion.ai/spacy-tailored-pipelines"><img src="https://user-images.githubusercontent.com/13643239/152853098-1c761611-ccb0-4ec6-9066-b234552831fe.png" width="125" alt="spaCy Tailored Pipelines"/></a> | Get a custom spaCy pipeline, tailor-made for your NLP problem by spaCy's core developers. Streamlined, production-ready, predictable and maintainable. Start by completing our 5-minute questionnaire to tell us what you need and we'll be in touch! **[Learn more &rarr;](https://explosion.ai/spacy-tailored-pipelines)** |
+| <a href="https://explosion.ai/spacy-tailored-analysis"><img src="https://user-images.githubusercontent.com/1019791/206151300-b00cd189-e503-4797-aa1e-1bb6344062c5.png" width="125" alt="spaCy Tailored Pipelines"/></a> | Bespoke advice for problem solving, strategy and analysis for applied NLP projects. Services include data strategy, code reviews, pipeline design and annotation coaching. Curious? Fill in our 5-minute questionnaire to tell us what you need and we'll be in touch! **[Learn more &rarr;](https://explosion.ai/spacy-tailored-analysis)** |
 
 [spacy 101]: https://spacy.io/usage/spacy-101
 [new in v3.0]: https://spacy.io/usage/v3
@@ -59,6 +60,7 @@ open-source software, released under the MIT license.
 [changelog]: https://spacy.io/usage#changelog
 [contribute]: https://github.com/explosion/spaCy/blob/master/CONTRIBUTING.md
 
 ## 💬 Where to ask questions
 
 The spaCy project is maintained by the [spaCy team](https://explosion.ai/about).

View File

@@ -41,7 +41,7 @@ jobs:
     matrix:
       # We're only running one platform per Python version to speed up builds
       Python36Linux:
-        imageName: "ubuntu-latest"
+        imageName: "ubuntu-20.04"
         python.version: "3.6"
       # Python36Windows:
       #   imageName: "windows-latest"
@@ -50,7 +50,7 @@ jobs:
       #   imageName: "macos-latest"
       #   python.version: "3.6"
       # Python37Linux:
-      #   imageName: "ubuntu-latest"
+      #   imageName: "ubuntu-20.04"
       #   python.version: "3.7"
       Python37Windows:
         imageName: "windows-latest"
@@ -87,13 +87,13 @@ jobs:
       #   python.version: "3.10"
       Python311Linux:
         imageName: 'ubuntu-latest'
-        python.version: '3.11.0'
+        python.version: '3.11'
       Python311Windows:
         imageName: 'windows-latest'
-        python.version: '3.11.0'
+        python.version: '3.11'
       Python311Mac:
         imageName: 'macos-latest'
-        python.version: '3.11.0'
+        python.version: '3.11'
     maxParallel: 4
   pool:
     vmImage: $(imageName)

View File

@@ -5,4 +5,5 @@ numpy==1.17.3; python_version=='3.8' and platform_machine!='aarch64'
 numpy==1.19.2; python_version=='3.8' and platform_machine=='aarch64'
 numpy==1.19.3; python_version=='3.9'
 numpy==1.21.3; python_version=='3.10'
-numpy; python_version>='3.11'
+numpy==1.23.2; python_version=='3.11'
+numpy; python_version>='3.12'

View File

@@ -5,7 +5,7 @@ requires = [
     "cymem>=2.0.2,<2.1.0",
     "preshed>=3.0.2,<3.1.0",
     "murmurhash>=0.28.0,<1.1.0",
-    "thinc>=8.1.0,<8.2.0",
+    "thinc>=9.0.0.dev1,<9.1.0",
     "numpy>=1.15.0",
 ]
 build-backend = "setuptools.build_meta"

View File

@@ -3,14 +3,15 @@ spacy-legacy>=3.0.10,<3.1.0
 spacy-loggers>=1.0.0,<2.0.0
 cymem>=2.0.2,<2.1.0
 preshed>=3.0.2,<3.1.0
-thinc>=8.1.0,<8.2.0
+thinc>=9.0.0.dev1,<9.1.0
 ml_datasets>=0.2.0,<0.3.0
 murmurhash>=0.28.0,<1.1.0
-wasabi>=0.9.1,<1.1.0
+wasabi>=0.9.1,<1.2.0
 srsly>=2.4.3,<3.0.0
 catalogue>=2.0.6,<2.1.0
-typer>=0.3.0,<0.5.0
-pathy>=0.3.5
+typer>=0.3.0,<0.8.0
+pathy>=0.10.0
+smart-open>=5.2.1,<7.0.0
 # Third party dependencies
 numpy>=1.15.0
 requests>=2.13.0,<3.0.0
@@ -30,7 +31,7 @@ pytest-timeout>=1.3.0,<2.0.0
 mock>=2.0.0,<3.0.0
 flake8>=3.8.0,<6.0.0
 hypothesis>=3.27.0,<7.0.0
-mypy>=0.980,<0.990; platform_machine != "aarch64" and python_version >= "3.7"
+mypy>=0.990,<0.1000; platform_machine != "aarch64" and python_version >= "3.7"
 types-dataclasses>=0.1.3; python_version < "3.7"
 types-mock>=0.1.1
 types-setuptools>=57.0.0

View File

@@ -38,13 +38,14 @@ install_requires =
     murmurhash>=0.28.0,<1.1.0
     cymem>=2.0.2,<2.1.0
     preshed>=3.0.2,<3.1.0
-    thinc>=8.1.0,<8.2.0
-    wasabi>=0.9.1,<1.1.0
+    thinc>=9.0.0.dev1,<9.1.0
+    wasabi>=0.9.1,<1.2.0
     srsly>=2.4.3,<3.0.0
     catalogue>=2.0.6,<2.1.0
     # Third-party dependencies
-    typer>=0.3.0,<0.5.0
-    pathy>=0.3.5
+    typer>=0.3.0,<0.8.0
+    pathy>=0.10.0
+    smart-open>=5.2.1,<7.0.0
     tqdm>=4.38.0,<5.0.0
     numpy>=1.15.0
     requests>=2.13.0,<3.0.0

View File

@@ -38,7 +38,6 @@ MOD_NAMES = [
     "spacy.pipeline.dep_parser",
     "spacy.pipeline._edit_tree_internals.edit_trees",
     "spacy.pipeline.morphologizer",
-    "spacy.pipeline.multitask",
     "spacy.pipeline.ner",
     "spacy.pipeline.pipe",
     "spacy.pipeline.trainable_pipe",
@@ -49,6 +48,7 @@ MOD_NAMES = [
     "spacy.pipeline._parser_internals.arc_eager",
     "spacy.pipeline._parser_internals.ner",
     "spacy.pipeline._parser_internals.nonproj",
+    "spacy.pipeline._parser_internals.search",
     "spacy.pipeline._parser_internals._state",
     "spacy.pipeline._parser_internals.stateclass",
     "spacy.pipeline._parser_internals.transition_system",
@@ -68,6 +68,7 @@ MOD_NAMES = [
     "spacy.matcher.dependencymatcher",
     "spacy.symbols",
     "spacy.vectors",
+    "spacy.tests.parser._search",
 ]
 COMPILE_OPTIONS = {
     "msvc": ["/Ox", "/EHsc"],

View File

@@ -1,6 +1,6 @@
 # fmt: off
 __title__ = "spacy"
-__version__ = "3.4.2"
+__version__ = "3.5.0"
 __download_url__ = "https://github.com/explosion/spacy-models/releases/download"
 __compatibility__ = "https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json"
 __projects__ = "https://github.com/explosion/projects"

View File

@@ -16,6 +16,7 @@ from .debug_config import debug_config  # noqa: F401
 from .debug_model import debug_model  # noqa: F401
 from .debug_diff import debug_diff  # noqa: F401
 from .evaluate import evaluate  # noqa: F401
+from .apply import apply  # noqa: F401
 from .convert import convert  # noqa: F401
 from .init_pipeline import init_pipeline_cli  # noqa: F401
 from .init_config import init_config, fill_config  # noqa: F401
@@ -27,6 +28,7 @@ from .project.dvc import project_update_dvc  # noqa: F401
 from .project.push import project_push  # noqa: F401
 from .project.pull import project_pull  # noqa: F401
 from .project.document import project_document  # noqa: F401
+from .find_threshold import find_threshold  # noqa: F401
 
 
 @app.command("link", no_args_is_help=True, deprecated=True, hidden=True)

View File

@@ -24,7 +24,7 @@ from ..errors import RENAMED_LANGUAGE_CODES
 from .. import about
 
 if TYPE_CHECKING:
-    from pathy import Pathy  # noqa: F401
+    from pathy import FluidPath  # noqa: F401
 
 SDIST_SUFFIX = ".tar.gz"
@@ -169,15 +169,15 @@ def load_project_config(
         sys.exit(1)
     validate_project_version(config)
     validate_project_commands(config)
+    if interpolate:
+        err = f"{PROJECT_FILE} validation error"
+        with show_validation_error(title=err, hint_fill=False):
+            config = substitute_project_variables(config, overrides)
     # Make sure directories defined in config exist
     for subdir in config.get("directories", []):
         dir_path = path / subdir
         if not dir_path.exists():
             dir_path.mkdir(parents=True)
-    if interpolate:
-        err = f"{PROJECT_FILE} validation error"
-        with show_validation_error(title=err, hint_fill=False):
-            config = substitute_project_variables(config, overrides)
     return config
@@ -342,7 +342,7 @@ def import_code(code_path: Optional[Union[Path, str]]) -> None:
         msg.fail(f"Couldn't load Python code: {code_path}", e, exits=1)
 
 
-def upload_file(src: Path, dest: Union[str, "Pathy"]) -> None:
+def upload_file(src: Path, dest: Union[str, "FluidPath"]) -> None:
     """Upload a file.
 
     src (Path): The source path.
@@ -350,13 +350,20 @@ def upload_file(src: Path, dest: Union[str, "Pathy"]) -> None:
     """
     import smart_open
 
+    # Create parent directories for local paths
+    if isinstance(dest, Path):
+        if not dest.parent.exists():
+            dest.parent.mkdir(parents=True)
+
     dest = str(dest)
     with smart_open.open(dest, mode="wb") as output_file:
         with src.open(mode="rb") as input_file:
             output_file.write(input_file.read())
 
 
-def download_file(src: Union[str, "Pathy"], dest: Path, *, force: bool = False) -> None:
+def download_file(
+    src: Union[str, "FluidPath"], dest: Path, *, force: bool = False
+) -> None:
     """Download a file using smart_open.
 
     url (str): The URL of the file.
@@ -369,7 +376,7 @@ def download_file(src: Union[str, "Pathy"], dest: Path, *, force: bool = False)
     if dest.exists() and not force:
         return None
     src = str(src)
-    with smart_open.open(src, mode="rb", ignore_ext=True) as input_file:
+    with smart_open.open(src, mode="rb", compression="disable") as input_file:
         with dest.open(mode="wb") as output_file:
             shutil.copyfileobj(input_file, output_file)
 
@@ -379,7 +386,7 @@ def ensure_pathy(path):
     slow and annoying Google Cloud warning)."""
     from pathy import Pathy  # noqa: F811
 
-    return Pathy(path)
+    return Pathy.fluid(path)
 
 
 def git_checkout(
@@ -586,6 +593,29 @@ def setup_gpu(use_gpu: int, silent=None) -> None:
         local_msg.info("To switch to GPU 0, use the option: --gpu-id 0")
 
 
+def walk_directory(path: Path, suffix: Optional[str] = None) -> List[Path]:
+    if not path.is_dir():
+        return [path]
+    paths = [path]
+    locs = []
+    seen = set()
+    for path in paths:
+        if str(path) in seen:
+            continue
+        seen.add(str(path))
+        if path.parts[-1].startswith("."):
+            continue
+        elif path.is_dir():
+            paths.extend(path.iterdir())
+        elif suffix is not None and not path.parts[-1].endswith(suffix):
+            continue
+        else:
+            locs.append(path)
+    # It's good to sort these, in case the ordering messes up cache.
+    locs.sort()
+    return locs
+
+
 def _format_number(number: Union[int, float], ndigits: int = 2) -> str:
     """Formats a number (float or int) rounding to `ndigits`, without truncating trailing 0s,
     as happens with `round(number, ndigits)`"""
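Note: the generalized walk_directory helper above replaces the converter-specific version that is removed from spacy/cli/convert.py further down in this commit. A minimal sketch of calling the shared helper, assuming this branch is installed; the corpus directory is purely illustrative:

from pathlib import Path
from spacy.cli._util import walk_directory

# Recursively collect all non-hidden .jsonl files under ./corpus
jsonl_files = walk_directory(Path("corpus"), suffix=".jsonl")
# With suffix=None, every non-hidden file is returned
all_files = walk_directory(Path("corpus"))
print(len(jsonl_files), len(all_files))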

spacy/cli/apply.py (new file, 143 additions)
View File

@@ -0,0 +1,143 @@
import tqdm
import srsly

from itertools import chain
from pathlib import Path
from typing import Optional, List, Iterable, cast, Union

from wasabi import msg

from ._util import app, Arg, Opt, setup_gpu, import_code, walk_directory

from ..tokens import Doc, DocBin
from ..vocab import Vocab
from ..util import ensure_path, load_model


path_help = """Location of the documents to predict on.
Can be a single file in .spacy format or a .jsonl file.
Files with other extensions are treated as single plain text documents.
If a directory is provided it is traversed recursively to grab
all files to be processed.
The files can be a mixture of .spacy, .jsonl and text files.
If .jsonl is provided the specified field is going
to be grabbed ("text" by default)."""

out_help = "Path to save the resulting .spacy file"
code_help = (
    "Path to Python file with additional " "code (registered functions) to be imported"
)
gold_help = "Use gold preprocessing provided in the .spacy files"
force_msg = (
    "The provided output file already exists. "
    "To force overwriting the output file, set the --force or -F flag."
)


DocOrStrStream = Union[Iterable[str], Iterable[Doc]]


def _stream_docbin(path: Path, vocab: Vocab) -> Iterable[Doc]:
    """
    Stream Doc objects from DocBin.
    """
    docbin = DocBin().from_disk(path)
    for doc in docbin.get_docs(vocab):
        yield doc


def _stream_jsonl(path: Path, field: str) -> Iterable[str]:
    """
    Stream "text" field from JSONL. If the field "text" is
    not found it raises error.
    """
    for entry in srsly.read_jsonl(path):
        if field not in entry:
            msg.fail(
                f"{path} does not contain the required '{field}' field.", exits=1
            )
        else:
            yield entry[field]


def _stream_texts(paths: Iterable[Path]) -> Iterable[str]:
    """
    Yields strings from text files in paths.
    """
    for path in paths:
        with open(path, "r") as fin:
            text = fin.read()
            yield text


@app.command("apply")
def apply_cli(
    # fmt: off
    model: str = Arg(..., help="Model name or path"),
    data_path: Path = Arg(..., help=path_help, exists=True),
    output_file: Path = Arg(..., help=out_help, dir_okay=False),
    code_path: Optional[Path] = Opt(None, "--code", "-c", help=code_help),
    text_key: str = Opt("text", "--text-key", "-tk", help="Key containing text string for JSONL"),
    force_overwrite: bool = Opt(False, "--force", "-F", help="Force overwriting the output file"),
    use_gpu: int = Opt(-1, "--gpu-id", "-g", help="GPU ID or -1 for CPU."),
    batch_size: int = Opt(1, "--batch-size", "-b", help="Batch size."),
    n_process: int = Opt(1, "--n-process", "-n", help="number of processors to use.")
):
    """
    Apply a trained pipeline to documents to get predictions.
    Expects a loadable spaCy pipeline and path to the data, which
    can be a directory or a file.
    The data files can be provided in multiple formats:
    1. .spacy files
    2. .jsonl files with a specified "field" to read the text from.
    3. Files with any other extension are assumed to be containing
       a single document.
    DOCS: https://spacy.io/api/cli#apply
    """
    data_path = ensure_path(data_path)
    output_file = ensure_path(output_file)
    code_path = ensure_path(code_path)
    if output_file.exists() and not force_overwrite:
        msg.fail(force_msg, exits=1)
    if not data_path.exists():
        msg.fail(f"Couldn't find data path: {data_path}", exits=1)
    import_code(code_path)
    setup_gpu(use_gpu)
    apply(data_path, output_file, model, text_key, batch_size, n_process)


def apply(
    data_path: Path,
    output_file: Path,
    model: str,
    json_field: str,
    batch_size: int,
    n_process: int,
):
    docbin = DocBin(store_user_data=True)
    paths = walk_directory(data_path)
    if len(paths) == 0:
        docbin.to_disk(output_file)
        msg.warn("Did not find data to process,"
                 f" {data_path} seems to be an empty directory.")
        return
    nlp = load_model(model)
    msg.good(f"Loaded model {model}")
    vocab = nlp.vocab
    streams: List[DocOrStrStream] = []
    text_files = []
    for path in paths:
        if path.suffix == ".spacy":
            streams.append(_stream_docbin(path, vocab))
        elif path.suffix == ".jsonl":
            streams.append(_stream_jsonl(path, json_field))
        else:
            text_files.append(path)
    if len(text_files) > 0:
        streams.append(_stream_texts(text_files))
    datagen = cast(DocOrStrStream, chain(*streams))
    for doc in tqdm.tqdm(nlp.pipe(datagen, batch_size=batch_size, n_process=n_process)):
        docbin.add(doc)
    if output_file.suffix == "":
        output_file = output_file.with_suffix(".spacy")
    docbin.to_disk(output_file)
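For context, a short sketch of driving the new command from Python rather than the CLI; the model name and paths are illustrative, and the keyword arguments simply mirror the apply() signature added above:

from pathlib import Path
from spacy.cli.apply import apply

apply(
    data_path=Path("raw_texts"),            # directory or single .spacy/.jsonl/text file
    output_file=Path("predictions.spacy"),
    model="en_core_web_sm",
    json_field="text",                      # field to read from .jsonl entries
    batch_size=16,
    n_process=1,
)

# The equivalent CLI invocation would be:
# python -m spacy apply en_core_web_sm raw_texts predictions.spacy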

View File

@@ -1,4 +1,4 @@
-from typing import Callable, Iterable, Mapping, Optional, Any, List, Union
+from typing import Callable, Iterable, Mapping, Optional, Any, Union
 from enum import Enum
 from pathlib import Path
 from wasabi import Printer
@@ -193,33 +193,6 @@ def autodetect_ner_format(input_data: str) -> Optional[str]:
     return None
 
 
-def walk_directory(path: Path, converter: str) -> List[Path]:
-    if not path.is_dir():
-        return [path]
-    paths = [path]
-    locs = []
-    seen = set()
-    for path in paths:
-        if str(path) in seen:
-            continue
-        seen.add(str(path))
-        if path.parts[-1].startswith("."):
-            continue
-        elif path.is_dir():
-            paths.extend(path.iterdir())
-        elif converter == "json" and not path.parts[-1].endswith("json"):
-            continue
-        elif converter == "conll" and not path.parts[-1].endswith("conll"):
-            continue
-        elif converter == "iob" and not path.parts[-1].endswith("iob"):
-            continue
-        else:
-            locs.append(path)
-    # It's good to sort these, in case the ordering messes up cache.
-    locs.sort()
-    return locs
-
-
 def verify_cli_args(
     msg: Printer,
     input_path: Path,

View File

@@ -13,6 +13,7 @@ from ._util import import_code, debug_cli, _format_number
 from ..training import Example, remove_bilu_prefix
 from ..training.initialize import get_sourced_components
 from ..schemas import ConfigSchemaTraining
+from ..pipeline import TrainablePipe
 from ..pipeline._parser_internals import nonproj
 from ..pipeline._parser_internals.nonproj import DELIMITER
 from ..pipeline import Morphologizer, SpanCategorizer
@@ -934,6 +935,7 @@ def _get_labels_from_model(nlp: Language, factory_name: str) -> Set[str]:
     labels: Set[str] = set()
     for pipe_name in pipe_names:
         pipe = nlp.get_pipe(pipe_name)
+        assert isinstance(pipe, TrainablePipe)
         labels.update(pipe.labels)
     return labels

View File

@@ -8,7 +8,6 @@ from ._util import app, Arg, Opt, WHEEL_SUFFIX, SDIST_SUFFIX
 from .. import about
 from ..util import is_package, get_minor_version, run_command
 from ..util import is_prerelease_version
-from ..errors import OLD_MODEL_SHORTCUTS
 
 
 @app.command(
@@ -61,12 +60,6 @@ def download(
         version = components[-1]
     else:
         model_name = model
-        if model in OLD_MODEL_SHORTCUTS:
-            msg.warn(
-                f"As of spaCy v3.0, shortcuts like '{model}' are deprecated. Please "
-                f"use the full pipeline package name '{OLD_MODEL_SHORTCUTS[model]}' instead."
-            )
-            model_name = OLD_MODEL_SHORTCUTS[model]
     compatibility = get_compatibility()
     version = get_version(model_name, compatibility)

spacy/cli/find_threshold.py (new file, 233 additions)
View File

@@ -0,0 +1,233 @@
import functools
import operator
from pathlib import Path
import logging
from typing import Optional, Tuple, Any, Dict, List

import numpy
import wasabi.tables

from ..pipeline import TextCategorizer, MultiLabel_TextCategorizer
from ..errors import Errors
from ..training import Corpus
from ._util import app, Arg, Opt, import_code, setup_gpu
from .. import util

_DEFAULTS = {
    "n_trials": 11,
    "use_gpu": -1,
    "gold_preproc": False,
}


@app.command(
    "find-threshold",
    context_settings={"allow_extra_args": False, "ignore_unknown_options": True},
)
def find_threshold_cli(
    # fmt: off
    model: str = Arg(..., help="Model name or path"),
    data_path: Path = Arg(..., help="Location of binary evaluation data in .spacy format", exists=True),
    pipe_name: str = Arg(..., help="Name of pipe to examine thresholds for"),
    threshold_key: str = Arg(..., help="Key of threshold attribute in component's configuration"),
    scores_key: str = Arg(..., help="Metric to optimize"),
    n_trials: int = Opt(_DEFAULTS["n_trials"], "--n_trials", "-n", help="Number of trials to determine optimal thresholds"),
    code_path: Optional[Path] = Opt(None, "--code", "-c", help="Path to Python file with additional code (registered functions) to be imported"),
    use_gpu: int = Opt(_DEFAULTS["use_gpu"], "--gpu-id", "-g", help="GPU ID or -1 for CPU"),
    gold_preproc: bool = Opt(_DEFAULTS["gold_preproc"], "--gold-preproc", "-G", help="Use gold preprocessing"),
    verbose: bool = Opt(False, "--silent", "-V", "-VV", help="Display more information for debugging purposes"),
    # fmt: on
):
    """
    Runs prediction trials for a trained model with varying tresholds to maximize
    the specified metric. The search space for the threshold is traversed linearly
    from 0 to 1 in `n_trials` steps. Results are displayed in a table on `stdout`
    (the corresponding API call to `spacy.cli.find_threshold.find_threshold()`
    returns all results).

    This is applicable only for components whose predictions are influenced by
    thresholds - e.g. `textcat_multilabel` and `spancat`, but not `textcat`. Note
    that the full path to the corresponding threshold attribute in the config has to
    be provided.

    DOCS: https://spacy.io/api/cli#find-threshold
    """

    util.logger.setLevel(logging.DEBUG if verbose else logging.INFO)
    import_code(code_path)
    find_threshold(
        model=model,
        data_path=data_path,
        pipe_name=pipe_name,
        threshold_key=threshold_key,
        scores_key=scores_key,
        n_trials=n_trials,
        use_gpu=use_gpu,
        gold_preproc=gold_preproc,
        silent=False,
    )


def find_threshold(
    model: str,
    data_path: Path,
    pipe_name: str,
    threshold_key: str,
    scores_key: str,
    *,
    n_trials: int = _DEFAULTS["n_trials"],  # type: ignore
    use_gpu: int = _DEFAULTS["use_gpu"],  # type: ignore
    gold_preproc: bool = _DEFAULTS["gold_preproc"],  # type: ignore
    silent: bool = True,
) -> Tuple[float, float, Dict[float, float]]:
    """
    Runs prediction trials for models with varying tresholds to maximize the specified metric.
    model (Union[str, Path]): Pipeline to evaluate. Can be a package or a path to a data directory.
    data_path (Path): Path to file with DocBin with docs to use for threshold search.
    pipe_name (str): Name of pipe to examine thresholds for.
    threshold_key (str): Key of threshold attribute in component's configuration.
    scores_key (str): Name of score to metric to optimize.
    n_trials (int): Number of trials to determine optimal thresholds.
    use_gpu (int): GPU ID or -1 for CPU.
    gold_preproc (bool): Whether to use gold preprocessing. Gold preprocessing helps the annotations align to the
        tokenization, and may result in sequences of more consistent length. However, it may reduce runtime accuracy due
        to train/test skew.
    silent (bool): Whether to print non-error-related output to stdout.
    RETURNS (Tuple[float, float, Dict[float, float]]): Best found threshold, the corresponding score, scores for all
        evaluated thresholds.
    """

    setup_gpu(use_gpu, silent=silent)
    data_path = util.ensure_path(data_path)
    if not data_path.exists():
        wasabi.msg.fail("Evaluation data not found", data_path, exits=1)
    nlp = util.load_model(model)

    if pipe_name not in nlp.component_names:
        raise AttributeError(
            Errors.E001.format(name=pipe_name, opts=nlp.component_names)
        )
    pipe = nlp.get_pipe(pipe_name)
    if not hasattr(pipe, "scorer"):
        raise AttributeError(Errors.E1045)

    if type(pipe) == TextCategorizer:
        wasabi.msg.warn(
            "The `textcat` component doesn't use a threshold as it's not applicable to the concept of "
            "exclusive classes. All thresholds will yield the same results."
        )

    if not silent:
        wasabi.msg.info(
            title=f"Optimizing for {scores_key} for component '{pipe_name}' with {n_trials} "
            f"trials."
        )

    # Load evaluation corpus.
    corpus = Corpus(data_path, gold_preproc=gold_preproc)
    dev_dataset = list(corpus(nlp))
    config_keys = threshold_key.split(".")

    def set_nested_item(
        config: Dict[str, Any], keys: List[str], value: float
    ) -> Dict[str, Any]:
        """Set item in nested dictionary. Adapted from https://stackoverflow.com/a/54138200.
        config (Dict[str, Any]): Configuration dictionary.
        keys (List[Any]): Path to value to set.
        value (float): Value to set.
        RETURNS (Dict[str, Any]): Updated dictionary.
        """
        functools.reduce(operator.getitem, keys[:-1], config)[keys[-1]] = value
        return config

    def filter_config(
        config: Dict[str, Any], keys: List[str], full_key: str
    ) -> Dict[str, Any]:
        """Filters provided config dictionary so that only the specified keys path remains.
        config (Dict[str, Any]): Configuration dictionary.
        keys (List[Any]): Path to value to set.
        full_key (str): Full user-specified key.
        RETURNS (Dict[str, Any]): Filtered dictionary.
        """
        if keys[0] not in config:
            wasabi.msg.fail(
                title=f"Failed to look up `{full_key}` in config: sub-key {[keys[0]]} not found.",
                text=f"Make sure you specified {[keys[0]]} correctly. The following sub-keys are available instead: "
                f"{list(config.keys())}",
                exits=1,
            )
        return {
            keys[0]: filter_config(config[keys[0]], keys[1:], full_key)
            if len(keys) > 1
            else config[keys[0]]
        }

    # Evaluate with varying threshold values.
    scores: Dict[float, float] = {}
    config_keys_full = ["components", pipe_name, *config_keys]
    table_col_widths = (10, 10)
    thresholds = numpy.linspace(0, 1, n_trials)
    print(wasabi.tables.row(["Threshold", f"{scores_key}"], widths=table_col_widths))
    for threshold in thresholds:
        # Reload pipeline with overrides specifying the new threshold.
        nlp = util.load_model(
            model,
            config=set_nested_item(
                filter_config(
                    nlp.config, config_keys_full, ".".join(config_keys_full)
                ).copy(),
                config_keys_full,
                threshold,
            ),
        )
        if hasattr(pipe, "cfg"):
            setattr(
                nlp.get_pipe(pipe_name),
                "cfg",
                set_nested_item(getattr(pipe, "cfg"), config_keys, threshold),
            )

        eval_scores = nlp.evaluate(dev_dataset)
        if scores_key not in eval_scores:
            wasabi.msg.fail(
                title=f"Failed to look up score `{scores_key}` in evaluation results.",
                text=f"Make sure you specified the correct value for `scores_key`. The following scores are "
                f"available: {list(eval_scores.keys())}",
                exits=1,
            )
        scores[threshold] = eval_scores[scores_key]

        if not isinstance(scores[threshold], (float, int)):
            wasabi.msg.fail(
                f"Returned score for key '{scores_key}' is not numeric. Threshold optimization only works for numeric "
                f"scores.",
                exits=1,
            )
        print(
            wasabi.row(
                [round(threshold, 3), round(scores[threshold], 3)],
                widths=table_col_widths,
            )
        )

    best_threshold = max(scores.keys(), key=(lambda key: scores[key]))

    # If all scores are identical, emit warning.
    if len(set(scores.values())) == 1:
        wasabi.msg.warn(
            title="All scores are identical. Verify that all settings are correct.",
            text=""
            if (
                not isinstance(pipe, MultiLabel_TextCategorizer)
                or scores_key in ("cats_macro_f", "cats_micro_f")
            )
            else "Use `cats_macro_f` or `cats_micro_f` when optimizing the threshold for `textcat_multilabel`.",
        )

    else:
        if not silent:
            print(
                f"\nBest threshold: {round(best_threshold, ndigits=4)} with {scores_key} value of {scores[best_threshold]}."
            )

    return best_threshold, scores[best_threshold], scores
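Likewise, a brief sketch of calling the new API directly instead of via the CLI; the pipeline name and data path are illustrative, and the keyword arguments follow the find_threshold() signature defined above:

from pathlib import Path
from spacy.cli.find_threshold import find_threshold

best_threshold, best_score, all_scores = find_threshold(
    model="my_textcat_model",          # package name or path to a trained pipeline
    data_path=Path("dev.spacy"),
    pipe_name="textcat_multilabel",
    threshold_key="threshold",
    scores_key="cats_macro_f",
    n_trials=11,
    silent=False,
)
print(best_threshold, best_score)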

View File

@@ -189,7 +189,11 @@ def convert_asset_url(url: str) -> str:
     RETURNS (str): The converted URL.
     """
     # If the asset URL is a regular GitHub URL it's likely a mistake
-    if re.match(r"(http(s?)):\/\/github.com", url) and "releases/download" not in url:
+    if (
+        re.match(r"(http(s?)):\/\/github.com", url)
+        and "releases/download" not in url
+        and "/raw/" not in url
+    ):
         converted = url.replace("github.com", "raw.githubusercontent.com")
         converted = re.sub(r"/(tree|blob)/", "/", converted)
         msg.warn(

View File

@@ -5,14 +5,17 @@ import hashlib
 import urllib.parse
 import tarfile
 from pathlib import Path
+from wasabi import msg
 
-from .._util import get_hash, get_checksum, download_file, ensure_pathy
-from ...util import make_tempdir, get_minor_version, ENV_VARS, check_bool_env_var
+from .._util import get_hash, get_checksum, upload_file, download_file
+from .._util import ensure_pathy, make_tempdir
+from ...util import get_minor_version, ENV_VARS, check_bool_env_var
 from ...git_info import GIT_VERSION
 from ... import about
+from ...errors import Errors
 
 if TYPE_CHECKING:
-    from pathy import Pathy  # noqa: F401
+    from pathy import FluidPath  # noqa: F401
 
 
 class RemoteStorage:
@@ -27,7 +30,7 @@ class RemoteStorage:
         self.url = ensure_pathy(url)
         self.compression = compression
 
-    def push(self, path: Path, command_hash: str, content_hash: str) -> "Pathy":
+    def push(self, path: Path, command_hash: str, content_hash: str) -> "FluidPath":
         """Compress a file or directory within a project and upload it to a remote
         storage. If an object exists at the full URL, nothing is done.
@@ -48,9 +51,7 @@ class RemoteStorage:
             mode_string = f"w:{self.compression}" if self.compression else "w"
             with tarfile.open(tar_loc, mode=mode_string) as tar_file:
                 tar_file.add(str(loc), arcname=str(path))
-            with tar_loc.open(mode="rb") as input_file:
-                with url.open(mode="wb") as output_file:
-                    output_file.write(input_file.read())
+            upload_file(tar_loc, url)
         return url
 
     def pull(
@@ -59,7 +60,7 @@ class RemoteStorage:
         *,
         command_hash: Optional[str] = None,
         content_hash: Optional[str] = None,
-    ) -> Optional["Pathy"]:
+    ) -> Optional["FluidPath"]:
         """Retrieve a file from the remote cache. If the file already exists,
         nothing is done.
@@ -84,7 +85,23 @@ class RemoteStorage:
             with tarfile.open(tar_loc, mode=mode_string) as tar_file:
                 # This requires that the path is added correctly, relative
                 # to root. This is how we set things up in push()
-                tar_file.extractall(self.root)
+
+                # Disallow paths outside the current directory for the tar
+                # file (CVE-2007-4559, directory traversal vulnerability)
+                def is_within_directory(directory, target):
+                    abs_directory = os.path.abspath(directory)
+                    abs_target = os.path.abspath(target)
+                    prefix = os.path.commonprefix([abs_directory, abs_target])
+                    return prefix == abs_directory
+
+                def safe_extract(tar, path):
+                    for member in tar.getmembers():
+                        member_path = os.path.join(path, member.name)
+                        if not is_within_directory(path, member_path):
+                            raise ValueError(Errors.E852)
+                    tar.extractall(path)
+
+                safe_extract(tar_file, self.root)
         return url
 
     def find(
@@ -93,25 +110,37 @@ class RemoteStorage:
         *,
         command_hash: Optional[str] = None,
         content_hash: Optional[str] = None,
-    ) -> Optional["Pathy"]:
+    ) -> Optional["FluidPath"]:
         """Find the best matching version of a file within the storage,
         or `None` if no match can be found. If both the creation and content hash
         are specified, only exact matches will be returned. Otherwise, the most
         recent matching file is preferred.
         """
         name = self.encode_name(str(path))
+        urls = []
         if command_hash is not None and content_hash is not None:
-            url = self.make_url(path, command_hash, content_hash)
+            url = self.url / name / command_hash / content_hash
             urls = [url] if url.exists() else []
         elif command_hash is not None:
-            urls = list((self.url / name / command_hash).iterdir())
+            if (self.url / name / command_hash).exists():
+                urls = list((self.url / name / command_hash).iterdir())
         else:
-            urls = list((self.url / name).iterdir())
-            if content_hash is not None:
-                urls = [url for url in urls if url.parts[-1] == content_hash]
+            if (self.url / name).exists():
+                for sub_dir in (self.url / name).iterdir():
+                    urls.extend(sub_dir.iterdir())
+                if content_hash is not None:
+                    urls = [url for url in urls if url.parts[-1] == content_hash]
+        if len(urls) >= 2:
+            try:
+                urls.sort(key=lambda x: x.stat().last_modified)  # type: ignore
+            except Exception:
+                msg.warn(
+                    "Unable to sort remote files by last modified. The file(s) "
+                    "pulled from the cache may not be the most recent."
+                )
         return urls[-1] if urls else None
 
-    def make_url(self, path: Path, command_hash: str, content_hash: str) -> "Pathy":
+    def make_url(self, path: Path, command_hash: str, content_hash: str) -> "FluidPath":
         """Construct a URL from a subpath, a creation hash and a content hash."""
         return self.url / self.encode_name(str(path)) / command_hash / content_hash
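To make the intent of the CVE-2007-4559 guard above concrete, here is a standalone sketch of the same path check outside the tarfile context; the directory names are illustrative only:

import os

def is_within_directory(directory: str, target: str) -> bool:
    # A tar member that resolves outside the extraction root is rejected
    abs_directory = os.path.abspath(directory)
    abs_target = os.path.abspath(target)
    return os.path.commonprefix([abs_directory, abs_target]) == abs_directory

print(is_within_directory("cache", os.path.join("cache", "model.bin")))      # True
print(is_within_directory("cache", os.path.join("cache", "..", "passwd")))   # False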

View File

@@ -53,6 +53,7 @@ def project_run(
     force: bool = False,
     dry: bool = False,
     capture: bool = False,
+    skip_requirements_check: bool = False,
 ) -> None:
     """Run a named script defined in the project.yml. If the script is part
     of the default pipeline (defined in the "run" section), DVC is used to
@@ -69,6 +70,7 @@ def project_run(
     sys.exit will be called with the return code. You should use capture=False
     when you want to turn over execution to the command, and capture=True
     when you want to run the command more like a function.
+    skip_requirements_check (bool): Whether to skip the requirements check.
     """
     config = load_project_config(project_dir, overrides=overrides)
     commands = {cmd["name"]: cmd for cmd in config.get("commands", [])}
@@ -76,9 +78,10 @@ def project_run(
     validate_subcommand(list(commands.keys()), list(workflows.keys()), subcommand)
 
     req_path = project_dir / "requirements.txt"
-    if config.get("check_requirements", True) and os.path.exists(req_path):
-        with req_path.open() as requirements_file:
-            _check_requirements([req.replace("\n", "") for req in requirements_file])
+    if not skip_requirements_check:
+        if config.get("check_requirements", True) and os.path.exists(req_path):
+            with req_path.open() as requirements_file:
+                _check_requirements([req.strip() for req in requirements_file])
 
     if subcommand in workflows:
         msg.info(f"Running workflow '{subcommand}'")
@@ -90,6 +93,7 @@ def project_run(
                 force=force,
                 dry=dry,
                 capture=capture,
+                skip_requirements_check=True,
             )
     else:
         cmd = commands[subcommand]
@@ -97,8 +101,8 @@ def project_run(
             if not (project_dir / dep).exists():
                 err = f"Missing dependency specified by command '{subcommand}': {dep}"
                 err_help = "Maybe you forgot to run the 'project assets' command or a previous step?"
-                err_kwargs = {"exits": 1} if not dry else {}
-                msg.fail(err, err_help, **err_kwargs)
+                err_exits = 1 if not dry else None
+                msg.fail(err, err_help, exits=err_exits)
         check_spacy_commit = check_bool_env_var(ENV_VARS.PROJECT_USE_GIT_VERSION)
         with working_dir(project_dir) as current_dir:
             msg.divider(subcommand)
@@ -338,6 +342,12 @@ def _check_requirements(requirements: List[str]) -> Tuple[bool, bool]:
             failed_pkgs_msgs.append(dnf.report())
         except pkg_resources.VersionConflict as vc:
             conflicting_pkgs_msgs.append(vc.report())
+        except Exception:
+            msg.warn(
+                f"Unable to check requirement: {req} "
+                "Checks are currently limited to requirement specifiers "
+                "(PEP 508)"
+            )
 
     if len(failed_pkgs_msgs) or len(conflicting_pkgs_msgs):
         msg.warn(
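A small sketch of how the new flag can be used when project_run() is invoked programmatically, for example from a wrapper script; the project path and command name are illustrative:

from pathlib import Path
from spacy.cli.project.run import project_run

# Skip the requirements.txt check explicitly, mirroring what nested workflow
# steps now do once the first command has already validated the environment.
project_run(Path("my_project"), "preprocess", skip_requirements_check=True)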

View File

@@ -1,7 +1,7 @@
 {# This is a template for training configs used for the quickstart widget in
 the docs and the init config command. It encodes various best practices and
 can help generate the best possible configuration, given a user's requirements. #}
-{%- set use_transformer = hardware != "cpu" -%}
+{%- set use_transformer = hardware != "cpu" and transformer_data -%}
 {%- set transformer = transformer_data[optimize] if use_transformer else {} -%}
 {%- set listener_components = ["tagger", "morphologizer", "parser", "ner", "textcat", "textcat_multilabel", "entity_linker", "spancat", "trainable_lemmatizer"] -%}
 [paths]

View File

@@ -37,6 +37,15 @@ bn:
     accuracy:
       name: sagorsarker/bangla-bert-base
       size_factor: 3
+ca:
+  word_vectors: null
+  transformer:
+    efficiency:
+      name: projecte-aina/roberta-base-ca-v2
+      size_factor: 3
+    accuracy:
+      name: projecte-aina/roberta-base-ca-v2
+      size_factor: 3
 da:
   word_vectors: da_core_news_lg
   transformer:

View File

@@ -90,6 +90,8 @@ dev_corpus = "corpora.dev"
 train_corpus = "corpora.train"
 # Optional callback before nlp object is saved to disk after training
 before_to_disk = null
+# Optional callback that is invoked at the start of each training step
+before_update = null
 
 [training.logger]
 @loggers = "spacy.ConsoleLogger.v1"
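A hedged sketch of wiring up the new before_update slot: the callback would be registered like other training callbacks and referenced from the config. The exact (nlp, info) signature with "step" and "epoch" keys is my assumption here, not something stated in this diff.

from typing import Any, Dict
import spacy

@spacy.registry.callbacks("log_step.v1")
def create_log_step():
    def before_update(nlp, info: Dict[str, Any]):
        # Assumed to be called at the start of every training step
        print(f"step={info.get('step')} epoch={info.get('epoch')}")
    return before_update

# In the training config:
# [training.before_update]
# @callbacks = "log_step.v1"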

View File

@@ -228,12 +228,13 @@ def parse_spans(doc: Doc, options: Dict[str, Any] = {}) -> Dict[str, Any]:
             "kb_id": span.kb_id_ if span.kb_id_ else "",
             "kb_url": kb_url_template.format(span.kb_id_) if kb_url_template else "#",
         }
-        for span in doc.spans[spans_key]
+        for span in doc.spans.get(spans_key, [])
     ]
     tokens = [token.text for token in doc]
 
     if not spans:
-        warnings.warn(Warnings.W117.format(spans_key=spans_key))
+        keys = list(doc.spans.keys())
+        warnings.warn(Warnings.W117.format(spans_key=spans_key, keys=keys))
     title = doc.user_data.get("title", None) if hasattr(doc, "user_data") else None
     settings = get_doc_settings(doc)
     return {
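With the doc.spans.get() change above, rendering spans for a missing key now degrades to warning W117 (which also lists the available keys, per the errors.py change below) instead of raising a KeyError. A minimal sketch, assuming a blank English pipeline:

import spacy
from spacy import displacy

nlp = spacy.blank("en")
doc = nlp("Welcome to the Bank of China.")
doc.spans["custom"] = [doc[3:6]]
# The default spans_key is "sc"; since it is missing, this warns and renders no
# spans, and the warning now includes: Available keys: ['custom']
html = displacy.render(doc, style="span")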

View File

@@ -131,13 +131,6 @@ class Warnings(metaclass=ErrorsWithCodes):
             "and make it independent. For example, `replace_listeners = "
             "[\"model.tok2vec\"]` See the documentation for details: "
             "https://spacy.io/usage/training#config-components-listeners")
-    W088 = ("The pipeline component {name} implements a `begin_training` "
-            "method, which won't be called by spaCy. As of v3.0, `begin_training` "
-            "has been renamed to `initialize`, so you likely want to rename the "
-            "component method. See the documentation for details: "
-            "https://spacy.io/api/language#initialize")
-    W089 = ("As of spaCy v3.0, the `nlp.begin_training` method has been renamed "
-            "to `nlp.initialize`.")
     W090 = ("Could not locate any {format} files in path '{path}'.")
     W091 = ("Could not clean/remove the temp directory at {dir}: {msg}.")
     W092 = ("Ignoring annotations for sentence starts, as dependency heads are set.")
@@ -199,7 +192,7 @@ class Warnings(metaclass=ErrorsWithCodes):
     W117 = ("No spans to visualize found in Doc object with spans_key: '{spans_key}'. If this is "
             "surprising to you, make sure the Doc was processed using a model "
             "that supports span categorization, and check the `doc.spans[spans_key]` "
-            "property manually if necessary.")
+            "property manually if necessary.\n\nAvailable keys: {keys}")
     W118 = ("Term '{term}' not found in glossary. It may however be explained in documentation "
             "for the corpora used to train the language. Please check "
             "`nlp.meta[\"sources\"]` for any relevant links.")
@@ -212,8 +205,8 @@ class Warnings(metaclass=ErrorsWithCodes):
     W121 = ("Attempting to trace non-existent method '{method}' in pipe '{pipe}'")
     W122 = ("Couldn't trace method '{method}' in pipe '{pipe}'. This can happen if the pipe class "
             "is a Cython extension type.")
-    W123 = ("Argument {arg} with value {arg_value} is used instead of {config_value} as specified in the config. Be "
-            "aware that this might affect other components in your pipeline.")
+    W123 = ("Argument `enable` with value {enable} does not contain all values specified in the config option "
+            "`enabled` ({enabled}). Be aware that this might affect other components in your pipeline.")
 
 
 class Errors(metaclass=ErrorsWithCodes):
@@ -250,8 +243,6 @@ class Errors(metaclass=ErrorsWithCodes):
             "https://spacy.io/usage/models")
     E011 = ("Unknown operator: '{op}'. Options: {opts}")
     E012 = ("Cannot add pattern for zero tokens to matcher.\nKey: {key}")
-    E016 = ("MultitaskObjective target should be function or one of: dep, "
-            "tag, ent, dep_tag_offset, ent_tag.")
     E017 = ("Can only add 'str' inputs to StringStore. Got type: {value_type}")
     E018 = ("Can't retrieve string for hash '{hash_value}'. This usually "
             "refers to an issue with the `Vocab` or `StringStore`.")
@@ -345,6 +336,11 @@ class Errors(metaclass=ErrorsWithCodes):
             "clear the existing vectors and resize the table.")
     E074 = ("Error interpreting compiled match pattern: patterns are expected "
             "to end with the attribute {attr}. Got: {bad_attr}.")
+    E079 = ("Error computing states in beam: number of predicted beams "
+            "({pbeams}) does not equal number of gold beams ({gbeams}).")
+    E080 = ("Duplicate state found in beam: {key}.")
+    E081 = ("Error getting gradient in beam: number of histories ({n_hist}) "
+            "does not equal number of losses ({losses}).")
     E082 = ("Error deprojectivizing parse: number of heads ({n_heads}), "
             "projective heads ({n_proj_heads}) and labels ({n_labels}) do not "
             "match.")
@@ -544,6 +540,10 @@ class Errors(metaclass=ErrorsWithCodes):
             "during training, make sure to include it in 'annotating components'")
 
     # New errors added in v3.x
+    E851 = ("The 'textcat' component labels should only have values of 0 or 1, "
+            "but found value of '{val}'.")
+    E852 = ("The tar file pulled from the remote attempted an unsafe path "
+            "traversal.")
     E853 = ("Unsupported component factory name '{name}'. The character '.' is "
             "not permitted in factory names.")
     E854 = ("Unable to set doc.ents. Check that the 'ents_filter' does not "
@@ -723,13 +723,6 @@ class Errors(metaclass=ErrorsWithCodes):
             "method in component '{name}'. If you want to use this "
             "method, make sure it's overwritten on the subclass.")
     E940 = ("Found NaN values in scores.")
-    E941 = ("Can't find model '{name}'. It looks like you're trying to load a "
-            "model from a shortcut, which is obsolete as of spaCy v3.0. To "
-            "load the model, use its full name instead:\n\n"
-            "nlp = spacy.load(\"{full}\")\n\nFor more details on the available "
-            "models, see the models directory: https://spacy.io/models. If you "
-            "want to create a blank model, use spacy.blank: "
-            "nlp = spacy.blank(\"{name}\")")
     E942 = ("Executing `after_{name}` callback failed. Expected the function to "
             "return an initialized nlp object but got: {value}. Maybe "
             "you forgot to return the modified object in your function?")
@@ -950,6 +943,7 @@ class Errors(metaclass=ErrorsWithCodes):
             "sure it's overwritten on the subclass.")
     E1046 = ("{cls_name} is an abstract class and cannot be instantiated. If you are looking for spaCy's default "
              "knowledge base, use `InMemoryLookupKB`.")
+    E1047 = ("`find_threshold()` only supports components with a `scorer` attribute.")
 
     # v4 error strings
     E4000 = ("Expected a Doc as input, but got: '{type}'")
@@ -969,7 +963,6 @@ OLD_MODEL_SHORTCUTS = {
 RENAMED_LANGUAGE_CODES = {"xx": "mul", "is": "isl"}
 
 
 class MatchPatternError(ValueError):
     def __init__(self, key, errors):
         """Custom error for validating match patterns.

View File

@ -15,7 +15,7 @@
STOP_WORDS = set( STOP_WORDS = set(
""" """
aan af al alle alles allebei alleen allen als altijd ander anders andere anderen aangaangde aangezien achter achterna aan af al alle alles allebei alleen allen als altijd ander anders andere anderen aangaande aangezien achter achterna
afgelopen aldus alhoewel anderzijds afgelopen aldus alhoewel anderzijds
ben bij bijna bijvoorbeeld behalve beide beiden beneden bent bepaald beter betere betreffende binnen binnenin boven ben bij bijna bijvoorbeeld behalve beide beiden beneden bent bepaald beter betere betreffende binnen binnenin boven

View File

@ -28,34 +28,39 @@ class RussianLemmatizer(Lemmatizer):
from pymorphy2 import MorphAnalyzer from pymorphy2 import MorphAnalyzer
except ImportError: except ImportError:
raise ImportError( raise ImportError(
"The Russian lemmatizer mode 'pymorphy2' requires the " "The lemmatizer mode 'pymorphy2' requires the "
"pymorphy2 library. Install it with: pip install pymorphy2" "pymorphy2 library and dictionaries. Install them with: "
"pip install pymorphy2"
"# for Ukrainian dictionaries:"
"pip install pymorphy2-dicts-uk"
) from None ) from None
if getattr(self, "_morph", None) is None: if getattr(self, "_morph", None) is None:
self._morph = MorphAnalyzer() self._morph = MorphAnalyzer(lang="ru")
elif mode == "pymorphy3": elif mode in {"pymorphy3", "pymorphy3_lookup"}:
try: try:
from pymorphy3 import MorphAnalyzer from pymorphy3 import MorphAnalyzer
except ImportError: except ImportError:
raise ImportError( raise ImportError(
"The Russian lemmatizer mode 'pymorphy3' requires the " "The lemmatizer mode 'pymorphy3' requires the "
"pymorphy3 library. Install it with: pip install pymorphy3" "pymorphy3 library and dictionaries. Install them with: "
"pip install pymorphy3"
"# for Ukrainian dictionaries:"
"pip install pymorphy3-dicts-uk"
) from None ) from None
if getattr(self, "_morph", None) is None: if getattr(self, "_morph", None) is None:
self._morph = MorphAnalyzer() self._morph = MorphAnalyzer(lang="ru")
super().__init__( super().__init__(
vocab, model, name, mode=mode, overwrite=overwrite, scorer=scorer vocab, model, name, mode=mode, overwrite=overwrite, scorer=scorer
) )
def pymorphy2_lemmatize(self, token: Token) -> List[str]: def _pymorphy_lemmatize(self, token: Token) -> List[str]:
string = token.text string = token.text
univ_pos = token.pos_ univ_pos = token.pos_
morphology = token.morph.to_dict() morphology = token.morph.to_dict()
if univ_pos == "PUNCT": if univ_pos == "PUNCT":
return [PUNCT_RULES.get(string, string)] return [PUNCT_RULES.get(string, string)]
if univ_pos not in ("ADJ", "DET", "NOUN", "NUM", "PRON", "PROPN", "VERB"): if univ_pos not in ("ADJ", "DET", "NOUN", "NUM", "PRON", "PROPN", "VERB"):
# Skip unchangeable pos return self._pymorphy_lookup_lemmatize(token)
return [string.lower()]
analyses = self._morph.parse(string) analyses = self._morph.parse(string)
filtered_analyses = [] filtered_analyses = []
for analysis in analyses: for analysis in analyses:
@ -63,8 +68,10 @@ class RussianLemmatizer(Lemmatizer):
# Skip suggested parse variant for unknown word for pymorphy # Skip suggested parse variant for unknown word for pymorphy
continue continue
analysis_pos, _ = oc2ud(str(analysis.tag)) analysis_pos, _ = oc2ud(str(analysis.tag))
if analysis_pos == univ_pos or ( if (
analysis_pos in ("NOUN", "PROPN") and univ_pos in ("NOUN", "PROPN") analysis_pos == univ_pos
or (analysis_pos in ("NOUN", "PROPN") and univ_pos in ("NOUN", "PROPN"))
or ((analysis_pos == "PRON") and (univ_pos == "DET"))
): ):
filtered_analyses.append(analysis) filtered_analyses.append(analysis)
if not len(filtered_analyses): if not len(filtered_analyses):
@ -107,15 +114,27 @@ class RussianLemmatizer(Lemmatizer):
dict.fromkeys([analysis.normal_form for analysis in filtered_analyses]) dict.fromkeys([analysis.normal_form for analysis in filtered_analyses])
) )
def pymorphy2_lookup_lemmatize(self, token: Token) -> List[str]: def _pymorphy_lookup_lemmatize(self, token: Token) -> List[str]:
string = token.text string = token.text
analyses = self._morph.parse(string) analyses = self._morph.parse(string)
if len(analyses) == 1: # often multiple forms would derive from the same normal form
return [analyses[0].normal_form] # thus check _unique_ normal forms
normal_forms = set([an.normal_form for an in analyses])
if len(normal_forms) == 1:
return [next(iter(normal_forms))]
return [string] return [string]
def pymorphy2_lemmatize(self, token: Token) -> List[str]:
return self._pymorphy_lemmatize(token)
def pymorphy2_lookup_lemmatize(self, token: Token) -> List[str]:
return self._pymorphy_lookup_lemmatize(token)
def pymorphy3_lemmatize(self, token: Token) -> List[str]: def pymorphy3_lemmatize(self, token: Token) -> List[str]:
return self.pymorphy2_lemmatize(token) return self._pymorphy_lemmatize(token)
def pymorphy3_lookup_lemmatize(self, token: Token) -> List[str]:
return self._pymorphy_lookup_lemmatize(token)
def oc2ud(oc_tag: str) -> Tuple[str, Dict[str, str]]: def oc2ud(oc_tag: str) -> Tuple[str, Dict[str, str]]:
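A brief usage sketch for the lemmatizer changes above (assumes `pymorphy3` and its dictionaries are installed; the blank pipeline and example text are illustrative only):

import spacy

nlp = spacy.blank("ru")
# "pymorphy3" does full morphological analysis; the new "pymorphy3_lookup"
# mode shares the lookup path, which now only returns a lemma when all
# analyses agree on a single unique normal form.
nlp.add_pipe("lemmatizer", config={"mode": "pymorphy3"})
nlp.initialize()
print([t.lemma_ for t in nlp("кошки спят")])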

View File

@ -61,6 +61,11 @@ for abbr in [
{ORTH: "2к23", NORM: "2023"}, {ORTH: "2к23", NORM: "2023"},
{ORTH: "2к24", NORM: "2024"}, {ORTH: "2к24", NORM: "2024"},
{ORTH: "2к25", NORM: "2025"}, {ORTH: "2к25", NORM: "2025"},
{ORTH: "2к26", NORM: "2026"},
{ORTH: "2к27", NORM: "2027"},
{ORTH: "2к28", NORM: "2028"},
{ORTH: "2к29", NORM: "2029"},
{ORTH: "2к30", NORM: "2030"},
]: ]:
_exc[abbr[ORTH]] = [abbr] _exc[abbr[ORTH]] = [abbr]
@ -268,8 +273,8 @@ for abbr in [
{ORTH: "з-ка", NORM: "заимка"}, {ORTH: "з-ка", NORM: "заимка"},
{ORTH: "п-к", NORM: "починок"}, {ORTH: "п-к", NORM: "починок"},
{ORTH: "киш.", NORM: "кишлак"}, {ORTH: "киш.", NORM: "кишлак"},
{ORTH: "п. ст. ", NORM: "поселок станция"}, {ORTH: "п. ст.", NORM: "поселок станция"},
{ORTH: "п. ж/д ст. ", NORM: "поселок при железнодорожной станции"}, {ORTH: "п. ж/д ст.", NORM: "поселок при железнодорожной станции"},
{ORTH: "ж/д бл-ст", NORM: "железнодорожный блокпост"}, {ORTH: "ж/д бл-ст", NORM: "железнодорожный блокпост"},
{ORTH: "ж/д б-ка", NORM: "железнодорожная будка"}, {ORTH: "ж/д б-ка", NORM: "железнодорожная будка"},
{ORTH: "ж/д в-ка", NORM: "железнодорожная ветка"}, {ORTH: "ж/д в-ка", NORM: "железнодорожная ветка"},
@ -280,12 +285,12 @@ for abbr in [
{ORTH: "ж/д п.п.", NORM: "железнодорожный путевой пост"}, {ORTH: "ж/д п.п.", NORM: "железнодорожный путевой пост"},
{ORTH: "ж/д о.п.", NORM: "железнодорожный остановочный пункт"}, {ORTH: "ж/д о.п.", NORM: "железнодорожный остановочный пункт"},
{ORTH: "ж/д рзд.", NORM: "железнодорожный разъезд"}, {ORTH: "ж/д рзд.", NORM: "железнодорожный разъезд"},
{ORTH: "ж/д ст. ", NORM: "железнодорожная станция"}, {ORTH: "ж/д ст.", NORM: "железнодорожная станция"},
{ORTH: "м-ко", NORM: "местечко"}, {ORTH: "м-ко", NORM: "местечко"},
{ORTH: "д.", NORM: "деревня"}, {ORTH: "д.", NORM: "деревня"},
{ORTH: "с.", NORM: "село"}, {ORTH: "с.", NORM: "село"},
{ORTH: "сл.", NORM: "слобода"}, {ORTH: "сл.", NORM: "слобода"},
{ORTH: "ст. ", NORM: "станция"}, {ORTH: "ст.", NORM: "станция"},
{ORTH: "ст-ца", NORM: "станица"}, {ORTH: "ст-ца", NORM: "станица"},
{ORTH: "у.", NORM: "улус"}, {ORTH: "у.", NORM: "улус"},
{ORTH: "х.", NORM: "хутор"}, {ORTH: "х.", NORM: "хутор"},
@ -388,8 +393,9 @@ for abbr in [
{ORTH: "прим.", NORM: "примечание"}, {ORTH: "прим.", NORM: "примечание"},
{ORTH: "прим.ред.", NORM: "примечание редакции"}, {ORTH: "прим.ред.", NORM: "примечание редакции"},
{ORTH: "см. также", NORM: "смотри также"}, {ORTH: "см. также", NORM: "смотри также"},
{ORTH: "кв.м.", NORM: "квадрантный метр"}, {ORTH: "см.", NORM: "смотри"},
{ORTH: "м2", NORM: "квадрантный метр"}, {ORTH: "кв.м.", NORM: "квадратный метр"},
{ORTH: "м2", NORM: "квадратный метр"},
{ORTH: "б/у", NORM: "бывший в употреблении"}, {ORTH: "б/у", NORM: "бывший в употреблении"},
{ORTH: "сокр.", NORM: "сокращение"}, {ORTH: "сокр.", NORM: "сокращение"},
{ORTH: "чел.", NORM: "человек"}, {ORTH: "чел.", NORM: "человек"},

View File

@ -29,7 +29,7 @@ class UkrainianLemmatizer(RussianLemmatizer):
) from None ) from None
if getattr(self, "_morph", None) is None: if getattr(self, "_morph", None) is None:
self._morph = MorphAnalyzer(lang="uk") self._morph = MorphAnalyzer(lang="uk")
elif mode == "pymorphy3": elif mode in {"pymorphy3", "pymorphy3_lookup"}:
try: try:
from pymorphy3 import MorphAnalyzer from pymorphy3 import MorphAnalyzer
except ImportError: except ImportError:

View File

@ -43,8 +43,7 @@ from .lookups import load_lookups
from .compat import Literal from .compat import Literal
if TYPE_CHECKING: PipeCallable = Callable[[Doc], Doc]
from .pipeline import Pipe # noqa: F401
# This is the base config will all settings (training etc.) # This is the base config will all settings (training etc.)
@ -181,7 +180,7 @@ class Language:
self.vocab: Vocab = vocab self.vocab: Vocab = vocab
if self.lang is None: if self.lang is None:
self.lang = self.vocab.lang self.lang = self.vocab.lang
self._components: List[Tuple[str, "Pipe"]] = [] self._components: List[Tuple[str, PipeCallable]] = []
self._disabled: Set[str] = set() self._disabled: Set[str] = set()
self.max_length = max_length self.max_length = max_length
# Create the default tokenizer from the default config # Create the default tokenizer from the default config
@ -303,7 +302,7 @@ class Language:
return SimpleFrozenList(names) return SimpleFrozenList(names)
@property @property
def components(self) -> List[Tuple[str, "Pipe"]]: def components(self) -> List[Tuple[str, PipeCallable]]:
"""Get all (name, component) tuples in the pipeline, including the """Get all (name, component) tuples in the pipeline, including the
currently disabled components. currently disabled components.
""" """
@ -322,12 +321,12 @@ class Language:
return SimpleFrozenList(names, error=Errors.E926.format(attr="component_names")) return SimpleFrozenList(names, error=Errors.E926.format(attr="component_names"))
@property @property
def pipeline(self) -> List[Tuple[str, "Pipe"]]: def pipeline(self) -> List[Tuple[str, PipeCallable]]:
"""The processing pipeline consisting of (name, component) tuples. The """The processing pipeline consisting of (name, component) tuples. The
components are called on the Doc in order as it passes through the components are called on the Doc in order as it passes through the
pipeline. pipeline.
RETURNS (List[Tuple[str, Pipe]]): The pipeline. RETURNS (List[Tuple[str, Callable[[Doc], Doc]]]): The pipeline.
""" """
pipes = [(n, p) for n, p in self._components if n not in self._disabled] pipes = [(n, p) for n, p in self._components if n not in self._disabled]
return SimpleFrozenList(pipes, error=Errors.E926.format(attr="pipeline")) return SimpleFrozenList(pipes, error=Errors.E926.format(attr="pipeline"))
@ -527,7 +526,7 @@ class Language:
assigns: Iterable[str] = SimpleFrozenList(), assigns: Iterable[str] = SimpleFrozenList(),
requires: Iterable[str] = SimpleFrozenList(), requires: Iterable[str] = SimpleFrozenList(),
retokenizes: bool = False, retokenizes: bool = False,
func: Optional["Pipe"] = None, func: Optional[PipeCallable] = None,
) -> Callable[..., Any]: ) -> Callable[..., Any]:
"""Register a new pipeline component. Can be used for stateless function """Register a new pipeline component. Can be used for stateless function
components that don't require a separate factory. Can be used as a components that don't require a separate factory. Can be used as a
@ -542,7 +541,7 @@ class Language:
e.g. "token.ent_id". Used for pipeline analysis. e.g. "token.ent_id". Used for pipeline analysis.
retokenizes (bool): Whether the component changes the tokenization. retokenizes (bool): Whether the component changes the tokenization.
Used for pipeline analysis. Used for pipeline analysis.
func (Optional[Callable]): Factory function if not used as a decorator. func (Optional[Callable[[Doc], Doc]]): Factory function if not used as a decorator.
DOCS: https://spacy.io/api/language#component DOCS: https://spacy.io/api/language#component
""" """
@ -553,11 +552,11 @@ class Language:
raise ValueError(Errors.E853.format(name=name)) raise ValueError(Errors.E853.format(name=name))
component_name = name if name is not None else util.get_object_name(func) component_name = name if name is not None else util.get_object_name(func)
def add_component(component_func: "Pipe") -> Callable: def add_component(component_func: PipeCallable) -> Callable:
if isinstance(func, type): # function is a class if isinstance(func, type): # function is a class
raise ValueError(Errors.E965.format(name=component_name)) raise ValueError(Errors.E965.format(name=component_name))
def factory_func(nlp, name: str) -> "Pipe": def factory_func(nlp, name: str) -> PipeCallable:
return component_func return component_func
internal_name = cls.get_factory_name(name) internal_name = cls.get_factory_name(name)
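With components typed as plain `Callable[[Doc], Doc]` instead of `Pipe`, a stateless component registered through the decorator above is just a function from Doc to Doc. A minimal sketch (the component name is made up):

import spacy
from spacy.language import Language
from spacy.tokens import Doc

@Language.component("print_length")
def print_length(doc: Doc) -> Doc:
    # Any Doc -> Doc callable qualifies as a pipeline component.
    print(len(doc))
    return doc

nlp = spacy.blank("en")
nlp.add_pipe("print_length")
nlp("hello world")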
@ -607,7 +606,7 @@ class Language:
print_pipe_analysis(analysis, keys=keys) print_pipe_analysis(analysis, keys=keys)
return analysis return analysis
def get_pipe(self, name: str) -> "Pipe": def get_pipe(self, name: str) -> PipeCallable:
"""Get a pipeline component for a given component name. """Get a pipeline component for a given component name.
name (str): Name of pipeline component to get. name (str): Name of pipeline component to get.
@ -628,7 +627,7 @@ class Language:
config: Dict[str, Any] = SimpleFrozenDict(), config: Dict[str, Any] = SimpleFrozenDict(),
raw_config: Optional[Config] = None, raw_config: Optional[Config] = None,
validate: bool = True, validate: bool = True,
) -> "Pipe": ) -> PipeCallable:
"""Create a pipeline component. Mostly used internally. To create and """Create a pipeline component. Mostly used internally. To create and
add a component to the pipeline, you can use nlp.add_pipe. add a component to the pipeline, you can use nlp.add_pipe.
@ -640,7 +639,7 @@ class Language:
raw_config (Optional[Config]): Internals: the non-interpolated config. raw_config (Optional[Config]): Internals: the non-interpolated config.
validate (bool): Whether to validate the component config against the validate (bool): Whether to validate the component config against the
arguments and types expected by the factory. arguments and types expected by the factory.
RETURNS (Pipe): The pipeline component. RETURNS (Callable[[Doc], Doc]): The pipeline component.
DOCS: https://spacy.io/api/language#create_pipe DOCS: https://spacy.io/api/language#create_pipe
""" """
@ -695,24 +694,18 @@ class Language:
def create_pipe_from_source( def create_pipe_from_source(
self, source_name: str, source: "Language", *, name: str self, source_name: str, source: "Language", *, name: str
) -> Tuple["Pipe", str]: ) -> Tuple[PipeCallable, str]:
"""Create a pipeline component by copying it from an existing model. """Create a pipeline component by copying it from an existing model.
source_name (str): Name of the component in the source pipeline. source_name (str): Name of the component in the source pipeline.
source (Language): The source nlp object to copy from. source (Language): The source nlp object to copy from.
name (str): Optional alternative name to use in current pipeline. name (str): Optional alternative name to use in current pipeline.
RETURNS (Tuple[Callable, str]): The component and its factory name. RETURNS (Tuple[Callable[[Doc], Doc], str]): The component and its factory name.
""" """
# Check source type # Check source type
if not isinstance(source, Language): if not isinstance(source, Language):
raise ValueError(Errors.E945.format(name=source_name, source=type(source))) raise ValueError(Errors.E945.format(name=source_name, source=type(source)))
# Check vectors, with faster checks first if self.vocab.vectors != source.vocab.vectors:
if (
self.vocab.vectors.shape != source.vocab.vectors.shape
or self.vocab.vectors.key2row != source.vocab.vectors.key2row
or self.vocab.vectors.to_bytes(exclude=["strings"])
!= source.vocab.vectors.to_bytes(exclude=["strings"])
):
warnings.warn(Warnings.W113.format(name=source_name)) warnings.warn(Warnings.W113.format(name=source_name))
if source_name not in source.component_names: if source_name not in source.component_names:
raise KeyError( raise KeyError(
@ -746,7 +739,7 @@ class Language:
config: Dict[str, Any] = SimpleFrozenDict(), config: Dict[str, Any] = SimpleFrozenDict(),
raw_config: Optional[Config] = None, raw_config: Optional[Config] = None,
validate: bool = True, validate: bool = True,
) -> "Pipe": ) -> PipeCallable:
"""Add a component to the processing pipeline. Valid components are """Add a component to the processing pipeline. Valid components are
callables that take a `Doc` object, modify it and return it. Only one callables that take a `Doc` object, modify it and return it. Only one
of before/after/first/last can be set. Default behaviour is "last". of before/after/first/last can be set. Default behaviour is "last".
@ -769,7 +762,7 @@ class Language:
raw_config (Optional[Config]): Internals: the non-interpolated config. raw_config (Optional[Config]): Internals: the non-interpolated config.
validate (bool): Whether to validate the component config against the validate (bool): Whether to validate the component config against the
arguments and types expected by the factory. arguments and types expected by the factory.
RETURNS (Pipe): The pipeline component. RETURNS (Callable[[Doc], Doc]): The pipeline component.
DOCS: https://spacy.io/api/language#add_pipe DOCS: https://spacy.io/api/language#add_pipe
""" """
@ -790,14 +783,6 @@ class Language:
factory_name, source, name=name factory_name, source, name=name
) )
else: else:
if not self.has_factory(factory_name):
err = Errors.E002.format(
name=factory_name,
opts=", ".join(self.factory_names),
method="add_pipe",
lang=util.get_object_name(self),
lang_code=self.lang,
)
pipe_component = self.create_pipe( pipe_component = self.create_pipe(
factory_name, factory_name,
name=name, name=name,
@ -883,7 +868,7 @@ class Language:
*, *,
config: Dict[str, Any] = SimpleFrozenDict(), config: Dict[str, Any] = SimpleFrozenDict(),
validate: bool = True, validate: bool = True,
) -> "Pipe": ) -> PipeCallable:
"""Replace a component in the pipeline. """Replace a component in the pipeline.
name (str): Name of the component to replace. name (str): Name of the component to replace.
@ -892,7 +877,7 @@ class Language:
component. Will be merged with default config, if available. component. Will be merged with default config, if available.
validate (bool): Whether to validate the component config against the validate (bool): Whether to validate the component config against the
arguments and types expected by the factory. arguments and types expected by the factory.
RETURNS (Pipe): The new pipeline component. RETURNS (Callable[[Doc], Doc]): The new pipeline component.
DOCS: https://spacy.io/api/language#replace_pipe DOCS: https://spacy.io/api/language#replace_pipe
""" """
@ -944,11 +929,11 @@ class Language:
init_cfg = self._config["initialize"]["components"].pop(old_name) init_cfg = self._config["initialize"]["components"].pop(old_name)
self._config["initialize"]["components"][new_name] = init_cfg self._config["initialize"]["components"][new_name] = init_cfg
def remove_pipe(self, name: str) -> Tuple[str, "Pipe"]: def remove_pipe(self, name: str) -> Tuple[str, PipeCallable]:
"""Remove a component from the pipeline. """Remove a component from the pipeline.
name (str): Name of the component to remove. name (str): Name of the component to remove.
RETURNS (tuple): A `(name, component)` tuple of the removed component. RETURNS (Tuple[str, Callable[[Doc], Doc]]): A `(name, component)` tuple of the removed component.
DOCS: https://spacy.io/api/language#remove_pipe DOCS: https://spacy.io/api/language#remove_pipe
""" """
@ -1254,15 +1239,6 @@ class Language:
sgd(key, W, dW) # type: ignore[call-arg, misc] sgd(key, W, dW) # type: ignore[call-arg, misc]
return losses return losses
def begin_training(
self,
get_examples: Optional[Callable[[], Iterable[Example]]] = None,
*,
sgd: Optional[Optimizer] = None,
) -> Optimizer:
warnings.warn(Warnings.W089, DeprecationWarning)
return self.initialize(get_examples, sgd=sgd)
def initialize( def initialize(
self, self,
get_examples: Optional[Callable[[], Iterable[Example]]] = None, get_examples: Optional[Callable[[], Iterable[Example]]] = None,
@ -1363,15 +1339,15 @@ class Language:
def set_error_handler( def set_error_handler(
self, self,
error_handler: Callable[[str, "Pipe", List[Doc], Exception], NoReturn], error_handler: Callable[[str, PipeCallable, List[Doc], Exception], NoReturn],
): ):
"""Set an error handler object for all the components in the pipeline that implement """Set an error handler object for all the components in the pipeline
a set_error_handler function. that implement a set_error_handler function.
error_handler (Callable[[str, Pipe, List[Doc], Exception], NoReturn]): error_handler (Callable[[str, Callable[[Doc], Doc], List[Doc], Exception], NoReturn]):
Function that deals with a failing batch of documents. This callable function should take in Function that deals with a failing batch of documents. This callable
the component's name, the component itself, the offending batch of documents, and the exception function should take in the component's name, the component itself,
that was thrown. the offending batch of documents, and the exception that was thrown.
DOCS: https://spacy.io/api/language#set_error_handler DOCS: https://spacy.io/api/language#set_error_handler
""" """
self.default_error_handler = error_handler self.default_error_handler = error_handler
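A sketch of an error handler matching the signature documented above (the function name is illustrative):

from typing import Callable, List, NoReturn
from spacy.tokens import Doc

def reraise_with_context(name: str, component: Callable[[Doc], Doc],
                         docs: List[Doc], e: Exception) -> NoReturn:
    # Attach the component name and the offending texts before re-raising.
    texts = [doc.text for doc in docs]
    raise RuntimeError(f"Component '{name}' failed on {texts}") from e

# nlp.set_error_handler(reraise_with_context)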
@ -1879,31 +1855,22 @@ class Language:
if isinstance(exclude, str): if isinstance(exclude, str):
exclude = [exclude] exclude = [exclude]
def fetch_pipes_status(value: Iterable[str], key: str) -> Iterable[str]: # `enable` should not be merged with `enabled` (the opposite is true for `disable`/`disabled`). If the config
"""Fetch value for `enable` or `disable` w.r.t. the specified config and passed arguments passed to # specifies values for `enabled` not included in `enable`, emit warning.
.load(). If both arguments and config specified values for this field, the passed arguments take precedence if id(enable) != id(_DEFAULT_EMPTY_PIPES):
and a warning is printed. enabled = config["nlp"].get("enabled", [])
value (Iterable[str]): Passed value for `enable` or `disable`. if len(enabled) and not set(enabled).issubset(enable):
key (str): Key for field in config (either "enabled" or "disabled"). warnings.warn(
RETURN (Iterable[str]): Warnings.W123.format(
""" enable=enable,
# We assume that no argument was passed if the value is the specified default value. enabled=enabled,
if id(value) == id(_DEFAULT_EMPTY_PIPES):
return config["nlp"].get(key, [])
else:
if len(config["nlp"].get(key, [])):
warnings.warn(
Warnings.W123.format(
arg=key[:-1],
arg_value=value,
config_value=config["nlp"][key],
)
) )
return value )
# Ensure sets of disabled/enabled pipe names are not contradictory.
disabled_pipes = cls._resolve_component_status( disabled_pipes = cls._resolve_component_status(
fetch_pipes_status(disable, "disabled"), list({*disable, *config["nlp"].get("disabled", [])}),
fetch_pipes_status(enable, "enabled"), enable,
config["nlp"]["pipeline"], config["nlp"]["pipeline"],
) )
nlp._disabled = set(p for p in disabled_pipes if p not in exclude) nlp._disabled = set(p for p in disabled_pipes if p not in exclude)
@ -2084,10 +2051,12 @@ class Language:
if enable: if enable:
if isinstance(enable, str): if isinstance(enable, str):
enable = [enable] enable = [enable]
to_disable = [ to_disable = {
pipe_name for pipe_name in pipe_names if pipe_name not in enable *[pipe_name for pipe_name in pipe_names if pipe_name not in enable],
] *disable,
if disable and disable != to_disable: }
# If any pipe to be enabled is in to_disable, the specification is inconsistent.
if len(set(enable) & to_disable):
raise ValueError(Errors.E1042.format(enable=enable, disable=disable)) raise ValueError(Errors.E1042.format(enable=enable, disable=disable))
return tuple(to_disable) return tuple(to_disable)
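The enable/disable resolution above is easiest to see from the loading API (the package name is only an example):

import spacy

# `enable` keeps only the listed components; everything else is disabled.
nlp = spacy.load("en_core_web_sm", enable=["ner"])
print(nlp.pipe_names)   # ["ner"]; the remaining pipes appear in nlp.disabled

# A pipe listed in both enable and disable, e.g. enable=["ner"] together with
# disable=["ner"], is inconsistent and raises E1042.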

View File

@ -20,7 +20,6 @@ class Lexeme:
def vector_norm(self) -> float: ... def vector_norm(self) -> float: ...
vector: Floats1d vector: Floats1d
rank: int rank: int
sentiment: float
@property @property
def orth_(self) -> str: ... def orth_(self) -> str: ...
@property @property

View File

@ -173,19 +173,6 @@ cdef class Lexeme:
def __set__(self, value): def __set__(self, value):
self.c.id = value self.c.id = value
property sentiment:
"""RETURNS (float): A scalar value indicating the positivity or
negativity of the lexeme."""
def __get__(self):
sentiment_table = self.vocab.lookups.get_table("lexeme_sentiment", {})
return sentiment_table.get(self.c.orth, 0.0)
def __set__(self, float x):
if "lexeme_sentiment" not in self.vocab.lookups:
self.vocab.lookups.add_table("lexeme_sentiment")
sentiment_table = self.vocab.lookups.get_table("lexeme_sentiment")
sentiment_table[self.c.orth] = x
@property @property
def orth_(self): def orth_(self):
"""RETURNS (str): The original verbatim text of the lexeme """RETURNS (str): The original verbatim text of the lexeme

View File

@ -1,4 +1,4 @@
# cython: infer_types=True, cython: profile=True # cython: infer_types=True, profile=True
from typing import List, Iterable from typing import List, Iterable
from libcpp.vector cimport vector from libcpp.vector cimport vector

View File

@ -3,7 +3,6 @@ cimport numpy as np
from libc.math cimport exp from libc.math cimport exp
from libc.string cimport memset, memcpy from libc.string cimport memset, memcpy
from libc.stdlib cimport calloc, free, realloc from libc.stdlib cimport calloc, free, realloc
from thinc.backends.linalg cimport Vec, VecVec
from thinc.backends.cblas cimport saxpy, sgemm from thinc.backends.cblas cimport saxpy, sgemm
import numpy import numpy
@ -102,11 +101,10 @@ cdef void predict_states(CBlas cblas, ActivationsC* A, StateC** states,
sum_state_features(cblas, A.unmaxed, sum_state_features(cblas, A.unmaxed,
W.feat_weights, A.token_ids, n.states, n.feats, n.hiddens * n.pieces) W.feat_weights, A.token_ids, n.states, n.feats, n.hiddens * n.pieces)
for i in range(n.states): for i in range(n.states):
VecVec.add_i(&A.unmaxed[i*n.hiddens*n.pieces], saxpy(cblas)(n.hiddens * n.pieces, 1., W.feat_bias, 1, &A.unmaxed[i*n.hiddens*n.pieces], 1)
W.feat_bias, 1., n.hiddens * n.pieces)
for j in range(n.hiddens): for j in range(n.hiddens):
index = i * n.hiddens * n.pieces + j * n.pieces index = i * n.hiddens * n.pieces + j * n.pieces
which = Vec.arg_max(&A.unmaxed[index], n.pieces) which = _arg_max(&A.unmaxed[index], n.pieces)
A.hiddens[i*n.hiddens + j] = A.unmaxed[index + which] A.hiddens[i*n.hiddens + j] = A.unmaxed[index + which]
memset(A.scores, 0, n.states * n.classes * sizeof(float)) memset(A.scores, 0, n.states * n.classes * sizeof(float))
if W.hidden_weights == NULL: if W.hidden_weights == NULL:
@ -119,8 +117,7 @@ cdef void predict_states(CBlas cblas, ActivationsC* A, StateC** states,
0.0, A.scores, n.classes) 0.0, A.scores, n.classes)
# Add bias # Add bias
for i in range(n.states): for i in range(n.states):
VecVec.add_i(&A.scores[i*n.classes], saxpy(cblas)(n.classes, 1., W.hidden_bias, 1, &A.scores[i*n.classes], 1)
W.hidden_bias, 1., n.classes)
# Set unseen classes to minimum value # Set unseen classes to minimum value
i = 0 i = 0
min_ = A.scores[0] min_ = A.scores[0]
@ -158,7 +155,8 @@ cdef void cpu_log_loss(float* d_scores,
"""Do multi-label log loss""" """Do multi-label log loss"""
cdef double max_, gmax, Z, gZ cdef double max_, gmax, Z, gZ
best = arg_max_if_gold(scores, costs, is_valid, O) best = arg_max_if_gold(scores, costs, is_valid, O)
guess = Vec.arg_max(scores, O) guess = _arg_max(scores, O)
if best == -1 or guess == -1: if best == -1 or guess == -1:
# These shouldn't happen, but if they do, we want to make sure we don't # These shouldn't happen, but if they do, we want to make sure we don't
# cause an OOB access. # cause an OOB access.
@ -488,3 +486,15 @@ cdef class precompute_hiddens:
return d_best.reshape((d_best.shape + (1,))) return d_best.reshape((d_best.shape + (1,)))
return state_vector, backprop_relu return state_vector, backprop_relu
cdef inline int _arg_max(const float* scores, const int n_classes) nogil:
if n_classes == 2:
return 0 if scores[0] > scores[1] else 1
cdef int i
cdef int best = 0
cdef float mode = scores[0]
for i in range(1, n_classes):
if scores[i] > mode:
mode = scores[i]
best = i
return best
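For reference, the general-case loop in `_arg_max` above corresponds to this pure-Python sketch:

def arg_max(scores):
    # Index of the highest score; the first occurrence wins on ties.
    best, mode = 0, scores[0]
    for i, score in enumerate(scores[1:], start=1):
        if score > mode:
            best, mode = i, score
    return best

assert arg_max([0.1, 0.7, 0.3]) == 1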

View File

@ -1,6 +1,6 @@
from ...typedefs cimport class_t, hash_t from ...typedefs cimport class_t, hash_t
# These are passed as callbacks to thinc.search.Beam # These are passed as callbacks to .search.Beam
cdef int transition_state(void* _dest, void* _src, class_t clas, void* _moves) except -1 cdef int transition_state(void* _dest, void* _src, class_t clas, void* _moves) except -1
cdef int check_final_state(void* _state, void* extra_args) except -1 cdef int check_final_state(void* _state, void* extra_args) except -1

View File

@ -3,17 +3,16 @@
cimport numpy as np cimport numpy as np
import numpy import numpy
from cpython.ref cimport PyObject, Py_XDECREF from cpython.ref cimport PyObject, Py_XDECREF
from thinc.extra.search cimport Beam
from thinc.extra.search import MaxViolation
from thinc.extra.search cimport MaxViolation
from ...typedefs cimport hash_t, class_t from ...typedefs cimport hash_t, class_t
from .transition_system cimport TransitionSystem, Transition from .transition_system cimport TransitionSystem, Transition
from ...errors import Errors from ...errors import Errors
from .search cimport Beam, MaxViolation
from .search import MaxViolation
from .stateclass cimport StateC, StateClass from .stateclass cimport StateC, StateClass
# These are passed as callbacks to thinc.search.Beam # These are passed as callbacks to .search.Beam
cdef int transition_state(void* _dest, void* _src, class_t clas, void* _moves) except -1: cdef int transition_state(void* _dest, void* _src, class_t clas, void* _moves) except -1:
dest = <StateC*>_dest dest = <StateC*>_dest
src = <StateC*>_src src = <StateC*>_src

View File

@ -15,7 +15,7 @@ from ...training.example cimport Example
from .stateclass cimport StateClass from .stateclass cimport StateClass
from ._state cimport StateC, ArcC from ._state cimport StateC, ArcC
from ...errors import Errors from ...errors import Errors
from thinc.extra.search cimport Beam from .search cimport Beam
cdef weight_t MIN_SCORE = -90000 cdef weight_t MIN_SCORE = -90000
cdef attr_t SUBTOK_LABEL = hash_string('subtok') cdef attr_t SUBTOK_LABEL = hash_string('subtok')

View File

@ -6,7 +6,6 @@ from libcpp.vector cimport vector
from cymem.cymem cimport Pool from cymem.cymem cimport Pool
from collections import Counter from collections import Counter
from thinc.extra.search cimport Beam
from ...tokens.doc cimport Doc from ...tokens.doc cimport Doc
from ...tokens.span import Span from ...tokens.span import Span
@ -17,6 +16,7 @@ from ...attrs cimport IS_SPACE
from ...structs cimport TokenC, SpanC from ...structs cimport TokenC, SpanC
from ...training import split_bilu_label from ...training import split_bilu_label
from ...training.example cimport Example from ...training.example cimport Example
from .search cimport Beam
from .stateclass cimport StateClass from .stateclass cimport StateClass
from ._state cimport StateC from ._state cimport StateC
from .transition_system cimport Transition, do_func_t from .transition_system cimport Transition, do_func_t

View File

@ -0,0 +1,89 @@
from cymem.cymem cimport Pool
from libc.stdint cimport uint32_t
from libc.stdint cimport uint64_t
from libcpp.pair cimport pair
from libcpp.queue cimport priority_queue
from libcpp.vector cimport vector
from ...typedefs cimport class_t, weight_t, hash_t
ctypedef pair[weight_t, size_t] Entry
ctypedef priority_queue[Entry] Queue
ctypedef int (*trans_func_t)(void* dest, void* src, class_t clas, void* x) except -1
ctypedef void* (*init_func_t)(Pool mem, int n, void* extra_args) except NULL
ctypedef int (*del_func_t)(Pool mem, void* state, void* extra_args) except -1
ctypedef int (*finish_func_t)(void* state, void* extra_args) except -1
ctypedef hash_t (*hash_func_t)(void* state, void* x) except 0
cdef struct _State:
void* content
class_t* hist
weight_t score
weight_t loss
int i
int t
bint is_done
cdef class Beam:
cdef Pool mem
cdef class_t nr_class
cdef class_t width
cdef class_t size
cdef public weight_t min_density
cdef int t
cdef readonly bint is_done
cdef list histories
cdef list _parent_histories
cdef weight_t** scores
cdef int** is_valid
cdef weight_t** costs
cdef _State* _parents
cdef _State* _states
cdef del_func_t del_func
cdef int _fill(self, Queue* q, weight_t** scores, int** is_valid) except -1
cdef inline void* at(self, int i) nogil:
return self._states[i].content
cdef int initialize(self, init_func_t init_func, del_func_t del_func, int n, void* extra_args) except -1
cdef int advance(self, trans_func_t transition_func, hash_func_t hash_func,
void* extra_args) except -1
cdef int check_done(self, finish_func_t finish_func, void* extra_args) except -1
cdef inline void set_cell(self, int i, int j, weight_t score, int is_valid, weight_t cost) nogil:
self.scores[i][j] = score
self.is_valid[i][j] = is_valid
self.costs[i][j] = cost
cdef int set_row(self, int i, const weight_t* scores, const int* is_valid,
const weight_t* costs) except -1
cdef int set_table(self, weight_t** scores, int** is_valid, weight_t** costs) except -1
cdef class MaxViolation:
cdef Pool mem
cdef weight_t cost
cdef weight_t delta
cdef readonly weight_t p_score
cdef readonly weight_t g_score
cdef readonly double Z
cdef readonly double gZ
cdef class_t n
cdef readonly list p_hist
cdef readonly list g_hist
cdef readonly list p_probs
cdef readonly list g_probs
cpdef int check(self, Beam pred, Beam gold) except -1
cpdef int check_crf(self, Beam pred, Beam gold) except -1

View File

@ -0,0 +1,306 @@
# cython: profile=True, experimental_cpp_class_def=True, cdivision=True, infer_types=True
cimport cython
from libc.string cimport memset, memcpy
from libc.math cimport log, exp
import math
from cymem.cymem cimport Pool
from preshed.maps cimport PreshMap
cdef class Beam:
def __init__(self, class_t nr_class, class_t width, weight_t min_density=0.0):
assert nr_class != 0
assert width != 0
self.nr_class = nr_class
self.width = width
self.min_density = min_density
self.size = 1
self.t = 0
self.mem = Pool()
self.del_func = NULL
self._parents = <_State*>self.mem.alloc(self.width, sizeof(_State))
self._states = <_State*>self.mem.alloc(self.width, sizeof(_State))
cdef int i
self.histories = [[] for i in range(self.width)]
self._parent_histories = [[] for i in range(self.width)]
self.scores = <weight_t**>self.mem.alloc(self.width, sizeof(weight_t*))
self.is_valid = <int**>self.mem.alloc(self.width, sizeof(weight_t*))
self.costs = <weight_t**>self.mem.alloc(self.width, sizeof(weight_t*))
for i in range(self.width):
self.scores[i] = <weight_t*>self.mem.alloc(self.nr_class, sizeof(weight_t))
self.is_valid[i] = <int*>self.mem.alloc(self.nr_class, sizeof(int))
self.costs[i] = <weight_t*>self.mem.alloc(self.nr_class, sizeof(weight_t))
def __len__(self):
return self.size
property score:
def __get__(self):
return self._states[0].score
property min_score:
def __get__(self):
return self._states[self.size-1].score
property loss:
def __get__(self):
return self._states[0].loss
property probs:
def __get__(self):
return _softmax([self._states[i].score for i in range(self.size)])
property scores:
def __get__(self):
return [self._states[i].score for i in range(self.size)]
property histories:
def __get__(self):
return self.histories
cdef int set_row(self, int i, const weight_t* scores, const int* is_valid,
const weight_t* costs) except -1:
cdef int j
for j in range(self.nr_class):
self.scores[i][j] = scores[j]
self.is_valid[i][j] = is_valid[j]
self.costs[i][j] = costs[j]
cdef int set_table(self, weight_t** scores, int** is_valid, weight_t** costs) except -1:
cdef int i, j
for i in range(self.width):
memcpy(self.scores[i], scores[i], sizeof(weight_t) * self.nr_class)
memcpy(self.is_valid[i], is_valid[i], sizeof(bint) * self.nr_class)
memcpy(self.costs[i], costs[i], sizeof(int) * self.nr_class)
cdef int initialize(self, init_func_t init_func, del_func_t del_func, int n, void* extra_args) except -1:
for i in range(self.width):
self._states[i].content = init_func(self.mem, n, extra_args)
self._parents[i].content = init_func(self.mem, n, extra_args)
self.del_func = del_func
def __dealloc__(self):
if self.del_func == NULL:
return
for i in range(self.width):
self.del_func(self.mem, self._states[i].content, NULL)
self.del_func(self.mem, self._parents[i].content, NULL)
@cython.cdivision(True)
cdef int advance(self, trans_func_t transition_func, hash_func_t hash_func,
void* extra_args) except -1:
cdef weight_t** scores = self.scores
cdef int** is_valid = self.is_valid
cdef weight_t** costs = self.costs
cdef Queue* q = new Queue()
self._fill(q, scores, is_valid)
# For a beam of width k, we only ever need 2k state objects. How?
# Each transition takes a parent and a class and produces a new state.
# So, we don't need the whole history --- just the parent. So at
# each step, we take a parent, and apply one or more extensions to
# it.
self._parents, self._states = self._states, self._parents
self._parent_histories, self.histories = self.histories, self._parent_histories
cdef weight_t score
cdef int p_i
cdef int i = 0
cdef class_t clas
cdef _State* parent
cdef _State* state
cdef hash_t key
cdef PreshMap seen_states = PreshMap(self.width)
cdef uint64_t is_seen
cdef uint64_t one = 1
while i < self.width and not q.empty():
data = q.top()
p_i = data.second / self.nr_class
clas = data.second % self.nr_class
score = data.first
q.pop()
parent = &self._parents[p_i]
# Indicates terminal state reached; i.e. state is done
if parent.is_done:
# Now parent will not be changed, so we don't have to copy.
# Once finished, should also be unbranching.
self._states[i], parent[0] = parent[0], self._states[i]
parent.i = self._states[i].i
parent.t = self._states[i].t
parent.is_done = self._states[i].t
self._states[i].score = score
self.histories[i] = list(self._parent_histories[p_i])
i += 1
else:
state = &self._states[i]
# The supplied transition function should adjust the destination
# state to be the result of applying the class to the source state
transition_func(state.content, parent.content, clas, extra_args)
key = hash_func(state.content, extra_args) if hash_func is not NULL else 0
is_seen = <uint64_t>seen_states.get(key)
if key == 0 or key == 1 or not is_seen:
if key != 0 and key != 1:
seen_states.set(key, <void*>one)
state.score = score
state.loss = parent.loss + costs[p_i][clas]
self.histories[i] = list(self._parent_histories[p_i])
self.histories[i].append(clas)
i += 1
del q
self.size = i
assert self.size >= 1
for i in range(self.width):
memset(self.scores[i], 0, sizeof(weight_t) * self.nr_class)
memset(self.costs[i], 0, sizeof(weight_t) * self.nr_class)
memset(self.is_valid[i], 0, sizeof(int) * self.nr_class)
self.t += 1
cdef int check_done(self, finish_func_t finish_func, void* extra_args) except -1:
cdef int i
for i in range(self.size):
if not self._states[i].is_done:
self._states[i].is_done = finish_func(self._states[i].content, extra_args)
for i in range(self.size):
if not self._states[i].is_done:
self.is_done = False
break
else:
self.is_done = True
@cython.cdivision(True)
cdef int _fill(self, Queue* q, weight_t** scores, int** is_valid) except -1:
"""Populate the queue from a k * n matrix of scores, where k is the
beam-width, and n is the number of classes.
"""
cdef Entry entry
cdef weight_t score
cdef _State* s
cdef int i, j, move_id
assert self.size >= 1
cdef vector[Entry] entries
for i in range(self.size):
s = &self._states[i]
move_id = i * self.nr_class
if s.is_done:
# Update score by path average, following TACL '13 paper.
if self.histories[i]:
entry.first = s.score + (s.score / self.t)
else:
entry.first = s.score
entry.second = move_id
entries.push_back(entry)
else:
for j in range(self.nr_class):
if is_valid[i][j]:
entry.first = s.score + scores[i][j]
entry.second = move_id + j
entries.push_back(entry)
cdef double max_, Z, cutoff
if self.min_density == 0.0:
for i in range(entries.size()):
q.push(entries[i])
elif not entries.empty():
max_ = entries[0].first
Z = 0.
cutoff = 0.
# Softmax into probabilities, so we can prune
for i in range(entries.size()):
if entries[i].first > max_:
max_ = entries[i].first
for i in range(entries.size()):
Z += exp(entries[i].first-max_)
cutoff = (1. / Z) * self.min_density
for i in range(entries.size()):
prob = exp(entries[i].first-max_) / Z
if prob >= cutoff:
q.push(entries[i])
cdef class MaxViolation:
def __init__(self):
self.p_score = 0.0
self.g_score = 0.0
self.Z = 0.0
self.gZ = 0.0
self.delta = -1
self.cost = 0
self.p_hist = []
self.g_hist = []
self.p_probs = []
self.g_probs = []
cpdef int check(self, Beam pred, Beam gold) except -1:
cdef _State* p = &pred._states[0]
cdef _State* g = &gold._states[0]
cdef weight_t d = p.score - g.score
if p.loss >= 1 and (self.cost == 0 or d > self.delta):
self.cost = p.loss
self.delta = d
self.p_hist = list(pred.histories[0])
self.g_hist = list(gold.histories[0])
self.p_score = p.score
self.g_score = g.score
self.Z = 1e-10
self.gZ = 1e-10
for i in range(pred.size):
if pred._states[i].loss > 0:
self.Z += exp(pred._states[i].score)
for i in range(gold.size):
if gold._states[i].loss == 0:
prob = exp(gold._states[i].score)
self.Z += prob
self.gZ += prob
cpdef int check_crf(self, Beam pred, Beam gold) except -1:
d = pred.score - gold.score
seen_golds = set([tuple(gold.histories[i]) for i in range(gold.size)])
if pred.loss > 0 and (self.cost == 0 or d > self.delta):
p_hist = []
p_scores = []
g_hist = []
g_scores = []
for i in range(pred.size):
if pred._states[i].loss > 0:
p_scores.append(pred._states[i].score)
p_hist.append(list(pred.histories[i]))
# This can happen from non-monotonic actions
# If we find a better gold analysis this way, be sure to keep it.
elif pred._states[i].loss <= 0 \
and tuple(pred.histories[i]) not in seen_golds:
g_scores.append(pred._states[i].score)
g_hist.append(list(pred.histories[i]))
for i in range(gold.size):
if gold._states[i].loss == 0:
g_scores.append(gold._states[i].score)
g_hist.append(list(gold.histories[i]))
all_probs = _softmax(p_scores + g_scores)
p_probs = all_probs[:len(p_scores)]
g_probs_all = all_probs[len(p_scores):]
g_probs = _softmax(g_scores)
self.cost = pred.loss
self.delta = d
self.p_hist = p_hist
self.g_hist = g_hist
# TODO: These variables are misnamed! These are the gradients of the loss.
self.p_probs = p_probs
# Intuition here:
# The gradient of the loss is:
# P(model) - P(truth)
# Normally, P(truth) is 1 for the gold
# But, if we want to do the "partial credit" scheme, we want
# to create a distribution over the gold, proportional to the scores
# awarded.
self.g_probs = [x-y for x, y in zip(g_probs_all, g_probs)]
def _softmax(nums):
if not nums:
return []
max_ = max(nums)
nums = [(exp(n-max_) if n is not None else None) for n in nums]
Z = sum(n for n in nums if n is not None)
return [(n/Z if n is not None else None) for n in nums]
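The `min_density` pruning in `Beam._fill` above softmax-normalizes the candidate scores and keeps only candidates whose probability reaches `(1 / Z) * min_density`. A small sketch of that rule:

import math

def prune(scores, min_density):
    # Mirrors Beam._fill: softmax-normalize, then apply the density cutoff.
    max_ = max(scores)
    Z = sum(math.exp(s - max_) for s in scores)
    cutoff = (1.0 / Z) * min_density
    return [s for s in scores if math.exp(s - max_) / Z >= cutoff]

print(prune([2.0, 1.0, -3.0], min_density=0.05))  # [2.0, 1.0]; -3.0 is pruned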

View File

@ -5,8 +5,9 @@ from itertools import islice
import numpy as np import numpy as np
import srsly import srsly
from thinc.api import Config, Model, SequenceCategoricalCrossentropy from thinc.api import Config, Model
from thinc.types import ArrayXd, Floats2d, Ints1d from thinc.types import ArrayXd, Floats2d, Ints1d
from thinc.legacy import LegacySequenceCategoricalCrossentropy
from ._edit_tree_internals.edit_trees import EditTrees from ._edit_tree_internals.edit_trees import EditTrees
from ._edit_tree_internals.schemas import validate_edit_tree from ._edit_tree_internals.schemas import validate_edit_tree
@ -129,7 +130,9 @@ class EditTreeLemmatizer(TrainablePipe):
self, examples: Iterable[Example], scores: List[Floats2d] self, examples: Iterable[Example], scores: List[Floats2d]
) -> Tuple[float, List[Floats2d]]: ) -> Tuple[float, List[Floats2d]]:
validate_examples(examples, "EditTreeLemmatizer.get_loss") validate_examples(examples, "EditTreeLemmatizer.get_loss")
loss_func = SequenceCategoricalCrossentropy(normalize=False, missing_value=-1) loss_func = LegacySequenceCategoricalCrossentropy(
normalize=False, missing_value=-1
)
truths = [] truths = []
for eg in examples: for eg in examples:
@ -347,9 +350,9 @@ class EditTreeLemmatizer(TrainablePipe):
tree = dict(tree) tree = dict(tree)
if "orig" in tree: if "orig" in tree:
tree["orig"] = self.vocab.strings[tree["orig"]] tree["orig"] = self.vocab.strings.add(tree["orig"])
if "orig" in tree: if "orig" in tree:
tree["subst"] = self.vocab.strings[tree["subst"]] tree["subst"] = self.vocab.strings.add(tree["subst"])
trees.append(tree) trees.append(tree)
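The switch from `self.vocab.strings[...]` to `self.vocab.strings.add(...)` above relies on `StringStore.add` interning the string and returning its hash, so trees deserialize even when the string was not yet in the store. A small sketch:

from spacy.strings import StringStore

strings = StringStore()
key = strings.add("walk")      # interns the string and returns its hash
assert strings[key] == "walk"  # the hash resolves back to the string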

View File

@ -1,7 +1,8 @@
# cython: infer_types=True, profile=True, binding=True # cython: infer_types=True, profile=True, binding=True
from typing import Callable, Dict, Iterable, List, Optional, Union from typing import Callable, Dict, Iterable, List, Optional, Union
import srsly import srsly
from thinc.api import SequenceCategoricalCrossentropy, Model, Config from thinc.api import Model, Config
from thinc.legacy import LegacySequenceCategoricalCrossentropy
from thinc.types import Floats2d, Ints1d from thinc.types import Floats2d, Ints1d
from itertools import islice from itertools import islice
@ -290,7 +291,7 @@ class Morphologizer(Tagger):
DOCS: https://spacy.io/api/morphologizer#get_loss DOCS: https://spacy.io/api/morphologizer#get_loss
""" """
validate_examples(examples, "Morphologizer.get_loss") validate_examples(examples, "Morphologizer.get_loss")
loss_func = SequenceCategoricalCrossentropy(names=tuple(self.labels), normalize=False) loss_func = LegacySequenceCategoricalCrossentropy(names=tuple(self.labels), normalize=False)
truths = [] truths = []
for eg in examples: for eg in examples:
eg_truths = [] eg_truths = []

View File

@ -1,221 +0,0 @@
# cython: infer_types=True, profile=True, binding=True
from typing import Optional
import numpy
from thinc.api import CosineDistance, to_categorical, Model, Config
from thinc.api import set_dropout_rate
from ..tokens.doc cimport Doc
from .trainable_pipe import TrainablePipe
from .tagger import Tagger
from ..training import validate_examples
from ..language import Language
from ._parser_internals import nonproj
from ..attrs import POS, ID
from ..errors import Errors
default_model_config = """
[model]
@architectures = "spacy.MultiTask.v1"
maxout_pieces = 3
token_vector_width = 96
[model.tok2vec]
@architectures = "spacy.HashEmbedCNN.v2"
pretrained_vectors = null
width = 96
depth = 4
embed_size = 2000
window_size = 1
maxout_pieces = 2
subword_features = true
"""
DEFAULT_MT_MODEL = Config().from_str(default_model_config)["model"]
@Language.factory(
"nn_labeller",
default_config={"labels": None, "target": "dep_tag_offset", "model": DEFAULT_MT_MODEL}
)
def make_nn_labeller(nlp: Language, name: str, model: Model, labels: Optional[dict], target: str):
return MultitaskObjective(nlp.vocab, model, name)
class MultitaskObjective(Tagger):
"""Experimental: Assist training of a parser or tagger, by training a
side-objective.
"""
def __init__(self, vocab, model, name="nn_labeller", *, target):
self.vocab = vocab
self.model = model
self.name = name
if target == "dep":
self.make_label = self.make_dep
elif target == "tag":
self.make_label = self.make_tag
elif target == "ent":
self.make_label = self.make_ent
elif target == "dep_tag_offset":
self.make_label = self.make_dep_tag_offset
elif target == "ent_tag":
self.make_label = self.make_ent_tag
elif target == "sent_start":
self.make_label = self.make_sent_start
elif hasattr(target, "__call__"):
self.make_label = target
else:
raise ValueError(Errors.E016)
cfg = {"labels": {}, "target": target}
self.cfg = dict(cfg)
@property
def labels(self):
return self.cfg.setdefault("labels", {})
@labels.setter
def labels(self, value):
self.cfg["labels"] = value
def set_annotations(self, docs, dep_ids):
pass
def initialize(self, get_examples, nlp=None, labels=None):
if not hasattr(get_examples, "__call__"):
err = Errors.E930.format(name="MultitaskObjective", obj=type(get_examples))
raise ValueError(err)
if labels is not None:
self.labels = labels
else:
for example in get_examples():
for token in example.y:
label = self.make_label(token)
if label is not None and label not in self.labels:
self.labels[label] = len(self.labels)
self.model.initialize() # TODO: fix initialization by defining X and Y
def predict(self, docs):
tokvecs = self.model.get_ref("tok2vec")(docs)
scores = self.model.get_ref("softmax")(tokvecs)
return tokvecs, scores
def get_loss(self, examples, scores):
cdef int idx = 0
correct = numpy.zeros((scores.shape[0],), dtype="i")
guesses = scores.argmax(axis=1)
docs = [eg.predicted for eg in examples]
for i, eg in enumerate(examples):
# Handles alignment for tokenization differences
doc_annots = eg.get_aligned() # TODO
for j in range(len(eg.predicted)):
tok_annots = {key: values[j] for key, values in tok_annots.items()}
label = self.make_label(j, tok_annots)
if label is None or label not in self.labels:
correct[idx] = guesses[idx]
else:
correct[idx] = self.labels[label]
idx += 1
correct = self.model.ops.xp.array(correct, dtype="i")
d_scores = scores - to_categorical(correct, n_classes=scores.shape[1])
loss = (d_scores**2).sum()
return float(loss), d_scores
@staticmethod
def make_dep(token):
return token.dep_
@staticmethod
def make_tag(token):
return token.tag_
@staticmethod
def make_ent(token):
if token.ent_iob_ == "O":
return "O"
else:
return token.ent_iob_ + "-" + token.ent_type_
@staticmethod
def make_dep_tag_offset(token):
dep = token.dep_
tag = token.tag_
offset = token.head.i - token.i
offset = min(offset, 2)
offset = max(offset, -2)
return f"{dep}-{tag}:{offset}"
@staticmethod
def make_ent_tag(token):
if token.ent_iob_ == "O":
ent = "O"
else:
ent = token.ent_iob_ + "-" + token.ent_type_
tag = token.tag_
return f"{tag}-{ent}"
@staticmethod
def make_sent_start(token):
"""A multi-task objective for representing sentence boundaries,
using BILU scheme. (O is impossible)
"""
if token.is_sent_start and token.is_sent_end:
return "U-SENT"
elif token.is_sent_start:
return "B-SENT"
else:
return "I-SENT"
class ClozeMultitask(TrainablePipe):
def __init__(self, vocab, model, **cfg):
self.vocab = vocab
self.model = model
self.cfg = cfg
self.distance = CosineDistance(ignore_zeros=True, normalize=False) # TODO: in config
def set_annotations(self, docs, dep_ids):
pass
def initialize(self, get_examples, nlp=None):
self.model.initialize() # TODO: fix initialization by defining X and Y
X = self.model.ops.alloc((5, self.model.get_ref("tok2vec").get_dim("nO")))
self.model.output_layer.initialize(X)
def predict(self, docs):
tokvecs = self.model.get_ref("tok2vec")(docs)
vectors = self.model.get_ref("output_layer")(tokvecs)
return tokvecs, vectors
def get_loss(self, examples, vectors, prediction):
validate_examples(examples, "ClozeMultitask.get_loss")
# The simplest way to implement this would be to vstack the
# token.vector values, but that's a bit inefficient, especially on GPU.
# Instead we fetch the index into the vectors table for each of our tokens,
# and look them up all at once. This prevents data copying.
ids = self.model.ops.flatten([eg.predicted.to_array(ID).ravel() for eg in examples])
target = vectors[ids]
gradient = self.distance.get_grad(prediction, target)
loss = self.distance.get_loss(prediction, target)
return float(loss), gradient
def update(self, examples, *, drop=0., sgd=None, losses=None):
pass
def rehearse(self, examples, drop=0., sgd=None, losses=None):
if losses is not None and self.name not in losses:
losses[self.name] = 0.
set_dropout_rate(self.model, drop)
validate_examples(examples, "ClozeMultitask.rehearse")
docs = [eg.predicted for eg in examples]
predictions, bp_predictions = self.model.begin_update()
loss, d_predictions = self.get_loss(examples, self.vocab.vectors.data, predictions)
bp_predictions(d_predictions)
if sgd is not None:
self.finish_update(sgd)
if losses is not None:
losses[self.name] += loss
return losses
def add_label(self, label):
raise NotImplementedError

View File

@ -19,13 +19,6 @@ cdef class Pipe:
DOCS: https://spacy.io/api/pipe DOCS: https://spacy.io/api/pipe
""" """
@classmethod
def __init_subclass__(cls, **kwargs):
"""Raise a warning if an inheriting class implements 'begin_training'
(from v2) instead of the new 'initialize' method (from v3)"""
if hasattr(cls, "begin_training"):
warnings.warn(Warnings.W088.format(name=cls.__name__))
def __call__(self, Doc doc) -> Doc: def __call__(self, Doc doc) -> Doc:
"""Apply the pipe to one document. The document is modified in place, """Apply the pipe to one document. The document is modified in place,
and returned. This usually happens under the hood when the nlp object and returned. This usually happens under the hood when the nlp object

View File

@ -3,7 +3,9 @@ from typing import Dict, Iterable, Optional, Callable, List, Union
from itertools import islice from itertools import islice
import srsly import srsly
from thinc.api import Model, SequenceCategoricalCrossentropy, Config from thinc.api import Model, Config
from thinc.legacy import LegacySequenceCategoricalCrossentropy
from thinc.types import Floats2d, Ints1d from thinc.types import Floats2d, Ints1d
from ..tokens.doc cimport Doc from ..tokens.doc cimport Doc
@ -161,7 +163,7 @@ class SentenceRecognizer(Tagger):
""" """
validate_examples(examples, "SentenceRecognizer.get_loss") validate_examples(examples, "SentenceRecognizer.get_loss")
labels = self.labels labels = self.labels
loss_func = SequenceCategoricalCrossentropy(names=labels, normalize=False) loss_func = LegacySequenceCategoricalCrossentropy(names=labels, normalize=False)
truths = [] truths = []
for eg in examples: for eg in examples:
eg_truth = [] eg_truth = []

View File

@ -2,7 +2,7 @@ from typing import List, Dict, Callable, Tuple, Optional, Iterable, Any, cast
from typing import Union from typing import Union
from thinc.api import Config, Model, get_current_ops, set_dropout_rate, Ops from thinc.api import Config, Model, get_current_ops, set_dropout_rate, Ops
from thinc.api import Optimizer from thinc.api import Optimizer
from thinc.types import Ragged, Ints2d, Floats2d, Ints1d from thinc.types import Ragged, Ints2d, Floats2d
import numpy import numpy
@ -282,7 +282,10 @@ class SpanCategorizer(TrainablePipe):
DOCS: https://spacy.io/api/spancategorizer#predict DOCS: https://spacy.io/api/spancategorizer#predict
""" """
indices = self.suggester(docs, ops=self.model.ops) indices = self.suggester(docs, ops=self.model.ops)
scores = self.model.predict((docs, indices)) # type: ignore if indices.lengths.sum() == 0:
scores = self.model.ops.alloc2f(0, 0)
else:
scores = self.model.predict((docs, indices)) # type: ignore
return {"indices": indices, "scores": scores} return {"indices": indices, "scores": scores}
def set_candidates( def set_candidates(

View File

@ -2,7 +2,8 @@
from typing import Callable, Dict, Iterable, List, Optional, Union from typing import Callable, Dict, Iterable, List, Optional, Union
import numpy import numpy
import srsly import srsly
from thinc.api import Model, set_dropout_rate, SequenceCategoricalCrossentropy, Config from thinc.api import Model, set_dropout_rate, Config
from thinc.legacy import LegacySequenceCategoricalCrossentropy
from thinc.types import Floats2d, Ints1d from thinc.types import Floats2d, Ints1d
import warnings import warnings
from itertools import islice from itertools import islice
@ -244,7 +245,7 @@ class Tagger(TrainablePipe):
DOCS: https://spacy.io/api/tagger#rehearse DOCS: https://spacy.io/api/tagger#rehearse
""" """
loss_func = SequenceCategoricalCrossentropy() loss_func = LegacySequenceCategoricalCrossentropy()
if losses is None: if losses is None:
losses = {} losses = {}
losses.setdefault(self.name, 0.0) losses.setdefault(self.name, 0.0)
@ -275,7 +276,7 @@ class Tagger(TrainablePipe):
DOCS: https://spacy.io/api/tagger#get_loss DOCS: https://spacy.io/api/tagger#get_loss
""" """
validate_examples(examples, "Tagger.get_loss") validate_examples(examples, "Tagger.get_loss")
loss_func = SequenceCategoricalCrossentropy(names=self.labels, normalize=False, neg_prefix=self.cfg["neg_prefix"]) loss_func = LegacySequenceCategoricalCrossentropy(names=self.labels, normalize=False, neg_prefix=self.cfg["neg_prefix"])
# Convert empty tag "" to missing value None so that both misaligned # Convert empty tag "" to missing value None so that both misaligned
# tokens and tokens with missing annotation have the default missing # tokens and tokens with missing annotation have the default missing
# value None. # value None.

View File

@ -91,7 +91,6 @@ subword_features = true
"cats_macro_f": None, "cats_macro_f": None,
"cats_macro_auc": None, "cats_macro_auc": None,
"cats_f_per_type": None, "cats_f_per_type": None,
"cats_macro_auc_per_type": None,
}, },
) )
def make_textcat( def make_textcat(
@ -169,7 +168,11 @@ class TextCategorizer(TrainablePipe):
self.model = model self.model = model
self.name = name self.name = name
self._rehearsal_model = None self._rehearsal_model = None
cfg: Dict[str, Any] = {"labels": [], "threshold": threshold, "positive_label": None} cfg: Dict[str, Any] = {
"labels": [],
"threshold": threshold,
"positive_label": None,
}
self.cfg = dict(cfg) self.cfg = dict(cfg)
self.scorer = scorer self.scorer = scorer
self.save_activations = save_activations self.save_activations = save_activations
@ -416,5 +419,9 @@ class TextCategorizer(TrainablePipe):
def _validate_categories(self, examples: Iterable[Example]): def _validate_categories(self, examples: Iterable[Example]):
"""Check whether the provided examples all have single-label cats annotations.""" """Check whether the provided examples all have single-label cats annotations."""
for ex in examples: for ex in examples:
if list(ex.reference.cats.values()).count(1.0) > 1: vals = list(ex.reference.cats.values())
if vals.count(1.0) > 1:
raise ValueError(Errors.E895.format(value=ex.reference.cats)) raise ValueError(Errors.E895.format(value=ex.reference.cats))
for val in vals:
if not (val == 1.0 or val == 0.0):
raise ValueError(Errors.E851.format(val=val))

View File

@ -88,7 +88,6 @@ subword_features = true
"cats_macro_f": None, "cats_macro_f": None,
"cats_macro_auc": None, "cats_macro_auc": None,
"cats_f_per_type": None, "cats_f_per_type": None,
"cats_macro_auc_per_type": None,
}, },
) )
def make_multilabel_textcat( def make_multilabel_textcat(
@ -156,11 +155,8 @@ class MultiLabel_TextCategorizer(TextCategorizer):
name (str): The component instance name, used to add entries to the name (str): The component instance name, used to add entries to the
losses during training. losses during training.
threshold (float): Cutoff to consider a prediction "positive". threshold (float): Cutoff to consider a prediction "positive".
<<<<<<< HEAD
save_activations (bool): save model activations in Doc when annotating.
=======
scorer (Optional[Callable]): The scoring method. scorer (Optional[Callable]): The scoring method.
>>>>>>> upstream/master save_activations (bool): save model activations in Doc when annotating.
DOCS: https://spacy.io/api/textcategorizer#init DOCS: https://spacy.io/api/textcategorizer#init
""" """
@ -205,6 +201,8 @@ class MultiLabel_TextCategorizer(TextCategorizer):
for label in labels: for label in labels:
self.add_label(label) self.add_label(label)
subbatch = list(islice(get_examples(), 10)) subbatch = list(islice(get_examples(), 10))
self._validate_categories(subbatch)
doc_sample = [eg.reference for eg in subbatch] doc_sample = [eg.reference for eg in subbatch]
label_sample, _ = self._examples_to_truth(subbatch) label_sample, _ = self._examples_to_truth(subbatch)
self._require_labels() self._require_labels()
@ -215,4 +213,8 @@ class MultiLabel_TextCategorizer(TextCategorizer):
def _validate_categories(self, examples: Iterable[Example]): def _validate_categories(self, examples: Iterable[Example]):
"""This component allows any type of single- or multi-label annotations. """This component allows any type of single- or multi-label annotations.
This method overwrites the more strict one from 'textcat'.""" This method overwrites the more strict one from 'textcat'."""
pass # check that annotation values are valid
for ex in examples:
for val in ex.reference.cats.values():
if not (val == 1.0 or val == 0.0):
raise ValueError(Errors.E851.format(val=val))

View File

@ -10,12 +10,12 @@ import random
import srsly import srsly
from thinc.api import get_ops, set_dropout_rate, CupyOps, NumpyOps from thinc.api import get_ops, set_dropout_rate, CupyOps, NumpyOps
from thinc.extra.search cimport Beam
import numpy.random import numpy.random
import numpy import numpy
import warnings import warnings
from ._parser_internals.stateclass cimport StateClass from ._parser_internals.stateclass cimport StateClass
from ._parser_internals.search cimport Beam
from ..ml.parser_model cimport alloc_activations, free_activations from ..ml.parser_model cimport alloc_activations, free_activations
from ..ml.parser_model cimport predict_states, arg_max_if_valid from ..ml.parser_model cimport predict_states, arg_max_if_valid
from ..ml.parser_model cimport WeightsC, ActivationsC, SizesC, cpu_log_loss from ..ml.parser_model cimport WeightsC, ActivationsC, SizesC, cpu_log_loss

View File

@ -329,6 +329,7 @@ class ConfigSchemaTraining(BaseModel):
frozen_components: List[str] = Field(..., title="Pipeline components that shouldn't be updated during training") frozen_components: List[str] = Field(..., title="Pipeline components that shouldn't be updated during training")
annotating_components: List[str] = Field(..., title="Pipeline components that should set annotations during training") annotating_components: List[str] = Field(..., title="Pipeline components that should set annotations during training")
before_to_disk: Optional[Callable[["Language"], "Language"]] = Field(..., title="Optional callback to modify nlp object after training, before it's saved to disk") before_to_disk: Optional[Callable[["Language"], "Language"]] = Field(..., title="Optional callback to modify nlp object after training, before it's saved to disk")
before_update: Optional[Callable[["Language", Dict[str, Any]], None]] = Field(..., title="Optional callback that is invoked at the start of each training step")
# fmt: on # fmt: on
class Config: class Config:
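The new `before_update` entry above adds a hook that runs at the start of every training step. A minimal sketch of how such a callback could be registered, assuming it is wired up through the `callbacks` registry like the other training hooks; the registry entry name is illustrative, and the `step`/`epoch` keys in `args` follow the `test_training_before_update` test added later in this diff:

```python
from typing import Any, Dict

import spacy
from spacy.language import Language


@spacy.registry.callbacks("illustrative_before_update.v1")
def make_before_update():
    def before_update(nlp: Language, args: Dict[str, Any]) -> None:
        # args carries at least the current training "step" and "epoch"
        print(f"epoch {args['epoch']}, starting step {args['step']}")

    return before_update
```

In a training config this would presumably be referenced from a `[training.before_update]` block with `@callbacks = "illustrative_before_update.v1"`, mirroring how `before_to_disk` is configured.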

View File

@ -40,7 +40,7 @@ py.test spacy/tests/tokenizer/test_exceptions.py::test_tokenizer_handles_emoji #
To keep the behavior of the tests consistent and predictable, we try to follow a few basic conventions: To keep the behavior of the tests consistent and predictable, we try to follow a few basic conventions:
- **Test names** should follow a pattern of `test_[module]_[tested behaviour]`. For example: `test_tokenizer_keeps_email` or `test_spans_override_sentiment`. - **Test names** should follow a pattern of `test_[module]_[tested behaviour]`. For example: `test_tokenizer_keeps_email`.
- If you're testing for a bug reported in a specific issue, always create a **regression test**. Regression tests should be named `test_issue[ISSUE NUMBER]` and live in the [`regression`](regression) directory. - If you're testing for a bug reported in a specific issue, always create a **regression test**. Regression tests should be named `test_issue[ISSUE NUMBER]` and live in the [`regression`](regression) directory.
- Only use `@pytest.mark.xfail` for tests that **should pass, but currently fail**. To test for desired negative behavior, use `assert not` in your test. - Only use `@pytest.mark.xfail` for tests that **should pass, but currently fail**. To test for desired negative behavior, use `assert not` in your test.
- Very **extensive tests** that take a long time to run should be marked with `@pytest.mark.slow`. If your slow test is testing important behavior, consider adding an additional simpler version. - Very **extensive tests** that take a long time to run should be marked with `@pytest.mark.slow`. If your slow test is testing important behavior, consider adding an additional simpler version.
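A small illustrative test that follows the naming and issue conventions above (the issue number and assertions are made up):

```python
import pytest


@pytest.mark.issue(12345)  # hypothetical issue number
def test_issue12345(en_tokenizer):
    """Regression test sketch: tokenization of plain words is unaffected."""
    doc = en_tokenizer("hello world")
    # desired negative behaviour is expressed with `assert not`, not xfail
    assert not doc[0].is_punct
```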

View File

@ -1,6 +1,10 @@
import pytest import pytest
from spacy.util import get_lang_class from spacy.util import get_lang_class
import functools
from hypothesis import settings from hypothesis import settings
import inspect
import importlib
import sys
# Functionally disable deadline settings for tests # Functionally disable deadline settings for tests
# to prevent spurious test failures in CI builds. # to prevent spurious test failures in CI builds.
@ -47,6 +51,33 @@ def pytest_runtest_setup(item):
pytest.skip("not referencing any issues") pytest.skip("not referencing any issues")
# Decorator for Cython-built tests
# https://shwina.github.io/cython-testing/
def cytest(func):
"""
Wraps `func` in a plain Python function.
"""
@functools.wraps(func)
def wrapped(*args, **kwargs):
bound = inspect.signature(func).bind(*args, **kwargs)
return func(*bound.args, **bound.kwargs)
return wrapped
def register_cython_tests(cython_mod_name: str, test_mod_name: str):
"""
Registers all callables with name `test_*` in Cython module `cython_mod_name`
as attributes in module `test_mod_name`, making them discoverable by pytest.
"""
cython_mod = importlib.import_module(cython_mod_name)
for name in dir(cython_mod):
item = getattr(cython_mod, name)
if callable(item) and name.startswith("test_"):
setattr(sys.modules[test_mod_name], name, item)
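How the two helpers above are meant to be combined (module names below are made up; the real usage is the parser `_search` test pair added later in this diff): the Cython module decorates its tests with `@cytest`, and a small plain-Python shim registers them so pytest can discover them:

```python
# Hypothetical shim module, e.g. spacy/tests/foo/test_bar.py, exposing the
# @cytest-decorated functions compiled into spacy/tests/foo/_bar.pyx
from ..conftest import register_cython_tests

register_cython_tests("spacy.tests.foo._bar", __name__)
```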
# Fixtures for language tokenizers (languages sorted alphabetically) # Fixtures for language tokenizers (languages sorted alphabetically)
@ -351,17 +382,17 @@ def ru_tokenizer():
return get_lang_class("ru")().tokenizer return get_lang_class("ru")().tokenizer
@pytest.fixture @pytest.fixture(scope="session")
def ru_lemmatizer(): def ru_lemmatizer():
pytest.importorskip("pymorphy3") pytest.importorskip("pymorphy3")
return get_lang_class("ru")().add_pipe("lemmatizer") return get_lang_class("ru")().add_pipe("lemmatizer")
@pytest.fixture @pytest.fixture(scope="session")
def ru_lookup_lemmatizer(): def ru_lookup_lemmatizer():
pytest.importorskip("pymorphy2") pytest.importorskip("pymorphy3")
return get_lang_class("ru")().add_pipe( return get_lang_class("ru")().add_pipe(
"lemmatizer", config={"mode": "pymorphy2_lookup"} "lemmatizer", config={"mode": "pymorphy3_lookup"}
) )
@ -437,19 +468,19 @@ def uk_tokenizer():
return get_lang_class("uk")().tokenizer return get_lang_class("uk")().tokenizer
@pytest.fixture @pytest.fixture(scope="session")
def uk_lemmatizer(): def uk_lemmatizer():
pytest.importorskip("pymorphy3") pytest.importorskip("pymorphy3")
pytest.importorskip("pymorphy3_dicts_uk") pytest.importorskip("pymorphy3_dicts_uk")
return get_lang_class("uk")().add_pipe("lemmatizer") return get_lang_class("uk")().add_pipe("lemmatizer")
@pytest.fixture @pytest.fixture(scope="session")
def uk_lookup_lemmatizer(): def uk_lookup_lemmatizer():
pytest.importorskip("pymorphy2") pytest.importorskip("pymorphy3")
pytest.importorskip("pymorphy2_dicts_uk") pytest.importorskip("pymorphy3_dicts_uk")
return get_lang_class("uk")().add_pipe( return get_lang_class("uk")().add_pipe(
"lemmatizer", config={"mode": "pymorphy2_lookup"} "lemmatizer", config={"mode": "pymorphy3_lookup"}
) )

View File

@ -123,14 +123,14 @@ def test_doc_from_array_heads_in_bounds(en_vocab):
# head before start # head before start
arr = doc.to_array(["HEAD"]) arr = doc.to_array(["HEAD"])
arr[0] = -1 arr[0] = numpy.int32(-1).astype(numpy.uint64)
doc_from_array = Doc(en_vocab, words=words) doc_from_array = Doc(en_vocab, words=words)
with pytest.raises(ValueError): with pytest.raises(ValueError):
doc_from_array.from_array(["HEAD"], arr) doc_from_array.from_array(["HEAD"], arr)
# head after end # head after end
arr = doc.to_array(["HEAD"]) arr = doc.to_array(["HEAD"])
arr[0] = 5 arr[0] = numpy.int32(5).astype(numpy.uint64)
doc_from_array = Doc(en_vocab, words=words) doc_from_array = Doc(en_vocab, words=words)
with pytest.raises(ValueError): with pytest.raises(ValueError):
doc_from_array.from_array(["HEAD"], arr) doc_from_array.from_array(["HEAD"], arr)
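The casts added above write the negative or out-of-range head offset into the uint64 array as an explicit two's-complement value, instead of assigning a raw negative int (which recent NumPy versions no longer accept for unsigned arrays). A minimal sketch of the wrap-around, not part of the test suite:

```python
import numpy

arr = numpy.zeros((1,), dtype=numpy.uint64)
arr[0] = numpy.int32(-1).astype(numpy.uint64)  # stores the wrapped value
# reading it back as a signed integer recovers the original -1
assert int(arr[0].astype(numpy.int64)) == -1
```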

View File

@ -380,9 +380,7 @@ def test_doc_api_serialize(en_tokenizer, text):
assert [t.text for t in tokens] == [t.text for t in new_tokens] assert [t.text for t in tokens] == [t.text for t in new_tokens]
assert [t.orth for t in tokens] == [t.orth for t in new_tokens] assert [t.orth for t in tokens] == [t.orth for t in new_tokens]
new_tokens = Doc(tokens.vocab).from_bytes( new_tokens = Doc(tokens.vocab).from_bytes(tokens.to_bytes())
tokens.to_bytes(exclude=["sentiment"]), exclude=["sentiment"]
)
assert tokens.text == new_tokens.text assert tokens.text == new_tokens.text
assert [t.text for t in tokens] == [t.text for t in new_tokens] assert [t.text for t in tokens] == [t.text for t in new_tokens]
assert [t.orth for t in tokens] == [t.orth for t in new_tokens] assert [t.orth for t in tokens] == [t.orth for t in new_tokens]
@ -990,3 +988,12 @@ def test_doc_spans_setdefault(en_tokenizer):
assert len(doc.spans["key2"]) == 1 assert len(doc.spans["key2"]) == 1
doc.spans.setdefault("key3", default=SpanGroup(doc, spans=[doc[0:1], doc[1:2]])) doc.spans.setdefault("key3", default=SpanGroup(doc, spans=[doc[0:1], doc[1:2]]))
assert len(doc.spans["key3"]) == 2 assert len(doc.spans["key3"]) == 2
def test_doc_sentiment_from_bytes_v3_to_v4():
"""Test if a doc with sentiment attribute created in v3.x works with '.from_bytes' in v4.x without throwing errors. The sentiment attribute was removed in v4"""
doc_bytes = b"\x89\xa4text\xa5happy\xaaarray_head\x9fGQACKOLMN\xcd\x01\xc4\xcd\x01\xc6I\xcd\x01\xc5JP\xaaarray_body\x85\xc4\x02nd\xc3\xc4\x04type\xa3<u8\xc4\x04kind\xc4\x00\xc4\x05shape\x92\x01\x0f\xc4\x04data\xc4x\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa4\x9a\xd3\x17\xca\xf0b\x03\xa4\x9a\xd3\x17\xca\xf0b\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xa9sentiment\xcb?\xf0\x00\x00\x00\x00\x00\x00\xa6tensor\x85\xc4\x02nd\xc3\xc4\x04type\xa3<f4\xc4\x04kind\xc4\x00\xc4\x05shape\x91\x00\xc4\x04data\xc4\x00\xa4cats\x80\xa5spans\xc4\x01\x90\xa7strings\x92\xa0\xa5happy\xb2has_unknown_spaces\xc2"
doc = Doc(Vocab()).from_bytes(doc_bytes)
assert doc.text == "happy"
with pytest.raises(AttributeError):
doc.sentiment == 1.0

View File

@ -370,3 +370,12 @@ def test_json_to_doc_validation_error(doc):
doc_json.pop("tokens") doc_json.pop("tokens")
with pytest.raises(ValueError): with pytest.raises(ValueError):
Doc(doc.vocab).from_json(doc_json, validate=True) Doc(doc.vocab).from_json(doc_json, validate=True)
def test_to_json_underscore_doc_getters(doc):
def get_text_length(doc):
return len(doc.text)
Doc.set_extension("text_length", getter=get_text_length)
doc_json = doc.to_json(underscore=["text_length"])
assert doc_json["_"]["text_length"] == get_text_length(doc)

View File

@ -305,31 +305,6 @@ def test_span_similarity_match():
assert span1[:1].similarity(doc.vocab["a"]) == 1.0 assert span1[:1].similarity(doc.vocab["a"]) == 1.0
def test_spans_default_sentiment(en_tokenizer):
"""Test span.sentiment property's default averaging behaviour"""
text = "good stuff bad stuff"
tokens = en_tokenizer(text)
tokens.vocab[tokens[0].text].sentiment = 3.0
tokens.vocab[tokens[2].text].sentiment = -2.0
doc = Doc(tokens.vocab, words=[t.text for t in tokens])
assert doc[:2].sentiment == 3.0 / 2
assert doc[-2:].sentiment == -2.0 / 2
assert doc[:-1].sentiment == (3.0 + -2) / 3.0
def test_spans_override_sentiment(en_tokenizer):
"""Test span.sentiment property's default averaging behaviour"""
text = "good stuff bad stuff"
tokens = en_tokenizer(text)
tokens.vocab[tokens[0].text].sentiment = 3.0
tokens.vocab[tokens[2].text].sentiment = -2.0
doc = Doc(tokens.vocab, words=[t.text for t in tokens])
doc.user_span_hooks["sentiment"] = lambda span: 10.0
assert doc[:2].sentiment == 10.0
assert doc[-2:].sentiment == 10.0
assert doc[:-1].sentiment == 10.0
def test_spans_are_hashable(en_tokenizer): def test_spans_are_hashable(en_tokenizer):
"""Test spans can be hashed.""" """Test spans can be hashed."""
text = "good stuff bad stuff" text = "good stuff bad stuff"

View File

@ -1,7 +1,10 @@
from typing import List
import pytest import pytest
from random import Random from random import Random
from spacy.matcher import Matcher from spacy.matcher import Matcher
from spacy.tokens import Span, SpanGroup from spacy.tokens import Span, SpanGroup, Doc
from spacy.util import filter_spans
@pytest.fixture @pytest.fixture
@ -242,3 +245,13 @@ def test_span_group_extend(doc):
def test_span_group_dealloc(span_group): def test_span_group_dealloc(span_group):
with pytest.raises(AttributeError): with pytest.raises(AttributeError):
print(span_group.doc) print(span_group.doc)
@pytest.mark.issue(11975)
def test_span_group_typing(doc: Doc):
"""Tests whether typing of `SpanGroup` as `Iterable[Span]`-like object is accepted by mypy."""
span_group: SpanGroup = doc.spans["SPANS"]
spans: List[Span] = list(span_group)
for i, span in enumerate(span_group):
assert span == span_group[i] == spans[i]
filter_spans(span_group)

View File

@ -3,6 +3,10 @@ from mock import Mock
from spacy.tokens import Doc, Span, Token from spacy.tokens import Doc, Span, Token
from spacy.tokens.underscore import Underscore from spacy.tokens.underscore import Underscore
# Helper function
def _get_tuple(s: Span):
return "._.", "span_extension", s.start_char, s.end_char, s.label, s.kb_id, s.id
@pytest.fixture(scope="function", autouse=True) @pytest.fixture(scope="function", autouse=True)
def clean_underscore(): def clean_underscore():
@ -171,3 +175,118 @@ def test_underscore_docstring(en_vocab):
doc = Doc(en_vocab, words=["hello", "world"]) doc = Doc(en_vocab, words=["hello", "world"])
assert test_method.__doc__ == "I am a docstring" assert test_method.__doc__ == "I am a docstring"
assert doc._.test_docstrings.__doc__.rsplit(". ")[-1] == "I am a docstring" assert doc._.test_docstrings.__doc__.rsplit(". ")[-1] == "I am a docstring"
def test_underscore_for_unique_span(en_tokenizer):
"""Test that spans with the same boundaries but with different labels are uniquely identified (see #9706)."""
Doc.set_extension(name="doc_extension", default=None)
Span.set_extension(name="span_extension", default=None)
Token.set_extension(name="token_extension", default=None)
# Initialize doc
text = "Hello, world!"
doc = en_tokenizer(text)
span_1 = Span(doc, 0, 2, "SPAN_1")
span_2 = Span(doc, 0, 2, "SPAN_2")
# Set custom extensions
doc._.doc_extension = "doc extension"
doc[0]._.token_extension = "token extension"
span_1._.span_extension = "span_1 extension"
span_2._.span_extension = "span_2 extension"
# Assert extensions
assert doc.user_data[_get_tuple(span_1)] == "span_1 extension"
assert doc.user_data[_get_tuple(span_2)] == "span_2 extension"
# Change label of span and assert extensions
span_1.label_ = "NEW_LABEL"
assert doc.user_data[_get_tuple(span_1)] == "span_1 extension"
assert doc.user_data[_get_tuple(span_2)] == "span_2 extension"
# Change KB_ID and assert extensions
span_1.kb_id_ = "KB_ID"
assert doc.user_data[_get_tuple(span_1)] == "span_1 extension"
assert doc.user_data[_get_tuple(span_2)] == "span_2 extension"
# Change extensions and assert
span_2._.span_extension = "updated span_2 extension"
assert doc.user_data[_get_tuple(span_1)] == "span_1 extension"
assert doc.user_data[_get_tuple(span_2)] == "updated span_2 extension"
# Change span ID and assert extensions
span_2.id = 2
assert doc.user_data[_get_tuple(span_1)] == "span_1 extension"
assert doc.user_data[_get_tuple(span_2)] == "updated span_2 extension"
# Assert extensions with original key
assert doc.user_data[("._.", "doc_extension", None, None)] == "doc extension"
assert doc.user_data[("._.", "token_extension", 0, None)] == "token extension"
def test_underscore_for_unique_span_from_docs(en_tokenizer):
"""Test that spans in the user_data keep the same data structure when using Doc.from_docs"""
Span.set_extension(name="span_extension", default=None)
Token.set_extension(name="token_extension", default=None)
# Initialize doc
text_1 = "Hello, world!"
doc_1 = en_tokenizer(text_1)
span_1a = Span(doc_1, 0, 2, "SPAN_1a")
span_1b = Span(doc_1, 0, 2, "SPAN_1b")
text_2 = "This is a test."
doc_2 = en_tokenizer(text_2)
span_2a = Span(doc_2, 0, 3, "SPAN_2a")
# Set custom extensions
doc_1[0]._.token_extension = "token_1"
doc_2[1]._.token_extension = "token_2"
span_1a._.span_extension = "span_1a extension"
span_1b._.span_extension = "span_1b extension"
span_2a._.span_extension = "span_2a extension"
doc = Doc.from_docs([doc_1, doc_2])
# Assert extensions
assert doc_1.user_data[_get_tuple(span_1a)] == "span_1a extension"
assert doc_1.user_data[_get_tuple(span_1b)] == "span_1b extension"
assert doc_2.user_data[_get_tuple(span_2a)] == "span_2a extension"
# Check extensions on merged doc
assert doc.user_data[_get_tuple(span_1a)] == "span_1a extension"
assert doc.user_data[_get_tuple(span_1b)] == "span_1b extension"
assert (
doc.user_data[
(
"._.",
"span_extension",
span_2a.start_char + len(doc_1.text) + 1,
span_2a.end_char + len(doc_1.text) + 1,
span_2a.label,
span_2a.kb_id,
span_2a.id,
)
]
== "span_2a extension"
)
def test_underscore_for_unique_span_as_span(en_tokenizer):
"""Test that spans in the user_data keep the same data structure when using Span.as_doc"""
Span.set_extension(name="span_extension", default=None)
# Initialize doc
text = "Hello, world!"
doc = en_tokenizer(text)
span_1 = Span(doc, 0, 2, "SPAN_1")
span_2 = Span(doc, 0, 2, "SPAN_2")
# Set custom extensions
span_1._.span_extension = "span_1 extension"
span_2._.span_extension = "span_2 extension"
span_doc = span_1.as_doc(copy_user_data=True)
# Assert extensions
assert span_doc.user_data[_get_tuple(span_1)] == "span_1 extension"
assert span_doc.user_data[_get_tuple(span_2)] == "span_2 extension"

View File

@ -81,6 +81,7 @@ def test_ru_lemmatizer_punct(ru_lemmatizer):
def test_ru_doc_lookup_lemmatization(ru_lookup_lemmatizer): def test_ru_doc_lookup_lemmatization(ru_lookup_lemmatizer):
assert ru_lookup_lemmatizer.mode == "pymorphy3_lookup"
words = ["мама", "мыла", "раму"] words = ["мама", "мыла", "раму"]
pos = ["NOUN", "VERB", "NOUN"] pos = ["NOUN", "VERB", "NOUN"]
morphs = [ morphs = [
@ -92,3 +93,17 @@ def test_ru_doc_lookup_lemmatization(ru_lookup_lemmatizer):
doc = ru_lookup_lemmatizer(doc) doc = ru_lookup_lemmatizer(doc)
lemmas = [token.lemma_ for token in doc] lemmas = [token.lemma_ for token in doc]
assert lemmas == ["мама", "мыла", "раму"] assert lemmas == ["мама", "мыла", "раму"]
@pytest.mark.parametrize(
"word,lemma",
(
("бременем", "бремя"),
("будешь", "быть"),
("какая-то", "какой-то"),
),
)
def test_ru_lookup_lemmatizer(ru_lookup_lemmatizer, word, lemma):
assert ru_lookup_lemmatizer.mode == "pymorphy3_lookup"
doc = Doc(ru_lookup_lemmatizer.vocab, words=[word])
assert ru_lookup_lemmatizer(doc)[0].lemma_ == lemma

View File

@ -8,12 +8,20 @@ pytestmark = pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_uk_lemmatizer(uk_lemmatizer): def test_uk_lemmatizer(uk_lemmatizer):
"""Check that the default uk lemmatizer runs.""" """Check that the default uk lemmatizer runs."""
doc = Doc(uk_lemmatizer.vocab, words=["a", "b", "c"]) doc = Doc(uk_lemmatizer.vocab, words=["a", "b", "c"])
assert uk_lemmatizer.mode == "pymorphy3"
uk_lemmatizer(doc) uk_lemmatizer(doc)
assert [token.lemma for token in doc] assert [token.lemma for token in doc]
def test_uk_lookup_lemmatizer(uk_lookup_lemmatizer): @pytest.mark.parametrize(
"""Check that the lookup uk lemmatizer runs.""" "word,lemma",
doc = Doc(uk_lookup_lemmatizer.vocab, words=["a", "b", "c"]) (
uk_lookup_lemmatizer(doc) ("якийсь", "якийсь"),
assert [token.lemma for token in doc] ("розповідають", "розповідати"),
("розповіси", "розповісти"),
),
)
def test_uk_lookup_lemmatizer(uk_lookup_lemmatizer, word, lemma):
assert uk_lookup_lemmatizer.mode == "pymorphy3_lookup"
doc = Doc(uk_lookup_lemmatizer.vocab, words=[word])
assert uk_lookup_lemmatizer(doc)[0].lemma_ == lemma

View File

@ -50,8 +50,6 @@ def test_matcher_from_usage_docs(en_vocab):
def label_sentiment(matcher, doc, i, matches): def label_sentiment(matcher, doc, i, matches):
match_id, start, end = matches[i] match_id, start, end = matches[i]
if doc.vocab.strings[match_id] == "HAPPY":
doc.sentiment += 0.1
span = doc[start:end] span = doc[start:end]
with doc.retokenize() as retokenizer: with doc.retokenize() as retokenizer:
retokenizer.merge(span) retokenizer.merge(span)
@ -61,7 +59,6 @@ def test_matcher_from_usage_docs(en_vocab):
matcher = Matcher(en_vocab) matcher = Matcher(en_vocab)
matcher.add("HAPPY", pos_patterns, on_match=label_sentiment) matcher.add("HAPPY", pos_patterns, on_match=label_sentiment)
matcher(doc) matcher(doc)
assert doc.sentiment != 0
assert doc[1].norm_ == "happy emoji" assert doc[1].norm_ == "happy emoji"

View File

@ -0,0 +1,119 @@
# cython: infer_types=True, binding=True
from spacy.pipeline._parser_internals.search cimport Beam, MaxViolation
from spacy.typedefs cimport class_t, weight_t
from cymem.cymem cimport Pool
from ..conftest import cytest
import pytest
cdef struct TestState:
int length
int x
Py_UNICODE* string
cdef int transition(void* dest, void* src, class_t clas, void* extra_args) except -1:
dest_state = <TestState*>dest
src_state = <TestState*>src
dest_state.length = src_state.length
dest_state.x = src_state.x
dest_state.x += clas
if extra_args != NULL:
dest_state.string = <Py_UNICODE*>extra_args
else:
dest_state.string = src_state.string
cdef void* initialize(Pool mem, int n, void* extra_args) except NULL:
state = <TestState*>mem.alloc(1, sizeof(TestState))
state.length = n
state.x = 1
if extra_args == NULL:
state.string = u'default'
else:
state.string = <Py_UNICODE*>extra_args
return state
cdef int destroy(Pool mem, void* state, void* extra_args) except -1:
state = <TestState*>state
mem.free(state)
@cytest
@pytest.mark.parametrize("nr_class,beam_width",
[
(2, 3),
(3, 6),
(4, 20),
]
)
def test_init(nr_class, beam_width):
b = Beam(nr_class, beam_width)
assert b.size == 1
assert b.width == beam_width
assert b.nr_class == nr_class
@cytest
def test_init_violn():
MaxViolation()
@cytest
@pytest.mark.parametrize("nr_class,beam_width,length",
[
(2, 3, 3),
(3, 6, 15),
(4, 20, 32),
]
)
def test_initialize(nr_class, beam_width, length):
b = Beam(nr_class, beam_width)
b.initialize(initialize, destroy, length, NULL)
for i in range(b.width):
s = <TestState*>b.at(i)
assert s.length == length, s.length
assert s.string == 'default'
@cytest
@pytest.mark.parametrize("nr_class,beam_width,length,extra",
[
(2, 3, 4, None),
(3, 6, 15, u"test beam 1"),
]
)
def test_initialize_extra(nr_class, beam_width, length, extra):
b = Beam(nr_class, beam_width)
if extra is None:
b.initialize(initialize, destroy, length, NULL)
else:
b.initialize(initialize, destroy, length, <void*><Py_UNICODE*>extra)
for i in range(b.width):
s = <TestState*>b.at(i)
assert s.length == length
@cytest
@pytest.mark.parametrize("nr_class,beam_width,length",
[
(3, 6, 15),
(4, 20, 32),
]
)
def test_transition(nr_class, beam_width, length):
b = Beam(nr_class, beam_width)
b.initialize(initialize, destroy, length, NULL)
b.set_cell(0, 2, 30, True, 0)
b.set_cell(0, 1, 42, False, 0)
b.advance(transition, NULL, NULL)
assert b.size == 1, b.size
assert b.score == 30, b.score
s = <TestState*>b.at(0)
assert s.x == 3
assert b._states[0].score == 30, b._states[0].score
b.set_cell(0, 1, 10, True, 0)
b.set_cell(0, 2, 20, True, 0)
b.advance(transition, NULL, NULL)
assert b._states[0].score == 50, b._states[0].score
assert b._states[1].score == 40
s = <TestState*>b.at(0)
assert s.x == 5

View File

@ -0,0 +1,3 @@
from ..conftest import register_cython_tests
register_cython_tests("spacy.tests.parser._search", __name__)

View File

@ -62,10 +62,45 @@ def test_initialize_from_labels():
nlp2 = Language() nlp2 = Language()
lemmatizer2 = nlp2.add_pipe("trainable_lemmatizer") lemmatizer2 = nlp2.add_pipe("trainable_lemmatizer")
lemmatizer2.initialize( lemmatizer2.initialize(
get_examples=lambda: train_examples, # We want to check that the strings in replacement nodes are
# added to the string store. Make sure they are not added through
# the examples.
get_examples=lambda: train_examples[:1],
labels=lemmatizer.label_data, labels=lemmatizer.label_data,
) )
assert lemmatizer2.tree2label == {1: 0, 3: 1, 4: 2, 6: 3} assert lemmatizer2.tree2label == {1: 0, 3: 1, 4: 2, 6: 3}
assert lemmatizer2.label_data == {
"trees": [
{"orig": "S", "subst": "s"},
{
"prefix_len": 1,
"suffix_len": 0,
"prefix_tree": 0,
"suffix_tree": 4294967295,
},
{"orig": "s", "subst": ""},
{
"prefix_len": 0,
"suffix_len": 1,
"prefix_tree": 4294967295,
"suffix_tree": 2,
},
{
"prefix_len": 0,
"suffix_len": 0,
"prefix_tree": 4294967295,
"suffix_tree": 4294967295,
},
{"orig": "E", "subst": "e"},
{
"prefix_len": 1,
"suffix_len": 0,
"prefix_tree": 5,
"suffix_tree": 4294967295,
},
],
"labels": (1, 3, 4, 6),
}
def test_no_data(): def test_no_data():

View File

@ -529,17 +529,6 @@ def test_pipe_label_data_no_labels(pipe):
assert "labels" not in get_arg_names(initialize) assert "labels" not in get_arg_names(initialize)
def test_warning_pipe_begin_training():
with pytest.warns(UserWarning, match="begin_training"):
class IncompatPipe(TrainablePipe):
def __init__(self):
...
def begin_training(*args, **kwargs):
...
def test_pipe_methods_initialize(): def test_pipe_methods_initialize():
"""Test that the [initialize] config reflects the components correctly.""" """Test that the [initialize] config reflects the components correctly."""
nlp = Language() nlp = Language()
@ -615,20 +604,18 @@ def test_enable_disable_conflict_with_config():
with make_tempdir() as tmp_dir: with make_tempdir() as tmp_dir:
nlp.to_disk(tmp_dir) nlp.to_disk(tmp_dir)
# Expected to fail, as config and arguments conflict. # Expected to succeed, as config and arguments do not conflict.
with pytest.raises(ValueError): assert spacy.load(
spacy.load( tmp_dir, enable=["tagger"], config={"nlp": {"disabled": ["senter"]}}
tmp_dir, enable=["tagger"], config={"nlp": {"disabled": ["senter"]}} ).disabled == ["senter", "sentencizer"]
)
# Expected to succeed without warning due to the lack of a conflicting config option. # Expected to succeed without warning due to the lack of a conflicting config option.
spacy.load(tmp_dir, enable=["tagger"]) spacy.load(tmp_dir, enable=["tagger"])
# Expected to succeed with a warning, as disable=[] should override the config setting. # Expected to fail due to conflict between enable and disabled.
with pytest.warns(UserWarning): with pytest.raises(ValueError):
spacy.load( spacy.load(
tmp_dir, tmp_dir,
enable=["tagger"], enable=["senter"],
disable=[], config={"nlp": {"disabled": ["senter", "tagger"]}},
config={"nlp": {"disabled": ["senter"]}},
) )

View File

@ -372,24 +372,39 @@ def test_overfitting_IO_overlapping():
def test_zero_suggestions(): def test_zero_suggestions():
# Test with a suggester that returns 0 suggestions # Test with a suggester that can return 0 suggestions
@registry.misc("test_zero_suggester") @registry.misc("test_mixed_zero_suggester")
def make_zero_suggester(): def make_mixed_zero_suggester():
def zero_suggester(docs, *, ops=None): def mixed_zero_suggester(docs, *, ops=None):
if ops is None: if ops is None:
ops = get_current_ops() ops = get_current_ops()
return Ragged( spans = []
ops.xp.zeros((0, 0), dtype="i"), ops.xp.zeros((len(docs),), dtype="i") lengths = []
) for doc in docs:
if len(doc) > 0 and len(doc) % 2 == 0:
spans.append((0, 1))
lengths.append(1)
else:
lengths.append(0)
spans = ops.asarray2i(spans)
lengths_array = ops.asarray1i(lengths)
if len(spans) > 0:
output = Ragged(ops.xp.vstack(spans), lengths_array)
else:
output = Ragged(ops.xp.zeros((0, 0), dtype="i"), lengths_array)
return output
return zero_suggester return mixed_zero_suggester
fix_random_seed(0) fix_random_seed(0)
nlp = English() nlp = English()
spancat = nlp.add_pipe( spancat = nlp.add_pipe(
"spancat", "spancat",
config={"suggester": {"@misc": "test_zero_suggester"}, "spans_key": SPAN_KEY}, config={
"suggester": {"@misc": "test_mixed_zero_suggester"},
"spans_key": SPAN_KEY,
},
) )
train_examples = make_examples(nlp) train_examples = make_examples(nlp)
optimizer = nlp.initialize(get_examples=lambda: train_examples) optimizer = nlp.initialize(get_examples=lambda: train_examples)
@ -397,6 +412,16 @@ def test_zero_suggestions():
assert set(spancat.labels) == {"LOC", "PERSON"} assert set(spancat.labels) == {"LOC", "PERSON"}
nlp.update(train_examples, sgd=optimizer) nlp.update(train_examples, sgd=optimizer)
# empty doc
nlp("")
# single doc with zero suggestions
nlp("one")
# single doc with one suggestion
nlp("two two")
# batch with mixed zero/one suggestions
list(nlp.pipe(["one", "two two", "three three three", "", "four four four four"]))
# batch with no suggestions
list(nlp.pipe(["", "one", "three three three"]))
def test_set_candidates(): def test_set_candidates():

View File

@ -361,6 +361,30 @@ def test_label_types(name):
nlp.initialize() nlp.initialize()
@pytest.mark.parametrize(
"name,get_examples",
[
("textcat", make_get_examples_single_label),
("textcat_multilabel", make_get_examples_multi_label),
],
)
def test_invalid_label_value(name, get_examples):
nlp = Language()
textcat = nlp.add_pipe(name)
example_getter = get_examples(nlp)
def invalid_examples():
# make one example with an invalid score
examples = example_getter()
ref = examples[0].reference
key = list(ref.cats.keys())[0]
ref.cats[key] = 2.0
return examples
with pytest.raises(ValueError):
nlp.initialize(get_examples=invalid_examples)
@pytest.mark.parametrize("name", ["textcat", "textcat_multilabel"]) @pytest.mark.parametrize("name", ["textcat", "textcat_multilabel"])
def test_no_label(name): def test_no_label(name):
nlp = Language() nlp = Language()
@ -815,8 +839,8 @@ def test_textcat_loss(multi_label: bool, expected_loss: float):
textcat = nlp.add_pipe("textcat_multilabel") textcat = nlp.add_pipe("textcat_multilabel")
else: else:
textcat = nlp.add_pipe("textcat") textcat = nlp.add_pipe("textcat")
textcat.initialize(lambda: train_examples)
assert isinstance(textcat, TextCategorizer) assert isinstance(textcat, TextCategorizer)
textcat.initialize(lambda: train_examples)
scores = textcat.model.ops.asarray( scores = textcat.model.ops.asarray(
[[0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 1.0, 1.0]], dtype="f" # type: ignore [[0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 1.0, 1.0]], dtype="f" # type: ignore
) )

View File

@ -361,11 +361,10 @@ def test_serialize_pipeline_disable_enable():
assert nlp3.component_names == ["ner", "tagger"] assert nlp3.component_names == ["ner", "tagger"]
with make_tempdir() as d: with make_tempdir() as d:
nlp3.to_disk(d) nlp3.to_disk(d)
with pytest.warns(UserWarning): nlp4 = spacy.load(d, disable=["ner"])
nlp4 = spacy.load(d, disable=["ner"]) assert nlp4.pipe_names == []
assert nlp4.pipe_names == ["tagger"]
assert nlp4.component_names == ["ner", "tagger"] assert nlp4.component_names == ["ner", "tagger"]
assert nlp4.disabled == ["ner"] assert nlp4.disabled == ["ner", "tagger"]
with make_tempdir() as d: with make_tempdir() as d:
nlp.to_disk(d) nlp.to_disk(d)
nlp5 = spacy.load(d, exclude=["tagger"]) nlp5 = spacy.load(d, exclude=["tagger"])

View File

@ -1,8 +1,12 @@
import os import os
import math import math
from random import sample from collections import Counter
from typing import Counter from typing import Tuple, List, Dict, Any
import pkg_resources
import time
import spacy
import numpy
import pytest import pytest
import srsly import srsly
from click import NoSuchOption from click import NoSuchOption
@ -15,6 +19,7 @@ from spacy.cli._util import is_subpath_of, load_project_config
from spacy.cli._util import parse_config_overrides, string_to_list from spacy.cli._util import parse_config_overrides, string_to_list
from spacy.cli._util import substitute_project_variables from spacy.cli._util import substitute_project_variables
from spacy.cli._util import validate_project_commands from spacy.cli._util import validate_project_commands
from spacy.cli._util import upload_file, download_file
from spacy.cli.debug_data import _compile_gold, _get_labels_from_model from spacy.cli.debug_data import _compile_gold, _get_labels_from_model
from spacy.cli.debug_data import _get_labels_from_spancat from spacy.cli.debug_data import _get_labels_from_spancat
from spacy.cli.debug_data import _get_distribution, _get_kl_divergence from spacy.cli.debug_data import _get_distribution, _get_kl_divergence
@ -25,12 +30,16 @@ from spacy.cli.download import get_compatibility, get_version
from spacy.cli.init_config import RECOMMENDATIONS, init_config, fill_config from spacy.cli.init_config import RECOMMENDATIONS, init_config, fill_config
from spacy.cli.package import get_third_party_dependencies from spacy.cli.package import get_third_party_dependencies
from spacy.cli.package import _is_permitted_package_name from spacy.cli.package import _is_permitted_package_name
from spacy.cli.project.remote_storage import RemoteStorage
from spacy.cli.project.run import _check_requirements
from spacy.cli.validate import get_model_pkgs from spacy.cli.validate import get_model_pkgs
from spacy.cli.apply import apply
from spacy.cli.find_threshold import find_threshold
from spacy.lang.en import English from spacy.lang.en import English
from spacy.lang.nl import Dutch from spacy.lang.nl import Dutch
from spacy.language import Language from spacy.language import Language
from spacy.schemas import ProjectConfigSchema, RecommendationSchema, validate from spacy.schemas import ProjectConfigSchema, RecommendationSchema, validate
from spacy.tokens import Doc from spacy.tokens import Doc, DocBin
from spacy.tokens.span import Span from spacy.tokens.span import Span
from spacy.training import Example, docs_to_json, offsets_to_biluo_tags from spacy.training import Example, docs_to_json, offsets_to_biluo_tags
from spacy.training.converters import conll_ner_to_docs, conllu_to_docs from spacy.training.converters import conll_ner_to_docs, conllu_to_docs
@ -116,6 +125,25 @@ def test_issue7055():
assert "model" in filled_cfg["components"]["ner"] assert "model" in filled_cfg["components"]["ner"]
@pytest.mark.issue(11235)
def test_issue11235():
"""
Test that the CLI handles interpolation in directory names correctly when loading the project config.
"""
lang_var = "en"
variables = {"lang": lang_var}
commands = [{"name": "x", "script": ["hello ${vars.lang}"]}]
directories = ["cfg", "${vars.lang}_model"]
project = {"commands": commands, "vars": variables, "directories": directories}
with make_tempdir() as d:
srsly.write_yaml(d / "project.yml", project)
cfg = load_project_config(d)
# Check that the directories are interpolated and created correctly
assert os.path.exists(d / "cfg")
assert os.path.exists(d / f"{lang_var}_model")
assert cfg["commands"][0]["script"][0] == f"hello {lang_var}"
def test_cli_info(): def test_cli_info():
nlp = Dutch() nlp = Dutch()
nlp.add_pipe("textcat") nlp.add_pipe("textcat")
@ -589,6 +617,7 @@ def test_string_to_list_intify(value):
assert string_to_list(value, intify=True) == [1, 2, 3] assert string_to_list(value, intify=True) == [1, 2, 3]
@pytest.mark.skip(reason="Temporarily skip for dev version")
def test_download_compatibility(): def test_download_compatibility():
spec = SpecifierSet("==" + about.__version__) spec = SpecifierSet("==" + about.__version__)
spec.prereleases = False spec.prereleases = False
@ -599,6 +628,7 @@ def test_download_compatibility():
assert get_minor_version(about.__version__) == get_minor_version(version) assert get_minor_version(about.__version__) == get_minor_version(version)
@pytest.mark.skip(reason="Temporarily skip for dev version")
def test_validate_compatibility_table(): def test_validate_compatibility_table():
spec = SpecifierSet("==" + about.__version__) spec = SpecifierSet("==" + about.__version__)
spec.prereleases = False spec.prereleases = False
@ -855,3 +885,303 @@ def test_span_length_freq_dist_output_must_be_correct():
span_freqs = _get_spans_length_freq_dist(sample_span_lengths, threshold) span_freqs = _get_spans_length_freq_dist(sample_span_lengths, threshold)
assert sum(span_freqs.values()) >= threshold assert sum(span_freqs.values()) >= threshold
assert list(span_freqs.keys()) == [3, 1, 4, 5, 2] assert list(span_freqs.keys()) == [3, 1, 4, 5, 2]
def test_applycli_empty_dir():
with make_tempdir() as data_path:
output = data_path / "test.spacy"
apply(data_path, output, "blank:en", "text", 1, 1)
def test_applycli_docbin():
with make_tempdir() as data_path:
output = data_path / "testout.spacy"
nlp = spacy.blank("en")
doc = nlp("testing apply cli.")
# test empty DocBin case
docbin = DocBin()
docbin.to_disk(data_path / "testin.spacy")
apply(data_path, output, "blank:en", "text", 1, 1)
docbin.add(doc)
docbin.to_disk(data_path / "testin.spacy")
apply(data_path, output, "blank:en", "text", 1, 1)
def test_applycli_jsonl():
with make_tempdir() as data_path:
output = data_path / "testout.spacy"
data = [{"field": "Testing apply cli.", "key": 234}]
data2 = [{"field": "234"}]
srsly.write_jsonl(data_path / "test.jsonl", data)
apply(data_path, output, "blank:en", "field", 1, 1)
srsly.write_jsonl(data_path / "test2.jsonl", data2)
apply(data_path, output, "blank:en", "field", 1, 1)
def test_applycli_txt():
with make_tempdir() as data_path:
output = data_path / "testout.spacy"
with open(data_path / "test.foo", "w") as ftest:
ftest.write("Testing apply cli.")
apply(data_path, output, "blank:en", "text", 1, 1)
def test_applycli_mixed():
with make_tempdir() as data_path:
output = data_path / "testout.spacy"
text = "Testing apply cli"
nlp = spacy.blank("en")
doc = nlp(text)
jsonl_data = [{"text": text}]
srsly.write_jsonl(data_path / "test.jsonl", jsonl_data)
docbin = DocBin()
docbin.add(doc)
docbin.to_disk(data_path / "testin.spacy")
with open(data_path / "test.txt", "w") as ftest:
ftest.write(text)
apply(data_path, output, "blank:en", "text", 1, 1)
# Check whether it worked
result = list(DocBin().from_disk(output).get_docs(nlp.vocab))
assert len(result) == 3
for doc in result:
assert doc.text == text
def test_applycli_user_data():
Doc.set_extension("ext", default=0)
val = ("ext", 0)
with make_tempdir() as data_path:
output = data_path / "testout.spacy"
nlp = spacy.blank("en")
doc = nlp("testing apply cli.")
doc._.ext = val
docbin = DocBin(store_user_data=True)
docbin.add(doc)
docbin.to_disk(data_path / "testin.spacy")
apply(data_path, output, "blank:en", "", 1, 1)
result = list(DocBin().from_disk(output).get_docs(nlp.vocab))
assert result[0]._.ext == val
def test_local_remote_storage():
with make_tempdir() as d:
filename = "a.txt"
content_hashes = ("aaaa", "cccc", "bbbb")
for i, content_hash in enumerate(content_hashes):
# make sure that each subsequent file has a later timestamp
if i > 0:
time.sleep(1)
content = f"{content_hash} content"
loc_file = d / "root" / filename
if not loc_file.parent.exists():
loc_file.parent.mkdir(parents=True)
with loc_file.open(mode="w") as file_:
file_.write(content)
# push first version to remote storage
remote = RemoteStorage(d / "root", str(d / "remote"))
remote.push(filename, "aaaa", content_hash)
# retrieve with full hashes
loc_file.unlink()
remote.pull(filename, command_hash="aaaa", content_hash=content_hash)
with loc_file.open(mode="r") as file_:
assert file_.read() == content
# retrieve with command hash
loc_file.unlink()
remote.pull(filename, command_hash="aaaa")
with loc_file.open(mode="r") as file_:
assert file_.read() == content
# retrieve with content hash
loc_file.unlink()
remote.pull(filename, content_hash=content_hash)
with loc_file.open(mode="r") as file_:
assert file_.read() == content
# retrieve with no hashes
loc_file.unlink()
remote.pull(filename)
with loc_file.open(mode="r") as file_:
assert file_.read() == content
def test_local_remote_storage_pull_missing():
# Pulling from a non-existent remote should pull nothing and not raise an error.
with make_tempdir() as d:
filename = "a.txt"
remote = RemoteStorage(d / "root", str(d / "remote"))
assert remote.pull(filename, command_hash="aaaa") is None
assert remote.pull(filename) is None
def test_cli_find_threshold(capsys):
thresholds = numpy.linspace(0, 1, 10)
def make_examples(nlp: Language) -> List[Example]:
docs: List[Example] = []
for t in [
(
"I am angry and confused in the Bank of America.",
{
"cats": {"ANGRY": 1.0, "CONFUSED": 1.0, "HAPPY": 0.0},
"spans": {"sc": [(31, 46, "ORG")]},
},
),
(
"I am confused but happy in New York.",
{
"cats": {"ANGRY": 0.0, "CONFUSED": 1.0, "HAPPY": 1.0},
"spans": {"sc": [(27, 35, "GPE")]},
},
),
]:
doc = nlp.make_doc(t[0])
docs.append(Example.from_dict(doc, t[1]))
return docs
def init_nlp(
components: Tuple[Tuple[str, Dict[str, Any]], ...] = ()
) -> Tuple[Language, List[Example]]:
new_nlp = English()
new_nlp.add_pipe( # type: ignore
factory_name="textcat_multilabel",
name="tc_multi",
config={"threshold": 0.9},
)
# Append additional components to pipeline.
for cfn, comp_config in components:
new_nlp.add_pipe(cfn, config=comp_config)
new_examples = make_examples(new_nlp)
new_nlp.initialize(get_examples=lambda: new_examples)
for i in range(5):
new_nlp.update(new_examples)
return new_nlp, new_examples
with make_tempdir() as docs_dir:
# Check whether find_threshold() identifies the lowest threshold above 0 as the (first) ideal threshold, as this
# matches the current model behavior with the examples above. This can break if the model behavior changes and
# serves mostly as a smoke test.
nlp, examples = init_nlp()
DocBin(docs=[example.reference for example in examples]).to_disk(
docs_dir / "docs.spacy"
)
with make_tempdir() as nlp_dir:
nlp.to_disk(nlp_dir)
res = find_threshold(
model=nlp_dir,
data_path=docs_dir / "docs.spacy",
pipe_name="tc_multi",
threshold_key="threshold",
scores_key="cats_macro_f",
silent=True,
)
assert res[0] != thresholds[0]
assert thresholds[0] < res[0] < thresholds[9]
assert res[1] == 1.0
assert res[2][1.0] == 0.0
# Test with spancat.
nlp, _ = init_nlp((("spancat", {}),))
with make_tempdir() as nlp_dir:
nlp.to_disk(nlp_dir)
res = find_threshold(
model=nlp_dir,
data_path=docs_dir / "docs.spacy",
pipe_name="spancat",
threshold_key="threshold",
scores_key="spans_sc_f",
silent=True,
)
assert res[0] != thresholds[0]
assert thresholds[0] < res[0] < thresholds[8]
assert res[1] >= 0.6
assert res[2][1.0] == 0.0
# Having multiple textcat_multilabel components should work, since the name has to be specified.
nlp, _ = init_nlp((("textcat_multilabel", {}),))
with make_tempdir() as nlp_dir:
nlp.to_disk(nlp_dir)
assert find_threshold(
model=nlp_dir,
data_path=docs_dir / "docs.spacy",
pipe_name="tc_multi",
threshold_key="threshold",
scores_key="cats_macro_f",
silent=True,
)
# Specifying the name of a non-existent pipe should fail.
nlp, _ = init_nlp()
with make_tempdir() as nlp_dir:
nlp.to_disk(nlp_dir)
with pytest.raises(AttributeError):
find_threshold(
model=nlp_dir,
data_path=docs_dir / "docs.spacy",
pipe_name="_",
threshold_key="threshold",
scores_key="cats_macro_f",
silent=True,
)
@pytest.mark.parametrize(
"reqs,output",
[
[
"""
spacy
# comment
thinc""",
(False, False),
],
[
"""# comment
--some-flag
spacy""",
(False, False),
],
[
"""# comment
--some-flag
spacy; python_version >= '3.6'""",
(False, False),
],
[
"""# comment
spacyunknowndoesnotexist12345""",
(True, False),
],
],
)
def test_project_check_requirements(reqs, output):
# excessive guard against unlikely package name
try:
pkg_resources.require("spacyunknowndoesnotexist12345")
except pkg_resources.DistributionNotFound:
assert output == _check_requirements([req.strip() for req in reqs.split("\n")])
def test_upload_download_local_file():
with make_tempdir() as d1, make_tempdir() as d2:
filename = "f.txt"
content = "content"
local_file = d1 / filename
remote_file = d2 / filename
with local_file.open(mode="w") as file_:
file_.write(content)
upload_file(local_file, remote_file)
local_file.unlink()
download_file(remote_file, local_file)
with local_file.open(mode="r") as file_:
assert file_.read() == content

View File

@ -203,6 +203,16 @@ def test_displacy_parse_spans_different_spans_key(en_vocab):
] ]
def test_displacy_parse_empty_spans_key(en_vocab):
"""Test that having an unset spans key doesn't raise an error"""
doc = Doc(en_vocab, words=["Welcome", "to", "the", "Bank", "of", "China"])
doc.spans["custom"] = [Span(doc, 3, 6, "BANK")]
with pytest.warns(UserWarning, match="W117"):
spans = displacy.parse_spans(doc)
assert isinstance(spans, dict)
def test_displacy_parse_ents(en_vocab): def test_displacy_parse_ents(en_vocab):
"""Test that named entities on a Doc are converted into displaCy's format.""" """Test that named entities on a Doc are converted into displaCy's format."""
doc = Doc(en_vocab, words=["But", "Google", "is", "starting", "from", "behind"]) doc = Doc(en_vocab, words=["But", "Google", "is", "starting", "from", "behind"])

View File

@ -2,6 +2,7 @@ import random
import numpy import numpy
import pytest import pytest
import spacy
import srsly import srsly
from spacy.lang.en import English from spacy.lang.en import English
from spacy.tokens import Doc, DocBin from spacy.tokens import Doc, DocBin
@ -11,9 +12,10 @@ from spacy.training import offsets_to_biluo_tags
from spacy.training.alignment_array import AlignmentArray from spacy.training.alignment_array import AlignmentArray
from spacy.training.align import get_alignments from spacy.training.align import get_alignments
from spacy.training.converters import json_to_docs from spacy.training.converters import json_to_docs
from spacy.training.loop import train_while_improving
from spacy.util import get_words_and_spaces, load_model_from_path, minibatch from spacy.util import get_words_and_spaces, load_model_from_path, minibatch
from spacy.util import load_config_from_str from spacy.util import load_config_from_str
from thinc.api import compounding from thinc.api import compounding, Adam
from ..util import make_tempdir from ..util import make_tempdir
@ -1112,3 +1114,39 @@ def test_retokenized_docs(doc):
retokenizer.merge(doc1[0:2]) retokenizer.merge(doc1[0:2])
retokenizer.merge(doc1[5:7]) retokenizer.merge(doc1[5:7])
assert example.get_aligned("ORTH", as_string=True) == expected2 assert example.get_aligned("ORTH", as_string=True) == expected2
def test_training_before_update(doc):
def before_update(nlp, args):
assert args["step"] == 0
assert args["epoch"] == 1
# Raise an error here as the rest of the loop
# will not run to completion due to uninitialized
# models.
raise ValueError("ran_before_update")
def generate_batch():
yield 1, [Example(doc, doc)]
nlp = spacy.blank("en")
nlp.add_pipe("tagger")
optimizer = Adam()
generator = train_while_improving(
nlp,
optimizer,
generate_batch(),
lambda: None,
dropout=0.1,
eval_frequency=100,
accumulate_gradient=10,
patience=10,
max_steps=100,
exclude=[],
annotating_components=[],
before_update=before_update,
)
with pytest.raises(ValueError, match="ran_before_update"):
for _ in generator:
pass

View File

@ -626,3 +626,23 @@ def test_floret_vectors(floret_vectors_vec_str, floret_vectors_hashvec_str):
OPS.to_numpy(vocab_r[word].vector), OPS.to_numpy(vocab_r[word].vector),
decimal=6, decimal=6,
) )
def test_equality():
vectors1 = Vectors(shape=(10, 10))
vectors2 = Vectors(shape=(10, 8))
assert vectors1 != vectors2
vectors2 = Vectors(shape=(10, 10))
assert vectors1 == vectors2
vectors1.add("hello", row=2)
assert vectors1 != vectors2
vectors2.add("hello", row=2)
assert vectors1 == vectors2
vectors1.resize((5, 9))
vectors2.resize((5, 9))
assert vectors1 == vectors2

View File

@ -1,8 +1,13 @@
import os
import pytest import pytest
from spacy.attrs import IS_ALPHA, LEMMA, ORTH from spacy.attrs import IS_ALPHA, LEMMA, ORTH
from spacy.lang.en import English
from spacy.parts_of_speech import NOUN, VERB from spacy.parts_of_speech import NOUN, VERB
from spacy.vocab import Vocab from spacy.vocab import Vocab
from ..util import make_tempdir
@pytest.mark.issue(1868) @pytest.mark.issue(1868)
def test_issue1868(): def test_issue1868():
@ -59,3 +64,19 @@ def test_vocab_api_contains(en_vocab, text):
def test_vocab_writing_system(en_vocab): def test_vocab_writing_system(en_vocab):
assert en_vocab.writing_system["direction"] == "ltr" assert en_vocab.writing_system["direction"] == "ltr"
assert en_vocab.writing_system["has_case"] is True assert en_vocab.writing_system["has_case"] is True
def test_to_disk():
nlp = English()
with make_tempdir() as d:
nlp.vocab.to_disk(d)
assert "vectors" in os.listdir(d)
assert "lookups.bin" in os.listdir(d)
def test_to_disk_exclude():
nlp = English()
with make_tempdir() as d:
nlp.vocab.to_disk(d, exclude=("vectors", "lookups"))
assert "vectors" not in os.listdir(d)
assert "lookups.bin" not in os.listdir(d)

View File

@ -48,8 +48,6 @@ cdef class Doc:
cdef TokenC* c cdef TokenC* c
cdef public float sentiment
cdef public dict activations cdef public dict activations
cdef public dict user_hooks cdef public dict user_hooks

View File

@ -21,7 +21,6 @@ class Doc:
spans: SpanGroups spans: SpanGroups
max_length: int max_length: int
length: int length: int
sentiment: float
activations: Dict[str, Dict[str, Union[ArrayXd, Ragged]]] activations: Dict[str, Dict[str, Union[ArrayXd, Ragged]]]
cats: Dict[str, float] cats: Dict[str, float]
user_hooks: Dict[str, Callable[..., Any]] user_hooks: Dict[str, Callable[..., Any]]

View File

@ -243,7 +243,6 @@ cdef class Doc:
self.c = data_start + PADDING self.c = data_start + PADDING
self.max_length = size self.max_length = size
self.length = 0 self.length = 0
self.sentiment = 0.0
self.cats = {} self.cats = {}
self.activations = {} self.activations = {}
self.user_hooks = {} self.user_hooks = {}
@ -360,6 +359,7 @@ cdef class Doc:
for annot in annotations: for annot in annotations:
if annot: if annot:
if annot is heads or annot is sent_starts or annot is ent_iobs: if annot is heads or annot is sent_starts or annot is ent_iobs:
annot = numpy.array(annot, dtype=numpy.int32).astype(numpy.uint64)
for i in range(len(words)): for i in range(len(words)):
if attrs.ndim == 1: if attrs.ndim == 1:
attrs[i] = annot[i] attrs[i] = annot[i]
@ -1178,13 +1178,22 @@ cdef class Doc:
if "user_data" not in exclude: if "user_data" not in exclude:
for key, value in doc.user_data.items(): for key, value in doc.user_data.items():
if isinstance(key, tuple) and len(key) == 4 and key[0] == "._.": if isinstance(key, tuple) and len(key) >= 4 and key[0] == "._.":
data_type, name, start, end = key data_type = key[0]
name = key[1]
start = key[2]
end = key[3]
if start is not None or end is not None: if start is not None or end is not None:
start += char_offset start += char_offset
if end is not None: if end is not None:
end += char_offset end += char_offset
concat_user_data[(data_type, name, start, end)] = copy.copy(value) _label = key[4]
_kb_id = key[5]
_span_id = key[6]
concat_user_data[(data_type, name, start, end, _label, _kb_id, _span_id)] = copy.copy(value)
else:
concat_user_data[(data_type, name, start, end)] = copy.copy(value)
else: else:
warnings.warn(Warnings.W101.format(name=name)) warnings.warn(Warnings.W101.format(name=name))
else: else:
@ -1270,7 +1279,6 @@ cdef class Doc:
other.tensor = copy.deepcopy(self.tensor) other.tensor = copy.deepcopy(self.tensor)
other.cats = copy.deepcopy(self.cats) other.cats = copy.deepcopy(self.cats)
other.user_data = copy.deepcopy(self.user_data) other.user_data = copy.deepcopy(self.user_data)
other.sentiment = self.sentiment
other.has_unknown_spaces = self.has_unknown_spaces other.has_unknown_spaces = self.has_unknown_spaces
other.user_hooks = dict(self.user_hooks) other.user_hooks = dict(self.user_hooks)
other.user_token_hooks = dict(self.user_token_hooks) other.user_token_hooks = dict(self.user_token_hooks)
@ -1367,7 +1375,6 @@ cdef class Doc:
"text": lambda: self.text, "text": lambda: self.text,
"array_head": lambda: array_head, "array_head": lambda: array_head,
"array_body": lambda: self.to_array(array_head), "array_body": lambda: self.to_array(array_head),
"sentiment": lambda: self.sentiment,
"tensor": lambda: self.tensor, "tensor": lambda: self.tensor,
"cats": lambda: self.cats, "cats": lambda: self.cats,
"spans": lambda: self.spans.to_bytes(), "spans": lambda: self.spans.to_bytes(),
@ -1405,8 +1412,6 @@ cdef class Doc:
for key, value in zip(user_data_keys, user_data_values): for key, value in zip(user_data_keys, user_data_values):
self.user_data[key] = value self.user_data[key] = value
cdef int i, start, end, has_space cdef int i, start, end, has_space
if "sentiment" not in exclude and "sentiment" in msg:
self.sentiment = msg["sentiment"]
if "tensor" not in exclude and "tensor" in msg: if "tensor" not in exclude and "tensor" in msg:
self.tensor = msg["tensor"] self.tensor = msg["tensor"]
if "cats" not in exclude and "cats" in msg: if "cats" not in exclude and "cats" in msg:
@ -1569,6 +1574,7 @@ cdef class Doc:
for j, (attr, annot) in enumerate(token_annotations.items()): for j, (attr, annot) in enumerate(token_annotations.items()):
if attr is HEAD: if attr is HEAD:
annot = numpy.array(annot, dtype=numpy.int32).astype(numpy.uint64)
for i in range(len(words)): for i in range(len(words)):
array[i, j] = annot[i] array[i, j] = annot[i]
elif attr is MORPH: elif attr is MORPH:
@ -1632,7 +1638,11 @@ cdef class Doc:
Span.set_extension(span_attr) Span.set_extension(span_attr)
for span_data in doc_json["underscore_span"][span_attr]: for span_data in doc_json["underscore_span"][span_attr]:
value = span_data["value"] value = span_data["value"]
self.char_span(span_data["start"], span_data["end"])._.set(span_attr, value) span = self.char_span(span_data["start"], span_data["end"])
span.label = span_data["label"]
span.kb_id = span_data["kb_id"]
span.id = span_data["id"]
span._.set(span_attr, value)
return self return self
def to_json(self, underscore=None): def to_json(self, underscore=None):
@ -1679,6 +1689,20 @@ cdef class Doc:
if underscore: if underscore:
user_keys = set() user_keys = set()
# Handle doc attributes with .get to include values from getters
# and not only values stored in user_data, for backwards
# compatibility
for attr in underscore:
if self.has_extension(attr):
if "_" not in data:
data["_"] = {}
value = self._.get(attr)
if not srsly.is_json_serializable(value):
raise ValueError(Errors.E107.format(attr=attr, value=repr(value)))
data["_"][attr] = value
user_keys.add(attr)
# Token and span attributes only include values stored in user_data
# and not values generated by getters
if self.user_data: if self.user_data:
for data_key, value in self.user_data.copy().items(): for data_key, value in self.user_data.copy().items():
if type(data_key) == tuple and len(data_key) >= 4 and data_key[0] == "._.": if type(data_key) == tuple and len(data_key) >= 4 and data_key[0] == "._.":
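A small sketch of what the doc-level handling added above enables: extension values backed by a getter (so never stored in `user_data`) are now picked up by `to_json`. The extension name here is invented for illustration.

```python
import spacy
from spacy.tokens import Doc

# Hypothetical getter-backed extension; nothing is written to doc.user_data
Doc.set_extension("token_count", getter=lambda doc: len(doc), force=True)

nlp = spacy.blank("en")
doc = nlp("Serialize me please")
data = doc.to_json(underscore=["token_count"])
print(data["_"]["token_count"])  # 3
```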
@ -1689,25 +1713,23 @@ cdef class Doc:
user_keys.add(attr) user_keys.add(attr)
if not srsly.is_json_serializable(value): if not srsly.is_json_serializable(value):
raise ValueError(Errors.E107.format(attr=attr, value=repr(value))) raise ValueError(Errors.E107.format(attr=attr, value=repr(value)))
# Check if doc attribute # Token attribute
if start is None: if start is not None and end is None:
if "_" not in data:
data["_"] = {}
data["_"][attr] = value
# Check if token attribute
elif end is None:
if "underscore_token" not in data: if "underscore_token" not in data:
data["underscore_token"] = {} data["underscore_token"] = {}
if attr not in data["underscore_token"]: if attr not in data["underscore_token"]:
data["underscore_token"][attr] = [] data["underscore_token"][attr] = []
data["underscore_token"][attr].append({"start": start, "value": value}) data["underscore_token"][attr].append({"start": start, "value": value})
# Else span attribute # Else span attribute
else: elif end is not None:
_label = data_key[4]
_kb_id = data_key[5]
_span_id = data_key[6]
if "underscore_span" not in data: if "underscore_span" not in data:
data["underscore_span"] = {} data["underscore_span"] = {}
if attr not in data["underscore_span"]: if attr not in data["underscore_span"]:
data["underscore_span"][attr] = [] data["underscore_span"][attr] = []
data["underscore_span"][attr].append({"start": start, "end": end, "value": value}) data["underscore_span"][attr].append({"start": start, "end": end, "value": value, "label": _label, "kb_id": _kb_id, "id":_span_id})
for attr in underscore: for attr in underscore:
if attr not in user_keys: if attr not in user_keys:
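And a sketch of the span side: values stored via a span's `._.` are now serialized together with the span's label, kb_id and id (the extension name and URL below are made up):

```python
import spacy
from spacy.tokens import Span

Span.set_extension("wikipedia_url", default=None, force=True)

nlp = spacy.blank("en")
doc = nlp("Alice visited Berlin")
span = doc.char_span(14, 20, label="GPE")
span._.wikipedia_url = "https://en.wikipedia.org/wiki/Berlin"

data = doc.to_json(underscore=["wikipedia_url"])
# Each entry now also carries "label", "kb_id" and "id" for the span
print(data["underscore_span"]["wikipedia_url"][0])
```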

View File

@ -82,8 +82,6 @@ class Span:
@property @property
def tensor(self) -> FloatsXd: ... def tensor(self) -> FloatsXd: ...
@property @property
def sentiment(self) -> float: ...
@property
def text(self) -> str: ... def text(self) -> str: ...
@property @property
def text_with_ws(self) -> str: ... def text_with_ws(self) -> str: ...
@ -95,8 +93,8 @@ class Span:
self, self,
start_idx: int, start_idx: int,
end_idx: int, end_idx: int,
label: int = ..., label: Union[int, str] = ...,
kb_id: int = ..., kb_id: Union[int, str] = ...,
vector: Optional[Floats1d] = ..., vector: Optional[Floats1d] = ...,
) -> Span: ... ) -> Span: ...
@property @property

View File

@ -218,11 +218,10 @@ cdef class Span:
cdef SpanC* span_c = self.span_c() cdef SpanC* span_c = self.span_c()
"""Custom extension attributes registered via `set_extension`.""" """Custom extension attributes registered via `set_extension`."""
return Underscore(Underscore.span_extensions, self, return Underscore(Underscore.span_extensions, self,
start=span_c.start_char, end=span_c.end_char) start=span_c.start_char, end=span_c.end_char, label=self.label, kb_id=self.kb_id, span_id=self.id)
def as_doc(self, *, bint copy_user_data=False, array_head=None, array=None): def as_doc(self, *, bint copy_user_data=False, array_head=None, array=None):
"""Create a `Doc` object with a copy of the `Span`'s data. """Create a `Doc` object with a copy of the `Span`'s data.
copy_user_data (bool): Whether or not to copy the original doc's user data. copy_user_data (bool): Whether or not to copy the original doc's user data.
array_head (tuple): `Doc` array attrs, can be passed in to speed up computation. array_head (tuple): `Doc` array attrs, can be passed in to speed up computation.
array (ndarray): `Doc` as array, can be passed in to speed up computation. array (ndarray): `Doc` as array, can be passed in to speed up computation.
@ -275,12 +274,22 @@ cdef class Span:
char_offset = self.start_char char_offset = self.start_char
for key, value in self.doc.user_data.items(): for key, value in self.doc.user_data.items():
if isinstance(key, tuple) and len(key) == 4 and key[0] == "._.": if isinstance(key, tuple) and len(key) == 4 and key[0] == "._.":
data_type, name, start, end = key data_type = key[0]
name = key[1]
start = key[2]
end = key[3]
if start is not None or end is not None: if start is not None or end is not None:
start -= char_offset start -= char_offset
# Check if Span object
if end is not None: if end is not None:
end -= char_offset end -= char_offset
user_data[(data_type, name, start, end)] = copy.copy(value) _label = key[4]
_kb_id = key[5]
_span_id = key[6]
user_data[(data_type, name, start, end, _label, _kb_id, _span_id)] = copy.copy(value)
# Else Token object
else:
user_data[(data_type, name, start, end)] = copy.copy(value)
else: else:
user_data[key] = copy.copy(value) user_data[key] = copy.copy(value)
doc.user_data = user_data doc.user_data = user_data
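A sketch of the corresponding `Span.as_doc` behaviour: copied extension entries keep their offset-shifted keys, which now also record the source span's label, kb_id and id. The extension name is invented.

```python
import spacy
from spacy.tokens import Span

Span.set_extension("note", default=None, force=True)

nlp = spacy.blank("en")
doc = nlp("I like New York")
span = doc.char_span(7, 15, label="GPE")
span._.note = "city"

span_doc = span.as_doc(copy_user_data=True)
# Span-level keys are 7-tuples: ("._.", name, start, end, label, kb_id, span_id)
print(list(span_doc.user_data.keys()))
```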
@ -309,7 +318,7 @@ cdef class Span:
for ancestor in ancestors: for ancestor in ancestors:
ancestor_i = ancestor.i - span_c.start ancestor_i = ancestor.i - span_c.start
if ancestor_i in range(length): if ancestor_i in range(length):
array[i, head_col] = ancestor_i - i array[i, head_col] = numpy.int32(ancestor_i - i).astype(numpy.uint64)
# if there is no appropriate ancestor, define a new artificial root # if there is no appropriate ancestor, define a new artificial root
value = array[i, head_col] value = array[i, head_col]
@ -317,7 +326,7 @@ cdef class Span:
new_root = old_to_new_root.get(ancestor_i, None) new_root = old_to_new_root.get(ancestor_i, None)
if new_root is not None: if new_root is not None:
# take the same artificial root as a previous token from the same sentence # take the same artificial root as a previous token from the same sentence
array[i, head_col] = new_root - i array[i, head_col] = numpy.int32(new_root - i).astype(numpy.uint64)
else: else:
# set this token as the new artificial root # set this token as the new artificial root
array[i, head_col] = 0 array[i, head_col] = 0
@ -566,16 +575,6 @@ cdef class Span:
return None return None
return self.doc.tensor[self.start : self.end] return self.doc.tensor[self.start : self.end]
@property
def sentiment(self):
"""RETURNS (float): A scalar value indicating the positivity or
negativity of the span.
"""
if "sentiment" in self.doc.user_span_hooks:
return self.doc.user_span_hooks["sentiment"](self)
else:
return sum([token.sentiment for token in self]) / len(self)
@property @property
def text(self): def text(self):
"""RETURNS (str): The original verbatim text of the span.""" """RETURNS (str): The original verbatim text of the span."""
@ -791,21 +790,36 @@ cdef class Span:
return self.span_c().label return self.span_c().label
def __set__(self, attr_t label): def __set__(self, attr_t label):
self.span_c().label = label if label != self.span_c().label :
old_label = self.span_c().label
self.span_c().label = label
new = Underscore(Underscore.span_extensions, self, start=self.span_c().start_char, end=self.span_c().end_char, label=self.label, kb_id=self.kb_id, span_id=self.id)
old = Underscore(Underscore.span_extensions, self, start=self.span_c().start_char, end=self.span_c().end_char, label=old_label, kb_id=self.kb_id, span_id=self.id)
Underscore._replace_keys(old, new)
property kb_id: property kb_id:
def __get__(self): def __get__(self):
return self.span_c().kb_id return self.span_c().kb_id
def __set__(self, attr_t kb_id): def __set__(self, attr_t kb_id):
self.span_c().kb_id = kb_id if kb_id != self.span_c().kb_id :
old_kb_id = self.span_c().kb_id
self.span_c().kb_id = kb_id
new = Underscore(Underscore.span_extensions, self, start=self.span_c().start_char, end=self.span_c().end_char, label=self.label, kb_id=self.kb_id, span_id=self.id)
old = Underscore(Underscore.span_extensions, self, start=self.span_c().start_char, end=self.span_c().end_char, label=self.label, kb_id=old_kb_id, span_id=self.id)
Underscore._replace_keys(old, new)
property id: property id:
def __get__(self): def __get__(self):
return self.span_c().id return self.span_c().id
def __set__(self, attr_t id): def __set__(self, attr_t id):
self.span_c().id = id if id != self.span_c().id :
old_id = self.span_c().id
self.span_c().id = id
new = Underscore(Underscore.span_extensions, self, start=self.span_c().start_char, end=self.span_c().end_char, label=self.label, kb_id=self.kb_id, span_id=self.id)
old = Underscore(Underscore.span_extensions, self, start=self.span_c().start_char, end=self.span_c().end_char, label=self.label, kb_id=self.kb_id, span_id=old_id)
Underscore._replace_keys(old, new)
property ent_id: property ent_id:
"""Alias for the span's ID.""" """Alias for the span's ID."""

View File

@ -18,6 +18,7 @@ class SpanGroup:
def doc(self) -> Doc: ... def doc(self) -> Doc: ...
@property @property
def has_overlap(self) -> bool: ... def has_overlap(self) -> bool: ...
def __iter__(self): ...
def __len__(self) -> int: ... def __len__(self) -> int: ...
def append(self, span: Span) -> None: ... def append(self, span: Span) -> None: ...
def extend(self, spans: Iterable[Span]) -> None: ... def extend(self, spans: Iterable[Span]) -> None: ...

View File

@ -159,6 +159,16 @@ cdef class SpanGroup:
return self._concat(other) return self._concat(other)
return NotImplemented return NotImplemented
def __iter__(self):
"""
Iterate over the spans in this SpanGroup.
YIELDS (Span): A span in this SpanGroup.
DOCS: https://spacy.io/api/spangroup#iter
"""
for i in range(self.c.size()):
yield self[i]
def append(self, Span span): def append(self, Span span):
"""Add a span to the group. The span must refer to the same Doc """Add a span to the group. The span must refer to the same Doc
object as the span group. object as the span group.
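A minimal usage sketch of the new `__iter__` (the span group name and text are arbitrary):

```python
import spacy

nlp = spacy.blank("en")
doc = nlp("Their goofy dog chased the ball")
doc.spans["noun_phrases"] = [doc[0:3], doc[4:6]]

# SpanGroup objects can now be iterated over directly
for span in doc.spans["noun_phrases"]:
    print(span.text)
```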

View File

@ -79,8 +79,6 @@ class Token:
@property @property
def prob(self) -> float: ... def prob(self) -> float: ...
@property @property
def sentiment(self) -> float: ...
@property
def lang(self) -> int: ... def lang(self) -> int: ...
@property @property
def idx(self) -> int: ... def idx(self) -> int: ...

View File

@ -283,14 +283,6 @@ cdef class Token:
"""RETURNS (float): Smoothed log probability estimate of token type.""" """RETURNS (float): Smoothed log probability estimate of token type."""
return self.vocab[self.c.lex.orth].prob return self.vocab[self.c.lex.orth].prob
@property
def sentiment(self):
"""RETURNS (float): A scalar value indicating the positivity or
negativity of the token."""
if "sentiment" in self.doc.user_token_hooks:
return self.doc.user_token_hooks["sentiment"](self)
return self.vocab[self.c.lex.orth].sentiment
@property @property
def lang(self): def lang(self):
"""RETURNS (uint64): ID of the language of the parent document's """RETURNS (uint64): ID of the language of the parent document's

View File

@ -2,10 +2,10 @@ from typing import Dict, Any, List, Optional, Tuple, Union, TYPE_CHECKING
import functools import functools
import copy import copy
from ..errors import Errors from ..errors import Errors
from .span import Span
if TYPE_CHECKING: if TYPE_CHECKING:
from .doc import Doc from .doc import Doc
from .span import Span
from .token import Token from .token import Token
@ -25,6 +25,9 @@ class Underscore:
obj: Union["Doc", "Span", "Token"], obj: Union["Doc", "Span", "Token"],
start: Optional[int] = None, start: Optional[int] = None,
end: Optional[int] = None, end: Optional[int] = None,
label: int = 0,
kb_id: int = 0,
span_id: int = 0,
): ):
object.__setattr__(self, "_extensions", extensions) object.__setattr__(self, "_extensions", extensions)
object.__setattr__(self, "_obj", obj) object.__setattr__(self, "_obj", obj)
@ -36,6 +39,10 @@ class Underscore:
object.__setattr__(self, "_doc", obj.doc) object.__setattr__(self, "_doc", obj.doc)
object.__setattr__(self, "_start", start) object.__setattr__(self, "_start", start)
object.__setattr__(self, "_end", end) object.__setattr__(self, "_end", end)
if type(obj) == Span:
object.__setattr__(self, "_label", label)
object.__setattr__(self, "_kb_id", kb_id)
object.__setattr__(self, "_span_id", span_id)
def __dir__(self) -> List[str]: def __dir__(self) -> List[str]:
# Hack to enable autocomplete on custom extensions # Hack to enable autocomplete on custom extensions
@ -88,8 +95,39 @@ class Underscore:
def has(self, name: str) -> bool: def has(self, name: str) -> bool:
return name in self._extensions return name in self._extensions
def _get_key(self, name: str) -> Tuple[str, str, Optional[int], Optional[int]]: def _get_key(
return ("._.", name, self._start, self._end) self, name: str
) -> Union[
Tuple[str, str, Optional[int], Optional[int]],
Tuple[str, str, Optional[int], Optional[int], int, int, int],
]:
if hasattr(self, "_label"):
return (
"._.",
name,
self._start,
self._end,
self._label,
self._kb_id,
self._span_id,
)
else:
return "._.", name, self._start, self._end
@staticmethod
def _replace_keys(old_underscore: "Underscore", new_underscore: "Underscore"):
"""
This function is called by Span when its kb_id or label are re-assigned.
It checks if any user_data is stored for this span and replaces the keys
"""
for name in old_underscore._extensions:
old_key = old_underscore._get_key(name)
old_doc = old_underscore._doc
new_key = new_underscore._get_key(name)
if old_key != new_key and old_key in old_doc.user_data:
old_underscore._doc.user_data[
new_key
] = old_underscore._doc.user_data.pop(old_key)
@classmethod @classmethod
def get_state(cls) -> Tuple[Dict[Any, Any], Dict[Any, Any], Dict[Any, Any]]: def get_state(cls) -> Tuple[Dict[Any, Any], Dict[Any, Any], Dict[Any, Any]]:
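For illustration, the key shapes this produces in `Doc.user_data` (extension names invented): doc- and token-level keys keep the old 4-tuple form, while span-level keys gain label, kb_id and span id.

```python
import spacy
from spacy.tokens import Doc, Span

Doc.set_extension("source", default=None, force=True)
Span.set_extension("source", default=None, force=True)

nlp = spacy.blank("en")
doc = nlp("spaCy lives in Berlin")
doc._.source = "doc-level"
doc[3:4]._.source = "span-level"

for key in doc.user_data:
    print(len(key), key)  # one 4-tuple (doc) and one 7-tuple (span)
```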

View File

@ -2,11 +2,12 @@ from typing import Union, Iterable, Sequence, TypeVar, List, Callable, Iterator
from typing import Optional, Any from typing import Optional, Any
from functools import partial from functools import partial
import itertools import itertools
from thinc.schedules import Schedule, constant as constant_schedule
from ..util import registry, minibatch from ..util import registry, minibatch
Sizing = Union[Sequence[int], int] Sizing = Union[Sequence[int], int, Schedule[int]]
ItemT = TypeVar("ItemT") ItemT = TypeVar("ItemT")
BatcherT = Callable[[Iterable[ItemT]], Iterable[List[ItemT]]] BatcherT = Callable[[Iterable[ItemT]], Iterable[List[ItemT]]]
@ -111,12 +112,13 @@ def minibatch_by_padded_size(
The `len` function is used by default. The `len` function is used by default.
""" """
if isinstance(size, int): if isinstance(size, int):
size_ = itertools.repeat(size) # type: Iterator[int] size_ = constant_schedule(size)
else: else:
size_ = iter(size) assert isinstance(size, Schedule)
for outer_batch in minibatch(seqs, size=buffer): size_ = size
for step, outer_batch in enumerate(minibatch(seqs, size=buffer)):
outer_batch = list(outer_batch) outer_batch = list(outer_batch)
target_size = next(size_) target_size = size_(step)
for indices in _batch_by_length(outer_batch, target_size, get_length): for indices in _batch_by_length(outer_batch, target_size, get_length):
subbatch = [outer_batch[i] for i in indices] subbatch = [outer_batch[i] for i in indices]
padded_size = max(len(seq) for seq in subbatch) * len(subbatch) padded_size = max(len(seq) for seq in subbatch) * len(subbatch)
@ -147,10 +149,12 @@ def minibatch_by_words(
item. The `len` function is used by default. item. The `len` function is used by default.
""" """
if isinstance(size, int): if isinstance(size, int):
size_ = itertools.repeat(size) # type: Iterator[int] size_ = constant_schedule(size)
else: else:
size_ = iter(size) assert isinstance(size, Schedule)
target_size = next(size_) size_ = size
step = 0
target_size = size_(step)
tol_size = target_size * tolerance tol_size = target_size * tolerance
batch = [] batch = []
overflow = [] overflow = []
@ -175,7 +179,8 @@ def minibatch_by_words(
else: else:
if batch: if batch:
yield batch yield batch
target_size = next(size_) step += 1
target_size = size_(step)
tol_size = target_size * tolerance tol_size = target_size * tolerance
batch = overflow batch = overflow
batch_size = overflow_size batch_size = overflow_size
@ -193,7 +198,8 @@ def minibatch_by_words(
else: else:
if batch: if batch:
yield batch yield batch
target_size = next(size_) step += 1
target_size = size_(step)
tol_size = target_size * tolerance tol_size = target_size * tolerance
batch = [seq] batch = [seq]
batch_size = n_words batch_size = n_words
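With Thinc's `Schedule` type, the batch size is now evaluated at each step index instead of being drawn from an iterator. A rough sketch, assuming a Thinc version whose `compounding` returns a `Schedule` (the concrete numbers are arbitrary):

```python
from thinc.api import compounding
from spacy.training.batchers import minibatch_by_words

# A compounding schedule, called with the step index at each step
size = compounding(8.0, 32.0, 1.5)
seqs = [["tok"] * n for n in (3, 5, 2, 8, 13, 1, 7)]

for batch in minibatch_by_words(seqs, size=size):
    print(len(batch), "sequences,", sum(len(s) for s in batch), "words")
```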

View File

@ -443,26 +443,27 @@ def _annot2array(vocab, tok_annot, doc_annot):
if key not in IDS: if key not in IDS:
raise ValueError(Errors.E974.format(obj="token", key=key)) raise ValueError(Errors.E974.format(obj="token", key=key))
elif key in ["ORTH", "SPACY"]: elif key in ["ORTH", "SPACY"]:
pass continue
elif key == "HEAD": elif key == "HEAD":
attrs.append(key) attrs.append(key)
values.append([h-i if h is not None else 0 for i, h in enumerate(value)]) row = [h-i if h is not None else 0 for i, h in enumerate(value)]
elif key == "DEP": elif key == "DEP":
attrs.append(key) attrs.append(key)
values.append([vocab.strings.add(h) if h is not None else MISSING_DEP for h in value]) row = [vocab.strings.add(h) if h is not None else MISSING_DEP for h in value]
elif key == "SENT_START": elif key == "SENT_START":
attrs.append(key) attrs.append(key)
values.append([to_ternary_int(v) for v in value]) row = [to_ternary_int(v) for v in value]
elif key == "MORPH": elif key == "MORPH":
attrs.append(key) attrs.append(key)
values.append([vocab.morphology.add(v) for v in value]) row = [vocab.morphology.add(v) for v in value]
else: else:
attrs.append(key) attrs.append(key)
if not all(isinstance(v, str) for v in value): if not all(isinstance(v, str) for v in value):
types = set([type(v) for v in value]) types = set([type(v) for v in value])
raise TypeError(Errors.E969.format(field=key, types=types)) from None raise TypeError(Errors.E969.format(field=key, types=types)) from None
values.append([vocab.strings.add(v) for v in value]) row = [vocab.strings.add(v) for v in value]
array = numpy.asarray(values, dtype="uint64") values.append([numpy.array(v, dtype=numpy.int32).astype(numpy.uint64) if v < 0 else v for v in row])
array = numpy.array(values, dtype=numpy.uint64)
return attrs, array.T return attrs, array.T
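The practical effect, sketched with `Example.from_dict`: head offsets pointing to the left (negative relative offsets) are now stored safely in the unsigned array.

```python
import spacy
from spacy.training import Example

nlp = spacy.blank("en")
doc = nlp("I like cats")
annots = {
    "words": ["I", "like", "cats"],
    "heads": [1, 1, 1],  # "cats" points back to "like", a negative relative offset
    "deps": ["nsubj", "ROOT", "dobj"],
}
example = Example.from_dict(doc, annots)
print(example.reference[2].head.text)  # like
```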

View File

@ -59,6 +59,7 @@ def train(
batcher = T["batcher"] batcher = T["batcher"]
train_logger = T["logger"] train_logger = T["logger"]
before_to_disk = create_before_to_disk_callback(T["before_to_disk"]) before_to_disk = create_before_to_disk_callback(T["before_to_disk"])
before_update = T["before_update"]
# Helper function to save checkpoints. This is a closure for convenience, # Helper function to save checkpoints. This is a closure for convenience,
# to avoid passing in all the args all the time. # to avoid passing in all the args all the time.
@ -89,6 +90,7 @@ def train(
eval_frequency=T["eval_frequency"], eval_frequency=T["eval_frequency"],
exclude=frozen_components, exclude=frozen_components,
annotating_components=annotating_components, annotating_components=annotating_components,
before_update=before_update,
) )
clean_output_dir(output_path) clean_output_dir(output_path)
stdout.write(msg.info(f"Pipeline: {nlp.pipe_names}") + "\n") stdout.write(msg.info(f"Pipeline: {nlp.pipe_names}") + "\n")
@ -150,6 +152,7 @@ def train_while_improving(
max_steps: int, max_steps: int,
exclude: List[str], exclude: List[str],
annotating_components: List[str], annotating_components: List[str],
before_update: Optional[Callable[["Language", Dict[str, Any]], None]],
): ):
"""Train until an evaluation stops improving. Works as a generator, """Train until an evaluation stops improving. Works as a generator,
with each iteration yielding a tuple `(batch, info, is_best_checkpoint)`, with each iteration yielding a tuple `(batch, info, is_best_checkpoint)`,
@ -198,7 +201,10 @@ def train_while_improving(
words_seen = 0 words_seen = 0
start_time = timer() start_time = timer()
for step, (epoch, batch) in enumerate(train_data): for step, (epoch, batch) in enumerate(train_data):
dropout = next(dropouts) # type: ignore if before_update:
before_update_args = {"step": step, "epoch": epoch}
before_update(nlp, before_update_args)
dropout = dropouts(optimizer.step) # type: ignore
for subbatch in subdivide_batch(batch, accumulate_gradient): for subbatch in subdivide_batch(batch, accumulate_gradient):
nlp.update( nlp.update(
subbatch, subbatch,
@ -224,6 +230,7 @@ def train_while_improving(
score, other_scores = evaluate() score, other_scores = evaluate()
else: else:
score, other_scores = evaluate() score, other_scores = evaluate()
optimizer.last_score = score
results.append((score, step)) results.append((score, step))
is_best_checkpoint = score == max(results)[0] is_best_checkpoint = score == max(results)[0]
else: else:
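A sketch of how the new `before_update` hook could be registered; the callback name is invented, and it would be wired up via `[training.before_update]` in the config.

```python
import spacy

# Hypothetical callback; reference it from the training config as
#   [training.before_update]
#   @callbacks = "my_before_update.v1"
@spacy.registry.callbacks("my_before_update.v1")
def create_before_update():
    def before_update(nlp, args):
        # Called once per training step with {"step": ..., "epoch": ...}
        if args["step"] == 0:
            print("Starting training, epoch", args["epoch"])
    return before_update
```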

View File

@ -9,7 +9,7 @@ import re
from pathlib import Path from pathlib import Path
import thinc import thinc
from thinc.api import NumpyOps, get_current_ops, Adam, Config, Optimizer from thinc.api import NumpyOps, get_current_ops, Adam, Config, Optimizer
from thinc.api import ConfigValidationError, Model from thinc.api import ConfigValidationError, Model, constant as constant_schedule
import functools import functools
import itertools import itertools
import numpy import numpy
@ -41,13 +41,12 @@ except ImportError:
from .symbols import ORTH from .symbols import ORTH
from .compat import cupy, CudaStream, is_windows, importlib_metadata from .compat import cupy, CudaStream, is_windows, importlib_metadata
from .errors import Errors, Warnings, OLD_MODEL_SHORTCUTS from .errors import Errors, Warnings
from . import about from . import about
if TYPE_CHECKING: if TYPE_CHECKING:
# This lets us add type hints for mypy etc. without causing circular imports # This lets us add type hints for mypy etc. without causing circular imports
from .language import Language # noqa: F401 from .language import Language, PipeCallable # noqa: F401
from .pipeline import Pipe # noqa: F401
from .tokens import Doc, Span # noqa: F401 from .tokens import Doc, Span # noqa: F401
from .vocab import Vocab # noqa: F401 from .vocab import Vocab # noqa: F401
@ -425,8 +424,6 @@ def load_model(
return load_model_from_path(Path(name), **kwargs) # type: ignore[arg-type] return load_model_from_path(Path(name), **kwargs) # type: ignore[arg-type]
elif hasattr(name, "exists"): # Path or Path-like to model data elif hasattr(name, "exists"): # Path or Path-like to model data
return load_model_from_path(name, **kwargs) # type: ignore[arg-type] return load_model_from_path(name, **kwargs) # type: ignore[arg-type]
if name in OLD_MODEL_SHORTCUTS:
raise IOError(Errors.E941.format(name=name, full=OLD_MODEL_SHORTCUTS[name])) # type: ignore[index]
raise IOError(Errors.E050.format(name=name)) raise IOError(Errors.E050.format(name=name))
@ -434,9 +431,9 @@ def load_model_from_package(
name: str, name: str,
*, *,
vocab: Union["Vocab", bool] = True, vocab: Union["Vocab", bool] = True,
disable: Union[str, Iterable[str]] = SimpleFrozenList(), disable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
enable: Union[str, Iterable[str]] = SimpleFrozenList(), enable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
exclude: Union[str, Iterable[str]] = SimpleFrozenList(), exclude: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
config: Union[Dict[str, Any], Config] = SimpleFrozenDict(), config: Union[Dict[str, Any], Config] = SimpleFrozenDict(),
) -> "Language": ) -> "Language":
"""Load a model from an installed package. """Load a model from an installed package.
@ -610,9 +607,9 @@ def load_model_from_init_py(
init_file: Union[Path, str], init_file: Union[Path, str],
*, *,
vocab: Union["Vocab", bool] = True, vocab: Union["Vocab", bool] = True,
disable: Union[str, Iterable[str]] = SimpleFrozenList(), disable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
enable: Union[str, Iterable[str]] = SimpleFrozenList(), enable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
exclude: Union[str, Iterable[str]] = SimpleFrozenList(), exclude: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
config: Union[Dict[str, Any], Config] = SimpleFrozenDict(), config: Union[Dict[str, Any], Config] = SimpleFrozenDict(),
) -> "Language": ) -> "Language":
"""Helper function to use in the `load()` method of a model package's """Helper function to use in the `load()` method of a model package's
@ -1582,12 +1579,12 @@ def minibatch(items, size):
so that batch-size can vary on each step. so that batch-size can vary on each step.
""" """
if isinstance(size, int): if isinstance(size, int):
size_ = itertools.repeat(size) size_ = constant_schedule(size)
else: else:
size_ = size size_ = size
items = iter(items) items = iter(items)
while True: for step in itertools.count():
batch_size = next(size_) batch_size = size_(step)
batch = list(itertools.islice(items, int(batch_size))) batch = list(itertools.islice(items, int(batch_size)))
if len(batch) == 0: if len(batch) == 0:
break break
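`util.minibatch` follows the same pattern; a quick sketch, again assuming a Thinc version whose `compounding` returns a `Schedule`:

```python
from thinc.api import compounding
from spacy.util import minibatch

items = list(range(10))
# The size schedule is evaluated with the step index (0, 1, 2, ...)
for batch in minibatch(items, size=compounding(2.0, 4.0, 1.5)):
    print(batch)
```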
@ -1633,9 +1630,11 @@ def check_bool_env_var(env_var: str) -> bool:
def _pipe( def _pipe(
docs: Iterable["Doc"], docs: Iterable["Doc"],
proc: "Pipe", proc: "PipeCallable",
name: str, name: str,
default_error_handler: Callable[[str, "Pipe", List["Doc"], Exception], NoReturn], default_error_handler: Callable[
[str, "PipeCallable", List["Doc"], Exception], NoReturn
],
kwargs: Mapping[str, Any], kwargs: Mapping[str, Any],
) -> Iterator["Doc"]: ) -> Iterator["Doc"]:
if hasattr(proc, "pipe"): if hasattr(proc, "pipe"):

View File

@ -243,6 +243,15 @@ cdef class Vectors:
else: else:
return key in self.key2row return key in self.key2row
def __eq__(self, other):
# Check for equality, with faster checks first
return (
self.shape == other.shape
and self.key2row == other.key2row
and self.to_bytes(exclude=["strings"])
== other.to_bytes(exclude=["strings"])
)
def resize(self, shape, inplace=False): def resize(self, shape, inplace=False):
"""Resize the underlying vectors array. If inplace=True, the memory """Resize the underlying vectors array. If inplace=True, the memory
is reallocated. This may cause other references to the data to become is reallocated. This may cause other references to the data to become
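A sketch of the new equality check (keys and shapes below are arbitrary):

```python
import numpy
from spacy.vectors import Vectors

data = numpy.zeros((3, 5), dtype="f")
v1 = Vectors(data=data, keys=["cat", "dog", "rat"])
v2 = Vectors(data=data.copy(), keys=["cat", "dog", "rat"])

# Shape and key table are compared first, then the serialized contents
print(v1 == v2)                      # True
print(v1 == Vectors(shape=(1, 5)))   # False
```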

View File

@ -467,9 +467,9 @@ cdef class Vocab:
setters = ["strings", "vectors"] setters = ["strings", "vectors"]
if "strings" not in exclude: if "strings" not in exclude:
self.strings.to_disk(path / "strings.json") self.strings.to_disk(path / "strings.json")
if "vectors" not in "exclude": if "vectors" not in exclude:
self.vectors.to_disk(path, exclude=["strings"]) self.vectors.to_disk(path, exclude=["strings"])
if "lookups" not in "exclude": if "lookups" not in exclude:
self.lookups.to_disk(path) self.lookups.to_disk(path)
def from_disk(self, path, *, exclude=tuple()): def from_disk(self, path, *, exclude=tuple()):
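With the typo fixed (the string literal `"exclude"` replaced by the actual `exclude` argument), excluding these components works as documented. A minimal sketch, with an arbitrary output path:

```python
import spacy

nlp = spacy.blank("en")
# "vectors" and "lookups" are now actually skipped when excluded
nlp.vocab.to_disk("vocab_dir", exclude=["vectors", "lookups"])
```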

View File

@ -1,531 +1,11 @@
<Comment>
# spacy.io website and docs # spacy.io website and docs
![Netlify Status](https://api.netlify.com/api/v1/badges/d65fe97d-99ab-47f8-a339-1d8987251da0/deploy-status) ![Netlify Status](https://api.netlify.com/api/v1/badges/d65fe97d-99ab-47f8-a339-1d8987251da0/deploy-status)
_This page contains the documentation and styleguide for the spaCy website. Its rendered version is available at https://spacy.io/styleguide._ The styleguide for the spaCy website is available at [spacy.io/styleguide](https://spacy.io/styleguide).
--- ## Setup and installation
</Comment>
The [spacy.io](https://spacy.io) website is implemented using
[Gatsby](https://www.gatsbyjs.org) with
[Remark](https://github.com/remarkjs/remark) and [MDX](https://mdxjs.com/). This
allows authoring content in **straightforward Markdown** without the usual
limitations. Standard elements can be overwritten with powerful
[React](http://reactjs.org/) components and wherever Markdown syntax isn't
enough, JSX components can be used.
> #### Contributing to the site
>
> The docs can always use another example or more detail, and they should always
> be up to date and not misleading. We always appreciate a
> [pull request](https://github.com/explosion/spaCy/pulls). To quickly find the
> correct file to edit, simply click on the "Suggest edits" button at the bottom
> of a page.
>
> For more details on editing the site locally, see the installation
> instructions and markdown reference below.
## Logo {#logo source="website/src/images/logo.svg"}
import { Logos } from 'widgets/styleguide'
If you would like to use the spaCy logo on your site, please get in touch and
ask us first. However, if you want to show support and tell others that your
project is using spaCy, you can grab one of our
[spaCy badges](/usage/spacy-101#faq-project-with-spacy).
<Logos />
## Colors {#colors}
import { Colors, Patterns } from 'widgets/styleguide'
<Colors />
### Patterns
<Patterns />
## Typography {#typography}
import { H1, H2, H3, H4, H5, Label, InlineList, Comment } from
'components/typography'
> #### Markdown
>
> ```markdown_
> ## Headline 2
> ## Headline 2 {#some_id}
> ## Headline 2 {#some_id tag="method"}
> ```
>
> #### JSX
>
> ```jsx
> <H2>Headline 2</H2>
> <H2 id="some_id">Headline 2</H2>
> <H2 id="some_id" tag="method">Headline 2</H2>
> ```
Headlines are set in
[HK Grotesk](http://cargocollective.com/hanken/HK-Grotesk-Open-Source-Font) by
Hanken Design. All other body text uses the best-matching default system font
to provide a "native" reading experience. All code uses the
[JetBrains Mono](https://www.jetbrains.com/lp/mono/) typeface by JetBrains.
<Infobox title="Important note" variant="warning">
Level 2 headings are automatically wrapped in `<section>` elements at compile
time, using a custom
[Markdown transformer](https://github.com/explosion/spaCy/tree/master/website/plugins/remark-wrap-section.js).
This makes it easier to highlight the section that's currently in the viewport
in the sidebar menu.
</Infobox>
<div>
<H1>Headline 1</H1>
<H2>Headline 2</H2>
<H3>Headline 3</H3>
<H4>Headline 4</H4>
<H5>Headline 5</H5>
<Label>Label</Label>
</div>
---
The following optional attributes can be set on the headline to modify it. For
example, to add a tag for the documented type or mark features that have been
introduced in a specific version or require statistical models to be loaded.
Tags are also available as standalone `<Tag />` components.
| Argument | Example | Result |
| -------- | -------------------------- | ----------------------------------------- |
| `tag` | `{tag="method"}` | <Tag>method</Tag> |
| `new` | `{new="3"}` | <Tag variant="new">3</Tag> |
| `model` | `{model="tagger, parser"}` | <Tag variant="model">tagger, parser</Tag> |
| `hidden` | `{hidden="true"}` | |
## Elements {#elements}
### Links {#links}
> #### Markdown
>
> ```markdown
> [I am a link](https://spacy.io)
> ```
>
> #### JSX
>
> ```jsx
> <Link to="https://spacy.io">I am a link</Link>
> ```
Special link styles are used depending on the link URL.
- [I am a regular external link](https://explosion.ai)
- [I am a link to the documentation](/api/doc)
- [I am a link to an architecture](/api/architectures#HashEmbedCNN)
- [I am a link to a model](/models/en#en_core_web_sm)
- [I am a link to GitHub](https://github.com/explosion/spaCy)
### Abbreviations {#abbr}
import { Abbr } from 'components/typography'
> #### JSX
>
> ```jsx
> <Abbr title="Explanation">Abbreviation</Abbr>
> ```
Some text with <Abbr title="Explanation here">an abbreviation</Abbr>. On small
screens, I collapse and the explanation text is displayed next to the
abbreviation.
### Tags {#tags}
import Tag from 'components/tag'
> ```jsx
> <Tag>method</Tag>
> <Tag variant="new">2.1</Tag>
> <Tag variant="model">tagger, parser</Tag>
> ```
Tags can be used together with headlines, or next to properties across the
documentation, and combined with tooltips to provide additional information. An
optional `variant` argument can be used for special tags. `variant="new"` makes
the tag take a version number to mark new features. Using the component,
visibility of this tag can later be toggled once the feature isn't considered
new anymore. Setting `variant="model"` takes a description of model capabilities
and can be used to mark features that require a respective model to be
installed.
<InlineList>
<Tag>method</Tag> <Tag variant="new">2</Tag> <Tag variant="model">tagger,
parser</Tag>
</InlineList>
### Buttons {#buttons}
import Button from 'components/button'
> ```jsx
> <Button to="#" variant="primary">Primary small</Button>
> <Button to="#" variant="secondary">Secondary small</Button>
> ```
Link buttons come in two variants, `primary` and `secondary` and two sizes, with
an optional `large` size modifier. Since they're mostly used as enhanced links,
the buttons are implemented as styled links instead of native button elements.
<InlineList><Button to="#" variant="primary">Primary small</Button>
<Button to="#" variant="secondary">Secondary small</Button></InlineList>
<br />
<InlineList><Button to="#" variant="primary" large>Primary large</Button>
<Button to="#" variant="secondary" large>Secondary large</Button></InlineList>
## Components
### Table {#table}
> #### Markdown
>
> ```markdown_
> | Header 1 | Header 2 |
> | -------- | -------- |
> | Column 1 | Column 2 |
> ```
>
> #### JSX
>
> ```markup
> <Table>
> <Tr><Th>Header 1</Th><Th>Header 2</Th></Tr>
> <Tr><Td>Column 1</Td><Td>Column 2</Td></Tr>
> </Table>
> ```
Tables are used to present data and API documentation. Certain keywords can be
used to mark a footer row with a distinct style, for example to visualize the
return values of a documented function.
| Header 1 | Header 2 | Header 3 | Header 4 |
| ----------- | -------- | :------: | -------: |
| Column 1 | Column 2 | Column 3 | Column 4 |
| Column 1 | Column 2 | Column 3 | Column 4 |
| Column 1 | Column 2 | Column 3 | Column 4 |
| Column 1 | Column 2 | Column 3 | Column 4 |
| **RETURNS** | Column 2 | Column 3 | Column 4 |
Tables also support optional "divider" rows that are typically used to denote
keyword-only arguments in API documentation. To turn a row into a dividing
headline, it should only include content in its first cell, and its value should
be italicized:
> #### Markdown
>
> ```markdown_
> | Header 1 | Header 2 | Header 3 |
> | -------- | -------- | -------- |
> | Column 1 | Column 2 | Column 3 |
> | _Hello_ | | |
> | Column 1 | Column 2 | Column 3 |
> ```
| Header 1 | Header 2 | Header 3 |
| -------- | -------- | -------- |
| Column 1 | Column 2 | Column 3 |
| _Hello_ | | |
| Column 1 | Column 2 | Column 3 |
### Type Annotations {#type-annotations}
> #### Markdown
>
> ```markdown_
> ~~Model[List[Doc], Floats2d]~~
> ```
>
> #### JSX
>
> ```markup
> <TypeAnnotation>Model[List[Doc], Floats2d]</TypeAnnotation>
> ```
Type annotations are special inline code blocks used to describe Python
types in the [type hints](https://docs.python.org/3/library/typing.html) format.
The special component will split the type, apply syntax highlighting and link
all types that specify links in `meta/type-annotations.json`. Types can link to
internal or external documentation pages. To make it easy to represent the type
annotations in Markdown, the rendering "hijacks" the `~~` tags that would
typically be converted to a `<del>` element but in this case, text surrounded
by `~~` becomes a type annotation.
- ~~Dict[str, List[Union[Doc, Span]]]~~
- ~~Model[List[Doc], List[numpy.ndarray]]~~
Type annotations support a special visual style in tables and will render as a
separate row, under the cell text. This allows the API docs to display complex
types without taking up too much space in the cell. The type annotation should
always be the **last element** in the row.
> #### Markdown
>
> ```markdown_
> | Header 1 | Header 2 |
> | -------- | ----------------------- |
> | Column 1 | Column 2 ~~List[Doc]~~ |
> ```
| Name | Description |
| ----------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `vocab` | The shared vocabulary. ~~Vocab~~ |
| `model` | The Thinc [`Model`](https://thinc.ai/docs/api-model) wrapping the transformer. ~~Model[List[Doc], FullTransformerBatch]~~ |
| `set_extra_annotations` | Function that takes a batch of `Doc` objects and transformer outputs and can set additional annotations on the `Doc`. ~~Callable[[List[Doc], FullTransformerBatch], None]~~ |
### List {#list}
> #### Markdown
>
> ```markdown_
> 1. One
> 2. Two
> ```
>
> #### JSX
>
> ```markup
> <Ol>
> <Li>One</Li>
> <Li>Two</Li>
> </Ol>
> ```
Lists are available as bulleted and numbered. Markdown lists are transformed
automatically.
- I am a bulleted list
- I have nice bullets
- Lorem ipsum dolor
- consectetur adipiscing elit
1. I am an ordered list
2. I have nice numbers
3. Lorem ipsum dolor
4. consectetur adipiscing elit
### Aside {#aside}
> #### Markdown
>
> ```markdown_
> > #### Aside title
> > This is aside text.
> ```
>
> #### JSX
>
> ```jsx
> <Aside title="Aside title">This is aside text.</Aside>
> ```
Asides can be used to display additional notes and content in the right-hand
column. Asides can contain text, code and other elements if needed. Visually,
asides are moved to the side on the X-axis, and displayed at the same level they
were inserted. On small screens, they collapse and are rendered in their
original position, in between the text.
To make them easier to use in Markdown, paragraphs formatted as blockquotes will
turn into asides by default. Level 4 headlines (with a leading `####`) will
become aside titles.
### Code Block {#code-block}
> #### Markdown
>
> ````markdown_
> ```python
> ### This is a title
> import spacy
> ```
> ````
>
> #### JSX
>
> ```jsx
> <CodeBlock title="This is a title" lang="python">
> import spacy
> </CodeBlock>
> ```
Code blocks use the [Prism](http://prismjs.com/) syntax highlighter with a
custom theme. The language can be set individually on each block, and defaults
to raw text with no highlighting. An optional label can be added as the first
line with the prefix `####` (Python-like) and `///` (JavaScript-like). Blocks
without a language are rendered as plain text and preserve whitespace.
```python
### Using spaCy
import spacy
nlp = spacy.load("en_core_web_sm")
doc = nlp("This is a sentence.")
for token in doc:
print(token.text, token.pos_)
```
Code blocks can also specify an optional range of line numbers to highlight by
adding `{highlight="..."}` to the headline. Acceptable ranges are spans like
`5-7`, but also `5-7,10` or `5-7,10,13-14`.
> #### Markdown
>
> ````markdown_
> ```python
> ### This is a title {highlight="1-2"}
> import spacy
> nlp = spacy.load("en_core_web_sm")
> ```
> ````
```python
### Using the matcher {highlight="5-7"}
import spacy
from spacy.matcher import Matcher
nlp = spacy.load('en_core_web_sm')
matcher = Matcher(nlp.vocab)
pattern = [{"LOWER": "hello"}, {"IS_PUNCT": True}, {"LOWER": "world"}]
matcher.add("HelloWorld", None, pattern)
doc = nlp("Hello, world! Hello world!")
matches = matcher(doc)
```
Adding `{executable="true"}` to the title turns the code into an executable
block, powered by [Binder](https://mybinder.org) and
[Juniper](https://github.com/ines/juniper). If JavaScript is disabled, the
interactive widget defaults to a regular code block.
> #### Markdown
>
> ````markdown_
> ```python
> ### {executable="true"}
> import spacy
> nlp = spacy.load("en_core_web_sm")
> ```
> ````
```python
### {executable="true"}
import spacy
nlp = spacy.load("en_core_web_sm")
doc = nlp("This is a sentence.")
for token in doc:
print(token.text, token.pos_)
```
If a code block only contains a URL to a GitHub file, the raw file contents are
embedded automatically and syntax highlighting is applied. The link to the
original file is shown at the top of the widget.
> #### Markdown
>
> ````markdown_
> ```python
> https://github.com/...
> ```
> ````
>
> #### JSX
>
> ```jsx
> <GitHubCode url="https://github.com/..." lang="python" />
> ```
```python
https://github.com/explosion/spaCy/tree/master/spacy/language.py
```
### Infobox {#infobox}
import Infobox from 'components/infobox'
> #### JSX
>
> ```jsx
> <Infobox title="Information">Regular infobox</Infobox>
> <Infobox title="Important note" variant="warning">This is a warning.</Infobox>
> <Infobox title="Be careful!" variant="danger">This is dangerous.</Infobox>
> ```
Infoboxes can be used to add notes, updates, warnings or additional information
to a page or section. Semantically, they're implemented and interpreted as an
`aside` element. Infoboxes can take an optional `title` argument, as well as an
optional `variant` (either `"warning"` or `"danger"`).
<Infobox title="This is an infobox">
If needed, an infobox can contain regular text, `inline code`, lists and other
blocks.
</Infobox>
<Infobox title="This is a warning" variant="warning">
If needed, an infobox can contain regular text, `inline code`, lists and other
blocks.
</Infobox>
<Infobox title="This is dangerous" variant="danger">
If needed, an infobox can contain regular text, `inline code`, lists and other
blocks.
</Infobox>
### Accordion {#accordion}
import Accordion from 'components/accordion'
> #### JSX
>
> ```jsx
> <Accordion title="This is an accordion">
> Accordion content goes here.
> </Accordion>
> ```
Accordions are collapsible sections that are mostly used for lengthy tables,
like the tag and label annotation schemes for different languages. They all need
to be presented but chances are the user doesn't actually care about _all_ of
them, especially not at the same time. So it's fairly reasonable to hide them
behind a click. This particular implementation was inspired by the amazing
[Inclusive Components blog](https://inclusive-components.design/collapsible-sections/).
<Accordion title="This is an accordion">
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Quisque enim ante,
pretium a orci eget, varius dignissim augue. Nam eu dictum mauris, id tincidunt
nisi. Integer commodo pellentesque tincidunt. Nam at turpis finibus tortor
gravida sodales tincidunt sit amet est. Nullam euismod arcu in tortor auctor,
sit amet dignissim justo congue.
</Accordion>
## Setup and installation {#setup}
Before running the setup, make sure your versions of Before running the setup, make sure your versions of
[Node](https://nodejs.org/en/) and [npm](https://www.npmjs.com/) are up to date. [Node](https://nodejs.org/en/) and [npm](https://www.npmjs.com/) are up to date.
@ -554,14 +34,14 @@ extensions for your code editor. The
[`.prettierrc`](https://github.com/explosion/spaCy/tree/master/website/.prettierrc) [`.prettierrc`](https://github.com/explosion/spaCy/tree/master/website/.prettierrc)
file in the root defines the settings used in this codebase. file in the root defines the settings used in this codebase.
## Building & developing the site with Docker {#docker} ## Building & developing the site with Docker
Sometimes it's hard to get a local environment working due to rapid updates to
node dependencies, so it may be easier to use Docker for building the docs.

If you'd like to do this, **be sure you do _not_ include your local
`node_modules` folder**, since there are some dependencies that need to be built
for the image system. Rename it before using.
```bash ```bash
docker run -it \ docker run -it \
@ -571,16 +51,16 @@ docker run -it \
gatsby develop -H 0.0.0.0 gatsby develop -H 0.0.0.0
``` ```
This will allow you to access the built website at http://0.0.0.0:8000/ in your
browser, and still edit code in your editor while having the site reflect those
changes.

**Note**: If you're working on a Mac with an M1 processor, you might see
segfault errors from `qemu` if you use the default image. To fix this use the
`arm64` tagged image in the `docker run` command
(ghcr.io/explosion/spacy-io:arm64).
### Building the Docker image {#docker-build} ### Building the Docker image
If you'd like to build the image locally, you can do so like this: If you'd like to build the image locally, you can do so like this:
@ -588,67 +68,21 @@ If you'd like to build the image locally, you can do so like this:
docker build -t spacy-io . docker build -t spacy-io .
``` ```
This will take some time, so if you want to use the prebuilt image you'll save a
bit of time.
## Markdown reference {#markdown} ## Project structure
All page content and page meta lives in the `.md` files in the `/docs`
directory. The frontmatter block at the top of each file defines the page title
and other settings like the sidebar menu.
````markdown
---
title: Page title
---
## Headline starting a section {#some_id}
This is a regular paragraph with a [link](https://spacy.io) and **bold text**.
> #### This is an aside title
>
> This is aside text.
### Subheadline
| Header 1 | Header 2 |
| -------- | -------- |
| Column 1 | Column 2 |
```python
### Code block title {highlight="2-3"}
import spacy
nlp = spacy.load("en_core_web_sm")
doc = nlp("Hello world")
```
<Infobox title="Important note" variant="warning">
This is content in the infobox.
</Infobox>
````
In addition to the native markdown elements, you can use the components
[`<Infobox />`][infobox], [`<Accordion />`][accordion], [`<Abbr />`][abbr] and
[`<Tag />`][tag] via their JSX syntax.
[infobox]: https://spacy.io/styleguide#infobox
[accordion]: https://spacy.io/styleguide#accordion
[abbr]: https://spacy.io/styleguide#abbr
[tag]: https://spacy.io/styleguide#tag
## Project structure {#structure}
```yaml ```yaml
### Directory structure
├── docs # the actual markdown content ├── docs # the actual markdown content
├── meta # JSON-formatted site metadata ├── meta # JSON-formatted site metadata
| ├── languages.json # supported languages and statistical models | ├── languages.json # supported languages and statistical models
| ├── sidebars.json # sidebar navigations for different sections | ├── sidebars.json # sidebar navigations for different sections
| ├── site.json # general site metadata | ├── site.json # general site metadata
| ├── type-annotations.json # Type annotations
| └── universe.json # data for the spaCy universe section | └── universe.json # data for the spaCy universe section
├── public # compiled site ├── public # compiled site
├── setup # Jinja setup
├── src # source ├── src # source
| ├── components # React components | ├── components # React components
| ├── fonts # webfonts | ├── fonts # webfonts
@ -661,54 +95,10 @@ In addition to the native markdown elements, you can use the components
| | ├── models.js # layout template for model pages | | ├── models.js # layout template for model pages
| | └── universe.js # layout templates for universe | | └── universe.js # layout templates for universe
| └── widgets # non-reusable components with content, e.g. changelog | └── widgets # non-reusable components with content, e.g. changelog
├── .eslintrc.json # ESLint config file
├── .prettierrc # Prettier config file
├── gatsby-browser.js # browser-specific hooks for Gatsby ├── gatsby-browser.js # browser-specific hooks for Gatsby
├── gatsby-config.js # Gatsby configuration ├── gatsby-config.js # Gatsby configuration
├── gatsby-node.js # Node-specific hooks for Gatsby ├── gatsby-node.js # Node-specific hooks for Gatsby
└── package.json # package settings and dependencies └── package.json # package settings and dependencies
``` ```
## Editorial {#editorial}
- "spaCy" should always be spelled with a lowercase "s" and a capital "C",
unless it specifically refers to the Python package or Python import `spacy`
(in which case it should be formatted as code).
- ✅ spaCy is a library for advanced NLP in Python.
- ❌ Spacy is a library for advanced NLP in Python.
- ✅ First, you need to install the `spacy` package from pip.
- Mentions of code, like function names, classes, variable names etc. in inline
text should be formatted as `code`.
- ✅ "Calling the `nlp` object on a text returns a `Doc`."
- Objects that have pages in the [API docs](/api) should be linked, for
example, [`Doc`](/api/doc) or [`Language.to_disk`](/api/language#to_disk). The
mentions should still be formatted as code within the link. Links pointing to
the API docs will automatically receive a little icon. However, if a paragraph
includes many references to the API, the links can easily get messy. In that
case, we typically only link the first mention of an object and not any
subsequent ones.
- ✅ The [`Span`](/api/span) and [`Token`](/api/token) objects are views of a
[`Doc`](/api/doc). [`Span.as_doc`](/api/span#as_doc) creates a `Doc` object
from a `Span`.
- ❌ The [`Span`](/api/span) and [`Token`](/api/token) objects are views of a
[`Doc`](/api/doc). [`Span.as_doc`](/api/span#as_doc) creates a
[`Doc`](/api/doc) object from a [`Span`](/api/span).
* Other things we format as code are: references to trained pipeline packages
like `en_core_web_sm` or file names like `code.py` or `meta.json`.
- ✅ After training, the `config.cfg` is saved to disk.
* [Type annotations](#type-annotations) are a special type of code formatting,
expressed by wrapping the text in `~~` instead of backticks. The result looks
like this: ~~List[Doc]~~. All references to known types will be linked
automatically.
- ✅ The model has the input type ~~List[Doc]~~ and it outputs a
~~List[Array2d]~~.
* We try to keep links meaningful but short.
- ✅ For details, see the usage guide on
[training with custom code](/usage/training#custom-code).
- ❌ For details, see
[the usage guide on training with custom code](/usage/training#custom-code).
- ❌ For details, see the usage guide on training with custom code
[here](/usage/training#custom-code).

View File

@ -51,7 +51,7 @@ markup is correct.
"import spacy", "import spacy",
"import package_name", "import package_name",
"", "",
"nlp = spacy.load('en')", "nlp = spacy.load('en_core_web_sm')",
"nlp.add_pipe(package_name)" "nlp.add_pipe(package_name)"
], ],
"code_language": "python", "code_language": "python",

View File

@ -12,10 +12,11 @@ menu:
- ['train', 'train'] - ['train', 'train']
- ['pretrain', 'pretrain'] - ['pretrain', 'pretrain']
- ['evaluate', 'evaluate'] - ['evaluate', 'evaluate']
- ['apply', 'apply']
- ['find-threshold', 'find-threshold']
- ['assemble', 'assemble'] - ['assemble', 'assemble']
- ['package', 'package'] - ['package', 'package']
- ['project', 'project'] - ['project', 'project']
- ['ray', 'ray']
- ['huggingface-hub', 'huggingface-hub'] - ['huggingface-hub', 'huggingface-hub']
--- ---
@ -53,7 +54,7 @@ $ python -m spacy download [model] [--direct] [--sdist] [pip_args]
| `--direct`, `-D` | Force direct download of exact package version. ~~bool (flag)~~ | | `--direct`, `-D` | Force direct download of exact package version. ~~bool (flag)~~ |
| `--sdist`, `-S` <Tag variant="new">3</Tag> | Download the source package (`.tar.gz` archive) instead of the default pre-built binary wheel. ~~bool (flag)~~ | | `--sdist`, `-S` <Tag variant="new">3</Tag> | Download the source package (`.tar.gz` archive) instead of the default pre-built binary wheel. ~~bool (flag)~~ |
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | | `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
| pip args <Tag variant="new">2.1</Tag> | Additional installation options to be passed to `pip install` when installing the pipeline package. For example, `--user` to install to the user home directory or `--no-deps` to not install package dependencies. ~~Any (option/flag)~~ | | pip args | Additional installation options to be passed to `pip install` when installing the pipeline package. For example, `--user` to install to the user home directory or `--no-deps` to not install package dependencies. ~~Any (option/flag)~~ |
| **CREATES** | The installed pipeline package in your `site-packages` directory. | | **CREATES** | The installed pipeline package in your `site-packages` directory. |
## info {#info tag="command"} ## info {#info tag="command"}
@ -77,15 +78,15 @@ $ python -m spacy info [--markdown] [--silent] [--exclude]
$ python -m spacy info [model] [--markdown] [--silent] [--exclude] $ python -m spacy info [model] [--markdown] [--silent] [--exclude]
``` ```
| Name | Description | | Name | Description |
| ------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------- | | -------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------- |
| `model` | A trained pipeline, i.e. package name or path (optional). ~~Optional[str] \(option)~~ | | `model` | A trained pipeline, i.e. package name or path (optional). ~~Optional[str] \(option)~~ |
| `--markdown`, `-md` | Print information as Markdown. ~~bool (flag)~~ | | `--markdown`, `-md` | Print information as Markdown. ~~bool (flag)~~ |
| `--silent`, `-s` <Tag variant="new">2.0.12</Tag> | Don't print anything, just return the values. ~~bool (flag)~~ | | `--silent`, `-s` | Don't print anything, just return the values. ~~bool (flag)~~ |
| `--exclude`, `-e` | Comma-separated keys to exclude from the print-out. Defaults to `"labels"`. ~~Optional[str]~~ | | `--exclude`, `-e` | Comma-separated keys to exclude from the print-out. Defaults to `"labels"`. ~~Optional[str]~~ |
| `--url`, `-u` <Tag variant="new">3.5.0</Tag> | Print the URL to download the most recent compatible version of the pipeline. Requires a pipeline name. ~~bool (flag)~~ | | `--url`, `-u` <Tag variant="new">3.5.0</Tag> | Print the URL to download the most recent compatible version of the pipeline. Requires a pipeline name. ~~bool (flag)~~ |
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | | `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
| **PRINTS** | Information about your spaCy installation. | | **PRINTS** | Information about your spaCy installation. |
## validate {#validate new="2" tag="command"} ## validate {#validate new="2" tag="command"}
@ -260,22 +261,22 @@ chosen based on the file extension of the input file.
$ python -m spacy convert [input_file] [output_dir] [--converter] [--file-type] [--n-sents] [--seg-sents] [--base] [--morphology] [--merge-subtokens] [--ner-map] [--lang] $ python -m spacy convert [input_file] [output_dir] [--converter] [--file-type] [--n-sents] [--seg-sents] [--base] [--morphology] [--merge-subtokens] [--ner-map] [--lang]
``` ```
| Name | Description | | Name | Description |
| ------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------- | | ------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- |
| `input_path` | Input file or directory. ~~Path (positional)~~ | | `input_path` | Input file or directory. ~~Path (positional)~~ |
| `output_dir` | Output directory for converted file. Defaults to `"-"`, meaning data will be written to `stdout`. ~~Optional[Path] \(option)~~ | | `output_dir` | Output directory for converted file. Defaults to `"-"`, meaning data will be written to `stdout`. ~~Optional[Path] \(option)~~ |
| `--converter`, `-c` <Tag variant="new">2</Tag> | Name of converter to use (see below). ~~str (option)~~ | | `--converter`, `-c` | Name of converter to use (see below). ~~str (option)~~ |
| `--file-type`, `-t` <Tag variant="new">2.1</Tag> | Type of file to create. Either `spacy` (default) for binary [`DocBin`](/api/docbin) data or `json` for v2.x JSON format. ~~str (option)~~ | | `--file-type`, `-t` | Type of file to create. Either `spacy` (default) for binary [`DocBin`](/api/docbin) data or `json` for v2.x JSON format. ~~str (option)~~ |
| `--n-sents`, `-n` | Number of sentences per document. Supported for: `conll`, `conllu`, `iob`, `ner` ~~int (option)~~ | | `--n-sents`, `-n` | Number of sentences per document. Supported for: `conll`, `conllu`, `iob`, `ner` ~~int (option)~~ |
| `--seg-sents`, `-s` <Tag variant="new">2.2</Tag> | Segment sentences. Supported for: `conll`, `ner` ~~bool (flag)~~ | | `--seg-sents`, `-s` | Segment sentences. Supported for: `conll`, `ner` ~~bool (flag)~~ |
| `--base`, `-b`, `--model` | Trained spaCy pipeline for sentence segmentation to use as base (for `--seg-sents`). ~~Optional[str](option)~~ | | `--base`, `-b`, `--model` | Trained spaCy pipeline for sentence segmentation to use as base (for `--seg-sents`). ~~Optional[str](option)~~ |
| `--morphology`, `-m` | Enable appending morphology to tags. Supported for: `conllu` ~~bool (flag)~~ | | `--morphology`, `-m` | Enable appending morphology to tags. Supported for: `conllu` ~~bool (flag)~~ |
| `--merge-subtokens`, `-T` | Merge CoNLL-U subtokens ~~bool (flag)~~ | | `--merge-subtokens`, `-T` | Merge CoNLL-U subtokens ~~bool (flag)~~ |
| `--ner-map`, `-nm` | NER tag mapping (as JSON-encoded dict of entity types). Supported for: `conllu` ~~Optional[Path](option)~~ | | `--ner-map`, `-nm` | NER tag mapping (as JSON-encoded dict of entity types). Supported for: `conllu` ~~Optional[Path](option)~~ |
| `--lang`, `-l` <Tag variant="new">2.1</Tag> | Language code (if tokenizer required). ~~Optional[str] \(option)~~ | | `--lang`, `-l` | Language code (if tokenizer required). ~~Optional[str] \(option)~~ |
| `--concatenate`, `-C` | Concatenate output to a single file ~~bool (flag)~~ | | `--concatenate`, `-C` | Concatenate output to a single file ~~bool (flag)~~ |
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | | `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
| **CREATES** | Binary [`DocBin`](/api/docbin) training data that can be used with [`spacy train`](/api/cli#train). | | **CREATES** | Binary [`DocBin`](/api/docbin) training data that can be used with [`spacy train`](/api/cli#train). |
### Converters {#converters} ### Converters {#converters}
@ -474,8 +475,7 @@ report span characteristics such as the average span length and the span (or
span boundary) distinctiveness. The distinctiveness measure shows how different span boundary) distinctiveness. The distinctiveness measure shows how different
the tokens are with respect to the rest of the corpus using the KL-divergence of the tokens are with respect to the rest of the corpus using the KL-divergence of
the token distributions. To learn more, you can check out Papay et al.'s work on the token distributions. To learn more, you can check out Papay et al.'s work on
[*Dissecting Span Identification Tasks with Performance Prediction* (EMNLP [_Dissecting Span Identification Tasks with Performance Prediction_ (EMNLP 2020)](https://aclanthology.org/2020.emnlp-main.396/).
2020)](https://aclanthology.org/2020.emnlp-main.396/).
</Infobox> </Infobox>
@ -1163,6 +1163,76 @@ $ python -m spacy evaluate [model] [data_path] [--output] [--code] [--gold-prepr
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | | `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
| **CREATES** | Training results and optional metrics and visualizations. | | **CREATES** | Training results and optional metrics and visualizations. |
## apply {#apply new="3.5" tag="command"}
Applies a trained pipeline to data and stores the resulting annotated documents
in a `DocBin`. The input can be a single file or a directory. The recognized
input formats are:
1. `.spacy`
2. `.jsonl` containing a user-specified `text_key`
3. Files with any other extension are assumed to be plain text files containing
a single document.
When a directory is provided, it is traversed recursively to collect all files.
```cli
$ python -m spacy apply [model] [data-path] [output-file] [--code] [--text-key] [--force-overwrite] [--gpu-id] [--batch-size] [--n-process]
```
| Name | Description |
| ----------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `model` | Pipeline to apply to the data. Can be a package or a path to a data directory. ~~str (positional)~~ |
| `data_path` | Location of the data to process, in spaCy's [binary format](/api/data-formats#training), `.jsonl`, or plain text. ~~Path (positional)~~ |
| `output-file`, `-o` | Output `DocBin` path. ~~str (positional)~~ |
| `--code`, `-c` <Tag variant="new">3</Tag> | Path to Python file with additional code to be imported. Allows [registering custom functions](/usage/training#custom-functions) for new architectures. ~~Optional[Path] \(option)~~ |
| `--text-key`, `-tk` | The key to read the text from in `.jsonl` files. Defaults to `text`. ~~Optional[str] \(option)~~ |
| `--force-overwrite`, `-F` | If the provided `output-file` already exists, force `apply` to overwrite it. If this is `False` (default), the command quits with a warning instead. ~~bool (flag)~~ |
| `--gpu-id`, `-g` | GPU to use, if any. Defaults to `-1` for CPU. ~~int (option)~~ |
| `--batch-size`, `-b` | Batch size to use for prediction. Defaults to `1`. ~~int (option)~~ |
| `--n-process`, `-n` | Number of processes to use for prediction. Defaults to `1`. ~~int (option)~~ |
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
| **CREATES** | A `DocBin` with the annotations from the `model` for all the files found in `data-path`. |
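As a usage note, the resulting file can be read back with [`DocBin`](/api/docbin). A minimal sketch; the file name `output.spacy` and the pipeline `en_core_web_sm` are placeholders for illustration, not values defined by this command:

```python
import spacy
from spacy.tokens import DocBin

# Read back the annotations written by `spacy apply`.
# "output.spacy" and "en_core_web_sm" are example names, not prescribed values.
nlp = spacy.load("en_core_web_sm")
doc_bin = DocBin().from_disk("output.spacy")
for doc in doc_bin.get_docs(nlp.vocab):
    print(doc.text[:50], [(ent.text, ent.label_) for ent in doc.ents])
```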
## find-threshold {#find-threshold new="3.5" tag="command"}
Runs prediction trials for a trained model with varying thresholds to maximize
the specified metric. The search space for the threshold is traversed linearly
from 0 to 1 in `n_trials` steps. Results are displayed in a table on `stdout`
(the corresponding API call to `spacy.cli.find_threshold.find_threshold()`
returns all results).
This is applicable only to components whose predictions are influenced by
thresholds, e.g. `textcat_multilabel` and `spancat`, but not `textcat`. Note
that the full path to the corresponding threshold attribute in the config has to
be provided.
> #### Examples
>
> ```cli
> # For textcat_multilabel:
> $ python -m spacy find-threshold my_nlp data.spacy textcat_multilabel threshold cats_macro_f
> ```
>
> ```cli
> # For spancat:
> $ python -m spacy find-threshold my_nlp data.spacy spancat threshold spans_sc_f
> ```
| Name | Description |
| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `model` | Pipeline to evaluate. Can be a package or a path to a data directory. ~~str (positional)~~ |
| `data_path`             | Path to a `DocBin` file with the docs to use for the threshold search. ~~Path (positional)~~                                                                                           |
| `pipe_name` | Name of pipe to examine thresholds for. ~~str (positional)~~ |
| `threshold_key` | Key of threshold attribute in component's configuration. ~~str (positional)~~ |
| `scores_key`            | Name of the score metric to optimize. ~~str (positional)~~                                                                                                                             |
| `--n_trials`, `-n` | Number of trials to determine optimal thresholds. ~~int (option)~~ |
| `--code`, `-c` | Path to Python file with additional code to be imported. Allows [registering custom functions](/usage/training#custom-functions) for new architectures. ~~Optional[Path] \(option)~~ |
| `--gpu-id`, `-g` | GPU to use, if any. Defaults to `-1` for CPU. ~~int (option)~~ |
| `--gold-preproc`, `-G` | Use gold preprocessing. ~~bool (flag)~~ |
| `--silent`, `-V`, `-VV` | Display more information for debugging purposes. ~~bool (flag)~~                                                                                                                       |
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
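A sketch of the programmatic call mentioned above, assuming its arguments mirror the CLI's positional arguments; the exact signature and return value may differ. `my_nlp` and `data.spacy` are the placeholder paths from the examples:

```python
from pathlib import Path
from spacy.cli.find_threshold import find_threshold

# Assumption: the function accepts the same positional arguments as the CLI.
results = find_threshold(
    model="my_nlp",
    data_path=Path("data.spacy"),
    pipe_name="textcat_multilabel",
    threshold_key="threshold",
    scores_key="cats_macro_f",
)
print(results)  # all evaluated thresholds and their scores
```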
## assemble {#assemble tag="command"} ## assemble {#assemble tag="command"}
Assemble a pipeline from a config file without additional training. Expects a Assemble a pipeline from a config file without additional training. Expects a
@ -1229,19 +1299,19 @@ $ python -m spacy package [input_dir] [output_dir] [--code] [--meta-path] [--cre
> $ pip install dist/en_pipeline-0.0.0.tar.gz > $ pip install dist/en_pipeline-0.0.0.tar.gz
> ``` > ```
| Name | Description | | Name | Description |
| ------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | -------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `input_dir` | Path to directory containing pipeline data. ~~Path (positional)~~ | | `input_dir` | Path to directory containing pipeline data. ~~Path (positional)~~ |
| `output_dir` | Directory to create package folder in. ~~Path (positional)~~ | | `output_dir` | Directory to create package folder in. ~~Path (positional)~~ |
| `--code`, `-c` <Tag variant="new">3</Tag> | Comma-separated paths to Python files to be included in the package and imported in its `__init__.py`. This allows including [registering functions](/usage/training#custom-functions) and [custom components](/usage/processing-pipelines#custom-components). ~~str (option)~~ | | `--code`, `-c` <Tag variant="new">3</Tag> | Comma-separated paths to Python files to be included in the package and imported in its `__init__.py`. This allows including [registering functions](/usage/training#custom-functions) and [custom components](/usage/processing-pipelines#custom-components). ~~str (option)~~ |
| `--meta-path`, `-m` <Tag variant="new">2</Tag> | Path to [`meta.json`](/api/data-formats#meta) file (optional). ~~Optional[Path] \(option)~~ | | `--meta-path`, `-m` | Path to [`meta.json`](/api/data-formats#meta) file (optional). ~~Optional[Path] \(option)~~ |
| `--create-meta`, `-C` <Tag variant="new">2</Tag> | Create a `meta.json` file on the command line, even if one already exists in the directory. If an existing file is found, its entries will be shown as the defaults in the command line prompt. ~~bool (flag)~~ | | `--create-meta`, `-C` | Create a `meta.json` file on the command line, even if one already exists in the directory. If an existing file is found, its entries will be shown as the defaults in the command line prompt. ~~bool (flag)~~ |
| `--build`, `-b` <Tag variant="new">3</Tag> | Comma-separated artifact formats to build. Can be `sdist` (for a `.tar.gz` archive) and/or `wheel` (for a binary `.whl` file), or `none` if you want to run this step manually. The generated artifacts can be installed by `pip install`. Defaults to `sdist`. ~~str (option)~~ | | `--build`, `-b` <Tag variant="new">3</Tag> | Comma-separated artifact formats to build. Can be `sdist` (for a `.tar.gz` archive) and/or `wheel` (for a binary `.whl` file), or `none` if you want to run this step manually. The generated artifacts can be installed by `pip install`. Defaults to `sdist`. ~~str (option)~~ |
| `--name`, `-n` <Tag variant="new">3</Tag> | Package name to override in meta. ~~Optional[str] \(option)~~ | | `--name`, `-n` <Tag variant="new">3</Tag> | Package name to override in meta. ~~Optional[str] \(option)~~ |
| `--version`, `-v` <Tag variant="new">3</Tag> | Package version to override in meta. Useful when training new versions, as it doesn't require editing the meta template. ~~Optional[str] \(option)~~ | | `--version`, `-v` <Tag variant="new">3</Tag> | Package version to override in meta. Useful when training new versions, as it doesn't require editing the meta template. ~~Optional[str] \(option)~~ |
| `--force`, `-f` | Force overwriting of existing folder in output directory. ~~bool (flag)~~ | | `--force`, `-f` | Force overwriting of existing folder in output directory. ~~bool (flag)~~ |
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | | `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
| **CREATES** | A Python package containing the spaCy pipeline. | | **CREATES** | A Python package containing the spaCy pipeline. |
## project {#project new="3"} ## project {#project new="3"}
@ -1352,12 +1422,13 @@ If the contents are different, the new version of the file is uploaded. Deleting
obsolete files is left up to you. obsolete files is left up to you.
Remotes can be defined in the `remotes` section of the Remotes can be defined in the `remotes` section of the
[`project.yml`](/usage/projects#project-yml). Under the hood, spaCy uses the [`project.yml`](/usage/projects#project-yml). Under the hood, spaCy uses
[`smart-open`](https://github.com/RaRe-Technologies/smart_open) library to [`Pathy`](https://github.com/justindujardin/pathy) to communicate with the
communicate with the remote storages, so you can use any protocol that remote storages, so you can use any protocol that `Pathy` supports, including
`smart-open` supports, including [S3](https://aws.amazon.com/s3/), [S3](https://aws.amazon.com/s3/),
[Google Cloud Storage](https://cloud.google.com/storage), SSH and more, although [Google Cloud Storage](https://cloud.google.com/storage), and the local
you may need to install extra dependencies to use certain protocols. filesystem, although you may need to install extra dependencies to use certain
protocols.
```cli ```cli
$ python -m spacy project push [remote] [project_dir] $ python -m spacy project push [remote] [project_dir]
@ -1396,12 +1467,13 @@ outputs, so if you change the config back, you'll be able to fetch back the
result. result.
Remotes can be defined in the `remotes` section of the Remotes can be defined in the `remotes` section of the
[`project.yml`](/usage/projects#project-yml). Under the hood, spaCy uses the [`project.yml`](/usage/projects#project-yml). Under the hood, spaCy uses
[`smart-open`](https://github.com/RaRe-Technologies/smart_open) library to [`Pathy`](https://github.com/justindujardin/pathy) to communicate with the
communicate with the remote storages, so you can use any protocol that remote storages, so you can use any protocol that `Pathy` supports, including
`smart-open` supports, including [S3](https://aws.amazon.com/s3/), [S3](https://aws.amazon.com/s3/),
[Google Cloud Storage](https://cloud.google.com/storage), SSH and more, although [Google Cloud Storage](https://cloud.google.com/storage), and the local
you may need to install extra dependencies to use certain protocols. filesystem, although you may need to install extra dependencies to use certain
protocols.
```cli ```cli
$ python -m spacy project pull [remote] [project_dir] $ python -m spacy project pull [remote] [project_dir]
@ -1503,50 +1575,6 @@ $ python -m spacy project dvc [project_dir] [workflow] [--force] [--verbose] [--
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | | `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
| **CREATES** | A `dvc.yaml` file in the project directory, based on the steps defined in the given workflow. | | **CREATES** | A `dvc.yaml` file in the project directory, based on the steps defined in the given workflow. |
## ray {#ray new="3"}
The `spacy ray` CLI includes commands for parallel and distributed computing via
[Ray](https://ray.io).
<Infobox variant="warning">
To use this command, you need the
[`spacy-ray`](https://github.com/explosion/spacy-ray) package installed.
Installing the package will automatically add the `ray` command to the spaCy
CLI.
</Infobox>
### ray train {#ray-train tag="command"}
Train a spaCy pipeline using [Ray](https://ray.io) for parallel training. The
command works just like [`spacy train`](/api/cli#train). For more details and
examples, see the usage guide on
[parallel training](/usage/training#parallel-training) and the spaCy project
[integration](/usage/projects#ray).
```cli
$ python -m spacy ray train [config_path] [--code] [--output] [--n-workers] [--address] [--gpu-id] [--verbose] [overrides]
```
> #### Example
>
> ```cli
> $ python -m spacy ray train config.cfg --n-workers 2
> ```
| Name | Description |
| ------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `config_path` | Path to [training config](/api/data-formats#config) file containing all settings and hyperparameters. ~~Path (positional)~~ |
| `--code`, `-c` | Path to Python file with additional code to be imported. Allows [registering custom functions](/usage/training#custom-functions) for new architectures. ~~Optional[Path] \(option)~~ |
| `--output`, `-o` | Directory or remote storage URL for saving trained pipeline. The directory will be created if it doesn't exist. ~~Optional[Path] \(option)~~ |
| `--n-workers`, `-n` | The number of workers. Defaults to `1`. ~~int (option)~~ |
| `--address`, `-a` | Optional address of the Ray cluster. If not set (default), Ray will run locally. ~~Optional[str] \(option)~~ |
| `--gpu-id`, `-g` | GPU ID or `-1` for CPU. Defaults to `-1`. ~~int (option)~~ |
| `--verbose`, `-V` | Display more information for debugging purposes. ~~bool (flag)~~ |
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
| overrides | Config parameters to override. Should be options starting with `--` that correspond to the config section and value to override, e.g. `--paths.train ./train.spacy`. ~~Any (option/flag)~~ |
## huggingface-hub {#huggingface-hub new="3.1"} ## huggingface-hub {#huggingface-hub new="3.1"}
The `spacy huggingface-hub` CLI includes commands for uploading your trained The `spacy huggingface-hub` CLI includes commands for uploading your trained
View File
@ -186,6 +186,7 @@ process that are used when you run [`spacy train`](/api/cli#train).
| `accumulate_gradient` | Whether to divide the batch up into substeps. Defaults to `1`. ~~int~~ | | `accumulate_gradient` | Whether to divide the batch up into substeps. Defaults to `1`. ~~int~~ |
| `batcher` | Callable that takes an iterator of [`Doc`](/api/doc) objects and yields batches of `Doc`s. Defaults to [`batch_by_words`](/api/top-level#batch_by_words). ~~Callable[[Iterator[Doc], Iterator[List[Doc]]]]~~ | | `batcher` | Callable that takes an iterator of [`Doc`](/api/doc) objects and yields batches of `Doc`s. Defaults to [`batch_by_words`](/api/top-level#batch_by_words). ~~Callable[[Iterator[Doc], Iterator[List[Doc]]]]~~ |
| `before_to_disk` | Optional callback to modify `nlp` object right before it is saved to disk during and after training. Can be used to remove or reset config values or disable components. Defaults to `null`. ~~Optional[Callable[[Language], Language]]~~ | | `before_to_disk` | Optional callback to modify `nlp` object right before it is saved to disk during and after training. Can be used to remove or reset config values or disable components. Defaults to `null`. ~~Optional[Callable[[Language], Language]]~~ |
| `before_update` | Optional callback that is invoked at the start of each training step with the `nlp` object and a `Dict` containing the following entries: `step`, `epoch`. Can be used to make deferred changes to components. Defaults to `null`. ~~Optional[Callable[[Language, Dict[str, Any]], None]]~~ |
| `dev_corpus` | Dot notation of the config location defining the dev corpus. Defaults to `corpora.dev`. ~~str~~ | | `dev_corpus` | Dot notation of the config location defining the dev corpus. Defaults to `corpora.dev`. ~~str~~ |
| `dropout` | The dropout rate. Defaults to `0.1`. ~~float~~ | | `dropout` | The dropout rate. Defaults to `0.1`. ~~float~~ |
| `eval_frequency` | How often to evaluate during training (steps). Defaults to `200`. ~~int~~ | | `eval_frequency` | How often to evaluate during training (steps). Defaults to `200`. ~~int~~ |
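The new `before_update` setting expects a callback from the `callbacks` registry. A minimal sketch, using a hypothetical registry name `customize_before_update.v1` that would be referenced from `[training.before_update]` in the config:

```python
from typing import Any, Dict
import spacy
from spacy.language import Language

# "customize_before_update.v1" is a made-up name for illustration; it would be
# referenced from [training.before_update] via @callbacks in the config.
@spacy.registry.callbacks("customize_before_update.v1")
def create_before_update():
    def before_update(nlp: Language, state: Dict[str, Any]) -> None:
        # `state` contains the current "step" and "epoch" entries.
        if state["step"] == 0:
            print(f"Starting epoch {state['epoch']} with pipes: {nlp.pipe_names}")
    return before_update
```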
View File
@ -169,12 +169,6 @@ arguments it receives via the
[`[initialize.components]`](/api/data-formats#config-initialize) block in the [`[initialize.components]`](/api/data-formats#config-initialize) block in the
config. config.
<Infobox variant="warning" title="Changed in v3.0" id="begin_training">
This method was previously called `begin_training`.
</Infobox>
> #### Example > #### Example
> >
> ```python > ```python
View File
@ -209,15 +209,15 @@ alignment mode `"strict".
> assert span.text == "New York" > assert span.text == "New York"
> ``` > ```
| Name | Description | | Name | Description |
| ------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | ---------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `start` | The index of the first character of the span. ~~int~~ | | `start` | The index of the first character of the span. ~~int~~ |
| `end` | The index of the last character after the span. ~~int~~ | | `end` | The index of the last character after the span. ~~int~~ |
| `label` | A label to attach to the span, e.g. for named entities. ~~Union[int, str]~~ | | `label` | A label to attach to the span, e.g. for named entities. ~~Union[int, str]~~ |
| `kb_id` <Tag variant="new">2.2</Tag> | An ID from a knowledge base to capture the meaning of a named entity. ~~Union[int, str]~~ | | `kb_id` | An ID from a knowledge base to capture the meaning of a named entity. ~~Union[int, str]~~ |
| `vector` | A meaning representation of the span. ~~numpy.ndarray[ndim=1, dtype=float32]~~ | | `vector` | A meaning representation of the span. ~~numpy.ndarray[ndim=1, dtype=float32]~~ |
| `alignment_mode` | How character indices snap to token boundaries. Options: `"strict"` (no snapping), `"contract"` (span of all tokens completely within the character span), `"expand"` (span of all tokens at least partially covered by the character span). Defaults to `"strict"`. ~~str~~ | | `alignment_mode` | How character indices snap to token boundaries. Options: `"strict"` (no snapping), `"contract"` (span of all tokens completely within the character span), `"expand"` (span of all tokens at least partially covered by the character span). Defaults to `"strict"`. ~~str~~ |
| **RETURNS** | The newly constructed object or `None`. ~~Optional[Span]~~ | | **RETURNS** | The newly constructed object or `None`. ~~Optional[Span]~~ |
## Doc.set_ents {#set_ents tag="method" new="3"} ## Doc.set_ents {#set_ents tag="method" new="3"}
@ -757,11 +757,10 @@ The L2 norm of the document's vector representation.
| `text_with_ws` | An alias of `Doc.text`, provided for duck-type compatibility with `Span` and `Token`. ~~str~~ | | `text_with_ws` | An alias of `Doc.text`, provided for duck-type compatibility with `Span` and `Token`. ~~str~~ |
| `mem` | The document's local memory heap, for all C data it owns. ~~cymem.Pool~~ | | `mem` | The document's local memory heap, for all C data it owns. ~~cymem.Pool~~ |
| `vocab` | The store of lexical types. ~~Vocab~~ | | `vocab` | The store of lexical types. ~~Vocab~~ |
| `tensor` <Tag variant="new">2</Tag> | Container for dense vector representations. ~~numpy.ndarray~~ | | `tensor` | Container for dense vector representations. ~~numpy.ndarray~~ |
| `user_data` | A generic storage area, for user custom data. ~~Dict[str, Any]~~ | | `user_data` | A generic storage area, for user custom data. ~~Dict[str, Any]~~ |
| `lang` <Tag variant="new">2.1</Tag> | Language of the document's vocabulary. ~~int~~ | | `lang` | Language of the document's vocabulary. ~~int~~ |
| `lang_` <Tag variant="new">2.1</Tag> | Language of the document's vocabulary. ~~str~~ | | `lang_` | Language of the document's vocabulary. ~~str~~ |
| `sentiment` | The document's positivity/negativity score, if available. ~~float~~ |
| `user_hooks` | A dictionary that allows customization of the `Doc`'s properties. ~~Dict[str, Callable]~~ | | `user_hooks` | A dictionary that allows customization of the `Doc`'s properties. ~~Dict[str, Callable]~~ |
| `user_token_hooks` | A dictionary that allows customization of properties of `Token` children. ~~Dict[str, Callable]~~ | | `user_token_hooks` | A dictionary that allows customization of properties of `Token` children. ~~Dict[str, Callable]~~ |
| `user_span_hooks` | A dictionary that allows customization of properties of `Span` children. ~~Dict[str, Callable]~~ | | `user_span_hooks` | A dictionary that allows customization of properties of `Span` children. ~~Dict[str, Callable]~~ |
@ -785,7 +784,6 @@ serialization by passing in the string names via the `exclude` argument.
| Name | Description | | Name | Description |
| ------------------ | --------------------------------------------- | | ------------------ | --------------------------------------------- |
| `text` | The value of the `Doc.text` attribute. | | `text` | The value of the `Doc.text` attribute. |
| `sentiment` | The value of the `Doc.sentiment` attribute. |
| `tensor` | The value of the `Doc.tensor` attribute. | | `tensor` | The value of the `Doc.tensor` attribute. |
| `user_data` | The value of the `Doc.user_data` dictionary. | | `user_data` | The value of the `Doc.user_data` dictionary. |
| `user_data_keys` | The keys of the `Doc.user_data` dictionary. | | `user_data_keys` | The keys of the `Doc.user_data` dictionary. |
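As a usage note, these field names are the values accepted by the `exclude` argument when serializing. A minimal sketch; "en_core_web_sm" is just an example pipeline used for illustration:

```python
import spacy
from spacy.tokens import Doc

# Serialize a Doc while excluding a field listed in the table above.
nlp = spacy.load("en_core_web_sm")
doc = nlp("New York is a city.")
data = doc.to_bytes(exclude=["tensor"])

# Deserialize into a fresh Doc that shares the same vocab.
new_doc = Doc(nlp.vocab).from_bytes(data)
```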
View File
@ -200,12 +200,6 @@ knowledge base. This argument should be a function that takes a `Vocab` instance
and creates the `KnowledgeBase`, ensuring that the strings of the knowledge base and creates the `KnowledgeBase`, ensuring that the strings of the knowledge base
are synced with the current vocab. are synced with the current vocab.
<Infobox variant="warning" title="Changed in v3.0" id="begin_training">
This method was previously called `begin_training`.
</Infobox>
> #### Example > #### Example
> >
> ```python > ```python
View File
@ -165,12 +165,6 @@ arguments it receives via the
[`[initialize.components]`](/api/data-formats#config-initialize) block in the [`[initialize.components]`](/api/data-formats#config-initialize) block in the
config. config.
<Infobox variant="warning" title="Changed in v3.0" id="begin_training">
This method was previously called `begin_training`.
</Infobox>
> #### Example > #### Example
> >
> ```python > ```python
Some files were not shown because too many files have changed in this diff.