Merge branch 'explosion:master' into data-debug-lemmatizer

commit 51b68aacad by Peter Baumgartner, 2023-01-19 13:21:25 -05:00, committed by GitHub
387 changed files with 35119 additions and 37046 deletions


@@ -10,7 +10,7 @@ about: Use this template if you came across a bug or unexpected behaviour differ
<!-- Include a code example or the steps that led to the problem. Please try to be as specific as possible. -->

## Your Environment

-<!-- Include details of your environment. If you're using spaCy 1.7+, you can also type `python -m spacy info --markdown` and copy-paste the result here.-->
+<!-- Include details of your environment. You can also type `python -m spacy info --markdown` and copy-paste the result here.-->
* Operating System:
* Python Version Used:
* spaCy Version Used:


@@ -1,67 +1,56 @@
parameters:
  python_version: ''
-  architecture: ''
-  prefix: ''
-  gpu: false
-  num_build_jobs: 1
+  architecture: 'x64'
+  num_build_jobs: 2

steps:
  - task: UsePythonVersion@0
    inputs:
      versionSpec: ${{ parameters.python_version }}
      architecture: ${{ parameters.architecture }}
+      allowUnstable: true

  - bash: |
      echo "##vso[task.setvariable variable=python_version]${{ parameters.python_version }}"
    displayName: 'Set variables'

  - script: |
-      ${{ parameters.prefix }} python -m pip install -U pip setuptools
-      ${{ parameters.prefix }} python -m pip install -U -r requirements.txt
+      python -m pip install -U build pip setuptools
+      python -m pip install -U -r requirements.txt
    displayName: "Install dependencies"

  - script: |
-      ${{ parameters.prefix }} python setup.py build_ext --inplace -j ${{ parameters.num_build_jobs }}
-      ${{ parameters.prefix }} python setup.py sdist --formats=gztar
-    displayName: "Compile and build sdist"
+      python -m build --sdist
+    displayName: "Build sdist"

-  - script: python -m mypy spacy
+  - script: |
+      python -m mypy spacy
    displayName: 'Run mypy'
-    condition: ne(variables['python_version'], '3.6')

  - task: DeleteFiles@1
    inputs:
      contents: "spacy"
    displayName: "Delete source directory"

+  - task: DeleteFiles@1
+    inputs:
+      contents: "*.egg-info"
+    displayName: "Delete egg-info directory"

  - script: |
-      ${{ parameters.prefix }} python -m pip freeze --exclude torch --exclude cupy-cuda110 > installed.txt
-      ${{ parameters.prefix }} python -m pip uninstall -y -r installed.txt
+      python -m pip freeze > installed.txt
+      python -m pip uninstall -y -r installed.txt
    displayName: "Uninstall all packages"

  - bash: |
-      ${{ parameters.prefix }} SDIST=$(python -c "import os;print(os.listdir('./dist')[-1])" 2>&1)
-      ${{ parameters.prefix }} SPACY_NUM_BUILD_JOBS=2 python -m pip install dist/$SDIST
+      SDIST=$(python -c "import os;print(os.listdir('./dist')[-1])" 2>&1)
+      SPACY_NUM_BUILD_JOBS=${{ parameters.num_build_jobs }} python -m pip install dist/$SDIST
    displayName: "Install from sdist"

  - script: |
-      ${{ parameters.prefix }} python -m pip install -U -r requirements.txt
-    displayName: "Install test requirements"
-
-  - script: |
-      ${{ parameters.prefix }} python -m pip install -U cupy-cuda110 -f https://github.com/cupy/cupy/releases/v9.0.0
-      ${{ parameters.prefix }} python -m pip install "torch==1.7.1+cu110" -f https://download.pytorch.org/whl/torch_stable.html
-    displayName: "Install GPU requirements"
-    condition: eq(${{ parameters.gpu }}, true)
-
-  - script: |
-      ${{ parameters.prefix }} python -m pytest --pyargs spacy -W error
-    displayName: "Run CPU tests"
-    condition: eq(${{ parameters.gpu }}, false)
-
-  - script: |
-      ${{ parameters.prefix }} python -m pytest --pyargs spacy -W error -p spacy.tests.enable_gpu
-    displayName: "Run GPU tests"
-    condition: eq(${{ parameters.gpu }}, true)
+      python -W error -c "import spacy"
+    displayName: "Test import"

  - script: |
      python -m spacy download ca_core_news_sm
@@ -70,6 +59,11 @@ steps:
    displayName: 'Test download CLI'
    condition: eq(variables['python_version'], '3.8')

+  - script: |
+      python -W error -c "import ca_core_news_sm; nlp = ca_core_news_sm.load(); doc=nlp('test')"
+    displayName: 'Test no warnings on load (#11713)'
+    condition: eq(variables['python_version'], '3.8')

  - script: |
      python -m spacy convert extra/example_data/ner_example_data/ner-token-per-line-conll2003.json .
    displayName: 'Test convert CLI'
@@ -104,13 +98,22 @@ steps:
    displayName: 'Test assemble CLI vectors warning'
    condition: eq(variables['python_version'], '3.8')

+  - script: |
+      python -m pip install -U -r requirements.txt
+    displayName: "Install test requirements"
+
+  - script: |
+      python -m pytest --pyargs spacy -W error
+    displayName: "Run CPU tests"
+
+  - script: |
+      python -m pip install 'spacy[apple]'
+      python -m pytest --pyargs spacy
+    displayName: "Run CPU tests with thinc-apple-ops"
+    condition: and(startsWith(variables['imageName'], 'macos'), eq(variables['python.version'], '3.11'))

  - script: |
      python .github/validate_universe_json.py website/meta/universe.json
    displayName: 'Test website/meta/universe.json'
    condition: eq(variables['python_version'], '3.8')

-  - script: |
-      ${{ parameters.prefix }} python -m pip install --pre thinc-apple-ops
-      ${{ parameters.prefix }} python -m pytest --pyargs spacy
-    displayName: "Run CPU tests with thinc-apple-ops"
-    condition: and(startsWith(variables['imageName'], 'macos'), eq(variables['python.version'], '3.10'))


@@ -12,10 +12,10 @@ jobs:
    if: github.repository_owner == 'explosion'
    runs-on: ubuntu-latest
    steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
        with:
          ref: ${{ github.head_ref }}
-      - uses: actions/setup-python@v2
+      - uses: actions/setup-python@v4
      - run: pip install black
      - name: Auto-format code if needed
        run: black spacy
@@ -23,10 +23,11 @@ jobs:
      # code and makes GitHub think the action failed
      - name: Check for modified files
        id: git-check
-        run: echo ::set-output name=modified::$(if git diff-index --quiet HEAD --; then echo "false"; else echo "true"; fi)
+        run: echo modified=$(if git diff-index --quiet HEAD --; then echo "false"; else echo "true"; fi) >> $GITHUB_OUTPUT
      - name: Create Pull Request
        if: steps.git-check.outputs.modified == 'true'
-        uses: peter-evans/create-pull-request@v3
+        uses: peter-evans/create-pull-request@v4
        with:
          title: Auto-format code with black
          labels: meta


@@ -8,14 +8,14 @@ on:
jobs:
  explosion-bot:
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-latest
    steps:
      - name: Dump GitHub context
        env:
          GITHUB_CONTEXT: ${{ toJson(github) }}
        run: echo "$GITHUB_CONTEXT"
-      - uses: actions/checkout@v1
-      - uses: actions/setup-python@v1
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
      - name: Install and run explosion-bot
        run: |
          pip install git+https://${{ secrets.EXPLOSIONBOT_TOKEN }}@github.com/explosion/explosion-bot


@@ -15,7 +15,7 @@ jobs:
  action:
    runs-on: ubuntu-latest
    steps:
-      - uses: dessant/lock-threads@v3
+      - uses: dessant/lock-threads@v4
        with:
          process-only: 'issues'
          issue-inactive-days: '30'


@@ -14,7 +14,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
-        uses: actions/checkout@v1
+        uses: actions/checkout@v3
        with:
          ref: ${{ matrix.branch }}
      - name: Get commits from past 24 hours
@@ -23,9 +23,9 @@ jobs:
          today=$(date '+%Y-%m-%d %H:%M:%S')
          yesterday=$(date -d "yesterday" '+%Y-%m-%d %H:%M:%S')
          if git log --after="$yesterday" --before="$today" | grep commit ; then
-            echo "::set-output name=run_tests::true"
+            echo run_tests=true >> $GITHUB_OUTPUT
          else
-            echo "::set-output name=run_tests::false"
+            echo run_tests=false >> $GITHUB_OUTPUT
          fi
      - name: Trigger buildkite build


@@ -17,8 +17,10 @@ jobs:
        run: |
          echo "$GITHUB_CONTEXT"
-      - uses: actions/checkout@v1
-      - uses: actions/setup-python@v1
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
+        with:
+          python-version: '3.10'
      - name: Install Bernadette app dependency and send an alert
        env:
          SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}

.gitignore

@@ -10,20 +10,11 @@ spacy/tests/package/setup.cfg
spacy/tests/package/pyproject.toml
spacy/tests/package/requirements.txt

-# Website
-website/.cache/
-website/public/
-website/node_modules
-website/.npm
-website/logs
-*.log
-npm-debug.log*
-quickstart-training-generator.js

# Cython / C extensions
cythonize.json
spacy/*.html
*.cpp
+*.c
*.so

# Vim / VSCode / editors


@@ -5,8 +5,8 @@ repos:
      - id: black
        language_version: python3.7
        additional_dependencies: ['click==8.0.4']
-  - repo: https://gitlab.com/pycqa/flake8
-    rev: 3.9.2
+  - repo: https://github.com/pycqa/flake8
+    rev: 5.0.4
    hooks:
      - id: flake8
        args:


@@ -8,15 +8,15 @@ be used in real products.
spaCy comes with
[pretrained pipelines](https://spacy.io/models) and
-currently supports tokenization and training for **60+ languages**. It features
+currently supports tokenization and training for **70+ languages**. It features
state-of-the-art speed and **neural network models** for tagging,
parsing, **named entity recognition**, **text classification** and more,
multi-task learning with pretrained **transformers** like BERT, as well as a
production-ready [**training system**](https://spacy.io/usage/training) and easy
model packaging, deployment and workflow management. spaCy is commercial
-open-source software, released under the MIT license.
+open-source software, released under the [MIT license](https://github.com/explosion/spaCy/blob/master/LICENSE).

-💫 **Version 3.4.0 out now!**
+💫 **Version 3.5 out now!**
[Check out the release notes here.](https://github.com/explosion/spaCy/releases)

[![Azure Pipelines](https://img.shields.io/azure-devops/build/explosion-ai/public/8/master.svg?logo=azure-pipelines&style=flat-square&label=build)](https://dev.azure.com/explosion-ai/public/_build?definitionId=8)
@@ -46,6 +46,7 @@ open-source software, released under the MIT license.
| 🛠 **[Changelog]** | Changes and version history. |
| 💝 **[Contribute]** | How to contribute to the spaCy project and code base. |
| <a href="https://explosion.ai/spacy-tailored-pipelines"><img src="https://user-images.githubusercontent.com/13643239/152853098-1c761611-ccb0-4ec6-9066-b234552831fe.png" width="125" alt="spaCy Tailored Pipelines"/></a> | Get a custom spaCy pipeline, tailor-made for your NLP problem by spaCy's core developers. Streamlined, production-ready, predictable and maintainable. Start by completing our 5-minute questionnaire to tell us what you need and we'll be in touch! **[Learn more &rarr;](https://explosion.ai/spacy-tailored-pipelines)** |
+| <a href="https://explosion.ai/spacy-tailored-analysis"><img src="https://user-images.githubusercontent.com/1019791/206151300-b00cd189-e503-4797-aa1e-1bb6344062c5.png" width="125" alt="spaCy Tailored Pipelines"/></a> | Bespoke advice for problem solving, strategy and analysis for applied NLP projects. Services include data strategy, code reviews, pipeline design and annotation coaching. Curious? Fill in our 5-minute questionnaire to tell us what you need and we'll be in touch! **[Learn more &rarr;](https://explosion.ai/spacy-tailored-analysis)** |

[spacy 101]: https://spacy.io/usage/spacy-101
[new in v3.0]: https://spacy.io/usage/v3
@@ -59,6 +60,7 @@ open-source software, released under the MIT license.
[changelog]: https://spacy.io/usage#changelog
[contribute]: https://github.com/explosion/spaCy/blob/master/CONTRIBUTING.md

## 💬 Where to ask questions

The spaCy project is maintained by the [spaCy team](https://explosion.ai/about).
@@ -79,7 +81,7 @@ more people can benefit from it.
## Features

-- Support for **60+ languages**
+- Support for **70+ languages**
- **Trained pipelines** for different languages and tasks
- Multi-task learning with pretrained **transformers** like BERT
- Support for pretrained **word vectors** and embeddings


@@ -31,7 +31,7 @@ jobs:
      inputs:
        versionSpec: "3.7"
    - script: |
-        pip install flake8==3.9.2
+        pip install flake8==5.0.4
        python -m flake8 spacy --count --select=E901,E999,F821,F822,F823,W605 --show-source --statistics
      displayName: "flake8"
@@ -41,7 +41,7 @@ jobs:
      matrix:
        # We're only running one platform per Python version to speed up builds
        Python36Linux:
-          imageName: "ubuntu-latest"
+          imageName: "ubuntu-20.04"
          python.version: "3.6"
        # Python36Windows:
        #   imageName: "windows-latest"
@@ -50,7 +50,7 @@ jobs:
        #   imageName: "macos-latest"
        #   python.version: "3.6"
        # Python37Linux:
-        #   imageName: "ubuntu-latest"
+        #   imageName: "ubuntu-20.04"
        #   python.version: "3.7"
        Python37Windows:
          imageName: "windows-latest"
@@ -76,15 +76,24 @@ jobs:
        # Python39Mac:
        #   imageName: "macos-latest"
        #   python.version: "3.9"
-        Python310Linux:
-          imageName: "ubuntu-latest"
-          python.version: "3.10"
+        # Python310Linux:
+        #   imageName: "ubuntu-latest"
+        #   python.version: "3.10"
        Python310Windows:
          imageName: "windows-latest"
          python.version: "3.10"
-        Python310Mac:
-          imageName: "macos-latest"
-          python.version: "3.10"
+        # Python310Mac:
+        #   imageName: "macos-latest"
+        #   python.version: "3.10"
+        Python311Linux:
+          imageName: 'ubuntu-latest'
+          python.version: '3.11'
+        Python311Windows:
+          imageName: 'windows-latest'
+          python.version: '3.11'
+        Python311Mac:
+          imageName: 'macos-latest'
+          python.version: '3.11'
      maxParallel: 4
    pool:
      vmImage: $(imageName)
@@ -92,20 +101,3 @@ jobs:
    - template: .github/azure-steps.yml
      parameters:
        python_version: '$(python.version)'
-        architecture: 'x64'
-
-# - job: "TestGPU"
-#   dependsOn: "Validate"
-#   strategy:
-#     matrix:
-#       Python38LinuxX64_GPU:
-#         python.version: '3.8'
-#   pool:
-#     name: "LinuxX64_GPU"
-#   steps:
-#     - template: .github/azure-steps.yml
-#       parameters:
-#         python_version: '$(python.version)'
-#         architecture: 'x64'
-#         gpu: true
-#         num_build_jobs: 24


@@ -5,4 +5,5 @@ numpy==1.17.3; python_version=='3.8' and platform_machine!='aarch64'
numpy==1.19.2; python_version=='3.8' and platform_machine=='aarch64'
numpy==1.19.3; python_version=='3.9'
numpy==1.21.3; python_version=='3.10'
-numpy; python_version>='3.11'
+numpy==1.23.2; python_version=='3.11'
+numpy; python_version>='3.12'


@@ -0,0 +1,82 @@
# spaCy Satellite Packages
This is a list of all the active repos relevant to spaCy besides the main one, with short descriptions, history, and current status. Archived repos will not be covered.
## Always Included in spaCy
These packages are always pulled in when you install spaCy. Most of them are direct dependencies, but some are transitive dependencies through other packages.
- [spacy-legacy](https://github.com/explosion/spacy-legacy): When an architecture in spaCy changes enough to get a new version, the old version is frozen and moved to spacy-legacy. This allows us to keep the core library slim while also preserving backwards compatibility.
- [thinc](https://github.com/explosion/thinc): Thinc is the machine learning library that powers trainable components in spaCy. It wraps backends like Numpy, PyTorch, and Tensorflow to provide a functional interface for specifying architectures.
- [catalogue](https://github.com/explosion/catalogue): Small library for adding function registries, like those used for model architectures in spaCy; a short sketch follows this list.
- [confection](https://github.com/explosion/confection): This library contains the functionality for config parsing that was formerly contained directly in Thinc.
- [spacy-loggers](https://github.com/explosion/spacy-loggers): Contains loggers beyond the default logger available in spaCy's core code base. This includes loggers integrated with third-party services, which may differ in release cadence from spaCy itself.
- [wasabi](https://github.com/explosion/wasabi): A command line formatting library, used for terminal output in spaCy.
- [srsly](https://github.com/explosion/srsly): A wrapper that vendors several serialization libraries for spaCy. Includes parsers for JSON, JSONL, MessagePack, (extended) Pickle, and YAML.
- [preshed](https://github.com/explosion/preshed): A Cython library for low-level data structures like hash maps, used for memory efficient data storage.
- [cython-blis](https://github.com/explosion/cython-blis): Fast matrix multiplication using BLIS without depending on system libraries. Required by Thinc, rather than spaCy directly.
- [murmurhash](https://github.com/explosion/murmurhash): A wrapper library for a C++ murmurhash implementation, used for string IDs in spaCy and preshed.
- [cymem](https://github.com/explosion/cymem): A small library for RAII-style memory management in Cython.
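As a quick illustration of the registry pattern that `catalogue` provides, here is a minimal sketch; the namespace and function names below are made up for the example and are not spaCy's own registries:

```python
import catalogue

# Create a namespaced registry, analogous to spaCy's architecture registry.
architectures = catalogue.create("my_project", "architectures")

@architectures.register("my_tagger.v1")
def build_my_tagger(width: int):
    # A registered factory that a config file could refer to by name.
    return {"width": width}

# Registered functions can later be looked up by their string name.
builder = architectures.get("my_tagger.v1")
assert builder(width=96) == {"width": 96}
```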
## Optional Extensions for spaCy
These are repos that can be used by spaCy but aren't part of a default installation. Many of these are wrappers to integrate various kinds of third-party libraries.
- [spacy-transformers](https://github.com/explosion/spacy-transformers): A wrapper for the [HuggingFace Transformers](https://huggingface.co/docs/transformers/index) library, this handles the extensive conversion necessary to coordinate spaCy's powerful `Doc` representation, training pipeline, and the Transformer embeddings. When released, this was known as `spacy-pytorch-transformers`, but it changed to the current name when HuggingFace updated the name of their library as well.
- [spacy-huggingface-hub](https://github.com/explosion/spacy-huggingface-hub): This package has a CLI script for uploading a packaged spaCy pipeline (created with `spacy package`) to the [Hugging Face Hub](https://huggingface.co/models).
- [spacy-alignments](https://github.com/explosion/spacy-alignments): A wrapper for the tokenizations library (mentioned below) with a modified build system to simplify cross-platform wheel creation. Used in spacy-transformers for aligning spaCy and HuggingFace tokenizations.
- [spacy-experimental](https://github.com/explosion/spacy-experimental): Experimental components that are not quite ready for inclusion in the main spaCy library. Usually there are unresolved questions around their APIs, so the experimental library allows us to expose them to the community for feedback before fully integrating them.
- [spacy-lookups-data](https://github.com/explosion/spacy-lookups-data): A repository of linguistic data, such as lemmas, that takes up a lot of disk space. Originally created to reduce the size of the spaCy core library. This is mainly useful if you want the data included but aren't using a pretrained pipeline; for the affected languages, the relevant data is included in pretrained pipelines directly.
- [coreferee](https://github.com/explosion/coreferee): Coreference resolution for English, French, German and Polish, optimised for limited training data and easily extensible for further languages. Used as a spaCy pipeline component.
- [spacy-stanza](https://github.com/explosion/spacy-stanza): This is a wrapper that allows the use of Stanford's Stanza library in spaCy.
- [spacy-streamlit](https://github.com/explosion/spacy-streamlit): A wrapper for the Streamlit dashboard building library to help with integrating [displaCy](https://spacy.io/api/top-level/#displacy).
- [spacymoji](https://github.com/explosion/spacymoji): A library to add extra support for emoji to spaCy, such as including character names.
- [thinc-apple-ops](https://github.com/explosion/thinc-apple-ops): A special backend for OSX that uses Apple's native libraries for improved performance.
- [os-signpost](https://github.com/explosion/os-signpost): A Python package that allows you to use the `OSSignposter` API in OSX for performance analysis.
- [spacy-ray](https://github.com/explosion/spacy-ray): A wrapper to integrate spaCy with Ray, a distributed training framework. Currently a work in progress.
## Prodigy
[Prodigy](https://prodi.gy) is Explosion's easy to use and highly customizable tool for annotating data. Prodigy itself requires a license, but the repos below contain documentation, examples, and editor or notebook integrations.
- [prodigy-recipes](https://github.com/explosion/prodigy-recipes): Sample recipes for Prodigy, along with notebooks and other examples of usage.
- [vscode-prodigy](https://github.com/explosion/vscode-prodigy): A VS Code extension that lets you run Prodigy inside VS Code.
- [jupyterlab-prodigy](https://github.com/explosion/jupyterlab-prodigy): An extension for JupyterLab that lets you run Prodigy inside JupyterLab.
## Independent Tools or Projects
These are tools that may be related to or use spaCy, but are functionally independent projects in their own right as well.
- [floret](https://github.com/explosion/floret): A modification of fastText to use Bloom Embeddings. Can be used to add vectors with subword features to spaCy, and also works independently in the same manner as fastText.
- [sense2vec](https://github.com/explosion/sense2vec): A library to make embeddings of noun phrases or words coupled with their part of speech. This library uses spaCy.
- [spacy-vectors-builder](https://github.com/explosion/spacy-vectors-builder): This is a spaCy project that builds vectors using floret and a lot of input text. It handles downloading the input data as well as the actual building of vectors.
- [holmes-extractor](https://github.com/explosion/holmes-extractor): Information extraction from English and German texts based on predicate logic. Uses spaCy.
- [healthsea](https://github.com/explosion/healthsea): Healthsea is a project to extract information from comments about health supplements. Structurally, it's a self-contained, large spaCy project.
- [spacy-pkuseg](https://github.com/explosion/spacy-pkuseg): A fork of the pkuseg Chinese tokenizer. Used for Chinese support in spaCy, but also works independently.
- [ml-datasets](https://github.com/explosion/ml-datasets): This repo includes loaders for several standard machine learning datasets, like MNIST or WikiNER, and has historically been used in spaCy example code and documentation.
## Documentation and Informational Repos
These repos are used to support the spaCy docs or otherwise present information about spaCy or other Explosion projects.
- [projects](https://github.com/explosion/projects): The projects repo is used to show detailed examples of spaCy usage. Individual projects can be checked out using the spaCy command line tool, rather than checking out the projects repo directly.
- [spacy-course](https://github.com/explosion/spacy-course): Home to the interactive spaCy course for learning about how to use the library and some basic NLP principles.
- [spacy-io-binder](https://github.com/explosion/spacy-io-binder): Home to the notebooks used for interactive examples in the documentation.
## Organizational / Meta
These repos are used for organizing data around spaCy, but are not something an end user would need to install as part of using the library.
- [spacy-models](https://github.com/explosion/spacy-models): This repo contains metadata (but not training data) for all the spaCy models. This includes information about where their training data came from, version compatibility, and performance information. It also includes tests for the model packages, and the built models are hosted as releases of this repo.
- [wheelwright](https://github.com/explosion/wheelwright): A tool for automating our PyPI builds and releases.
- [ec2buildwheel](https://github.com/explosion/ec2buildwheel): A small project that allows you to build Python packages in the manner of cibuildwheel, but on any EC2 image. Used by wheelwright.
## Other
Repos that don't fit in any of the above categories.
- [blis](https://github.com/explosion/blis): A fork of the official BLIS library. The main branch is not updated, but work continues in various branches. This is used for cython-blis.
- [tokenizations](https://github.com/explosion/tokenizations): A library originally by Yohei Tamura to align strings with tolerance to some variations in features like case and diacritics, used for aligning tokens and wordpieces. Adopted and maintained by Explosion, but usually spacy-alignments is used instead.
- [conll-2012](https://github.com/explosion/conll-2012): A repo to hold some slightly cleaned up versions of the official scripts for the CoNLL 2012 shared task involving coreference resolution. Used in the coref project.
- [fastapi-explosion-extras](https://github.com/explosion/fastapi-explosion-extras): Some small tweaks to FastAPI used at Explosion.


@@ -127,3 +127,34 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
polyleven
---------
* Files: spacy/matcher/polyleven.c
MIT License
Copyright (c) 2021 Fujimoto Seiji <fujimoto@ceptord.net>
Copyright (c) 2021 Max Bachmann <kontakt@maxbachmann.de>
Copyright (c) 2022 Nick Mazuk
Copyright (c) 2022 Michael Weiss <code@mweiss.ch>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@@ -1,37 +1,40 @@
# Our libraries
-spacy-legacy>=3.0.10,<3.1.0
+spacy-legacy>=3.0.11,<3.1.0
spacy-loggers>=1.0.0,<2.0.0
cymem>=2.0.2,<2.1.0
preshed>=3.0.2,<3.1.0
thinc>=8.1.0,<8.2.0
ml_datasets>=0.2.0,<0.3.0
murmurhash>=0.28.0,<1.1.0
-wasabi>=0.9.1,<1.1.0
+wasabi>=0.9.1,<1.2.0
srsly>=2.4.3,<3.0.0
catalogue>=2.0.6,<2.1.0
-typer>=0.3.0,<0.5.0
-pathy>=0.3.5
+typer>=0.3.0,<0.8.0
+pathy>=0.10.0
+smart-open>=5.2.1,<7.0.0
# Third party dependencies
numpy>=1.15.0
requests>=2.13.0,<3.0.0
tqdm>=4.38.0,<5.0.0
-pydantic>=1.7.4,!=1.8,!=1.8.1,<1.10.0
+pydantic>=1.7.4,!=1.8,!=1.8.1,<1.11.0
jinja2
langcodes>=3.2.0,<4.0.0
# Official Python utilities
setuptools
packaging>=20.0
-typing_extensions>=3.7.4.1,<4.2.0; python_version < "3.8"
+typing_extensions>=3.7.4.1,<4.5.0; python_version < "3.8"
# Development dependencies
pre-commit>=2.13.0
cython>=0.25,<3.0
pytest>=5.2.0,!=7.1.0
pytest-timeout>=1.3.0,<2.0.0
mock>=2.0.0,<3.0.0
-flake8>=3.8.0,<3.10.0
+flake8>=3.8.0,<6.0.0
hypothesis>=3.27.0,<7.0.0
-mypy>=0.910,<0.970; platform_machine!='aarch64'
+mypy>=0.990,<0.1000; platform_machine != "aarch64" and python_version >= "3.7"
types-dataclasses>=0.1.3; python_version < "3.7"
types-mock>=0.1.1
+types-setuptools>=57.0.0
types-requests
+types-setuptools>=57.0.0
black>=22.0,<23.0


@@ -22,6 +22,7 @@ classifiers =
    Programming Language :: Python :: 3.8
    Programming Language :: Python :: 3.9
    Programming Language :: Python :: 3.10
+    Programming Language :: Python :: 3.11
    Topic :: Scientific/Engineering
project_urls =
    Release notes = https://github.com/explosion/spaCy/releases
@@ -41,27 +42,28 @@ setup_requires =
    thinc>=8.1.0,<8.2.0
install_requires =
    # Our libraries
-    spacy-legacy>=3.0.10,<3.1.0
+    spacy-legacy>=3.0.11,<3.1.0
    spacy-loggers>=1.0.0,<2.0.0
    murmurhash>=0.28.0,<1.1.0
    cymem>=2.0.2,<2.1.0
    preshed>=3.0.2,<3.1.0
    thinc>=8.1.0,<8.2.0
-    wasabi>=0.9.1,<1.1.0
+    wasabi>=0.9.1,<1.2.0
    srsly>=2.4.3,<3.0.0
    catalogue>=2.0.6,<2.1.0
    # Third-party dependencies
-    typer>=0.3.0,<0.5.0
-    pathy>=0.3.5
+    typer>=0.3.0,<0.8.0
+    pathy>=0.10.0
+    smart-open>=5.2.1,<7.0.0
    tqdm>=4.38.0,<5.0.0
    numpy>=1.15.0
    requests>=2.13.0,<3.0.0
-    pydantic>=1.7.4,!=1.8,!=1.8.1,<1.10.0
+    pydantic>=1.7.4,!=1.8,!=1.8.1,<1.11.0
    jinja2
    # Official Python utilities
    setuptools
    packaging>=20.0
-    typing_extensions>=3.7.4,<4.2.0; python_version < "3.8"
+    typing_extensions>=3.7.4.1,<4.5.0; python_version < "3.8"
    langcodes>=3.2.0,<4.0.0

[options.entry_points]
@@ -72,41 +74,45 @@ console_scripts =
lookups =
    spacy_lookups_data>=1.0.3,<1.1.0
transformers =
-    spacy_transformers>=1.1.2,<1.2.0
+    spacy_transformers>=1.1.2,<1.3.0
ray =
    spacy_ray>=0.1.0,<1.0.0
cuda =
-    cupy>=5.0.0b4,<11.0.0
+    cupy>=5.0.0b4,<12.0.0
cuda80 =
-    cupy-cuda80>=5.0.0b4,<11.0.0
+    cupy-cuda80>=5.0.0b4,<12.0.0
cuda90 =
-    cupy-cuda90>=5.0.0b4,<11.0.0
+    cupy-cuda90>=5.0.0b4,<12.0.0
cuda91 =
-    cupy-cuda91>=5.0.0b4,<11.0.0
+    cupy-cuda91>=5.0.0b4,<12.0.0
cuda92 =
-    cupy-cuda92>=5.0.0b4,<11.0.0
+    cupy-cuda92>=5.0.0b4,<12.0.0
cuda100 =
-    cupy-cuda100>=5.0.0b4,<11.0.0
+    cupy-cuda100>=5.0.0b4,<12.0.0
cuda101 =
-    cupy-cuda101>=5.0.0b4,<11.0.0
+    cupy-cuda101>=5.0.0b4,<12.0.0
cuda102 =
-    cupy-cuda102>=5.0.0b4,<11.0.0
+    cupy-cuda102>=5.0.0b4,<12.0.0
cuda110 =
-    cupy-cuda110>=5.0.0b4,<11.0.0
+    cupy-cuda110>=5.0.0b4,<12.0.0
cuda111 =
-    cupy-cuda111>=5.0.0b4,<11.0.0
+    cupy-cuda111>=5.0.0b4,<12.0.0
cuda112 =
-    cupy-cuda112>=5.0.0b4,<11.0.0
+    cupy-cuda112>=5.0.0b4,<12.0.0
cuda113 =
-    cupy-cuda113>=5.0.0b4,<11.0.0
+    cupy-cuda113>=5.0.0b4,<12.0.0
cuda114 =
-    cupy-cuda114>=5.0.0b4,<11.0.0
+    cupy-cuda114>=5.0.0b4,<12.0.0
cuda115 =
-    cupy-cuda115>=5.0.0b4,<11.0.0
+    cupy-cuda115>=5.0.0b4,<12.0.0
cuda116 =
-    cupy-cuda116>=5.0.0b4,<11.0.0
+    cupy-cuda116>=5.0.0b4,<12.0.0
cuda117 =
-    cupy-cuda117>=5.0.0b4,<11.0.0
+    cupy-cuda117>=5.0.0b4,<12.0.0
+cuda11x =
+    cupy-cuda11x>=11.0.0,<12.0.0
+cuda-autodetect =
+    cupy-wheel>=11.0.0,<12.0.0
apple =
    thinc-apple-ops>=0.1.0.dev0,<1.0.0
# Language tokenizers with external dependencies


@@ -30,7 +30,9 @@ MOD_NAMES = [
    "spacy.lexeme",
    "spacy.vocab",
    "spacy.attrs",
-    "spacy.kb",
+    "spacy.kb.candidate",
+    "spacy.kb.kb",
+    "spacy.kb.kb_in_memory",
    "spacy.ml.parser_model",
    "spacy.morphology",
    "spacy.pipeline.dep_parser",
@@ -205,6 +207,17 @@ def setup_package():
        get_python_inc(plat_specific=True),
    ]
    ext_modules = []
+    ext_modules.append(
+        Extension(
+            "spacy.matcher.levenshtein",
+            [
+                "spacy/matcher/levenshtein.pyx",
+                "spacy/matcher/polyleven.c",
+            ],
+            language="c",
+            include_dirs=include_dirs,
+        )
+    )
    for name in MOD_NAMES:
        mod_path = name.replace(".", "/") + ".pyx"
        ext = Extension(


@@ -31,21 +31,21 @@ def load(
    name: Union[str, Path],
    *,
    vocab: Union[Vocab, bool] = True,
-    disable: Iterable[str] = util.SimpleFrozenList(),
-    enable: Iterable[str] = util.SimpleFrozenList(),
-    exclude: Iterable[str] = util.SimpleFrozenList(),
+    disable: Union[str, Iterable[str]] = util._DEFAULT_EMPTY_PIPES,
+    enable: Union[str, Iterable[str]] = util._DEFAULT_EMPTY_PIPES,
+    exclude: Union[str, Iterable[str]] = util._DEFAULT_EMPTY_PIPES,
    config: Union[Dict[str, Any], Config] = util.SimpleFrozenDict(),
) -> Language:
    """Load a spaCy model from an installed package or a local path.

    name (str): Package name or model path.
    vocab (Vocab): A Vocab object. If True, a vocab is created.
-    disable (Iterable[str]): Names of pipeline components to disable. Disabled
+    disable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to disable. Disabled
        pipes will be loaded but they won't be run unless you explicitly
        enable them by calling nlp.enable_pipe.
-    enable (Iterable[str]): Names of pipeline components to enable. All other
+    enable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to enable. All other
        pipes will be disabled (but can be enabled later using nlp.enable_pipe).
-    exclude (Iterable[str]): Names of pipeline components to exclude. Excluded
+    exclude (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to exclude. Excluded
        components won't be loaded.
    config (Dict[str, Any] / Config): Config overrides as nested dict or dict
        keyed by section values in dot notation.
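The widened `Union[str, Iterable[str]]` annotations mean a single component name can now be passed as a plain string instead of a list. A minimal sketch of both call styles, assuming the `en_core_web_sm` pipeline is installed:

```python
import spacy

# Passing an iterable of component names, as before:
nlp = spacy.load("en_core_web_sm", disable=["ner", "lemmatizer"])

# With the widened signature, a bare string is also accepted:
nlp = spacy.load("en_core_web_sm", disable="ner")
```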


@@ -1,6 +1,6 @@
# fmt: off
__title__ = "spacy"
-__version__ = "3.4.1"
+__version__ = "3.5.0"
__download_url__ = "https://github.com/explosion/spacy-models/releases/download"
__compatibility__ = "https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json"
__projects__ = "https://github.com/explosion/projects"


@@ -4,6 +4,7 @@ from ._util import app, setup_cli  # noqa: F401

# These are the actual functions, NOT the wrapped CLI commands. The CLI commands
# are registered automatically and won't have to be imported here.
+from .benchmark_speed import benchmark_speed_cli  # noqa: F401
from .download import download  # noqa: F401
from .info import info  # noqa: F401
from .package import package  # noqa: F401
@@ -16,6 +17,7 @@ from .debug_config import debug_config  # noqa: F401
from .debug_model import debug_model  # noqa: F401
from .debug_diff import debug_diff  # noqa: F401
from .evaluate import evaluate  # noqa: F401
+from .apply import apply  # noqa: F401
from .convert import convert  # noqa: F401
from .init_pipeline import init_pipeline_cli  # noqa: F401
from .init_config import init_config, fill_config  # noqa: F401
@@ -27,6 +29,7 @@ from .project.dvc import project_update_dvc  # noqa: F401
from .project.push import project_push  # noqa: F401
from .project.pull import project_pull  # noqa: F401
from .project.document import project_document  # noqa: F401
+from .find_threshold import find_threshold  # noqa: F401


@app.command("link", no_args_is_help=True, deprecated=True, hidden=True)


@@ -23,7 +23,7 @@ from ..util import is_compatible_version, SimpleFrozenDict, ENV_VARS
from .. import about

if TYPE_CHECKING:
-    from pathy import Pathy  # noqa: F401
+    from pathy import FluidPath  # noqa: F401


SDIST_SUFFIX = ".tar.gz"
@@ -46,6 +46,7 @@ DEBUG_HELP = """Suite of helpful commands for debugging and profiling. Includes
commands to check and validate your config files, training and evaluation data,
and custom model implementations.
"""
+BENCHMARK_HELP = """Commands for benchmarking pipelines."""
INIT_HELP = """Commands for initializing configs and pipeline packages."""

# Wrappers for Typer's annotations. Initially created to set defaults and to
@@ -54,12 +55,14 @@ Arg = typer.Argument
Opt = typer.Option

app = typer.Typer(name=NAME, help=HELP)
+benchmark_cli = typer.Typer(name="benchmark", help=BENCHMARK_HELP, no_args_is_help=True)
project_cli = typer.Typer(name="project", help=PROJECT_HELP, no_args_is_help=True)
debug_cli = typer.Typer(name="debug", help=DEBUG_HELP, no_args_is_help=True)
init_cli = typer.Typer(name="init", help=INIT_HELP, no_args_is_help=True)

app.add_typer(project_cli)
app.add_typer(debug_cli)
+app.add_typer(benchmark_cli)
app.add_typer(init_cli)
@@ -158,15 +161,15 @@ def load_project_config(
        sys.exit(1)
    validate_project_version(config)
    validate_project_commands(config)
-    if interpolate:
-        err = f"{PROJECT_FILE} validation error"
-        with show_validation_error(title=err, hint_fill=False):
-            config = substitute_project_variables(config, overrides)
    # Make sure directories defined in config exist
    for subdir in config.get("directories", []):
        dir_path = path / subdir
        if not dir_path.exists():
            dir_path.mkdir(parents=True)
+    if interpolate:
+        err = f"{PROJECT_FILE} validation error"
+        with show_validation_error(title=err, hint_fill=False):
+            config = substitute_project_variables(config, overrides)
    return config
@@ -331,7 +334,7 @@ def import_code(code_path: Optional[Union[Path, str]]) -> None:
        msg.fail(f"Couldn't load Python code: {code_path}", e, exits=1)


-def upload_file(src: Path, dest: Union[str, "Pathy"]) -> None:
+def upload_file(src: Path, dest: Union[str, "FluidPath"]) -> None:
    """Upload a file.

    src (Path): The source path.
@@ -339,13 +342,20 @@ def upload_file(src: Path, dest: Union[str, "Pathy"]) -> None:
    """
    import smart_open

+    # Create parent directories for local paths
+    if isinstance(dest, Path):
+        if not dest.parent.exists():
+            dest.parent.mkdir(parents=True)
+
    dest = str(dest)
    with smart_open.open(dest, mode="wb") as output_file:
        with src.open(mode="rb") as input_file:
            output_file.write(input_file.read())


-def download_file(src: Union[str, "Pathy"], dest: Path, *, force: bool = False) -> None:
+def download_file(
+    src: Union[str, "FluidPath"], dest: Path, *, force: bool = False
+) -> None:
    """Download a file using smart_open.

    url (str): The URL of the file.
@@ -358,7 +368,7 @@ def download_file(src: Union[str, "Pathy"], dest: Path, *, force: bool = False)
    if dest.exists() and not force:
        return None
    src = str(src)
-    with smart_open.open(src, mode="rb", ignore_ext=True) as input_file:
+    with smart_open.open(src, mode="rb", compression="disable") as input_file:
        with dest.open(mode="wb") as output_file:
            shutil.copyfileobj(input_file, output_file)
@@ -368,7 +378,7 @@ def ensure_pathy(path):
    slow and annoying Google Cloud warning)."""
    from pathy import Pathy  # noqa: F811

-    return Pathy(path)
+    return Pathy.fluid(path)


def git_checkout(
@@ -573,3 +583,39 @@ def setup_gpu(use_gpu: int, silent=None) -> None:
        local_msg.info("Using CPU")
        if gpu_is_available():
            local_msg.info("To switch to GPU 0, use the option: --gpu-id 0")
+
+
+def walk_directory(path: Path, suffix: Optional[str] = None) -> List[Path]:
+    """Given a directory and a suffix, recursively find all files matching the suffix.
+    Directories or files with names beginning with a . are ignored, but hidden flags on
+    filesystems are not checked.
+    When provided with a suffix `None`, there is no suffix-based filtering."""
+    if not path.is_dir():
+        return [path]
+    paths = [path]
+    locs = []
+    seen = set()
+    for path in paths:
+        if str(path) in seen:
+            continue
+        seen.add(str(path))
+        if path.parts[-1].startswith("."):
+            continue
+        elif path.is_dir():
+            paths.extend(path.iterdir())
+        elif suffix is not None and not path.parts[-1].endswith(suffix):
+            continue
+        else:
+            locs.append(path)
+    # It's good to sort these, in case the ordering messes up cache.
+    locs.sort()
+    return locs
+
+
+def _format_number(number: Union[int, float], ndigits: int = 2) -> str:
+    """Formats a number (float or int) rounding to `ndigits`, without truncating trailing 0s,
+    as happens with `round(number, ndigits)`"""
+    if isinstance(number, float):
+        return f"{number:.{ndigits}f}"
+    else:
+        return str(number)

spacy/cli/apply.py (new file)

@@ -0,0 +1,143 @@
import tqdm
import srsly
from itertools import chain
from pathlib import Path
from typing import Optional, List, Iterable, cast, Union
from wasabi import msg
from ._util import app, Arg, Opt, setup_gpu, import_code, walk_directory
from ..tokens import Doc, DocBin
from ..vocab import Vocab
from ..util import ensure_path, load_model
path_help = """Location of the documents to predict on.
Can be a single file in .spacy format or a .jsonl file.
Files with other extensions are treated as single plain text documents.
If a directory is provided it is traversed recursively to grab
all files to be processed.
The files can be a mixture of .spacy, .jsonl and text files.
If .jsonl is provided the specified field is going
to be grabbed ("text" by default)."""
out_help = "Path to save the resulting .spacy file"
code_help = (
"Path to Python file with additional " "code (registered functions) to be imported"
)
gold_help = "Use gold preprocessing provided in the .spacy files"
force_msg = (
"The provided output file already exists. "
"To force overwriting the output file, set the --force or -F flag."
)
DocOrStrStream = Union[Iterable[str], Iterable[Doc]]
def _stream_docbin(path: Path, vocab: Vocab) -> Iterable[Doc]:
"""
Stream Doc objects from DocBin.
"""
docbin = DocBin().from_disk(path)
for doc in docbin.get_docs(vocab):
yield doc
def _stream_jsonl(path: Path, field: str) -> Iterable[str]:
"""
Stream "text" field from JSONL. If the field "text" is
not found it raises error.
"""
for entry in srsly.read_jsonl(path):
if field not in entry:
msg.fail(f"{path} does not contain the required '{field}' field.", exits=1)
else:
yield entry[field]
def _stream_texts(paths: Iterable[Path]) -> Iterable[str]:
"""
Yields strings from text files in paths.
"""
for path in paths:
with open(path, "r") as fin:
text = fin.read()
yield text
@app.command("apply")
def apply_cli(
# fmt: off
model: str = Arg(..., help="Model name or path"),
data_path: Path = Arg(..., help=path_help, exists=True),
output_file: Path = Arg(..., help=out_help, dir_okay=False),
code_path: Optional[Path] = Opt(None, "--code", "-c", help=code_help),
text_key: str = Opt("text", "--text-key", "-tk", help="Key containing text string for JSONL"),
force_overwrite: bool = Opt(False, "--force", "-F", help="Force overwriting the output file"),
use_gpu: int = Opt(-1, "--gpu-id", "-g", help="GPU ID or -1 for CPU."),
batch_size: int = Opt(1, "--batch-size", "-b", help="Batch size."),
n_process: int = Opt(1, "--n-process", "-n", help="number of processors to use.")
):
"""
Apply a trained pipeline to documents to get predictions.
Expects a loadable spaCy pipeline and path to the data, which
can be a directory or a file.
The data files can be provided in multiple formats:
1. .spacy files
2. .jsonl files with a specified "field" to read the text from.
3. Files with any other extension are assumed to be containing
a single document.
DOCS: https://spacy.io/api/cli#apply
"""
data_path = ensure_path(data_path)
output_file = ensure_path(output_file)
code_path = ensure_path(code_path)
if output_file.exists() and not force_overwrite:
msg.fail(force_msg, exits=1)
if not data_path.exists():
msg.fail(f"Couldn't find data path: {data_path}", exits=1)
import_code(code_path)
setup_gpu(use_gpu)
apply(data_path, output_file, model, text_key, batch_size, n_process)
def apply(
data_path: Path,
output_file: Path,
model: str,
json_field: str,
batch_size: int,
n_process: int,
):
docbin = DocBin(store_user_data=True)
paths = walk_directory(data_path)
if len(paths) == 0:
docbin.to_disk(output_file)
msg.warn(
"Did not find data to process,"
f" {data_path} seems to be an empty directory."
)
return
nlp = load_model(model)
msg.good(f"Loaded model {model}")
vocab = nlp.vocab
streams: List[DocOrStrStream] = []
text_files = []
for path in paths:
if path.suffix == ".spacy":
streams.append(_stream_docbin(path, vocab))
elif path.suffix == ".jsonl":
streams.append(_stream_jsonl(path, json_field))
else:
text_files.append(path)
if len(text_files) > 0:
streams.append(_stream_texts(text_files))
datagen = cast(DocOrStrStream, chain(*streams))
for doc in tqdm.tqdm(nlp.pipe(datagen, batch_size=batch_size, n_process=n_process)):
docbin.add(doc)
if output_file.suffix == "":
output_file = output_file.with_suffix(".spacy")
docbin.to_disk(output_file)
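For reference, the command registered above is run from the shell as, for example, `python -m spacy apply en_core_web_sm ./corpus predictions.spacy`, or driven programmatically through the `apply` helper. A minimal sketch, with hypothetical paths and assuming the `en_core_web_sm` pipeline is installed:

```python
from pathlib import Path
from spacy.cli.apply import apply

apply(
    data_path=Path("corpus"),              # directory of .spacy, .jsonl and plain-text files
    output_file=Path("predictions.spacy"),  # DocBin written here
    model="en_core_web_sm",                 # any loadable pipeline name or path
    json_field="text",                      # field read from .jsonl entries
    batch_size=16,
    n_process=1,
)
```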


@@ -0,0 +1,174 @@
from typing import Iterable, List, Optional
import random
from itertools import islice
import numpy
from pathlib import Path
import time
from tqdm import tqdm
import typer
from wasabi import msg
from .. import util
from ..language import Language
from ..tokens import Doc
from ..training import Corpus
from ._util import Arg, Opt, benchmark_cli, setup_gpu
@benchmark_cli.command(
"speed",
context_settings={"allow_extra_args": True, "ignore_unknown_options": True},
)
def benchmark_speed_cli(
# fmt: off
ctx: typer.Context,
model: str = Arg(..., help="Model name or path"),
data_path: Path = Arg(..., help="Location of binary evaluation data in .spacy format", exists=True),
batch_size: Optional[int] = Opt(None, "--batch-size", "-b", min=1, help="Override the pipeline batch size"),
no_shuffle: bool = Opt(False, "--no-shuffle", help="Do not shuffle benchmark data"),
use_gpu: int = Opt(-1, "--gpu-id", "-g", help="GPU ID or -1 for CPU"),
n_batches: int = Opt(50, "--batches", help="Minimum number of batches to benchmark", min=30,),
warmup_epochs: int = Opt(3, "--warmup", "-w", min=0, help="Number of iterations over the data for warmup"),
# fmt: on
):
"""
Benchmark a pipeline. Expects a loadable spaCy pipeline and benchmark
data in the binary .spacy format.
"""
setup_gpu(use_gpu=use_gpu, silent=False)
nlp = util.load_model(model)
batch_size = batch_size if batch_size is not None else nlp.batch_size
corpus = Corpus(data_path)
docs = [eg.predicted for eg in corpus(nlp)]
if len(docs) == 0:
msg.fail("Cannot benchmark speed using an empty corpus.", exits=1)
print(f"Warming up for {warmup_epochs} epochs...")
warmup(nlp, docs, warmup_epochs, batch_size)
print()
print(f"Benchmarking {n_batches} batches...")
wps = benchmark(nlp, docs, n_batches, batch_size, not no_shuffle)
print()
print_outliers(wps)
print_mean_with_ci(wps)
# Lowercased, behaves as a context manager function.
class time_context:
"""Register the running time of a context."""
def __enter__(self):
self.start = time.perf_counter()
return self
def __exit__(self, type, value, traceback):
self.elapsed = time.perf_counter() - self.start
class Quartiles:
"""Calculate the q1, q2, q3 quartiles and the inter-quartile range (iqr)
of a sample."""
q1: float
q2: float
q3: float
iqr: float
def __init__(self, sample: numpy.ndarray) -> None:
self.q1 = numpy.quantile(sample, 0.25)
self.q2 = numpy.quantile(sample, 0.5)
self.q3 = numpy.quantile(sample, 0.75)
self.iqr = self.q3 - self.q1
def annotate(
nlp: Language, docs: List[Doc], batch_size: Optional[int]
) -> numpy.ndarray:
docs = nlp.pipe(tqdm(docs, unit="doc"), batch_size=batch_size)
wps = []
while True:
with time_context() as elapsed:
batch_docs = list(
islice(docs, batch_size if batch_size else nlp.batch_size)
)
if len(batch_docs) == 0:
break
n_tokens = count_tokens(batch_docs)
wps.append(n_tokens / elapsed.elapsed)
return numpy.array(wps)
def benchmark(
nlp: Language,
docs: List[Doc],
n_batches: int,
batch_size: int,
shuffle: bool,
) -> numpy.ndarray:
if shuffle:
bench_docs = [
nlp.make_doc(random.choice(docs).text)
for _ in range(n_batches * batch_size)
]
else:
bench_docs = [
nlp.make_doc(docs[i % len(docs)].text)
for i in range(n_batches * batch_size)
]
return annotate(nlp, bench_docs, batch_size)
def bootstrap(x, statistic=numpy.mean, iterations=10000) -> numpy.ndarray:
"""Apply a statistic to repeated random samples of an array."""
return numpy.fromiter(
(
statistic(numpy.random.choice(x, len(x), replace=True))
for _ in range(iterations)
),
numpy.float64,
)
def count_tokens(docs: Iterable[Doc]) -> int:
return sum(len(doc) for doc in docs)
def print_mean_with_ci(sample: numpy.ndarray):
mean = numpy.mean(sample)
bootstrap_means = bootstrap(sample)
bootstrap_means.sort()
# 95% confidence interval
low = bootstrap_means[int(len(bootstrap_means) * 0.025)]
high = bootstrap_means[int(len(bootstrap_means) * 0.975)]
print(f"Mean: {mean:.1f} words/s (95% CI: {low-mean:.1f} +{high-mean:.1f})")
def print_outliers(sample: numpy.ndarray):
quartiles = Quartiles(sample)
n_outliers = numpy.sum(
(sample < (quartiles.q1 - 1.5 * quartiles.iqr))
| (sample > (quartiles.q3 + 1.5 * quartiles.iqr))
)
n_extreme_outliers = numpy.sum(
(sample < (quartiles.q1 - 3.0 * quartiles.iqr))
| (sample > (quartiles.q3 + 3.0 * quartiles.iqr))
)
print(
f"Outliers: {(100 * n_outliers) / len(sample):.1f}%, extreme outliers: {(100 * n_extreme_outliers) / len(sample)}%"
)
def warmup(
nlp: Language, docs: List[Doc], warmup_epochs: int, batch_size: Optional[int]
) -> numpy.ndarray:
docs = warmup_epochs * docs
return annotate(nlp, docs, batch_size)
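As a side note, the percentile bootstrap that print_mean_with_ci relies on above can be tried in isolation. This is a minimal standalone sketch; the words-per-second samples are made up purely for illustration.

import numpy

def bootstrap(x, statistic=numpy.mean, iterations=10000) -> numpy.ndarray:
    # Apply the statistic to repeated resamples (with replacement) of the input.
    return numpy.fromiter(
        (statistic(numpy.random.choice(x, len(x), replace=True)) for _ in range(iterations)),
        numpy.float64,
    )

wps = numpy.array([21000.0, 19500.0, 22300.0, 20100.0, 20800.0])  # made-up words/s samples
means = numpy.sort(bootstrap(wps))
low = means[int(len(means) * 0.025)]
high = means[int(len(means) * 0.975)]
print(f"Mean: {wps.mean():.1f} words/s (95% CI: {low - wps.mean():.1f} +{high - wps.mean():.1f})")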

View File

@ -1,4 +1,4 @@
from typing import Callable, Iterable, Mapping, Optional, Any, List, Union from typing import Callable, Iterable, Mapping, Optional, Any, Union
from enum import Enum from enum import Enum
from pathlib import Path from pathlib import Path
from wasabi import Printer from wasabi import Printer
@ -7,7 +7,7 @@ import re
import sys import sys
import itertools import itertools
from ._util import app, Arg, Opt from ._util import app, Arg, Opt, walk_directory
from ..training import docs_to_json from ..training import docs_to_json
from ..tokens import Doc, DocBin from ..tokens import Doc, DocBin
from ..training.converters import iob_to_docs, conll_ner_to_docs, json_to_docs from ..training.converters import iob_to_docs, conll_ner_to_docs, json_to_docs
@ -28,6 +28,8 @@ CONVERTERS: Mapping[str, Callable[..., Iterable[Doc]]] = {
"json": json_to_docs, "json": json_to_docs,
} }
AUTO = "auto"
# File types that can be written to stdout # File types that can be written to stdout
FILE_TYPES_STDOUT = ("json",) FILE_TYPES_STDOUT = ("json",)
@ -49,7 +51,7 @@ def convert_cli(
model: Optional[str] = Opt(None, "--model", "--base", "-b", help="Trained spaCy pipeline for sentence segmentation to use as base (for --seg-sents)"), model: Optional[str] = Opt(None, "--model", "--base", "-b", help="Trained spaCy pipeline for sentence segmentation to use as base (for --seg-sents)"),
morphology: bool = Opt(False, "--morphology", "-m", help="Enable appending morphology to tags"), morphology: bool = Opt(False, "--morphology", "-m", help="Enable appending morphology to tags"),
merge_subtokens: bool = Opt(False, "--merge-subtokens", "-T", help="Merge CoNLL-U subtokens"), merge_subtokens: bool = Opt(False, "--merge-subtokens", "-T", help="Merge CoNLL-U subtokens"),
converter: str = Opt("auto", "--converter", "-c", help=f"Converter: {tuple(CONVERTERS.keys())}"), converter: str = Opt(AUTO, "--converter", "-c", help=f"Converter: {tuple(CONVERTERS.keys())}"),
ner_map: Optional[Path] = Opt(None, "--ner-map", "-nm", help="NER tag mapping (as JSON-encoded dict of entity types)", exists=True), ner_map: Optional[Path] = Opt(None, "--ner-map", "-nm", help="NER tag mapping (as JSON-encoded dict of entity types)", exists=True),
lang: Optional[str] = Opt(None, "--lang", "-l", help="Language (if tokenizer required)"), lang: Optional[str] = Opt(None, "--lang", "-l", help="Language (if tokenizer required)"),
concatenate: bool = Opt(None, "--concatenate", "-C", help="Concatenate output to a single file"), concatenate: bool = Opt(None, "--concatenate", "-C", help="Concatenate output to a single file"),
@ -70,8 +72,8 @@ def convert_cli(
output_dir: Union[str, Path] = "-" if output_dir == Path("-") else output_dir output_dir: Union[str, Path] = "-" if output_dir == Path("-") else output_dir
silent = output_dir == "-" silent = output_dir == "-"
msg = Printer(no_print=silent) msg = Printer(no_print=silent)
verify_cli_args(msg, input_path, output_dir, file_type.value, converter, ner_map)
converter = _get_converter(msg, converter, input_path) converter = _get_converter(msg, converter, input_path)
verify_cli_args(msg, input_path, output_dir, file_type.value, converter, ner_map)
convert( convert(
input_path, input_path,
output_dir, output_dir,
@ -100,7 +102,7 @@ def convert(
model: Optional[str] = None, model: Optional[str] = None,
morphology: bool = False, morphology: bool = False,
merge_subtokens: bool = False, merge_subtokens: bool = False,
converter: str = "auto", converter: str,
ner_map: Optional[Path] = None, ner_map: Optional[Path] = None,
lang: Optional[str] = None, lang: Optional[str] = None,
concatenate: bool = False, concatenate: bool = False,
@ -189,33 +191,6 @@ def autodetect_ner_format(input_data: str) -> Optional[str]:
return None return None
def walk_directory(path: Path, converter: str) -> List[Path]:
if not path.is_dir():
return [path]
paths = [path]
locs = []
seen = set()
for path in paths:
if str(path) in seen:
continue
seen.add(str(path))
if path.parts[-1].startswith("."):
continue
elif path.is_dir():
paths.extend(path.iterdir())
elif converter == "json" and not path.parts[-1].endswith("json"):
continue
elif converter == "conll" and not path.parts[-1].endswith("conll"):
continue
elif converter == "iob" and not path.parts[-1].endswith("iob"):
continue
else:
locs.append(path)
# It's good to sort these, in case the ordering messes up cache.
locs.sort()
return locs
def verify_cli_args( def verify_cli_args(
msg: Printer, msg: Printer,
input_path: Path, input_path: Path,
@ -239,18 +214,22 @@ def verify_cli_args(
input_locs = walk_directory(input_path, converter) input_locs = walk_directory(input_path, converter)
if len(input_locs) == 0: if len(input_locs) == 0:
msg.fail("No input files in directory", input_path, exits=1) msg.fail("No input files in directory", input_path, exits=1)
file_types = list(set([loc.suffix[1:] for loc in input_locs])) if converter not in CONVERTERS:
if converter == "auto" and len(file_types) >= 2:
file_types_str = ",".join(file_types)
msg.fail("All input files must be same type", file_types_str, exits=1)
if converter != "auto" and converter not in CONVERTERS:
msg.fail(f"Can't find converter for {converter}", exits=1) msg.fail(f"Can't find converter for {converter}", exits=1)
def _get_converter(msg, converter, input_path: Path): def _get_converter(msg, converter, input_path: Path):
if input_path.is_dir(): if input_path.is_dir():
input_path = walk_directory(input_path, converter)[0] if converter == AUTO:
if converter == "auto": input_locs = walk_directory(input_path, suffix=None)
file_types = list(set([loc.suffix[1:] for loc in input_locs]))
if len(file_types) >= 2:
file_types_str = ",".join(file_types)
msg.fail("All input files must be same type", file_types_str, exits=1)
input_path = input_locs[0]
else:
input_path = walk_directory(input_path, suffix=converter)[0]
if converter == AUTO:
converter = input_path.suffix[1:] converter = input_path.suffix[1:]
if converter == "ner" or converter == "iob": if converter == "ner" or converter == "iob":
with input_path.open(encoding="utf8") as file_: with input_path.open(encoding="utf8") as file_:
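With this change, walk_directory lives in spacy/cli/_util.py and takes a generic suffix filter instead of a converter name. The sketch below approximates that helper under the assumption that it keeps the behaviour of the removed local copy; only the suffix parameter is new.

from pathlib import Path
from typing import List, Optional

def walk_directory(path: Path, suffix: Optional[str] = None) -> List[Path]:
    # Recursively collect files, skipping hidden entries and, if a suffix is
    # given, anything whose name does not end with it.
    if not path.is_dir():
        return [path]
    paths = [path]
    locs = []
    seen = set()
    for p in paths:
        if str(p) in seen:
            continue
        seen.add(str(p))
        if p.parts[-1].startswith("."):
            continue
        elif p.is_dir():
            paths.extend(p.iterdir())
        elif suffix is not None and not p.parts[-1].endswith(suffix):
            continue
        else:
            locs.append(p)
    # Sort for a stable ordering (e.g. to keep caching deterministic).
    locs.sort()
    return locs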

View File

@ -9,10 +9,11 @@ import typer
import math import math
from ._util import app, Arg, Opt, show_validation_error, parse_config_overrides from ._util import app, Arg, Opt, show_validation_error, parse_config_overrides
from ._util import import_code, debug_cli from ._util import import_code, debug_cli, _format_number
from ..training import Example, remove_bilu_prefix from ..training import Example, remove_bilu_prefix
from ..training.initialize import get_sourced_components from ..training.initialize import get_sourced_components
from ..schemas import ConfigSchemaTraining from ..schemas import ConfigSchemaTraining
from ..pipeline import TrainablePipe
from ..pipeline._parser_internals import nonproj from ..pipeline._parser_internals import nonproj
from ..pipeline._parser_internals.nonproj import DELIMITER from ..pipeline._parser_internals.nonproj import DELIMITER
from ..pipeline import Morphologizer, SpanCategorizer from ..pipeline import Morphologizer, SpanCategorizer
@ -1017,6 +1018,7 @@ def _get_labels_from_model(nlp: Language, factory_name: str) -> Set[str]:
labels: Set[str] = set() labels: Set[str] = set()
for pipe_name in pipe_names: for pipe_name in pipe_names:
pipe = nlp.get_pipe(pipe_name) pipe = nlp.get_pipe(pipe_name)
assert isinstance(pipe, TrainablePipe)
labels.update(pipe.labels) labels.update(pipe.labels)
return labels return labels
@ -1072,7 +1074,8 @@ def _get_kl_divergence(p: Counter, q: Counter) -> float:
def _format_span_row(span_data: List[Dict], labels: List[str]) -> List[Any]: def _format_span_row(span_data: List[Dict], labels: List[str]) -> List[Any]:
"""Compile into one list for easier reporting""" """Compile into one list for easier reporting"""
d = { d = {
label: [label] + list(round(d[label], 2) for d in span_data) for label in labels label: [label] + list(_format_number(d[label]) for d in span_data)
for label in labels
} }
return list(d.values()) return list(d.values())
@ -1087,6 +1090,10 @@ def _get_span_characteristics(
label: _gmean(l) label: _gmean(l)
for label, l in compiled_gold["spans_length"][spans_key].items() for label, l in compiled_gold["spans_length"][spans_key].items()
} }
spans_per_type = {
label: len(spans)
for label, spans in compiled_gold["spans_per_type"][spans_key].items()
}
min_lengths = [min(l) for l in compiled_gold["spans_length"][spans_key].values()] min_lengths = [min(l) for l in compiled_gold["spans_length"][spans_key].values()]
max_lengths = [max(l) for l in compiled_gold["spans_length"][spans_key].values()] max_lengths = [max(l) for l in compiled_gold["spans_length"][spans_key].values()]
@ -1114,6 +1121,7 @@ def _get_span_characteristics(
return { return {
"sd": span_distinctiveness, "sd": span_distinctiveness,
"bd": sb_distinctiveness, "bd": sb_distinctiveness,
"spans_per_type": spans_per_type,
"lengths": span_length, "lengths": span_length,
"min_length": min(min_lengths), "min_length": min(min_lengths),
"max_length": max(max_lengths), "max_length": max(max_lengths),
@ -1128,12 +1136,15 @@ def _get_span_characteristics(
def _print_span_characteristics(span_characteristics: Dict[str, Any]): def _print_span_characteristics(span_characteristics: Dict[str, Any]):
"""Print all span characteristics into a table""" """Print all span characteristics into a table"""
headers = ("Span Type", "Length", "SD", "BD") headers = ("Span Type", "Length", "SD", "BD", "N")
# Wasabi has this at 30 by default, but we might have some long labels
max_col = max(30, max(len(label) for label in span_characteristics["labels"]))
# Prepare table data with all span characteristics # Prepare table data with all span characteristics
table_data = [ table_data = [
span_characteristics["lengths"], span_characteristics["lengths"],
span_characteristics["sd"], span_characteristics["sd"],
span_characteristics["bd"], span_characteristics["bd"],
span_characteristics["spans_per_type"],
] ]
table = _format_span_row( table = _format_span_row(
span_data=table_data, labels=span_characteristics["labels"] span_data=table_data, labels=span_characteristics["labels"]
@ -1144,8 +1155,18 @@ def _print_span_characteristics(span_characteristics: Dict[str, Any]):
span_characteristics["avg_sd"], span_characteristics["avg_sd"],
span_characteristics["avg_bd"], span_characteristics["avg_bd"],
] ]
footer = ["Wgt. Average"] + [str(round(f, 2)) for f in footer_data]
msg.table(table, footer=footer, header=headers, divider=True) footer = (
["Wgt. Average"] + ["{:.2f}".format(round(f, 2)) for f in footer_data] + ["-"]
)
msg.table(
table,
footer=footer,
header=headers,
divider=True,
aligns=["l"] + ["r"] * (len(footer_data) + 1),
max_col=max_col,
)
def _get_spans_length_freq_dist( def _get_spans_length_freq_dist(

View File

@ -20,7 +20,7 @@ def download_cli(
ctx: typer.Context, ctx: typer.Context,
model: str = Arg(..., help="Name of pipeline package to download"), model: str = Arg(..., help="Name of pipeline package to download"),
direct: bool = Opt(False, "--direct", "-d", "-D", help="Force direct download of name + version"), direct: bool = Opt(False, "--direct", "-d", "-D", help="Force direct download of name + version"),
sdist: bool = Opt(False, "--sdist", "-S", help="Download sdist (.tar.gz) archive instead of pre-built binary wheel") sdist: bool = Opt(False, "--sdist", "-S", help="Download sdist (.tar.gz) archive instead of pre-built binary wheel"),
# fmt: on # fmt: on
): ):
""" """
@ -36,7 +36,12 @@ def download_cli(
download(model, direct, sdist, *ctx.args) download(model, direct, sdist, *ctx.args)
def download(model: str, direct: bool = False, sdist: bool = False, *pip_args) -> None: def download(
model: str,
direct: bool = False,
sdist: bool = False,
*pip_args,
) -> None:
if ( if (
not (is_package("spacy") or is_package("spacy-nightly")) not (is_package("spacy") or is_package("spacy-nightly"))
and "--no-deps" not in pip_args and "--no-deps" not in pip_args
@ -50,13 +55,10 @@ def download(model: str, direct: bool = False, sdist: bool = False, *pip_args) -
"dependencies, you'll have to install them manually." "dependencies, you'll have to install them manually."
) )
pip_args = pip_args + ("--no-deps",) pip_args = pip_args + ("--no-deps",)
suffix = SDIST_SUFFIX if sdist else WHEEL_SUFFIX
dl_tpl = "{m}-{v}/{m}-{v}{s}#egg={m}=={v}"
if direct: if direct:
components = model.split("-") components = model.split("-")
model_name = "".join(components[:-1]) model_name = "".join(components[:-1])
version = components[-1] version = components[-1]
download_model(dl_tpl.format(m=model_name, v=version, s=suffix), pip_args)
else: else:
model_name = model model_name = model
if model in OLD_MODEL_SHORTCUTS: if model in OLD_MODEL_SHORTCUTS:
@ -67,13 +69,26 @@ def download(model: str, direct: bool = False, sdist: bool = False, *pip_args) -
model_name = OLD_MODEL_SHORTCUTS[model] model_name = OLD_MODEL_SHORTCUTS[model]
compatibility = get_compatibility() compatibility = get_compatibility()
version = get_version(model_name, compatibility) version = get_version(model_name, compatibility)
download_model(dl_tpl.format(m=model_name, v=version, s=suffix), pip_args)
filename = get_model_filename(model_name, version, sdist)
download_model(filename, pip_args)
msg.good( msg.good(
"Download and installation successful", "Download and installation successful",
f"You can now load the package via spacy.load('{model_name}')", f"You can now load the package via spacy.load('{model_name}')",
) )
def get_model_filename(model_name: str, version: str, sdist: bool = False) -> str:
dl_tpl = "{m}-{v}/{m}-{v}{s}"
egg_tpl = "#egg={m}=={v}"
suffix = SDIST_SUFFIX if sdist else WHEEL_SUFFIX
filename = dl_tpl.format(m=model_name, v=version, s=suffix)
if sdist:
filename += egg_tpl.format(m=model_name, v=version)
return filename
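For reference, the filenames produced relative to the release download URL look roughly as follows; the model name, version and exact suffix values are only illustrative.

from spacy.cli.download import get_model_filename

print(get_model_filename("en_core_web_sm", "3.5.0"))
# e.g. en_core_web_sm-3.5.0/en_core_web_sm-3.5.0-py3-none-any.whl (wheel suffix assumed)
print(get_model_filename("en_core_web_sm", "3.5.0", sdist=True))
# e.g. en_core_web_sm-3.5.0/en_core_web_sm-3.5.0.tar.gz#egg=en_core_web_sm==3.5.0 (sdist suffix assumed)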
def get_compatibility() -> dict: def get_compatibility() -> dict:
if is_prerelease_version(about.__version__): if is_prerelease_version(about.__version__):
version: Optional[str] = about.__version__ version: Optional[str] = about.__version__
@ -105,6 +120,11 @@ def get_version(model: str, comp: dict) -> str:
return comp[model][0] return comp[model][0]
def get_latest_version(model: str) -> str:
comp = get_compatibility()
return get_version(model, comp)
def download_model( def download_model(
filename: str, user_pip_args: Optional[Sequence[str]] = None filename: str, user_pip_args: Optional[Sequence[str]] = None
) -> None: ) -> None:

View File

@ -7,12 +7,15 @@ from thinc.api import fix_random_seed
from ..training import Corpus from ..training import Corpus
from ..tokens import Doc from ..tokens import Doc
from ._util import app, Arg, Opt, setup_gpu, import_code from ._util import app, Arg, Opt, setup_gpu, import_code, benchmark_cli
from ..scorer import Scorer from ..scorer import Scorer
from .. import util from .. import util
from .. import displacy from .. import displacy
@benchmark_cli.command(
"accuracy",
)
@app.command("evaluate") @app.command("evaluate")
def evaluate_cli( def evaluate_cli(
# fmt: off # fmt: off
@ -36,7 +39,7 @@ def evaluate_cli(
dependency parses in a HTML file, set as output directory as the dependency parses in a HTML file, set as output directory as the
displacy_path argument. displacy_path argument.
DOCS: https://spacy.io/api/cli#evaluate DOCS: https://spacy.io/api/cli#benchmark-accuracy
""" """
import_code(code_path) import_code(code_path)
evaluate( evaluate(

233 spacy/cli/find_threshold.py Normal file
View File

@ -0,0 +1,233 @@
import functools
import operator
from pathlib import Path
import logging
from typing import Optional, Tuple, Any, Dict, List
import numpy
import wasabi.tables
from ..pipeline import TextCategorizer, MultiLabel_TextCategorizer
from ..errors import Errors
from ..training import Corpus
from ._util import app, Arg, Opt, import_code, setup_gpu
from .. import util
_DEFAULTS = {
"n_trials": 11,
"use_gpu": -1,
"gold_preproc": False,
}
@app.command(
"find-threshold",
context_settings={"allow_extra_args": False, "ignore_unknown_options": True},
)
def find_threshold_cli(
# fmt: off
model: str = Arg(..., help="Model name or path"),
data_path: Path = Arg(..., help="Location of binary evaluation data in .spacy format", exists=True),
pipe_name: str = Arg(..., help="Name of pipe to examine thresholds for"),
threshold_key: str = Arg(..., help="Key of threshold attribute in component's configuration"),
scores_key: str = Arg(..., help="Metric to optimize"),
n_trials: int = Opt(_DEFAULTS["n_trials"], "--n_trials", "-n", help="Number of trials to determine optimal thresholds"),
code_path: Optional[Path] = Opt(None, "--code", "-c", help="Path to Python file with additional code (registered functions) to be imported"),
use_gpu: int = Opt(_DEFAULTS["use_gpu"], "--gpu-id", "-g", help="GPU ID or -1 for CPU"),
gold_preproc: bool = Opt(_DEFAULTS["gold_preproc"], "--gold-preproc", "-G", help="Use gold preprocessing"),
verbose: bool = Opt(False, "--silent", "-V", "-VV", help="Display more information for debugging purposes"),
# fmt: on
):
"""
Runs prediction trials for a trained model with varying thresholds to maximize
the specified metric. The search space for the threshold is traversed linearly
from 0 to 1 in `n_trials` steps. Results are displayed in a table on `stdout`
(the corresponding API call to `spacy.cli.find_threshold.find_threshold()`
returns all results).
This is applicable only for components whose predictions are influenced by
thresholds - e.g. `textcat_multilabel` and `spancat`, but not `textcat`. Note
that the full path to the corresponding threshold attribute in the config has to
be provided.
DOCS: https://spacy.io/api/cli#find-threshold
"""
util.logger.setLevel(logging.DEBUG if verbose else logging.INFO)
import_code(code_path)
find_threshold(
model=model,
data_path=data_path,
pipe_name=pipe_name,
threshold_key=threshold_key,
scores_key=scores_key,
n_trials=n_trials,
use_gpu=use_gpu,
gold_preproc=gold_preproc,
silent=False,
)
def find_threshold(
model: str,
data_path: Path,
pipe_name: str,
threshold_key: str,
scores_key: str,
*,
n_trials: int = _DEFAULTS["n_trials"], # type: ignore
use_gpu: int = _DEFAULTS["use_gpu"], # type: ignore
gold_preproc: bool = _DEFAULTS["gold_preproc"], # type: ignore
silent: bool = True,
) -> Tuple[float, float, Dict[float, float]]:
"""
Runs prediction trials for models with varying thresholds to maximize the specified metric.
model (Union[str, Path]): Pipeline to evaluate. Can be a package or a path to a data directory.
data_path (Path): Path to file with DocBin with docs to use for threshold search.
pipe_name (str): Name of pipe to examine thresholds for.
threshold_key (str): Key of threshold attribute in component's configuration.
scores_key (str): Name of the score metric to optimize.
n_trials (int): Number of trials to determine optimal thresholds.
use_gpu (int): GPU ID or -1 for CPU.
gold_preproc (bool): Whether to use gold preprocessing. Gold preprocessing helps the annotations align to the
tokenization, and may result in sequences of more consistent length. However, it may reduce runtime accuracy due
to train/test skew.
silent (bool): Whether to print non-error-related output to stdout.
RETURNS (Tuple[float, float, Dict[float, float]]): Best found threshold, the corresponding score, scores for all
evaluated thresholds.
"""
setup_gpu(use_gpu, silent=silent)
data_path = util.ensure_path(data_path)
if not data_path.exists():
wasabi.msg.fail("Evaluation data not found", data_path, exits=1)
nlp = util.load_model(model)
if pipe_name not in nlp.component_names:
raise AttributeError(
Errors.E001.format(name=pipe_name, opts=nlp.component_names)
)
pipe = nlp.get_pipe(pipe_name)
if not hasattr(pipe, "scorer"):
raise AttributeError(Errors.E1045)
if type(pipe) == TextCategorizer:
wasabi.msg.warn(
"The `textcat` component doesn't use a threshold as it's not applicable to the concept of "
"exclusive classes. All thresholds will yield the same results."
)
if not silent:
wasabi.msg.info(
title=f"Optimizing for {scores_key} for component '{pipe_name}' with {n_trials} "
f"trials."
)
# Load evaluation corpus.
corpus = Corpus(data_path, gold_preproc=gold_preproc)
dev_dataset = list(corpus(nlp))
config_keys = threshold_key.split(".")
def set_nested_item(
config: Dict[str, Any], keys: List[str], value: float
) -> Dict[str, Any]:
"""Set item in nested dictionary. Adapted from https://stackoverflow.com/a/54138200.
config (Dict[str, Any]): Configuration dictionary.
keys (List[Any]): Path to value to set.
value (float): Value to set.
RETURNS (Dict[str, Any]): Updated dictionary.
"""
functools.reduce(operator.getitem, keys[:-1], config)[keys[-1]] = value
return config
def filter_config(
config: Dict[str, Any], keys: List[str], full_key: str
) -> Dict[str, Any]:
"""Filters provided config dictionary so that only the specified keys path remains.
config (Dict[str, Any]): Configuration dictionary.
keys (List[Any]): Path to value to set.
full_key (str): Full user-specified key.
RETURNS (Dict[str, Any]): Filtered dictionary.
"""
if keys[0] not in config:
wasabi.msg.fail(
title=f"Failed to look up `{full_key}` in config: sub-key {[keys[0]]} not found.",
text=f"Make sure you specified {[keys[0]]} correctly. The following sub-keys are available instead: "
f"{list(config.keys())}",
exits=1,
)
return {
keys[0]: filter_config(config[keys[0]], keys[1:], full_key)
if len(keys) > 1
else config[keys[0]]
}
# Evaluate with varying threshold values.
scores: Dict[float, float] = {}
config_keys_full = ["components", pipe_name, *config_keys]
table_col_widths = (10, 10)
thresholds = numpy.linspace(0, 1, n_trials)
print(wasabi.tables.row(["Threshold", f"{scores_key}"], widths=table_col_widths))
for threshold in thresholds:
# Reload pipeline with overrides specifying the new threshold.
nlp = util.load_model(
model,
config=set_nested_item(
filter_config(
nlp.config, config_keys_full, ".".join(config_keys_full)
).copy(),
config_keys_full,
threshold,
),
)
if hasattr(pipe, "cfg"):
setattr(
nlp.get_pipe(pipe_name),
"cfg",
set_nested_item(getattr(pipe, "cfg"), config_keys, threshold),
)
eval_scores = nlp.evaluate(dev_dataset)
if scores_key not in eval_scores:
wasabi.msg.fail(
title=f"Failed to look up score `{scores_key}` in evaluation results.",
text=f"Make sure you specified the correct value for `scores_key`. The following scores are "
f"available: {list(eval_scores.keys())}",
exits=1,
)
scores[threshold] = eval_scores[scores_key]
if not isinstance(scores[threshold], (float, int)):
wasabi.msg.fail(
f"Returned score for key '{scores_key}' is not numeric. Threshold optimization only works for numeric "
f"scores.",
exits=1,
)
print(
wasabi.row(
[round(threshold, 3), round(scores[threshold], 3)],
widths=table_col_widths,
)
)
best_threshold = max(scores.keys(), key=(lambda key: scores[key]))
# If all scores are identical, emit warning.
if len(set(scores.values())) == 1:
wasabi.msg.warn(
title="All scores are identical. Verify that all settings are correct.",
text=""
if (
not isinstance(pipe, MultiLabel_TextCategorizer)
or scores_key in ("cats_macro_f", "cats_micro_f")
)
else "Use `cats_macro_f` or `cats_micro_f` when optimizing the threshold for `textcat_multilabel`.",
)
else:
if not silent:
print(
f"\nBest threshold: {round(best_threshold, ndigits=4)} with {scores_key} value of {scores[best_threshold]}."
)
return best_threshold, scores[best_threshold], scores
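The new command can be driven from the shell or from Python. The pipeline path, data path and keys below are placeholders chosen for illustration.

from pathlib import Path
from spacy.cli.find_threshold import find_threshold

# Roughly equivalent to:
#   python -m spacy find-threshold ./my_textcat_model ./dev.spacy textcat_multilabel threshold cats_macro_f
best_threshold, best_score, all_scores = find_threshold(
    model="./my_textcat_model",      # placeholder pipeline package or path
    data_path=Path("./dev.spacy"),   # placeholder .spacy evaluation data
    pipe_name="textcat_multilabel",
    threshold_key="threshold",
    scores_key="cats_macro_f",
    n_trials=11,
    silent=False,
)
print(best_threshold, best_score)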

View File

@ -1,10 +1,13 @@
from typing import Optional, Dict, Any, Union, List from typing import Optional, Dict, Any, Union, List
import platform import platform
import pkg_resources
import json
from pathlib import Path from pathlib import Path
from wasabi import Printer, MarkdownRenderer from wasabi import Printer, MarkdownRenderer
import srsly import srsly
from ._util import app, Arg, Opt, string_to_list from ._util import app, Arg, Opt, string_to_list
from .download import get_model_filename, get_latest_version
from .. import util from .. import util
from .. import about from .. import about
@ -16,6 +19,7 @@ def info_cli(
markdown: bool = Opt(False, "--markdown", "-md", help="Generate Markdown for GitHub issues"), markdown: bool = Opt(False, "--markdown", "-md", help="Generate Markdown for GitHub issues"),
silent: bool = Opt(False, "--silent", "-s", "-S", help="Don't print anything (just return)"), silent: bool = Opt(False, "--silent", "-s", "-S", help="Don't print anything (just return)"),
exclude: str = Opt("labels", "--exclude", "-e", help="Comma-separated keys to exclude from the print-out"), exclude: str = Opt("labels", "--exclude", "-e", help="Comma-separated keys to exclude from the print-out"),
url: bool = Opt(False, "--url", "-u", help="Print the URL to download the most recent compatible version of the pipeline"),
# fmt: on # fmt: on
): ):
""" """
@ -23,10 +27,19 @@ def info_cli(
print its meta information. Flag --markdown prints details in Markdown for easy print its meta information. Flag --markdown prints details in Markdown for easy
copy-pasting to GitHub issues. copy-pasting to GitHub issues.
Flag --url prints only the download URL of the most recent compatible
version of the pipeline.
DOCS: https://spacy.io/api/cli#info DOCS: https://spacy.io/api/cli#info
""" """
exclude = string_to_list(exclude) exclude = string_to_list(exclude)
info(model, markdown=markdown, silent=silent, exclude=exclude) info(
model,
markdown=markdown,
silent=silent,
exclude=exclude,
url=url,
)
def info( def info(
@ -35,11 +48,20 @@ def info(
markdown: bool = False, markdown: bool = False,
silent: bool = True, silent: bool = True,
exclude: Optional[List[str]] = None, exclude: Optional[List[str]] = None,
url: bool = False,
) -> Union[str, dict]: ) -> Union[str, dict]:
msg = Printer(no_print=silent, pretty=not silent) msg = Printer(no_print=silent, pretty=not silent)
if not exclude: if not exclude:
exclude = [] exclude = []
if model: if url:
if model is not None:
title = f"Download info for pipeline '{model}'"
data = info_model_url(model)
print(data["download_url"])
return data
else:
msg.fail("--url option requires a pipeline name", exits=1)
elif model:
title = f"Info about pipeline '{model}'" title = f"Info about pipeline '{model}'"
data = info_model(model, silent=silent) data = info_model(model, silent=silent)
else: else:
@ -99,11 +121,44 @@ def info_model(model: str, *, silent: bool = True) -> Dict[str, Any]:
meta["source"] = str(model_path.resolve()) meta["source"] = str(model_path.resolve())
else: else:
meta["source"] = str(model_path) meta["source"] = str(model_path)
download_url = info_installed_model_url(model)
if download_url:
meta["download_url"] = download_url
return { return {
k: v for k, v in meta.items() if k not in ("accuracy", "performance", "speed") k: v for k, v in meta.items() if k not in ("accuracy", "performance", "speed")
} }
def info_installed_model_url(model: str) -> Optional[str]:
"""Given a pipeline name, get the download URL if available, otherwise
return None.
This is only available for pipelines installed as modules that have
dist-info available.
"""
try:
dist = pkg_resources.get_distribution(model)
data = json.loads(dist.get_metadata("direct_url.json"))
return data["url"]
except pkg_resources.DistributionNotFound:
# no such package
return None
except Exception:
# something else, like no file or invalid JSON
return None
def info_model_url(model: str) -> Dict[str, Any]:
"""Return the download URL for the latest version of a pipeline."""
version = get_latest_version(model)
filename = get_model_filename(model, version)
download_url = about.__download_url__ + "/" + filename
release_tpl = "https://github.com/explosion/spacy-models/releases/tag/{m}-{v}"
release_url = release_tpl.format(m=model, v=version)
return {"download_url": download_url, "release_url": release_url}
def get_markdown( def get_markdown(
data: Dict[str, Any], data: Dict[str, Any],
title: Optional[str] = None, title: Optional[str] = None,

View File

@ -299,8 +299,8 @@ def get_meta(
} }
nlp = util.load_model_from_path(Path(model_path)) nlp = util.load_model_from_path(Path(model_path))
meta.update(nlp.meta) meta.update(nlp.meta)
meta.update(existing_meta)
meta["spacy_version"] = util.get_minor_version_range(about.__version__) meta["spacy_version"] = util.get_minor_version_range(about.__version__)
meta.update(existing_meta)
meta["vectors"] = { meta["vectors"] = {
"width": nlp.vocab.vectors_length, "width": nlp.vocab.vectors_length,
"vectors": len(nlp.vocab.vectors), "vectors": len(nlp.vocab.vectors),

View File

@ -189,7 +189,11 @@ def convert_asset_url(url: str) -> str:
RETURNS (str): The converted URL. RETURNS (str): The converted URL.
""" """
# If the asset URL is a regular GitHub URL it's likely a mistake # If the asset URL is a regular GitHub URL it's likely a mistake
if re.match(r"(http(s?)):\/\/github.com", url) and "releases/download" not in url: if (
re.match(r"(http(s?)):\/\/github.com", url)
and "releases/download" not in url
and "/raw/" not in url
):
converted = url.replace("github.com", "raw.githubusercontent.com") converted = url.replace("github.com", "raw.githubusercontent.com")
converted = re.sub(r"/(tree|blob)/", "/", converted) converted = re.sub(r"/(tree|blob)/", "/", converted)
msg.warn( msg.warn(

View File

@ -25,6 +25,7 @@ def project_update_dvc_cli(
project_dir: Path = Arg(Path.cwd(), help="Location of project directory. Defaults to current working directory.", exists=True, file_okay=False), project_dir: Path = Arg(Path.cwd(), help="Location of project directory. Defaults to current working directory.", exists=True, file_okay=False),
workflow: Optional[str] = Arg(None, help=f"Name of workflow defined in {PROJECT_FILE}. Defaults to first workflow if not set."), workflow: Optional[str] = Arg(None, help=f"Name of workflow defined in {PROJECT_FILE}. Defaults to first workflow if not set."),
verbose: bool = Opt(False, "--verbose", "-V", help="Print more info"), verbose: bool = Opt(False, "--verbose", "-V", help="Print more info"),
quiet: bool = Opt(False, "--quiet", "-q", help="Print less info"),
force: bool = Opt(False, "--force", "-F", help="Force update DVC config"), force: bool = Opt(False, "--force", "-F", help="Force update DVC config"),
# fmt: on # fmt: on
): ):
@ -36,7 +37,7 @@ def project_update_dvc_cli(
DOCS: https://spacy.io/api/cli#project-dvc DOCS: https://spacy.io/api/cli#project-dvc
""" """
project_update_dvc(project_dir, workflow, verbose=verbose, force=force) project_update_dvc(project_dir, workflow, verbose=verbose, quiet=quiet, force=force)
def project_update_dvc( def project_update_dvc(
@ -44,6 +45,7 @@ def project_update_dvc(
workflow: Optional[str] = None, workflow: Optional[str] = None,
*, *,
verbose: bool = False, verbose: bool = False,
quiet: bool = False,
force: bool = False, force: bool = False,
) -> None: ) -> None:
"""Update the auto-generated Data Version Control (DVC) config file. A DVC """Update the auto-generated Data Version Control (DVC) config file. A DVC
@ -54,11 +56,12 @@ def project_update_dvc(
workflow (Optional[str]): Optional name of workflow defined in project.yml. workflow (Optional[str]): Optional name of workflow defined in project.yml.
If not set, the first workflow will be used. If not set, the first workflow will be used.
verbose (bool): Print more info. verbose (bool): Print more info.
quiet (bool): Print less info.
force (bool): Force update DVC config. force (bool): Force update DVC config.
""" """
config = load_project_config(project_dir) config = load_project_config(project_dir)
updated = update_dvc_config( updated = update_dvc_config(
project_dir, config, workflow, verbose=verbose, force=force project_dir, config, workflow, verbose=verbose, quiet=quiet, force=force
) )
help_msg = "To execute the workflow with DVC, run: dvc repro" help_msg = "To execute the workflow with DVC, run: dvc repro"
if updated: if updated:
@ -72,7 +75,7 @@ def update_dvc_config(
config: Dict[str, Any], config: Dict[str, Any],
workflow: Optional[str] = None, workflow: Optional[str] = None,
verbose: bool = False, verbose: bool = False,
silent: bool = False, quiet: bool = False,
force: bool = False, force: bool = False,
) -> bool: ) -> bool:
"""Re-run the DVC commands in dry mode and update dvc.yaml file in the """Re-run the DVC commands in dry mode and update dvc.yaml file in the
@ -83,7 +86,7 @@ def update_dvc_config(
path (Path): The path to the project directory. path (Path): The path to the project directory.
config (Dict[str, Any]): The loaded project.yml. config (Dict[str, Any]): The loaded project.yml.
verbose (bool): Whether to print additional info (via DVC). verbose (bool): Whether to print additional info (via DVC).
silent (bool): Don't output anything (via DVC). quiet (bool): Don't output anything (via DVC).
force (bool): Force update, even if hashes match. force (bool): Force update, even if hashes match.
RETURNS (bool): Whether the DVC config file was updated. RETURNS (bool): Whether the DVC config file was updated.
""" """
@ -105,6 +108,14 @@ def update_dvc_config(
dvc_config_path.unlink() dvc_config_path.unlink()
dvc_commands = [] dvc_commands = []
config_commands = {cmd["name"]: cmd for cmd in config.get("commands", [])} config_commands = {cmd["name"]: cmd for cmd in config.get("commands", [])}
# some flags that apply to every command
flags = []
if verbose:
flags.append("--verbose")
if quiet:
flags.append("--quiet")
for name in workflows[workflow]: for name in workflows[workflow]:
command = config_commands[name] command = config_commands[name]
deps = command.get("deps", []) deps = command.get("deps", [])
@ -118,14 +129,26 @@ def update_dvc_config(
deps_cmd = [c for cl in [["-d", p] for p in deps] for c in cl] deps_cmd = [c for cl in [["-d", p] for p in deps] for c in cl]
outputs_cmd = [c for cl in [["-o", p] for p in outputs] for c in cl] outputs_cmd = [c for cl in [["-o", p] for p in outputs] for c in cl]
outputs_nc_cmd = [c for cl in [["-O", p] for p in outputs_no_cache] for c in cl] outputs_nc_cmd = [c for cl in [["-O", p] for p in outputs_no_cache] for c in cl]
dvc_cmd = ["run", "-n", name, "-w", str(path), "--no-exec"]
dvc_cmd = ["run", *flags, "-n", name, "-w", str(path), "--no-exec"]
if command.get("no_skip"): if command.get("no_skip"):
dvc_cmd.append("--always-changed") dvc_cmd.append("--always-changed")
full_cmd = [*dvc_cmd, *deps_cmd, *outputs_cmd, *outputs_nc_cmd, *project_cmd] full_cmd = [*dvc_cmd, *deps_cmd, *outputs_cmd, *outputs_nc_cmd, *project_cmd]
dvc_commands.append(join_command(full_cmd)) dvc_commands.append(join_command(full_cmd))
if not dvc_commands:
# If we don't check for this, then there will be an error when reading the
# config, since DVC wouldn't create it.
msg.fail(
"No usable commands for DVC found. This can happen if none of your "
"commands have dependencies or outputs.",
exits=1,
)
with working_dir(path): with working_dir(path):
dvc_flags = {"--verbose": verbose, "--quiet": silent} for c in dvc_commands:
run_dvc_commands(dvc_commands, flags=dvc_flags) dvc_command = "dvc " + c
run_command(dvc_command)
with dvc_config_path.open("r+", encoding="utf8") as f: with dvc_config_path.open("r+", encoding="utf8") as f:
content = f.read() content = f.read()
f.seek(0, 0) f.seek(0, 0)
@ -133,26 +156,6 @@ def update_dvc_config(
return True return True
def run_dvc_commands(
commands: Iterable[str] = SimpleFrozenList(), flags: Dict[str, bool] = {}
) -> None:
"""Run a sequence of DVC commands in a subprocess, in order.
commands (List[str]): The string commands without the leading "dvc".
flags (Dict[str, bool]): Conditional flags to be added to command. Makes it
easier to pass flags like --quiet that depend on a variable or
command-line setting while avoiding lots of nested conditionals.
"""
for c in commands:
command = split_command(c)
dvc_command = ["dvc", *command]
# Add the flags if they are set to True
for flag, is_active in flags.items():
if is_active:
dvc_command.append(flag)
run_command(dvc_command)
def check_workflows(workflows: List[str], workflow: Optional[str] = None) -> None: def check_workflows(workflows: List[str], workflow: Optional[str] = None) -> None:
"""Validate workflows provided in project.yml and check that a given """Validate workflows provided in project.yml and check that a given
workflow can be used to generate a DVC config. workflow can be used to generate a DVC config.

View File

@ -5,14 +5,17 @@ import hashlib
import urllib.parse import urllib.parse
import tarfile import tarfile
from pathlib import Path from pathlib import Path
from wasabi import msg
from .._util import get_hash, get_checksum, download_file, ensure_pathy from .._util import get_hash, get_checksum, upload_file, download_file
from ...util import make_tempdir, get_minor_version, ENV_VARS, check_bool_env_var from .._util import ensure_pathy, make_tempdir
from ...util import get_minor_version, ENV_VARS, check_bool_env_var
from ...git_info import GIT_VERSION from ...git_info import GIT_VERSION
from ... import about from ... import about
from ...errors import Errors
if TYPE_CHECKING: if TYPE_CHECKING:
from pathy import Pathy # noqa: F401 from pathy import FluidPath # noqa: F401
class RemoteStorage: class RemoteStorage:
@ -27,7 +30,7 @@ class RemoteStorage:
self.url = ensure_pathy(url) self.url = ensure_pathy(url)
self.compression = compression self.compression = compression
def push(self, path: Path, command_hash: str, content_hash: str) -> "Pathy": def push(self, path: Path, command_hash: str, content_hash: str) -> "FluidPath":
"""Compress a file or directory within a project and upload it to a remote """Compress a file or directory within a project and upload it to a remote
storage. If an object exists at the full URL, nothing is done. storage. If an object exists at the full URL, nothing is done.
@ -48,9 +51,7 @@ class RemoteStorage:
mode_string = f"w:{self.compression}" if self.compression else "w" mode_string = f"w:{self.compression}" if self.compression else "w"
with tarfile.open(tar_loc, mode=mode_string) as tar_file: with tarfile.open(tar_loc, mode=mode_string) as tar_file:
tar_file.add(str(loc), arcname=str(path)) tar_file.add(str(loc), arcname=str(path))
with tar_loc.open(mode="rb") as input_file: upload_file(tar_loc, url)
with url.open(mode="wb") as output_file:
output_file.write(input_file.read())
return url return url
def pull( def pull(
@ -59,7 +60,7 @@ class RemoteStorage:
*, *,
command_hash: Optional[str] = None, command_hash: Optional[str] = None,
content_hash: Optional[str] = None, content_hash: Optional[str] = None,
) -> Optional["Pathy"]: ) -> Optional["FluidPath"]:
"""Retrieve a file from the remote cache. If the file already exists, """Retrieve a file from the remote cache. If the file already exists,
nothing is done. nothing is done.
@ -84,7 +85,23 @@ class RemoteStorage:
with tarfile.open(tar_loc, mode=mode_string) as tar_file: with tarfile.open(tar_loc, mode=mode_string) as tar_file:
# This requires that the path is added correctly, relative # This requires that the path is added correctly, relative
# to root. This is how we set things up in push() # to root. This is how we set things up in push()
tar_file.extractall(self.root)
# Disallow paths outside the current directory for the tar
# file (CVE-2007-4559, directory traversal vulnerability)
def is_within_directory(directory, target):
abs_directory = os.path.abspath(directory)
abs_target = os.path.abspath(target)
prefix = os.path.commonprefix([abs_directory, abs_target])
return prefix == abs_directory
def safe_extract(tar, path):
for member in tar.getmembers():
member_path = os.path.join(path, member.name)
if not is_within_directory(path, member_path):
raise ValueError(Errors.E852)
tar.extractall(path)
safe_extract(tar_file, self.root)
return url return url
def find( def find(
@ -93,25 +110,37 @@ class RemoteStorage:
*, *,
command_hash: Optional[str] = None, command_hash: Optional[str] = None,
content_hash: Optional[str] = None, content_hash: Optional[str] = None,
) -> Optional["Pathy"]: ) -> Optional["FluidPath"]:
"""Find the best matching version of a file within the storage, """Find the best matching version of a file within the storage,
or `None` if no match can be found. If both the creation and content hash or `None` if no match can be found. If both the creation and content hash
are specified, only exact matches will be returned. Otherwise, the most are specified, only exact matches will be returned. Otherwise, the most
recent matching file is preferred. recent matching file is preferred.
""" """
name = self.encode_name(str(path)) name = self.encode_name(str(path))
urls = []
if command_hash is not None and content_hash is not None: if command_hash is not None and content_hash is not None:
url = self.make_url(path, command_hash, content_hash) url = self.url / name / command_hash / content_hash
urls = [url] if url.exists() else [] urls = [url] if url.exists() else []
elif command_hash is not None: elif command_hash is not None:
if (self.url / name / command_hash).exists():
urls = list((self.url / name / command_hash).iterdir()) urls = list((self.url / name / command_hash).iterdir())
else: else:
urls = list((self.url / name).iterdir()) if (self.url / name).exists():
for sub_dir in (self.url / name).iterdir():
urls.extend(sub_dir.iterdir())
if content_hash is not None: if content_hash is not None:
urls = [url for url in urls if url.parts[-1] == content_hash] urls = [url for url in urls if url.parts[-1] == content_hash]
if len(urls) >= 2:
try:
urls.sort(key=lambda x: x.stat().last_modified) # type: ignore
except Exception:
msg.warn(
"Unable to sort remote files by last modified. The file(s) "
"pulled from the cache may not be the most recent."
)
return urls[-1] if urls else None return urls[-1] if urls else None
def make_url(self, path: Path, command_hash: str, content_hash: str) -> "Pathy": def make_url(self, path: Path, command_hash: str, content_hash: str) -> "FluidPath":
"""Construct a URL from a subpath, a creation hash and a content hash.""" """Construct a URL from a subpath, a creation hash and a content hash."""
return self.url / self.encode_name(str(path)) / command_hash / content_hash return self.url / self.encode_name(str(path)) / command_hash / content_hash
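The directory-traversal guard added to pull() can be read as a standalone helper roughly like this; the error message is simplified here, whereas the real code raises Errors.E852.

import os
import tarfile

def is_within_directory(directory: str, target: str) -> bool:
    # A member is safe only if its absolute path stays inside the extraction root.
    abs_directory = os.path.abspath(directory)
    abs_target = os.path.abspath(target)
    return os.path.commonprefix([abs_directory, abs_target]) == abs_directory

def safe_extract(tar: tarfile.TarFile, path: str) -> None:
    # Reject archives containing members that would escape `path` (CVE-2007-4559).
    for member in tar.getmembers():
        member_path = os.path.join(path, member.name)
        if not is_within_directory(path, member_path):
            raise ValueError("unsafe path traversal in tar archive")
    tar.extractall(path)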

View File

@ -1,5 +1,8 @@
from typing import Optional, List, Dict, Sequence, Any, Iterable from typing import Optional, List, Dict, Sequence, Any, Iterable, Tuple
import os.path
from pathlib import Path from pathlib import Path
import pkg_resources
from wasabi import msg from wasabi import msg
from wasabi.util import locale_escape from wasabi.util import locale_escape
import sys import sys
@ -50,6 +53,7 @@ def project_run(
force: bool = False, force: bool = False,
dry: bool = False, dry: bool = False,
capture: bool = False, capture: bool = False,
skip_requirements_check: bool = False,
) -> None: ) -> None:
"""Run a named script defined in the project.yml. If the script is part """Run a named script defined in the project.yml. If the script is part
of the default pipeline (defined in the "run" section), DVC is used to of the default pipeline (defined in the "run" section), DVC is used to
@ -66,11 +70,19 @@ def project_run(
sys.exit will be called with the return code. You should use capture=False sys.exit will be called with the return code. You should use capture=False
when you want to turn over execution to the command, and capture=True when you want to turn over execution to the command, and capture=True
when you want to run the command more like a function. when you want to run the command more like a function.
skip_requirements_check (bool): Whether to skip the requirements check.
""" """
config = load_project_config(project_dir, overrides=overrides) config = load_project_config(project_dir, overrides=overrides)
commands = {cmd["name"]: cmd for cmd in config.get("commands", [])} commands = {cmd["name"]: cmd for cmd in config.get("commands", [])}
workflows = config.get("workflows", {}) workflows = config.get("workflows", {})
validate_subcommand(list(commands.keys()), list(workflows.keys()), subcommand) validate_subcommand(list(commands.keys()), list(workflows.keys()), subcommand)
req_path = project_dir / "requirements.txt"
if not skip_requirements_check:
if config.get("check_requirements", True) and os.path.exists(req_path):
with req_path.open() as requirements_file:
_check_requirements([req.strip() for req in requirements_file])
if subcommand in workflows: if subcommand in workflows:
msg.info(f"Running workflow '{subcommand}'") msg.info(f"Running workflow '{subcommand}'")
for cmd in workflows[subcommand]: for cmd in workflows[subcommand]:
@ -81,6 +93,7 @@ def project_run(
force=force, force=force,
dry=dry, dry=dry,
capture=capture, capture=capture,
skip_requirements_check=True,
) )
else: else:
cmd = commands[subcommand] cmd = commands[subcommand]
@ -88,8 +101,8 @@ def project_run(
if not (project_dir / dep).exists(): if not (project_dir / dep).exists():
err = f"Missing dependency specified by command '{subcommand}': {dep}" err = f"Missing dependency specified by command '{subcommand}': {dep}"
err_help = "Maybe you forgot to run the 'project assets' command or a previous step?" err_help = "Maybe you forgot to run the 'project assets' command or a previous step?"
err_kwargs = {"exits": 1} if not dry else {} err_exits = 1 if not dry else None
msg.fail(err, err_help, **err_kwargs) msg.fail(err, err_help, exits=err_exits)
check_spacy_commit = check_bool_env_var(ENV_VARS.PROJECT_USE_GIT_VERSION) check_spacy_commit = check_bool_env_var(ENV_VARS.PROJECT_USE_GIT_VERSION)
with working_dir(project_dir) as current_dir: with working_dir(project_dir) as current_dir:
msg.divider(subcommand) msg.divider(subcommand)
@ -195,6 +208,8 @@ def validate_subcommand(
msg.fail(f"No commands or workflows defined in {PROJECT_FILE}", exits=1) msg.fail(f"No commands or workflows defined in {PROJECT_FILE}", exits=1)
if subcommand not in commands and subcommand not in workflows: if subcommand not in commands and subcommand not in workflows:
help_msg = [] help_msg = []
if subcommand in ["assets", "asset"]:
help_msg.append("Did you mean to run: python -m spacy project assets?")
if commands: if commands:
help_msg.append(f"Available commands: {', '.join(commands)}") help_msg.append(f"Available commands: {', '.join(commands)}")
if workflows: if workflows:
@ -308,3 +323,38 @@ def get_fileinfo(project_dir: Path, paths: List[str]) -> List[Dict[str, Optional
md5 = get_checksum(file_path) if file_path.exists() else None md5 = get_checksum(file_path) if file_path.exists() else None
data.append({"path": path, "md5": md5}) data.append({"path": path, "md5": md5})
return data return data
def _check_requirements(requirements: List[str]) -> Tuple[bool, bool]:
"""Checks whether requirements are installed and free of version conflicts.
requirements (List[str]): List of requirements.
RETURNS (Tuple[bool, bool]): Whether (1) any packages couldn't be imported, (2) any packages with version conflicts
exist.
"""
failed_pkgs_msgs: List[str] = []
conflicting_pkgs_msgs: List[str] = []
for req in requirements:
try:
pkg_resources.require(req)
except pkg_resources.DistributionNotFound as dnf:
failed_pkgs_msgs.append(dnf.report())
except pkg_resources.VersionConflict as vc:
conflicting_pkgs_msgs.append(vc.report())
except Exception:
msg.warn(
f"Unable to check requirement: {req} "
"Checks are currently limited to requirement specifiers "
"(PEP 508)"
)
if len(failed_pkgs_msgs) or len(conflicting_pkgs_msgs):
msg.warn(
title="Missing requirements or requirement conflicts detected. Make sure your Python environment is set up "
"correctly and you installed all requirements specified in your project's requirements.txt: "
)
for pkg_msg in failed_pkgs_msgs + conflicting_pkgs_msgs:
msg.text(pkg_msg)
return len(failed_pkgs_msgs) > 0, len(conflicting_pkgs_msgs) > 0
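To get a feel for what pkg_resources reports here, a minimal standalone check might look like this; the requirement strings are placeholders.

import pkg_resources

for req in ["numpy>=1.15.0", "some-missing-package==1.0"]:  # placeholder requirements
    try:
        pkg_resources.require(req)
    except pkg_resources.DistributionNotFound as dnf:
        print("missing:", dnf.report())
    except pkg_resources.VersionConflict as vc:
        print("conflict:", vc.report())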

View File

@ -1,7 +1,7 @@
{# This is a template for training configs used for the quickstart widget in {# This is a template for training configs used for the quickstart widget in
the docs and the init config command. It encodes various best practices and the docs and the init config command. It encodes various best practices and
can help generate the best possible configuration, given a user's requirements. #} can help generate the best possible configuration, given a user's requirements. #}
{%- set use_transformer = hardware != "cpu" -%} {%- set use_transformer = hardware != "cpu" and transformer_data -%}
{%- set transformer = transformer_data[optimize] if use_transformer else {} -%} {%- set transformer = transformer_data[optimize] if use_transformer else {} -%}
{%- set listener_components = ["tagger", "morphologizer", "parser", "ner", "textcat", "textcat_multilabel", "entity_linker", "spancat", "trainable_lemmatizer"] -%} {%- set listener_components = ["tagger", "morphologizer", "parser", "ner", "textcat", "textcat_multilabel", "entity_linker", "spancat", "trainable_lemmatizer"] -%}
[paths] [paths]
@ -271,13 +271,8 @@ factory = "tok2vec"
[components.tok2vec.model.embed] [components.tok2vec.model.embed]
@architectures = "spacy.MultiHashEmbed.v2" @architectures = "spacy.MultiHashEmbed.v2"
width = ${components.tok2vec.model.encode.width} width = ${components.tok2vec.model.encode.width}
{% if has_letters -%}
attrs = ["NORM", "PREFIX", "SUFFIX", "SHAPE"] attrs = ["NORM", "PREFIX", "SUFFIX", "SHAPE"]
rows = [5000, 2500, 2500, 2500] rows = [5000, 1000, 2500, 2500]
{% else -%}
attrs = ["ORTH", "SHAPE"]
rows = [5000, 2500]
{% endif -%}
include_static_vectors = {{ "true" if optimize == "accuracy" else "false" }} include_static_vectors = {{ "true" if optimize == "accuracy" else "false" }}
[components.tok2vec.model.encode] [components.tok2vec.model.encode]

View File

@ -37,6 +37,15 @@ bn:
accuracy: accuracy:
name: sagorsarker/bangla-bert-base name: sagorsarker/bangla-bert-base
size_factor: 3 size_factor: 3
ca:
word_vectors: null
transformer:
efficiency:
name: projecte-aina/roberta-base-ca-v2
size_factor: 3
accuracy:
name: projecte-aina/roberta-base-ca-v2
size_factor: 3
da: da:
word_vectors: da_core_news_lg word_vectors: da_core_news_lg
transformer: transformer:
@ -271,4 +280,3 @@ zh:
accuracy: accuracy:
name: bert-base-chinese name: bert-base-chinese
size_factor: 3 size_factor: 3
has_letters: false

View File

@ -90,6 +90,8 @@ dev_corpus = "corpora.dev"
train_corpus = "corpora.train" train_corpus = "corpora.train"
# Optional callback before nlp object is saved to disk after training # Optional callback before nlp object is saved to disk after training
before_to_disk = null before_to_disk = null
# Optional callback that is invoked at the start of each training step
before_update = null
[training.logger] [training.logger]
@loggers = "spacy.ConsoleLogger.v1" @loggers = "spacy.ConsoleLogger.v1"

View File

@ -11,6 +11,7 @@ from .render import DependencyRenderer, EntityRenderer, SpanRenderer
from ..tokens import Doc, Span from ..tokens import Doc, Span
from ..errors import Errors, Warnings from ..errors import Errors, Warnings
from ..util import is_in_jupyter from ..util import is_in_jupyter
from ..util import find_available_port
_html = {} _html = {}
@ -36,7 +37,7 @@ def render(
jupyter (bool): Override Jupyter auto-detection. jupyter (bool): Override Jupyter auto-detection.
options (dict): Visualiser-specific options, e.g. colors. options (dict): Visualiser-specific options, e.g. colors.
manual (bool): Don't parse `Doc` and instead expect a dict/list of dicts. manual (bool): Don't parse `Doc` and instead expect a dict/list of dicts.
RETURNS (str): Rendered HTML markup. RETURNS (str): Rendered SVG or HTML markup.
DOCS: https://spacy.io/api/top-level#displacy.render DOCS: https://spacy.io/api/top-level#displacy.render
USAGE: https://spacy.io/usage/visualizers USAGE: https://spacy.io/usage/visualizers
@ -82,6 +83,7 @@ def serve(
manual: bool = False, manual: bool = False,
port: int = 5000, port: int = 5000,
host: str = "0.0.0.0", host: str = "0.0.0.0",
auto_select_port: bool = False,
) -> None: ) -> None:
"""Serve displaCy visualisation. """Serve displaCy visualisation.
@ -93,12 +95,15 @@ def serve(
manual (bool): Don't parse `Doc` and instead expect a dict/list of dicts. manual (bool): Don't parse `Doc` and instead expect a dict/list of dicts.
port (int): Port to serve visualisation. port (int): Port to serve visualisation.
host (str): Host to serve visualisation. host (str): Host to serve visualisation.
auto_select_port (bool): Automatically select a port if the specified port is in use.
DOCS: https://spacy.io/api/top-level#displacy.serve DOCS: https://spacy.io/api/top-level#displacy.serve
USAGE: https://spacy.io/usage/visualizers USAGE: https://spacy.io/usage/visualizers
""" """
from wsgiref import simple_server from wsgiref import simple_server
port = find_available_port(port, host, auto_select_port)
if is_in_jupyter(): if is_in_jupyter():
warnings.warn(Warnings.W011) warnings.warn(Warnings.W011)
render(docs, style=style, page=page, minify=minify, options=options, manual=manual) render(docs, style=style, page=page, minify=minify, options=options, manual=manual)
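A usage sketch for the new auto_select_port option follows; en_core_web_sm is assumed to be installed, and any pipeline would do.

import spacy
from spacy import displacy

nlp = spacy.load("en_core_web_sm")  # assumed to be installed
doc = nlp("Apple is looking at buying U.K. startup for $1 billion.")
# If port 5000 is already in use, the nearest free port is chosen and warning W124 is emitted.
displacy.serve(doc, style="ent", port=5000, auto_select_port=True)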
@ -228,12 +233,13 @@ def parse_spans(doc: Doc, options: Dict[str, Any] = {}) -> Dict[str, Any]:
"kb_id": span.kb_id_ if span.kb_id_ else "", "kb_id": span.kb_id_ if span.kb_id_ else "",
"kb_url": kb_url_template.format(span.kb_id_) if kb_url_template else "#", "kb_url": kb_url_template.format(span.kb_id_) if kb_url_template else "#",
} }
for span in doc.spans[spans_key] for span in doc.spans.get(spans_key, [])
] ]
tokens = [token.text for token in doc] tokens = [token.text for token in doc]
if not spans: if not spans:
warnings.warn(Warnings.W117.format(spans_key=spans_key)) keys = list(doc.spans.keys())
warnings.warn(Warnings.W117.format(spans_key=spans_key, keys=keys))
title = doc.user_data.get("title", None) if hasattr(doc, "user_data") else None title = doc.user_data.get("title", None) if hasattr(doc, "user_data") else None
settings = get_doc_settings(doc) settings = get_doc_settings(doc)
return { return {

View File

@ -94,7 +94,7 @@ class SpanRenderer:
parsed (list): Dependency parses to render. parsed (list): Dependency parses to render.
page (bool): Render parses wrapped as full HTML page. page (bool): Render parses wrapped as full HTML page.
minify (bool): Minify HTML markup. minify (bool): Minify HTML markup.
RETURNS (str): Rendered HTML markup. RETURNS (str): Rendered SVG or HTML markup.
""" """
rendered = [] rendered = []
for i, p in enumerate(parsed): for i, p in enumerate(parsed):
@ -510,7 +510,7 @@ class EntityRenderer:
parsed (list): Dependency parses to render. parsed (list): Dependency parses to render.
page (bool): Render parses wrapped as full HTML page. page (bool): Render parses wrapped as full HTML page.
minify (bool): Minify HTML markup. minify (bool): Minify HTML markup.
RETURNS (str): Rendered HTML markup. RETURNS (str): Rendered SVG or HTML markup.
""" """
rendered = [] rendered = []
for i, p in enumerate(parsed): for i, p in enumerate(parsed):

View File

@ -199,7 +199,7 @@ class Warnings(metaclass=ErrorsWithCodes):
    W117 = ("No spans to visualize found in Doc object with spans_key: '{spans_key}'. If this is "
            "surprising to you, make sure the Doc was processed using a model "
            "that supports span categorization, and check the `doc.spans[spans_key]` "
-           "property manually if necessary.")
+           "property manually if necessary.\n\nAvailable keys: {keys}")
    W118 = ("Term '{term}' not found in glossary. It may however be explained in documentation "
            "for the corpora used to train the language. Please check "
            "`nlp.meta[\"sources\"]` for any relevant links.")
@ -212,6 +212,9 @@ class Warnings(metaclass=ErrorsWithCodes):
    W121 = ("Attempting to trace non-existent method '{method}' in pipe '{pipe}'")
    W122 = ("Couldn't trace method '{method}' in pipe '{pipe}'. This can happen if the pipe class "
            "is a Cython extension type.")
+   W123 = ("Argument `enable` with value {enable} does not contain all values specified in the config option "
+           "`enabled` ({enabled}). Be aware that this might affect other components in your pipeline.")
+   W124 = ("{host}:{port} is already in use, using the nearest available port {serve_port} as an alternative.")


class Errors(metaclass=ErrorsWithCodes):
@ -230,8 +233,9 @@ class Errors(metaclass=ErrorsWithCodes):
            "initialized component.")
    E004 = ("Can't set up pipeline component: a factory for '{name}' already "
            "exists. Existing factory: {func}. New factory: {new_func}")
-   E005 = ("Pipeline component '{name}' returned None. If you're using a "
-           "custom component, maybe you forgot to return the processed Doc?")
+   E005 = ("Pipeline component '{name}' returned {returned_type} instead of a "
+           "Doc. If you're using a custom component, maybe you forgot to "
+           "return the processed Doc?")
    E006 = ("Invalid constraints for adding pipeline component. You can only "
            "set one of the following: before (component name or index), "
            "after (component name or index), first (True) or last (True). "
@ -342,6 +346,11 @@ class Errors(metaclass=ErrorsWithCodes):
            "clear the existing vectors and resize the table.")
    E074 = ("Error interpreting compiled match pattern: patterns are expected "
            "to end with the attribute {attr}. Got: {bad_attr}.")
+   E079 = ("Error computing states in beam: number of predicted beams "
+           "({pbeams}) does not equal number of gold beams ({gbeams}).")
+   E080 = ("Duplicate state found in beam: {key}.")
+   E081 = ("Error getting gradient in beam: number of histories ({n_hist}) "
+           "does not equal number of losses ({losses}).")
    E082 = ("Error deprojectivizing parse: number of heads ({n_heads}), "
            "projective heads ({n_proj_heads}) and labels ({n_labels}) do not "
            "match.")
@ -537,8 +546,14 @@ class Errors(metaclass=ErrorsWithCodes):
    E199 = ("Unable to merge 0-length span at `doc[{start}:{end}]`.")
    E200 = ("Can't set {attr} from Span.")
    E202 = ("Unsupported {name} mode '{mode}'. Supported modes: {modes}.")
+   E203 = ("If the {name} embedding layer is not updated "
+           "during training, make sure to include it in 'annotating components'")

    # New errors added in v3.x
+   E851 = ("The 'textcat' component labels should only have values of 0 or 1, "
+           "but found value of '{val}'.")
+   E852 = ("The tar file pulled from the remote attempted an unsafe path "
+           "traversal.")
    E853 = ("Unsupported component factory name '{name}'. The character '.' is "
            "not permitted in factory names.")
    E854 = ("Unable to set doc.ents. Check that the 'ents_filter' does not "
@ -706,11 +721,11 @@ class Errors(metaclass=ErrorsWithCodes):
            "need to modify the pipeline, use the built-in methods like "
            "`nlp.add_pipe`, `nlp.remove_pipe`, `nlp.disable_pipe` or "
            "`nlp.enable_pipe` instead.")
-   E927 = ("Can't write to frozen list Maybe you're trying to modify a computed "
+   E927 = ("Can't write to frozen list. Maybe you're trying to modify a computed "
            "property or default function argument?")
-   E928 = ("A KnowledgeBase can only be serialized to/from from a directory, "
+   E928 = ("An InMemoryLookupKB can only be serialized to/from from a directory, "
            "but the provided argument {loc} points to a file.")
-   E929 = ("Couldn't read KnowledgeBase from {loc}. The path does not seem to exist.")
+   E929 = ("Couldn't read InMemoryLookupKB from {loc}. The path does not seem to exist.")
    E930 = ("Received invalid get_examples callback in `{method}`. "
            "Expected function that returns an iterable of Example objects but "
            "got: {obj}")
@ -936,10 +951,23 @@ class Errors(metaclass=ErrorsWithCodes):
    E1040 = ("Doc.from_json requires all tokens to have the same attributes. "
             "Some tokens do not contain annotation for: {partial_attrs}")
    E1041 = ("Expected a string, Doc, or bytes as input, but got: {type}")
-   E1042 = ("Function was called with `{arg1}`={arg1_values} and "
-            "`{arg2}`={arg2_values} but these arguments are conflicting.")
+   E1042 = ("`enable={enable}` and `disable={disable}` are inconsistent with each other.\nIf you only passed "
+            "one of `enable` or `disable`, the other argument is specified in your pipeline's configuration.\nIn that "
+            "case pass an empty list for the previously not specified argument to avoid this error.")
    E1043 = ("Expected None or a value in range [{range_start}, {range_end}] for entity linker threshold, but got "
             "{value}.")
+   E1044 = ("Expected `candidates_batch_size` to be >= 1, but got: {value}")
+   E1045 = ("Encountered {parent} subclass without `{parent}.{method}` "
+            "method in '{name}'. If you want to use this method, make "
+            "sure it's overwritten on the subclass.")
+   E1046 = ("{cls_name} is an abstract class and cannot be instantiated. If you are looking for spaCy's default "
+            "knowledge base, use `InMemoryLookupKB`.")
+   E1047 = ("`find_threshold()` only supports components with a `scorer` attribute.")
+   E1048 = ("Got '{unexpected}' as console progress bar type, but expected one of the following: {expected}")
+   E1049 = ("No available port found for displaCy on host {host}. Please specify an available port "
+            "with `displacy.serve(doc, port=port)`")
+   E1050 = ("Port {port} is already in use. Please specify an available port with `displacy.serve(doc, port=port)` "
+            "or use `auto_switch_port=True` to pick an available port automatically.")

    # Deprecated model shortcuts, only used in errors and warnings

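W124, E1049 and E1050 back the new port handling in `displacy.serve`. A rough sketch of the intended usage (the port number is arbitrary):

import spacy
from spacy import displacy

nlp = spacy.blank("en")
doc = nlp("This is a sentence.")

# If port 5001 is busy, auto_switch_port=True falls back to the nearest free
# port and emits W124; without it, a busy port raises E1050.
displacy.serve(doc, style="ent", port=5001, auto_switch_port=True)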
spacy/kb/__init__.py Normal file

@ -0,0 +1,3 @@
from .kb import KnowledgeBase
from .kb_in_memory import InMemoryLookupKB
from .candidate import Candidate, get_candidates, get_candidates_batch

spacy/kb/candidate.pxd Normal file

@ -0,0 +1,12 @@
from .kb cimport KnowledgeBase
from libcpp.vector cimport vector
from ..typedefs cimport hash_t
# Object used by the Entity Linker that summarizes one entity-alias candidate combination.
cdef class Candidate:
cdef readonly KnowledgeBase kb
cdef hash_t entity_hash
cdef float entity_freq
cdef vector[float] entity_vector
cdef hash_t alias_hash
cdef float prior_prob

spacy/kb/candidate.pyx Normal file

@ -0,0 +1,74 @@
# cython: infer_types=True, profile=True
from typing import Iterable
from .kb cimport KnowledgeBase
from ..tokens import Span
cdef class Candidate:
"""A `Candidate` object refers to a textual mention (`alias`) that may or may not be resolved
to a specific `entity` from a Knowledge Base. This will be used as input for the entity linking
algorithm which will disambiguate the various candidates to the correct one.
Each candidate (alias, entity) pair is assigned a certain prior probability.
DOCS: https://spacy.io/api/kb/#candidate-init
"""
def __init__(self, KnowledgeBase kb, entity_hash, entity_freq, entity_vector, alias_hash, prior_prob):
self.kb = kb
self.entity_hash = entity_hash
self.entity_freq = entity_freq
self.entity_vector = entity_vector
self.alias_hash = alias_hash
self.prior_prob = prior_prob
@property
def entity(self) -> int:
"""RETURNS (uint64): hash of the entity's KB ID/name"""
return self.entity_hash
@property
def entity_(self) -> str:
"""RETURNS (str): ID/name of this entity in the KB"""
return self.kb.vocab.strings[self.entity_hash]
@property
def alias(self) -> int:
"""RETURNS (uint64): hash of the alias"""
return self.alias_hash
@property
def alias_(self) -> str:
"""RETURNS (str): ID of the original alias"""
return self.kb.vocab.strings[self.alias_hash]
@property
def entity_freq(self) -> float:
return self.entity_freq
@property
def entity_vector(self) -> Iterable[float]:
return self.entity_vector
@property
def prior_prob(self) -> float:
return self.prior_prob
def get_candidates(kb: KnowledgeBase, mention: Span) -> Iterable[Candidate]:
"""
Return candidate entities for a given mention, fetching appropriate entries from the index.
kb (KnowledgeBase): Knowledge base to query.
mention (Span): Entity mention for which to identify candidates.
RETURNS (Iterable[Candidate]): Identified candidates.
"""
return kb.get_candidates(mention)
def get_candidates_batch(kb: KnowledgeBase, mentions: Iterable[Span]) -> Iterable[Iterable[Candidate]]:
"""
Return candidate entities for the given mentions, fetching appropriate entries from the index.
kb (KnowledgeBase): Knowledge base to query.
mentions (Iterable[Span]): Entity mentions for which to identify candidates.
RETURNS (Iterable[Iterable[Candidate]]): Identified candidates.
"""
return kb.get_candidates_batch(mentions)

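`get_candidates` and `get_candidates_batch` are thin module-level wrappers around the KB's own lookup methods, so a drop-in replacement only needs to keep their signatures. A rough sketch of a custom generator (the lower-casing fallback is made up for illustration):

from typing import Iterable

from spacy.kb import Candidate, InMemoryLookupKB
from spacy.tokens import Span

def get_lowercase_candidates(kb: InMemoryLookupKB, mention: Span) -> Iterable[Candidate]:
    # Try the surface form first, then fall back to its lower-cased variant,
    # so "BERLIN" and "Berlin" end up hitting the same alias entry.
    candidates = list(kb.get_alias_candidates(mention.text))
    if not candidates:
        candidates = list(kb.get_alias_candidates(mention.text.lower()))
    return candidates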
spacy/kb/kb.pxd Normal file

@ -0,0 +1,10 @@
"""Knowledge-base for entity or concept linking."""
from cymem.cymem cimport Pool
from libc.stdint cimport int64_t
from ..vocab cimport Vocab
cdef class KnowledgeBase:
cdef Pool mem
cdef readonly Vocab vocab
cdef readonly int64_t entity_vector_length

spacy/kb/kb.pyx Normal file

@ -0,0 +1,108 @@
# cython: infer_types=True, profile=True
from pathlib import Path
from typing import Iterable, Tuple, Union
from cymem.cymem cimport Pool
from .candidate import Candidate
from ..tokens import Span
from ..util import SimpleFrozenList
from ..errors import Errors
cdef class KnowledgeBase:
"""A `KnowledgeBase` instance stores unique identifiers for entities and their textual aliases,
to support entity linking of named entities to real-world concepts.
This is an abstract class and requires its operations to be implemented.
DOCS: https://spacy.io/api/kb
"""
def __init__(self, vocab: Vocab, entity_vector_length: int):
"""Create a KnowledgeBase."""
# Make sure abstract KB is not instantiated.
if self.__class__ == KnowledgeBase:
raise TypeError(
Errors.E1046.format(cls_name=self.__class__.__name__)
)
self.vocab = vocab
self.entity_vector_length = entity_vector_length
self.mem = Pool()
def get_candidates_batch(self, mentions: Iterable[Span]) -> Iterable[Iterable[Candidate]]:
"""
Return candidate entities for specified texts. Each candidate defines the entity, the original alias,
and the prior probability of that alias resolving to that entity.
If no candidate is found for a given text, an empty list is returned.
mentions (Iterable[Span]): Mentions for which to get candidates.
RETURNS (Iterable[Iterable[Candidate]]): Identified candidates.
"""
return [self.get_candidates(span) for span in mentions]
def get_candidates(self, mention: Span) -> Iterable[Candidate]:
"""
Return candidate entities for specified text. Each candidate defines the entity, the original alias,
and the prior probability of that alias resolving to that entity.
If no candidate is found for a given text, an empty list is returned.
mention (Span): Mention for which to get candidates.
RETURNS (Iterable[Candidate]): Identified candidates.
"""
raise NotImplementedError(
Errors.E1045.format(parent="KnowledgeBase", method="get_candidates", name=self.__name__)
)
def get_vectors(self, entities: Iterable[str]) -> Iterable[Iterable[float]]:
"""
Return vectors for entities.
entities (Iterable[str]): Entity names/IDs.
RETURNS (Iterable[Iterable[float]]): Vectors for specified entities.
"""
return [self.get_vector(entity) for entity in entities]
def get_vector(self, str entity) -> Iterable[float]:
"""
Return vector for entity.
entity (str): Entity name/ID.
RETURNS (Iterable[float]): Vector for specified entity.
"""
raise NotImplementedError(
Errors.E1045.format(parent="KnowledgeBase", method="get_vector", name=self.__name__)
)
def to_bytes(self, **kwargs) -> bytes:
"""Serialize the current state to a binary string.
RETURNS (bytes): Current state as binary string.
"""
raise NotImplementedError(
Errors.E1045.format(parent="KnowledgeBase", method="to_bytes", name=self.__name__)
)
def from_bytes(self, bytes_data: bytes, *, exclude: Tuple[str] = tuple()):
"""Load state from a binary string.
bytes_data (bytes): KB state.
exclude (Tuple[str]): Properties to exclude when restoring KB.
"""
raise NotImplementedError(
Errors.E1045.format(parent="KnowledgeBase", method="from_bytes", name=self.__name__)
)
def to_disk(self, path: Union[str, Path], exclude: Iterable[str] = SimpleFrozenList()) -> None:
"""
Write KnowledgeBase content to disk.
path (Union[str, Path]): Target file path.
exclude (Iterable[str]): List of components to exclude.
"""
raise NotImplementedError(
Errors.E1045.format(parent="KnowledgeBase", method="to_disk", name=self.__name__)
)
def from_disk(self, path: Union[str, Path], exclude: Iterable[str] = SimpleFrozenList()) -> None:
"""
Load KnowledgeBase content from disk.
path (Union[str, Path]): Target file path.
exclude (Iterable[str]): List of components to exclude.
"""
raise NotImplementedError(
Errors.E1045.format(parent="KnowledgeBase", method="from_disk", name=self.__name__)
)

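Because `KnowledgeBase` now refuses direct instantiation (E1046) and raises E1045 for any method a subclass has not overridden, a custom backend only needs to implement the lookups it actually uses. A toy sketch with a plain dict as the backing store (the class and its storage are hypothetical, not part of spaCy):

from typing import Dict, Iterable, List

from spacy.kb import Candidate, KnowledgeBase
from spacy.tokens import Span
from spacy.vocab import Vocab

class DictBackedKB(KnowledgeBase):
    """Toy KB that keeps {alias: [Candidate, ...]} in a Python dict."""

    def __init__(self, vocab: Vocab, entity_vector_length: int):
        super().__init__(vocab, entity_vector_length)
        self._by_alias: Dict[str, List[Candidate]] = {}

    def get_candidates(self, mention: Span) -> Iterable[Candidate]:
        # Only the methods a pipeline actually calls need to be overridden;
        # to_bytes, from_disk etc. keep raising E1045 until implemented.
        return self._by_alias.get(mention.text, [])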

@ -1,14 +1,12 @@
"""Knowledge-base for entity or concept linking.""" """Knowledge-base for entity or concept linking."""
from cymem.cymem cimport Pool
from preshed.maps cimport PreshMap from preshed.maps cimport PreshMap
from libcpp.vector cimport vector from libcpp.vector cimport vector
from libc.stdint cimport int32_t, int64_t from libc.stdint cimport int32_t, int64_t
from libc.stdio cimport FILE from libc.stdio cimport FILE
from .vocab cimport Vocab from ..typedefs cimport hash_t
from .typedefs cimport hash_t from ..structs cimport KBEntryC, AliasC
from .structs cimport KBEntryC, AliasC from .kb cimport KnowledgeBase
ctypedef vector[KBEntryC] entry_vec ctypedef vector[KBEntryC] entry_vec
ctypedef vector[AliasC] alias_vec ctypedef vector[AliasC] alias_vec
@ -16,21 +14,7 @@ ctypedef vector[float] float_vec
ctypedef vector[float_vec] float_matrix ctypedef vector[float_vec] float_matrix
# Object used by the Entity Linker that summarizes one entity-alias candidate combination. cdef class InMemoryLookupKB(KnowledgeBase):
cdef class Candidate:
cdef readonly KnowledgeBase kb
cdef hash_t entity_hash
cdef float entity_freq
cdef vector[float] entity_vector
cdef hash_t alias_hash
cdef float prior_prob
cdef class KnowledgeBase:
cdef Pool mem
cdef readonly Vocab vocab
cdef int64_t entity_vector_length
# This maps 64bit keys (hash of unique entity string) # This maps 64bit keys (hash of unique entity string)
# to 64bit values (position of the _KBEntryC struct in the _entries vector). # to 64bit values (position of the _KBEntryC struct in the _entries vector).
# The PreshMap is pretty space efficient, as it uses open addressing. So # The PreshMap is pretty space efficient, as it uses open addressing. So


@ -1,8 +1,7 @@
 # cython: infer_types=True, profile=True
-from typing import Iterator, Iterable, Callable, Dict, Any
+from typing import Iterable, Callable, Dict, Any, Union
 import srsly
-from cymem.cymem cimport Pool
 from preshed.maps cimport PreshMap
 from cpython.exc cimport PyErr_SetFromErrno
 from libc.stdio cimport fopen, fclose, fread, fwrite, feof, fseek
@ -12,85 +11,28 @@ from libcpp.vector cimport vector
 from pathlib import Path
 import warnings

-from .typedefs cimport hash_t
-from .errors import Errors, Warnings
-from . import util
-from .util import SimpleFrozenList, ensure_path
-
-cdef class Candidate:
-    """A `Candidate` object refers to a textual mention (`alias`) that may or may not be resolved
-    to a specific `entity` from a Knowledge Base. This will be used as input for the entity linking
-    algorithm which will disambiguate the various candidates to the correct one.
-    Each candidate (alias, entity) pair is assigned to a certain prior probability.
-
-    DOCS: https://spacy.io/api/kb/#candidate_init
-    """
-
-    def __init__(self, KnowledgeBase kb, entity_hash, entity_freq, entity_vector, alias_hash, prior_prob):
-        self.kb = kb
-        self.entity_hash = entity_hash
-        self.entity_freq = entity_freq
-        self.entity_vector = entity_vector
-        self.alias_hash = alias_hash
-        self.prior_prob = prior_prob
-
-    @property
-    def entity(self):
-        """RETURNS (uint64): hash of the entity's KB ID/name"""
-        return self.entity_hash
-
-    @property
-    def entity_(self):
-        """RETURNS (str): ID/name of this entity in the KB"""
-        return self.kb.vocab.strings[self.entity_hash]
-
-    @property
-    def alias(self):
-        """RETURNS (uint64): hash of the alias"""
-        return self.alias_hash
-
-    @property
-    def alias_(self):
-        """RETURNS (str): ID of the original alias"""
-        return self.kb.vocab.strings[self.alias_hash]
-
-    @property
-    def entity_freq(self):
-        return self.entity_freq
-
-    @property
-    def entity_vector(self):
-        return self.entity_vector
-
-    @property
-    def prior_prob(self):
-        return self.prior_prob
-
-
-def get_candidates(KnowledgeBase kb, span) -> Iterator[Candidate]:
-    """
-    Return candidate entities for a given span by using the text of the span as the alias
-    and fetching appropriate entries from the index.
-    This particular function is optimized to work with the built-in KB functionality,
-    but any other custom candidate generation method can be used in combination with the KB as well.
-    """
-    return kb.get_alias_candidates(span.text)
-
-
-cdef class KnowledgeBase:
-    """A `KnowledgeBase` instance stores unique identifiers for entities and their textual aliases,
+from ..tokens import Span
+from ..typedefs cimport hash_t
+from ..errors import Errors, Warnings
+from .. import util
+from ..util import SimpleFrozenList, ensure_path
+from ..vocab cimport Vocab
+from .kb cimport KnowledgeBase
+from .candidate import Candidate as Candidate
+
+
+cdef class InMemoryLookupKB(KnowledgeBase):
+    """An `InMemoryLookupKB` instance stores unique identifiers for entities and their textual aliases,
     to support entity linking of named entities to real-world concepts.
-    DOCS: https://spacy.io/api/kb
+    DOCS: https://spacy.io/api/inmemorylookupkb
    """

    def __init__(self, Vocab vocab, entity_vector_length):
-       """Create a KnowledgeBase."""
-       self.mem = Pool()
-       self.entity_vector_length = entity_vector_length
+       """Create an InMemoryLookupKB."""
+       super().__init__(vocab, entity_vector_length)
        self._entry_index = PreshMap()
        self._alias_index = PreshMap()
-       self.vocab = vocab
        self._create_empty_vectors(dummy_hash=self.vocab.strings[""])

    def _initialize_entities(self, int64_t nr_entities):
@ -104,11 +46,6 @@ cdef class KnowledgeBase:
        self._alias_index = PreshMap(nr_aliases + 1)
        self._aliases_table = alias_vec(nr_aliases + 1)

-   @property
-   def entity_vector_length(self):
-       """RETURNS (uint64): length of the entity vectors"""
-       return self.entity_vector_length
-
    def __len__(self):
        return self.get_size_entities()

@ -286,7 +223,10 @@ cdef class KnowledgeBase:
        alias_entry.probs = probs
        self._aliases_table[alias_index] = alias_entry

-   def get_alias_candidates(self, str alias) -> Iterator[Candidate]:
+   def get_candidates(self, mention: Span) -> Iterable[Candidate]:
+       return self.get_alias_candidates(mention.text)  # type: ignore
+
+   def get_alias_candidates(self, str alias) -> Iterable[Candidate]:
        """
        Return candidate entities for an alias. Each candidate defines the entity, the original alias,
        and the prior probability of that alias resolving to that entity.

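After the rename, `InMemoryLookupKB` keeps the previous in-memory behaviour: `get_candidates` now takes a `Span`, while `get_alias_candidates` keeps the string-based lookup. A small sketch of the renamed API (the entity ID, frequency and vector are made up):

import spacy
from spacy.kb import InMemoryLookupKB

nlp = spacy.blank("en")
kb = InMemoryLookupKB(vocab=nlp.vocab, entity_vector_length=3)
kb.add_entity(entity="Q64", freq=12, entity_vector=[1.0, 0.0, 0.0])
kb.add_alias(alias="Berlin", entities=["Q64"], probabilities=[0.9])

doc = nlp("Berlin")
print(kb.get_candidates(doc[0:1]))        # Span-based lookup (new signature)
print(kb.get_alias_candidates("Berlin"))  # string-based lookup, unchanged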

@ -72,10 +72,10 @@ class CatalanLemmatizer(Lemmatizer):
            oov_forms.append(form)
    if not forms:
        forms.extend(oov_forms)
-   if not forms and string in lookup_table.keys():
-       forms.append(self.lookup_lemmatize(token)[0])
+
+   # use lookups, and fall back to the token itself
    if not forms:
-       forms.append(string)
+       forms.append(lookup_table.get(string, [string])[0])
    forms = list(dict.fromkeys(forms))
    self.cache[cache_key] = forms
    return forms


@ -280,7 +280,7 @@ _currency = (
_punct = (
    r"… …… , : ; \! \? ¿ ؟ ¡ \( \) \[ \] \{ \} < > _ # \* & 。 · । ، ۔ ؛ ٪"
)
-_quotes = r'\' " ” “ ` ´ , „ » « 「 」 『 』 【 】 《 》 〈 〉'
+_quotes = r'\' " ” “ ` ´ , „ » « 「 」 『 』 【 】 《 》 〈 〉 〈 〉 ⟦ ⟧'
_hyphens = "- — -- --- —— ~"

# Various symbols like dingbats, but also emoji


@ -53,11 +53,16 @@ class FrenchLemmatizer(Lemmatizer):
    rules = rules_table.get(univ_pos, [])
    string = string.lower()
    forms = []
+   # first try lookup in table based on upos
    if string in index:
        forms.append(string)
        self.cache[cache_key] = forms
        return forms
+
+   # then add anything in the exceptions table
    forms.extend(exceptions.get(string, []))
+
+   # if nothing found yet, use the rules
    oov_forms = []
    if not forms:
        for old, new in rules:
@ -69,12 +74,14 @@ class FrenchLemmatizer(Lemmatizer):
                    forms.append(form)
                else:
                    oov_forms.append(form)
+   # if still nothing, add the oov forms from rules
    if not forms:
        forms.extend(oov_forms)
-   if not forms and string in lookup_table.keys():
-       forms.append(self.lookup_lemmatize(token)[0])
+
+   # use lookups, which fall back to the token itself
    if not forms:
-       forms.append(string)
+       forms.append(lookup_table.get(string, [string])[0])
    forms = list(dict.fromkeys(forms))
    self.cache[cache_key] = forms
    return forms


@ -1,11 +1,15 @@
 from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
 from .stop_words import STOP_WORDS
 from .lex_attrs import LEX_ATTRS
+from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES, TOKENIZER_INFIXES
 from ...language import Language, BaseDefaults


 class AncientGreekDefaults(BaseDefaults):
     tokenizer_exceptions = TOKENIZER_EXCEPTIONS
+    prefixes = TOKENIZER_PREFIXES
+    suffixes = TOKENIZER_SUFFIXES
+    infixes = TOKENIZER_INFIXES
     lex_attr_getters = LEX_ATTRS
     stop_words = STOP_WORDS


@ -0,0 +1,46 @@
from ..char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES, LIST_CURRENCY
from ..char_classes import LIST_ICONS, ALPHA_LOWER, ALPHA_UPPER, ALPHA, HYPHENS
from ..char_classes import CONCAT_QUOTES
_prefixes = (
[
"",
"",
]
+ LIST_PUNCT
+ LIST_ELLIPSES
+ LIST_QUOTES
+ LIST_CURRENCY
+ LIST_ICONS
)
_suffixes = (
LIST_PUNCT
+ LIST_ELLIPSES
+ LIST_QUOTES
+ LIST_ICONS
+ [
"",
"",
r"(?<=[\u1F00-\u1FFF\u0370-\u03FF])[\-\.⸏]",
]
)
_infixes = (
LIST_ELLIPSES
+ LIST_ICONS
+ [
r"(?<=[0-9])[+\-\*^](?=[0-9-])",
r"(?<=[{al}{q}])\.(?=[{au}{q}])".format(
al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES
),
r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
r"(?<=[{a}0-9])(?:{h})(?=[{a}])".format(a=ALPHA, h=HYPHENS),
r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA),
r"(?<=[\u1F00-\u1FFF\u0370-\u03FF])—",
]
)
TOKENIZER_PREFIXES = _prefixes
TOKENIZER_SUFFIXES = _suffixes
TOKENIZER_INFIXES = _infixes

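The new rules mostly target the polytonic Greek ranges (\u0370-\u03FF, \u1F00-\u1FFF), for example splitting an em dash that follows a Greek character. A quick, informal check on a blank pipeline (the sample text is arbitrary):

import spacy

nlp = spacy.blank("grc")
doc = nlp("λέγει—ἔφη—ταῦτα")
# With the new infix rule, the em dashes after Greek letters should be
# separated into their own tokens.
print([t.text for t in doc])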
spacy/lang/la/__init__.py Normal file

@ -0,0 +1,18 @@
from ...language import Language, BaseDefaults
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .stop_words import STOP_WORDS
from .lex_attrs import LEX_ATTRS
class LatinDefaults(BaseDefaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
stop_words = STOP_WORDS
lex_attr_getters = LEX_ATTRS
class Latin(Language):
lang = "la"
Defaults = LatinDefaults
__all__ = ["Latin"]


@ -0,0 +1,34 @@
from ...attrs import LIKE_NUM
import re
# cf. Goyvaerts/Levithan 2009; case-insensitive, allow 4
roman_numerals_compile = re.compile(
r"(?i)^(?=[MDCLXVI])M*(C[MD]|D?C{0,4})(X[CL]|L?X{0,4})(I[XV]|V?I{0,4})$"
)
_num_words = set(
"""
unus una unum duo duae tres tria quattuor quinque sex septem octo novem decem
""".split()
)
_ordinal_words = set(
"""
primus prima primum secundus secunda secundum tertius tertia tertium
""".split()
)
def like_num(text):
if text.isdigit():
return True
if roman_numerals_compile.match(text):
return True
if text.lower() in _num_words:
return True
if text.lower() in _ordinal_words:
return True
return False
LEX_ATTRS = {LIKE_NUM: like_num}

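`like_num` flags digits, Roman numerals (via the case-insensitive pattern above) and the small sets of cardinal and ordinal words. A quick check on a blank pipeline (example tokens taken from the lists above):

import spacy

nlp = spacy.blank("la")
doc = nlp("XVII duo 1848 primus")
# All four tokens should be flagged as number-like.
print([(t.text, t.like_num) for t in doc])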

@ -0,0 +1,37 @@
# Corrected Perseus list, cf. https://wiki.digitalclassicist.org/Stopwords_for_Greek_and_Latin
STOP_WORDS = set(
"""
ab ac ad adhuc aliqui aliquis an ante apud at atque aut autem
cum cur
de deinde dum
ego enim ergo es est et etiam etsi ex
fio
haud hic
iam idem igitur ille in infra inter interim ipse is ita
magis modo mox
nam ne nec necque neque nisi non nos
o ob
per possum post pro
quae quam quare qui quia quicumque quidem quilibet quis quisnam quisquam quisque quisquis quo quoniam
sed si sic sive sub sui sum super suus
tam tamen trans tu tum
ubi uel uero
vel vero
""".split()
)


@ -0,0 +1,76 @@
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ...symbols import ORTH
from ...util import update_exc
## TODO: Look into systematically handling u/v
_exc = {
"mecum": [{ORTH: "me"}, {ORTH: "cum"}],
"tecum": [{ORTH: "te"}, {ORTH: "cum"}],
"nobiscum": [{ORTH: "nobis"}, {ORTH: "cum"}],
"vobiscum": [{ORTH: "vobis"}, {ORTH: "cum"}],
"uobiscum": [{ORTH: "uobis"}, {ORTH: "cum"}],
}
for orth in [
"A.",
"Agr.",
"Ap.",
"C.",
"Cn.",
"D.",
"F.",
"K.",
"L.",
"M'.",
"M.",
"Mam.",
"N.",
"Oct.",
"Opet.",
"P.",
"Paul.",
"Post.",
"Pro.",
"Q.",
"S.",
"Ser.",
"Sert.",
"Sex.",
"St.",
"Sta.",
"T.",
"Ti.",
"V.",
"Vol.",
"Vop.",
"U.",
"Uol.",
"Uop.",
"Ian.",
"Febr.",
"Mart.",
"Apr.",
"Mai.",
"Iun.",
"Iul.",
"Aug.",
"Sept.",
"Oct.",
"Nov.",
"Nou.",
"Dec.",
"Non.",
"Id.",
"A.D.",
"Coll.",
"Cos.",
"Ord.",
"Pl.",
"S.C.",
"Suff.",
"Trib.",
]:
_exc[orth] = [{ORTH: orth}]
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)


@ -15,7 +15,7 @@
 STOP_WORDS = set(
 """
-aan af al alle alles allebei alleen allen als altijd ander anders andere anderen aangaangde aangezien achter achterna
+aan af al alle alles allebei alleen allen als altijd ander anders andere anderen aangaande aangezien achter achterna
 afgelopen aldus alhoewel anderzijds

 ben bij bijna bijvoorbeeld behalve beide beiden beneden bent bepaald beter betere betreffende binnen binnenin boven


@ -28,7 +28,7 @@ class Russian(Language):
    assigns=["token.lemma"],
    default_config={
        "model": None,
-       "mode": "pymorphy2",
+       "mode": "pymorphy3",
        "overwrite": False,
        "scorer": {"@scorers": "spacy.lemmatizer_scorer.v1"},
    },


@ -19,33 +19,48 @@ class RussianLemmatizer(Lemmatizer):
        model: Optional[Model],
        name: str = "lemmatizer",
        *,
-       mode: str = "pymorphy2",
+       mode: str = "pymorphy3",
        overwrite: bool = False,
        scorer: Optional[Callable] = lemmatizer_score,
    ) -> None:
-       if mode == "pymorphy2":
+       if mode in {"pymorphy2", "pymorphy2_lookup"}:
            try:
                from pymorphy2 import MorphAnalyzer
            except ImportError:
                raise ImportError(
-                   "The Russian lemmatizer mode 'pymorphy2' requires the "
-                   "pymorphy2 library. Install it with: pip install pymorphy2"
+                   "The lemmatizer mode 'pymorphy2' requires the "
+                   "pymorphy2 library and dictionaries. Install them with: "
+                   "pip install pymorphy2"
+                   "# for Ukrainian dictionaries:"
+                   "pip install pymorphy2-dicts-uk"
                ) from None
            if getattr(self, "_morph", None) is None:
-               self._morph = MorphAnalyzer()
+               self._morph = MorphAnalyzer(lang="ru")
+       elif mode in {"pymorphy3", "pymorphy3_lookup"}:
+           try:
+               from pymorphy3 import MorphAnalyzer
+           except ImportError:
+               raise ImportError(
+                   "The lemmatizer mode 'pymorphy3' requires the "
+                   "pymorphy3 library and dictionaries. Install them with: "
+                   "pip install pymorphy3"
+                   "# for Ukrainian dictionaries:"
+                   "pip install pymorphy3-dicts-uk"
+               ) from None
+           if getattr(self, "_morph", None) is None:
+               self._morph = MorphAnalyzer(lang="ru")
        super().__init__(
            vocab, model, name, mode=mode, overwrite=overwrite, scorer=scorer
        )

-   def pymorphy2_lemmatize(self, token: Token) -> List[str]:
+   def _pymorphy_lemmatize(self, token: Token) -> List[str]:
        string = token.text
        univ_pos = token.pos_
        morphology = token.morph.to_dict()
        if univ_pos == "PUNCT":
            return [PUNCT_RULES.get(string, string)]
        if univ_pos not in ("ADJ", "DET", "NOUN", "NUM", "PRON", "PROPN", "VERB"):
-           # Skip unchangeable pos
-           return [string.lower()]
+           return self._pymorphy_lookup_lemmatize(token)
        analyses = self._morph.parse(string)
        filtered_analyses = []
        for analysis in analyses:
@ -53,8 +68,10 @@ class RussianLemmatizer(Lemmatizer):
                # Skip suggested parse variant for unknown word for pymorphy
                continue
            analysis_pos, _ = oc2ud(str(analysis.tag))
-           if analysis_pos == univ_pos or (
-               analysis_pos in ("NOUN", "PROPN") and univ_pos in ("NOUN", "PROPN")
+           if (
+               analysis_pos == univ_pos
+               or (analysis_pos in ("NOUN", "PROPN") and univ_pos in ("NOUN", "PROPN"))
+               or ((analysis_pos == "PRON") and (univ_pos == "DET"))
            ):
                filtered_analyses.append(analysis)
        if not len(filtered_analyses):
@ -97,13 +114,28 @@ class RussianLemmatizer(Lemmatizer):
            dict.fromkeys([analysis.normal_form for analysis in filtered_analyses])
        )

-   def pymorphy2_lookup_lemmatize(self, token: Token) -> List[str]:
+   def _pymorphy_lookup_lemmatize(self, token: Token) -> List[str]:
        string = token.text
        analyses = self._morph.parse(string)
-       if len(analyses) == 1:
-           return [analyses[0].normal_form]
+       # often multiple forms would derive from the same normal form
+       # thus check _unique_ normal forms
+       normal_forms = set([an.normal_form for an in analyses])
+       if len(normal_forms) == 1:
+           return [next(iter(normal_forms))]
        return [string]

+   def pymorphy2_lemmatize(self, token: Token) -> List[str]:
+       return self._pymorphy_lemmatize(token)
+
+   def pymorphy2_lookup_lemmatize(self, token: Token) -> List[str]:
+       return self._pymorphy_lookup_lemmatize(token)
+
+   def pymorphy3_lemmatize(self, token: Token) -> List[str]:
+       return self._pymorphy_lemmatize(token)
+
+   def pymorphy3_lookup_lemmatize(self, token: Token) -> List[str]:
+       return self._pymorphy_lookup_lemmatize(token)
+

def oc2ud(oc_tag: str) -> Tuple[str, Dict[str, str]]:
    gram_map = {

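With this change the Russian lemmatizer defaults to the pymorphy3 backend, and both backends now share the rule-based and lookup code paths. A rough sketch of selecting the mode explicitly (assumes the pymorphy3 package and its Russian dictionaries are installed):

import spacy

nlp = spacy.blank("ru")
# "pymorphy3" is now the default mode; it is spelled out here for clarity.
nlp.add_pipe("lemmatizer", config={"mode": "pymorphy3"})
nlp.initialize()
doc = nlp("студентами")
print([t.lemma_ for t in doc])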

@ -61,6 +61,11 @@ for abbr in [
{ORTH: "2к23", NORM: "2023"}, {ORTH: "2к23", NORM: "2023"},
{ORTH: "2к24", NORM: "2024"}, {ORTH: "2к24", NORM: "2024"},
{ORTH: "2к25", NORM: "2025"}, {ORTH: "2к25", NORM: "2025"},
{ORTH: "2к26", NORM: "2026"},
{ORTH: "2к27", NORM: "2027"},
{ORTH: "2к28", NORM: "2028"},
{ORTH: "2к29", NORM: "2029"},
{ORTH: "2к30", NORM: "2030"},
]: ]:
_exc[abbr[ORTH]] = [abbr] _exc[abbr[ORTH]] = [abbr]
@ -268,8 +273,8 @@ for abbr in [
{ORTH: "з-ка", NORM: "заимка"}, {ORTH: "з-ка", NORM: "заимка"},
{ORTH: "п-к", NORM: "починок"}, {ORTH: "п-к", NORM: "починок"},
{ORTH: "киш.", NORM: "кишлак"}, {ORTH: "киш.", NORM: "кишлак"},
{ORTH: "п. ст. ", NORM: "поселок станция"}, {ORTH: "п. ст.", NORM: "поселок станция"},
{ORTH: "п. ж/д ст. ", NORM: "поселок при железнодорожной станции"}, {ORTH: "п. ж/д ст.", NORM: "поселок при железнодорожной станции"},
{ORTH: "ж/д бл-ст", NORM: "железнодорожный блокпост"}, {ORTH: "ж/д бл-ст", NORM: "железнодорожный блокпост"},
{ORTH: "ж/д б-ка", NORM: "железнодорожная будка"}, {ORTH: "ж/д б-ка", NORM: "железнодорожная будка"},
{ORTH: "ж/д в-ка", NORM: "железнодорожная ветка"}, {ORTH: "ж/д в-ка", NORM: "железнодорожная ветка"},
@ -280,12 +285,12 @@ for abbr in [
{ORTH: "ж/д п.п.", NORM: "железнодорожный путевой пост"}, {ORTH: "ж/д п.п.", NORM: "железнодорожный путевой пост"},
{ORTH: "ж/д о.п.", NORM: "железнодорожный остановочный пункт"}, {ORTH: "ж/д о.п.", NORM: "железнодорожный остановочный пункт"},
{ORTH: "ж/д рзд.", NORM: "железнодорожный разъезд"}, {ORTH: "ж/д рзд.", NORM: "железнодорожный разъезд"},
{ORTH: "ж/д ст. ", NORM: "железнодорожная станция"}, {ORTH: "ж/д ст.", NORM: "железнодорожная станция"},
{ORTH: "м-ко", NORM: "местечко"}, {ORTH: "м-ко", NORM: "местечко"},
{ORTH: "д.", NORM: "деревня"}, {ORTH: "д.", NORM: "деревня"},
{ORTH: "с.", NORM: "село"}, {ORTH: "с.", NORM: "село"},
{ORTH: "сл.", NORM: "слобода"}, {ORTH: "сл.", NORM: "слобода"},
{ORTH: "ст. ", NORM: "станция"}, {ORTH: "ст.", NORM: "станция"},
{ORTH: "ст-ца", NORM: "станица"}, {ORTH: "ст-ца", NORM: "станица"},
{ORTH: "у.", NORM: "улус"}, {ORTH: "у.", NORM: "улус"},
{ORTH: "х.", NORM: "хутор"}, {ORTH: "х.", NORM: "хутор"},
@ -388,8 +393,9 @@ for abbr in [
{ORTH: "прим.", NORM: "примечание"}, {ORTH: "прим.", NORM: "примечание"},
{ORTH: "прим.ред.", NORM: "примечание редакции"}, {ORTH: "прим.ред.", NORM: "примечание редакции"},
{ORTH: "см. также", NORM: "смотри также"}, {ORTH: "см. также", NORM: "смотри также"},
{ORTH: "кв.м.", NORM: "квадрантный метр"}, {ORTH: "см.", NORM: "смотри"},
{ORTH: "м2", NORM: "квадрантный метр"}, {ORTH: "кв.м.", NORM: "квадратный метр"},
{ORTH: "м2", NORM: "квадратный метр"},
{ORTH: "б/у", NORM: "бывший в употреблении"}, {ORTH: "б/у", NORM: "бывший в употреблении"},
{ORTH: "сокр.", NORM: "сокращение"}, {ORTH: "сокр.", NORM: "сокращение"},
{ORTH: "чел.", NORM: "человек"}, {ORTH: "чел.", NORM: "человек"},


@ -1,9 +1,17 @@
+from .lex_attrs import LEX_ATTRS
+from .punctuation import TOKENIZER_INFIXES, TOKENIZER_SUFFIXES, TOKENIZER_PREFIXES
 from .stop_words import STOP_WORDS
+from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
 from ...language import Language, BaseDefaults


 class SlovenianDefaults(BaseDefaults):
     stop_words = STOP_WORDS
+    tokenizer_exceptions = TOKENIZER_EXCEPTIONS
+    prefixes = TOKENIZER_PREFIXES
+    infixes = TOKENIZER_INFIXES
+    suffixes = TOKENIZER_SUFFIXES
+    lex_attr_getters = LEX_ATTRS


 class Slovenian(Language):

spacy/lang/sl/lex_attrs.py Normal file

@ -0,0 +1,145 @@
from ...attrs import LIKE_NUM
from ...attrs import IS_CURRENCY
import unicodedata
_num_words = set(
"""
nula ničla nič ena dva tri štiri pet šest sedem osem
devet deset enajst dvanajst trinajst štirinajst petnajst
šestnajst sedemnajst osemnajst devetnajst dvajset trideset štirideset
petdeset šestdest sedemdeset osemdeset devedeset sto tisoč
milijon bilijon trilijon kvadrilijon nešteto
en eden enega enemu ennem enim enih enima enimi ene eni eno
dveh dvema dvem dvoje trije treh trem tremi troje štirje štirih štirim štirimi
petih petim petimi šestih šestim šestimi sedmih sedmim sedmimi osmih osmim osmimi
devetih devetim devetimi desetih desetim desetimi enajstih enajstim enajstimi
dvanajstih dvanajstim dvanajstimi trinajstih trinajstim trinajstimi
šestnajstih šestnajstim šestnajstimi petnajstih petnajstim petnajstimi
sedemnajstih sedemnajstim sedemnajstimi osemnajstih osemnajstim osemnajstimi
devetnajstih devetnajstim devetnajstimi dvajsetih dvajsetim dvajsetimi
""".split()
)
_ordinal_words = set(
"""
prvi drugi tretji četrti peti šesti sedmi osmi
deveti deseti enajsti dvanajsti trinajsti štirinajsti
petnajsti šestnajsti sedemnajsti osemnajsti devetnajsti
dvajseti trideseti štirideseti petdeseti šestdeseti sedemdeseti
osemdeseti devetdeseti stoti tisoči milijonti bilijonti
trilijonti kvadrilijonti nešteti
prva druga tretja četrta peta šesta sedma osma
deveta deseta enajsta dvanajsta trinajsta štirnajsta
petnajsta šestnajsta sedemnajsta osemnajsta devetnajsta
dvajseta trideseta štirideseta petdeseta šestdeseta sedemdeseta
osemdeseta devetdeseta stota tisoča milijonta bilijonta
trilijonta kvadrilijonta nešteta
prvo drugo tretje četrto peto šestro sedmo osmo
deveto deseto enajsto dvanajsto trinajsto štirnajsto
petnajsto šestnajsto sedemnajsto osemnajsto devetnajsto
dvajseto trideseto štirideseto petdeseto šestdeseto sedemdeseto
osemdeseto devetdeseto stoto tisočo milijonto bilijonto
trilijonto kvadrilijonto nešteto
prvega drugega tretjega četrtega petega šestega sedmega osmega
devega desetega enajstega dvanajstega trinajstega štirnajstega
petnajstega šestnajstega sedemnajstega osemnajstega devetnajstega
dvajsetega tridesetega štiridesetega petdesetega šestdesetega sedemdesetega
osemdesetega devetdesetega stotega tisočega milijontega bilijontega
trilijontega kvadrilijontega neštetega
prvemu drugemu tretjemu četrtemu petemu šestemu sedmemu osmemu devetemu desetemu
enajstemu dvanajstemu trinajstemu štirnajstemu petnajstemu šestnajstemu sedemnajstemu
osemnajstemu devetnajstemu dvajsetemu tridesetemu štiridesetemu petdesetemu šestdesetemu
sedemdesetemu osemdesetemu devetdesetemu stotemu tisočemu milijontemu bilijontemu
trilijontemu kvadrilijontemu neštetemu
prvem drugem tretjem četrtem petem šestem sedmem osmem devetem desetem
enajstem dvanajstem trinajstem štirnajstem petnajstem šestnajstem sedemnajstem
osemnajstem devetnajstem dvajsetem tridesetem štiridesetem petdesetem šestdesetem
sedemdesetem osemdesetem devetdesetem stotem tisočem milijontem bilijontem
trilijontem kvadrilijontem neštetem
prvim drugim tretjim četrtim petim šestim sedtim osmim devetim desetim
enajstim dvanajstim trinajstim štirnajstim petnajstim šestnajstim sedemnajstim
osemnajstim devetnajstim dvajsetim tridesetim štiridesetim petdesetim šestdesetim
sedemdesetim osemdesetim devetdesetim stotim tisočim milijontim bilijontim
trilijontim kvadrilijontim neštetim
prvih drugih tretjih četrthih petih šestih sedmih osmih deveth desetih
enajstih dvanajstih trinajstih štirnajstih petnajstih šestnajstih sedemnajstih
osemnajstih devetnajstih dvajsetih tridesetih štiridesetih petdesetih šestdesetih
sedemdesetih osemdesetih devetdesetih stotih tisočih milijontih bilijontih
trilijontih kvadrilijontih nešteth
prvima drugima tretjima četrtima petima šestima sedmima osmima devetima desetima
enajstima dvanajstima trinajstima štirnajstima petnajstima šestnajstima sedemnajstima
osemnajstima devetnajstima dvajsetima tridesetima štiridesetima petdesetima šestdesetima
sedemdesetima osemdesetima devetdesetima stotima tisočima milijontima bilijontima
trilijontima kvadrilijontima neštetima
prve druge četrte pete šeste sedme osme devete desete
enajste dvanajste trinajste štirnajste petnajste šestnajste sedemnajste
osemnajste devetnajste dvajsete tridesete štiridesete petdesete šestdesete
sedemdesete osemdesete devetdesete stote tisoče milijonte bilijonte
trilijonte kvadrilijonte neštete
prvimi drugimi tretjimi četrtimi petimi šestimi sedtimi osmimi devetimi desetimi
enajstimi dvanajstimi trinajstimi štirnajstimi petnajstimi šestnajstimi sedemnajstimi
osemnajstimi devetnajstimi dvajsetimi tridesetimi štiridesetimi petdesetimi šestdesetimi
sedemdesetimi osemdesetimi devetdesetimi stotimi tisočimi milijontimi bilijontimi
trilijontimi kvadrilijontimi neštetimi
""".split()
)
_currency_words = set(
"""
evro evra evru evrom evrov evroma evrih evrom evre evri evr eur
cent centa centu cenom centov centoma centih centom cente centi
dolar dolarja dolarji dolarju dolarjem dolarjev dolarjema dolarjih dolarje usd
tolar tolarja tolarji tolarju tolarjem tolarjev tolarjema tolarjih tolarje tol
dinar dinarja dinarji dinarju dinarjem dinarjev dinarjema dinarjih dinarje din
funt funta funti funtu funtom funtov funtoma funtih funte gpb
forint forinta forinti forintu forintom forintov forintoma forintih forinte
zlot zlota zloti zlotu zlotom zlotov zlotoma zlotih zlote
rupij rupija rupiji rupiju rupijem rupijev rupijema rupijih rupije
jen jena jeni jenu jenom jenov jenoma jenih jene
kuna kuni kune kuno kun kunama kunah kunam kunami
marka marki marke markama markah markami
""".split()
)
def like_num(text):
if text.startswith(("+", "-", "±", "~")):
text = text[1:]
text = text.replace(",", "").replace(".", "")
if text.isdigit():
return True
if text.count("/") == 1:
num, denom = text.split("/")
if num.isdigit() and denom.isdigit():
return True
text_lower = text.lower()
if text_lower in _num_words:
return True
if text_lower in _ordinal_words:
return True
return False
def is_currency(text):
text_lower = text.lower()
if text in _currency_words:
return True
for char in text:
if unicodedata.category(char) != "Sc":
return False
return True
LEX_ATTRS = {LIKE_NUM: like_num, IS_CURRENCY: is_currency}

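A few informal checks of the helpers above (the sample values are taken from the word lists):

from spacy.lang.sl.lex_attrs import like_num, is_currency

print(like_num("petnajst"))  # number word -> True
print(like_num("3/4"))       # simple fraction -> True
print(is_currency("€"))      # currency symbol -> True
print(is_currency("evro"))   # currency word -> True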

@ -0,0 +1,84 @@
from ..char_classes import (
LIST_ELLIPSES,
LIST_ICONS,
HYPHENS,
LIST_PUNCT,
LIST_QUOTES,
CURRENCY,
UNITS,
PUNCT,
LIST_CURRENCY,
CONCAT_QUOTES,
)
from ..char_classes import CONCAT_QUOTES, ALPHA_LOWER, ALPHA_UPPER, ALPHA
from ..char_classes import merge_chars
from ..punctuation import TOKENIZER_PREFIXES as BASE_TOKENIZER_PREFIXES
INCLUDE_SPECIAL = ["\\+", "\\/", "\\", "\\¯", "\\=", "\\×"] + HYPHENS.split("|")
_prefixes = INCLUDE_SPECIAL + BASE_TOKENIZER_PREFIXES
_suffixes = (
INCLUDE_SPECIAL
+ LIST_PUNCT
+ LIST_ELLIPSES
+ LIST_QUOTES
+ LIST_ICONS
+ [
r"(?<=°[FfCcKk])\.",
r"(?<=[0-9])(?:{c})".format(c=CURRENCY),
r"(?<=[0-9])(?:{u})".format(u=UNITS),
r"(?<=[{al}{e}{p}(?:{q})])\.".format(
al=ALPHA_LOWER, e=r"%²\-\+", q=CONCAT_QUOTES, p=PUNCT
),
r"(?<=[{au}][{au}])\.".format(au=ALPHA_UPPER),
# split initials like J.K. Rowling
r"(?<=[A-Z]\.)(?:[A-Z].)",
]
)
# A list of all suffixes following a hyphen that shouldn't be split off (e.g. BTC-jev)
# source: Obeliks tokenizer - https://github.com/clarinsi/obeliks/blob/master/obeliks/res/TokRulesPart1.txt
CONCAT_QUOTES = CONCAT_QUOTES.replace("'", "")
HYPHENS_PERMITTED = (
"((a)|(evemu)|(evskega)|(i)|(jevega)|(jevska)|(jevskimi)|(jinemu)|(oma)|(ovim)|"
"(ovski)|(e)|(evi)|(evskem)|(ih)|(jevem)|(jevske)|(jevsko)|(jini)|(ov)|(ovima)|"
"(ovskih)|(em)|(evih)|(evskemu)|(ja)|(jevemu)|(jevskega)|(ji)|(jinih)|(ova)|"
"(ovimi)|(ovskim)|(ema)|(evim)|(evski)|(je)|(jevi)|(jevskem)|(jih)|(jinim)|"
"(ove)|(ovo)|(ovskima)|(ev)|(evima)|(evskih)|(jem)|(jevih)|(jevskemu)|(jin)|"
"(jinima)|(ovega)|(ovska)|(ovskimi)|(eva)|(evimi)|(evskim)|(jema)|(jevim)|"
"(jevski)|(jina)|(jinimi)|(ovem)|(ovske)|(ovsko)|(eve)|(evo)|(evskima)|(jev)|"
"(jevima)|(jevskih)|(jine)|(jino)|(ovemu)|(ovskega)|(u)|(evega)|(evska)|"
"(evskimi)|(jeva)|(jevimi)|(jevskim)|(jinega)|(ju)|(ovi)|(ovskem)|(evem)|"
"(evske)|(evsko)|(jeve)|(jevo)|(jevskima)|(jinem)|(om)|(ovih)|(ovskemu)|"
"(ovec)|(ovca)|(ovcu)|(ovcem)|(ovcev)|(ovcema)|(ovcih)|(ovci)|(ovce)|(ovcimi)|"
"(evec)|(evca)|(evcu)|(evcem)|(evcev)|(evcema)|(evcih)|(evci)|(evce)|(evcimi)|"
"(jevec)|(jevca)|(jevcu)|(jevcem)|(jevcev)|(jevcema)|(jevcih)|(jevci)|(jevce)|"
"(jevcimi)|(ovka)|(ovke)|(ovki)|(ovko)|(ovk)|(ovkama)|(ovkah)|(ovkam)|(ovkami)|"
"(evka)|(evke)|(evki)|(evko)|(evk)|(evkama)|(evkah)|(evkam)|(evkami)|(jevka)|"
"(jevke)|(jevki)|(jevko)|(jevk)|(jevkama)|(jevkah)|(jevkam)|(jevkami)|(timi)|"
"(im)|(ima)|(a)|(imi)|(e)|(o)|(ega)|(ti)|(em)|(tih)|(emu)|(tim)|(i)|(tima)|"
"(ih)|(ta)|(te)|(to)|(tega)|(tem)|(temu))"
)
_infixes = (
LIST_ELLIPSES
+ LIST_ICONS
+ [
r"(?<=[0-9])[+\-\*^](?=[0-9-])",
r"(?<=[{al}{q}])\.(?=[{au}{q}])".format(
al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES
),
r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
r"(?<=[{a}0-9])(?:{h})(?!{hp}$)(?=[{a}])".format(
a=ALPHA, h=HYPHENS, hp=HYPHENS_PERMITTED
),
r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA),
]
)
TOKENIZER_PREFIXES = _prefixes
TOKENIZER_SUFFIXES = _suffixes
TOKENIZER_INFIXES = _infixes

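The HYPHENS_PERMITTED lookahead keeps inflectional suffixes such as "-jev" attached, while other hyphenated pairs still split on the generic hyphen infix. A quick, informal check on a blank pipeline (example strings only):

import spacy

nlp = spacy.blank("sl")
# "BTC-jev" should stay a single token because "jev" is a permitted suffix.
print([t.text for t in nlp("BTC-jev")])
# A hyphen between two ordinary words should still be split off.
print([t.text for t in nlp("Ljubljana-Maribor")])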

@ -1,326 +1,84 @@
# Source: https://github.com/stopwords-iso/stopwords-sl
# Removed various words that are not normally considered stop words, such as months.
STOP_WORDS = set(
"""
a a ali
ali
b b bi bil bila bile bili bilo biti blizu bo bodo bojo bolj bom bomo
bi boste bova boš brez
bil
bila c cel cela celi celo
bile
bili č če često četrta četrtek četrti četrto čez čigav
bilo
biti d da daleč dan danes datum deset deseta deseti deseto devet
blizu deveta deveti deveto do dober dobra dobri dobro dokler dol dolg
bo dolga dolgi dovolj drug druga drugi drugo dva dve
bodo
bolj e eden en ena ene eni enkrat eno etc.
bom
bomo
boste
bova
boš
brez
c
cel
cela
celi
celo
d
da
daleč
dan
danes
do
dober
dobra
dobri
dobro
dokler
dol
dovolj
e
eden
en
ena
ene
eni
enkrat
eno
etc.
f f
g
g. g g. ga ga. gor gospa gospod
ga
ga. h halo
gor
gospa i idr. ii iii in iv ix iz
gospod
h j jaz je ji jih jim jo jutri
halo
i k kadarkoli kaj kajti kako kakor kamor kamorkoli kar karkoli
idr. katerikoli kdaj kdo kdorkoli ker ki kje kjer kjerkoli
ii ko koder koderkoli koga komu kot kratek kratka kratke kratki
iii
in l lahka lahke lahki lahko le lep lepa lepe lepi lepo leto
iv
ix m majhen majhna majhni malce malo manj me med medtem mene
iz mesec mi midva midve mnogo moj moja moje mora morajo moram
j moramo morate moraš morem mu
jaz
je n na nad naj najina najino najmanj naju največ nam narobe
ji nas nato nazaj naš naša naše ne nedavno nedelja nek neka
jih nekaj nekatere nekateri nekatero nekdo neke nekega neki
jim nekje neko nekoga nekoč ni nikamor nikdar nikjer nikoli
jo nič nje njega njegov njegova njegovo njej njemu njen
k njena njeno nji njih njihov njihova njihovo njiju njim
kadarkoli njo njun njuna njuno no nocoj npr.
kaj
kajti o ob oba obe oboje od odprt odprta odprti okoli on
kako onadva one oni onidve osem osma osmi osmo oz.
kakor
kamor p pa pet peta petek peti peto po pod pogosto poleg poln
kamorkoli polna polni polno ponavadi ponedeljek ponovno potem
kar povsod pozdravljen pozdravljeni prav prava prave pravi
karkoli pravo prazen prazna prazno prbl. precej pred prej preko
katerikoli pri pribl. približno primer pripravljen pripravljena
kdaj pripravljeni proti prva prvi prvo
kdo
kdorkoli r ravno redko res reč
ker
ki s saj sam sama same sami samo se sebe sebi sedaj sedem
kje sedma sedmi sedmo sem seveda si sicer skoraj skozi slab sm
kjer so sobota spet sreda srednja srednji sta ste stran stvar sva
kjerkoli
ko š šest šesta šesti šesto štiri
koderkoli
koga t ta tak taka take taki tako takoj tam te tebe tebi tega
komu težak težka težki težko ti tista tiste tisti tisto tj.
kot tja to toda torek tretja tretje tretji tri tu tudi tukaj
l tvoj tvoja tvoje
le
lep
lepa
lepe
lepi
lepo
m
manj
me
med
medtem
mene
mi
midva
midve
mnogo
moj
moja
moje
mora
morajo
moram
moramo
morate
moraš
morem
mu
n
na
nad
naj
najina
najino
najmanj
naju
največ
nam
nas
nato
nazaj
naš
naša
naše
ne
nedavno
nek
neka
nekaj
nekatere
nekateri
nekatero
nekdo
neke
nekega
neki
nekje
neko
nekoga
nekoč
ni
nikamor
nikdar
nikjer
nikoli
nič
nje
njega
njegov
njegova
njegovo
njej
njemu
njen
njena
njeno
nji
njih
njihov
njihova
njihovo
njiju
njim
njo
njun
njuna
njuno
no
nocoj
npr.
o
ob
oba
obe
oboje
od
okoli
on
onadva
one
oni
onidve
oz.
p
pa
po
pod
pogosto
poleg
ponavadi
ponovno
potem
povsod
prbl.
precej
pred
prej
preko
pri
pribl.
približno
proti
r
redko
res
s
saj
sam
sama
same
sami
samo
se
sebe
sebi
sedaj
sem
seveda
si
sicer
skoraj
skozi
smo
so
spet
sta
ste
sva
t
ta
tak
taka
take
taki
tako
takoj
tam
te
tebe
tebi
tega
ti
tista
tiste
tisti
tisto
tj.
tja
to
toda
tu
tudi
tukaj
tvoj
tvoja
tvoje
u u
v
vaju v vaju vam vas vaš vaša vaše ve vedno velik velika veliki
vam veliko vendar ves več vi vidva vii viii visok visoka visoke
vas visoki vsa vsaj vsak vsaka vsakdo vsake vsaki vsakomur vse
vaš vsega vsi vso včasih včeraj
vaša
vaše
ve
vedno
vendar
ves
več
vi
vidva
vii
viii
vsa
vsaj
vsak
vsaka
vsakdo
vsake
vsaki
vsakomur
vse
vsega
vsi
vso
včasih
x x
z
za z za zadaj zadnji zakaj zaprta zaprti zaprto zdaj zelo zunaj
zadaj
zadnji ž že
zakaj
zdaj
zelo
zunaj
č
če
često
čez
čigav
š
ž
že
""".split() """.split()
) )


@ -0,0 +1,272 @@
from typing import Dict, List
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ...symbols import ORTH, NORM
from ...util import update_exc
_exc: Dict[str, List[Dict]] = {}
_other_exc = {
"t.i.": [{ORTH: "t.", NORM: "tako"}, {ORTH: "i.", NORM: "imenovano"}],
"t.j.": [{ORTH: "t.", NORM: "to"}, {ORTH: "j.", NORM: "je"}],
"T.j.": [{ORTH: "T.", NORM: "to"}, {ORTH: "j.", NORM: "je"}],
"d.o.o.": [
{ORTH: "d.", NORM: "družba"},
{ORTH: "o.", NORM: "omejeno"},
{ORTH: "o.", NORM: "odgovornostjo"},
],
"D.O.O.": [
{ORTH: "D.", NORM: "družba"},
{ORTH: "O.", NORM: "omejeno"},
{ORTH: "O.", NORM: "odgovornostjo"},
],
"d.n.o.": [
{ORTH: "d.", NORM: "družba"},
{ORTH: "n.", NORM: "neomejeno"},
{ORTH: "o.", NORM: "odgovornostjo"},
],
"D.N.O.": [
{ORTH: "D.", NORM: "družba"},
{ORTH: "N.", NORM: "neomejeno"},
{ORTH: "O.", NORM: "odgovornostjo"},
],
"d.d.": [{ORTH: "d.", NORM: "delniška"}, {ORTH: "d.", NORM: "družba"}],
"D.D.": [{ORTH: "D.", NORM: "delniška"}, {ORTH: "D.", NORM: "družba"}],
"s.p.": [{ORTH: "s.", NORM: "samostojni"}, {ORTH: "p.", NORM: "podjetnik"}],
"S.P.": [{ORTH: "S.", NORM: "samostojni"}, {ORTH: "P.", NORM: "podjetnik"}],
"l.r.": [{ORTH: "l.", NORM: "lastno"}, {ORTH: "r.", NORM: "ročno"}],
"le-te": [{ORTH: "le"}, {ORTH: "-"}, {ORTH: "te"}],
"Le-te": [{ORTH: "Le"}, {ORTH: "-"}, {ORTH: "te"}],
"le-ti": [{ORTH: "le"}, {ORTH: "-"}, {ORTH: "ti"}],
"Le-ti": [{ORTH: "Le"}, {ORTH: "-"}, {ORTH: "ti"}],
"le-to": [{ORTH: "le"}, {ORTH: "-"}, {ORTH: "to"}],
"Le-to": [{ORTH: "Le"}, {ORTH: "-"}, {ORTH: "to"}],
"le-ta": [{ORTH: "le"}, {ORTH: "-"}, {ORTH: "ta"}],
"Le-ta": [{ORTH: "Le"}, {ORTH: "-"}, {ORTH: "ta"}],
"le-tega": [{ORTH: "le"}, {ORTH: "-"}, {ORTH: "tega"}],
"Le-tega": [{ORTH: "Le"}, {ORTH: "-"}, {ORTH: "tega"}],
}
_exc.update(_other_exc)
for exc_data in [
{ORTH: "adm.", NORM: "administracija"},
{ORTH: "aer.", NORM: "aeronavtika"},
{ORTH: "agr.", NORM: "agronomija"},
{ORTH: "amer.", NORM: "ameriško"},
{ORTH: "anat.", NORM: "anatomija"},
{ORTH: "angl.", NORM: "angleški"},
{ORTH: "ant.", NORM: "antonim"},
{ORTH: "antr.", NORM: "antropologija"},
{ORTH: "apr.", NORM: "april"},
{ORTH: "arab.", NORM: "arabsko"},
{ORTH: "arheol.", NORM: "arheologija"},
{ORTH: "arhit.", NORM: "arhitektura"},
{ORTH: "avg.", NORM: "avgust"},
{ORTH: "avstr.", NORM: "avstrijsko"},
{ORTH: "avt.", NORM: "avtomobilizem"},
{ORTH: "bibl.", NORM: "biblijsko"},
{ORTH: "biokem.", NORM: "biokemija"},
{ORTH: "biol.", NORM: "biologija"},
{ORTH: "bolg.", NORM: "bolgarski"},
{ORTH: "bot.", NORM: "botanika"},
{ORTH: "cit.", NORM: "citat"},
{ORTH: "daj.", NORM: "dajalnik"},
{ORTH: "del.", NORM: "deležnik"},
{ORTH: "ed.", NORM: "ednina"},
{ORTH: "etn.", NORM: "etnografija"},
{ORTH: "farm.", NORM: "farmacija"},
{ORTH: "filat.", NORM: "filatelija"},
{ORTH: "filoz.", NORM: "filozofija"},
{ORTH: "fin.", NORM: "finančništvo"},
{ORTH: "fiz.", NORM: "fizika"},
{ORTH: "fot.", NORM: "fotografija"},
{ORTH: "fr.", NORM: "francoski"},
{ORTH: "friz.", NORM: "frizerstvo"},
{ORTH: "gastr.", NORM: "gastronomija"},
{ORTH: "geogr.", NORM: "geografija"},
{ORTH: "geol.", NORM: "geologija"},
{ORTH: "geom.", NORM: "geometrija"},
{ORTH: "germ.", NORM: "germanski"},
{ORTH: "gl.", NORM: "glej"},
{ORTH: "glag.", NORM: "glagolski"},
{ORTH: "glasb.", NORM: "glasba"},
{ORTH: "gled.", NORM: "gledališče"},
{ORTH: "gost.", NORM: "gostinstvo"},
{ORTH: "gozd.", NORM: "gozdarstvo"},
{ORTH: "gr.", NORM: "grški"},
{ORTH: "grad.", NORM: "gradbeništvo"},
{ORTH: "hebr.", NORM: "hebrejsko"},
{ORTH: "hrv.", NORM: "hrvaško"},
{ORTH: "ide.", NORM: "indoevropsko"},
{ORTH: "igr.", NORM: "igre"},
{ORTH: "im.", NORM: "imenovalnik"},
{ORTH: "iron.", NORM: "ironično"},
{ORTH: "it.", NORM: "italijanski"},
{ORTH: "itd.", NORM: "in tako dalje"},
{ORTH: "itn.", NORM: "in tako naprej"},
{ORTH: "ipd.", NORM: "in podobno"},
{ORTH: "jap.", NORM: "japonsko"},
{ORTH: "jul.", NORM: "julij"},
{ORTH: "jun.", NORM: "junij"},
{ORTH: "kit.", NORM: "kitajsko"},
{ORTH: "knj.", NORM: "knjižno"},
{ORTH: "knjiž.", NORM: "knjižno"},
{ORTH: "kor.", NORM: "koreografija"},
{ORTH: "lat.", NORM: "latinski"},
{ORTH: "les.", NORM: "lesna stroka"},
{ORTH: "lingv.", NORM: "lingvistika"},
{ORTH: "lit.", NORM: "literarni"},
{ORTH: "ljubk.", NORM: "ljubkovalno"},
{ORTH: "lov.", NORM: "lovstvo"},
{ORTH: "m.", NORM: "moški"},
{ORTH: "mak.", NORM: "makedonski"},
{ORTH: "mar.", NORM: "marec"},
{ORTH: "mat.", NORM: "matematika"},
{ORTH: "med.", NORM: "medicina"},
{ORTH: "meh.", NORM: "mehiško"},
{ORTH: "mest.", NORM: "mestnik"},
{ORTH: "mdr.", NORM: "med drugim"},
{ORTH: "min.", NORM: "mineralogija"},
{ORTH: "mitol.", NORM: "mitologija"},
{ORTH: "mn.", NORM: "množina"},
{ORTH: "mont.", NORM: "montanistika"},
{ORTH: "muz.", NORM: "muzikologija"},
{ORTH: "nam.", NORM: "namenilnik"},
{ORTH: "nar.", NORM: "narečno"},
{ORTH: "nav.", NORM: "navadno"},
{ORTH: "nedol.", NORM: "nedoločnik"},
{ORTH: "nedov.", NORM: "nedovršni"},
{ORTH: "neprav.", NORM: "nepravilno"},
{ORTH: "nepreh.", NORM: "neprehodno"},
{ORTH: "neskl.", NORM: "nesklonljiv(o)"},
{ORTH: "nestrok.", NORM: "nestrokovno"},
{ORTH: "num.", NORM: "numizmatika"},
{ORTH: "npr.", NORM: "na primer"},
{ORTH: "obrt.", NORM: "obrtništvo"},
{ORTH: "okt.", NORM: "oktober"},
{ORTH: "or.", NORM: "orodnik"},
{ORTH: "os.", NORM: "oseba"},
{ORTH: "otr.", NORM: "otroško"},
{ORTH: "oz.", NORM: "oziroma"},
{ORTH: "pal.", NORM: "paleontologija"},
{ORTH: "papir.", NORM: "papirništvo"},
{ORTH: "ped.", NORM: "pedagogika"},
{ORTH: "pisar.", NORM: "pisarniško"},
{ORTH: "pog.", NORM: "pogovorno"},
{ORTH: "polit.", NORM: "politika"},
{ORTH: "polj.", NORM: "poljsko"},
{ORTH: "poljud.", NORM: "poljudno"},
{ORTH: "preg.", NORM: "pregovor"},
{ORTH: "preh.", NORM: "prehodno"},
{ORTH: "pren.", NORM: "preneseno"},
{ORTH: "prid.", NORM: "pridevnik"},
{ORTH: "prim.", NORM: "primerjaj"},
{ORTH: "prisl.", NORM: "prislov"},
{ORTH: "psih.", NORM: "psihologija"},
{ORTH: "psiht.", NORM: "psihiatrija"},
{ORTH: "rad.", NORM: "radiotehnika"},
{ORTH: "rač.", NORM: "računalništvo"},
{ORTH: "rib.", NORM: "ribištvo"},
{ORTH: "rod.", NORM: "rodilnik"},
{ORTH: "rus.", NORM: "rusko"},
{ORTH: "s.", NORM: "srednji"},
{ORTH: "sam.", NORM: "samostalniški"},
{ORTH: "sed.", NORM: "sedanjik"},
{ORTH: "sep.", NORM: "september"},
{ORTH: "slabš.", NORM: "slabšalno"},
{ORTH: "slovan.", NORM: "slovansko"},
{ORTH: "slovaš.", NORM: "slovaško"},
{ORTH: "srb.", NORM: "srbsko"},
{ORTH: "star.", NORM: "starinsko"},
{ORTH: "stil.", NORM: "stilno"},
{ORTH: "sv.", NORM: "svet(i)"},
{ORTH: "teh.", NORM: "tehnika"},
{ORTH: "tisk.", NORM: "tiskarstvo"},
{ORTH: "tj.", NORM: "to je"},
{ORTH: "tož.", NORM: "tožilnik"},
{ORTH: "trg.", NORM: "trgovina"},
{ORTH: "ukr.", NORM: "ukrajinski"},
{ORTH: "um.", NORM: "umetnost"},
{ORTH: "vel.", NORM: "velelnik"},
{ORTH: "vet.", NORM: "veterina"},
{ORTH: "vez.", NORM: "veznik"},
{ORTH: "vn.", NORM: "visokonemško"},
{ORTH: "voj.", NORM: "vojska"},
{ORTH: "vrtn.", NORM: "vrtnarstvo"},
{ORTH: "vulg.", NORM: "vulgarno"},
{ORTH: "vznes.", NORM: "vzneseno"},
{ORTH: "zal.", NORM: "založništvo"},
{ORTH: "zastar.", NORM: "zastarelo"},
{ORTH: "zgod.", NORM: "zgodovina"},
{ORTH: "zool.", NORM: "zoologija"},
{ORTH: "čeb.", NORM: "čebelarstvo"},
{ORTH: "češ.", NORM: "češki"},
{ORTH: "člov.", NORM: "človeškost"},
{ORTH: "šah.", NORM: "šahovski"},
{ORTH: "šalj.", NORM: "šaljivo"},
{ORTH: "šp.", NORM: "španski"},
{ORTH: "špan.", NORM: "špansko"},
{ORTH: "šport.", NORM: "športni"},
{ORTH: "štev.", NORM: "števnik"},
{ORTH: "šved.", NORM: "švedsko"},
{ORTH: "švic.", NORM: "švicarsko"},
{ORTH: "ž.", NORM: "ženski"},
{ORTH: "žarg.", NORM: "žargonsko"},
{ORTH: "žel.", NORM: "železnica"},
{ORTH: "živ.", NORM: "živost"},
]:
_exc[exc_data[ORTH]] = [exc_data]
abbrv = """
Co. Ch. DIPL. DR. Dr. Ev. Inc. Jr. Kr. Mag. M. MR. Mr. Mt. Murr. Npr. OZ.
Opr. Osn. Prim. Roj. ST. Sim. Sp. Sred. St. Sv. Škofl. Tel. UR. Zb.
a. aa. ab. abc. abit. abl. abs. abt. acc. accel. add. adj. adv. aet. afr. akad. al. alban. all. alleg.
alp. alt. alter. alžir. am. an. andr. ang. anh. anon. ans. antrop. apoc. app. approx. apt. ar. arc. arch.
arh. arr. as. asist. assist. assoc. asst. astr. attn. aug. avstral. az. b. bab. bal. bbl. bd. belg. bioinf.
biomed. bk. bl. bn. borg. bp. br. braz. brit. bros. broš. bt. bu. c. ca. cal. can. cand. cantab. cap. capt.
cat. cath. cc. cca. cd. cdr. cdre. cent. cerkv. cert. cf. cfr. ch. chap. chem. chr. chs. cic. circ. civ. cl.
cm. cmd. cnr. co. cod. col. coll. colo. com. comp. con. conc. cond. conn. cons. cont. coop. corr. cost. cp.
cpl. cr. crd. cres. cresc. ct. cu. d. dan. dat. davč. ddr. dec. ded. def. dem. dent. dept. dia. dip. dipl.
dir. disp. diss. div. do. doc. dok. dol. doo. dop. dott. dr. dram. druž. družb. drž. dt. duh. dur. dvr. dwt. e.
ea. ecc. eccl. eccles. econ. edn. egipt. egr. ekon. eksp. el. em. enc. eng. eo. ep. err. esp. esq. est.
et. etc. etnogr. etnol. ev. evfem. evr. ex. exc. excl. exp. expl. ext. exx. f. fa. facs. fak. faks. fas.
fasc. fco. fcp. feb. febr. fec. fed. fem. ff. fff. fid. fig. fil. film. fiziol. fiziot. flam. fm. fo. fol. folk.
frag. fran. franc. fsc. g. ga. gal. gdč. ge. gen. geod. geog. geotehnol. gg. gimn. glas. glav. gnr. go. gor.
gosp. gp. graf. gram. gren. grš. gs. h. hab. hf. hist. ho. hort. i. ia. ib. ibid. id. idr. idridr. ill. imen.
imp. impf. impr. in. inc. incl. ind. indus. inf. inform. ing. init. ins. int. inv. inšp. inštr. inž. is. islam.
ist. ital. iur. iz. izbr. izd. izg. izgr. izr. izv. j. jak. jam. jan. jav. je. jez. jr. jsl. jud. jug.
jugoslovan. jur. juž. jv. jz. k. kal. kan. kand. kat. kdo. kem. kip. kmet. kol. kom. komp. konf. kont. kost. kov.
kp. kpfw. kr. kraj. krat. kub. kult. kv. kval. l. la. lab. lb. ld. let. lib. lik. litt. lj. ljud. ll. loc. log.
loč. lt. ma. madž. mag. manag. manjš. masc. mass. mater. max. maxmax. mb. md. mech. medic. medij. medn.
mehč. mem. menedž. mes. mess. metal. meteor. meteorol. mex. mi. mikr. mil. minn. mio. misc. miss. mit. mk.
mkt. ml. mlad. mlle. mlr. mm. mme. množ. mo. moj. moš. možn. mr. mrd. mrs. ms. msc. msgr. mt. murr. mus. mut.
n. na. nad. nadalj. nadom. nagl. nakl. namer. nan. naniz. nasl. nat. navt. nač. ned. nem. nik. nizoz. nm. nn.
no. nom. norv. notr. nov. novogr. ns. o. ob. obd. obj. oblač. obl. oblik. obr. obraz. obs. obst. obt. obč. oc.
oct. od. odd. odg. odn. odst. odv. oec. off. ok. okla. okr. ont. oo. op. opis. opp. opr. orch. ord. ore. oreg.
org. orient. orig. ork. ort. oseb. osn. ot. ozir. ošk. p. pag. par. para. parc. parl. part. past. pat. pdk.
pen. perf. pert. perz. pesn. pet. pev. pf. pfc. ph. pharm. phil. pis. pl. po. pod. podr. podaljš. pogl. pogoj. pojm.
pok. pokr. pol. poljed. poljub. polu. pom. pomen. pon. ponov. pop. por. port. pos. posl. posn. pov. pp. ppl. pr.
praet. prav. pravopis. pravosl. preb. pred. predl. predm. predp. preds. pref. pregib. prel. prem. premen. prep.
pres. pret. prev. pribl. prih. pril. primerj. primor. prip. pripor. prir. prist. priv. proc. prof. prog. proiz.
prom. pron. prop. prot. protest. prov. ps. pss. pt. publ. pz. q. qld. qu. quad. que. r. racc. rastl. razgl.
razl. razv. rd. red. ref. reg. rel. relig. rep. repr. rer. resp. rest. ret. rev. revol. rež. rim. rist. rkp. rm.
roj. rom. romun. rp. rr. rt. rud. ruš. ry. sal. samogl. san. sc. scen. sci. scr. sdv. seg. sek. sen. sept. ser.
sev. sg. sgt. sh. sig. sigg. sign. sim. sin. sing. sinh. skand. skl. sklad. sklanj. sklep. skr. sl. slik. slov.
slovak. slovn. sn. so. sob. soc. sociol. sod. sopomen. sopr. sor. sov. sovj. sp. spec. spl. spr. spreg. sq. sr.
sre. sred. sredoz. srh. ss. ssp. st. sta. stan. stanstar. stcsl. ste. stim. stol. stom. str. stroj. strok. stsl.
stud. sup. supl. suppl. svet. sz. t. tab. tech. ted. tehn. tehnol. tek. teks. tekst. tel. temp. ten. teol. ter.
term. test. th. theol. tim. tip. tisočl. tit. tl. tol. tolmač. tom. tor. tov. tr. trad. traj. trans. tren.
trib. tril. trop. trp. trž. ts. tt. tu. tur. turiz. tvor. tvorb. . u. ul. umet. un. univ. up. upr. ur. urad.
us. ust. utr. v. va. val. var. varn. ven. ver. verb. vest. vezal. vic. vis. viv. viz. viš. vod. vok. vol. vpr.
vrst. vrstil. vs. vv. vzd. vzg. vzh. vzor. w. wed. wg. wk. x. y. z. zah. zaim. zak. zap. zasl. zavar. zač. zb.
združ. zg. zn. znan. znanstv. zoot. zun. zv. zvd. á. é. ć. č. čas. čet. čl. člen. čustv. đ. ľ. ł. ş. ŠT. š. šir.
škofl. škot. šol. št. števil. štud. ů. ű. žen. žival.
""".split()
for orth in abbrv:
_exc[orth] = [{ORTH: orth}]
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
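A minimal sketch of how these exceptions behave at runtime (assuming a spaCy install that ships the Slovenian data above; the sentence is only illustrative):

    import spacy

    nlp = spacy.blank("sl")
    doc = nlp("To je npr. kratica.")
    # "npr." stays a single token and its NORM expands to "na primer".
    print([(t.text, t.norm_) for t in doc])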

View File

@ -29,7 +29,7 @@ class Ukrainian(Language):
assigns=["token.lemma"], assigns=["token.lemma"],
default_config={ default_config={
"model": None, "model": None,
"mode": "pymorphy2", "mode": "pymorphy3",
"overwrite": False, "overwrite": False,
"scorer": {"@scorers": "spacy.lemmatizer_scorer.v1"}, "scorer": {"@scorers": "spacy.lemmatizer_scorer.v1"},
}, },

View File

@ -14,11 +14,11 @@ class UkrainianLemmatizer(RussianLemmatizer):
model: Optional[Model], model: Optional[Model],
name: str = "lemmatizer", name: str = "lemmatizer",
*, *,
mode: str = "pymorphy2", mode: str = "pymorphy3",
overwrite: bool = False, overwrite: bool = False,
scorer: Optional[Callable] = lemmatizer_score, scorer: Optional[Callable] = lemmatizer_score,
) -> None: ) -> None:
if mode == "pymorphy2": if mode in {"pymorphy2", "pymorphy2_lookup"}:
try: try:
from pymorphy2 import MorphAnalyzer from pymorphy2 import MorphAnalyzer
except ImportError: except ImportError:
@ -29,6 +29,17 @@ class UkrainianLemmatizer(RussianLemmatizer):
) from None ) from None
if getattr(self, "_morph", None) is None: if getattr(self, "_morph", None) is None:
self._morph = MorphAnalyzer(lang="uk") self._morph = MorphAnalyzer(lang="uk")
elif mode in {"pymorphy3", "pymorphy3_lookup"}:
try:
from pymorphy3 import MorphAnalyzer
except ImportError:
raise ImportError(
"The Ukrainian lemmatizer mode 'pymorphy3' requires the "
"pymorphy3 library and dictionaries. Install them with: "
"pip install pymorphy3 pymorphy3-dicts-uk"
) from None
if getattr(self, "_morph", None) is None:
self._morph = MorphAnalyzer(lang="uk")
super().__init__( super().__init__(
vocab, model, name, mode=mode, overwrite=overwrite, scorer=scorer vocab, model, name, mode=mode, overwrite=overwrite, scorer=scorer
) )
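A hedged sketch of selecting the new default mode explicitly (assumes pymorphy3 and pymorphy3-dicts-uk are installed, as the error message above instructs):

    import spacy

    nlp = spacy.blank("uk")
    # "pymorphy3" is now the default; passing it explicitly is equivalent.
    nlp.add_pipe("lemmatizer", config={"mode": "pymorphy3"})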

View File

@ -1,4 +1,4 @@
from typing import Iterator, Optional, Any, Dict, Callable, Iterable, Collection from typing import Iterator, Optional, Any, Dict, Callable, Iterable
from typing import Union, Tuple, List, Set, Pattern, Sequence from typing import Union, Tuple, List, Set, Pattern, Sequence
from typing import NoReturn, TYPE_CHECKING, TypeVar, cast, overload from typing import NoReturn, TYPE_CHECKING, TypeVar, cast, overload
@ -10,6 +10,7 @@ from contextlib import contextmanager
from copy import deepcopy from copy import deepcopy
from pathlib import Path from pathlib import Path
import warnings import warnings
from thinc.api import get_current_ops, Config, CupyOps, Optimizer from thinc.api import get_current_ops, Config, CupyOps, Optimizer
import srsly import srsly
import multiprocessing as mp import multiprocessing as mp
@ -24,7 +25,7 @@ from .pipe_analysis import validate_attrs, analyze_pipes, print_pipe_analysis
from .training import Example, validate_examples from .training import Example, validate_examples
from .training.initialize import init_vocab, init_tok2vec from .training.initialize import init_vocab, init_tok2vec
from .scorer import Scorer from .scorer import Scorer
from .util import registry, SimpleFrozenList, _pipe, raise_error from .util import registry, SimpleFrozenList, _pipe, raise_error, _DEFAULT_EMPTY_PIPES
from .util import SimpleFrozenDict, combine_score_weights, CONFIG_SECTION_ORDER from .util import SimpleFrozenDict, combine_score_weights, CONFIG_SECTION_ORDER
from .util import warn_if_jupyter_cupy from .util import warn_if_jupyter_cupy
from .lang.tokenizer_exceptions import URL_MATCH, BASE_EXCEPTIONS from .lang.tokenizer_exceptions import URL_MATCH, BASE_EXCEPTIONS
@ -42,8 +43,7 @@ from .lookups import load_lookups
from .compat import Literal from .compat import Literal
if TYPE_CHECKING: PipeCallable = Callable[[Doc], Doc]
from .pipeline import Pipe # noqa: F401
# This is the base config with all settings (training etc.) # This is the base config with all settings (training etc.)
@ -180,7 +180,7 @@ class Language:
self.vocab: Vocab = vocab self.vocab: Vocab = vocab
if self.lang is None: if self.lang is None:
self.lang = self.vocab.lang self.lang = self.vocab.lang
self._components: List[Tuple[str, "Pipe"]] = [] self._components: List[Tuple[str, PipeCallable]] = []
self._disabled: Set[str] = set() self._disabled: Set[str] = set()
self.max_length = max_length self.max_length = max_length
# Create the default tokenizer from the default config # Create the default tokenizer from the default config
@ -302,7 +302,7 @@ class Language:
return SimpleFrozenList(names) return SimpleFrozenList(names)
@property @property
def components(self) -> List[Tuple[str, "Pipe"]]: def components(self) -> List[Tuple[str, PipeCallable]]:
"""Get all (name, component) tuples in the pipeline, including the """Get all (name, component) tuples in the pipeline, including the
currently disabled components. currently disabled components.
""" """
@ -321,12 +321,12 @@ class Language:
return SimpleFrozenList(names, error=Errors.E926.format(attr="component_names")) return SimpleFrozenList(names, error=Errors.E926.format(attr="component_names"))
@property @property
def pipeline(self) -> List[Tuple[str, "Pipe"]]: def pipeline(self) -> List[Tuple[str, PipeCallable]]:
"""The processing pipeline consisting of (name, component) tuples. The """The processing pipeline consisting of (name, component) tuples. The
components are called on the Doc in order as it passes through the components are called on the Doc in order as it passes through the
pipeline. pipeline.
RETURNS (List[Tuple[str, Pipe]]): The pipeline. RETURNS (List[Tuple[str, Callable[[Doc], Doc]]]): The pipeline.
""" """
pipes = [(n, p) for n, p in self._components if n not in self._disabled] pipes = [(n, p) for n, p in self._components if n not in self._disabled]
return SimpleFrozenList(pipes, error=Errors.E926.format(attr="pipeline")) return SimpleFrozenList(pipes, error=Errors.E926.format(attr="pipeline"))
@ -526,7 +526,7 @@ class Language:
assigns: Iterable[str] = SimpleFrozenList(), assigns: Iterable[str] = SimpleFrozenList(),
requires: Iterable[str] = SimpleFrozenList(), requires: Iterable[str] = SimpleFrozenList(),
retokenizes: bool = False, retokenizes: bool = False,
func: Optional["Pipe"] = None, func: Optional[PipeCallable] = None,
) -> Callable[..., Any]: ) -> Callable[..., Any]:
"""Register a new pipeline component. Can be used for stateless function """Register a new pipeline component. Can be used for stateless function
components that don't require a separate factory. Can be used as a components that don't require a separate factory. Can be used as a
@ -541,7 +541,7 @@ class Language:
e.g. "token.ent_id". Used for pipeline analysis. e.g. "token.ent_id". Used for pipeline analysis.
retokenizes (bool): Whether the component changes the tokenization. retokenizes (bool): Whether the component changes the tokenization.
Used for pipeline analysis. Used for pipeline analysis.
func (Optional[Callable]): Factory function if not used as a decorator. func (Optional[Callable[[Doc], Doc]): Factory function if not used as a decorator.
DOCS: https://spacy.io/api/language#component DOCS: https://spacy.io/api/language#component
""" """
@ -552,11 +552,11 @@ class Language:
raise ValueError(Errors.E853.format(name=name)) raise ValueError(Errors.E853.format(name=name))
component_name = name if name is not None else util.get_object_name(func) component_name = name if name is not None else util.get_object_name(func)
def add_component(component_func: "Pipe") -> Callable: def add_component(component_func: PipeCallable) -> Callable:
if isinstance(func, type): # function is a class if isinstance(func, type): # function is a class
raise ValueError(Errors.E965.format(name=component_name)) raise ValueError(Errors.E965.format(name=component_name))
def factory_func(nlp, name: str) -> "Pipe": def factory_func(nlp, name: str) -> PipeCallable:
return component_func return component_func
internal_name = cls.get_factory_name(name) internal_name = cls.get_factory_name(name)
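For illustration, a minimal stateless component; any Doc-to-Doc callable satisfies the new PipeCallable alias used throughout these signatures (the component name here is made up):

    from spacy.language import Language
    from spacy.tokens import Doc

    @Language.component("debug_len")
    def debug_len(doc: Doc) -> Doc:
        # A stateless pipe: takes a Doc, returns the same Doc.
        print(len(doc), "tokens")
        return doc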
@ -606,7 +606,7 @@ class Language:
print_pipe_analysis(analysis, keys=keys) print_pipe_analysis(analysis, keys=keys)
return analysis return analysis
def get_pipe(self, name: str) -> "Pipe": def get_pipe(self, name: str) -> PipeCallable:
"""Get a pipeline component for a given component name. """Get a pipeline component for a given component name.
name (str): Name of pipeline component to get. name (str): Name of pipeline component to get.
@ -627,7 +627,7 @@ class Language:
config: Dict[str, Any] = SimpleFrozenDict(), config: Dict[str, Any] = SimpleFrozenDict(),
raw_config: Optional[Config] = None, raw_config: Optional[Config] = None,
validate: bool = True, validate: bool = True,
) -> "Pipe": ) -> PipeCallable:
"""Create a pipeline component. Mostly used internally. To create and """Create a pipeline component. Mostly used internally. To create and
add a component to the pipeline, you can use nlp.add_pipe. add a component to the pipeline, you can use nlp.add_pipe.
@ -639,7 +639,7 @@ class Language:
raw_config (Optional[Config]): Internals: the non-interpolated config. raw_config (Optional[Config]): Internals: the non-interpolated config.
validate (bool): Whether to validate the component config against the validate (bool): Whether to validate the component config against the
arguments and types expected by the factory. arguments and types expected by the factory.
RETURNS (Pipe): The pipeline component. RETURNS (Callable[[Doc], Doc]): The pipeline component.
DOCS: https://spacy.io/api/language#create_pipe DOCS: https://spacy.io/api/language#create_pipe
""" """
@ -694,24 +694,18 @@ class Language:
def create_pipe_from_source( def create_pipe_from_source(
self, source_name: str, source: "Language", *, name: str self, source_name: str, source: "Language", *, name: str
) -> Tuple["Pipe", str]: ) -> Tuple[PipeCallable, str]:
"""Create a pipeline component by copying it from an existing model. """Create a pipeline component by copying it from an existing model.
source_name (str): Name of the component in the source pipeline. source_name (str): Name of the component in the source pipeline.
source (Language): The source nlp object to copy from. source (Language): The source nlp object to copy from.
name (str): Optional alternative name to use in current pipeline. name (str): Optional alternative name to use in current pipeline.
RETURNS (Tuple[Callable, str]): The component and its factory name. RETURNS (Tuple[Callable[[Doc], Doc], str]): The component and its factory name.
""" """
# Check source type # Check source type
if not isinstance(source, Language): if not isinstance(source, Language):
raise ValueError(Errors.E945.format(name=source_name, source=type(source))) raise ValueError(Errors.E945.format(name=source_name, source=type(source)))
# Check vectors, with faster checks first if self.vocab.vectors != source.vocab.vectors:
if (
self.vocab.vectors.shape != source.vocab.vectors.shape
or self.vocab.vectors.key2row != source.vocab.vectors.key2row
or self.vocab.vectors.to_bytes(exclude=["strings"])
!= source.vocab.vectors.to_bytes(exclude=["strings"])
):
warnings.warn(Warnings.W113.format(name=source_name)) warnings.warn(Warnings.W113.format(name=source_name))
if source_name not in source.component_names: if source_name not in source.component_names:
raise KeyError( raise KeyError(
@ -745,7 +739,7 @@ class Language:
config: Dict[str, Any] = SimpleFrozenDict(), config: Dict[str, Any] = SimpleFrozenDict(),
raw_config: Optional[Config] = None, raw_config: Optional[Config] = None,
validate: bool = True, validate: bool = True,
) -> "Pipe": ) -> PipeCallable:
"""Add a component to the processing pipeline. Valid components are """Add a component to the processing pipeline. Valid components are
callables that take a `Doc` object, modify it and return it. Only one callables that take a `Doc` object, modify it and return it. Only one
of before/after/first/last can be set. Default behaviour is "last". of before/after/first/last can be set. Default behaviour is "last".
@ -768,7 +762,7 @@ class Language:
raw_config (Optional[Config]): Internals: the non-interpolated config. raw_config (Optional[Config]): Internals: the non-interpolated config.
validate (bool): Whether to validate the component config against the validate (bool): Whether to validate the component config against the
arguments and types expected by the factory. arguments and types expected by the factory.
RETURNS (Pipe): The pipeline component. RETURNS (Callable[[Doc], Doc]): The pipeline component.
DOCS: https://spacy.io/api/language#add_pipe DOCS: https://spacy.io/api/language#add_pipe
""" """
@ -789,14 +783,6 @@ class Language:
factory_name, source, name=name factory_name, source, name=name
) )
else: else:
if not self.has_factory(factory_name):
err = Errors.E002.format(
name=factory_name,
opts=", ".join(self.factory_names),
method="add_pipe",
lang=util.get_object_name(self),
lang_code=self.lang,
)
pipe_component = self.create_pipe( pipe_component = self.create_pipe(
factory_name, factory_name,
name=name, name=name,
@ -882,7 +868,7 @@ class Language:
*, *,
config: Dict[str, Any] = SimpleFrozenDict(), config: Dict[str, Any] = SimpleFrozenDict(),
validate: bool = True, validate: bool = True,
) -> "Pipe": ) -> PipeCallable:
"""Replace a component in the pipeline. """Replace a component in the pipeline.
name (str): Name of the component to replace. name (str): Name of the component to replace.
@ -891,7 +877,7 @@ class Language:
component. Will be merged with default config, if available. component. Will be merged with default config, if available.
validate (bool): Whether to validate the component config against the validate (bool): Whether to validate the component config against the
arguments and types expected by the factory. arguments and types expected by the factory.
RETURNS (Pipe): The new pipeline component. RETURNS (Callable[[Doc], Doc]): The new pipeline component.
DOCS: https://spacy.io/api/language#replace_pipe DOCS: https://spacy.io/api/language#replace_pipe
""" """
@ -943,11 +929,11 @@ class Language:
init_cfg = self._config["initialize"]["components"].pop(old_name) init_cfg = self._config["initialize"]["components"].pop(old_name)
self._config["initialize"]["components"][new_name] = init_cfg self._config["initialize"]["components"][new_name] = init_cfg
def remove_pipe(self, name: str) -> Tuple[str, "Pipe"]: def remove_pipe(self, name: str) -> Tuple[str, PipeCallable]:
"""Remove a component from the pipeline. """Remove a component from the pipeline.
name (str): Name of the component to remove. name (str): Name of the component to remove.
RETURNS (tuple): A `(name, component)` tuple of the removed component. RETURNS (Tuple[str, Callable[[Doc], Doc]]): A `(name, component)` tuple of the removed component.
DOCS: https://spacy.io/api/language#remove_pipe DOCS: https://spacy.io/api/language#remove_pipe
""" """
@ -1028,8 +1014,8 @@ class Language:
raise ValueError(Errors.E109.format(name=name)) from e raise ValueError(Errors.E109.format(name=name)) from e
except Exception as e: except Exception as e:
error_handler(name, proc, [doc], e) error_handler(name, proc, [doc], e)
if doc is None: if not isinstance(doc, Doc):
raise ValueError(Errors.E005.format(name=name)) raise ValueError(Errors.E005.format(name=name, returned_type=type(doc)))
return doc return doc
def disable_pipes(self, *names) -> "DisabledPipes": def disable_pipes(self, *names) -> "DisabledPipes":
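A sketch of what the stricter return-type check above catches; the failing call is commented out so the snippet runs as-is:

    import spacy
    from spacy.language import Language

    @Language.component("broken")
    def broken(doc):
        return "not a Doc"  # any non-Doc return value now triggers E005

    nlp = spacy.blank("en")
    nlp.add_pipe("broken")
    # nlp("some text")  # would raise ValueError (E005) reporting returned_type=<class 'str'>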
@ -1063,7 +1049,7 @@ class Language:
""" """
if enable is None and disable is None: if enable is None and disable is None:
raise ValueError(Errors.E991) raise ValueError(Errors.E991)
if disable is not None and isinstance(disable, str): if isinstance(disable, str):
disable = [disable] disable = [disable]
if enable is not None: if enable is not None:
if isinstance(enable, str): if isinstance(enable, str):
@ -1362,15 +1348,15 @@ class Language:
def set_error_handler( def set_error_handler(
self, self,
error_handler: Callable[[str, "Pipe", List[Doc], Exception], NoReturn], error_handler: Callable[[str, PipeCallable, List[Doc], Exception], NoReturn],
): ):
"""Set an error handler object for all the components in the pipeline that implement """Set an error handler object for all the components in the pipeline
a set_error_handler function. that implement a set_error_handler function.
error_handler (Callable[[str, Pipe, List[Doc], Exception], NoReturn]): error_handler (Callable[[str, Callable[[Doc], Doc], List[Doc], Exception], NoReturn]):
Function that deals with a failing batch of documents. This callable function should take in Function that deals with a failing batch of documents. This callable
the component's name, the component itself, the offending batch of documents, and the exception function should take in the component's name, the component itself,
that was thrown. the offending batch of documents, and the exception that was thrown.
DOCS: https://spacy.io/api/language#set_error_handler DOCS: https://spacy.io/api/language#set_error_handler
""" """
self.default_error_handler = error_handler self.default_error_handler = error_handler
@ -1698,9 +1684,9 @@ class Language:
config: Union[Dict[str, Any], Config] = {}, config: Union[Dict[str, Any], Config] = {},
*, *,
vocab: Union[Vocab, bool] = True, vocab: Union[Vocab, bool] = True,
disable: Iterable[str] = SimpleFrozenList(), disable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
enable: Iterable[str] = SimpleFrozenList(), enable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
exclude: Iterable[str] = SimpleFrozenList(), exclude: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
meta: Dict[str, Any] = SimpleFrozenDict(), meta: Dict[str, Any] = SimpleFrozenDict(),
auto_fill: bool = True, auto_fill: bool = True,
validate: bool = True, validate: bool = True,
@ -1711,12 +1697,12 @@ class Language:
config (Dict[str, Any] / Config): The loaded config. config (Dict[str, Any] / Config): The loaded config.
vocab (Vocab): A Vocab object. If True, a vocab is created. vocab (Vocab): A Vocab object. If True, a vocab is created.
disable (Iterable[str]): Names of pipeline components to disable. disable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to disable.
Disabled pipes will be loaded but they won't be run unless you Disabled pipes will be loaded but they won't be run unless you
explicitly enable them by calling nlp.enable_pipe. explicitly enable them by calling nlp.enable_pipe.
enable (Iterable[str]): Names of pipeline components to enable. All other enable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to enable. All other
pipes will be disabled (and can be enabled using `nlp.enable_pipe`). pipes will be disabled (and can be enabled using `nlp.enable_pipe`).
exclude (Iterable[str]): Names of pipeline components to exclude. exclude (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to exclude.
Excluded components won't be loaded. Excluded components won't be loaded.
meta (Dict[str, Any]): Meta overrides for nlp.meta. meta (Dict[str, Any]): Meta overrides for nlp.meta.
auto_fill (bool): Automatically fill in missing values in config based auto_fill (bool): Automatically fill in missing values in config based
@ -1871,9 +1857,29 @@ class Language:
nlp.vocab.from_bytes(vocab_b) nlp.vocab.from_bytes(vocab_b)
# Resolve disabled/enabled settings. # Resolve disabled/enabled settings.
if isinstance(disable, str):
disable = [disable]
if isinstance(enable, str):
enable = [enable]
if isinstance(exclude, str):
exclude = [exclude]
# `enable` should not be merged with `enabled` (the opposite is true for `disable`/`disabled`). If the config
# specifies values for `enabled` not included in `enable`, emit warning.
if id(enable) != id(_DEFAULT_EMPTY_PIPES):
enabled = config["nlp"].get("enabled", [])
if len(enabled) and not set(enabled).issubset(enable):
warnings.warn(
Warnings.W123.format(
enable=enable,
enabled=enabled,
)
)
# Ensure sets of disabled/enabled pipe names are not contradictory.
disabled_pipes = cls._resolve_component_status( disabled_pipes = cls._resolve_component_status(
[*config["nlp"]["disabled"], *disable], list({*disable, *config["nlp"].get("disabled", [])}),
[*config["nlp"].get("enabled", []), *enable], enable,
config["nlp"]["pipeline"], config["nlp"]["pipeline"],
) )
nlp._disabled = set(p for p in disabled_pipes if p not in exclude) nlp._disabled = set(p for p in disabled_pipes if p not in exclude)
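Illustrative usage of the relaxed argument types ("en_core_web_sm" is just a placeholder for any installed pipeline, and spacy.load is assumed to forward these arguments to from_config):

    import spacy

    nlp = spacy.load("en_core_web_sm", disable="parser")          # same as disable=["parser"]
    nlp = spacy.load("en_core_web_sm", enable=["tagger", "ner"])  # all other pipes disabled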
@ -2031,37 +2037,36 @@ class Language:
@staticmethod @staticmethod
def _resolve_component_status( def _resolve_component_status(
disable: Iterable[str], enable: Iterable[str], pipe_names: Collection[str] disable: Union[str, Iterable[str]],
enable: Union[str, Iterable[str]],
pipe_names: Iterable[str],
) -> Tuple[str, ...]: ) -> Tuple[str, ...]:
"""Derives whether (1) `disable` and `enable` values are consistent and (2) """Derives whether (1) `disable` and `enable` values are consistent and (2)
resolves those to a single set of disabled components. Raises an error in resolves those to a single set of disabled components. Raises an error in
case of inconsistency. case of inconsistency.
disable (Iterable[str]): Names of components or serialization fields to disable. disable (Union[str, Iterable[str]]): Name(s) of component(s) or serialization fields to disable.
enable (Iterable[str]): Names of pipeline components to enable. enable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to enable.
pipe_names (Iterable[str]): Names of all pipeline components. pipe_names (Iterable[str]): Names of all pipeline components.
RETURNS (Tuple[str, ...]): Names of components to exclude from pipeline w.r.t. RETURNS (Tuple[str, ...]): Names of components to exclude from pipeline w.r.t.
specified includes and excludes. specified includes and excludes.
""" """
if disable is not None and isinstance(disable, str): if isinstance(disable, str):
disable = [disable] disable = [disable]
to_disable = disable to_disable = disable
if enable: if enable:
to_disable = [ if isinstance(enable, str):
pipe_name for pipe_name in pipe_names if pipe_name not in enable enable = [enable]
] to_disable = {
if disable and disable != to_disable: *[pipe_name for pipe_name in pipe_names if pipe_name not in enable],
raise ValueError( *disable,
Errors.E1042.format( }
arg1="enable", # If any pipe to be enabled is in to_disable, the specification is inconsistent.
arg2="disable", if len(set(enable) & to_disable):
arg1_values=enable, raise ValueError(Errors.E1042.format(enable=enable, disable=disable))
arg2_values=disable,
)
)
return tuple(to_disable) return tuple(to_disable)
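A plain-Python sketch of the resolution logic above: everything outside enable is disabled, explicit disables are added on top, and any overlap between the two raises E1042 (the pipe names are illustrative):

    disable, enable = ["ner"], ["tagger", "parser"]
    pipe_names = ["tagger", "parser", "ner", "lemmatizer"]
    to_disable = {p for p in pipe_names if p not in enable} | set(disable)
    assert to_disable == {"ner", "lemmatizer"}
    assert not set(enable) & to_disable  # a non-empty intersection would be rejected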

View File

@ -1,5 +1,6 @@
from .matcher import Matcher from .matcher import Matcher
from .phrasematcher import PhraseMatcher from .phrasematcher import PhraseMatcher
from .dependencymatcher import DependencyMatcher from .dependencymatcher import DependencyMatcher
from .levenshtein import levenshtein
__all__ = ["Matcher", "PhraseMatcher", "DependencyMatcher"] __all__ = ["Matcher", "PhraseMatcher", "DependencyMatcher", "levenshtein"]

View File

@ -0,0 +1,32 @@
# cython: profile=True, binding=True, infer_types=True
from cpython.object cimport PyObject
from libc.stdint cimport int64_t
from typing import Optional
from ..util import registry
cdef extern from "polyleven.c":
int64_t polyleven(PyObject *o1, PyObject *o2, int64_t k)
cpdef int64_t levenshtein(a: str, b: str, k: Optional[int] = None):
if k is None:
k = -1
return polyleven(<PyObject*>a, <PyObject*>b, k)
cpdef bint levenshtein_compare(input_text: str, pattern_text: str, fuzzy: int = -1):
if fuzzy >= 0:
max_edits = fuzzy
else:
# allow at least two edits (to allow at least one transposition) and up
# to 30% of the pattern string length
max_edits = max(2, round(0.3 * len(pattern_text)))
return levenshtein(input_text, pattern_text, max_edits) <= max_edits
@registry.misc("spacy.levenshtein_compare.v1")
def make_levenshtein_compare():
return levenshtein_compare
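The new module is re-exported from spacy.matcher (see the __init__ change above); a small sketch of the distance function and the default edit budget:

    from spacy.matcher import levenshtein

    assert levenshtein("kitten", "sitting") == 3
    # levenshtein_compare's default budget is max(2, round(0.3 * len(pattern))),
    # so an 11-character pattern tolerates up to 3 edits.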

View File

@ -77,3 +77,4 @@ cdef class Matcher:
cdef public object _extensions cdef public object _extensions
cdef public object _extra_predicates cdef public object _extra_predicates
cdef public object _seen_attrs cdef public object _seen_attrs
cdef public object _fuzzy_compare

View File

@ -5,7 +5,12 @@ from ..vocab import Vocab
from ..tokens import Doc, Span from ..tokens import Doc, Span
class Matcher: class Matcher:
def __init__(self, vocab: Vocab, validate: bool = ...) -> None: ... def __init__(
self,
vocab: Vocab,
validate: bool = ...,
fuzzy_compare: Callable[[str, str, int], bool] = ...,
) -> None: ...
def __reduce__(self) -> Any: ... def __reduce__(self) -> Any: ...
def __len__(self) -> int: ... def __len__(self) -> int: ...
def __contains__(self, key: str) -> bool: ... def __contains__(self, key: str) -> bool: ...

View File

@ -1,5 +1,5 @@
# cython: infer_types=True, cython: profile=True # cython: binding=True, infer_types=True, profile=True
from typing import List from typing import List, Iterable
from libcpp.vector cimport vector from libcpp.vector cimport vector
from libc.stdint cimport int32_t, int8_t from libc.stdint cimport int32_t, int8_t
@ -20,10 +20,12 @@ from ..tokens.token cimport Token
from ..tokens.morphanalysis cimport MorphAnalysis from ..tokens.morphanalysis cimport MorphAnalysis
from ..attrs cimport ID, attr_id_t, NULL_ATTR, ORTH, POS, TAG, DEP, LEMMA, MORPH, ENT_IOB from ..attrs cimport ID, attr_id_t, NULL_ATTR, ORTH, POS, TAG, DEP, LEMMA, MORPH, ENT_IOB
from .levenshtein import levenshtein_compare
from ..schemas import validate_token_pattern from ..schemas import validate_token_pattern
from ..errors import Errors, MatchPatternError, Warnings from ..errors import Errors, MatchPatternError, Warnings
from ..strings import get_string_id from ..strings import get_string_id
from ..attrs import IDS from ..attrs import IDS
from ..util import registry
DEF PADDING = 5 DEF PADDING = 5
@ -36,11 +38,13 @@ cdef class Matcher:
USAGE: https://spacy.io/usage/rule-based-matching USAGE: https://spacy.io/usage/rule-based-matching
""" """
def __init__(self, vocab, validate=True): def __init__(self, vocab, validate=True, *, fuzzy_compare=levenshtein_compare):
"""Create the Matcher. """Create the Matcher.
vocab (Vocab): The vocabulary object, which must be shared with the vocab (Vocab): The vocabulary object, which must be shared with the
documents the matcher will operate on. validate (bool): Validate all patterns added to this matcher.
fuzzy_compare (Callable[[str, str, int], bool]): The comparison method
for the FUZZY operators.
""" """
self._extra_predicates = [] self._extra_predicates = []
self._patterns = {} self._patterns = {}
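A hedged sketch of supplying a custom comparison for the FUZZY operators; the exact-match callable is purely illustrative:

    import spacy
    from spacy.matcher import Matcher

    def exact_only(input_text: str, pattern_text: str, fuzzy: int) -> bool:
        # Same signature as levenshtein_compare; ignores the edit budget.
        return input_text == pattern_text

    nlp = spacy.blank("en")
    matcher = Matcher(nlp.vocab, fuzzy_compare=exact_only)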
@ -51,9 +55,10 @@ cdef class Matcher:
self.vocab = vocab self.vocab = vocab
self.mem = Pool() self.mem = Pool()
self.validate = validate self.validate = validate
self._fuzzy_compare = fuzzy_compare
def __reduce__(self): def __reduce__(self):
data = (self.vocab, self._patterns, self._callbacks) data = (self.vocab, self._patterns, self._callbacks, self.validate, self._fuzzy_compare)
return (unpickle_matcher, data, None, None) return (unpickle_matcher, data, None, None)
def __len__(self): def __len__(self):
@ -128,7 +133,7 @@ cdef class Matcher:
for pattern in patterns: for pattern in patterns:
try: try:
specs = _preprocess_pattern(pattern, self.vocab, specs = _preprocess_pattern(pattern, self.vocab,
self._extensions, self._extra_predicates) self._extensions, self._extra_predicates, self._fuzzy_compare)
self.patterns.push_back(init_pattern(self.mem, key, specs)) self.patterns.push_back(init_pattern(self.mem, key, specs))
for spec in specs: for spec in specs:
for attr, _ in spec[1]: for attr, _ in spec[1]:
@ -326,8 +331,8 @@ cdef class Matcher:
return key return key
def unpickle_matcher(vocab, patterns, callbacks): def unpickle_matcher(vocab, patterns, callbacks, validate, fuzzy_compare):
matcher = Matcher(vocab) matcher = Matcher(vocab, validate=validate, fuzzy_compare=fuzzy_compare)
for key, pattern in patterns.items(): for key, pattern in patterns.items():
callback = callbacks.get(key, None) callback = callbacks.get(key, None)
matcher.add(key, pattern, on_match=callback) matcher.add(key, pattern, on_match=callback)
@ -754,7 +759,7 @@ cdef attr_t get_ent_id(const TokenPatternC* pattern) nogil:
return id_attr.value return id_attr.value
def _preprocess_pattern(token_specs, vocab, extensions_table, extra_predicates): def _preprocess_pattern(token_specs, vocab, extensions_table, extra_predicates, fuzzy_compare):
"""This function interprets the pattern, converting the various bits of """This function interprets the pattern, converting the various bits of
syntactic sugar before we compile it into a struct with init_pattern. syntactic sugar before we compile it into a struct with init_pattern.
@ -781,7 +786,7 @@ def _preprocess_pattern(token_specs, vocab, extensions_table, extra_predicates):
ops = _get_operators(spec) ops = _get_operators(spec)
attr_values = _get_attr_values(spec, string_store) attr_values = _get_attr_values(spec, string_store)
extensions = _get_extensions(spec, string_store, extensions_table) extensions = _get_extensions(spec, string_store, extensions_table)
predicates = _get_extra_predicates(spec, extra_predicates, vocab) predicates = _get_extra_predicates(spec, extra_predicates, vocab, fuzzy_compare)
for op in ops: for op in ops:
tokens.append((op, list(attr_values), list(extensions), list(predicates), token_idx)) tokens.append((op, list(attr_values), list(extensions), list(predicates), token_idx))
return tokens return tokens
@ -826,16 +831,45 @@ def _get_attr_values(spec, string_store):
# These predicate helper classes are used to match the REGEX, IN, >= etc # These predicate helper classes are used to match the REGEX, IN, >= etc
# extensions to the matcher introduced in #3173. # extensions to the matcher introduced in #3173.
class _FuzzyPredicate:
operators = ("FUZZY", "FUZZY1", "FUZZY2", "FUZZY3", "FUZZY4", "FUZZY5",
"FUZZY6", "FUZZY7", "FUZZY8", "FUZZY9")
def __init__(self, i, attr, value, predicate, is_extension=False, vocab=None,
regex=False, fuzzy=None, fuzzy_compare=None):
self.i = i
self.attr = attr
self.value = value
self.predicate = predicate
self.is_extension = is_extension
if self.predicate not in self.operators:
raise ValueError(Errors.E126.format(good=self.operators, bad=self.predicate))
fuzz = self.predicate[len("FUZZY"):] # number after prefix
self.fuzzy = int(fuzz) if fuzz else -1
self.fuzzy_compare = fuzzy_compare
self.key = (self.attr, self.fuzzy, self.predicate, srsly.json_dumps(value, sort_keys=True))
def __call__(self, Token token):
if self.is_extension:
value = token._.get(self.attr)
else:
value = token.vocab.strings[get_token_attr_for_matcher(token.c, self.attr)]
if self.value == value:
return True
return self.fuzzy_compare(value, self.value, self.fuzzy)
class _RegexPredicate: class _RegexPredicate:
operators = ("REGEX",) operators = ("REGEX",)
def __init__(self, i, attr, value, predicate, is_extension=False, vocab=None): def __init__(self, i, attr, value, predicate, is_extension=False, vocab=None,
regex=False, fuzzy=None, fuzzy_compare=None):
self.i = i self.i = i
self.attr = attr self.attr = attr
self.value = re.compile(value) self.value = re.compile(value)
self.predicate = predicate self.predicate = predicate
self.is_extension = is_extension self.is_extension = is_extension
self.key = (attr, self.predicate, srsly.json_dumps(value, sort_keys=True)) self.key = (self.attr, self.predicate, srsly.json_dumps(value, sort_keys=True))
if self.predicate not in self.operators: if self.predicate not in self.operators:
raise ValueError(Errors.E126.format(good=self.operators, bad=self.predicate)) raise ValueError(Errors.E126.format(good=self.operators, bad=self.predicate))
@ -850,41 +884,78 @@ class _RegexPredicate:
class _SetPredicate: class _SetPredicate:
operators = ("IN", "NOT_IN", "IS_SUBSET", "IS_SUPERSET", "INTERSECTS") operators = ("IN", "NOT_IN", "IS_SUBSET", "IS_SUPERSET", "INTERSECTS")
def __init__(self, i, attr, value, predicate, is_extension=False, vocab=None): def __init__(self, i, attr, value, predicate, is_extension=False, vocab=None,
regex=False, fuzzy=None, fuzzy_compare=None):
self.i = i self.i = i
self.attr = attr self.attr = attr
self.vocab = vocab self.vocab = vocab
self.regex = regex
self.fuzzy = fuzzy
self.fuzzy_compare = fuzzy_compare
if self.attr == MORPH: if self.attr == MORPH:
# normalize morph strings # normalize morph strings
self.value = set(self.vocab.morphology.add(v) for v in value) self.value = set(self.vocab.morphology.add(v) for v in value)
else:
if self.regex:
self.value = set(re.compile(v) for v in value)
elif self.fuzzy is not None:
# add to string store
self.value = set(self.vocab.strings.add(v) for v in value)
else: else:
self.value = set(get_string_id(v) for v in value) self.value = set(get_string_id(v) for v in value)
self.predicate = predicate self.predicate = predicate
self.is_extension = is_extension self.is_extension = is_extension
self.key = (attr, self.predicate, srsly.json_dumps(value, sort_keys=True)) self.key = (self.attr, self.regex, self.fuzzy, self.predicate, srsly.json_dumps(value, sort_keys=True))
if self.predicate not in self.operators: if self.predicate not in self.operators:
raise ValueError(Errors.E126.format(good=self.operators, bad=self.predicate)) raise ValueError(Errors.E126.format(good=self.operators, bad=self.predicate))
def __call__(self, Token token): def __call__(self, Token token):
if self.is_extension: if self.is_extension:
value = get_string_id(token._.get(self.attr)) value = token._.get(self.attr)
else: else:
value = get_token_attr_for_matcher(token.c, self.attr) value = get_token_attr_for_matcher(token.c, self.attr)
if self.predicate in ("IS_SUBSET", "IS_SUPERSET", "INTERSECTS"): if self.predicate in ("IN", "NOT_IN"):
if isinstance(value, (str, int)):
value = get_string_id(value)
else:
return False
elif self.predicate in ("IS_SUBSET", "IS_SUPERSET", "INTERSECTS"):
# ensure that all values are enclosed in a set
if self.attr == MORPH: if self.attr == MORPH:
# break up MORPH into individual Feat=Val values # break up MORPH into individual Feat=Val values
value = set(get_string_id(v) for v in MorphAnalysis.from_id(self.vocab, value)) value = set(get_string_id(v) for v in MorphAnalysis.from_id(self.vocab, value))
else: elif isinstance(value, (str, int)):
# treat a single value as a list value = set((get_string_id(value),))
if isinstance(value, (str, int)): elif isinstance(value, Iterable) and all(isinstance(v, (str, int)) for v in value):
value = set([get_string_id(value)])
else:
value = set(get_string_id(v) for v in value) value = set(get_string_id(v) for v in value)
else:
return False
if self.predicate == "IN": if self.predicate == "IN":
return value in self.value if self.regex:
value = self.vocab.strings[value]
return any(bool(v.search(value)) for v in self.value)
elif self.fuzzy is not None:
value = self.vocab.strings[value]
return any(self.fuzzy_compare(value, self.vocab.strings[v], self.fuzzy)
for v in self.value)
elif value in self.value:
return True
else:
return False
elif self.predicate == "NOT_IN": elif self.predicate == "NOT_IN":
return value not in self.value if self.regex:
value = self.vocab.strings[value]
return not any(bool(v.search(value)) for v in self.value)
elif self.fuzzy is not None:
value = self.vocab.strings[value]
return not any(self.fuzzy_compare(value, self.vocab.strings[v], self.fuzzy)
for v in self.value)
elif value in self.value:
return False
else:
return True
elif self.predicate == "IS_SUBSET": elif self.predicate == "IS_SUBSET":
return value <= self.value return value <= self.value
elif self.predicate == "IS_SUPERSET": elif self.predicate == "IS_SUPERSET":
@ -899,13 +970,14 @@ class _SetPredicate:
class _ComparisonPredicate: class _ComparisonPredicate:
operators = ("==", "!=", ">=", "<=", ">", "<") operators = ("==", "!=", ">=", "<=", ">", "<")
def __init__(self, i, attr, value, predicate, is_extension=False, vocab=None): def __init__(self, i, attr, value, predicate, is_extension=False, vocab=None,
regex=False, fuzzy=None, fuzzy_compare=None):
self.i = i self.i = i
self.attr = attr self.attr = attr
self.value = value self.value = value
self.predicate = predicate self.predicate = predicate
self.is_extension = is_extension self.is_extension = is_extension
self.key = (attr, self.predicate, srsly.json_dumps(value, sort_keys=True)) self.key = (self.attr, self.predicate, srsly.json_dumps(value, sort_keys=True))
if self.predicate not in self.operators: if self.predicate not in self.operators:
raise ValueError(Errors.E126.format(good=self.operators, bad=self.predicate)) raise ValueError(Errors.E126.format(good=self.operators, bad=self.predicate))
@ -928,7 +1000,7 @@ class _ComparisonPredicate:
return value < self.value return value < self.value
def _get_extra_predicates(spec, extra_predicates, vocab): def _get_extra_predicates(spec, extra_predicates, vocab, fuzzy_compare):
predicate_types = { predicate_types = {
"REGEX": _RegexPredicate, "REGEX": _RegexPredicate,
"IN": _SetPredicate, "IN": _SetPredicate,
@ -942,6 +1014,16 @@ def _get_extra_predicates(spec, extra_predicates, vocab):
"<=": _ComparisonPredicate, "<=": _ComparisonPredicate,
">": _ComparisonPredicate, ">": _ComparisonPredicate,
"<": _ComparisonPredicate, "<": _ComparisonPredicate,
"FUZZY": _FuzzyPredicate,
"FUZZY1": _FuzzyPredicate,
"FUZZY2": _FuzzyPredicate,
"FUZZY3": _FuzzyPredicate,
"FUZZY4": _FuzzyPredicate,
"FUZZY5": _FuzzyPredicate,
"FUZZY6": _FuzzyPredicate,
"FUZZY7": _FuzzyPredicate,
"FUZZY8": _FuzzyPredicate,
"FUZZY9": _FuzzyPredicate,
} }
seen_predicates = {pred.key: pred.i for pred in extra_predicates} seen_predicates = {pred.key: pred.i for pred in extra_predicates}
output = [] output = []
@ -959,12 +1041,40 @@ def _get_extra_predicates(spec, extra_predicates, vocab):
attr = "ORTH" attr = "ORTH"
attr = IDS.get(attr.upper()) attr = IDS.get(attr.upper())
if isinstance(value, dict): if isinstance(value, dict):
processed = False output.extend(_get_extra_predicates_dict(attr, value, vocab, predicate_types,
value_with_upper_keys = {k.upper(): v for k, v in value.items()} extra_predicates, seen_predicates, fuzzy_compare=fuzzy_compare))
for type_, cls in predicate_types.items(): return output
if type_ in value_with_upper_keys:
predicate = cls(len(extra_predicates), attr, value_with_upper_keys[type_], type_, vocab=vocab)
# Don't create a redundant predicates. def _get_extra_predicates_dict(attr, value_dict, vocab, predicate_types,
extra_predicates, seen_predicates, regex=False, fuzzy=None, fuzzy_compare=None):
output = []
for type_, value in value_dict.items():
type_ = type_.upper()
cls = predicate_types.get(type_)
if cls is None:
warnings.warn(Warnings.W035.format(pattern=value_dict))
# ignore unrecognized predicate type
continue
elif cls == _RegexPredicate:
if isinstance(value, dict):
# add predicates inside regex operator
output.extend(_get_extra_predicates_dict(attr, value, vocab, predicate_types,
extra_predicates, seen_predicates,
regex=True))
continue
elif cls == _FuzzyPredicate:
if isinstance(value, dict):
# add predicates inside fuzzy operator
fuzz = type_[len("FUZZY"):] # number after prefix
fuzzy_val = int(fuzz) if fuzz else -1
output.extend(_get_extra_predicates_dict(attr, value, vocab, predicate_types,
extra_predicates, seen_predicates,
fuzzy=fuzzy_val, fuzzy_compare=fuzzy_compare))
continue
predicate = cls(len(extra_predicates), attr, value, type_, vocab=vocab,
regex=regex, fuzzy=fuzzy, fuzzy_compare=fuzzy_compare)
# Don't create redundant predicates.
# This helps with efficiency, as we're caching the results. # This helps with efficiency, as we're caching the results.
if predicate.key in seen_predicates: if predicate.key in seen_predicates:
output.append(seen_predicates[predicate.key]) output.append(seen_predicates[predicate.key])
@ -972,9 +1082,6 @@ def _get_extra_predicates(spec, extra_predicates, vocab):
extra_predicates.append(predicate) extra_predicates.append(predicate)
output.append(predicate.i) output.append(predicate.i)
seen_predicates[predicate.key] = predicate.i seen_predicates[predicate.key] = predicate.i
processed = True
if not processed:
warnings.warn(Warnings.W035.format(pattern=value))
return output return output
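A sketch of the pattern syntax these predicates enable: FUZZY uses the default budget, FUZZYn caps the edit distance at n, and a FUZZY operator can also wrap an IN/NOT_IN set (text and pattern keys are illustrative):

    import spacy
    from spacy.matcher import Matcher

    nlp = spacy.blank("en")
    matcher = Matcher(nlp.vocab)
    matcher.add("GREETING", [[{"LOWER": {"FUZZY": "hello"}}]])                 # default budget
    matcher.add("COLOR", [[{"LOWER": {"FUZZY1": {"IN": ["gray", "grey"]}}}]])  # at most 1 edit
    doc = nlp("helo there")
    print(matcher(doc))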

spacy/matcher/polyleven.c (new file, 384 lines)
View File

@ -0,0 +1,384 @@
/*
* Adapted from Polyleven (https://ceptord.net/)
*
* Source: https://github.com/fujimotos/polyleven/blob/c3f95a080626c5652f0151a2e449963288ccae84/polyleven.c
*
* Copyright (c) 2021 Fujimoto Seiji <fujimoto@ceptord.net>
* Copyright (c) 2021 Max Bachmann <kontakt@maxbachmann.de>
* Copyright (c) 2022 Nick Mazuk
* Copyright (c) 2022 Michael Weiss <code@mweiss.ch>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <Python.h>
#include <stdint.h>
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define CDIV(a,b) ((a) / (b) + ((a) % (b) > 0))
#define BIT(i,n) (((i) >> (n)) & 1)
#define FLIP(i,n) ((i) ^ ((uint64_t) 1 << (n)))
#define ISASCII(kd) ((kd) == PyUnicode_1BYTE_KIND)
/*
* Bare bone of PyUnicode
*/
struct strbuf {
void *ptr;
int kind;
int64_t len;
};
static void strbuf_init(struct strbuf *s, PyObject *o)
{
s->ptr = PyUnicode_DATA(o);
s->kind = PyUnicode_KIND(o);
s->len = PyUnicode_GET_LENGTH(o);
}
#define strbuf_read(s, i) PyUnicode_READ((s)->kind, (s)->ptr, (i))
/*
* An encoded mbleven model table.
*
* Each 8-bit integer represents an edit sequence, using two
* bits for a single operation.
*
* 01 = DELETE, 10 = INSERT, 11 = REPLACE
*
* For example, 13 is '1101' in binary notation, so it means
* DELETE + REPLACE.
*/
static const uint8_t MBLEVEN_MATRIX[] = {
3, 0, 0, 0, 0, 0, 0, 0,
1, 0, 0, 0, 0, 0, 0, 0,
15, 9, 6, 0, 0, 0, 0, 0,
13, 7, 0, 0, 0, 0, 0, 0,
5, 0, 0, 0, 0, 0, 0, 0,
63, 39, 45, 57, 54, 30, 27, 0,
61, 55, 31, 37, 25, 22, 0, 0,
53, 29, 23, 0, 0, 0, 0, 0,
21, 0, 0, 0, 0, 0, 0, 0,
};
#define MBLEVEN_MATRIX_GET(k, d) ((((k) + (k) * (k)) / 2 - 1) + (d)) * 8
static int64_t mbleven_ascii(char *s1, int64_t len1,
char *s2, int64_t len2, int k)
{
int pos;
uint8_t m;
int64_t i, j, c, r;
pos = MBLEVEN_MATRIX_GET(k, len1 - len2);
r = k + 1;
while (MBLEVEN_MATRIX[pos]) {
m = MBLEVEN_MATRIX[pos++];
i = j = c = 0;
while (i < len1 && j < len2) {
if (s1[i] != s2[j]) {
c++;
if (!m) break;
if (m & 1) i++;
if (m & 2) j++;
m >>= 2;
} else {
i++;
j++;
}
}
c += (len1 - i) + (len2 - j);
r = MIN(r, c);
if (r < 2) {
return r;
}
}
return r;
}
static int64_t mbleven(PyObject *o1, PyObject *o2, int64_t k)
{
int pos;
uint8_t m;
int64_t i, j, c, r;
struct strbuf s1, s2;
strbuf_init(&s1, o1);
strbuf_init(&s2, o2);
if (s1.len < s2.len)
return mbleven(o2, o1, k);
if (k > 3)
return -1;
if (k < s1.len - s2.len)
return k + 1;
if (ISASCII(s1.kind) && ISASCII(s2.kind))
return mbleven_ascii(s1.ptr, s1.len, s2.ptr, s2.len, k);
pos = MBLEVEN_MATRIX_GET(k, s1.len - s2.len);
r = k + 1;
while (MBLEVEN_MATRIX[pos]) {
m = MBLEVEN_MATRIX[pos++];
i = j = c = 0;
while (i < s1.len && j < s2.len) {
if (strbuf_read(&s1, i) != strbuf_read(&s2, j)) {
c++;
if (!m) break;
if (m & 1) i++;
if (m & 2) j++;
m >>= 2;
} else {
i++;
j++;
}
}
c += (s1.len - i) + (s2.len - j);
r = MIN(r, c);
if (r < 2) {
return r;
}
}
return r;
}
/*
* Data structure to store Peq (equality bit-vector).
*/
struct blockmap_entry {
uint32_t key[128];
uint64_t val[128];
};
struct blockmap {
int64_t nr;
struct blockmap_entry *list;
};
#define blockmap_key(c) ((c) | 0x80000000U)
#define blockmap_hash(c) ((c) % 128)
static int blockmap_init(struct blockmap *map, struct strbuf *s)
{
int64_t i;
struct blockmap_entry *be;
uint32_t c, k;
uint8_t h;
map->nr = CDIV(s->len, 64);
map->list = calloc(1, map->nr * sizeof(struct blockmap_entry));
if (map->list == NULL) {
PyErr_NoMemory();
return -1;
}
for (i = 0; i < s->len; i++) {
be = &(map->list[i / 64]);
c = strbuf_read(s, i);
h = blockmap_hash(c);
k = blockmap_key(c);
while (be->key[h] && be->key[h] != k)
h = blockmap_hash(h + 1);
be->key[h] = k;
be->val[h] |= (uint64_t) 1 << (i % 64);
}
return 0;
}
static void blockmap_clear(struct blockmap *map)
{
if (map->list)
free(map->list);
map->list = NULL;
map->nr = 0;
}
static uint64_t blockmap_get(struct blockmap *map, int block, uint32_t c)
{
struct blockmap_entry *be;
uint8_t h;
uint32_t k;
h = blockmap_hash(c);
k = blockmap_key(c);
be = &(map->list[block]);
while (be->key[h] && be->key[h] != k)
h = blockmap_hash(h + 1);
return be->key[h] == k ? be->val[h] : 0;
}
/*
* Myers' bit-parallel algorithm
*
* See: G. Myers. "A fast bit-vector algorithm for approximate string
* matching based on dynamic programming." Journal of the ACM, 1999.
*/
static int64_t myers1999_block(struct strbuf *s1, struct strbuf *s2,
struct blockmap *map)
{
uint64_t Eq, Xv, Xh, Ph, Mh, Pv, Mv, Last;
uint64_t *Mhc, *Phc;
int64_t i, b, hsize, vsize, Score;
uint8_t Pb, Mb;
hsize = CDIV(s1->len, 64);
vsize = CDIV(s2->len, 64);
Score = s2->len;
Phc = malloc(hsize * 2 * sizeof(uint64_t));
if (Phc == NULL) {
PyErr_NoMemory();
return -1;
}
Mhc = Phc + hsize;
memset(Phc, -1, hsize * sizeof(uint64_t));
memset(Mhc, 0, hsize * sizeof(uint64_t));
Last = (uint64_t)1 << ((s2->len - 1) % 64);
for (b = 0; b < vsize; b++) {
Mv = 0;
Pv = (uint64_t) -1;
Score = s2->len;
for (i = 0; i < s1->len; i++) {
Eq = blockmap_get(map, b, strbuf_read(s1, i));
Pb = BIT(Phc[i / 64], i % 64);
Mb = BIT(Mhc[i / 64], i % 64);
Xv = Eq | Mv;
Xh = ((((Eq | Mb) & Pv) + Pv) ^ Pv) | Eq | Mb;
Ph = Mv | ~ (Xh | Pv);
Mh = Pv & Xh;
if (Ph & Last) Score++;
if (Mh & Last) Score--;
if ((Ph >> 63) ^ Pb)
Phc[i / 64] = FLIP(Phc[i / 64], i % 64);
if ((Mh >> 63) ^ Mb)
Mhc[i / 64] = FLIP(Mhc[i / 64], i % 64);
Ph = (Ph << 1) | Pb;
Mh = (Mh << 1) | Mb;
Pv = Mh | ~ (Xv | Ph);
Mv = Ph & Xv;
}
}
free(Phc);
return Score;
}
static int64_t myers1999_simple(uint8_t *s1, int64_t len1, uint8_t *s2, int64_t len2)
{
uint64_t Peq[256];
uint64_t Eq, Xv, Xh, Ph, Mh, Pv, Mv, Last;
int64_t i;
int64_t Score = len2;
memset(Peq, 0, sizeof(Peq));
for (i = 0; i < len2; i++)
Peq[s2[i]] |= (uint64_t) 1 << i;
Mv = 0;
Pv = (uint64_t) -1;
Last = (uint64_t) 1 << (len2 - 1);
for (i = 0; i < len1; i++) {
Eq = Peq[s1[i]];
Xv = Eq | Mv;
Xh = (((Eq & Pv) + Pv) ^ Pv) | Eq;
Ph = Mv | ~ (Xh | Pv);
Mh = Pv & Xh;
if (Ph & Last) Score++;
if (Mh & Last) Score--;
Ph = (Ph << 1) | 1;
Mh = (Mh << 1);
Pv = Mh | ~ (Xv | Ph);
Mv = Ph & Xv;
}
return Score;
}
static int64_t myers1999(PyObject *o1, PyObject *o2)
{
struct strbuf s1, s2;
struct blockmap map;
int64_t ret;
strbuf_init(&s1, o1);
strbuf_init(&s2, o2);
if (s1.len < s2.len)
return myers1999(o2, o1);
if (ISASCII(s1.kind) && ISASCII(s2.kind) && s2.len < 65)
return myers1999_simple(s1.ptr, s1.len, s2.ptr, s2.len);
if (blockmap_init(&map, &s2))
return -1;
ret = myers1999_block(&s1, &s2, &map);
blockmap_clear(&map);
return ret;
}
/*
* Interface functions
*/
static int64_t polyleven(PyObject *o1, PyObject *o2, int64_t k)
{
int64_t len1, len2;
len1 = PyUnicode_GET_LENGTH(o1);
len2 = PyUnicode_GET_LENGTH(o2);
if (len1 < len2)
return polyleven(o2, o1, k);
if (k == 0)
return PyUnicode_Compare(o1, o2) ? 1 : 0;
if (0 < k && k < len1 - len2)
return k + 1;
if (len2 == 0)
return len1;
if (0 < k && k < 4)
return mbleven(o1, o2, k);
return myers1999(o1, o2);
}
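A sketch of the bounded behaviour from the Python side: when the true distance exceeds the bound k, the code above returns some value greater than k (typically k + 1) rather than the exact distance, so callers should only compare against the bound:

    from spacy.matcher import levenshtein

    assert levenshtein("weather", "whether", 2) <= 2  # within budget: exact distance
    assert levenshtein("spacy", "numpy", 1) > 1       # over budget: only "> k" is meaningful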

View File

@ -89,11 +89,14 @@ def pipes_with_nvtx_range(
types.MethodType(nvtx_range_wrapper_for_pipe_method, pipe), func types.MethodType(nvtx_range_wrapper_for_pipe_method, pipe), func
) )
# Try to preserve the original function signature. # We need to preserve the original function signature so that
# the original parameters are passed to pydantic for validation downstream.
try: try:
wrapped_func.__signature__ = inspect.signature(func) # type: ignore wrapped_func.__signature__ = inspect.signature(func) # type: ignore
except: except:
pass # Can fail for Cython methods that do not have bindings.
warnings.warn(Warnings.W122.format(method=name, pipe=pipe.name))
continue
try: try:
setattr( setattr(
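A standalone sketch of the signature-copying trick referenced above (the function names are illustrative):

    import inspect

    def original(a: int, b: str = "x") -> str:
        return b * a

    def wrapper(*args, **kwargs):
        return original(*args, **kwargs)

    # Copying the signature keeps downstream introspection/validation working.
    wrapper.__signature__ = inspect.signature(original)
    print(inspect.signature(wrapper))  # (a: int, b: str = 'x') -> str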

View File

@ -1,11 +1,12 @@
from pathlib import Path from pathlib import Path
from typing import Optional, Callable, Iterable, List, Tuple from typing import Optional, Callable, Iterable, List, Tuple
from thinc.types import Floats2d from thinc.types import Floats2d
from thinc.api import chain, clone, list2ragged, reduce_mean, residual from thinc.api import chain, list2ragged, reduce_mean, residual
from thinc.api import Model, Maxout, Linear, noop, tuplify, Ragged from thinc.api import Model, Maxout, Linear, tuplify, Ragged
from ...util import registry from ...util import registry
from ...kb import KnowledgeBase, Candidate, get_candidates from ...kb import KnowledgeBase, InMemoryLookupKB
from ...kb import Candidate, get_candidates, get_candidates_batch
from ...vocab import Vocab from ...vocab import Vocab
from ...tokens import Span, Doc from ...tokens import Span, Doc
from ..extract_spans import extract_spans from ..extract_spans import extract_spans
@ -70,17 +71,18 @@ def span_maker_forward(model, docs: List[Doc], is_train) -> Tuple[Ragged, Callab
cands.append((start_token, end_token)) cands.append((start_token, end_token))
candidates.append(ops.asarray2i(cands)) candidates.append(ops.asarray2i(cands))
candlens = ops.asarray1i([len(cands) for cands in candidates]) lengths = model.ops.asarray1i([len(cands) for cands in candidates])
candidates = ops.xp.concatenate(candidates) out = Ragged(model.ops.flatten(candidates), lengths)
outputs = Ragged(candidates, candlens)
# because this is just rearranging docs, the backprop does nothing # because this is just rearranging docs, the backprop does nothing
return outputs, lambda x: [] return out, lambda x: []
@registry.misc("spacy.KBFromFile.v1") @registry.misc("spacy.KBFromFile.v1")
def load_kb(kb_path: Path) -> Callable[[Vocab], KnowledgeBase]: def load_kb(
def kb_from_file(vocab): kb_path: Path,
kb = KnowledgeBase(vocab, entity_vector_length=1) ) -> Callable[[Vocab], KnowledgeBase]:
def kb_from_file(vocab: Vocab):
kb = InMemoryLookupKB(vocab, entity_vector_length=1)
kb.from_disk(kb_path) kb.from_disk(kb_path)
return kb return kb
@ -88,9 +90,11 @@ def load_kb(kb_path: Path) -> Callable[[Vocab], KnowledgeBase]:
@registry.misc("spacy.EmptyKB.v1") @registry.misc("spacy.EmptyKB.v1")
def empty_kb(entity_vector_length: int) -> Callable[[Vocab], KnowledgeBase]: def empty_kb(
def empty_kb_factory(vocab): entity_vector_length: int,
return KnowledgeBase(vocab=vocab, entity_vector_length=entity_vector_length) ) -> Callable[[Vocab], KnowledgeBase]:
def empty_kb_factory(vocab: Vocab):
return InMemoryLookupKB(vocab=vocab, entity_vector_length=entity_vector_length)
return empty_kb_factory return empty_kb_factory
@ -98,3 +102,10 @@ def empty_kb(entity_vector_length: int) -> Callable[[Vocab], KnowledgeBase]:
@registry.misc("spacy.CandidateGenerator.v1") @registry.misc("spacy.CandidateGenerator.v1")
def create_candidates() -> Callable[[KnowledgeBase, Span], Iterable[Candidate]]: def create_candidates() -> Callable[[KnowledgeBase, Span], Iterable[Candidate]]:
return get_candidates return get_candidates
@registry.misc("spacy.CandidateBatchGenerator.v1")
def create_candidates_batch() -> Callable[
[KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]
]:
return get_candidates_batch
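
Building on the registry functions above, a project-specific batch candidate generator can be registered the same way. The registry name `my_project.CustomCandidateBatchGenerator.v1` below is hypothetical; everything else follows the imports shown in this file:

from typing import Callable, Iterable
from spacy.kb import Candidate, InMemoryLookupKB, get_candidates_batch
from spacy.tokens import Span
from spacy.util import registry

@registry.misc("my_project.CustomCandidateBatchGenerator.v1")  # hypothetical name
def create_custom_candidates_batch() -> Callable[
    [InMemoryLookupKB, Iterable[Span]], Iterable[Iterable[Candidate]]
]:
    def get_candidates(kb: InMemoryLookupKB, mentions: Iterable[Span]):
        # Delegate to the built-in batch lookup; a real project could add
        # caching or alias normalization here.
        return get_candidates_batch(kb, mentions)

    return get_candidates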

View File

@ -1,7 +1,6 @@
from typing import cast, Any, Callable, Dict, Iterable, List, Optional from typing import cast, Any, Callable, Dict, Iterable, List, Optional
from typing import Sequence, Tuple, Union from typing import Tuple
from collections import Counter from collections import Counter
from copy import deepcopy
from itertools import islice from itertools import islice
import numpy as np import numpy as np
@ -129,7 +128,7 @@ class EditTreeLemmatizer(TrainablePipe):
for (predicted, gold_lemma) in zip( for (predicted, gold_lemma) in zip(
eg.predicted, eg.get_aligned("LEMMA", as_string=True) eg.predicted, eg.get_aligned("LEMMA", as_string=True)
): ):
if gold_lemma is None: if gold_lemma is None or gold_lemma == "":
label = -1 label = -1
else: else:
tree_id = self.trees.add(predicted.text, gold_lemma) tree_id = self.trees.add(predicted.text, gold_lemma)
@ -149,9 +148,7 @@ class EditTreeLemmatizer(TrainablePipe):
if not any(len(doc) for doc in docs): if not any(len(doc) for doc in docs):
# Handle cases where there are no tokens in any docs. # Handle cases where there are no tokens in any docs.
n_labels = len(self.cfg["labels"]) n_labels = len(self.cfg["labels"])
guesses: List[Ints2d] = [ guesses: List[Ints2d] = [self.model.ops.alloc2i(0, n_labels) for _ in docs]
self.model.ops.alloc((0, n_labels), dtype="i") for doc in docs
]
assert len(guesses) == n_docs assert len(guesses) == n_docs
return guesses return guesses
scores = self.model.predict(docs) scores = self.model.predict(docs)
@ -331,9 +328,9 @@ class EditTreeLemmatizer(TrainablePipe):
tree = dict(tree) tree = dict(tree)
if "orig" in tree: if "orig" in tree:
tree["orig"] = self.vocab.strings[tree["orig"]] tree["orig"] = self.vocab.strings.add(tree["orig"])
if "orig" in tree: if "orig" in tree:
tree["subst"] = self.vocab.strings[tree["subst"]] tree["subst"] = self.vocab.strings.add(tree["subst"])
trees.append(tree) trees.append(tree)
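
The first hunk above treats an empty gold lemma the same as a missing one. As a rough, simplified sketch of that labelling step (not the component's real code), tokens without a usable gold lemma receive the sentinel label -1:

def assign_tree_labels(forms, gold_lemmas, trees):
    # Tokens with a missing or empty gold lemma get label -1 and are skipped
    # by the loss; all others are mapped to an edit-tree id via trees.add().
    labels = []
    for form, gold in zip(forms, gold_lemmas):
        if gold is None or gold == "":
            labels.append(-1)
        else:
            labels.append(trees.add(form, gold))
    return labels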

View File

@ -53,9 +53,11 @@ DEFAULT_NEL_MODEL = Config().from_str(default_model_config)["model"]
"incl_context": True, "incl_context": True,
"entity_vector_length": 64, "entity_vector_length": 64,
"get_candidates": {"@misc": "spacy.CandidateGenerator.v1"}, "get_candidates": {"@misc": "spacy.CandidateGenerator.v1"},
"get_candidates_batch": {"@misc": "spacy.CandidateBatchGenerator.v1"},
"overwrite": True, "overwrite": True,
"scorer": {"@scorers": "spacy.entity_linker_scorer.v1"}, "scorer": {"@scorers": "spacy.entity_linker_scorer.v1"},
"use_gold_ents": True, "use_gold_ents": True,
"candidates_batch_size": 1,
"threshold": None, "threshold": None,
}, },
default_score_weights={ default_score_weights={
@ -75,9 +77,13 @@ def make_entity_linker(
incl_context: bool, incl_context: bool,
entity_vector_length: int, entity_vector_length: int,
get_candidates: Callable[[KnowledgeBase, Span], Iterable[Candidate]], get_candidates: Callable[[KnowledgeBase, Span], Iterable[Candidate]],
get_candidates_batch: Callable[
[KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]
],
overwrite: bool, overwrite: bool,
scorer: Optional[Callable], scorer: Optional[Callable],
use_gold_ents: bool, use_gold_ents: bool,
candidates_batch_size: int,
threshold: Optional[float] = None, threshold: Optional[float] = None,
): ):
"""Construct an EntityLinker component. """Construct an EntityLinker component.
@ -90,17 +96,21 @@ def make_entity_linker(
incl_prior (bool): Whether or not to include prior probabilities from the KB in the model. incl_prior (bool): Whether or not to include prior probabilities from the KB in the model.
incl_context (bool): Whether or not to include the local context in the model. incl_context (bool): Whether or not to include the local context in the model.
entity_vector_length (int): Size of encoding vectors in the KB. entity_vector_length (int): Size of encoding vectors in the KB.
get_candidates (Callable[[KnowledgeBase, "Span"], Iterable[Candidate]]): Function that get_candidates (Callable[[KnowledgeBase, Span], Iterable[Candidate]]): Function that
produces a list of candidates, given a certain knowledge base and a textual mention. produces a list of candidates, given a certain knowledge base and a textual mention.
get_candidates_batch (
Callable[[KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]], Iterable[Candidate]]
): Function that produces a list of candidates, given a certain knowledge base and several textual mentions.
scorer (Optional[Callable]): The scoring method. scorer (Optional[Callable]): The scoring method.
use_gold_ents (bool): Whether to copy entities from gold docs or not. If false, another use_gold_ents (bool): Whether to copy entities from gold docs or not. If false, another
component must provide entity annotations. component must provide entity annotations.
candidates_batch_size (int): Size of batches for entity candidate generation.
threshold (Optional[float]): Confidence threshold for entity predictions. If confidence is below the threshold, threshold (Optional[float]): Confidence threshold for entity predictions. If confidence is below the threshold,
prediction is discarded. If None, predictions are not filtered by any threshold. prediction is discarded. If None, predictions are not filtered by any threshold.
""" """
if not model.attrs.get("include_span_maker", False): if not model.attrs.get("include_span_maker", False):
# The only difference in arguments here is that use_gold_ents is not available # The only difference in arguments here is that use_gold_ents and threshold aren't available.
return EntityLinker_v1( return EntityLinker_v1(
nlp.vocab, nlp.vocab,
model, model,
@ -124,9 +134,11 @@ def make_entity_linker(
incl_context=incl_context, incl_context=incl_context,
entity_vector_length=entity_vector_length, entity_vector_length=entity_vector_length,
get_candidates=get_candidates, get_candidates=get_candidates,
get_candidates_batch=get_candidates_batch,
overwrite=overwrite, overwrite=overwrite,
scorer=scorer, scorer=scorer,
use_gold_ents=use_gold_ents, use_gold_ents=use_gold_ents,
candidates_batch_size=candidates_batch_size,
threshold=threshold, threshold=threshold,
) )
@ -160,9 +172,13 @@ class EntityLinker(TrainablePipe):
incl_context: bool, incl_context: bool,
entity_vector_length: int, entity_vector_length: int,
get_candidates: Callable[[KnowledgeBase, Span], Iterable[Candidate]], get_candidates: Callable[[KnowledgeBase, Span], Iterable[Candidate]],
get_candidates_batch: Callable[
[KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]
],
overwrite: bool = BACKWARD_OVERWRITE, overwrite: bool = BACKWARD_OVERWRITE,
scorer: Optional[Callable] = entity_linker_score, scorer: Optional[Callable] = entity_linker_score,
use_gold_ents: bool, use_gold_ents: bool,
candidates_batch_size: int,
threshold: Optional[float] = None, threshold: Optional[float] = None,
) -> None: ) -> None:
"""Initialize an entity linker. """Initialize an entity linker.
@ -178,10 +194,14 @@ class EntityLinker(TrainablePipe):
entity_vector_length (int): Size of encoding vectors in the KB. entity_vector_length (int): Size of encoding vectors in the KB.
get_candidates (Callable[[KnowledgeBase, Span], Iterable[Candidate]]): Function that get_candidates (Callable[[KnowledgeBase, Span], Iterable[Candidate]]): Function that
produces a list of candidates, given a certain knowledge base and a textual mention. produces a list of candidates, given a certain knowledge base and a textual mention.
scorer (Optional[Callable]): The scoring method. Defaults to get_candidates_batch (
Scorer.score_links. Callable[[KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]],
Iterable[Candidate]]
): Function that produces a list of candidates, given a certain knowledge base and several textual mentions.
scorer (Optional[Callable]): The scoring method. Defaults to Scorer.score_links.
use_gold_ents (bool): Whether to copy entities from gold docs or not. If false, another use_gold_ents (bool): Whether to copy entities from gold docs or not. If false, another
component must provide entity annotations. component must provide entity annotations.
candidates_batch_size (int): Size of batches for entity candidate generation.
threshold (Optional[float]): Confidence threshold for entity predictions. If confidence is below the threshold (Optional[float]): Confidence threshold for entity predictions. If confidence is below the
threshold, prediction is discarded. If None, predictions are not filtered by any threshold. threshold, prediction is discarded. If None, predictions are not filtered by any threshold.
DOCS: https://spacy.io/api/entitylinker#init DOCS: https://spacy.io/api/entitylinker#init
@ -204,22 +224,27 @@ class EntityLinker(TrainablePipe):
self.incl_prior = incl_prior self.incl_prior = incl_prior
self.incl_context = incl_context self.incl_context = incl_context
self.get_candidates = get_candidates self.get_candidates = get_candidates
self.get_candidates_batch = get_candidates_batch
self.cfg: Dict[str, Any] = {"overwrite": overwrite} self.cfg: Dict[str, Any] = {"overwrite": overwrite}
self.distance = CosineDistance(normalize=False) self.distance = CosineDistance(normalize=False)
# how many neighbour sentences to take into account # how many neighbour sentences to take into account
# create an empty KB by default. If you want to load a predefined one, specify it in 'initialize'. # create an empty KB by default
self.kb = empty_kb(entity_vector_length)(self.vocab) self.kb = empty_kb(entity_vector_length)(self.vocab)
self.scorer = scorer self.scorer = scorer
self.use_gold_ents = use_gold_ents self.use_gold_ents = use_gold_ents
self.candidates_batch_size = candidates_batch_size
self.threshold = threshold self.threshold = threshold
if candidates_batch_size < 1:
raise ValueError(Errors.E1044)
def set_kb(self, kb_loader: Callable[[Vocab], KnowledgeBase]): def set_kb(self, kb_loader: Callable[[Vocab], KnowledgeBase]):
"""Define the KB of this pipe by providing a function that will """Define the KB of this pipe by providing a function that will
create it using this object's vocab.""" create it using this object's vocab."""
if not callable(kb_loader): if not callable(kb_loader):
raise ValueError(Errors.E885.format(arg_type=type(kb_loader))) raise ValueError(Errors.E885.format(arg_type=type(kb_loader)))
self.kb = kb_loader(self.vocab) self.kb = kb_loader(self.vocab) # type: ignore
def validate_kb(self) -> None: def validate_kb(self) -> None:
# Raise an error if the knowledge base is not initialized. # Raise an error if the knowledge base is not initialized.
@ -241,8 +266,8 @@ class EntityLinker(TrainablePipe):
get_examples (Callable[[], Iterable[Example]]): Function that get_examples (Callable[[], Iterable[Example]]): Function that
returns a representative sample of gold-standard Example objects. returns a representative sample of gold-standard Example objects.
nlp (Language): The current nlp object the component is part of. nlp (Language): The current nlp object the component is part of.
kb_loader (Callable[[Vocab], KnowledgeBase]): A function that creates a KnowledgeBase from a Vocab instance. kb_loader (Callable[[Vocab], KnowledgeBase]): A function that creates a KnowledgeBase from a Vocab
Note that providing this argument, will overwrite all data accumulated in the current KB. instance. Note that providing this argument will overwrite all data accumulated in the current KB.
Use this only when loading a KB as-such from file. Use this only when loading a KB as-such from file.
DOCS: https://spacy.io/api/entitylinker#initialize DOCS: https://spacy.io/api/entitylinker#initialize
@ -419,15 +444,40 @@ class EntityLinker(TrainablePipe):
if len(doc) == 0: if len(doc) == 0:
continue continue
sentences = [s for s in doc.sents] sentences = [s for s in doc.sents]
# Looping through each entity (TODO: rewrite)
for ent in doc.ents: # Loop over entities in batches.
for ent_idx in range(0, len(doc.ents), self.candidates_batch_size):
ent_batch = doc.ents[ent_idx : ent_idx + self.candidates_batch_size]
# Look up candidate entities.
valid_ent_idx = [
idx
for idx in range(len(ent_batch))
if ent_batch[idx].label_ not in self.labels_discard
]
batch_candidates = list(
self.get_candidates_batch(
self.kb, [ent_batch[idx] for idx in valid_ent_idx]
)
if self.candidates_batch_size > 1
else [
self.get_candidates(self.kb, ent_batch[idx])
for idx in valid_ent_idx
]
)
# Looping through each entity in batch (TODO: rewrite)
for j, ent in enumerate(ent_batch):
sent_index = sentences.index(ent.sent) sent_index = sentences.index(ent.sent)
assert sent_index >= 0 assert sent_index >= 0
if self.incl_context: if self.incl_context:
# get n_neighbour sentences, clipped to the length of the document # get n_neighbour sentences, clipped to the length of the document
start_sentence = max(0, sent_index - self.n_sents) start_sentence = max(0, sent_index - self.n_sents)
end_sentence = min(len(sentences) - 1, sent_index + self.n_sents) end_sentence = min(
len(sentences) - 1, sent_index + self.n_sents
)
start_token = sentences[start_sentence].start start_token = sentences[start_sentence].start
end_token = sentences[end_sentence].end end_token = sentences[end_sentence].end
sent_doc = doc[start_token:end_token].as_doc() sent_doc = doc[start_token:end_token].as_doc()
@ -440,7 +490,7 @@ class EntityLinker(TrainablePipe):
# ignoring this entity - setting to NIL # ignoring this entity - setting to NIL
final_kb_ids.append(self.NIL) final_kb_ids.append(self.NIL)
else: else:
candidates = list(self.get_candidates(self.kb, ent)) candidates = list(batch_candidates[j])
if not candidates: if not candidates:
# no prediction possible for this entity - setting to NIL # no prediction possible for this entity - setting to NIL
final_kb_ids.append(self.NIL) final_kb_ids.append(self.NIL)
@ -476,9 +526,11 @@ class EntityLinker(TrainablePipe):
scores = prior_probs + sims - (prior_probs * sims) scores = prior_probs + sims - (prior_probs * sims)
final_kb_ids.append( final_kb_ids.append(
candidates[scores.argmax().item()].entity_ candidates[scores.argmax().item()].entity_
if self.threshold is None or scores.max() >= self.threshold if self.threshold is None
or scores.max() >= self.threshold
else EntityLinker.NIL else EntityLinker.NIL
) )
if not (len(final_kb_ids) == entity_count): if not (len(final_kb_ids) == entity_count):
err = Errors.E147.format( err = Errors.E147.format(
method="predict", msg="result variables not of equal length" method="predict", msg="result variables not of equal length"

View File

@ -1,6 +1,5 @@
import warnings
from typing import Optional, Union, List, Dict, Tuple, Iterable, Any, Callable, Sequence from typing import Optional, Union, List, Dict, Tuple, Iterable, Any, Callable, Sequence
from typing import cast import warnings
from collections import defaultdict from collections import defaultdict
from pathlib import Path from pathlib import Path
import srsly import srsly
@ -12,6 +11,7 @@ from ..errors import Errors, Warnings
from ..util import ensure_path, to_disk, from_disk, SimpleFrozenList, registry from ..util import ensure_path, to_disk, from_disk, SimpleFrozenList, registry
from ..tokens import Doc, Span from ..tokens import Doc, Span
from ..matcher import Matcher, PhraseMatcher from ..matcher import Matcher, PhraseMatcher
from ..matcher.levenshtein import levenshtein_compare
from ..scorer import get_ner_prf from ..scorer import get_ner_prf
@ -24,6 +24,7 @@ PatternType = Dict[str, Union[str, List[Dict[str, Any]]]]
assigns=["doc.ents", "token.ent_type", "token.ent_iob"], assigns=["doc.ents", "token.ent_type", "token.ent_iob"],
default_config={ default_config={
"phrase_matcher_attr": None, "phrase_matcher_attr": None,
"matcher_fuzzy_compare": {"@misc": "spacy.levenshtein_compare.v1"},
"validate": False, "validate": False,
"overwrite_ents": False, "overwrite_ents": False,
"ent_id_sep": DEFAULT_ENT_ID_SEP, "ent_id_sep": DEFAULT_ENT_ID_SEP,
@ -40,6 +41,7 @@ def make_entity_ruler(
nlp: Language, nlp: Language,
name: str, name: str,
phrase_matcher_attr: Optional[Union[int, str]], phrase_matcher_attr: Optional[Union[int, str]],
matcher_fuzzy_compare: Callable,
validate: bool, validate: bool,
overwrite_ents: bool, overwrite_ents: bool,
ent_id_sep: str, ent_id_sep: str,
@ -49,6 +51,7 @@ def make_entity_ruler(
nlp, nlp,
name, name,
phrase_matcher_attr=phrase_matcher_attr, phrase_matcher_attr=phrase_matcher_attr,
matcher_fuzzy_compare=matcher_fuzzy_compare,
validate=validate, validate=validate,
overwrite_ents=overwrite_ents, overwrite_ents=overwrite_ents,
ent_id_sep=ent_id_sep, ent_id_sep=ent_id_sep,
@ -82,6 +85,7 @@ class EntityRuler(Pipe):
name: str = "entity_ruler", name: str = "entity_ruler",
*, *,
phrase_matcher_attr: Optional[Union[int, str]] = None, phrase_matcher_attr: Optional[Union[int, str]] = None,
matcher_fuzzy_compare: Callable = levenshtein_compare,
validate: bool = False, validate: bool = False,
overwrite_ents: bool = False, overwrite_ents: bool = False,
ent_id_sep: str = DEFAULT_ENT_ID_SEP, ent_id_sep: str = DEFAULT_ENT_ID_SEP,
@ -100,7 +104,10 @@ class EntityRuler(Pipe):
added. Used to disable the current entity ruler while creating added. Used to disable the current entity ruler while creating
phrase patterns with the nlp object. phrase patterns with the nlp object.
phrase_matcher_attr (int / str): Token attribute to match on, passed phrase_matcher_attr (int / str): Token attribute to match on, passed
to the internal PhraseMatcher as `attr` to the internal PhraseMatcher as `attr`.
matcher_fuzzy_compare (Callable): The fuzzy comparison method for the
internal Matcher. Defaults to
spacy.matcher.levenshtein.levenshtein_compare.
validate (bool): Whether patterns should be validated, passed to validate (bool): Whether patterns should be validated, passed to
Matcher and PhraseMatcher as `validate` Matcher and PhraseMatcher as `validate`
patterns (iterable): Optional patterns to load in. patterns (iterable): Optional patterns to load in.
@ -118,7 +125,10 @@ class EntityRuler(Pipe):
self.token_patterns = defaultdict(list) # type: ignore self.token_patterns = defaultdict(list) # type: ignore
self.phrase_patterns = defaultdict(list) # type: ignore self.phrase_patterns = defaultdict(list) # type: ignore
self._validate = validate self._validate = validate
self.matcher = Matcher(nlp.vocab, validate=validate) self.matcher_fuzzy_compare = matcher_fuzzy_compare
self.matcher = Matcher(
nlp.vocab, validate=validate, fuzzy_compare=self.matcher_fuzzy_compare
)
self.phrase_matcher_attr = phrase_matcher_attr self.phrase_matcher_attr = phrase_matcher_attr
self.phrase_matcher = PhraseMatcher( self.phrase_matcher = PhraseMatcher(
nlp.vocab, attr=self.phrase_matcher_attr, validate=validate nlp.vocab, attr=self.phrase_matcher_attr, validate=validate
@ -317,7 +327,7 @@ class EntityRuler(Pipe):
phrase_pattern["id"] = ent_id phrase_pattern["id"] = ent_id
phrase_patterns.append(phrase_pattern) phrase_patterns.append(phrase_pattern)
for entry in token_patterns + phrase_patterns: # type: ignore[operator] for entry in token_patterns + phrase_patterns: # type: ignore[operator]
label = entry["label"] label = entry["label"] # type: ignore
if "id" in entry: if "id" in entry:
ent_label = label ent_label = label
label = self._create_label(label, entry["id"]) label = self._create_label(label, entry["id"])
@ -338,7 +348,11 @@ class EntityRuler(Pipe):
self.token_patterns = defaultdict(list) self.token_patterns = defaultdict(list)
self.phrase_patterns = defaultdict(list) self.phrase_patterns = defaultdict(list)
self._ent_ids = defaultdict(tuple) self._ent_ids = defaultdict(tuple)
self.matcher = Matcher(self.nlp.vocab, validate=self._validate) self.matcher = Matcher(
self.nlp.vocab,
validate=self._validate,
fuzzy_compare=self.matcher_fuzzy_compare,
)
self.phrase_matcher = PhraseMatcher( self.phrase_matcher = PhraseMatcher(
self.nlp.vocab, attr=self.phrase_matcher_attr, validate=self._validate self.nlp.vocab, attr=self.phrase_matcher_attr, validate=self._validate
) )
@ -432,7 +446,8 @@ class EntityRuler(Pipe):
self.overwrite = cfg.get("overwrite", False) self.overwrite = cfg.get("overwrite", False)
self.phrase_matcher_attr = cfg.get("phrase_matcher_attr", None) self.phrase_matcher_attr = cfg.get("phrase_matcher_attr", None)
self.phrase_matcher = PhraseMatcher( self.phrase_matcher = PhraseMatcher(
self.nlp.vocab, attr=self.phrase_matcher_attr self.nlp.vocab,
attr=self.phrase_matcher_attr,
) )
self.ent_id_sep = cfg.get("ent_id_sep", DEFAULT_ENT_ID_SEP) self.ent_id_sep = cfg.get("ent_id_sep", DEFAULT_ENT_ID_SEP)
else: else:
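
`matcher_fuzzy_compare` plugs Levenshtein-based comparison into the ruler's internal Matcher; together with the FUZZY token-pattern attribute (see the schema changes further below) it can be used roughly as follows, with hypothetical example text and label:

import spacy

nlp = spacy.blank("en")
ruler = nlp.add_pipe("entity_ruler")
# FUZZY allows a bounded number of edits between the token text and the
# pattern value; FUZZY1..FUZZY9 pin the maximum edit distance explicitly.
ruler.add_patterns(
    [{"label": "GPE", "pattern": [{"LOWER": {"FUZZY": "massachusetts"}}]}]
)
doc = nlp("They moved to Massachusets last year.")
print([(ent.text, ent.label_) for ent in doc.ents])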

View File

@ -68,8 +68,7 @@ class EntityLinker_v1(TrainablePipe):
entity_vector_length (int): Size of encoding vectors in the KB. entity_vector_length (int): Size of encoding vectors in the KB.
get_candidates (Callable[[KnowledgeBase, Span], Iterable[Candidate]]): Function that get_candidates (Callable[[KnowledgeBase, Span], Iterable[Candidate]]): Function that
produces a list of candidates, given a certain knowledge base and a textual mention. produces a list of candidates, given a certain knowledge base and a textual mention.
scorer (Optional[Callable]): The scoring method. Defaults to scorer (Optional[Callable]): The scoring method. Defaults to Scorer.score_links.
Scorer.score_links.
DOCS: https://spacy.io/api/entitylinker#init DOCS: https://spacy.io/api/entitylinker#init
""" """
self.vocab = vocab self.vocab = vocab
@ -115,7 +114,7 @@ class EntityLinker_v1(TrainablePipe):
get_examples (Callable[[], Iterable[Example]]): Function that get_examples (Callable[[], Iterable[Example]]): Function that
returns a representative sample of gold-standard Example objects. returns a representative sample of gold-standard Example objects.
nlp (Language): The current nlp object the component is part of. nlp (Language): The current nlp object the component is part of.
kb_loader (Callable[[Vocab], KnowledgeBase]): A function that creates a KnowledgeBase from a Vocab instance. kb_loader (Callable[[Vocab], KnowledgeBase]): A function that creates an InMemoryLookupKB from a Vocab instance.
Note that providing this argument, will overwrite all data accumulated in the current KB. Note that providing this argument, will overwrite all data accumulated in the current KB.
Use this only when loading a KB as-such from file. Use this only when loading a KB as-such from file.

View File

@ -1,4 +1,4 @@
# cython: infer_types=True, profile=True # cython: infer_types=True, profile=True, binding=True
from typing import Optional, Tuple, Iterable, Iterator, Callable, Union, Dict from typing import Optional, Tuple, Iterable, Iterator, Callable, Union, Dict
import srsly import srsly
import warnings import warnings

View File

@ -13,6 +13,7 @@ from ..util import ensure_path, SimpleFrozenList, registry
from ..tokens import Doc, Span from ..tokens import Doc, Span
from ..scorer import Scorer from ..scorer import Scorer
from ..matcher import Matcher, PhraseMatcher from ..matcher import Matcher, PhraseMatcher
from ..matcher.levenshtein import levenshtein_compare
from .. import util from .. import util
PatternType = Dict[str, Union[str, List[Dict[str, Any]]]] PatternType = Dict[str, Union[str, List[Dict[str, Any]]]]
@ -28,6 +29,7 @@ DEFAULT_SPANS_KEY = "ruler"
"overwrite_ents": False, "overwrite_ents": False,
"scorer": {"@scorers": "spacy.entity_ruler_scorer.v1"}, "scorer": {"@scorers": "spacy.entity_ruler_scorer.v1"},
"ent_id_sep": "__unused__", "ent_id_sep": "__unused__",
"matcher_fuzzy_compare": {"@misc": "spacy.levenshtein_compare.v1"},
}, },
default_score_weights={ default_score_weights={
"ents_f": 1.0, "ents_f": 1.0,
@ -40,6 +42,7 @@ def make_entity_ruler(
nlp: Language, nlp: Language,
name: str, name: str,
phrase_matcher_attr: Optional[Union[int, str]], phrase_matcher_attr: Optional[Union[int, str]],
matcher_fuzzy_compare: Callable,
validate: bool, validate: bool,
overwrite_ents: bool, overwrite_ents: bool,
scorer: Optional[Callable], scorer: Optional[Callable],
@ -57,6 +60,7 @@ def make_entity_ruler(
annotate_ents=True, annotate_ents=True,
ents_filter=ents_filter, ents_filter=ents_filter,
phrase_matcher_attr=phrase_matcher_attr, phrase_matcher_attr=phrase_matcher_attr,
matcher_fuzzy_compare=matcher_fuzzy_compare,
validate=validate, validate=validate,
overwrite=False, overwrite=False,
scorer=scorer, scorer=scorer,
@ -72,6 +76,7 @@ def make_entity_ruler(
"annotate_ents": False, "annotate_ents": False,
"ents_filter": {"@misc": "spacy.first_longest_spans_filter.v1"}, "ents_filter": {"@misc": "spacy.first_longest_spans_filter.v1"},
"phrase_matcher_attr": None, "phrase_matcher_attr": None,
"matcher_fuzzy_compare": {"@misc": "spacy.levenshtein_compare.v1"},
"validate": False, "validate": False,
"overwrite": True, "overwrite": True,
"scorer": { "scorer": {
@ -94,6 +99,7 @@ def make_span_ruler(
annotate_ents: bool, annotate_ents: bool,
ents_filter: Callable[[Iterable[Span], Iterable[Span]], Iterable[Span]], ents_filter: Callable[[Iterable[Span], Iterable[Span]], Iterable[Span]],
phrase_matcher_attr: Optional[Union[int, str]], phrase_matcher_attr: Optional[Union[int, str]],
matcher_fuzzy_compare: Callable,
validate: bool, validate: bool,
overwrite: bool, overwrite: bool,
scorer: Optional[Callable], scorer: Optional[Callable],
@ -106,6 +112,7 @@ def make_span_ruler(
annotate_ents=annotate_ents, annotate_ents=annotate_ents,
ents_filter=ents_filter, ents_filter=ents_filter,
phrase_matcher_attr=phrase_matcher_attr, phrase_matcher_attr=phrase_matcher_attr,
matcher_fuzzy_compare=matcher_fuzzy_compare,
validate=validate, validate=validate,
overwrite=overwrite, overwrite=overwrite,
scorer=scorer, scorer=scorer,
@ -170,7 +177,7 @@ def prioritize_existing_ents_filter(
@registry.misc("spacy.prioritize_existing_ents_filter.v1") @registry.misc("spacy.prioritize_existing_ents_filter.v1")
def make_preverse_existing_ents_filter(): def make_preserve_existing_ents_filter():
return prioritize_existing_ents_filter return prioritize_existing_ents_filter
@ -216,6 +223,7 @@ class SpanRuler(Pipe):
[Iterable[Span], Iterable[Span]], Iterable[Span] [Iterable[Span], Iterable[Span]], Iterable[Span]
] = util.filter_chain_spans, ] = util.filter_chain_spans,
phrase_matcher_attr: Optional[Union[int, str]] = None, phrase_matcher_attr: Optional[Union[int, str]] = None,
matcher_fuzzy_compare: Callable = levenshtein_compare,
validate: bool = False, validate: bool = False,
overwrite: bool = False, overwrite: bool = False,
scorer: Optional[Callable] = partial( scorer: Optional[Callable] = partial(
@ -246,6 +254,9 @@ class SpanRuler(Pipe):
phrase_matcher_attr (Optional[Union[int, str]]): Token attribute to phrase_matcher_attr (Optional[Union[int, str]]): Token attribute to
match on, passed to the internal PhraseMatcher as `attr`. Defaults match on, passed to the internal PhraseMatcher as `attr`. Defaults
to `None`. to `None`.
matcher_fuzzy_compare (Callable): The fuzzy comparison method for the
internal Matcher. Defaults to
spacy.matcher.levenshtein.levenshtein_compare.
validate (bool): Whether patterns should be validated, passed to validate (bool): Whether patterns should be validated, passed to
Matcher and PhraseMatcher as `validate`. Matcher and PhraseMatcher as `validate`.
overwrite (bool): Whether to remove any existing spans under this spans overwrite (bool): Whether to remove any existing spans under this spans
@ -266,6 +277,7 @@ class SpanRuler(Pipe):
self.spans_filter = spans_filter self.spans_filter = spans_filter
self.ents_filter = ents_filter self.ents_filter = ents_filter
self.scorer = scorer self.scorer = scorer
self.matcher_fuzzy_compare = matcher_fuzzy_compare
self._match_label_id_map: Dict[int, Dict[str, str]] = {} self._match_label_id_map: Dict[int, Dict[str, str]] = {}
self.clear() self.clear()
@ -451,7 +463,11 @@ class SpanRuler(Pipe):
DOCS: https://spacy.io/api/spanruler#clear DOCS: https://spacy.io/api/spanruler#clear
""" """
self._patterns: List[PatternType] = [] self._patterns: List[PatternType] = []
self.matcher: Matcher = Matcher(self.nlp.vocab, validate=self.validate) self.matcher: Matcher = Matcher(
self.nlp.vocab,
validate=self.validate,
fuzzy_compare=self.matcher_fuzzy_compare,
)
self.phrase_matcher: PhraseMatcher = PhraseMatcher( self.phrase_matcher: PhraseMatcher = PhraseMatcher(
self.nlp.vocab, self.nlp.vocab,
attr=self.phrase_matcher_attr, attr=self.phrase_matcher_attr,

View File

@ -1,7 +1,7 @@
from typing import List, Dict, Callable, Tuple, Optional, Iterable, Any, cast from typing import List, Dict, Callable, Tuple, Optional, Iterable, Any
from thinc.api import Config, Model, get_current_ops, set_dropout_rate, Ops from thinc.api import Config, Model, get_current_ops, set_dropout_rate, Ops
from thinc.api import Optimizer from thinc.api import Optimizer
from thinc.types import Ragged, Ints2d, Floats2d, Ints1d from thinc.types import Ragged, Ints2d, Floats2d
import numpy import numpy
@ -26,17 +26,17 @@ scorer = {"@layers": "spacy.LinearLogistic.v1"}
hidden_size = 128 hidden_size = 128
[model.tok2vec] [model.tok2vec]
@architectures = "spacy.Tok2Vec.v1" @architectures = "spacy.Tok2Vec.v2"
[model.tok2vec.embed] [model.tok2vec.embed]
@architectures = "spacy.MultiHashEmbed.v1" @architectures = "spacy.MultiHashEmbed.v2"
width = 96 width = 96
rows = [5000, 2000, 1000, 1000] rows = [5000, 2000, 1000, 1000]
attrs = ["ORTH", "PREFIX", "SUFFIX", "SHAPE"] attrs = ["ORTH", "PREFIX", "SUFFIX", "SHAPE"]
include_static_vectors = false include_static_vectors = false
[model.tok2vec.encode] [model.tok2vec.encode]
@architectures = "spacy.MaxoutWindowEncoder.v1" @architectures = "spacy.MaxoutWindowEncoder.v2"
width = ${model.tok2vec.embed.width} width = ${model.tok2vec.embed.width}
window_size = 1 window_size = 1
maxout_pieces = 3 maxout_pieces = 3
@ -133,6 +133,9 @@ def make_spancat(
spans_key (str): Key of the doc.spans dict to save the spans under. During spans_key (str): Key of the doc.spans dict to save the spans under. During
initialization and training, the component will look for spans on the initialization and training, the component will look for spans on the
reference document under the same key. reference document under the same key.
scorer (Optional[Callable]): The scoring method. Defaults to
Scorer.score_spans for the Doc.spans[spans_key] with overlapping
spans allowed.
threshold (float): Minimum probability to consider a prediction positive. threshold (float): Minimum probability to consider a prediction positive.
Spans with a positive prediction will be saved on the Doc. Defaults to Spans with a positive prediction will be saved on the Doc. Defaults to
0.5. 0.5.
@ -269,6 +272,9 @@ class SpanCategorizer(TrainablePipe):
DOCS: https://spacy.io/api/spancategorizer#predict DOCS: https://spacy.io/api/spancategorizer#predict
""" """
indices = self.suggester(docs, ops=self.model.ops) indices = self.suggester(docs, ops=self.model.ops)
if indices.lengths.sum() == 0:
scores = self.model.ops.alloc2f(0, 0)
else:
scores = self.model.predict((docs, indices)) # type: ignore scores = self.model.predict((docs, indices)) # type: ignore
return indices, scores return indices, scores

View File

@ -24,8 +24,8 @@ single_label_default_config = """
[model.tok2vec.embed] [model.tok2vec.embed]
@architectures = "spacy.MultiHashEmbed.v2" @architectures = "spacy.MultiHashEmbed.v2"
width = 64 width = 64
rows = [2000, 2000, 1000, 1000, 1000, 1000] rows = [2000, 2000, 500, 1000, 500]
attrs = ["ORTH", "LOWER", "PREFIX", "SUFFIX", "SHAPE", "ID"] attrs = ["NORM", "LOWER", "PREFIX", "SUFFIX", "SHAPE"]
include_static_vectors = false include_static_vectors = false
[model.tok2vec.encode] [model.tok2vec.encode]
@ -72,9 +72,9 @@ subword_features = true
"textcat", "textcat",
assigns=["doc.cats"], assigns=["doc.cats"],
default_config={ default_config={
"threshold": 0.5, "threshold": 0.0,
"model": DEFAULT_SINGLE_TEXTCAT_MODEL, "model": DEFAULT_SINGLE_TEXTCAT_MODEL,
"scorer": {"@scorers": "spacy.textcat_scorer.v1"}, "scorer": {"@scorers": "spacy.textcat_scorer.v2"},
}, },
default_score_weights={ default_score_weights={
"cats_score": 1.0, "cats_score": 1.0,
@ -87,7 +87,6 @@ subword_features = true
"cats_macro_f": None, "cats_macro_f": None,
"cats_macro_auc": None, "cats_macro_auc": None,
"cats_f_per_type": None, "cats_f_per_type": None,
"cats_macro_auc_per_type": None,
}, },
) )
def make_textcat( def make_textcat(
@ -118,7 +117,7 @@ def textcat_score(examples: Iterable[Example], **kwargs) -> Dict[str, Any]:
) )
@registry.scorers("spacy.textcat_scorer.v1") @registry.scorers("spacy.textcat_scorer.v2")
def make_textcat_scorer(): def make_textcat_scorer():
return textcat_score return textcat_score
@ -144,7 +143,8 @@ class TextCategorizer(TrainablePipe):
model (thinc.api.Model): The Thinc Model powering the pipeline component. model (thinc.api.Model): The Thinc Model powering the pipeline component.
name (str): The component instance name, used to add entries to the name (str): The component instance name, used to add entries to the
losses during training. losses during training.
threshold (float): Cutoff to consider a prediction "positive". threshold (float): Unused, not needed for single-label (exclusive
classes) classification.
scorer (Optional[Callable]): The scoring method. Defaults to scorer (Optional[Callable]): The scoring method. Defaults to
Scorer.score_cats for the attribute "cats". Scorer.score_cats for the attribute "cats".
@ -154,7 +154,11 @@ class TextCategorizer(TrainablePipe):
self.model = model self.model = model
self.name = name self.name = name
self._rehearsal_model = None self._rehearsal_model = None
cfg = {"labels": [], "threshold": threshold, "positive_label": None} cfg: Dict[str, Any] = {
"labels": [],
"threshold": threshold,
"positive_label": None,
}
self.cfg = dict(cfg) self.cfg = dict(cfg)
self.scorer = scorer self.scorer = scorer
@ -396,5 +400,9 @@ class TextCategorizer(TrainablePipe):
def _validate_categories(self, examples: Iterable[Example]): def _validate_categories(self, examples: Iterable[Example]):
"""Check whether the provided examples all have single-label cats annotations.""" """Check whether the provided examples all have single-label cats annotations."""
for ex in examples: for ex in examples:
if list(ex.reference.cats.values()).count(1.0) > 1: vals = list(ex.reference.cats.values())
if vals.count(1.0) > 1:
raise ValueError(Errors.E895.format(value=ex.reference.cats)) raise ValueError(Errors.E895.format(value=ex.reference.cats))
for val in vals:
if not (val == 1.0 or val == 0.0):
raise ValueError(Errors.E851.format(val=val))
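
The stricter `_validate_categories` above rejects soft labels for the exclusive-classes component: every value must be exactly 0.0 or 1.0, and at most one label may be 1.0. A small sketch of valid gold annotations (label names are arbitrary):

from spacy.lang.en import English
from spacy.training import Example

nlp = English()
textcat = nlp.add_pipe("textcat")
for label in ("POSITIVE", "NEGATIVE"):
    textcat.add_label(label)

doc = nlp.make_doc("A thoroughly enjoyable read.")
# Anything other than hard 0.0/1.0 values raises E851; more than one 1.0
# raises E895 for this exclusive-classes component.
example = Example.from_dict(doc, {"cats": {"POSITIVE": 1.0, "NEGATIVE": 0.0}})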

View File

@ -19,17 +19,17 @@ multi_label_default_config = """
@architectures = "spacy.TextCatEnsemble.v2" @architectures = "spacy.TextCatEnsemble.v2"
[model.tok2vec] [model.tok2vec]
@architectures = "spacy.Tok2Vec.v1" @architectures = "spacy.Tok2Vec.v2"
[model.tok2vec.embed] [model.tok2vec.embed]
@architectures = "spacy.MultiHashEmbed.v2" @architectures = "spacy.MultiHashEmbed.v2"
width = 64 width = 64
rows = [2000, 2000, 1000, 1000, 1000, 1000] rows = [2000, 2000, 500, 1000, 500]
attrs = ["ORTH", "LOWER", "PREFIX", "SUFFIX", "SHAPE", "ID"] attrs = ["NORM", "LOWER", "PREFIX", "SUFFIX", "SHAPE"]
include_static_vectors = false include_static_vectors = false
[model.tok2vec.encode] [model.tok2vec.encode]
@architectures = "spacy.MaxoutWindowEncoder.v1" @architectures = "spacy.MaxoutWindowEncoder.v2"
width = ${model.tok2vec.embed.width} width = ${model.tok2vec.embed.width}
window_size = 1 window_size = 1
maxout_pieces = 3 maxout_pieces = 3
@ -74,7 +74,7 @@ subword_features = true
default_config={ default_config={
"threshold": 0.5, "threshold": 0.5,
"model": DEFAULT_MULTI_TEXTCAT_MODEL, "model": DEFAULT_MULTI_TEXTCAT_MODEL,
"scorer": {"@scorers": "spacy.textcat_multilabel_scorer.v1"}, "scorer": {"@scorers": "spacy.textcat_multilabel_scorer.v2"},
}, },
default_score_weights={ default_score_weights={
"cats_score": 1.0, "cats_score": 1.0,
@ -87,7 +87,6 @@ subword_features = true
"cats_macro_f": None, "cats_macro_f": None,
"cats_macro_auc": None, "cats_macro_auc": None,
"cats_f_per_type": None, "cats_f_per_type": None,
"cats_macro_auc_per_type": None,
}, },
) )
def make_multilabel_textcat( def make_multilabel_textcat(
@ -96,8 +95,8 @@ def make_multilabel_textcat(
model: Model[List[Doc], List[Floats2d]], model: Model[List[Doc], List[Floats2d]],
threshold: float, threshold: float,
scorer: Optional[Callable], scorer: Optional[Callable],
) -> "TextCategorizer": ) -> "MultiLabel_TextCategorizer":
"""Create a TextCategorizer component. The text categorizer predicts categories """Create a MultiLabel_TextCategorizer component. The text categorizer predicts categories
over a whole document. It can learn one or more labels, and the labels are considered over a whole document. It can learn one or more labels, and the labels are considered
to be non-mutually exclusive, which means that there can be zero or more labels to be non-mutually exclusive, which means that there can be zero or more labels
per doc). per doc).
@ -105,6 +104,7 @@ def make_multilabel_textcat(
model (Model[List[Doc], List[Floats2d]]): A model instance that predicts model (Model[List[Doc], List[Floats2d]]): A model instance that predicts
scores for each category. scores for each category.
threshold (float): Cutoff to consider a prediction "positive". threshold (float): Cutoff to consider a prediction "positive".
scorer (Optional[Callable]): The scoring method.
""" """
return MultiLabel_TextCategorizer( return MultiLabel_TextCategorizer(
nlp.vocab, model, name, threshold=threshold, scorer=scorer nlp.vocab, model, name, threshold=threshold, scorer=scorer
@ -120,7 +120,7 @@ def textcat_multilabel_score(examples: Iterable[Example], **kwargs) -> Dict[str,
) )
@registry.scorers("spacy.textcat_multilabel_scorer.v1") @registry.scorers("spacy.textcat_multilabel_scorer.v2")
def make_textcat_multilabel_scorer(): def make_textcat_multilabel_scorer():
return textcat_multilabel_score return textcat_multilabel_score
@ -147,6 +147,7 @@ class MultiLabel_TextCategorizer(TextCategorizer):
name (str): The component instance name, used to add entries to the name (str): The component instance name, used to add entries to the
losses during training. losses during training.
threshold (float): Cutoff to consider a prediction "positive". threshold (float): Cutoff to consider a prediction "positive".
scorer (Optional[Callable]): The scoring method.
DOCS: https://spacy.io/api/textcategorizer#init DOCS: https://spacy.io/api/textcategorizer#init
""" """
@ -190,6 +191,8 @@ class MultiLabel_TextCategorizer(TextCategorizer):
for label in labels: for label in labels:
self.add_label(label) self.add_label(label)
subbatch = list(islice(get_examples(), 10)) subbatch = list(islice(get_examples(), 10))
self._validate_categories(subbatch)
doc_sample = [eg.reference for eg in subbatch] doc_sample = [eg.reference for eg in subbatch]
label_sample, _ = self._examples_to_truth(subbatch) label_sample, _ = self._examples_to_truth(subbatch)
self._require_labels() self._require_labels()
@ -200,4 +203,8 @@ class MultiLabel_TextCategorizer(TextCategorizer):
def _validate_categories(self, examples: Iterable[Example]): def _validate_categories(self, examples: Iterable[Example]):
"""This component allows any type of single- or multi-label annotations. """This component allows any type of single- or multi-label annotations.
This method overwrites the more strict one from 'textcat'.""" This method overwrites the more strict one from 'textcat'."""
pass # check that annotation values are valid
for ex in examples:
for val in ex.reference.cats.values():
if not (val == 1.0 or val == 0.0):
raise ValueError(Errors.E851.format(val=val))

View File

@ -123,9 +123,6 @@ class Tok2Vec(TrainablePipe):
width = self.model.get_dim("nO") width = self.model.get_dim("nO")
return [self.model.ops.alloc((0, width)) for doc in docs] return [self.model.ops.alloc((0, width)) for doc in docs]
tokvecs = self.model.predict(docs) tokvecs = self.model.predict(docs)
batch_id = Tok2VecListener.get_batch_id(docs)
for listener in self.listeners:
listener.receive(batch_id, tokvecs, _empty_backprop)
return tokvecs return tokvecs
def set_annotations(self, docs: Sequence[Doc], tokvecses) -> None: def set_annotations(self, docs: Sequence[Doc], tokvecses) -> None:
@ -286,6 +283,17 @@ class Tok2VecListener(Model):
def forward(model: Tok2VecListener, inputs, is_train: bool): def forward(model: Tok2VecListener, inputs, is_train: bool):
"""Supply the outputs from the upstream Tok2Vec component.""" """Supply the outputs from the upstream Tok2Vec component."""
if is_train: if is_train:
# This might occur during training when the tok2vec layer is frozen / hasn't been updated.
# In that case, it should be set to "annotating" so we can retrieve the embeddings from the doc.
if model._batch_id is None:
outputs = []
for doc in inputs:
if doc.tensor.size == 0:
raise ValueError(Errors.E203.format(name="tok2vec"))
else:
outputs.append(doc.tensor)
return outputs, _empty_backprop
else:
model.verify_inputs(inputs) model.verify_inputs(inputs)
return model._outputs, model._backprop return model._outputs, model._backprop
else: else:
@ -306,7 +314,7 @@ def forward(model: Tok2VecListener, inputs, is_train: bool):
outputs.append(model.ops.alloc2f(len(doc), width)) outputs.append(model.ops.alloc2f(len(doc), width))
else: else:
outputs.append(doc.tensor) outputs.append(doc.tensor)
return outputs, lambda dX: [] return outputs, _empty_backprop
def _empty_backprop(dX): # for pickling def _empty_backprop(dX): # for pickling
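
The listener change above lets training proceed when the upstream tok2vec is frozen, provided it still runs as an annotating component so `doc.tensor` is populated. A sketch of the relevant training settings, parsed here with thinc's Config as elsewhere in this diff (not a complete config):

from thinc.api import Config

CONFIG_STR = """
[training]
frozen_components = ["tok2vec"]
annotating_components = ["tok2vec"]
"""

# Freeze the tok2vec weights but keep it listed as an annotating component,
# so the listener above can fall back to reading doc.tensor during training.
partial_config = Config().from_str(CONFIG_STR)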

View File

@ -1,4 +1,4 @@
# cython: infer_types=True, profile=True # cython: infer_types=True, profile=True, binding=True
from typing import Iterable, Iterator, Optional, Dict, Tuple, Callable from typing import Iterable, Iterator, Optional, Dict, Tuple, Callable
import srsly import srsly
from thinc.api import set_dropout_rate, Model, Optimizer from thinc.api import set_dropout_rate, Model, Optimizer

View File

@ -156,12 +156,40 @@ def validate_token_pattern(obj: list) -> List[str]:
class TokenPatternString(BaseModel): class TokenPatternString(BaseModel):
REGEX: Optional[StrictStr] = Field(None, alias="regex") REGEX: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="regex")
IN: Optional[List[StrictStr]] = Field(None, alias="in") IN: Optional[List[StrictStr]] = Field(None, alias="in")
NOT_IN: Optional[List[StrictStr]] = Field(None, alias="not_in") NOT_IN: Optional[List[StrictStr]] = Field(None, alias="not_in")
IS_SUBSET: Optional[List[StrictStr]] = Field(None, alias="is_subset") IS_SUBSET: Optional[List[StrictStr]] = Field(None, alias="is_subset")
IS_SUPERSET: Optional[List[StrictStr]] = Field(None, alias="is_superset") IS_SUPERSET: Optional[List[StrictStr]] = Field(None, alias="is_superset")
INTERSECTS: Optional[List[StrictStr]] = Field(None, alias="intersects") INTERSECTS: Optional[List[StrictStr]] = Field(None, alias="intersects")
FUZZY: Optional[Union[StrictStr, "TokenPatternString"]] = Field(None, alias="fuzzy")
FUZZY1: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
None, alias="fuzzy1"
)
FUZZY2: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
None, alias="fuzzy2"
)
FUZZY3: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
None, alias="fuzzy3"
)
FUZZY4: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
None, alias="fuzzy4"
)
FUZZY5: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
None, alias="fuzzy5"
)
FUZZY6: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
None, alias="fuzzy6"
)
FUZZY7: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
None, alias="fuzzy7"
)
FUZZY8: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
None, alias="fuzzy8"
)
FUZZY9: Optional[Union[StrictStr, "TokenPatternString"]] = Field(
None, alias="fuzzy9"
)
class Config: class Config:
extra = "forbid" extra = "forbid"
@ -181,12 +209,12 @@ class TokenPatternNumber(BaseModel):
IS_SUBSET: Optional[List[StrictInt]] = Field(None, alias="is_subset") IS_SUBSET: Optional[List[StrictInt]] = Field(None, alias="is_subset")
IS_SUPERSET: Optional[List[StrictInt]] = Field(None, alias="is_superset") IS_SUPERSET: Optional[List[StrictInt]] = Field(None, alias="is_superset")
INTERSECTS: Optional[List[StrictInt]] = Field(None, alias="intersects") INTERSECTS: Optional[List[StrictInt]] = Field(None, alias="intersects")
EQ: Union[StrictInt, StrictFloat] = Field(None, alias="==") EQ: Optional[Union[StrictInt, StrictFloat]] = Field(None, alias="==")
NEQ: Union[StrictInt, StrictFloat] = Field(None, alias="!=") NEQ: Optional[Union[StrictInt, StrictFloat]] = Field(None, alias="!=")
GEQ: Union[StrictInt, StrictFloat] = Field(None, alias=">=") GEQ: Optional[Union[StrictInt, StrictFloat]] = Field(None, alias=">=")
LEQ: Union[StrictInt, StrictFloat] = Field(None, alias="<=") LEQ: Optional[Union[StrictInt, StrictFloat]] = Field(None, alias="<=")
GT: Union[StrictInt, StrictFloat] = Field(None, alias=">") GT: Optional[Union[StrictInt, StrictFloat]] = Field(None, alias=">")
LT: Union[StrictInt, StrictFloat] = Field(None, alias="<") LT: Optional[Union[StrictInt, StrictFloat]] = Field(None, alias="<")
class Config: class Config:
extra = "forbid" extra = "forbid"
@ -329,6 +357,7 @@ class ConfigSchemaTraining(BaseModel):
frozen_components: List[str] = Field(..., title="Pipeline components that shouldn't be updated during training") frozen_components: List[str] = Field(..., title="Pipeline components that shouldn't be updated during training")
annotating_components: List[str] = Field(..., title="Pipeline components that should set annotations during training") annotating_components: List[str] = Field(..., title="Pipeline components that should set annotations during training")
before_to_disk: Optional[Callable[["Language"], "Language"]] = Field(..., title="Optional callback to modify nlp object after training, before it's saved to disk") before_to_disk: Optional[Callable[["Language"], "Language"]] = Field(..., title="Optional callback to modify nlp object after training, before it's saved to disk")
before_update: Optional[Callable[["Language", Dict[str, Any]], None]] = Field(..., title="Optional callback that is invoked at the start of each training step")
# fmt: on # fmt: on
class Config: class Config:
@ -430,7 +459,7 @@ class ProjectConfigAssetURL(BaseModel):
# fmt: off # fmt: off
dest: StrictStr = Field(..., title="Destination of downloaded asset") dest: StrictStr = Field(..., title="Destination of downloaded asset")
url: Optional[StrictStr] = Field(None, title="URL of asset") url: Optional[StrictStr] = Field(None, title="URL of asset")
checksum: str = Field(None, title="MD5 hash of file", regex=r"([a-fA-F\d]{32})") checksum: Optional[str] = Field(None, title="MD5 hash of file", regex=r"([a-fA-F\d]{32})")
description: StrictStr = Field("", title="Description of asset") description: StrictStr = Field("", title="Description of asset")
# fmt: on # fmt: on
@ -438,7 +467,7 @@ class ProjectConfigAssetURL(BaseModel):
class ProjectConfigAssetGit(BaseModel): class ProjectConfigAssetGit(BaseModel):
# fmt: off # fmt: off
git: ProjectConfigAssetGitItem = Field(..., title="Git repo information") git: ProjectConfigAssetGitItem = Field(..., title="Git repo information")
checksum: str = Field(None, title="MD5 hash of file", regex=r"([a-fA-F\d]{32})") checksum: Optional[str] = Field(None, title="MD5 hash of file", regex=r"([a-fA-F\d]{32})")
description: Optional[StrictStr] = Field(None, title="Description of asset") description: Optional[StrictStr] = Field(None, title="Description of asset")
# fmt: on # fmt: on
@ -508,9 +537,9 @@ class DocJSONSchema(BaseModel):
None, title="Indices of sentences' start and end indices" None, title="Indices of sentences' start and end indices"
) )
text: StrictStr = Field(..., title="Document text") text: StrictStr = Field(..., title="Document text")
spans: Dict[StrictStr, List[Dict[StrictStr, Union[StrictStr, StrictInt]]]] = Field( spans: Optional[
None, title="Span information - end/start indices, label, KB ID" Dict[StrictStr, List[Dict[StrictStr, Union[StrictStr, StrictInt]]]]
) ] = Field(None, title="Span information - end/start indices, label, KB ID")
tokens: List[Dict[StrictStr, Union[StrictStr, StrictInt]]] = Field( tokens: List[Dict[StrictStr, Union[StrictStr, StrictInt]]] = Field(
..., title="Token information - ID, start, annotations" ..., title="Token information - ID, start, annotations"
) )
@ -519,9 +548,9 @@ class DocJSONSchema(BaseModel):
title="Any custom data stored in the document's _ attribute", title="Any custom data stored in the document's _ attribute",
alias="_", alias="_",
) )
underscore_token: Optional[Dict[StrictStr, Dict[StrictStr, Any]]] = Field( underscore_token: Optional[Dict[StrictStr, List[Dict[StrictStr, Any]]]] = Field(
None, title="Any custom data stored in the token's _ attribute" None, title="Any custom data stored in the token's _ attribute"
) )
underscore_span: Optional[Dict[StrictStr, Dict[StrictStr, Any]]] = Field( underscore_span: Optional[Dict[StrictStr, List[Dict[StrictStr, Any]]]] = Field(
None, title="Any custom data stored in the span's _ attribute" None, title="Any custom data stored in the span's _ attribute"
) )

View File

@ -174,7 +174,7 @@ class Scorer:
prf_score.score_set(pred_spans, gold_spans) prf_score.score_set(pred_spans, gold_spans)
if len(acc_score) > 0: if len(acc_score) > 0:
return { return {
"token_acc": acc_score.fscore, "token_acc": acc_score.precision,
"token_p": prf_score.precision, "token_p": prf_score.precision,
"token_r": prf_score.recall, "token_r": prf_score.recall,
"token_f": prf_score.fscore, "token_f": prf_score.fscore,
@ -446,7 +446,7 @@ class Scorer:
labels (Iterable[str]): The set of possible labels. Defaults to []. labels (Iterable[str]): The set of possible labels. Defaults to [].
multi_label (bool): Whether the attribute allows multiple labels. multi_label (bool): Whether the attribute allows multiple labels.
Defaults to True. When set to False (exclusive labels), missing Defaults to True. When set to False (exclusive labels), missing
gold labels are interpreted as 0.0. gold labels are interpreted as 0.0 and the threshold is set to 0.0.
positive_label (str): The positive label for a binary task with positive_label (str): The positive label for a binary task with
exclusive classes. Defaults to None. exclusive classes. Defaults to None.
threshold (float): Cutoff to consider a prediction "positive". Defaults threshold (float): Cutoff to consider a prediction "positive". Defaults
@ -471,17 +471,17 @@ class Scorer:
""" """
if threshold is None: if threshold is None:
threshold = 0.5 if multi_label else 0.0 threshold = 0.5 if multi_label else 0.0
if not multi_label:
threshold = 0.0
f_per_type = {label: PRFScore() for label in labels} f_per_type = {label: PRFScore() for label in labels}
auc_per_type = {label: ROCAUCScore() for label in labels} auc_per_type = {label: ROCAUCScore() for label in labels}
labels = set(labels) labels = set(labels)
if labels:
for eg in examples:
labels.update(eg.predicted.cats.keys())
labels.update(eg.reference.cats.keys())
for example in examples: for example in examples:
# Through this loop, None in the gold_cats indicates missing label. # Through this loop, None in the gold_cats indicates missing label.
pred_cats = getter(example.predicted, attr) pred_cats = getter(example.predicted, attr)
pred_cats = {k: v for k, v in pred_cats.items() if k in labels}
gold_cats = getter(example.reference, attr) gold_cats = getter(example.reference, attr)
gold_cats = {k: v for k, v in gold_cats.items() if k in labels}
for label in labels: for label in labels:
pred_score = pred_cats.get(label, 0.0) pred_score = pred_cats.get(label, 0.0)
@ -505,11 +505,10 @@ class Scorer:
# Get the highest-scoring for each. # Get the highest-scoring for each.
pred_label, pred_score = max(pred_cats.items(), key=lambda it: it[1]) pred_label, pred_score = max(pred_cats.items(), key=lambda it: it[1])
gold_label, gold_score = max(gold_cats.items(), key=lambda it: it[1]) gold_label, gold_score = max(gold_cats.items(), key=lambda it: it[1])
if pred_label == gold_label and pred_score >= threshold: if pred_label == gold_label:
f_per_type[pred_label].tp += 1 f_per_type[pred_label].tp += 1
else: else:
f_per_type[gold_label].fn += 1 f_per_type[gold_label].fn += 1
if pred_score >= threshold:
f_per_type[pred_label].fp += 1 f_per_type[pred_label].fp += 1
elif gold_cats: elif gold_cats:
gold_label, gold_score = max(gold_cats, key=lambda it: it[1]) gold_label, gold_score = max(gold_cats, key=lambda it: it[1])
@ -517,7 +516,6 @@ class Scorer:
f_per_type[gold_label].fn += 1 f_per_type[gold_label].fn += 1
elif pred_cats: elif pred_cats:
pred_label, pred_score = max(pred_cats.items(), key=lambda it: it[1]) pred_label, pred_score = max(pred_cats.items(), key=lambda it: it[1])
if pred_score >= threshold:
f_per_type[pred_label].fp += 1 f_per_type[pred_label].fp += 1
micro_prf = PRFScore() micro_prf = PRFScore()
for label_prf in f_per_type.values(): for label_prf in f_per_type.values():

View File

@ -256,6 +256,11 @@ def ko_tokenizer_tokenizer():
return nlp.tokenizer return nlp.tokenizer
@pytest.fixture(scope="module")
def la_tokenizer():
return get_lang_class("la")().tokenizer
@pytest.fixture(scope="session") @pytest.fixture(scope="session")
def lb_tokenizer(): def lb_tokenizer():
return get_lang_class("lb")().tokenizer return get_lang_class("lb")().tokenizer
@ -328,16 +333,24 @@ def ro_tokenizer():
@pytest.fixture(scope="session") @pytest.fixture(scope="session")
def ru_tokenizer(): def ru_tokenizer():
pytest.importorskip("pymorphy2") pytest.importorskip("pymorphy3")
return get_lang_class("ru")().tokenizer return get_lang_class("ru")().tokenizer
@pytest.fixture @pytest.fixture(scope="session")
def ru_lemmatizer(): def ru_lemmatizer():
pytest.importorskip("pymorphy2") pytest.importorskip("pymorphy3")
return get_lang_class("ru")().add_pipe("lemmatizer") return get_lang_class("ru")().add_pipe("lemmatizer")
@pytest.fixture(scope="session")
def ru_lookup_lemmatizer():
pytest.importorskip("pymorphy3")
return get_lang_class("ru")().add_pipe(
"lemmatizer", config={"mode": "pymorphy3_lookup"}
)
@pytest.fixture(scope="session") @pytest.fixture(scope="session")
def sa_tokenizer(): def sa_tokenizer():
return get_lang_class("sa")().tokenizer return get_lang_class("sa")().tokenizer
@ -406,17 +419,26 @@ def ky_tokenizer():
@pytest.fixture(scope="session") @pytest.fixture(scope="session")
def uk_tokenizer(): def uk_tokenizer():
pytest.importorskip("pymorphy2") pytest.importorskip("pymorphy3")
return get_lang_class("uk")().tokenizer return get_lang_class("uk")().tokenizer
@pytest.fixture @pytest.fixture(scope="session")
def uk_lemmatizer(): def uk_lemmatizer():
pytest.importorskip("pymorphy2") pytest.importorskip("pymorphy3")
pytest.importorskip("pymorphy2_dicts_uk") pytest.importorskip("pymorphy3_dicts_uk")
return get_lang_class("uk")().add_pipe("lemmatizer") return get_lang_class("uk")().add_pipe("lemmatizer")
@pytest.fixture(scope="session")
def uk_lookup_lemmatizer():
pytest.importorskip("pymorphy3")
pytest.importorskip("pymorphy3_dicts_uk")
return get_lang_class("uk")().add_pipe(
"lemmatizer", config={"mode": "pymorphy3_lookup"}
)
@pytest.fixture(scope="session") @pytest.fixture(scope="session")
def ur_tokenizer(): def ur_tokenizer():
return get_lang_class("ur")().tokenizer return get_lang_class("ur")().tokenizer
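
The fixtures above reflect the switch from pymorphy2 to pymorphy3. Outside the test suite, the equivalent pipeline setup would look roughly like this, assuming the pymorphy3 packages are installed:

import spacy

# Russian lemmatizer backed by pymorphy3; "pymorphy3_lookup" is the
# dictionary-lookup mode used by the new fixtures above.
nlp = spacy.blank("ru")
nlp.add_pipe("lemmatizer", config={"mode": "pymorphy3_lookup"})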

View File

@ -123,14 +123,14 @@ def test_doc_from_array_heads_in_bounds(en_vocab):
# head before start # head before start
arr = doc.to_array(["HEAD"]) arr = doc.to_array(["HEAD"])
arr[0] = -1 arr[0] = numpy.int32(-1).astype(numpy.uint64)
doc_from_array = Doc(en_vocab, words=words) doc_from_array = Doc(en_vocab, words=words)
with pytest.raises(ValueError): with pytest.raises(ValueError):
doc_from_array.from_array(["HEAD"], arr) doc_from_array.from_array(["HEAD"], arr)
# head after end # head after end
arr = doc.to_array(["HEAD"]) arr = doc.to_array(["HEAD"])
arr[0] = 5 arr[0] = numpy.int32(5).astype(numpy.uint64)
doc_from_array = Doc(en_vocab, words=words) doc_from_array = Doc(en_vocab, words=words)
with pytest.raises(ValueError): with pytest.raises(ValueError):
doc_from_array.from_array(["HEAD"], arr) doc_from_array.from_array(["HEAD"], arr)
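The test above now builds the out-of-bounds head value by casting through numpy.int32 first, presumably because assigning a bare negative Python int into the uint64 array returned by Doc.to_array is rejected by newer NumPy versions. A small self-contained sketch of the same bounds check (plain Vocab, no trained pipeline needed):

    import numpy
    from spacy.tokens import Doc
    from spacy.vocab import Vocab

    words = ["This", "is", "a", "sentence", "."]
    doc = Doc(Vocab(), words=words)

    arr = doc.to_array(["HEAD"])                     # HEAD offsets come back as uint64
    arr[0] = numpy.int32(-1).astype(numpy.uint64)    # "head before start", wrapped to 2**64 - 1

    new_doc = Doc(Vocab(), words=words)
    try:
        new_doc.from_array(["HEAD"], arr)            # per the test, this must raise
    except ValueError:
        print("out-of-bounds head rejected")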

View File

@ -82,6 +82,21 @@ def test_issue2396(en_vocab):
assert (span.get_lca_matrix() == matrix).all() assert (span.get_lca_matrix() == matrix).all()
@pytest.mark.issue(11499)
def test_init_args_unmodified(en_vocab):
words = ["A", "sentence"]
ents = ["B-TYPE1", ""]
sent_starts = [True, False]
Doc(
vocab=en_vocab,
words=words,
ents=ents,
sent_starts=sent_starts,
)
assert ents == ["B-TYPE1", ""]
assert sent_starts == [True, False]
@pytest.mark.parametrize("text", ["-0.23", "+123,456", "±1"]) @pytest.mark.parametrize("text", ["-0.23", "+123,456", "±1"])
@pytest.mark.parametrize("lang_cls", [English, MultiLanguage]) @pytest.mark.parametrize("lang_cls", [English, MultiLanguage])
@pytest.mark.issue(2782) @pytest.mark.issue(2782)

View File

@ -128,7 +128,9 @@ def test_doc_to_json_with_token_span_attributes(doc):
doc._.json_test1 = "hello world" doc._.json_test1 = "hello world"
doc._.json_test2 = [1, 2, 3] doc._.json_test2 = [1, 2, 3]
doc[0:1]._.span_test = "span_attribute" doc[0:1]._.span_test = "span_attribute"
doc[0:2]._.span_test = "span_attribute_2"
doc[0]._.token_test = 117 doc[0]._.token_test = 117
doc[1]._.token_test = 118
doc.spans["span_group"] = [doc[0:1]] doc.spans["span_group"] = [doc[0:1]]
json_doc = doc.to_json( json_doc = doc.to_json(
underscore=["json_test1", "json_test2", "token_test", "span_test"] underscore=["json_test1", "json_test2", "token_test", "span_test"]
@ -139,8 +141,10 @@ def test_doc_to_json_with_token_span_attributes(doc):
assert json_doc["_"]["json_test2"] == [1, 2, 3] assert json_doc["_"]["json_test2"] == [1, 2, 3]
assert "underscore_token" in json_doc assert "underscore_token" in json_doc
assert "underscore_span" in json_doc assert "underscore_span" in json_doc
assert json_doc["underscore_token"]["token_test"]["value"] == 117 assert json_doc["underscore_token"]["token_test"][0]["value"] == 117
assert json_doc["underscore_span"]["span_test"]["value"] == "span_attribute" assert json_doc["underscore_token"]["token_test"][1]["value"] == 118
assert json_doc["underscore_span"]["span_test"][0]["value"] == "span_attribute"
assert json_doc["underscore_span"]["span_test"][1]["value"] == "span_attribute_2"
assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0 assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0
assert srsly.json_loads(srsly.json_dumps(json_doc)) == json_doc assert srsly.json_loads(srsly.json_dumps(json_doc)) == json_doc
@ -161,8 +165,8 @@ def test_doc_to_json_with_custom_user_data(doc):
assert json_doc["_"]["json_test"] == "hello world" assert json_doc["_"]["json_test"] == "hello world"
assert "underscore_token" in json_doc assert "underscore_token" in json_doc
assert "underscore_span" in json_doc assert "underscore_span" in json_doc
assert json_doc["underscore_token"]["token_test"]["value"] == 117 assert json_doc["underscore_token"]["token_test"][0]["value"] == 117
assert json_doc["underscore_span"]["span_test"]["value"] == "span_attribute" assert json_doc["underscore_span"]["span_test"][0]["value"] == "span_attribute"
assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0 assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0
assert srsly.json_loads(srsly.json_dumps(json_doc)) == json_doc assert srsly.json_loads(srsly.json_dumps(json_doc)) == json_doc
@ -181,8 +185,8 @@ def test_doc_to_json_with_token_span_same_identifier(doc):
assert json_doc["_"]["my_ext"] == "hello world" assert json_doc["_"]["my_ext"] == "hello world"
assert "underscore_token" in json_doc assert "underscore_token" in json_doc
assert "underscore_span" in json_doc assert "underscore_span" in json_doc
assert json_doc["underscore_token"]["my_ext"]["value"] == 117 assert json_doc["underscore_token"]["my_ext"][0]["value"] == 117
assert json_doc["underscore_span"]["my_ext"]["value"] == "span_attribute" assert json_doc["underscore_span"]["my_ext"][0]["value"] == "span_attribute"
assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0 assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0
assert srsly.json_loads(srsly.json_dumps(json_doc)) == json_doc assert srsly.json_loads(srsly.json_dumps(json_doc)) == json_doc
@ -195,10 +199,9 @@ def test_doc_to_json_with_token_attributes_missing(doc):
doc[0]._.token_test = 117 doc[0]._.token_test = 117
json_doc = doc.to_json(underscore=["span_test"]) json_doc = doc.to_json(underscore=["span_test"])
assert "underscore_token" in json_doc
assert "underscore_span" in json_doc assert "underscore_span" in json_doc
assert json_doc["underscore_span"]["span_test"]["value"] == "span_attribute" assert json_doc["underscore_span"]["span_test"][0]["value"] == "span_attribute"
assert "token_test" not in json_doc["underscore_token"] assert "underscore_token" not in json_doc
assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0 assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0
@ -283,7 +286,9 @@ def test_json_to_doc_with_token_span_attributes(doc):
doc._.json_test1 = "hello world" doc._.json_test1 = "hello world"
doc._.json_test2 = [1, 2, 3] doc._.json_test2 = [1, 2, 3]
doc[0:1]._.span_test = "span_attribute" doc[0:1]._.span_test = "span_attribute"
doc[0:2]._.span_test = "span_attribute_2"
doc[0]._.token_test = 117 doc[0]._.token_test = 117
doc[1]._.token_test = 118
json_doc = doc.to_json( json_doc = doc.to_json(
underscore=["json_test1", "json_test2", "token_test", "span_test"] underscore=["json_test1", "json_test2", "token_test", "span_test"]
@ -295,7 +300,9 @@ def test_json_to_doc_with_token_span_attributes(doc):
assert new_doc._.json_test1 == "hello world" assert new_doc._.json_test1 == "hello world"
assert new_doc._.json_test2 == [1, 2, 3] assert new_doc._.json_test2 == [1, 2, 3]
assert new_doc[0]._.token_test == 117 assert new_doc[0]._.token_test == 117
assert new_doc[1]._.token_test == 118
assert new_doc[0:1]._.span_test == "span_attribute" assert new_doc[0:1]._.span_test == "span_attribute"
assert new_doc[0:2]._.span_test == "span_attribute_2"
assert new_doc.user_data == doc.user_data assert new_doc.user_data == doc.user_data
assert new_doc.to_bytes(exclude=["user_data"]) == doc.to_bytes( assert new_doc.to_bytes(exclude=["user_data"]) == doc.to_bytes(
exclude=["user_data"] exclude=["user_data"]
@ -363,3 +370,12 @@ def test_json_to_doc_validation_error(doc):
doc_json.pop("tokens") doc_json.pop("tokens")
with pytest.raises(ValueError): with pytest.raises(ValueError):
Doc(doc.vocab).from_json(doc_json, validate=True) Doc(doc.vocab).from_json(doc_json, validate=True)
def test_to_json_underscore_doc_getters(doc):
def get_text_length(doc):
return len(doc.text)
Doc.set_extension("text_length", getter=get_text_length)
doc_json = doc.to_json(underscore=["text_length"])
assert doc_json["_"]["text_length"] == get_text_length(doc)

View File

@ -1,7 +1,10 @@
from typing import List
import pytest import pytest
from random import Random from random import Random
from spacy.matcher import Matcher from spacy.matcher import Matcher
from spacy.tokens import Span, SpanGroup from spacy.tokens import Span, SpanGroup, Doc
from spacy.util import filter_spans
@pytest.fixture @pytest.fixture
@ -240,3 +243,13 @@ def test_span_group_extend(doc):
def test_span_group_dealloc(span_group): def test_span_group_dealloc(span_group):
with pytest.raises(AttributeError): with pytest.raises(AttributeError):
print(span_group.doc) print(span_group.doc)
@pytest.mark.issue(11975)
def test_span_group_typing(doc: Doc):
"""Tests whether typing of `SpanGroup` as `Iterable[Span]`-like object is accepted by mypy."""
span_group: SpanGroup = doc.spans["SPANS"]
spans: List[Span] = list(span_group)
for i, span in enumerate(span_group):
assert span == span_group[i] == spans[i]
filter_spans(span_group)
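The typing test above locks in that a SpanGroup can be treated as an Iterable[Span], both by mypy and by helpers such as spacy.util.filter_spans. In practice that means a span group can be passed straight to span-consuming utilities; a short illustrative sketch (the span key "candidates" is made up):

    from typing import Iterable, List

    import spacy
    from spacy.tokens import Span
    from spacy.util import filter_spans

    nlp = spacy.blank("en")
    doc = nlp("New York City is big")
    doc.spans["candidates"] = [doc[0:2], doc[0:3]]   # assigning a list of Spans creates a SpanGroup

    def longest_non_overlapping(spans: Iterable[Span]) -> List[Span]:
        # A SpanGroup is accepted wherever an Iterable[Span] is expected.
        return filter_spans(spans)

    print([span.text for span in longest_non_overlapping(doc.spans["candidates"])])
    # ['New York City']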

View File

@ -0,0 +1,18 @@
import pytest
# fmt: off
GRC_TOKEN_EXCEPTION_TESTS = [
("τὸ 〈τῆς〉 φιλοσοφίας ἔργον ἔνιοί φασιν ἀπὸ ⟦βαρβάρων⟧ ἄρξαι.", ["τὸ", "", "τῆς", "", "φιλοσοφίας", "ἔργον", "ἔνιοί", "φασιν", "ἀπὸ", "", "βαρβάρων", "", "ἄρξαι", "."]),
("τὴν δὲ τῶν Αἰγυπτίων φιλοσοφίαν εἶναι τοιαύτην περί τε †θεῶν† καὶ ὑπὲρ δικαιοσύνης.", ["τὴν", "δὲ", "τῶν", "Αἰγυπτίων", "φιλοσοφίαν", "εἶναι", "τοιαύτην", "περί", "τε", "", "θεῶν", "", "καὶ", "ὑπὲρ", "δικαιοσύνης", "."]),
("⸏πόσις δ' Ἐρεχθεύς ἐστί μοι σεσωσμένος⸏", ["", "πόσις", "δ'", "Ἐρεχθεύς", "ἐστί", "μοι", "σεσωσμένος", ""]),
("⸏ὔπνον ἴδωμεν⸎", ["", "ὔπνον", "ἴδωμεν", ""]),
]
# fmt: on
@pytest.mark.parametrize("text,expected_tokens", GRC_TOKEN_EXCEPTION_TESTS)
def test_grc_tokenizer(grc_tokenizer, text, expected_tokens):
tokens = grc_tokenizer(text)
token_list = [token.text for token in tokens if not token.is_space]
assert expected_tokens == token_list

View File

View File

@ -0,0 +1,8 @@
import pytest
def test_la_tokenizer_handles_exc_in_text(la_tokenizer):
text = "scio te omnia facturum, ut nobiscum quam primum sis"
tokens = la_tokenizer(text)
assert len(tokens) == 11
assert tokens[6].text == "nobis"

View File

@ -0,0 +1,35 @@
import pytest
from spacy.lang.la.lex_attrs import like_num
@pytest.mark.parametrize(
"text,match",
[
("IIII", True),
("VI", True),
("vi", True),
("IV", True),
("iv", True),
("IX", True),
("ix", True),
("MMXXII", True),
("0", True),
("1", True),
("quattuor", True),
("decem", True),
("tertius", True),
("canis", False),
("MMXX11", False),
(",", False),
],
)
def test_lex_attrs_like_number(la_tokenizer, text, match):
tokens = la_tokenizer(text)
assert len(tokens) == 1
assert tokens[0].like_num == match
@pytest.mark.parametrize("word", ["quinque"])
def test_la_lex_attrs_capitals(word):
assert like_num(word)
assert like_num(word.upper())
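The two Latin test files above exercise the newly added "la" language: a tokenizer exception that splits enclitic forms such as "nobiscum", and a like_num that recognizes Roman numerals and Latin number words. Assuming a spaCy build that already ships this Latin support, basic usage would look roughly like:

    import spacy

    nlp = spacy.blank("la")

    doc = nlp("scio te omnia facturum, ut nobiscum quam primum sis")
    print([t.text for t in doc])                  # "nobiscum" is split into "nobis" + "cum"

    print([t.like_num for t in nlp("MMXXII")])    # Roman numerals register as number-like -> [True]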

View File

@ -78,3 +78,32 @@ def test_ru_lemmatizer_punct(ru_lemmatizer):
assert ru_lemmatizer.pymorphy2_lemmatize(doc[0]) == ['"'] assert ru_lemmatizer.pymorphy2_lemmatize(doc[0]) == ['"']
doc = Doc(ru_lemmatizer.vocab, words=["»"], pos=["PUNCT"]) doc = Doc(ru_lemmatizer.vocab, words=["»"], pos=["PUNCT"])
assert ru_lemmatizer.pymorphy2_lemmatize(doc[0]) == ['"'] assert ru_lemmatizer.pymorphy2_lemmatize(doc[0]) == ['"']
def test_ru_doc_lookup_lemmatization(ru_lookup_lemmatizer):
assert ru_lookup_lemmatizer.mode == "pymorphy3_lookup"
words = ["мама", "мыла", "раму"]
pos = ["NOUN", "VERB", "NOUN"]
morphs = [
"Animacy=Anim|Case=Nom|Gender=Fem|Number=Sing",
"Aspect=Imp|Gender=Fem|Mood=Ind|Number=Sing|Tense=Past|VerbForm=Fin|Voice=Act",
"Animacy=Anim|Case=Acc|Gender=Fem|Number=Sing",
]
doc = Doc(ru_lookup_lemmatizer.vocab, words=words, pos=pos, morphs=morphs)
doc = ru_lookup_lemmatizer(doc)
lemmas = [token.lemma_ for token in doc]
assert lemmas == ["мама", "мыла", "раму"]
@pytest.mark.parametrize(
"word,lemma",
(
("бременем", "бремя"),
("будешь", "быть"),
("какая-то", "какой-то"),
),
)
def test_ru_lookup_lemmatizer(ru_lookup_lemmatizer, word, lemma):
assert ru_lookup_lemmatizer.mode == "pymorphy3_lookup"
doc = Doc(ru_lookup_lemmatizer.vocab, words=[word])
assert ru_lookup_lemmatizer(doc)[0].lemma_ == lemma
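Outside the test suite, the lookup-mode lemmatizer exercised above is configured the same way the fixture does it; the mode name "pymorphy3_lookup" and the pymorphy3 requirement are taken from this diff, and the example word and expected lemma come from the parametrized test:

    import spacy

    # Requires the pymorphy3 package (pymorphy3-dicts-uk is additionally needed for Ukrainian).
    nlp = spacy.blank("ru")
    nlp.add_pipe("lemmatizer", config={"mode": "pymorphy3_lookup"})

    doc = nlp("бременем")
    print([t.lemma_ for t in doc])    # expected ["бремя"], per the test above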

View File

@ -20,7 +20,6 @@ od katerih so te svoboščine odvisne,
assert len(tokens) == 116 assert len(tokens) == 116
@pytest.mark.xfail
def test_ordinal_number(sl_tokenizer): def test_ordinal_number(sl_tokenizer):
text = "10. decembra 1948" text = "10. decembra 1948"
tokens = sl_tokenizer(text) tokens = sl_tokenizer(text)

Some files were not shown because too many files have changed in this diff.