Merge remote-tracking branch 'upstream/master' into Non_greedy_quantifier_PR

Adriane Boyd 2022-11-24 17:05:41 +01:00
commit 2a8e0f0c37
205 changed files with 6819 additions and 2336 deletions

View File

@ -10,7 +10,7 @@ about: Use this template if you came across a bug or unexpected behaviour differ
<!-- Include a code example or the steps that led to the problem. Please try to be as specific as possible. --> <!-- Include a code example or the steps that led to the problem. Please try to be as specific as possible. -->
## Your Environment ## Your Environment
<!-- Include details of your environment. If you're using spaCy 1.7+, you can also type `python -m spacy info --markdown` and copy-paste the result here.--> <!-- Include details of your environment. You can also type `python -m spacy info --markdown` and copy-paste the result here.-->
* Operating System: * Operating System:
* Python Version Used: * Python Version Used:
* spaCy Version Used: * spaCy Version Used:

View File

@ -1,74 +1,68 @@
parameters: parameters:
python_version: '' python_version: ''
architecture: '' architecture: 'x64'
prefix: '' num_build_jobs: 2
gpu: false
num_build_jobs: 1
steps: steps:
- task: UsePythonVersion@0 - task: UsePythonVersion@0
inputs: inputs:
versionSpec: ${{ parameters.python_version }} versionSpec: ${{ parameters.python_version }}
architecture: ${{ parameters.architecture }} architecture: ${{ parameters.architecture }}
allowUnstable: true
- bash: | - bash: |
echo "##vso[task.setvariable variable=python_version]${{ parameters.python_version }}" echo "##vso[task.setvariable variable=python_version]${{ parameters.python_version }}"
displayName: 'Set variables' displayName: 'Set variables'
- script: | - script: |
${{ parameters.prefix }} python -m pip install -U pip setuptools python -m pip install -U build pip setuptools
${{ parameters.prefix }} python -m pip install -U -r requirements.txt python -m pip install -U -r requirements.txt
displayName: "Install dependencies" displayName: "Install dependencies"
- script: | - script: |
${{ parameters.prefix }} python setup.py build_ext --inplace -j ${{ parameters.num_build_jobs }} python -m build --sdist
${{ parameters.prefix }} python setup.py sdist --formats=gztar displayName: "Build sdist"
displayName: "Compile and build sdist"
- script: python -m mypy spacy - script: |
python -m mypy spacy
displayName: 'Run mypy' displayName: 'Run mypy'
condition: ne(variables['python_version'], '3.6')
- task: DeleteFiles@1 - task: DeleteFiles@1
inputs: inputs:
contents: "spacy" contents: "spacy"
displayName: "Delete source directory" displayName: "Delete source directory"
- task: DeleteFiles@1
inputs:
contents: "*.egg-info"
displayName: "Delete egg-info directory"
- script: | - script: |
${{ parameters.prefix }} python -m pip freeze --exclude torch --exclude cupy-cuda110 > installed.txt python -m pip freeze > installed.txt
${{ parameters.prefix }} python -m pip uninstall -y -r installed.txt python -m pip uninstall -y -r installed.txt
displayName: "Uninstall all packages" displayName: "Uninstall all packages"
- bash: | - bash: |
${{ parameters.prefix }} SDIST=$(python -c "import os;print(os.listdir('./dist')[-1])" 2>&1) SDIST=$(python -c "import os;print(os.listdir('./dist')[-1])" 2>&1)
${{ parameters.prefix }} python -m pip install dist/$SDIST SPACY_NUM_BUILD_JOBS=${{ parameters.num_build_jobs }} python -m pip install dist/$SDIST
displayName: "Install from sdist" displayName: "Install from sdist"
- script: | - script: |
${{ parameters.prefix }} python -m pip install -U -r requirements.txt python -W error -c "import spacy"
displayName: "Install test requirements" displayName: "Test import"
- script: | - script: |
${{ parameters.prefix }} python -m pip install -U cupy-cuda110 -f https://github.com/cupy/cupy/releases/v9.0.0 python -m spacy download ca_core_news_sm
${{ parameters.prefix }} python -m pip install "torch==1.7.1+cu110" -f https://download.pytorch.org/whl/torch_stable.html python -m spacy download ca_core_news_md
displayName: "Install GPU requirements" python -c "import spacy; nlp=spacy.load('ca_core_news_sm'); doc=nlp('test')"
condition: eq(${{ parameters.gpu }}, true) displayName: 'Test download CLI'
condition: eq(variables['python_version'], '3.8')
- script: | - script: |
${{ parameters.prefix }} python -m pytest --pyargs spacy python -W error -c "import ca_core_news_sm; nlp = ca_core_news_sm.load(); doc=nlp('test')"
displayName: "Run CPU tests" displayName: 'Test no warnings on load (#11713)'
condition: eq(${{ parameters.gpu }}, false) condition: eq(variables['python_version'], '3.8')
- script: |
${{ parameters.prefix }} python -m pytest --pyargs spacy -p spacy.tests.enable_gpu
displayName: "Run GPU tests"
condition: eq(${{ parameters.gpu }}, true)
# - script: |
# python -m spacy download ca_core_news_sm
# python -m spacy download ca_core_news_md
# python -c "import spacy; nlp=spacy.load('ca_core_news_sm'); doc=nlp('test')"
# displayName: 'Test download CLI'
# condition: eq(variables['python_version'], '3.8')
- script: | - script: |
python -m spacy convert extra/example_data/ner_example_data/ner-token-per-line-conll2003.json . python -m spacy convert extra/example_data/ner_example_data/ner-token-per-line-conll2003.json .
@ -92,25 +86,34 @@ steps:
displayName: 'Test train CLI' displayName: 'Test train CLI'
condition: eq(variables['python_version'], '3.8') condition: eq(variables['python_version'], '3.8')
# - script: | - script: |
# python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_sm'}; config.to_disk('ner_source_sm.cfg')" python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_sm'}; config.to_disk('ner_source_sm.cfg')"
# PYTHONWARNINGS="error,ignore::DeprecationWarning" python -m spacy assemble ner_source_sm.cfg output_dir PYTHONWARNINGS="error,ignore::DeprecationWarning" python -m spacy assemble ner_source_sm.cfg output_dir
# displayName: 'Test assemble CLI' displayName: 'Test assemble CLI'
# condition: eq(variables['python_version'], '3.8') condition: eq(variables['python_version'], '3.8')
#
# - script: | - script: |
# python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_md'}; config.to_disk('ner_source_md.cfg')" python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_md'}; config.to_disk('ner_source_md.cfg')"
# python -m spacy assemble ner_source_md.cfg output_dir 2>&1 | grep -q W113 python -m spacy assemble ner_source_md.cfg output_dir 2>&1 | grep -q W113
# displayName: 'Test assemble CLI vectors warning' displayName: 'Test assemble CLI vectors warning'
# condition: eq(variables['python_version'], '3.8') condition: eq(variables['python_version'], '3.8')
- script: |
python -m pip install -U -r requirements.txt
displayName: "Install test requirements"
- script: |
python -m pytest --pyargs spacy -W error
displayName: "Run CPU tests"
- script: |
python -m pip install --pre thinc-apple-ops
python -m pytest --pyargs spacy
displayName: "Run CPU tests with thinc-apple-ops"
condition: and(startsWith(variables['imageName'], 'macos'), eq(variables['python.version'], '3.11'))
- script: | - script: |
python .github/validate_universe_json.py website/meta/universe.json python .github/validate_universe_json.py website/meta/universe.json
displayName: 'Test website/meta/universe.json' displayName: 'Test website/meta/universe.json'
condition: eq(variables['python_version'], '3.8') condition: eq(variables['python_version'], '3.8')
- script: |
${{ parameters.prefix }} python -m pip install --pre thinc-apple-ops
${{ parameters.prefix }} python -m pytest --pyargs spacy
displayName: "Run CPU tests with thinc-apple-ops"
condition: and(startsWith(variables['imageName'], 'macos'), eq(variables['python.version'], '3.10'))

View File

@ -1,13 +0,0 @@
# Configuration for probot-no-response - https://github.com/probot/no-response
# Number of days of inactivity before an Issue is closed for lack of response
daysUntilClose: 14
# Label requiring a response
responseRequiredLabel: more-info-needed
# Comment to post when closing an Issue for lack of response. Set to `false` to disable
closeComment: >
This issue has been automatically closed because there has been no response
to a request for more information from the original author. With only the
information that is currently in the issue, there's not enough information
to take action. If you're the original author, feel free to reopen the issue
if you have or find the answers needed to investigate further.

.github/spacy_universe_alert.py vendored Normal file
View File

@ -0,0 +1,67 @@
import os
import sys
import json
from datetime import datetime
from slack_sdk.web.client import WebClient
CHANNEL = "#alerts-universe"
SLACK_TOKEN = os.environ.get("SLACK_BOT_TOKEN", "ENV VAR not available!")
DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
client = WebClient(SLACK_TOKEN)
github_context = json.loads(sys.argv[1])
event = github_context['event']
pr_title = event['pull_request']["title"]
pr_link = event['pull_request']["patch_url"].replace(".patch", "")
pr_author_url = event['sender']["html_url"]
pr_author_name = pr_author_url.rsplit('/')[-1]
pr_created_at_dt = datetime.strptime(
event['pull_request']["created_at"],
DATETIME_FORMAT
)
pr_created_at = pr_created_at_dt.strftime("%c")
pr_updated_at_dt = datetime.strptime(
event['pull_request']["updated_at"],
DATETIME_FORMAT
)
pr_updated_at = pr_updated_at_dt.strftime("%c")
blocks = [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "📣 New spaCy Universe Project Alert ✨"
}
},
{
"type": "section",
"fields": [
{
"type": "mrkdwn",
"text": f"*Pull Request:*\n<{pr_link}|{pr_title}>"
},
{
"type": "mrkdwn",
"text": f"*Author:*\n<{pr_author_url}|{pr_author_name}>"
},
{
"type": "mrkdwn",
"text": f"*Created at:*\n {pr_created_at}"
},
{
"type": "mrkdwn",
"text": f"*Last Updated:*\n {pr_updated_at}"
}
]
}
]
client.chat_postMessage(
channel=CHANNEL,
text="spaCy universe project PR alert",
blocks=blocks
)

View File

@ -12,10 +12,10 @@ jobs:
if: github.repository_owner == 'explosion' if: github.repository_owner == 'explosion'
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v3
with: with:
ref: ${{ github.head_ref }} ref: ${{ github.head_ref }}
- uses: actions/setup-python@v2 - uses: actions/setup-python@v4
- run: pip install black - run: pip install black
- name: Auto-format code if needed - name: Auto-format code if needed
run: black spacy run: black spacy
@ -23,10 +23,11 @@ jobs:
# code and makes GitHub think the action failed # code and makes GitHub think the action failed
- name: Check for modified files - name: Check for modified files
id: git-check id: git-check
run: echo ::set-output name=modified::$(if git diff-index --quiet HEAD --; then echo "false"; else echo "true"; fi) run: echo modified=$(if git diff-index --quiet HEAD --; then echo "false"; else echo "true"; fi) >> $GITHUB_OUTPUT
- name: Create Pull Request - name: Create Pull Request
if: steps.git-check.outputs.modified == 'true' if: steps.git-check.outputs.modified == 'true'
uses: peter-evans/create-pull-request@v3 uses: peter-evans/create-pull-request@v4
with: with:
title: Auto-format code with black title: Auto-format code with black
labels: meta labels: meta

View File

@ -8,14 +8,14 @@ on:
jobs: jobs:
explosion-bot: explosion-bot:
runs-on: ubuntu-18.04 runs-on: ubuntu-latest
steps: steps:
- name: Dump GitHub context - name: Dump GitHub context
env: env:
GITHUB_CONTEXT: ${{ toJson(github) }} GITHUB_CONTEXT: ${{ toJson(github) }}
run: echo "$GITHUB_CONTEXT" run: echo "$GITHUB_CONTEXT"
- uses: actions/checkout@v1 - uses: actions/checkout@v3
- uses: actions/setup-python@v1 - uses: actions/setup-python@v4
- name: Install and run explosion-bot - name: Install and run explosion-bot
run: | run: |
pip install git+https://${{ secrets.EXPLOSIONBOT_TOKEN }}@github.com/explosion/explosion-bot pip install git+https://${{ secrets.EXPLOSIONBOT_TOKEN }}@github.com/explosion/explosion-bot

View File

@ -15,7 +15,7 @@ jobs:
issue-manager: issue-manager:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: tiangolo/issue-manager@0.2.1 - uses: tiangolo/issue-manager@0.4.0
with: with:
token: ${{ secrets.GITHUB_TOKEN }} token: ${{ secrets.GITHUB_TOKEN }}
config: > config: >
@ -25,5 +25,11 @@ jobs:
"message": "This issue has been automatically closed because it was answered and there was no follow-up discussion.", "message": "This issue has been automatically closed because it was answered and there was no follow-up discussion.",
"remove_label_on_comment": true, "remove_label_on_comment": true,
"remove_label_on_close": true "remove_label_on_close": true
},
"more-info-needed": {
"delay": "P7D",
"message": "This issue has been automatically closed because there has been no response to a request for more information from the original author. With only the information that is currently in the issue, there's not enough information to take action. If you're the original author, feel free to reopen the issue if you have or find the answers needed to investigate further.",
"remove_label_on_comment": true,
"remove_label_on_close": true
} }
} }

View File

@ -14,7 +14,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v1 uses: actions/checkout@v3
with: with:
ref: ${{ matrix.branch }} ref: ${{ matrix.branch }}
- name: Get commits from past 24 hours - name: Get commits from past 24 hours
@ -23,9 +23,9 @@ jobs:
today=$(date '+%Y-%m-%d %H:%M:%S') today=$(date '+%Y-%m-%d %H:%M:%S')
yesterday=$(date -d "yesterday" '+%Y-%m-%d %H:%M:%S') yesterday=$(date -d "yesterday" '+%Y-%m-%d %H:%M:%S')
if git log --after="$yesterday" --before="$today" | grep commit ; then if git log --after="$yesterday" --before="$today" | grep commit ; then
echo "::set-output name=run_tests::true" echo run_tests=true >> $GITHUB_OUTPUT
else else
echo "::set-output name=run_tests::false" echo run_tests=false >> $GITHUB_OUTPUT
fi fi
- name: Trigger buildkite build - name: Trigger buildkite build

View File

@ -0,0 +1,32 @@
name: spaCy universe project alert
on:
pull_request_target:
paths:
- "website/meta/universe.json"
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Dump GitHub context
env:
GITHUB_CONTEXT: ${{ toJson(github) }}
PR_NUMBER: ${{github.event.number}}
run: |
echo "$GITHUB_CONTEXT"
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: '3.10'
- name: Install Bernadette app dependency and send an alert
env:
SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
GITHUB_CONTEXT: ${{ toJson(github) }}
CHANNEL: "#alerts-universe"
run: |
pip install slack-sdk==3.17.2 aiohttp==3.8.1
echo "$CHANNEL"
python .github/spacy_universe_alert.py "$GITHUB_CONTEXT"

.gitignore vendored
View File

@ -24,6 +24,7 @@ quickstart-training-generator.js
cythonize.json cythonize.json
spacy/*.html spacy/*.html
*.cpp *.cpp
*.c
*.so *.so
# Vim / VSCode / editors # Vim / VSCode / editors

View File

@ -5,8 +5,8 @@ repos:
- id: black - id: black
language_version: python3.7 language_version: python3.7
additional_dependencies: ['click==8.0.4'] additional_dependencies: ['click==8.0.4']
- repo: https://gitlab.com/pycqa/flake8 - repo: https://github.com/pycqa/flake8
rev: 3.9.2 rev: 5.0.4
hooks: hooks:
- id: flake8 - id: flake8
args: args:

View File

@ -271,7 +271,8 @@ except: # noqa: E722
### Python conventions ### Python conventions
All Python code must be written **compatible with Python 3.6+**. All Python code must be written **compatible with Python 3.6+**. More detailed
code conventions can be found in the [developer docs](https://github.com/explosion/spaCy/blob/master/extra/DEVELOPER_DOCS/Code%20Conventions.md).
#### I/O and handling paths #### I/O and handling paths

View File

@ -8,7 +8,7 @@ be used in real products.
spaCy comes with spaCy comes with
[pretrained pipelines](https://spacy.io/models) and [pretrained pipelines](https://spacy.io/models) and
currently supports tokenization and training for **60+ languages**. It features currently supports tokenization and training for **70+ languages**. It features
state-of-the-art speed and **neural network models** for tagging, state-of-the-art speed and **neural network models** for tagging,
parsing, **named entity recognition**, **text classification** and more, parsing, **named entity recognition**, **text classification** and more,
multi-task learning with pretrained **transformers** like BERT, as well as a multi-task learning with pretrained **transformers** like BERT, as well as a
@ -16,7 +16,7 @@ production-ready [**training system**](https://spacy.io/usage/training) and easy
model packaging, deployment and workflow management. spaCy is commercial model packaging, deployment and workflow management. spaCy is commercial
open-source software, released under the MIT license. open-source software, released under the MIT license.
💫 **Version 3.3.1 out now!** 💫 **Version 3.4 out now!**
[Check out the release notes here.](https://github.com/explosion/spaCy/releases) [Check out the release notes here.](https://github.com/explosion/spaCy/releases)
[![Azure Pipelines](https://img.shields.io/azure-devops/build/explosion-ai/public/8/master.svg?logo=azure-pipelines&style=flat-square&label=build)](https://dev.azure.com/explosion-ai/public/_build?definitionId=8) [![Azure Pipelines](https://img.shields.io/azure-devops/build/explosion-ai/public/8/master.svg?logo=azure-pipelines&style=flat-square&label=build)](https://dev.azure.com/explosion-ai/public/_build?definitionId=8)
@ -79,7 +79,7 @@ more people can benefit from it.
## Features ## Features
- Support for **60+ languages** - Support for **70+ languages**
- **Trained pipelines** for different languages and tasks - **Trained pipelines** for different languages and tasks
- Multi-task learning with pretrained **transformers** like BERT - Multi-task learning with pretrained **transformers** like BERT
- Support for pretrained **word vectors** and embeddings - Support for pretrained **word vectors** and embeddings

View File

@ -31,8 +31,8 @@ jobs:
inputs: inputs:
versionSpec: "3.7" versionSpec: "3.7"
- script: | - script: |
pip install flake8==3.9.2 pip install flake8==5.0.4
python -m flake8 spacy --count --select=E901,E999,F821,F822,F823 --show-source --statistics python -m flake8 spacy --count --select=E901,E999,F821,F822,F823,W605 --show-source --statistics
displayName: "flake8" displayName: "flake8"
- job: "Test" - job: "Test"
@ -76,15 +76,24 @@ jobs:
# Python39Mac: # Python39Mac:
# imageName: "macos-latest" # imageName: "macos-latest"
# python.version: "3.9" # python.version: "3.9"
Python310Linux: # Python310Linux:
imageName: "ubuntu-latest" # imageName: "ubuntu-latest"
python.version: "3.10" # python.version: "3.10"
Python310Windows: Python310Windows:
imageName: "windows-latest" imageName: "windows-latest"
python.version: "3.10" python.version: "3.10"
Python310Mac: # Python310Mac:
imageName: "macos-latest" # imageName: "macos-latest"
python.version: "3.10" # python.version: "3.10"
Python311Linux:
imageName: 'ubuntu-latest'
python.version: '3.11'
Python311Windows:
imageName: 'windows-latest'
python.version: '3.11'
Python311Mac:
imageName: 'macos-latest'
python.version: '3.11'
maxParallel: 4 maxParallel: 4
pool: pool:
vmImage: $(imageName) vmImage: $(imageName)
@ -92,20 +101,3 @@ jobs:
- template: .github/azure-steps.yml - template: .github/azure-steps.yml
parameters: parameters:
python_version: '$(python.version)' python_version: '$(python.version)'
architecture: 'x64'
# - job: "TestGPU"
# dependsOn: "Validate"
# strategy:
# matrix:
# Python38LinuxX64_GPU:
# python.version: '3.8'
# pool:
# name: "LinuxX64_GPU"
# steps:
# - template: .github/azure-steps.yml
# parameters:
# python_version: '$(python.version)'
# architecture: 'x64'
# gpu: true
# num_build_jobs: 24

View File

@ -1,6 +1,8 @@
# build version constraints for use with wheelwright + multibuild # build version constraints for use with wheelwright + multibuild
numpy==1.15.0; python_version<='3.7' numpy==1.15.0; python_version<='3.7' and platform_machine!='aarch64'
numpy==1.17.3; python_version=='3.8' numpy==1.19.2; python_version<='3.7' and platform_machine=='aarch64'
numpy==1.17.3; python_version=='3.8' and platform_machine!='aarch64'
numpy==1.19.2; python_version=='3.8' and platform_machine=='aarch64'
numpy==1.19.3; python_version=='3.9' numpy==1.19.3; python_version=='3.9'
numpy==1.21.3; python_version=='3.10' numpy==1.21.3; python_version=='3.10'
numpy; python_version>='3.11' numpy; python_version>='3.11'

View File

@ -191,6 +191,8 @@ def load_model(name: str) -> "Language":
... ...
``` ```
Note that we typically put the `from typing` import statements on the first line(s) of the Python module.
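For instance, a minimal sketch of that layout (the module contents below are made up purely for illustration):
```python
# `from typing` imports go on the first line(s), ahead of the other imports.
from typing import Any, Dict, Optional

import json
from pathlib import Path


def load_meta(path: Path) -> Optional[Dict[str, Any]]:
    """Toy helper, only here to exercise the imported names."""
    if not path.exists():
        return None
    return json.loads(path.read_text())
```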
## Structuring logic ## Structuring logic
### Positional and keyword arguments ### Positional and keyword arguments
@ -275,6 +277,27 @@ If you have to use `try`/`except`, make sure to only include what's **absolutely
+ return [v.strip() for v in value.split(",")] + return [v.strip() for v in value.split(",")]
``` ```
### Numeric comparisons
For numeric comparisons, as a general rule we always use `<` and `>=` and avoid the usage of `<=` and `>`. This is to ensure we consistently
apply inclusive lower bounds and exclusive upper bounds, helping to prevent off-by-one errors.
One exception to this rule is the ternary case. With a chain like
```python
if value >= 0 and value < max:
...
```
it's fine to rewrite this to the shorter form
```python
if 0 <= value < max:
...
```
even though this requires the usage of the `<=` operator.
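As a small, hedged illustration of the general rule (the helper below is invented for this example, not taken from the code base):
```python
def in_window(i: int, start: int, end: int) -> bool:
    # Inclusive lower bound, exclusive upper bound, written with `>=` and `<` only,
    # so adjacent windows such as [0, 5) and [5, 10) neither overlap nor leave a gap.
    return i >= start and i < end
```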
### Iteration and comprehensions ### Iteration and comprehensions
We generally avoid using built-in functions like `filter` or `map` in favor of list or generator comprehensions. We generally avoid using built-in functions like `filter` or `map` in favor of list or generator comprehensions.

View File

@ -16,18 +16,38 @@ To summon the robot, write a github comment on the issue/PR you wish to test. Th
Some things to note: Some things to note:
* The `@explosion-bot please` must be the beginning of the command - you cannot add anything in front of this or else the robot won't know how to parse it. Adding anything at the end aside from the test name will also confuse the robot, so keep it simple! - The `@explosion-bot please` must be the beginning of the command - you cannot add anything in front of this or else the robot won't know how to parse it. Adding anything at the end aside from the test name will also confuse the robot, so keep it simple!
* The command name (such as `test_gpu`) must be one of the tests that the bot knows how to run. The available commands are documented in the bot's [workflow config](https://github.com/explosion/spaCy/blob/master/.github/workflows/explosionbot.yml#L26) and must match exactly one of the commands listed there. - The command name (such as `test_gpu`) must be one of the tests that the bot knows how to run. The available commands are documented in the bot's [workflow config](https://github.com/explosion/spaCy/blob/master/.github/workflows/explosionbot.yml#L26) and must match exactly one of the commands listed there.
* The robot can't do multiple things at once, so if you want it to run multiple tests, you'll have to summon it with one comment per test. - The robot can't do multiple things at once, so if you want it to run multiple tests, you'll have to summon it with one comment per test.
* For the `test_gpu` command, you can specify an optional thinc branch (from the spaCy repo) or a spaCy branch (from the thinc repo) with either the `--thinc-branch` or `--spacy-branch` flags. By default, the bot will pull in the PR branch from the repo where the command was issued, and the main branch of the other repository. However, if you need to run against another branch, you can say (for example):
``` ### Examples
@explosion-bot please test_gpu --thinc-branch develop
``` - Execute spaCy slow GPU tests with a custom thinc branch from a spaCy PR:
You can also specify a branch from an unmerged PR:
``` ```
@explosion-bot please test_gpu --thinc-branch refs/pull/633/head @explosion-bot please test_slow_gpu --thinc-branch <branch_name>
``` ```
`branch_name` can either be a named branch, e.g: `develop`, or an unmerged PR, e.g: `refs/pull/<pr_number>/head`.
- Execute spaCy Transformers GPU tests from a spaCy PR:
```
@explosion-bot please test_gpu --run-on spacy-transformers --run-on-branch master --spacy-branch current_pr
```
This will launch the GPU pipeline for the `spacy-transformers` repo on its `master` branch, using the current spaCy PR's branch to build spaCy. The name of the repository passed to `--run-on` is case-sensitive, e.g: use `spaCy` instead of `spacy`.
- General info about supported commands.
```
@explosion-bot please info
```
- Help text for a specific command
```
@explosion-bot please <command> --help
```
## Troubleshooting ## Troubleshooting

View File

@ -0,0 +1,82 @@
# spaCy Satellite Packages
This is a list of all the active repos relevant to spaCy besides the main one, with short descriptions, history, and current status. Archived repos will not be covered.
## Always Included in spaCy
These packages are always pulled in when you install spaCy. Most of them are direct dependencies, but some are transitive dependencies through other packages.
- [spacy-legacy](https://github.com/explosion/spacy-legacy): When an architecture in spaCy changes enough to get a new version, the old version is frozen and moved to spacy-legacy. This allows us to keep the core library slim while also preserving backwards compatibility.
- [thinc](https://github.com/explosion/thinc): Thinc is the machine learning library that powers trainable components in spaCy. It wraps backends like NumPy, PyTorch, and TensorFlow to provide a functional interface for specifying architectures.
- [catalogue](https://github.com/explosion/catalogue): Small library for adding function registries, like those used for model architectures in spaCy.
- [confection](https://github.com/explosion/confection): This library contains the functionality for config parsing that was formerly contained directly in Thinc.
- [spacy-loggers](https://github.com/explosion/spacy-loggers): Contains loggers beyond the default logger available in spaCy's core code base. This includes loggers integrated with third-party services, which may differ in release cadence from spaCy itself.
- [wasabi](https://github.com/explosion/wasabi): A command line formatting library, used for terminal output in spaCy.
- [srsly](https://github.com/explosion/srsly): A wrapper that vendors several serialization libraries for spaCy. Includes parsers for JSON, JSONL, MessagePack, (extended) Pickle, and YAML.
- [preshed](https://github.com/explosion/preshed): A Cython library for low-level data structures like hash maps, used for memory efficient data storage.
- [cython-blis](https://github.com/explosion/cython-blis): Fast matrix multiplication using BLIS without depending on system libraries. Required by Thinc, rather than spaCy directly.
- [murmurhash](https://github.com/explosion/murmurhash): A wrapper library for a C++ murmurhash implementation, used for string IDs in spaCy and preshed.
- [cymem](https://github.com/explosion/cymem): A small library for RAII-style memory management in Cython.
## Optional Extensions for spaCy
These are repos that can be used by spaCy but aren't part of a default installation. Many of these are wrappers to integrate various kinds of third-party libraries.
- [spacy-transformers](https://github.com/explosion/spacy-transformers): A wrapper for the [HuggingFace Transformers](https://huggingface.co/docs/transformers/index) library, this handles the extensive conversion necessary to coordinate spaCy's powerful `Doc` representation, training pipeline, and the Transformer embeddings. When released, this was known as `spacy-pytorch-transformers`, but it changed to the current name when HuggingFace updated the name of their library as well.
- [spacy-huggingface-hub](https://github.com/explosion/spacy-huggingface-hub): This package has a CLI script for uploading a packaged spaCy pipeline (created with `spacy package`) to the [Hugging Face Hub](https://huggingface.co/models).
- [spacy-alignments](https://github.com/explosion/spacy-alignments): A wrapper for the tokenizations library (mentioned below) with a modified build system to simplify cross-platform wheel creation. Used in spacy-transformers for aligning spaCy and HuggingFace tokenizations.
- [spacy-experimental](https://github.com/explosion/spacy-experimental): Experimental components that are not quite ready for inclusion in the main spaCy library. Usually there are unresolved questions around their APIs, so the experimental library allows us to expose them to the community for feedback before fully integrating them.
- [spacy-lookups-data](https://github.com/explosion/spacy-lookups-data): A repository of linguistic data, such as lemmas, that takes up a lot of disk space. Originally created to reduce the size of the spaCy core library. This is mainly useful if you want the data included but aren't using a pretrained pipeline; for the affected languages, the relevant data is included in pretrained pipelines directly.
- [coreferee](https://github.com/explosion/coreferee): Coreference resolution for English, French, German and Polish, optimised for limited training data and easily extensible for further languages. Used as a spaCy pipeline component.
- [spacy-stanza](https://github.com/explosion/spacy-stanza): This is a wrapper that allows the use of Stanford's Stanza library in spaCy.
- [spacy-streamlit](https://github.com/explosion/spacy-streamlit): A wrapper for the Streamlit dashboard building library to help with integrating [displaCy](https://spacy.io/api/top-level/#displacy).
- [spacymoji](https://github.com/explosion/spacymoji): A library to add extra support for emoji to spaCy, such as including character names.
- [thinc-apple-ops](https://github.com/explosion/thinc-apple-ops): A special backend for OSX that uses Apple's native libraries for improved performance.
- [os-signpost](https://github.com/explosion/os-signpost): A Python package that allows you to use the `OSSignposter` API in OSX for performance analysis.
- [spacy-ray](https://github.com/explosion/spacy-ray): A wrapper to integrate spaCy with Ray, a distributed training framework. Currently a work in progress.
## Prodigy
[Prodigy](https://prodi.gy) is Explosion's easy to use and highly customizable tool for annotating data. Prodigy itself requires a license, but the repos below contain documentation, examples, and editor or notebook integrations.
- [prodigy-recipes](https://github.com/explosion/prodigy-recipes): Sample recipes for Prodigy, along with notebooks and other examples of usage.
- [vscode-prodigy](https://github.com/explosion/vscode-prodigy): A VS Code extension that lets you run Prodigy inside VS Code.
- [jupyterlab-prodigy](https://github.com/explosion/jupyterlab-prodigy): An extension for JupyterLab that lets you run Prodigy inside JupyterLab.
## Independent Tools or Projects
These are tools that may be related to or use spaCy, but are fully functional, independent projects in their own right as well.
- [floret](https://github.com/explosion/floret): A modification of fastText to use Bloom Embeddings. Can be used to add vectors with subword features to spaCy, and also works independently in the same manner as fastText.
- [sense2vec](https://github.com/explosion/sense2vec): A library to make embeddings of noun phrases or words coupled with their part of speech. This library uses spaCy.
- [spacy-vectors-builder](https://github.com/explosion/spacy-vectors-builder): This is a spaCy project that builds vectors using floret and a lot of input text. It handles downloading the input data as well as the actual building of vectors.
- [holmes-extractor](https://github.com/explosion/holmes-extractor): Information extraction from English and German texts based on predicate logic. Uses spaCy.
- [healthsea](https://github.com/explosion/healthsea): Healthsea is a project to extract information from comments about health supplements. Structurally, it's a self-contained, large spaCy project.
- [spacy-pkuseg](https://github.com/explosion/spacy-pkuseg): A fork of the pkuseg Chinese tokenizer. Used for Chinese support in spaCy, but also works independently.
- [ml-datasets](https://github.com/explosion/ml-datasets): This repo includes loaders for several standard machine learning datasets, like MNIST or WikiNER, and has historically been used in spaCy example code and documentation.
## Documentation and Informational Repos
These repos are used to support the spaCy docs or otherwise present information about spaCy or other Explosion projects.
- [projects](https://github.com/explosion/projects): The projects repo is used to show detailed examples of spaCy usage. Individual projects can be checked out using the spaCy command line tool, rather than checking out the projects repo directly.
- [spacy-course](https://github.com/explosion/spacy-course): Home to the interactive spaCy course for learning about how to use the library and some basic NLP principles.
- [spacy-io-binder](https://github.com/explosion/spacy-io-binder): Home to the notebooks used for interactive examples in the documentation.
## Organizational / Meta
These repos are used for organizing data around spaCy, but are not something an end user would need to install as part of using the library.
- [spacy-models](https://github.com/explosion/spacy-models): This repo contains metadata (but not training data) for all the spaCy models. This includes information about where their training data came from, version compatibility, and performance information. It also includes tests for the model packages, and the built models are hosted as releases of this repo.
- [wheelwright](https://github.com/explosion/wheelwright): A tool for automating our PyPI builds and releases.
- [ec2buildwheel](https://github.com/explosion/ec2buildwheel): A small project that allows you to build Python packages in the manner of cibuildwheel, but on any EC2 image. Used by wheelwright.
## Other
Repos that don't fit in any of the above categories.
- [blis](https://github.com/explosion/blis): A fork of the official BLIS library. The main branch is not updated, but work continues in various branches. This is used for cython-blis.
- [tokenizations](https://github.com/explosion/tokenizations): A library originally by Yohei Tamura to align strings with tolerance to some variations in features like case and diacritics, used for aligning tokens and wordpieces. Adopted and maintained by Explosion, but usually spacy-alignments is used instead.
- [conll-2012](https://github.com/explosion/conll-2012): A repo to hold some slightly cleaned up versions of the official scripts for the CoNLL 2012 shared task involving coreference resolution. Used in the coref project.
- [fastapi-explosion-extras](https://github.com/explosion/fastapi-explosion-extras): Some small tweaks to FastAPI used at Explosion.

View File

@ -127,3 +127,34 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
polyleven
---------
* Files: spacy/matcher/polyleven.c
MIT License
Copyright (c) 2021 Fujimoto Seiji <fujimoto@ceptord.net>
Copyright (c) 2021 Max Bachmann <kontakt@maxbachmann.de>
Copyright (c) 2022 Nick Mazuk
Copyright (c) 2022 Michael Weiss <code@mweiss.ch>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -5,8 +5,7 @@ requires = [
"cymem>=2.0.2,<2.1.0", "cymem>=2.0.2,<2.1.0",
"preshed>=3.0.2,<3.1.0", "preshed>=3.0.2,<3.1.0",
"murmurhash>=0.28.0,<1.1.0", "murmurhash>=0.28.0,<1.1.0",
"thinc>=8.1.0.dev3,<8.2.0", "thinc>=8.1.0,<8.2.0",
"pathy",
"numpy>=1.15.0", "numpy>=1.15.0",
] ]
build-backend = "setuptools.build_meta" build-backend = "setuptools.build_meta"

View File

@ -1,21 +1,21 @@
# Our libraries # Our libraries
spacy-legacy>=3.0.9,<3.1.0 spacy-legacy>=3.0.10,<3.1.0
spacy-loggers>=1.0.0,<2.0.0 spacy-loggers>=1.0.0,<2.0.0
cymem>=2.0.2,<2.1.0 cymem>=2.0.2,<2.1.0
preshed>=3.0.2,<3.1.0 preshed>=3.0.2,<3.1.0
thinc>=8.1.0.dev3,<8.2.0 thinc>=8.1.0,<8.2.0
ml_datasets>=0.2.0,<0.3.0 ml_datasets>=0.2.0,<0.3.0
murmurhash>=0.28.0,<1.1.0 murmurhash>=0.28.0,<1.1.0
wasabi>=0.9.1,<1.1.0 wasabi>=0.9.1,<1.1.0
srsly>=2.4.3,<3.0.0 srsly>=2.4.3,<3.0.0
catalogue>=2.0.6,<2.1.0 catalogue>=2.0.6,<2.1.0
typer>=0.3.0,<0.5.0 typer>=0.3.0,<0.8.0
pathy>=0.3.5 pathy>=0.3.5
# Third party dependencies # Third party dependencies
numpy>=1.15.0 numpy>=1.15.0
requests>=2.13.0,<3.0.0 requests>=2.13.0,<3.0.0
tqdm>=4.38.0,<5.0.0 tqdm>=4.38.0,<5.0.0
pydantic>=1.7.4,!=1.8,!=1.8.1,<1.10.0 pydantic>=1.7.4,!=1.8,!=1.8.1,<1.11.0
jinja2 jinja2
langcodes>=3.2.0,<4.0.0 langcodes>=3.2.0,<4.0.0
# Official Python utilities # Official Python utilities
@ -28,10 +28,12 @@ cython>=0.25,<3.0
pytest>=5.2.0,!=7.1.0 pytest>=5.2.0,!=7.1.0
pytest-timeout>=1.3.0,<2.0.0 pytest-timeout>=1.3.0,<2.0.0
mock>=2.0.0,<3.0.0 mock>=2.0.0,<3.0.0
flake8>=3.8.0,<3.10.0 flake8>=3.8.0,<6.0.0
hypothesis>=3.27.0,<7.0.0 hypothesis>=3.27.0,<7.0.0
mypy>=0.910,<=0.960 mypy>=0.990,<0.1000; platform_machine != "aarch64" and python_version >= "3.7"
types-dataclasses>=0.1.3; python_version < "3.7" types-dataclasses>=0.1.3; python_version < "3.7"
types-mock>=0.1.1 types-mock>=0.1.1
types-setuptools>=57.0.0
types-requests types-requests
types-setuptools>=57.0.0
black>=22.0,<23.0 black>=22.0,<23.0

View File

@ -38,25 +38,25 @@ setup_requires =
cymem>=2.0.2,<2.1.0 cymem>=2.0.2,<2.1.0
preshed>=3.0.2,<3.1.0 preshed>=3.0.2,<3.1.0
murmurhash>=0.28.0,<1.1.0 murmurhash>=0.28.0,<1.1.0
thinc>=8.1.0.dev3,<8.2.0 thinc>=8.1.0,<8.2.0
install_requires = install_requires =
# Our libraries # Our libraries
spacy-legacy>=3.0.9,<3.1.0 spacy-legacy>=3.0.10,<3.1.0
spacy-loggers>=1.0.0,<2.0.0 spacy-loggers>=1.0.0,<2.0.0
murmurhash>=0.28.0,<1.1.0 murmurhash>=0.28.0,<1.1.0
cymem>=2.0.2,<2.1.0 cymem>=2.0.2,<2.1.0
preshed>=3.0.2,<3.1.0 preshed>=3.0.2,<3.1.0
thinc>=8.1.0.dev3,<8.2.0 thinc>=8.1.0,<8.2.0
wasabi>=0.9.1,<1.1.0 wasabi>=0.9.1,<1.1.0
srsly>=2.4.3,<3.0.0 srsly>=2.4.3,<3.0.0
catalogue>=2.0.6,<2.1.0 catalogue>=2.0.6,<2.1.0
typer>=0.3.0,<0.5.0
pathy>=0.3.5
# Third-party dependencies # Third-party dependencies
typer>=0.3.0,<0.8.0
pathy>=0.3.5
tqdm>=4.38.0,<5.0.0 tqdm>=4.38.0,<5.0.0
numpy>=1.15.0 numpy>=1.15.0
requests>=2.13.0,<3.0.0 requests>=2.13.0,<3.0.0
pydantic>=1.7.4,!=1.8,!=1.8.1,<1.10.0 pydantic>=1.7.4,!=1.8,!=1.8.1,<1.11.0
jinja2 jinja2
# Official Python utilities # Official Python utilities
setuptools setuptools
@ -76,37 +76,41 @@ transformers =
ray = ray =
spacy_ray>=0.1.0,<1.0.0 spacy_ray>=0.1.0,<1.0.0
cuda = cuda =
cupy>=5.0.0b4,<11.0.0 cupy>=5.0.0b4,<12.0.0
cuda80 = cuda80 =
cupy-cuda80>=5.0.0b4,<11.0.0 cupy-cuda80>=5.0.0b4,<12.0.0
cuda90 = cuda90 =
cupy-cuda90>=5.0.0b4,<11.0.0 cupy-cuda90>=5.0.0b4,<12.0.0
cuda91 = cuda91 =
cupy-cuda91>=5.0.0b4,<11.0.0 cupy-cuda91>=5.0.0b4,<12.0.0
cuda92 = cuda92 =
cupy-cuda92>=5.0.0b4,<11.0.0 cupy-cuda92>=5.0.0b4,<12.0.0
cuda100 = cuda100 =
cupy-cuda100>=5.0.0b4,<11.0.0 cupy-cuda100>=5.0.0b4,<12.0.0
cuda101 = cuda101 =
cupy-cuda101>=5.0.0b4,<11.0.0 cupy-cuda101>=5.0.0b4,<12.0.0
cuda102 = cuda102 =
cupy-cuda102>=5.0.0b4,<11.0.0 cupy-cuda102>=5.0.0b4,<12.0.0
cuda110 = cuda110 =
cupy-cuda110>=5.0.0b4,<11.0.0 cupy-cuda110>=5.0.0b4,<12.0.0
cuda111 = cuda111 =
cupy-cuda111>=5.0.0b4,<11.0.0 cupy-cuda111>=5.0.0b4,<12.0.0
cuda112 = cuda112 =
cupy-cuda112>=5.0.0b4,<11.0.0 cupy-cuda112>=5.0.0b4,<12.0.0
cuda113 = cuda113 =
cupy-cuda113>=5.0.0b4,<11.0.0 cupy-cuda113>=5.0.0b4,<12.0.0
cuda114 = cuda114 =
cupy-cuda114>=5.0.0b4,<11.0.0 cupy-cuda114>=5.0.0b4,<12.0.0
cuda115 = cuda115 =
cupy-cuda115>=5.0.0b4,<11.0.0 cupy-cuda115>=5.0.0b4,<12.0.0
cuda116 = cuda116 =
cupy-cuda116>=5.0.0b4,<11.0.0 cupy-cuda116>=5.0.0b4,<12.0.0
cuda117 = cuda117 =
cupy-cuda117>=5.0.0b4,<11.0.0 cupy-cuda117>=5.0.0b4,<12.0.0
cuda11x =
cupy-cuda11x>=11.0.0,<12.0.0
cuda-autodetect =
cupy-wheel>=11.0.0,<12.0.0
apple = apple =
thinc-apple-ops>=0.1.0.dev0,<1.0.0 thinc-apple-ops>=0.1.0.dev0,<1.0.0
# Language tokenizers with external dependencies # Language tokenizers with external dependencies
@ -114,7 +118,7 @@ ja =
sudachipy>=0.5.2,!=0.6.1 sudachipy>=0.5.2,!=0.6.1
sudachidict_core>=20211220 sudachidict_core>=20211220
ko = ko =
natto-py==0.9.0 natto-py>=0.9.0
th = th =
pythainlp>=2.0 pythainlp>=2.0

View File

@ -30,7 +30,9 @@ MOD_NAMES = [
"spacy.lexeme", "spacy.lexeme",
"spacy.vocab", "spacy.vocab",
"spacy.attrs", "spacy.attrs",
"spacy.kb", "spacy.kb.candidate",
"spacy.kb.kb",
"spacy.kb.kb_in_memory",
"spacy.ml.parser_model", "spacy.ml.parser_model",
"spacy.morphology", "spacy.morphology",
"spacy.pipeline.dep_parser", "spacy.pipeline.dep_parser",
@ -126,6 +128,8 @@ class build_ext_options:
class build_ext_subclass(build_ext, build_ext_options): class build_ext_subclass(build_ext, build_ext_options):
def build_extensions(self): def build_extensions(self):
if self.parallel is None and os.environ.get("SPACY_NUM_BUILD_JOBS") is not None:
self.parallel = int(os.environ.get("SPACY_NUM_BUILD_JOBS"))
build_ext_options.build_options(self) build_ext_options.build_options(self)
build_ext.build_extensions(self) build_ext.build_extensions(self)
@ -203,10 +207,25 @@ def setup_package():
get_python_inc(plat_specific=True), get_python_inc(plat_specific=True),
] ]
ext_modules = [] ext_modules = []
ext_modules.append(
Extension(
"spacy.matcher.levenshtein",
[
"spacy/matcher/levenshtein.pyx",
"spacy/matcher/polyleven.c",
],
language="c",
include_dirs=include_dirs,
)
)
for name in MOD_NAMES: for name in MOD_NAMES:
mod_path = name.replace(".", "/") + ".pyx" mod_path = name.replace(".", "/") + ".pyx"
ext = Extension( ext = Extension(
name, [mod_path], language="c++", include_dirs=include_dirs, extra_compile_args=["-std=c++11"] name,
[mod_path],
language="c++",
include_dirs=include_dirs,
extra_compile_args=["-std=c++11"],
) )
ext_modules.append(ext) ext_modules.append(ext)
print("Cythonizing sources") print("Cythonizing sources")

View File

@ -31,21 +31,21 @@ def load(
name: Union[str, Path], name: Union[str, Path],
*, *,
vocab: Union[Vocab, bool] = True, vocab: Union[Vocab, bool] = True,
disable: Iterable[str] = util.SimpleFrozenList(), disable: Union[str, Iterable[str]] = util._DEFAULT_EMPTY_PIPES,
enable: Iterable[str] = util.SimpleFrozenList(), enable: Union[str, Iterable[str]] = util._DEFAULT_EMPTY_PIPES,
exclude: Iterable[str] = util.SimpleFrozenList(), exclude: Union[str, Iterable[str]] = util._DEFAULT_EMPTY_PIPES,
config: Union[Dict[str, Any], Config] = util.SimpleFrozenDict(), config: Union[Dict[str, Any], Config] = util.SimpleFrozenDict(),
) -> Language: ) -> Language:
"""Load a spaCy model from an installed package or a local path. """Load a spaCy model from an installed package or a local path.
name (str): Package name or model path. name (str): Package name or model path.
vocab (Vocab): A Vocab object. If True, a vocab is created. vocab (Vocab): A Vocab object. If True, a vocab is created.
disable (Iterable[str]): Names of pipeline components to disable. Disabled disable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to disable. Disabled
pipes will be loaded but they won't be run unless you explicitly pipes will be loaded but they won't be run unless you explicitly
enable them by calling nlp.enable_pipe. enable them by calling nlp.enable_pipe.
enable (Iterable[str]): Names of pipeline components to enable. All other enable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to enable. All other
pipes will be disabled (but can be enabled later using nlp.enable_pipe). pipes will be disabled (but can be enabled later using nlp.enable_pipe).
exclude (Iterable[str]): Names of pipeline components to exclude. Excluded exclude (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to exclude. Excluded
components won't be loaded. components won't be loaded.
config (Dict[str, Any] / Config): Config overrides as nested dict or dict config (Dict[str, Any] / Config): Config overrides as nested dict or dict
keyed by section values in dot notation. keyed by section values in dot notation.
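As a hedged sketch of what the widened `Union[str, Iterable[str]]` signature accepts (assuming a pipeline package such as `en_core_web_sm` is installed):
```python
import spacy

# A single component name is now accepted...
nlp = spacy.load("en_core_web_sm", disable="parser")
# ...alongside the previously supported iterable form.
nlp = spacy.load("en_core_web_sm", disable=["parser", "ner"])
```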

View File

@ -1,6 +1,6 @@
# fmt: off # fmt: off
__title__ = "spacy" __title__ = "spacy"
__version__ = "3.4.0" __version__ = "3.4.2"
__download_url__ = "https://github.com/explosion/spacy-models/releases/download" __download_url__ = "https://github.com/explosion/spacy-models/releases/download"
__compatibility__ = "https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json" __compatibility__ = "https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json"
__projects__ = "https://github.com/explosion/projects" __projects__ = "https://github.com/explosion/projects"

View File

@ -573,3 +573,12 @@ def setup_gpu(use_gpu: int, silent=None) -> None:
local_msg.info("Using CPU") local_msg.info("Using CPU")
if gpu_is_available(): if gpu_is_available():
local_msg.info("To switch to GPU 0, use the option: --gpu-id 0") local_msg.info("To switch to GPU 0, use the option: --gpu-id 0")
def _format_number(number: Union[int, float], ndigits: int = 2) -> str:
"""Formats a number (float or int) rounding to `ndigits`, without truncating trailing 0s,
as happens with `round(number, ndigits)`"""
if isinstance(number, float):
return f"{number:.{ndigits}f}"
else:
return str(number)
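A brief, hedged illustration of the behaviour described in the docstring (expected values shown as comments):
```python
_format_number(3.5)       # -> '3.50'  (trailing zero kept; round(3.5, 2) would give 3.5)
_format_number(0.123, 1)  # -> '0.1'
_format_number(7)         # -> '7'     (ints are returned as-is, unrounded)
```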

View File

@ -9,7 +9,7 @@ import typer
import math import math
from ._util import app, Arg, Opt, show_validation_error, parse_config_overrides from ._util import app, Arg, Opt, show_validation_error, parse_config_overrides
from ._util import import_code, debug_cli from ._util import import_code, debug_cli, _format_number
from ..training import Example, remove_bilu_prefix from ..training import Example, remove_bilu_prefix
from ..training.initialize import get_sourced_components from ..training.initialize import get_sourced_components
from ..schemas import ConfigSchemaTraining from ..schemas import ConfigSchemaTraining
@ -989,7 +989,8 @@ def _get_kl_divergence(p: Counter, q: Counter) -> float:
def _format_span_row(span_data: List[Dict], labels: List[str]) -> List[Any]: def _format_span_row(span_data: List[Dict], labels: List[str]) -> List[Any]:
"""Compile into one list for easier reporting""" """Compile into one list for easier reporting"""
d = { d = {
label: [label] + list(round(d[label], 2) for d in span_data) for label in labels label: [label] + list(_format_number(d[label]) for d in span_data)
for label in labels
} }
return list(d.values()) return list(d.values())
@ -1004,6 +1005,10 @@ def _get_span_characteristics(
label: _gmean(l) label: _gmean(l)
for label, l in compiled_gold["spans_length"][spans_key].items() for label, l in compiled_gold["spans_length"][spans_key].items()
} }
spans_per_type = {
label: len(spans)
for label, spans in compiled_gold["spans_per_type"][spans_key].items()
}
min_lengths = [min(l) for l in compiled_gold["spans_length"][spans_key].values()] min_lengths = [min(l) for l in compiled_gold["spans_length"][spans_key].values()]
max_lengths = [max(l) for l in compiled_gold["spans_length"][spans_key].values()] max_lengths = [max(l) for l in compiled_gold["spans_length"][spans_key].values()]
@ -1031,6 +1036,7 @@ def _get_span_characteristics(
return { return {
"sd": span_distinctiveness, "sd": span_distinctiveness,
"bd": sb_distinctiveness, "bd": sb_distinctiveness,
"spans_per_type": spans_per_type,
"lengths": span_length, "lengths": span_length,
"min_length": min(min_lengths), "min_length": min(min_lengths),
"max_length": max(max_lengths), "max_length": max(max_lengths),
@ -1045,12 +1051,15 @@ def _get_span_characteristics(
def _print_span_characteristics(span_characteristics: Dict[str, Any]): def _print_span_characteristics(span_characteristics: Dict[str, Any]):
"""Print all span characteristics into a table""" """Print all span characteristics into a table"""
headers = ("Span Type", "Length", "SD", "BD") headers = ("Span Type", "Length", "SD", "BD", "N")
# Wasabi has this at 30 by default, but we might have some long labels
max_col = max(30, max(len(label) for label in span_characteristics["labels"]))
# Prepare table data with all span characteristics # Prepare table data with all span characteristics
table_data = [ table_data = [
span_characteristics["lengths"], span_characteristics["lengths"],
span_characteristics["sd"], span_characteristics["sd"],
span_characteristics["bd"], span_characteristics["bd"],
span_characteristics["spans_per_type"],
] ]
table = _format_span_row( table = _format_span_row(
span_data=table_data, labels=span_characteristics["labels"] span_data=table_data, labels=span_characteristics["labels"]
@ -1061,8 +1070,18 @@ def _print_span_characteristics(span_characteristics: Dict[str, Any]):
span_characteristics["avg_sd"], span_characteristics["avg_sd"],
span_characteristics["avg_bd"], span_characteristics["avg_bd"],
] ]
footer = ["Wgt. Average"] + [str(round(f, 2)) for f in footer_data]
msg.table(table, footer=footer, header=headers, divider=True) footer = (
["Wgt. Average"] + ["{:.2f}".format(round(f, 2)) for f in footer_data] + ["-"]
)
msg.table(
table,
footer=footer,
header=headers,
divider=True,
aligns=["l"] + ["r"] * (len(footer_data) + 1),
max_col=max_col,
)
def _get_spans_length_freq_dist( def _get_spans_length_freq_dist(

View File

@ -7,6 +7,7 @@ import typer
from ._util import app, Arg, Opt, WHEEL_SUFFIX, SDIST_SUFFIX from ._util import app, Arg, Opt, WHEEL_SUFFIX, SDIST_SUFFIX
from .. import about from .. import about
from ..util import is_package, get_minor_version, run_command from ..util import is_package, get_minor_version, run_command
from ..util import is_prerelease_version
from ..errors import OLD_MODEL_SHORTCUTS from ..errors import OLD_MODEL_SHORTCUTS
@ -19,7 +20,7 @@ def download_cli(
ctx: typer.Context, ctx: typer.Context,
model: str = Arg(..., help="Name of pipeline package to download"), model: str = Arg(..., help="Name of pipeline package to download"),
direct: bool = Opt(False, "--direct", "-d", "-D", help="Force direct download of name + version"), direct: bool = Opt(False, "--direct", "-d", "-D", help="Force direct download of name + version"),
sdist: bool = Opt(False, "--sdist", "-S", help="Download sdist (.tar.gz) archive instead of pre-built binary wheel") sdist: bool = Opt(False, "--sdist", "-S", help="Download sdist (.tar.gz) archive instead of pre-built binary wheel"),
# fmt: on # fmt: on
): ):
""" """
@ -35,7 +36,12 @@ def download_cli(
download(model, direct, sdist, *ctx.args) download(model, direct, sdist, *ctx.args)
def download(model: str, direct: bool = False, sdist: bool = False, *pip_args) -> None: def download(
model: str,
direct: bool = False,
sdist: bool = False,
*pip_args,
) -> None:
if ( if (
not (is_package("spacy") or is_package("spacy-nightly")) not (is_package("spacy") or is_package("spacy-nightly"))
and "--no-deps" not in pip_args and "--no-deps" not in pip_args
@ -49,13 +55,10 @@ def download(model: str, direct: bool = False, sdist: bool = False, *pip_args) -
"dependencies, you'll have to install them manually." "dependencies, you'll have to install them manually."
) )
pip_args = pip_args + ("--no-deps",) pip_args = pip_args + ("--no-deps",)
suffix = SDIST_SUFFIX if sdist else WHEEL_SUFFIX
dl_tpl = "{m}-{v}/{m}-{v}{s}#egg={m}=={v}"
if direct: if direct:
components = model.split("-") components = model.split("-")
model_name = "".join(components[:-1]) model_name = "".join(components[:-1])
version = components[-1] version = components[-1]
download_model(dl_tpl.format(m=model_name, v=version, s=suffix), pip_args)
else: else:
model_name = model model_name = model
if model in OLD_MODEL_SHORTCUTS: if model in OLD_MODEL_SHORTCUTS:
@ -66,15 +69,31 @@ def download(model: str, direct: bool = False, sdist: bool = False, *pip_args) -
model_name = OLD_MODEL_SHORTCUTS[model] model_name = OLD_MODEL_SHORTCUTS[model]
compatibility = get_compatibility() compatibility = get_compatibility()
version = get_version(model_name, compatibility) version = get_version(model_name, compatibility)
download_model(dl_tpl.format(m=model_name, v=version, s=suffix), pip_args)
filename = get_model_filename(model_name, version, sdist)
download_model(filename, pip_args)
msg.good( msg.good(
"Download and installation successful", "Download and installation successful",
f"You can now load the package via spacy.load('{model_name}')", f"You can now load the package via spacy.load('{model_name}')",
) )
def get_model_filename(model_name: str, version: str, sdist: bool = False) -> str:
dl_tpl = "{m}-{v}/{m}-{v}{s}"
egg_tpl = "#egg={m}=={v}"
suffix = SDIST_SUFFIX if sdist else WHEEL_SUFFIX
filename = dl_tpl.format(m=model_name, v=version, s=suffix)
if sdist:
filename += egg_tpl.format(m=model_name, v=version)
return filename
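For a concrete sense of the strings this helper builds, a hedged example below assumes the usual `WHEEL_SUFFIX = "-py3-none-any.whl"` and `SDIST_SUFFIX = ".tar.gz"` values from `_util.py` (treat the exact suffixes as an assumption):
```python
get_model_filename("en_core_web_sm", "3.4.1")
# -> 'en_core_web_sm-3.4.1/en_core_web_sm-3.4.1-py3-none-any.whl'

get_model_filename("en_core_web_sm", "3.4.1", sdist=True)
# -> 'en_core_web_sm-3.4.1/en_core_web_sm-3.4.1.tar.gz#egg=en_core_web_sm==3.4.1'
```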
def get_compatibility() -> dict: def get_compatibility() -> dict:
version = get_minor_version(about.__version__) if is_prerelease_version(about.__version__):
version: Optional[str] = about.__version__
else:
version = get_minor_version(about.__version__)
r = requests.get(about.__compatibility__) r = requests.get(about.__compatibility__)
if r.status_code != 200: if r.status_code != 200:
msg.fail( msg.fail(
@ -101,6 +120,11 @@ def get_version(model: str, comp: dict) -> str:
return comp[model][0] return comp[model][0]
def get_latest_version(model: str) -> str:
comp = get_compatibility()
return get_version(model, comp)
def download_model( def download_model(
filename: str, user_pip_args: Optional[Sequence[str]] = None filename: str, user_pip_args: Optional[Sequence[str]] = None
) -> None: ) -> None:

View File

@ -1,10 +1,13 @@
from typing import Optional, Dict, Any, Union, List from typing import Optional, Dict, Any, Union, List
import platform import platform
import pkg_resources
import json
from pathlib import Path from pathlib import Path
from wasabi import Printer, MarkdownRenderer from wasabi import Printer, MarkdownRenderer
import srsly import srsly
from ._util import app, Arg, Opt, string_to_list from ._util import app, Arg, Opt, string_to_list
from .download import get_model_filename, get_latest_version
from .. import util from .. import util
from .. import about from .. import about
@ -16,6 +19,7 @@ def info_cli(
markdown: bool = Opt(False, "--markdown", "-md", help="Generate Markdown for GitHub issues"), markdown: bool = Opt(False, "--markdown", "-md", help="Generate Markdown for GitHub issues"),
silent: bool = Opt(False, "--silent", "-s", "-S", help="Don't print anything (just return)"), silent: bool = Opt(False, "--silent", "-s", "-S", help="Don't print anything (just return)"),
exclude: str = Opt("labels", "--exclude", "-e", help="Comma-separated keys to exclude from the print-out"), exclude: str = Opt("labels", "--exclude", "-e", help="Comma-separated keys to exclude from the print-out"),
url: bool = Opt(False, "--url", "-u", help="Print the URL to download the most recent compatible version of the pipeline"),
# fmt: on # fmt: on
): ):
""" """
@ -23,10 +27,19 @@ def info_cli(
print its meta information. Flag --markdown prints details in Markdown for easy print its meta information. Flag --markdown prints details in Markdown for easy
copy-pasting to GitHub issues. copy-pasting to GitHub issues.
Flag --url prints only the download URL of the most recent compatible
version of the pipeline.
DOCS: https://spacy.io/api/cli#info DOCS: https://spacy.io/api/cli#info
""" """
exclude = string_to_list(exclude) exclude = string_to_list(exclude)
info(model, markdown=markdown, silent=silent, exclude=exclude) info(
model,
markdown=markdown,
silent=silent,
exclude=exclude,
url=url,
)
def info( def info(
@ -35,11 +48,20 @@ def info(
markdown: bool = False, markdown: bool = False,
silent: bool = True, silent: bool = True,
exclude: Optional[List[str]] = None, exclude: Optional[List[str]] = None,
url: bool = False,
) -> Union[str, dict]: ) -> Union[str, dict]:
msg = Printer(no_print=silent, pretty=not silent) msg = Printer(no_print=silent, pretty=not silent)
if not exclude: if not exclude:
exclude = [] exclude = []
if model: if url:
if model is not None:
title = f"Download info for pipeline '{model}'"
data = info_model_url(model)
print(data["download_url"])
return data
else:
msg.fail("--url option requires a pipeline name", exits=1)
elif model:
title = f"Info about pipeline '{model}'" title = f"Info about pipeline '{model}'"
data = info_model(model, silent=silent) data = info_model(model, silent=silent)
else: else:
@ -99,11 +121,44 @@ def info_model(model: str, *, silent: bool = True) -> Dict[str, Any]:
meta["source"] = str(model_path.resolve()) meta["source"] = str(model_path.resolve())
else: else:
meta["source"] = str(model_path) meta["source"] = str(model_path)
download_url = info_installed_model_url(model)
if download_url:
meta["download_url"] = download_url
return { return {
k: v for k, v in meta.items() if k not in ("accuracy", "performance", "speed") k: v for k, v in meta.items() if k not in ("accuracy", "performance", "speed")
} }
def info_installed_model_url(model: str) -> Optional[str]:
"""Given a pipeline name, get the download URL if available, otherwise
return None.
This is only available for pipelines installed as modules that have
dist-info available.
"""
try:
dist = pkg_resources.get_distribution(model)
data = json.loads(dist.get_metadata("direct_url.json"))
return data["url"]
except pkg_resources.DistributionNotFound:
# no such package
return None
except Exception:
# something else, like no file or invalid JSON
return None
def info_model_url(model: str) -> Dict[str, Any]:
"""Return the download URL for the latest version of a pipeline."""
version = get_latest_version(model)
filename = get_model_filename(model, version)
download_url = about.__download_url__ + "/" + filename
release_tpl = "https://github.com/explosion/spacy-models/releases/tag/{m}-{v}"
release_url = release_tpl.format(m=model, v=version)
return {"download_url": download_url, "release_url": release_url}
def get_markdown( def get_markdown(
data: Dict[str, Any], data: Dict[str, Any],
title: Optional[str] = None, title: Optional[str] = None,

View File

@ -299,8 +299,8 @@ def get_meta(
} }
nlp = util.load_model_from_path(Path(model_path)) nlp = util.load_model_from_path(Path(model_path))
meta.update(nlp.meta) meta.update(nlp.meta)
meta.update(existing_meta)
meta["spacy_version"] = util.get_minor_version_range(about.__version__) meta["spacy_version"] = util.get_minor_version_range(about.__version__)
meta.update(existing_meta)
meta["vectors"] = { meta["vectors"] = {
"width": nlp.vocab.vectors_length, "width": nlp.vocab.vectors_length,
"vectors": len(nlp.vocab.vectors), "vectors": len(nlp.vocab.vectors),

View File

@ -61,7 +61,7 @@ def pretrain_cli(
# TODO: What's the solution here? How do we handle optional blocks? # TODO: What's the solution here? How do we handle optional blocks?
msg.fail("The [pretraining] block in your config is empty", exits=1) msg.fail("The [pretraining] block in your config is empty", exits=1)
if not output_dir.exists(): if not output_dir.exists():
output_dir.mkdir() output_dir.mkdir(parents=True)
msg.good(f"Created output directory: {output_dir}") msg.good(f"Created output directory: {output_dir}")
# Save non-interpolated config # Save non-interpolated config
raw_config.to_disk(output_dir / "config.cfg") raw_config.to_disk(output_dir / "config.cfg")

View File

@ -189,7 +189,11 @@ def convert_asset_url(url: str) -> str:
RETURNS (str): The converted URL. RETURNS (str): The converted URL.
""" """
# If the asset URL is a regular GitHub URL it's likely a mistake # If the asset URL is a regular GitHub URL it's likely a mistake
if re.match(r"(http(s?)):\/\/github.com", url) and "releases/download" not in url: if (
re.match(r"(http(s?)):\/\/github.com", url)
and "releases/download" not in url
and "/raw/" not in url
):
converted = url.replace("github.com", "raw.githubusercontent.com") converted = url.replace("github.com", "raw.githubusercontent.com")
converted = re.sub(r"/(tree|blob)/", "/", converted) converted = re.sub(r"/(tree|blob)/", "/", converted)
msg.warn( msg.warn(

View File

@ -25,6 +25,7 @@ def project_update_dvc_cli(
project_dir: Path = Arg(Path.cwd(), help="Location of project directory. Defaults to current working directory.", exists=True, file_okay=False), project_dir: Path = Arg(Path.cwd(), help="Location of project directory. Defaults to current working directory.", exists=True, file_okay=False),
workflow: Optional[str] = Arg(None, help=f"Name of workflow defined in {PROJECT_FILE}. Defaults to first workflow if not set."), workflow: Optional[str] = Arg(None, help=f"Name of workflow defined in {PROJECT_FILE}. Defaults to first workflow if not set."),
verbose: bool = Opt(False, "--verbose", "-V", help="Print more info"), verbose: bool = Opt(False, "--verbose", "-V", help="Print more info"),
quiet: bool = Opt(False, "--quiet", "-q", help="Print less info"),
force: bool = Opt(False, "--force", "-F", help="Force update DVC config"), force: bool = Opt(False, "--force", "-F", help="Force update DVC config"),
# fmt: on # fmt: on
): ):
@ -36,7 +37,7 @@ def project_update_dvc_cli(
DOCS: https://spacy.io/api/cli#project-dvc DOCS: https://spacy.io/api/cli#project-dvc
""" """
project_update_dvc(project_dir, workflow, verbose=verbose, force=force) project_update_dvc(project_dir, workflow, verbose=verbose, quiet=quiet, force=force)
def project_update_dvc( def project_update_dvc(
@ -44,6 +45,7 @@ def project_update_dvc(
workflow: Optional[str] = None, workflow: Optional[str] = None,
*, *,
verbose: bool = False, verbose: bool = False,
quiet: bool = False,
force: bool = False, force: bool = False,
) -> None: ) -> None:
"""Update the auto-generated Data Version Control (DVC) config file. A DVC """Update the auto-generated Data Version Control (DVC) config file. A DVC
@ -54,11 +56,12 @@ def project_update_dvc(
workflow (Optional[str]): Optional name of workflow defined in project.yml. workflow (Optional[str]): Optional name of workflow defined in project.yml.
If not set, the first workflow will be used. If not set, the first workflow will be used.
verbose (bool): Print more info. verbose (bool): Print more info.
quiet (bool): Print less info.
force (bool): Force update DVC config. force (bool): Force update DVC config.
""" """
config = load_project_config(project_dir) config = load_project_config(project_dir)
updated = update_dvc_config( updated = update_dvc_config(
project_dir, config, workflow, verbose=verbose, force=force project_dir, config, workflow, verbose=verbose, quiet=quiet, force=force
) )
help_msg = "To execute the workflow with DVC, run: dvc repro" help_msg = "To execute the workflow with DVC, run: dvc repro"
if updated: if updated:
@ -72,7 +75,7 @@ def update_dvc_config(
config: Dict[str, Any], config: Dict[str, Any],
workflow: Optional[str] = None, workflow: Optional[str] = None,
verbose: bool = False, verbose: bool = False,
silent: bool = False, quiet: bool = False,
force: bool = False, force: bool = False,
) -> bool: ) -> bool:
"""Re-run the DVC commands in dry mode and update dvc.yaml file in the """Re-run the DVC commands in dry mode and update dvc.yaml file in the
@ -83,7 +86,7 @@ def update_dvc_config(
path (Path): The path to the project directory. path (Path): The path to the project directory.
config (Dict[str, Any]): The loaded project.yml. config (Dict[str, Any]): The loaded project.yml.
verbose (bool): Whether to print additional info (via DVC). verbose (bool): Whether to print additional info (via DVC).
silent (bool): Don't output anything (via DVC). quiet (bool): Don't output anything (via DVC).
force (bool): Force update, even if hashes match. force (bool): Force update, even if hashes match.
RETURNS (bool): Whether the DVC config file was updated. RETURNS (bool): Whether the DVC config file was updated.
""" """
@ -105,6 +108,14 @@ def update_dvc_config(
dvc_config_path.unlink() dvc_config_path.unlink()
dvc_commands = [] dvc_commands = []
config_commands = {cmd["name"]: cmd for cmd in config.get("commands", [])} config_commands = {cmd["name"]: cmd for cmd in config.get("commands", [])}
# some flags that apply to every command
flags = []
if verbose:
flags.append("--verbose")
if quiet:
flags.append("--quiet")
for name in workflows[workflow]: for name in workflows[workflow]:
command = config_commands[name] command = config_commands[name]
deps = command.get("deps", []) deps = command.get("deps", [])
@ -118,14 +129,26 @@ def update_dvc_config(
deps_cmd = [c for cl in [["-d", p] for p in deps] for c in cl] deps_cmd = [c for cl in [["-d", p] for p in deps] for c in cl]
outputs_cmd = [c for cl in [["-o", p] for p in outputs] for c in cl] outputs_cmd = [c for cl in [["-o", p] for p in outputs] for c in cl]
outputs_nc_cmd = [c for cl in [["-O", p] for p in outputs_no_cache] for c in cl] outputs_nc_cmd = [c for cl in [["-O", p] for p in outputs_no_cache] for c in cl]
dvc_cmd = ["run", "-n", name, "-w", str(path), "--no-exec"]
dvc_cmd = ["run", *flags, "-n", name, "-w", str(path), "--no-exec"]
if command.get("no_skip"): if command.get("no_skip"):
dvc_cmd.append("--always-changed") dvc_cmd.append("--always-changed")
full_cmd = [*dvc_cmd, *deps_cmd, *outputs_cmd, *outputs_nc_cmd, *project_cmd] full_cmd = [*dvc_cmd, *deps_cmd, *outputs_cmd, *outputs_nc_cmd, *project_cmd]
dvc_commands.append(join_command(full_cmd)) dvc_commands.append(join_command(full_cmd))
if not dvc_commands:
# If we don't check for this, then there will be an error when reading the
# config, since DVC wouldn't create it.
msg.fail(
"No usable commands for DVC found. This can happen if none of your "
"commands have dependencies or outputs.",
exits=1,
)
with working_dir(path): with working_dir(path):
dvc_flags = {"--verbose": verbose, "--quiet": silent} for c in dvc_commands:
run_dvc_commands(dvc_commands, flags=dvc_flags) dvc_command = "dvc " + c
run_command(dvc_command)
with dvc_config_path.open("r+", encoding="utf8") as f: with dvc_config_path.open("r+", encoding="utf8") as f:
content = f.read() content = f.read()
f.seek(0, 0) f.seek(0, 0)
@ -133,26 +156,6 @@ def update_dvc_config(
return True return True
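Putting the new flag handling together, a command named "train" run with quiet=True is generated roughly as follows (command name and paths are illustrative):

    flags = ["--quiet"]
    dvc_cmd = ["run", *flags, "-n", "train", "-w", "/path/to/project", "--no-exec"]
    # after appending the -d/-o/-O arguments and the project command, the subprocess call is roughly:
    # dvc run --quiet -n train -w /path/to/project --no-exec -d corpus/train.spacy -o training/model-best python -m spacy train ...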
def run_dvc_commands(
commands: Iterable[str] = SimpleFrozenList(), flags: Dict[str, bool] = {}
) -> None:
"""Run a sequence of DVC commands in a subprocess, in order.
commands (List[str]): The string commands without the leading "dvc".
flags (Dict[str, bool]): Conditional flags to be added to command. Makes it
easier to pass flags like --quiet that depend on a variable or
command-line setting while avoiding lots of nested conditionals.
"""
for c in commands:
command = split_command(c)
dvc_command = ["dvc", *command]
# Add the flags if they are set to True
for flag, is_active in flags.items():
if is_active:
dvc_command.append(flag)
run_command(dvc_command)
def check_workflows(workflows: List[str], workflow: Optional[str] = None) -> None: def check_workflows(workflows: List[str], workflow: Optional[str] = None) -> None:
"""Validate workflows provided in project.yml and check that a given """Validate workflows provided in project.yml and check that a given
workflow can be used to generate a DVC config. workflow can be used to generate a DVC config.

View File

@ -10,6 +10,7 @@ from .._util import get_hash, get_checksum, download_file, ensure_pathy
from ...util import make_tempdir, get_minor_version, ENV_VARS, check_bool_env_var from ...util import make_tempdir, get_minor_version, ENV_VARS, check_bool_env_var
from ...git_info import GIT_VERSION from ...git_info import GIT_VERSION
from ... import about from ... import about
from ...errors import Errors
if TYPE_CHECKING: if TYPE_CHECKING:
from pathy import Pathy # noqa: F401 from pathy import Pathy # noqa: F401
@ -84,7 +85,23 @@ class RemoteStorage:
with tarfile.open(tar_loc, mode=mode_string) as tar_file: with tarfile.open(tar_loc, mode=mode_string) as tar_file:
# This requires that the path is added correctly, relative # This requires that the path is added correctly, relative
# to root. This is how we set things up in push() # to root. This is how we set things up in push()
tar_file.extractall(self.root)
# Disallow paths outside the current directory for the tar
# file (CVE-2007-4559, directory traversal vulnerability)
def is_within_directory(directory, target):
abs_directory = os.path.abspath(directory)
abs_target = os.path.abspath(target)
prefix = os.path.commonprefix([abs_directory, abs_target])
return prefix == abs_directory
def safe_extract(tar, path):
for member in tar.getmembers():
member_path = os.path.join(path, member.name)
if not is_within_directory(path, member_path):
raise ValueError(Errors.E852)
tar.extractall(path)
safe_extract(tar_file, self.root)
return url return url
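The check rejects archive members that would land outside the extraction root, e.g. "../../escape.txt" (CVE-2007-4559). A standalone illustration of the idea, with illustrative paths:

    import os

    def is_within_directory(directory, target):
        abs_directory = os.path.abspath(directory)
        abs_target = os.path.abspath(target)
        return os.path.commonprefix([abs_directory, abs_target]) == abs_directory

    assert is_within_directory("/tmp/unpack", "/tmp/unpack/model/config.cfg")
    assert not is_within_directory("/tmp/unpack", "/tmp/unpack/../escape.txt")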
def find( def find(

View File

@ -1,5 +1,8 @@
from typing import Optional, List, Dict, Sequence, Any, Iterable from typing import Optional, List, Dict, Sequence, Any, Iterable, Tuple
import os.path
from pathlib import Path from pathlib import Path
import pkg_resources
from wasabi import msg from wasabi import msg
from wasabi.util import locale_escape from wasabi.util import locale_escape
import sys import sys
@ -50,6 +53,7 @@ def project_run(
force: bool = False, force: bool = False,
dry: bool = False, dry: bool = False,
capture: bool = False, capture: bool = False,
skip_requirements_check: bool = False,
) -> None: ) -> None:
"""Run a named script defined in the project.yml. If the script is part """Run a named script defined in the project.yml. If the script is part
of the default pipeline (defined in the "run" section), DVC is used to of the default pipeline (defined in the "run" section), DVC is used to
@ -66,11 +70,19 @@ def project_run(
sys.exit will be called with the return code. You should use capture=False sys.exit will be called with the return code. You should use capture=False
when you want to turn over execution to the command, and capture=True when you want to turn over execution to the command, and capture=True
when you want to run the command more like a function. when you want to run the command more like a function.
skip_requirements_check (bool): Whether to skip the requirements check.
""" """
config = load_project_config(project_dir, overrides=overrides) config = load_project_config(project_dir, overrides=overrides)
commands = {cmd["name"]: cmd for cmd in config.get("commands", [])} commands = {cmd["name"]: cmd for cmd in config.get("commands", [])}
workflows = config.get("workflows", {}) workflows = config.get("workflows", {})
validate_subcommand(list(commands.keys()), list(workflows.keys()), subcommand) validate_subcommand(list(commands.keys()), list(workflows.keys()), subcommand)
req_path = project_dir / "requirements.txt"
if not skip_requirements_check:
if config.get("check_requirements", True) and os.path.exists(req_path):
with req_path.open() as requirements_file:
_check_requirements([req.strip() for req in requirements_file])
if subcommand in workflows: if subcommand in workflows:
msg.info(f"Running workflow '{subcommand}'") msg.info(f"Running workflow '{subcommand}'")
for cmd in workflows[subcommand]: for cmd in workflows[subcommand]:
@ -81,6 +93,7 @@ def project_run(
force=force, force=force,
dry=dry, dry=dry,
capture=capture, capture=capture,
skip_requirements_check=True,
) )
else: else:
cmd = commands[subcommand] cmd = commands[subcommand]
@ -195,6 +208,8 @@ def validate_subcommand(
msg.fail(f"No commands or workflows defined in {PROJECT_FILE}", exits=1) msg.fail(f"No commands or workflows defined in {PROJECT_FILE}", exits=1)
if subcommand not in commands and subcommand not in workflows: if subcommand not in commands and subcommand not in workflows:
help_msg = [] help_msg = []
if subcommand in ["assets", "asset"]:
help_msg.append("Did you mean to run: python -m spacy project assets?")
if commands: if commands:
help_msg.append(f"Available commands: {', '.join(commands)}") help_msg.append(f"Available commands: {', '.join(commands)}")
if workflows: if workflows:
@ -308,3 +323,38 @@ def get_fileinfo(project_dir: Path, paths: List[str]) -> List[Dict[str, Optional
md5 = get_checksum(file_path) if file_path.exists() else None md5 = get_checksum(file_path) if file_path.exists() else None
data.append({"path": path, "md5": md5}) data.append({"path": path, "md5": md5})
return data return data
def _check_requirements(requirements: List[str]) -> Tuple[bool, bool]:
"""Checks whether requirements are installed and free of version conflicts.
requirements (List[str]): List of requirements.
RETURNS (Tuple[bool, bool]): Whether (1) any packages couldn't be imported, (2) any packages with version conflicts
exist.
"""
failed_pkgs_msgs: List[str] = []
conflicting_pkgs_msgs: List[str] = []
for req in requirements:
try:
pkg_resources.require(req)
except pkg_resources.DistributionNotFound as dnf:
failed_pkgs_msgs.append(dnf.report())
except pkg_resources.VersionConflict as vc:
conflicting_pkgs_msgs.append(vc.report())
except Exception:
msg.warn(
f"Unable to check requirement: {req} "
"Checks are currently limited to requirement specifiers "
"(PEP 508)"
)
if len(failed_pkgs_msgs) or len(conflicting_pkgs_msgs):
msg.warn(
title="Missing requirements or requirement conflicts detected. Make sure your Python environment is set up "
"correctly and you installed all requirements specified in your project's requirements.txt: "
)
for pgk_msg in failed_pkgs_msgs + conflicting_pkgs_msgs:
msg.text(pgk_msg)
return len(failed_pkgs_msgs) > 0, len(conflicting_pkgs_msgs) > 0
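A sketch of what the underlying pkg_resources check reports (the requirement string is illustrative):

    import pkg_resources

    try:
        pkg_resources.require("spacy-transformers>=1.1,<1.2")
    except pkg_resources.DistributionNotFound as dnf:
        print(dnf.report())   # package not installed at all
    except pkg_resources.VersionConflict as vc:
        print(vc.report())    # installed, but at an incompatible version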

View File

@ -271,13 +271,8 @@ factory = "tok2vec"
[components.tok2vec.model.embed] [components.tok2vec.model.embed]
@architectures = "spacy.MultiHashEmbed.v2" @architectures = "spacy.MultiHashEmbed.v2"
width = ${components.tok2vec.model.encode.width} width = ${components.tok2vec.model.encode.width}
{% if has_letters -%}
attrs = ["NORM", "PREFIX", "SUFFIX", "SHAPE"] attrs = ["NORM", "PREFIX", "SUFFIX", "SHAPE"]
rows = [5000, 2500, 2500, 2500] rows = [5000, 1000, 2500, 2500]
{% else -%}
attrs = ["ORTH", "SHAPE"]
rows = [5000, 2500]
{% endif -%}
include_static_vectors = {{ "true" if optimize == "accuracy" else "false" }} include_static_vectors = {{ "true" if optimize == "accuracy" else "false" }}
[components.tok2vec.model.encode] [components.tok2vec.model.encode]

View File

@ -37,6 +37,15 @@ bn:
accuracy: accuracy:
name: sagorsarker/bangla-bert-base name: sagorsarker/bangla-bert-base
size_factor: 3 size_factor: 3
ca:
word_vectors: null
transformer:
efficiency:
name: projecte-aina/roberta-base-ca-v2
size_factor: 3
accuracy:
name: projecte-aina/roberta-base-ca-v2
size_factor: 3
da: da:
word_vectors: da_core_news_lg word_vectors: da_core_news_lg
transformer: transformer:
@ -271,4 +280,3 @@ zh:
accuracy: accuracy:
name: bert-base-chinese name: bert-base-chinese
size_factor: 3 size_factor: 3
has_letters: false

View File

@ -90,6 +90,8 @@ dev_corpus = "corpora.dev"
train_corpus = "corpora.train" train_corpus = "corpora.train"
# Optional callback before nlp object is saved to disk after training # Optional callback before nlp object is saved to disk after training
before_to_disk = null before_to_disk = null
# Optional callback that is invoked at the start of each training step
before_update = null
[training.logger] [training.logger]
@loggers = "spacy.ConsoleLogger.v1" @loggers = "spacy.ConsoleLogger.v1"

View File

@ -123,7 +123,8 @@ def app(environ, start_response):
def parse_deps(orig_doc: Doc, options: Dict[str, Any] = {}) -> Dict[str, Any]: def parse_deps(orig_doc: Doc, options: Dict[str, Any] = {}) -> Dict[str, Any]:
"""Generate dependency parse in {'words': [], 'arcs': []} format. """Generate dependency parse in {'words': [], 'arcs': []} format.
doc (Doc): Document do parse. orig_doc (Doc): Document to parse.
options (Dict[str, Any]): Dependency parse specific visualisation options.
RETURNS (dict): Generated dependency parse keyed by words and arcs. RETURNS (dict): Generated dependency parse keyed by words and arcs.
""" """
doc = Doc(orig_doc.vocab).from_bytes( doc = Doc(orig_doc.vocab).from_bytes(
@ -209,7 +210,7 @@ def parse_ents(doc: Doc, options: Dict[str, Any] = {}) -> Dict[str, Any]:
def parse_spans(doc: Doc, options: Dict[str, Any] = {}) -> Dict[str, Any]: def parse_spans(doc: Doc, options: Dict[str, Any] = {}) -> Dict[str, Any]:
"""Generate spans in [{start: i, end: i, label: 'label'}] format. """Generate spans in [{start_token: i, end_token: i, label: 'label'}] format.
doc (Doc): Document to parse. doc (Doc): Document to parse.
options (Dict[str, any]): Span-specific visualisation options. options (Dict[str, any]): Span-specific visualisation options.

View File

@ -130,26 +130,56 @@ class SpanRenderer:
title (str / None): Document title set in Doc.user_data['title']. title (str / None): Document title set in Doc.user_data['title'].
""" """
per_token_info = [] per_token_info = []
# we must sort so that we can correctly describe when spans need to "stack"
# which is determined by their start token, then span length (longer spans on top),
# then break any remaining ties with the span label
spans = sorted(
spans,
key=lambda s: (
s["start_token"],
-(s["end_token"] - s["start_token"]),
s["label"],
),
)
for s in spans:
# this is the vertical 'slot' that the span will be rendered in
# vertical_position = span_label_offset + (offset_step * (slot - 1))
s["render_slot"] = 0
for idx, token in enumerate(tokens): for idx, token in enumerate(tokens):
# Identify if a token belongs to a Span (and which) and if it's a # Identify if a token belongs to a Span (and which) and if it's a
# start token of said Span. We'll use this for the final HTML render # start token of said Span. We'll use this for the final HTML render
token_markup: Dict[str, Any] = {} token_markup: Dict[str, Any] = {}
token_markup["text"] = token token_markup["text"] = token
concurrent_spans = 0
entities = [] entities = []
for span in spans: for span in spans:
ent = {} ent = {}
if span["start_token"] <= idx < span["end_token"]: if span["start_token"] <= idx < span["end_token"]:
concurrent_spans += 1
span_start = idx == span["start_token"]
ent["label"] = span["label"] ent["label"] = span["label"]
ent["is_start"] = True if idx == span["start_token"] else False ent["is_start"] = span_start
if span_start:
# When the span starts, we need to know how many other
# spans are on the 'span stack' and will be rendered.
# This value becomes the vertical render slot for this entire span
span["render_slot"] = concurrent_spans
ent["render_slot"] = span["render_slot"]
kb_id = span.get("kb_id", "") kb_id = span.get("kb_id", "")
kb_url = span.get("kb_url", "#") kb_url = span.get("kb_url", "#")
ent["kb_link"] = ( ent["kb_link"] = (
TPL_KB_LINK.format(kb_id=kb_id, kb_url=kb_url) if kb_id else "" TPL_KB_LINK.format(kb_id=kb_id, kb_url=kb_url) if kb_id else ""
) )
entities.append(ent) entities.append(ent)
else:
# We don't specifically need to do this since we loop
# over tokens and spans sorted by their start_token,
# so we'll never use a span again after the last token it appears in,
# but if we were to use these spans again we'd want to make sure
# this value was reset correctly.
span["render_slot"] = 0
token_markup["entities"] = entities token_markup["entities"] = entities
per_token_info.append(token_markup) per_token_info.append(token_markup)
markup = self._render_markup(per_token_info) markup = self._render_markup(per_token_info)
markup = TPL_SPANS.format(content=markup, dir=self.direction) markup = TPL_SPANS.format(content=markup, dir=self.direction)
if title: if title:
@ -160,8 +190,12 @@ class SpanRenderer:
"""Render the markup from per-token information""" """Render the markup from per-token information"""
markup = "" markup = ""
for token in per_token_info: for token in per_token_info:
entities = sorted(token["entities"], key=lambda d: d["label"]) entities = sorted(token["entities"], key=lambda d: d["render_slot"])
if entities: # Whitespace tokens disrupt the vertical space (no line height) so that the
# span indicators get misaligned. We don't render them as individual
# tokens anyway, so we'll just not display a span indicator either.
is_whitespace = token["text"].strip() == ""
if entities and not is_whitespace:
slices = self._get_span_slices(token["entities"]) slices = self._get_span_slices(token["entities"])
starts = self._get_span_starts(token["entities"]) starts = self._get_span_starts(token["entities"])
total_height = ( total_height = (
@ -182,10 +216,18 @@ class SpanRenderer:
def _get_span_slices(self, entities: List[Dict]) -> str: def _get_span_slices(self, entities: List[Dict]) -> str:
"""Get the rendered markup of all Span slices""" """Get the rendered markup of all Span slices"""
span_slices = [] span_slices = []
for entity, step in zip(entities, itertools.count(step=self.offset_step)): for entity in entities:
# rather than iterate over multiples of offset_step, we use entity['render_slot']
# to determine the vertical position, since that tells where
# the span starts vertically so we can extend it horizontally,
# past other spans that might have already ended
color = self.colors.get(entity["label"].upper(), self.default_color) color = self.colors.get(entity["label"].upper(), self.default_color)
top_offset = self.top_offset + (
self.offset_step * (entity["render_slot"] - 1)
)
span_slice = self.span_slice_template.format( span_slice = self.span_slice_template.format(
bg=color, top_offset=self.top_offset + step bg=color,
top_offset=top_offset,
) )
span_slices.append(span_slice) span_slices.append(span_slice)
return "".join(span_slices) return "".join(span_slices)
@ -193,12 +235,15 @@ class SpanRenderer:
def _get_span_starts(self, entities: List[Dict]) -> str: def _get_span_starts(self, entities: List[Dict]) -> str:
"""Get the rendered markup of all Span start tokens""" """Get the rendered markup of all Span start tokens"""
span_starts = [] span_starts = []
for entity, step in zip(entities, itertools.count(step=self.offset_step)): for entity in entities:
color = self.colors.get(entity["label"].upper(), self.default_color) color = self.colors.get(entity["label"].upper(), self.default_color)
top_offset = self.top_offset + (
self.offset_step * (entity["render_slot"] - 1)
)
span_start = ( span_start = (
self.span_start_template.format( self.span_start_template.format(
bg=color, bg=color,
top_offset=self.top_offset + step, top_offset=top_offset,
label=entity["label"], label=entity["label"],
kb_link=entity["kb_link"], kb_link=entity["kb_link"],
) )
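The net effect is that a span's vertical position now comes from its render_slot rather than from its position in the iteration order, so a long span keeps its row even after shorter spans stacked below it have ended. A small sketch of the offset arithmetic; the top_offset and offset_step values are assumptions, not taken from this diff:

    top_offset, offset_step = 40, 17
    for render_slot in (1, 2, 3):
        print(top_offset + offset_step * (render_slot - 1))   # 40, 57, 74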

View File

@ -16,8 +16,8 @@ def setup_default_warnings():
filter_warning("ignore", error_msg="numpy.dtype size changed") # noqa filter_warning("ignore", error_msg="numpy.dtype size changed") # noqa
filter_warning("ignore", error_msg="numpy.ufunc size changed") # noqa filter_warning("ignore", error_msg="numpy.ufunc size changed") # noqa
# warn about entity_ruler & matcher having no patterns only once # warn about entity_ruler, span_ruler & matcher having no patterns only once
for pipe in ["matcher", "entity_ruler"]: for pipe in ["matcher", "entity_ruler", "span_ruler"]:
filter_warning("once", error_msg=Warnings.W036.format(name=pipe)) filter_warning("once", error_msg=Warnings.W036.format(name=pipe))
# warn once about lemmatizer without required POS # warn once about lemmatizer without required POS
@ -212,6 +212,8 @@ class Warnings(metaclass=ErrorsWithCodes):
W121 = ("Attempting to trace non-existent method '{method}' in pipe '{pipe}'") W121 = ("Attempting to trace non-existent method '{method}' in pipe '{pipe}'")
W122 = ("Couldn't trace method '{method}' in pipe '{pipe}'. This can happen if the pipe class " W122 = ("Couldn't trace method '{method}' in pipe '{pipe}'. This can happen if the pipe class "
"is a Cython extension type.") "is a Cython extension type.")
W123 = ("Argument `enable` with value {enable} does not contain all values specified in the config option "
"`enabled` ({enabled}). Be aware that this might affect other components in your pipeline.")
class Errors(metaclass=ErrorsWithCodes): class Errors(metaclass=ErrorsWithCodes):
@ -230,8 +232,9 @@ class Errors(metaclass=ErrorsWithCodes):
"initialized component.") "initialized component.")
E004 = ("Can't set up pipeline component: a factory for '{name}' already " E004 = ("Can't set up pipeline component: a factory for '{name}' already "
"exists. Existing factory: {func}. New factory: {new_func}") "exists. Existing factory: {func}. New factory: {new_func}")
E005 = ("Pipeline component '{name}' returned None. If you're using a " E005 = ("Pipeline component '{name}' returned {returned_type} instead of a "
"custom component, maybe you forgot to return the processed Doc?") "Doc. If you're using a custom component, maybe you forgot to "
"return the processed Doc?")
E006 = ("Invalid constraints for adding pipeline component. You can only " E006 = ("Invalid constraints for adding pipeline component. You can only "
"set one of the following: before (component name or index), " "set one of the following: before (component name or index), "
"after (component name or index), first (True) or last (True). " "after (component name or index), first (True) or last (True). "
@ -389,7 +392,7 @@ class Errors(metaclass=ErrorsWithCodes):
"consider using doc.spans instead.") "consider using doc.spans instead.")
E106 = ("Can't find `doc._.{attr}` attribute specified in the underscore " E106 = ("Can't find `doc._.{attr}` attribute specified in the underscore "
"settings: {opts}") "settings: {opts}")
E107 = ("Value of `doc._.{attr}` is not JSON-serializable: {value}") E107 = ("Value of custom attribute `{attr}` is not JSON-serializable: {value}")
E109 = ("Component '{name}' could not be run. Did you forget to " E109 = ("Component '{name}' could not be run. Did you forget to "
"call `initialize()`?") "call `initialize()`?")
E110 = ("Invalid displaCy render wrapper. Expected callable, got: {obj}") E110 = ("Invalid displaCy render wrapper. Expected callable, got: {obj}")
@ -535,11 +538,18 @@ class Errors(metaclass=ErrorsWithCodes):
E198 = ("Unable to return {n} most similar vectors for the current vectors " E198 = ("Unable to return {n} most similar vectors for the current vectors "
"table, which contains {n_rows} vectors.") "table, which contains {n_rows} vectors.")
E199 = ("Unable to merge 0-length span at `doc[{start}:{end}]`.") E199 = ("Unable to merge 0-length span at `doc[{start}:{end}]`.")
E200 = ("Can't yet set {attr} from Span. Vote for this feature on the " E200 = ("Can't set {attr} from Span.")
"issue tracker: http://github.com/explosion/spaCy/issues")
E202 = ("Unsupported {name} mode '{mode}'. Supported modes: {modes}.") E202 = ("Unsupported {name} mode '{mode}'. Supported modes: {modes}.")
E203 = ("If the {name} embedding layer is not updated "
"during training, make sure to include it in 'annotating components'")
# New errors added in v3.x # New errors added in v3.x
E851 = ("The 'textcat' component labels should only have values of 0 or 1, "
"but found value of '{val}'.")
E852 = ("The tar file pulled from the remote attempted an unsafe path "
"traversal.")
E853 = ("Unsupported component factory name '{name}'. The character '.' is "
"not permitted in factory names.")
E854 = ("Unable to set doc.ents. Check that the 'ents_filter' does not " E854 = ("Unable to set doc.ents. Check that the 'ents_filter' does not "
"permit overlapping spans.") "permit overlapping spans.")
E855 = ("Invalid {obj}: {obj} is not from the same doc.") E855 = ("Invalid {obj}: {obj} is not from the same doc.")
@ -705,11 +715,11 @@ class Errors(metaclass=ErrorsWithCodes):
"need to modify the pipeline, use the built-in methods like " "need to modify the pipeline, use the built-in methods like "
"`nlp.add_pipe`, `nlp.remove_pipe`, `nlp.disable_pipe` or " "`nlp.add_pipe`, `nlp.remove_pipe`, `nlp.disable_pipe` or "
"`nlp.enable_pipe` instead.") "`nlp.enable_pipe` instead.")
E927 = ("Can't write to frozen list Maybe you're trying to modify a computed " E927 = ("Can't write to frozen list. Maybe you're trying to modify a computed "
"property or default function argument?") "property or default function argument?")
E928 = ("A KnowledgeBase can only be serialized to/from from a directory, " E928 = ("An InMemoryLookupKB can only be serialized to/from from a directory, "
"but the provided argument {loc} points to a file.") "but the provided argument {loc} points to a file.")
E929 = ("Couldn't read KnowledgeBase from {loc}. The path does not seem to exist.") E929 = ("Couldn't read InMemoryLookupKB from {loc}. The path does not seem to exist.")
E930 = ("Received invalid get_examples callback in `{method}`. " E930 = ("Received invalid get_examples callback in `{method}`. "
"Expected function that returns an iterable of Example objects but " "Expected function that returns an iterable of Example objects but "
"got: {obj}") "got: {obj}")
@ -935,8 +945,17 @@ class Errors(metaclass=ErrorsWithCodes):
E1040 = ("Doc.from_json requires all tokens to have the same attributes. " E1040 = ("Doc.from_json requires all tokens to have the same attributes. "
"Some tokens do not contain annotation for: {partial_attrs}") "Some tokens do not contain annotation for: {partial_attrs}")
E1041 = ("Expected a string, Doc, or bytes as input, but got: {type}") E1041 = ("Expected a string, Doc, or bytes as input, but got: {type}")
E1042 = ("Function was called with `{arg1}`={arg1_values} and " E1042 = ("`enable={enable}` and `disable={disable}` are inconsistent with each other.\nIf you only passed "
"`{arg2}`={arg2_values} but these arguments are conflicting.") "one of `enable` or `disable`, the other argument is specified in your pipeline's configuration.\nIn that "
"case pass an empty list for the previously not specified argument to avoid this error.")
E1043 = ("Expected None or a value in range [{range_start}, {range_end}] for entity linker threshold, but got "
"{value}.")
E1044 = ("Expected `candidates_batch_size` to be >= 1, but got: {value}")
E1045 = ("Encountered {parent} subclass without `{parent}.{method}` "
"method in '{name}'. If you want to use this method, make "
"sure it's overwritten on the subclass.")
E1046 = ("{cls_name} is an abstract class and cannot be instantiated. If you are looking for spaCy's default "
"knowledge base, use `InMemoryLookupKB`.")
# Deprecated model shortcuts, only used in errors and warnings # Deprecated model shortcuts, only used in errors and warnings

spacy/kb/__init__.py Normal file
View File

@ -0,0 +1,3 @@
from .kb import KnowledgeBase
from .kb_in_memory import InMemoryLookupKB
from .candidate import Candidate, get_candidates, get_candidates_batch
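With the new spacy/kb package in place, downstream code keeps importing from the same top-level path, e.g.:

    from spacy.kb import KnowledgeBase, InMemoryLookupKB, Candidate, get_candidates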

spacy/kb/candidate.pxd Normal file
View File

@ -0,0 +1,12 @@
from .kb cimport KnowledgeBase
from libcpp.vector cimport vector
from ..typedefs cimport hash_t
# Object used by the Entity Linker that summarizes one entity-alias candidate combination.
cdef class Candidate:
cdef readonly KnowledgeBase kb
cdef hash_t entity_hash
cdef float entity_freq
cdef vector[float] entity_vector
cdef hash_t alias_hash
cdef float prior_prob

spacy/kb/candidate.pyx Normal file
View File

@ -0,0 +1,74 @@
# cython: infer_types=True, profile=True
from typing import Iterable
from .kb cimport KnowledgeBase
from ..tokens import Span
cdef class Candidate:
"""A `Candidate` object refers to a textual mention (`alias`) that may or may not be resolved
to a specific `entity` from a Knowledge Base. This will be used as input for the entity linking
algorithm which will disambiguate the various candidates to the correct one.
Each candidate (alias, entity) pair is assigned a certain prior probability.
DOCS: https://spacy.io/api/kb/#candidate-init
"""
def __init__(self, KnowledgeBase kb, entity_hash, entity_freq, entity_vector, alias_hash, prior_prob):
self.kb = kb
self.entity_hash = entity_hash
self.entity_freq = entity_freq
self.entity_vector = entity_vector
self.alias_hash = alias_hash
self.prior_prob = prior_prob
@property
def entity(self) -> int:
"""RETURNS (uint64): hash of the entity's KB ID/name"""
return self.entity_hash
@property
def entity_(self) -> str:
"""RETURNS (str): ID/name of this entity in the KB"""
return self.kb.vocab.strings[self.entity_hash]
@property
def alias(self) -> int:
"""RETURNS (uint64): hash of the alias"""
return self.alias_hash
@property
def alias_(self) -> str:
"""RETURNS (str): ID of the original alias"""
return self.kb.vocab.strings[self.alias_hash]
@property
def entity_freq(self) -> float:
return self.entity_freq
@property
def entity_vector(self) -> Iterable[float]:
return self.entity_vector
@property
def prior_prob(self) -> float:
return self.prior_prob
def get_candidates(kb: KnowledgeBase, mention: Span) -> Iterable[Candidate]:
"""
Return candidate entities for a given mention, fetching appropriate entries from the index.
kb (KnowledgeBase): Knowledge base to query.
mention (Span): Entity mention for which to identify candidates.
RETURNS (Iterable[Candidate]): Identified candidates.
"""
return kb.get_candidates(mention)
def get_candidates_batch(kb: KnowledgeBase, mentions: Iterable[Span]) -> Iterable[Iterable[Candidate]]:
"""
Return candidate entities for the given mentions, fetching appropriate entries from the index.
kb (KnowledgeBase): Knowledge base to query.
mentions (Iterable[Span]): Entity mentions for which to identify candidates.
RETURNS (Iterable[Iterable[Candidate]]): Identified candidates.
"""
return kb.get_candidates_batch(mentions)
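A usage sketch with the in-memory KB; the entity ID, frequencies and vectors are illustrative:

    import spacy
    from spacy.kb import InMemoryLookupKB, get_candidates

    nlp = spacy.blank("en")
    kb = InMemoryLookupKB(nlp.vocab, entity_vector_length=3)
    kb.add_entity(entity="Q2146908", freq=12, entity_vector=[1.0, 2.0, 3.0])
    kb.add_alias(alias="Russ Cochran", entities=["Q2146908"], probabilities=[0.8])
    doc = nlp("Russ Cochran published the comics.")
    for c in get_candidates(kb, doc[0:2]):
        print(c.entity_, c.alias_, c.prior_prob)   # Q2146908 Russ Cochran 0.8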

spacy/kb/kb.pxd Normal file
View File

@ -0,0 +1,10 @@
"""Knowledge-base for entity or concept linking."""
from cymem.cymem cimport Pool
from libc.stdint cimport int64_t
from ..vocab cimport Vocab
cdef class KnowledgeBase:
cdef Pool mem
cdef readonly Vocab vocab
cdef readonly int64_t entity_vector_length

spacy/kb/kb.pyx Normal file
View File

@ -0,0 +1,108 @@
# cython: infer_types=True, profile=True
from pathlib import Path
from typing import Iterable, Tuple, Union
from cymem.cymem cimport Pool
from .candidate import Candidate
from ..tokens import Span
from ..util import SimpleFrozenList
from ..errors import Errors
cdef class KnowledgeBase:
"""A `KnowledgeBase` instance stores unique identifiers for entities and their textual aliases,
to support entity linking of named entities to real-world concepts.
This is an abstract class and requires its operations to be implemented.
DOCS: https://spacy.io/api/kb
"""
def __init__(self, vocab: Vocab, entity_vector_length: int):
"""Create a KnowledgeBase."""
# Make sure abstract KB is not instantiated.
if self.__class__ == KnowledgeBase:
raise TypeError(
Errors.E1046.format(cls_name=self.__class__.__name__)
)
self.vocab = vocab
self.entity_vector_length = entity_vector_length
self.mem = Pool()
def get_candidates_batch(self, mentions: Iterable[Span]) -> Iterable[Iterable[Candidate]]:
"""
Return candidate entities for the specified mentions. Each candidate defines the entity, the original alias,
and the prior probability of that alias resolving to that entity.
If no candidate is found for a given mention, an empty list is returned.
mentions (Iterable[Span]): Mentions for which to get candidates.
RETURNS (Iterable[Iterable[Candidate]]): Identified candidates.
"""
return [self.get_candidates(span) for span in mentions]
def get_candidates(self, mention: Span) -> Iterable[Candidate]:
"""
Return candidate entities for the specified mention. Each candidate defines the entity, the original alias,
and the prior probability of that alias resolving to that entity.
If no candidate is found for the given mention, an empty list is returned.
mention (Span): Mention for which to get candidates.
RETURNS (Iterable[Candidate]): Identified candidates.
"""
raise NotImplementedError(
Errors.E1045.format(parent="KnowledgeBase", method="get_candidates", name=self.__name__)
)
def get_vectors(self, entities: Iterable[str]) -> Iterable[Iterable[float]]:
"""
Return vectors for entities.
entities (Iterable[str]): Entity names/IDs.
RETURNS (Iterable[Iterable[float]]): Vectors for specified entities.
"""
return [self.get_vector(entity) for entity in entities]
def get_vector(self, str entity) -> Iterable[float]:
"""
Return vector for entity.
entity (str): Entity name/ID.
RETURNS (Iterable[float]): Vector for specified entity.
"""
raise NotImplementedError(
Errors.E1045.format(parent="KnowledgeBase", method="get_vector", name=self.__name__)
)
def to_bytes(self, **kwargs) -> bytes:
"""Serialize the current state to a binary string.
RETURNS (bytes): Current state as binary string.
"""
raise NotImplementedError(
Errors.E1045.format(parent="KnowledgeBase", method="to_bytes", name=self.__name__)
)
def from_bytes(self, bytes_data: bytes, *, exclude: Tuple[str] = tuple()):
"""Load state from a binary string.
bytes_data (bytes): KB state.
exclude (Tuple[str]): Properties to exclude when restoring KB.
"""
raise NotImplementedError(
Errors.E1045.format(parent="KnowledgeBase", method="from_bytes", name=self.__name__)
)
def to_disk(self, path: Union[str, Path], exclude: Iterable[str] = SimpleFrozenList()) -> None:
"""
Write KnowledgeBase content to disk.
path (Union[str, Path]): Target file path.
exclude (Iterable[str]): List of components to exclude.
"""
raise NotImplementedError(
Errors.E1045.format(parent="KnowledgeBase", method="to_disk", name=self.__name__)
)
def from_disk(self, path: Union[str, Path], exclude: Iterable[str] = SimpleFrozenList()) -> None:
"""
Load KnowledgeBase content from disk.
path (Union[str, Path]): Target file path.
exclude (Iterable[str]): List of components to exclude.
"""
raise NotImplementedError(
Errors.E1045.format(parent="KnowledgeBase", method="from_disk", name=self.__name__)
)
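A minimal sketch of the contract this abstract base defines for a custom KB; the class, its lookup logic and the omitted serialization methods are illustrative, not part of this diff:

    from spacy.kb import KnowledgeBase

    class MyCustomKB(KnowledgeBase):
        def get_candidates(self, mention):
            # look up mention.text in some external store and return Candidate objects
            return []

        def get_vector(self, entity):
            # fixed-size vector per entity; length must match entity_vector_length
            return [0.0] * self.entity_vector_length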

View File

@ -1,14 +1,12 @@
"""Knowledge-base for entity or concept linking.""" """Knowledge-base for entity or concept linking."""
from cymem.cymem cimport Pool
from preshed.maps cimport PreshMap from preshed.maps cimport PreshMap
from libcpp.vector cimport vector from libcpp.vector cimport vector
from libc.stdint cimport int32_t, int64_t from libc.stdint cimport int32_t, int64_t
from libc.stdio cimport FILE from libc.stdio cimport FILE
from .vocab cimport Vocab from ..typedefs cimport hash_t
from .typedefs cimport hash_t from ..structs cimport KBEntryC, AliasC
from .structs cimport KBEntryC, AliasC from .kb cimport KnowledgeBase
ctypedef vector[KBEntryC] entry_vec ctypedef vector[KBEntryC] entry_vec
ctypedef vector[AliasC] alias_vec ctypedef vector[AliasC] alias_vec
@ -16,21 +14,7 @@ ctypedef vector[float] float_vec
ctypedef vector[float_vec] float_matrix ctypedef vector[float_vec] float_matrix
# Object used by the Entity Linker that summarizes one entity-alias candidate combination. cdef class InMemoryLookupKB(KnowledgeBase):
cdef class Candidate:
cdef readonly KnowledgeBase kb
cdef hash_t entity_hash
cdef float entity_freq
cdef vector[float] entity_vector
cdef hash_t alias_hash
cdef float prior_prob
cdef class KnowledgeBase:
cdef Pool mem
cdef readonly Vocab vocab
cdef int64_t entity_vector_length
# This maps 64bit keys (hash of unique entity string) # This maps 64bit keys (hash of unique entity string)
# to 64bit values (position of the _KBEntryC struct in the _entries vector). # to 64bit values (position of the _KBEntryC struct in the _entries vector).
# The PreshMap is pretty space efficient, as it uses open addressing. So # The PreshMap is pretty space efficient, as it uses open addressing. So

View File

@ -1,8 +1,7 @@
# cython: infer_types=True, profile=True # cython: infer_types=True, profile=True
from typing import Iterator, Iterable, Callable, Dict, Any from typing import Iterable, Callable, Dict, Any, Union
import srsly import srsly
from cymem.cymem cimport Pool
from preshed.maps cimport PreshMap from preshed.maps cimport PreshMap
from cpython.exc cimport PyErr_SetFromErrno from cpython.exc cimport PyErr_SetFromErrno
from libc.stdio cimport fopen, fclose, fread, fwrite, feof, fseek from libc.stdio cimport fopen, fclose, fread, fwrite, feof, fseek
@ -12,85 +11,28 @@ from libcpp.vector cimport vector
from pathlib import Path from pathlib import Path
import warnings import warnings
from .typedefs cimport hash_t from ..tokens import Span
from .errors import Errors, Warnings from ..typedefs cimport hash_t
from . import util from ..errors import Errors, Warnings
from .util import SimpleFrozenList, ensure_path from .. import util
from ..util import SimpleFrozenList, ensure_path
cdef class Candidate: from ..vocab cimport Vocab
"""A `Candidate` object refers to a textual mention (`alias`) that may or may not be resolved from .kb cimport KnowledgeBase
to a specific `entity` from a Knowledge Base. This will be used as input for the entity linking from .candidate import Candidate as Candidate
algorithm which will disambiguate the various candidates to the correct one.
Each candidate (alias, entity) pair is assigned to a certain prior probability.
DOCS: https://spacy.io/api/kb/#candidate_init
"""
def __init__(self, KnowledgeBase kb, entity_hash, entity_freq, entity_vector, alias_hash, prior_prob):
self.kb = kb
self.entity_hash = entity_hash
self.entity_freq = entity_freq
self.entity_vector = entity_vector
self.alias_hash = alias_hash
self.prior_prob = prior_prob
@property
def entity(self):
"""RETURNS (uint64): hash of the entity's KB ID/name"""
return self.entity_hash
@property
def entity_(self):
"""RETURNS (str): ID/name of this entity in the KB"""
return self.kb.vocab.strings[self.entity_hash]
@property
def alias(self):
"""RETURNS (uint64): hash of the alias"""
return self.alias_hash
@property
def alias_(self):
"""RETURNS (str): ID of the original alias"""
return self.kb.vocab.strings[self.alias_hash]
@property
def entity_freq(self):
return self.entity_freq
@property
def entity_vector(self):
return self.entity_vector
@property
def prior_prob(self):
return self.prior_prob
def get_candidates(KnowledgeBase kb, span) -> Iterator[Candidate]: cdef class InMemoryLookupKB(KnowledgeBase):
""" """An `InMemoryLookupKB` instance stores unique identifiers for entities and their textual aliases,
Return candidate entities for a given span by using the text of the span as the alias
and fetching appropriate entries from the index.
This particular function is optimized to work with the built-in KB functionality,
but any other custom candidate generation method can be used in combination with the KB as well.
"""
return kb.get_alias_candidates(span.text)
cdef class KnowledgeBase:
"""A `KnowledgeBase` instance stores unique identifiers for entities and their textual aliases,
to support entity linking of named entities to real-world concepts. to support entity linking of named entities to real-world concepts.
DOCS: https://spacy.io/api/kb DOCS: https://spacy.io/api/kb_in_memory
""" """
def __init__(self, Vocab vocab, entity_vector_length): def __init__(self, Vocab vocab, entity_vector_length):
"""Create a KnowledgeBase.""" """Create an InMemoryLookupKB."""
self.mem = Pool() super().__init__(vocab, entity_vector_length)
self.entity_vector_length = entity_vector_length
self._entry_index = PreshMap() self._entry_index = PreshMap()
self._alias_index = PreshMap() self._alias_index = PreshMap()
self.vocab = vocab
self._create_empty_vectors(dummy_hash=self.vocab.strings[""]) self._create_empty_vectors(dummy_hash=self.vocab.strings[""])
def _initialize_entities(self, int64_t nr_entities): def _initialize_entities(self, int64_t nr_entities):
@ -104,11 +46,6 @@ cdef class KnowledgeBase:
self._alias_index = PreshMap(nr_aliases + 1) self._alias_index = PreshMap(nr_aliases + 1)
self._aliases_table = alias_vec(nr_aliases + 1) self._aliases_table = alias_vec(nr_aliases + 1)
@property
def entity_vector_length(self):
"""RETURNS (uint64): length of the entity vectors"""
return self.entity_vector_length
def __len__(self): def __len__(self):
return self.get_size_entities() return self.get_size_entities()
@ -286,7 +223,10 @@ cdef class KnowledgeBase:
alias_entry.probs = probs alias_entry.probs = probs
self._aliases_table[alias_index] = alias_entry self._aliases_table[alias_index] = alias_entry
def get_alias_candidates(self, str alias) -> Iterator[Candidate]: def get_candidates(self, mention: Span) -> Iterable[Candidate]:
return self.get_alias_candidates(mention.text) # type: ignore
def get_alias_candidates(self, str alias) -> Iterable[Candidate]:
""" """
Return candidate entities for an alias. Each candidate defines the entity, the original alias, Return candidate entities for an alias. Each candidate defines the entity, the original alias,
and the prior probability of that alias resolving to that entity. and the prior probability of that alias resolving to that entity.

View File

@ -72,10 +72,10 @@ class CatalanLemmatizer(Lemmatizer):
oov_forms.append(form) oov_forms.append(form)
if not forms: if not forms:
forms.extend(oov_forms) forms.extend(oov_forms)
if not forms and string in lookup_table.keys():
forms.append(self.lookup_lemmatize(token)[0]) # use lookups, and fall back to the token itself
if not forms: if not forms:
forms.append(string) forms.append(lookup_table.get(string, [string])[0])
forms = list(dict.fromkeys(forms)) forms = list(dict.fromkeys(forms))
self.cache[cache_key] = forms self.cache[cache_key] = forms
return forms return forms
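The fallback is now a single lookup that defaults to the surface form; roughly, with an illustrative table entry:

    lookup_table = {"cantàvem": ["cantar"]}
    lookup_table.get("cantàvem", ["cantàvem"])[0]   # -> "cantar" (from the lookups table)
    lookup_table.get("xyz", ["xyz"])[0]             # -> "xyz" (falls back to the token text)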

View File

@ -280,7 +280,7 @@ _currency = (
_punct = ( _punct = (
r"… …… , : ; \! \? ¿ ؟ ¡ \( \) \[ \] \{ \} < > _ # \* & 。 · । ، ۔ ؛ ٪" r"… …… , : ; \! \? ¿ ؟ ¡ \( \) \[ \] \{ \} < > _ # \* & 。 · । ، ۔ ؛ ٪"
) )
_quotes = r'\' " ” “ ` ´ , „ » « 「 」 『 』 【 】 《 》 〈 〉' _quotes = r'\' " ” “ ` ´ , „ » « 「 」 『 』 【 】 《 》 〈 〉 〈 〉 ⟦ ⟧'
_hyphens = "- — -- --- —— ~" _hyphens = "- — -- --- —— ~"
# Various symbols like dingbats, but also emoji # Various symbols like dingbats, but also emoji

View File

@ -53,11 +53,16 @@ class FrenchLemmatizer(Lemmatizer):
rules = rules_table.get(univ_pos, []) rules = rules_table.get(univ_pos, [])
string = string.lower() string = string.lower()
forms = [] forms = []
# first try lookup in table based on upos
if string in index: if string in index:
forms.append(string) forms.append(string)
self.cache[cache_key] = forms self.cache[cache_key] = forms
return forms return forms
# then add anything in the exceptions table
forms.extend(exceptions.get(string, [])) forms.extend(exceptions.get(string, []))
# if nothing found yet, use the rules
oov_forms = [] oov_forms = []
if not forms: if not forms:
for old, new in rules: for old, new in rules:
@ -69,12 +74,14 @@ class FrenchLemmatizer(Lemmatizer):
forms.append(form) forms.append(form)
else: else:
oov_forms.append(form) oov_forms.append(form)
# if still nothing, add the oov forms from rules
if not forms: if not forms:
forms.extend(oov_forms) forms.extend(oov_forms)
if not forms and string in lookup_table.keys():
forms.append(self.lookup_lemmatize(token)[0]) # use lookups, which fall back to the token itself
if not forms: if not forms:
forms.append(string) forms.append(lookup_table.get(string, [string])[0])
forms = list(dict.fromkeys(forms)) forms = list(dict.fromkeys(forms))
self.cache[cache_key] = forms self.cache[cache_key] = forms
return forms return forms

View File

@ -1,11 +1,15 @@
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .stop_words import STOP_WORDS from .stop_words import STOP_WORDS
from .lex_attrs import LEX_ATTRS from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES, TOKENIZER_INFIXES
from ...language import Language, BaseDefaults from ...language import Language, BaseDefaults
class AncientGreekDefaults(BaseDefaults): class AncientGreekDefaults(BaseDefaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS tokenizer_exceptions = TOKENIZER_EXCEPTIONS
prefixes = TOKENIZER_PREFIXES
suffixes = TOKENIZER_SUFFIXES
infixes = TOKENIZER_INFIXES
lex_attr_getters = LEX_ATTRS lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS stop_words = STOP_WORDS

View File

@ -0,0 +1,46 @@
from ..char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES, LIST_CURRENCY
from ..char_classes import LIST_ICONS, ALPHA_LOWER, ALPHA_UPPER, ALPHA, HYPHENS
from ..char_classes import CONCAT_QUOTES
_prefixes = (
[
"",
"",
]
+ LIST_PUNCT
+ LIST_ELLIPSES
+ LIST_QUOTES
+ LIST_CURRENCY
+ LIST_ICONS
)
_suffixes = (
LIST_PUNCT
+ LIST_ELLIPSES
+ LIST_QUOTES
+ LIST_ICONS
+ [
"",
"",
r"(?<=[\u1F00-\u1FFF\u0370-\u03FF])[\-\.⸏]",
]
)
_infixes = (
LIST_ELLIPSES
+ LIST_ICONS
+ [
r"(?<=[0-9])[+\-\*^](?=[0-9-])",
r"(?<=[{al}{q}])\.(?=[{au}{q}])".format(
al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES
),
r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
r"(?<=[{a}0-9])(?:{h})(?=[{a}])".format(a=ALPHA, h=HYPHENS),
r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA),
r"(?<=[\u1F00-\u1FFF\u0370-\u03FF])—",
]
)
TOKENIZER_PREFIXES = _prefixes
TOKENIZER_SUFFIXES = _suffixes
TOKENIZER_INFIXES = _infixes

View File

@ -3,7 +3,7 @@ from ..punctuation import TOKENIZER_INFIXES as BASE_TOKENIZER_INFIXES
_infixes = ( _infixes = (
["·", "", "\(", "\)"] ["·", "", r"\(", r"\)"]
+ [r"(?<=[0-9])~(?=[0-9-])"] + [r"(?<=[0-9])~(?=[0-9-])"]
+ LIST_QUOTES + LIST_QUOTES
+ BASE_TOKENIZER_INFIXES + BASE_TOKENIZER_INFIXES

spacy/lang/la/__init__.py Normal file
View File

@ -0,0 +1,18 @@
from ...language import Language, BaseDefaults
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .stop_words import STOP_WORDS
from .lex_attrs import LEX_ATTRS
class LatinDefaults(BaseDefaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
stop_words = STOP_WORDS
lex_attr_getters = LEX_ATTRS
class Latin(Language):
lang = "la"
Defaults = LatinDefaults
__all__ = ["Latin"]

View File

@ -0,0 +1,34 @@
from ...attrs import LIKE_NUM
import re
# cf. Goyvaerts/Levithan 2009; case-insensitive, allows up to four repetitions (the {0,4} quantifiers)
roman_numerals_compile = re.compile(
r"(?i)^(?=[MDCLXVI])M*(C[MD]|D?C{0,4})(X[CL]|L?X{0,4})(I[XV]|V?I{0,4})$"
)
_num_words = set(
"""
unus una unum duo duae tres tria quattuor quinque sex septem octo novem decem
""".split()
)
_ordinal_words = set(
"""
primus prima primum secundus secunda secundum tertius tertia tertium
""".split()
)
def like_num(text):
if text.isdigit():
return True
if roman_numerals_compile.match(text):
return True
if text.lower() in _num_words:
return True
if text.lower() in _ordinal_words:
return True
return False
LEX_ATTRS = {LIKE_NUM: like_num}
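A quick sketch of what the new LIKE_NUM getter accepts (inputs are illustrative):

    from spacy.lang.la.lex_attrs import like_num

    like_num("MMXXII")    # True: Roman numeral
    like_num("viii")      # True: case-insensitive
    like_num("septem")    # True: listed cardinal
    like_num("septimus")  # False: only the first three ordinals are listed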

View File

@ -0,0 +1,37 @@
# Corrected Perseus list, cf. https://wiki.digitalclassicist.org/Stopwords_for_Greek_and_Latin
STOP_WORDS = set(
"""
ab ac ad adhuc aliqui aliquis an ante apud at atque aut autem
cum cur
de deinde dum
ego enim ergo es est et etiam etsi ex
fio
haud hic
iam idem igitur ille in infra inter interim ipse is ita
magis modo mox
nam ne nec necque neque nisi non nos
o ob
per possum post pro
quae quam quare qui quia quicumque quidem quilibet quis quisnam quisquam quisque quisquis quo quoniam
sed si sic sive sub sui sum super suus
tam tamen trans tu tum
ubi uel uero
vel vero
""".split()
)

View File

@ -0,0 +1,76 @@
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ...symbols import ORTH
from ...util import update_exc
## TODO: Look into systematically handling u/v
_exc = {
"mecum": [{ORTH: "me"}, {ORTH: "cum"}],
"tecum": [{ORTH: "te"}, {ORTH: "cum"}],
"nobiscum": [{ORTH: "nobis"}, {ORTH: "cum"}],
"vobiscum": [{ORTH: "vobis"}, {ORTH: "cum"}],
"uobiscum": [{ORTH: "uobis"}, {ORTH: "cum"}],
}
for orth in [
"A.",
"Agr.",
"Ap.",
"C.",
"Cn.",
"D.",
"F.",
"K.",
"L.",
"M'.",
"M.",
"Mam.",
"N.",
"Oct.",
"Opet.",
"P.",
"Paul.",
"Post.",
"Pro.",
"Q.",
"S.",
"Ser.",
"Sert.",
"Sex.",
"St.",
"Sta.",
"T.",
"Ti.",
"V.",
"Vol.",
"Vop.",
"U.",
"Uol.",
"Uop.",
"Ian.",
"Febr.",
"Mart.",
"Apr.",
"Mai.",
"Iun.",
"Iul.",
"Aug.",
"Sept.",
"Oct.",
"Nov.",
"Nou.",
"Dec.",
"Non.",
"Id.",
"A.D.",
"Coll.",
"Cos.",
"Ord.",
"Pl.",
"S.C.",
"Suff.",
"Trib.",
]:
_exc[orth] = [{ORTH: orth}]
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)

spacy/lang/lg/__init__.py Normal file
View File

@ -0,0 +1,18 @@
from .stop_words import STOP_WORDS
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES
from ...language import Language, BaseDefaults
class LugandaDefaults(BaseDefaults):
lex_attr_getters = LEX_ATTRS
infixes = TOKENIZER_INFIXES
stop_words = STOP_WORDS
class Luganda(Language):
lang = "lg"
Defaults = LugandaDefaults
__all__ = ["Luganda"]

spacy/lang/lg/examples.py Normal file
View File

@ -0,0 +1,17 @@
"""
Example sentences to test spaCy and its language models.
>>> from spacy.lang.lg.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Mpa ebyafaayo ku byalo Nakatu ne Nkajja",
"Okuyita Ttembo kitegeeza kugwa ddalu",
"Ekifumu kino kyali kya mulimu ki?",
"Ekkovu we liyise wayitibwa mukululo",
"Akola mulimu ki oguvaamu ssente?",
"Emisumaali egikomerera embaawo giyitibwa nninga",
"Abooluganda abemmamba ababiri",
"Ekisaawe ky'ebyenjigiriza kya mugaso nnyo",
]

View File

@ -0,0 +1,95 @@
from ...attrs import LIKE_NUM
_num_words = [
"nnooti", # Zero
"zeero", # zero
"emu", # one
"bbiri", # two
"ssatu", # three
"nnya", # four
"ttaano", # five
"mukaaga", # six
"musanvu", # seven
"munaana", # eight
"mwenda", # nine
"kkumi", # ten
"kkumi n'emu", # eleven
"kkumi na bbiri", # twelve
"kkumi na ssatu", # thirteen
"kkumi na nnya", # forteen
"kkumi na ttaano", # fifteen
"kkumi na mukaaga", # sixteen
"kkumi na musanvu", # seventeen
"kkumi na munaana", # eighteen
"kkumi na mwenda", # nineteen
"amakumi abiri", # twenty
"amakumi asatu", # thirty
"amakumi ana", # forty
"amakumi ataano", # fifty
"nkaaga", # sixty
"nsanvu", # seventy
"kinaana", # eighty
"kyenda", # ninety
"kikumi", # hundred
"lukumi", # thousand
"kakadde", # million
"kawumbi", # billion
"kase", # trillion
"katabalika", # quadrillion
"keesedde", # gajillion
"kafukunya", # bazillion
"ekisooka", # first
"ekyokubiri", # second
"ekyokusatu", # third
"ekyokuna", # fourth
"ekyokutaano", # fifith
"ekyomukaaga", # sixth
"ekyomusanvu", # seventh
"eky'omunaana", # eighth
"ekyomwenda", # nineth
"ekyekkumi", # tenth
"ekyekkumi n'ekimu", # eleventh
"ekyekkumi n'ebibiri", # twelveth
"ekyekkumi n'ebisatu", # thirteenth
"ekyekkumi n'ebina", # fourteenth
"ekyekkumi n'ebitaano", # fifteenth
"ekyekkumi n'omukaaga", # sixteenth
"ekyekkumi n'omusanvu", # seventeenth
"ekyekkumi n'omunaana", # eigteenth
"ekyekkumi n'omwenda", # nineteenth
"ekyamakumi abiri", # twentieth
"ekyamakumi asatu", # thirtieth
"ekyamakumi ana", # fortieth
"ekyamakumi ataano", # fiftieth
"ekyenkaaga", # sixtieth
"ekyensanvu", # seventieth
"ekyekinaana", # eightieth
"ekyekyenda", # ninetieth
"ekyekikumi", # hundredth
"ekyolukumi", # thousandth
"ekyakakadde", # millionth
"ekyakawumbi", # billionth
"ekyakase", # trillionth
"ekyakatabalika", # quadrillionth
"ekyakeesedde", # gajillionth
"ekyakafukunya", # bazillionth
]
def like_num(text):
if text.startswith(("+", "-", "±", "~")):
text = text[1:]
text = text.replace(",", "").replace(".", "")
if text.isdigit():
return True
if text.count("/") == 1:
num, denom = text.split("/")
if num.isdigit() and denom.isdigit():
return True
text_lower = text.lower()
if text_lower in _num_words:
return True
return False
LEX_ATTRS = {LIKE_NUM: like_num}
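A quick illustrative check of the numeric normalization above (hedged; only the module path from this diff is assumed): signs and separators are stripped, and simple fractions and the spelled-out words are accepted.

from spacy.lang.lg.lex_attrs import like_num

assert like_num("-4,200")   # sign and thousands separator are stripped
assert like_num("3/4")      # simple fraction
assert like_num("kkumi")    # ten
assert not like_num("nnyo")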

View File

@ -0,0 +1,19 @@
from ..char_classes import LIST_ELLIPSES, LIST_ICONS, HYPHENS
from ..char_classes import CONCAT_QUOTES, ALPHA_LOWER, ALPHA_UPPER, ALPHA
_infixes = (
LIST_ELLIPSES
+ LIST_ICONS
+ [
r"(?<=[0-9])[+\-\*^](?=[0-9-])",
r"(?<=[{al}{q}])\.(?=[{au}{q}])".format(
al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES
),
r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
r"(?<=[{a}0-9])(?:{h})(?=[{a}])".format(a=ALPHA, h=HYPHENS),
r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA),
]
)
TOKENIZER_INFIXES = _infixes

View File

@ -0,0 +1,19 @@
STOP_WORDS = set(
"""
abadde abalala abamu abangi abava ajja ali alina ani anti ateekeddwa atewamu
atya awamu aweebwa ayinza ba baali babadde babalina bajja
bajjanewankubade bali balina bandi bangi bano bateekeddwa baweebwa bayina bebombi beera bibye
bimu bingi bino bo bokka bonna buli bulijjo bulungi bwabwe bwaffe bwayo bwe bwonna bya byabwe
byaffe byebimu byonna ddaa ddala ddi e ebimu ebiri ebweruobulungi ebyo edda ejja ekirala ekyo
endala engeri ennyo era erimu erina ffe ffenna ga gujja gumu gunno guno gwa gwe kaseera kati
kennyini ki kiki kikino kikye kikyo kino kirungi kki ku kubangabyombi kubangaolwokuba kudda
kuva kuwa kwegamba kyaffe kye kyekimuoyo kyekyo kyonna leero liryo lwa lwaki lyabwezaabwe
lyaffe lyange mbadde mingi mpozzi mu mulinaoyina munda mwegyabwe nolwekyo nabadde nabo nandiyagadde
nandiye nanti naye ne nedda neera nga nnyingi nnyini nnyinza nnyo nti nyinza nze oba ojja okudda
okugenda okuggyako okutuusa okuva okuwa oli olina oluvannyuma olwekyobuva omuli ono osobola otya
oyina oyo seetaaga si sinakindi singa talina tayina tebaali tebaalina tebayina terina tetulina
tetuteekeddwa tewali teyalina teyayina tolina tu tuyina tulina tuyina twafuna twetaaga wa wabula
wabweru wadde waggulunnina wakati waliwobangi waliyo wandi wange wano wansi weebwa yabadde yaffe
ye yenna yennyini yina yonna ziba zijja zonna
""".split()
)

View File

@ -40,6 +40,7 @@ def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]:
span_label = doc.vocab.strings.add("NP") span_label = doc.vocab.strings.add("NP")
# Only NOUNS and PRONOUNS matter # Only NOUNS and PRONOUNS matter
end_span = -1
for i, word in enumerate(filter(lambda x: x.pos in [PRON, NOUN], doclike)): for i, word in enumerate(filter(lambda x: x.pos in [PRON, NOUN], doclike)):
# For NOUNS # For NOUNS
# Pick children from syntactic parse (only those with certain dependencies) # Pick children from syntactic parse (only those with certain dependencies)
@ -58,15 +59,17 @@ def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]:
children_i = [c.i for c in children] + [word.i] children_i = [c.i for c in children] + [word.i]
start_span = min(children_i) start_span = min(children_i)
end_span = max(children_i) + 1 if start_span >= end_span:
yield start_span, end_span, span_label end_span = max(children_i) + 1
yield start_span, end_span, span_label
# PRONOUNS only if it is the subject of a verb # PRONOUNS only if it is the subject of a verb
elif word.pos == PRON: elif word.pos == PRON:
if word.dep in pronoun_deps: if word.dep in pronoun_deps:
start_span = word.i start_span = word.i
end_span = word.i + 1 if start_span >= end_span:
yield start_span, end_span, span_label end_span = word.i + 1
yield start_span, end_span, span_label
SYNTAX_ITERATORS = {"noun_chunks": noun_chunks} SYNTAX_ITERATORS = {"noun_chunks": noun_chunks}
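The added end_span bookkeeping keeps yielded noun chunks from overlapping: a new chunk is only emitted if it starts at or after the end of the previously emitted one. A minimal standalone sketch of that invariant (not the spaCy API itself):

def non_overlapping(candidates):
    end_span = -1
    for start, end in candidates:
        if start >= end_span:
            end_span = end
            yield start, end

# the second candidate is dropped because it starts inside the first chunk
assert list(non_overlapping([(0, 3), (1, 2), (3, 5)])) == [(0, 3), (3, 5)]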

View File

@ -28,7 +28,7 @@ class Russian(Language):
assigns=["token.lemma"], assigns=["token.lemma"],
default_config={ default_config={
"model": None, "model": None,
"mode": "pymorphy2", "mode": "pymorphy3",
"overwrite": False, "overwrite": False,
"scorer": {"@scorers": "spacy.lemmatizer_scorer.v1"}, "scorer": {"@scorers": "spacy.lemmatizer_scorer.v1"},
}, },

View File

@ -19,11 +19,11 @@ class RussianLemmatizer(Lemmatizer):
model: Optional[Model], model: Optional[Model],
name: str = "lemmatizer", name: str = "lemmatizer",
*, *,
mode: str = "pymorphy2", mode: str = "pymorphy3",
overwrite: bool = False, overwrite: bool = False,
scorer: Optional[Callable] = lemmatizer_score, scorer: Optional[Callable] = lemmatizer_score,
) -> None: ) -> None:
if mode == "pymorphy2": if mode in {"pymorphy2", "pymorphy2_lookup"}:
try: try:
from pymorphy2 import MorphAnalyzer from pymorphy2 import MorphAnalyzer
except ImportError: except ImportError:
@ -33,6 +33,16 @@ class RussianLemmatizer(Lemmatizer):
) from None ) from None
if getattr(self, "_morph", None) is None: if getattr(self, "_morph", None) is None:
self._morph = MorphAnalyzer() self._morph = MorphAnalyzer()
elif mode == "pymorphy3":
try:
from pymorphy3 import MorphAnalyzer
except ImportError:
raise ImportError(
"The Russian lemmatizer mode 'pymorphy3' requires the "
"pymorphy3 library. Install it with: pip install pymorphy3"
) from None
if getattr(self, "_morph", None) is None:
self._morph = MorphAnalyzer()
super().__init__( super().__init__(
vocab, model, name, mode=mode, overwrite=overwrite, scorer=scorer vocab, model, name, mode=mode, overwrite=overwrite, scorer=scorer
) )
@ -104,6 +114,9 @@ class RussianLemmatizer(Lemmatizer):
return [analyses[0].normal_form] return [analyses[0].normal_form]
return [string] return [string]
def pymorphy3_lemmatize(self, token: Token) -> List[str]:
return self.pymorphy2_lemmatize(token)
def oc2ud(oc_tag: str) -> Tuple[str, Dict[str, str]]: def oc2ud(oc_tag: str) -> Tuple[str, Dict[str, str]]:
gram_map = { gram_map = {
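A hedged usage sketch of the new default mode (requires pymorphy3 to be installed; the pymorphy-based lemmatizer still needs POS tags from a trained tagger or morphologizer earlier in the pipeline to produce non-trivial lemmas):

import spacy

nlp = spacy.blank("ru")
nlp.add_pipe("lemmatizer", config={"mode": "pymorphy3"})  # pip install pymorphy3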

View File

@ -61,6 +61,11 @@ for abbr in [
{ORTH: "2к23", NORM: "2023"}, {ORTH: "2к23", NORM: "2023"},
{ORTH: "2к24", NORM: "2024"}, {ORTH: "2к24", NORM: "2024"},
{ORTH: "2к25", NORM: "2025"}, {ORTH: "2к25", NORM: "2025"},
{ORTH: "2к26", NORM: "2026"},
{ORTH: "2к27", NORM: "2027"},
{ORTH: "2к28", NORM: "2028"},
{ORTH: "2к29", NORM: "2029"},
{ORTH: "2к30", NORM: "2030"},
]: ]:
_exc[abbr[ORTH]] = [abbr] _exc[abbr[ORTH]] = [abbr]
@ -268,8 +273,8 @@ for abbr in [
{ORTH: "з-ка", NORM: "заимка"}, {ORTH: "з-ка", NORM: "заимка"},
{ORTH: "п-к", NORM: "починок"}, {ORTH: "п-к", NORM: "починок"},
{ORTH: "киш.", NORM: "кишлак"}, {ORTH: "киш.", NORM: "кишлак"},
{ORTH: "п. ст. ", NORM: "поселок станция"}, {ORTH: "п. ст.", NORM: "поселок станция"},
{ORTH: "п. ж/д ст. ", NORM: "поселок при железнодорожной станции"}, {ORTH: "п. ж/д ст.", NORM: "поселок при железнодорожной станции"},
{ORTH: "ж/д бл-ст", NORM: "железнодорожный блокпост"}, {ORTH: "ж/д бл-ст", NORM: "железнодорожный блокпост"},
{ORTH: "ж/д б-ка", NORM: "железнодорожная будка"}, {ORTH: "ж/д б-ка", NORM: "железнодорожная будка"},
{ORTH: "ж/д в-ка", NORM: "железнодорожная ветка"}, {ORTH: "ж/д в-ка", NORM: "железнодорожная ветка"},
@ -280,12 +285,12 @@ for abbr in [
{ORTH: "ж/д п.п.", NORM: "железнодорожный путевой пост"}, {ORTH: "ж/д п.п.", NORM: "железнодорожный путевой пост"},
{ORTH: "ж/д о.п.", NORM: "железнодорожный остановочный пункт"}, {ORTH: "ж/д о.п.", NORM: "железнодорожный остановочный пункт"},
{ORTH: "ж/д рзд.", NORM: "железнодорожный разъезд"}, {ORTH: "ж/д рзд.", NORM: "железнодорожный разъезд"},
{ORTH: "ж/д ст. ", NORM: "железнодорожная станция"}, {ORTH: "ж/д ст.", NORM: "железнодорожная станция"},
{ORTH: "м-ко", NORM: "местечко"}, {ORTH: "м-ко", NORM: "местечко"},
{ORTH: "д.", NORM: "деревня"}, {ORTH: "д.", NORM: "деревня"},
{ORTH: "с.", NORM: "село"}, {ORTH: "с.", NORM: "село"},
{ORTH: "сл.", NORM: "слобода"}, {ORTH: "сл.", NORM: "слобода"},
{ORTH: "ст. ", NORM: "станция"}, {ORTH: "ст.", NORM: "станция"},
{ORTH: "ст-ца", NORM: "станица"}, {ORTH: "ст-ца", NORM: "станица"},
{ORTH: "у.", NORM: "улус"}, {ORTH: "у.", NORM: "улус"},
{ORTH: "х.", NORM: "хутор"}, {ORTH: "х.", NORM: "хутор"},
@ -388,8 +393,9 @@ for abbr in [
{ORTH: "прим.", NORM: "примечание"}, {ORTH: "прим.", NORM: "примечание"},
{ORTH: "прим.ред.", NORM: "примечание редакции"}, {ORTH: "прим.ред.", NORM: "примечание редакции"},
{ORTH: "см. также", NORM: "смотри также"}, {ORTH: "см. также", NORM: "смотри также"},
{ORTH: "кв.м.", NORM: "квадрантный метр"}, {ORTH: "см.", NORM: "смотри"},
{ORTH: "м2", NORM: "квадрантный метр"}, {ORTH: "кв.м.", NORM: "квадратный метр"},
{ORTH: "м2", NORM: "квадратный метр"},
{ORTH: "б/у", NORM: "бывший в употреблении"}, {ORTH: "б/у", NORM: "бывший в употреблении"},
{ORTH: "сокр.", NORM: "сокращение"}, {ORTH: "сокр.", NORM: "сокращение"},
{ORTH: "чел.", NORM: "человек"}, {ORTH: "чел.", NORM: "человек"},

View File

@ -1,9 +1,17 @@
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES, TOKENIZER_SUFFIXES, TOKENIZER_PREFIXES
from .stop_words import STOP_WORDS from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from ...language import Language, BaseDefaults from ...language import Language, BaseDefaults
class SlovenianDefaults(BaseDefaults): class SlovenianDefaults(BaseDefaults):
stop_words = STOP_WORDS stop_words = STOP_WORDS
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
prefixes = TOKENIZER_PREFIXES
infixes = TOKENIZER_INFIXES
suffixes = TOKENIZER_SUFFIXES
lex_attr_getters = LEX_ATTRS
class Slovenian(Language): class Slovenian(Language):

145
spacy/lang/sl/lex_attrs.py Normal file
View File

@ -0,0 +1,145 @@
from ...attrs import LIKE_NUM
from ...attrs import IS_CURRENCY
import unicodedata
_num_words = set(
"""
nula ničla nič ena dva tri štiri pet šest sedem osem
devet deset enajst dvanajst trinajst štirinajst petnajst
šestnajst sedemnajst osemnajst devetnajst dvajset trideset štirideset
petdeset šestdest sedemdeset osemdeset devedeset sto tisoč
milijon bilijon trilijon kvadrilijon nešteto
en eden enega enemu ennem enim enih enima enimi ene eni eno
dveh dvema dvem dvoje trije treh trem tremi troje štirje štirih štirim štirimi
petih petim petimi šestih šestim šestimi sedmih sedmim sedmimi osmih osmim osmimi
devetih devetim devetimi desetih desetim desetimi enajstih enajstim enajstimi
dvanajstih dvanajstim dvanajstimi trinajstih trinajstim trinajstimi
šestnajstih šestnajstim šestnajstimi petnajstih petnajstim petnajstimi
sedemnajstih sedemnajstim sedemnajstimi osemnajstih osemnajstim osemnajstimi
devetnajstih devetnajstim devetnajstimi dvajsetih dvajsetim dvajsetimi
""".split()
)
_ordinal_words = set(
"""
prvi drugi tretji četrti peti šesti sedmi osmi
deveti deseti enajsti dvanajsti trinajsti štirinajsti
petnajsti šestnajsti sedemnajsti osemnajsti devetnajsti
dvajseti trideseti štirideseti petdeseti šestdeseti sedemdeseti
osemdeseti devetdeseti stoti tisoči milijonti bilijonti
trilijonti kvadrilijonti nešteti
prva druga tretja četrta peta šesta sedma osma
deveta deseta enajsta dvanajsta trinajsta štirnajsta
petnajsta šestnajsta sedemnajsta osemnajsta devetnajsta
dvajseta trideseta štirideseta petdeseta šestdeseta sedemdeseta
osemdeseta devetdeseta stota tisoča milijonta bilijonta
trilijonta kvadrilijonta nešteta
prvo drugo tretje četrto peto šestro sedmo osmo
deveto deseto enajsto dvanajsto trinajsto štirnajsto
petnajsto šestnajsto sedemnajsto osemnajsto devetnajsto
dvajseto trideseto štirideseto petdeseto šestdeseto sedemdeseto
osemdeseto devetdeseto stoto tisočo milijonto bilijonto
trilijonto kvadrilijonto nešteto
prvega drugega tretjega četrtega petega šestega sedmega osmega
devega desetega enajstega dvanajstega trinajstega štirnajstega
petnajstega šestnajstega sedemnajstega osemnajstega devetnajstega
dvajsetega tridesetega štiridesetega petdesetega šestdesetega sedemdesetega
osemdesetega devetdesetega stotega tisočega milijontega bilijontega
trilijontega kvadrilijontega neštetega
prvemu drugemu tretjemu četrtemu petemu šestemu sedmemu osmemu devetemu desetemu
enajstemu dvanajstemu trinajstemu štirnajstemu petnajstemu šestnajstemu sedemnajstemu
osemnajstemu devetnajstemu dvajsetemu tridesetemu štiridesetemu petdesetemu šestdesetemu
sedemdesetemu osemdesetemu devetdesetemu stotemu tisočemu milijontemu bilijontemu
trilijontemu kvadrilijontemu neštetemu
prvem drugem tretjem četrtem petem šestem sedmem osmem devetem desetem
enajstem dvanajstem trinajstem štirnajstem petnajstem šestnajstem sedemnajstem
osemnajstem devetnajstem dvajsetem tridesetem štiridesetem petdesetem šestdesetem
sedemdesetem osemdesetem devetdesetem stotem tisočem milijontem bilijontem
trilijontem kvadrilijontem neštetem
prvim drugim tretjim četrtim petim šestim sedtim osmim devetim desetim
enajstim dvanajstim trinajstim štirnajstim petnajstim šestnajstim sedemnajstim
osemnajstim devetnajstim dvajsetim tridesetim štiridesetim petdesetim šestdesetim
sedemdesetim osemdesetim devetdesetim stotim tisočim milijontim bilijontim
trilijontim kvadrilijontim neštetim
prvih drugih tretjih četrthih petih šestih sedmih osmih deveth desetih
enajstih dvanajstih trinajstih štirnajstih petnajstih šestnajstih sedemnajstih
osemnajstih devetnajstih dvajsetih tridesetih štiridesetih petdesetih šestdesetih
sedemdesetih osemdesetih devetdesetih stotih tisočih milijontih bilijontih
trilijontih kvadrilijontih nešteth
prvima drugima tretjima četrtima petima šestima sedmima osmima devetima desetima
enajstima dvanajstima trinajstima štirnajstima petnajstima šestnajstima sedemnajstima
osemnajstima devetnajstima dvajsetima tridesetima štiridesetima petdesetima šestdesetima
sedemdesetima osemdesetima devetdesetima stotima tisočima milijontima bilijontima
trilijontima kvadrilijontima neštetima
prve druge četrte pete šeste sedme osme devete desete
enajste dvanajste trinajste štirnajste petnajste šestnajste sedemnajste
osemnajste devetnajste dvajsete tridesete štiridesete petdesete šestdesete
sedemdesete osemdesete devetdesete stote tisoče milijonte bilijonte
trilijonte kvadrilijonte neštete
prvimi drugimi tretjimi četrtimi petimi šestimi sedtimi osmimi devetimi desetimi
enajstimi dvanajstimi trinajstimi štirnajstimi petnajstimi šestnajstimi sedemnajstimi
osemnajstimi devetnajstimi dvajsetimi tridesetimi štiridesetimi petdesetimi šestdesetimi
sedemdesetimi osemdesetimi devetdesetimi stotimi tisočimi milijontimi bilijontimi
trilijontimi kvadrilijontimi neštetimi
""".split()
)
_currency_words = set(
"""
evro evra evru evrom evrov evroma evrih evrom evre evri evr eur
cent centa centu cenom centov centoma centih centom cente centi
dolar dolarja dolarji dolarju dolarjem dolarjev dolarjema dolarjih dolarje usd
tolar tolarja tolarji tolarju tolarjem tolarjev tolarjema tolarjih tolarje tol
dinar dinarja dinarji dinarju dinarjem dinarjev dinarjema dinarjih dinarje din
funt funta funti funtu funtom funtov funtoma funtih funte gpb
forint forinta forinti forintu forintom forintov forintoma forintih forinte
zlot zlota zloti zlotu zlotom zlotov zlotoma zlotih zlote
rupij rupija rupiji rupiju rupijem rupijev rupijema rupijih rupije
jen jena jeni jenu jenom jenov jenoma jenih jene
kuna kuni kune kuno kun kunama kunah kunam kunami
marka marki marke markama markah markami
""".split()
)
def like_num(text):
if text.startswith(("+", "-", "±", "~")):
text = text[1:]
text = text.replace(",", "").replace(".", "")
if text.isdigit():
return True
if text.count("/") == 1:
num, denom = text.split("/")
if num.isdigit() and denom.isdigit():
return True
text_lower = text.lower()
if text_lower in _num_words:
return True
if text_lower in _ordinal_words:
return True
return False
def is_currency(text):
text_lower = text.lower()
if text in _currency_words:
return True
for char in text:
if unicodedata.category(char) != "Sc":
return False
return True
LEX_ATTRS = {LIKE_NUM: like_num, IS_CURRENCY: is_currency}

View File

@ -0,0 +1,84 @@
from ..char_classes import (
LIST_ELLIPSES,
LIST_ICONS,
HYPHENS,
LIST_PUNCT,
LIST_QUOTES,
CURRENCY,
UNITS,
PUNCT,
LIST_CURRENCY,
CONCAT_QUOTES,
)
from ..char_classes import CONCAT_QUOTES, ALPHA_LOWER, ALPHA_UPPER, ALPHA
from ..char_classes import merge_chars
from ..punctuation import TOKENIZER_PREFIXES as BASE_TOKENIZER_PREFIXES
INCLUDE_SPECIAL = ["\\+", "\\/", "\\", "\\¯", "\\=", "\\×"] + HYPHENS.split("|")
_prefixes = INCLUDE_SPECIAL + BASE_TOKENIZER_PREFIXES
_suffixes = (
INCLUDE_SPECIAL
+ LIST_PUNCT
+ LIST_ELLIPSES
+ LIST_QUOTES
+ LIST_ICONS
+ [
r"(?<=°[FfCcKk])\.",
r"(?<=[0-9])(?:{c})".format(c=CURRENCY),
r"(?<=[0-9])(?:{u})".format(u=UNITS),
r"(?<=[{al}{e}{p}(?:{q})])\.".format(
al=ALPHA_LOWER, e=r"%²\-\+", q=CONCAT_QUOTES, p=PUNCT
),
r"(?<=[{au}][{au}])\.".format(au=ALPHA_UPPER),
# split initials like J.K. Rowling
r"(?<=[A-Z]\.)(?:[A-Z].)",
]
)
# a list of all suffixes following a hyphen that shouldn't be split (e.g. BTC-jev)
# source: Obeliks tokenizer - https://github.com/clarinsi/obeliks/blob/master/obeliks/res/TokRulesPart1.txt
CONCAT_QUOTES = CONCAT_QUOTES.replace("'", "")
HYPHENS_PERMITTED = (
"((a)|(evemu)|(evskega)|(i)|(jevega)|(jevska)|(jevskimi)|(jinemu)|(oma)|(ovim)|"
"(ovski)|(e)|(evi)|(evskem)|(ih)|(jevem)|(jevske)|(jevsko)|(jini)|(ov)|(ovima)|"
"(ovskih)|(em)|(evih)|(evskemu)|(ja)|(jevemu)|(jevskega)|(ji)|(jinih)|(ova)|"
"(ovimi)|(ovskim)|(ema)|(evim)|(evski)|(je)|(jevi)|(jevskem)|(jih)|(jinim)|"
"(ove)|(ovo)|(ovskima)|(ev)|(evima)|(evskih)|(jem)|(jevih)|(jevskemu)|(jin)|"
"(jinima)|(ovega)|(ovska)|(ovskimi)|(eva)|(evimi)|(evskim)|(jema)|(jevim)|"
"(jevski)|(jina)|(jinimi)|(ovem)|(ovske)|(ovsko)|(eve)|(evo)|(evskima)|(jev)|"
"(jevima)|(jevskih)|(jine)|(jino)|(ovemu)|(ovskega)|(u)|(evega)|(evska)|"
"(evskimi)|(jeva)|(jevimi)|(jevskim)|(jinega)|(ju)|(ovi)|(ovskem)|(evem)|"
"(evske)|(evsko)|(jeve)|(jevo)|(jevskima)|(jinem)|(om)|(ovih)|(ovskemu)|"
"(ovec)|(ovca)|(ovcu)|(ovcem)|(ovcev)|(ovcema)|(ovcih)|(ovci)|(ovce)|(ovcimi)|"
"(evec)|(evca)|(evcu)|(evcem)|(evcev)|(evcema)|(evcih)|(evci)|(evce)|(evcimi)|"
"(jevec)|(jevca)|(jevcu)|(jevcem)|(jevcev)|(jevcema)|(jevcih)|(jevci)|(jevce)|"
"(jevcimi)|(ovka)|(ovke)|(ovki)|(ovko)|(ovk)|(ovkama)|(ovkah)|(ovkam)|(ovkami)|"
"(evka)|(evke)|(evki)|(evko)|(evk)|(evkama)|(evkah)|(evkam)|(evkami)|(jevka)|"
"(jevke)|(jevki)|(jevko)|(jevk)|(jevkama)|(jevkah)|(jevkam)|(jevkami)|(timi)|"
"(im)|(ima)|(a)|(imi)|(e)|(o)|(ega)|(ti)|(em)|(tih)|(emu)|(tim)|(i)|(tima)|"
"(ih)|(ta)|(te)|(to)|(tega)|(tem)|(temu))"
)
_infixes = (
LIST_ELLIPSES
+ LIST_ICONS
+ [
r"(?<=[0-9])[+\-\*^](?=[0-9-])",
r"(?<=[{al}{q}])\.(?=[{au}{q}])".format(
al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES
),
r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
r"(?<=[{a}0-9])(?:{h})(?!{hp}$)(?=[{a}])".format(
a=ALPHA, h=HYPHENS, hp=HYPHENS_PERMITTED
),
r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA),
]
)
TOKENIZER_PREFIXES = _prefixes
TOKENIZER_SUFFIXES = _suffixes
TOKENIZER_INFIXES = _infixes
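An illustrative check of the HYPHENS_PERMITTED rule (hedged; assumes a build with these Slovenian defaults): a hyphen followed by one of the whitelisted inflectional suffixes is not treated as an infix, so forms like BTC-jev stay one token, while other hyphenated pairs still split.

import spacy

nlp = spacy.blank("sl")
assert [t.text for t in nlp("BTC-jev")] == ["BTC-jev"]
assert [t.text for t in nlp("levo-desno")] == ["levo", "-", "desno"]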

View File

@ -1,326 +1,84 @@
# Source: https://github.com/stopwords-iso/stopwords-sl # Source: https://github.com/stopwords-iso/stopwords-sl
# Removed various words that are not normally considered stop words, such as months.
STOP_WORDS = set( STOP_WORDS = set(
""" """
a a ali
ali
b b bi bil bila bile bili bilo biti blizu bo bodo bojo bolj bom bomo
bi boste bova boš brez
bil
bila c cel cela celi celo
bile
bili č če često četrta četrtek četrti četrto čez čigav
bilo
biti d da daleč dan danes datum deset deseta deseti deseto devet
blizu deveta deveti deveto do dober dobra dobri dobro dokler dol dolg
bo dolga dolgi dovolj drug druga drugi drugo dva dve
bodo
bolj e eden en ena ene eni enkrat eno etc.
bom
bomo
boste
bova
boš
brez
c
cel
cela
celi
celo
d
da
daleč
dan
danes
do
dober
dobra
dobri
dobro
dokler
dol
dovolj
e
eden
en
ena
ene
eni
enkrat
eno
etc.
f f
g
g. g g. ga ga. gor gospa gospod
ga
ga. h halo
gor
gospa i idr. ii iii in iv ix iz
gospod
h j jaz je ji jih jim jo jutri
halo
i k kadarkoli kaj kajti kako kakor kamor kamorkoli kar karkoli
idr. katerikoli kdaj kdo kdorkoli ker ki kje kjer kjerkoli
ii ko koder koderkoli koga komu kot kratek kratka kratke kratki
iii
in l lahka lahke lahki lahko le lep lepa lepe lepi lepo leto
iv
ix m majhen majhna majhni malce malo manj me med medtem mene
iz mesec mi midva midve mnogo moj moja moje mora morajo moram
j moramo morate moraš morem mu
jaz
je n na nad naj najina najino najmanj naju največ nam narobe
ji nas nato nazaj naš naša naše ne nedavno nedelja nek neka
jih nekaj nekatere nekateri nekatero nekdo neke nekega neki
jim nekje neko nekoga nekoč ni nikamor nikdar nikjer nikoli
jo nič nje njega njegov njegova njegovo njej njemu njen
k njena njeno nji njih njihov njihova njihovo njiju njim
kadarkoli njo njun njuna njuno no nocoj npr.
kaj
kajti o ob oba obe oboje od odprt odprta odprti okoli on
kako onadva one oni onidve osem osma osmi osmo oz.
kakor
kamor p pa pet peta petek peti peto po pod pogosto poleg poln
kamorkoli polna polni polno ponavadi ponedeljek ponovno potem
kar povsod pozdravljen pozdravljeni prav prava prave pravi
karkoli pravo prazen prazna prazno prbl. precej pred prej preko
katerikoli pri pribl. približno primer pripravljen pripravljena
kdaj pripravljeni proti prva prvi prvo
kdo
kdorkoli r ravno redko res reč
ker
ki s saj sam sama same sami samo se sebe sebi sedaj sedem
kje sedma sedmi sedmo sem seveda si sicer skoraj skozi slab sm
kjer so sobota spet sreda srednja srednji sta ste stran stvar sva
kjerkoli
ko š šest šesta šesti šesto štiri
koderkoli
koga t ta tak taka take taki tako takoj tam te tebe tebi tega
komu težak težka težki težko ti tista tiste tisti tisto tj.
kot tja to toda torek tretja tretje tretji tri tu tudi tukaj
l tvoj tvoja tvoje
le
lep
lepa
lepe
lepi
lepo
m
manj
me
med
medtem
mene
mi
midva
midve
mnogo
moj
moja
moje
mora
morajo
moram
moramo
morate
moraš
morem
mu
n
na
nad
naj
najina
najino
najmanj
naju
največ
nam
nas
nato
nazaj
naš
naša
naše
ne
nedavno
nek
neka
nekaj
nekatere
nekateri
nekatero
nekdo
neke
nekega
neki
nekje
neko
nekoga
nekoč
ni
nikamor
nikdar
nikjer
nikoli
nič
nje
njega
njegov
njegova
njegovo
njej
njemu
njen
njena
njeno
nji
njih
njihov
njihova
njihovo
njiju
njim
njo
njun
njuna
njuno
no
nocoj
npr.
o
ob
oba
obe
oboje
od
okoli
on
onadva
one
oni
onidve
oz.
p
pa
po
pod
pogosto
poleg
ponavadi
ponovno
potem
povsod
prbl.
precej
pred
prej
preko
pri
pribl.
približno
proti
r
redko
res
s
saj
sam
sama
same
sami
samo
se
sebe
sebi
sedaj
sem
seveda
si
sicer
skoraj
skozi
smo
so
spet
sta
ste
sva
t
ta
tak
taka
take
taki
tako
takoj
tam
te
tebe
tebi
tega
ti
tista
tiste
tisti
tisto
tj.
tja
to
toda
tu
tudi
tukaj
tvoj
tvoja
tvoje
u u
v
vaju v vaju vam vas vaš vaša vaše ve vedno velik velika veliki
vam veliko vendar ves več vi vidva vii viii visok visoka visoke
vas visoki vsa vsaj vsak vsaka vsakdo vsake vsaki vsakomur vse
vaš vsega vsi vso včasih včeraj
vaša
vaše
ve
vedno
vendar
ves
več
vi
vidva
vii
viii
vsa
vsaj
vsak
vsaka
vsakdo
vsake
vsaki
vsakomur
vse
vsega
vsi
vso
včasih
x x
z
za z za zadaj zadnji zakaj zaprta zaprti zaprto zdaj zelo zunaj
zadaj
zadnji ž že
zakaj
zdaj
zelo
zunaj
č
če
često
čez
čigav
š
ž
že
""".split() """.split()
) )

View File

@ -0,0 +1,272 @@
from typing import Dict, List
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ...symbols import ORTH, NORM
from ...util import update_exc
_exc: Dict[str, List[Dict]] = {}
_other_exc = {
"t.i.": [{ORTH: "t.", NORM: "tako"}, {ORTH: "i.", NORM: "imenovano"}],
"t.j.": [{ORTH: "t.", NORM: "to"}, {ORTH: "j.", NORM: "je"}],
"T.j.": [{ORTH: "T.", NORM: "to"}, {ORTH: "j.", NORM: "je"}],
"d.o.o.": [
{ORTH: "d.", NORM: "družba"},
{ORTH: "o.", NORM: "omejeno"},
{ORTH: "o.", NORM: "odgovornostjo"},
],
"D.O.O.": [
{ORTH: "D.", NORM: "družba"},
{ORTH: "O.", NORM: "omejeno"},
{ORTH: "O.", NORM: "odgovornostjo"},
],
"d.n.o.": [
{ORTH: "d.", NORM: "družba"},
{ORTH: "n.", NORM: "neomejeno"},
{ORTH: "o.", NORM: "odgovornostjo"},
],
"D.N.O.": [
{ORTH: "D.", NORM: "družba"},
{ORTH: "N.", NORM: "neomejeno"},
{ORTH: "O.", NORM: "odgovornostjo"},
],
"d.d.": [{ORTH: "d.", NORM: "delniška"}, {ORTH: "d.", NORM: "družba"}],
"D.D.": [{ORTH: "D.", NORM: "delniška"}, {ORTH: "D.", NORM: "družba"}],
"s.p.": [{ORTH: "s.", NORM: "samostojni"}, {ORTH: "p.", NORM: "podjetnik"}],
"S.P.": [{ORTH: "S.", NORM: "samostojni"}, {ORTH: "P.", NORM: "podjetnik"}],
"l.r.": [{ORTH: "l.", NORM: "lastno"}, {ORTH: "r.", NORM: "ročno"}],
"le-te": [{ORTH: "le"}, {ORTH: "-"}, {ORTH: "te"}],
"Le-te": [{ORTH: "Le"}, {ORTH: "-"}, {ORTH: "te"}],
"le-ti": [{ORTH: "le"}, {ORTH: "-"}, {ORTH: "ti"}],
"Le-ti": [{ORTH: "Le"}, {ORTH: "-"}, {ORTH: "ti"}],
"le-to": [{ORTH: "le"}, {ORTH: "-"}, {ORTH: "to"}],
"Le-to": [{ORTH: "Le"}, {ORTH: "-"}, {ORTH: "to"}],
"le-ta": [{ORTH: "le"}, {ORTH: "-"}, {ORTH: "ta"}],
"Le-ta": [{ORTH: "Le"}, {ORTH: "-"}, {ORTH: "ta"}],
"le-tega": [{ORTH: "le"}, {ORTH: "-"}, {ORTH: "tega"}],
"Le-tega": [{ORTH: "Le"}, {ORTH: "-"}, {ORTH: "tega"}],
}
_exc.update(_other_exc)
for exc_data in [
{ORTH: "adm.", NORM: "administracija"},
{ORTH: "aer.", NORM: "aeronavtika"},
{ORTH: "agr.", NORM: "agronomija"},
{ORTH: "amer.", NORM: "ameriško"},
{ORTH: "anat.", NORM: "anatomija"},
{ORTH: "angl.", NORM: "angleški"},
{ORTH: "ant.", NORM: "antonim"},
{ORTH: "antr.", NORM: "antropologija"},
{ORTH: "apr.", NORM: "april"},
{ORTH: "arab.", NORM: "arabsko"},
{ORTH: "arheol.", NORM: "arheologija"},
{ORTH: "arhit.", NORM: "arhitektura"},
{ORTH: "avg.", NORM: "avgust"},
{ORTH: "avstr.", NORM: "avstrijsko"},
{ORTH: "avt.", NORM: "avtomobilizem"},
{ORTH: "bibl.", NORM: "biblijsko"},
{ORTH: "biokem.", NORM: "biokemija"},
{ORTH: "biol.", NORM: "biologija"},
{ORTH: "bolg.", NORM: "bolgarski"},
{ORTH: "bot.", NORM: "botanika"},
{ORTH: "cit.", NORM: "citat"},
{ORTH: "daj.", NORM: "dajalnik"},
{ORTH: "del.", NORM: "deležnik"},
{ORTH: "ed.", NORM: "ednina"},
{ORTH: "etn.", NORM: "etnografija"},
{ORTH: "farm.", NORM: "farmacija"},
{ORTH: "filat.", NORM: "filatelija"},
{ORTH: "filoz.", NORM: "filozofija"},
{ORTH: "fin.", NORM: "finančništvo"},
{ORTH: "fiz.", NORM: "fizika"},
{ORTH: "fot.", NORM: "fotografija"},
{ORTH: "fr.", NORM: "francoski"},
{ORTH: "friz.", NORM: "frizerstvo"},
{ORTH: "gastr.", NORM: "gastronomija"},
{ORTH: "geogr.", NORM: "geografija"},
{ORTH: "geol.", NORM: "geologija"},
{ORTH: "geom.", NORM: "geometrija"},
{ORTH: "germ.", NORM: "germanski"},
{ORTH: "gl.", NORM: "glej"},
{ORTH: "glag.", NORM: "glagolski"},
{ORTH: "glasb.", NORM: "glasba"},
{ORTH: "gled.", NORM: "gledališče"},
{ORTH: "gost.", NORM: "gostinstvo"},
{ORTH: "gozd.", NORM: "gozdarstvo"},
{ORTH: "gr.", NORM: "grški"},
{ORTH: "grad.", NORM: "gradbeništvo"},
{ORTH: "hebr.", NORM: "hebrejsko"},
{ORTH: "hrv.", NORM: "hrvaško"},
{ORTH: "ide.", NORM: "indoevropsko"},
{ORTH: "igr.", NORM: "igre"},
{ORTH: "im.", NORM: "imenovalnik"},
{ORTH: "iron.", NORM: "ironično"},
{ORTH: "it.", NORM: "italijanski"},
{ORTH: "itd.", NORM: "in tako dalje"},
{ORTH: "itn.", NORM: "in tako naprej"},
{ORTH: "ipd.", NORM: "in podobno"},
{ORTH: "jap.", NORM: "japonsko"},
{ORTH: "jul.", NORM: "julij"},
{ORTH: "jun.", NORM: "junij"},
{ORTH: "kit.", NORM: "kitajsko"},
{ORTH: "knj.", NORM: "knjižno"},
{ORTH: "knjiž.", NORM: "knjižno"},
{ORTH: "kor.", NORM: "koreografija"},
{ORTH: "lat.", NORM: "latinski"},
{ORTH: "les.", NORM: "lesna stroka"},
{ORTH: "lingv.", NORM: "lingvistika"},
{ORTH: "lit.", NORM: "literarni"},
{ORTH: "ljubk.", NORM: "ljubkovalno"},
{ORTH: "lov.", NORM: "lovstvo"},
{ORTH: "m.", NORM: "moški"},
{ORTH: "mak.", NORM: "makedonski"},
{ORTH: "mar.", NORM: "marec"},
{ORTH: "mat.", NORM: "matematika"},
{ORTH: "med.", NORM: "medicina"},
{ORTH: "meh.", NORM: "mehiško"},
{ORTH: "mest.", NORM: "mestnik"},
{ORTH: "mdr.", NORM: "med drugim"},
{ORTH: "min.", NORM: "mineralogija"},
{ORTH: "mitol.", NORM: "mitologija"},
{ORTH: "mn.", NORM: "množina"},
{ORTH: "mont.", NORM: "montanistika"},
{ORTH: "muz.", NORM: "muzikologija"},
{ORTH: "nam.", NORM: "namenilnik"},
{ORTH: "nar.", NORM: "narečno"},
{ORTH: "nav.", NORM: "navadno"},
{ORTH: "nedol.", NORM: "nedoločnik"},
{ORTH: "nedov.", NORM: "nedovršni"},
{ORTH: "neprav.", NORM: "nepravilno"},
{ORTH: "nepreh.", NORM: "neprehodno"},
{ORTH: "neskl.", NORM: "nesklonljiv(o)"},
{ORTH: "nestrok.", NORM: "nestrokovno"},
{ORTH: "num.", NORM: "numizmatika"},
{ORTH: "npr.", NORM: "na primer"},
{ORTH: "obrt.", NORM: "obrtništvo"},
{ORTH: "okt.", NORM: "oktober"},
{ORTH: "or.", NORM: "orodnik"},
{ORTH: "os.", NORM: "oseba"},
{ORTH: "otr.", NORM: "otroško"},
{ORTH: "oz.", NORM: "oziroma"},
{ORTH: "pal.", NORM: "paleontologija"},
{ORTH: "papir.", NORM: "papirništvo"},
{ORTH: "ped.", NORM: "pedagogika"},
{ORTH: "pisar.", NORM: "pisarniško"},
{ORTH: "pog.", NORM: "pogovorno"},
{ORTH: "polit.", NORM: "politika"},
{ORTH: "polj.", NORM: "poljsko"},
{ORTH: "poljud.", NORM: "poljudno"},
{ORTH: "preg.", NORM: "pregovor"},
{ORTH: "preh.", NORM: "prehodno"},
{ORTH: "pren.", NORM: "preneseno"},
{ORTH: "prid.", NORM: "pridevnik"},
{ORTH: "prim.", NORM: "primerjaj"},
{ORTH: "prisl.", NORM: "prislov"},
{ORTH: "psih.", NORM: "psihologija"},
{ORTH: "psiht.", NORM: "psihiatrija"},
{ORTH: "rad.", NORM: "radiotehnika"},
{ORTH: "rač.", NORM: "računalništvo"},
{ORTH: "rib.", NORM: "ribištvo"},
{ORTH: "rod.", NORM: "rodilnik"},
{ORTH: "rus.", NORM: "rusko"},
{ORTH: "s.", NORM: "srednji"},
{ORTH: "sam.", NORM: "samostalniški"},
{ORTH: "sed.", NORM: "sedanjik"},
{ORTH: "sep.", NORM: "september"},
{ORTH: "slabš.", NORM: "slabšalno"},
{ORTH: "slovan.", NORM: "slovansko"},
{ORTH: "slovaš.", NORM: "slovaško"},
{ORTH: "srb.", NORM: "srbsko"},
{ORTH: "star.", NORM: "starinsko"},
{ORTH: "stil.", NORM: "stilno"},
{ORTH: "sv.", NORM: "svet(i)"},
{ORTH: "teh.", NORM: "tehnika"},
{ORTH: "tisk.", NORM: "tiskarstvo"},
{ORTH: "tj.", NORM: "to je"},
{ORTH: "tož.", NORM: "tožilnik"},
{ORTH: "trg.", NORM: "trgovina"},
{ORTH: "ukr.", NORM: "ukrajinski"},
{ORTH: "um.", NORM: "umetnost"},
{ORTH: "vel.", NORM: "velelnik"},
{ORTH: "vet.", NORM: "veterina"},
{ORTH: "vez.", NORM: "veznik"},
{ORTH: "vn.", NORM: "visokonemško"},
{ORTH: "voj.", NORM: "vojska"},
{ORTH: "vrtn.", NORM: "vrtnarstvo"},
{ORTH: "vulg.", NORM: "vulgarno"},
{ORTH: "vznes.", NORM: "vzneseno"},
{ORTH: "zal.", NORM: "založništvo"},
{ORTH: "zastar.", NORM: "zastarelo"},
{ORTH: "zgod.", NORM: "zgodovina"},
{ORTH: "zool.", NORM: "zoologija"},
{ORTH: "čeb.", NORM: "čebelarstvo"},
{ORTH: "češ.", NORM: "češki"},
{ORTH: "člov.", NORM: "človeškost"},
{ORTH: "šah.", NORM: "šahovski"},
{ORTH: "šalj.", NORM: "šaljivo"},
{ORTH: "šp.", NORM: "španski"},
{ORTH: "špan.", NORM: "špansko"},
{ORTH: "šport.", NORM: "športni"},
{ORTH: "štev.", NORM: "števnik"},
{ORTH: "šved.", NORM: "švedsko"},
{ORTH: "švic.", NORM: "švicarsko"},
{ORTH: "ž.", NORM: "ženski"},
{ORTH: "žarg.", NORM: "žargonsko"},
{ORTH: "žel.", NORM: "železnica"},
{ORTH: "živ.", NORM: "živost"},
]:
_exc[exc_data[ORTH]] = [exc_data]
abbrv = """
Co. Ch. DIPL. DR. Dr. Ev. Inc. Jr. Kr. Mag. M. MR. Mr. Mt. Murr. Npr. OZ.
Opr. Osn. Prim. Roj. ST. Sim. Sp. Sred. St. Sv. Škofl. Tel. UR. Zb.
a. aa. ab. abc. abit. abl. abs. abt. acc. accel. add. adj. adv. aet. afr. akad. al. alban. all. alleg.
alp. alt. alter. alžir. am. an. andr. ang. anh. anon. ans. antrop. apoc. app. approx. apt. ar. arc. arch.
arh. arr. as. asist. assist. assoc. asst. astr. attn. aug. avstral. az. b. bab. bal. bbl. bd. belg. bioinf.
biomed. bk. bl. bn. borg. bp. br. braz. brit. bros. broš. bt. bu. c. ca. cal. can. cand. cantab. cap. capt.
cat. cath. cc. cca. cd. cdr. cdre. cent. cerkv. cert. cf. cfr. ch. chap. chem. chr. chs. cic. circ. civ. cl.
cm. cmd. cnr. co. cod. col. coll. colo. com. comp. con. conc. cond. conn. cons. cont. coop. corr. cost. cp.
cpl. cr. crd. cres. cresc. ct. cu. d. dan. dat. davč. ddr. dec. ded. def. dem. dent. dept. dia. dip. dipl.
dir. disp. diss. div. do. doc. dok. dol. doo. dop. dott. dr. dram. druž. družb. drž. dt. duh. dur. dvr. dwt. e.
ea. ecc. eccl. eccles. econ. edn. egipt. egr. ekon. eksp. el. em. enc. eng. eo. ep. err. esp. esq. est.
et. etc. etnogr. etnol. ev. evfem. evr. ex. exc. excl. exp. expl. ext. exx. f. fa. facs. fak. faks. fas.
fasc. fco. fcp. feb. febr. fec. fed. fem. ff. fff. fid. fig. fil. film. fiziol. fiziot. flam. fm. fo. fol. folk.
frag. fran. franc. fsc. g. ga. gal. gdč. ge. gen. geod. geog. geotehnol. gg. gimn. glas. glav. gnr. go. gor.
gosp. gp. graf. gram. gren. grš. gs. h. hab. hf. hist. ho. hort. i. ia. ib. ibid. id. idr. idridr. ill. imen.
imp. impf. impr. in. inc. incl. ind. indus. inf. inform. ing. init. ins. int. inv. inšp. inštr. inž. is. islam.
ist. ital. iur. iz. izbr. izd. izg. izgr. izr. izv. j. jak. jam. jan. jav. je. jez. jr. jsl. jud. jug.
jugoslovan. jur. juž. jv. jz. k. kal. kan. kand. kat. kdo. kem. kip. kmet. kol. kom. komp. konf. kont. kost. kov.
kp. kpfw. kr. kraj. krat. kub. kult. kv. kval. l. la. lab. lb. ld. let. lib. lik. litt. lj. ljud. ll. loc. log.
loč. lt. ma. madž. mag. manag. manjš. masc. mass. mater. max. maxmax. mb. md. mech. medic. medij. medn.
mehč. mem. menedž. mes. mess. metal. meteor. meteorol. mex. mi. mikr. mil. minn. mio. misc. miss. mit. mk.
mkt. ml. mlad. mlle. mlr. mm. mme. množ. mo. moj. moš. možn. mr. mrd. mrs. ms. msc. msgr. mt. murr. mus. mut.
n. na. nad. nadalj. nadom. nagl. nakl. namer. nan. naniz. nasl. nat. navt. nač. ned. nem. nik. nizoz. nm. nn.
no. nom. norv. notr. nov. novogr. ns. o. ob. obd. obj. oblač. obl. oblik. obr. obraz. obs. obst. obt. obč. oc.
oct. od. odd. odg. odn. odst. odv. oec. off. ok. okla. okr. ont. oo. op. opis. opp. opr. orch. ord. ore. oreg.
org. orient. orig. ork. ort. oseb. osn. ot. ozir. ošk. p. pag. par. para. parc. parl. part. past. pat. pdk.
pen. perf. pert. perz. pesn. pet. pev. pf. pfc. ph. pharm. phil. pis. pl. po. pod. podr. podaljš. pogl. pogoj. pojm.
pok. pokr. pol. poljed. poljub. polu. pom. pomen. pon. ponov. pop. por. port. pos. posl. posn. pov. pp. ppl. pr.
praet. prav. pravopis. pravosl. preb. pred. predl. predm. predp. preds. pref. pregib. prel. prem. premen. prep.
pres. pret. prev. pribl. prih. pril. primerj. primor. prip. pripor. prir. prist. priv. proc. prof. prog. proiz.
prom. pron. prop. prot. protest. prov. ps. pss. pt. publ. pz. q. qld. qu. quad. que. r. racc. rastl. razgl.
razl. razv. rd. red. ref. reg. rel. relig. rep. repr. rer. resp. rest. ret. rev. revol. rež. rim. rist. rkp. rm.
roj. rom. romun. rp. rr. rt. rud. ruš. ry. sal. samogl. san. sc. scen. sci. scr. sdv. seg. sek. sen. sept. ser.
sev. sg. sgt. sh. sig. sigg. sign. sim. sin. sing. sinh. skand. skl. sklad. sklanj. sklep. skr. sl. slik. slov.
slovak. slovn. sn. so. sob. soc. sociol. sod. sopomen. sopr. sor. sov. sovj. sp. spec. spl. spr. spreg. sq. sr.
sre. sred. sredoz. srh. ss. ssp. st. sta. stan. stanstar. stcsl. ste. stim. stol. stom. str. stroj. strok. stsl.
stud. sup. supl. suppl. svet. sz. t. tab. tech. ted. tehn. tehnol. tek. teks. tekst. tel. temp. ten. teol. ter.
term. test. th. theol. tim. tip. tisočl. tit. tl. tol. tolmač. tom. tor. tov. tr. trad. traj. trans. tren.
trib. tril. trop. trp. trž. ts. tt. tu. tur. turiz. tvor. tvorb. . u. ul. umet. un. univ. up. upr. ur. urad.
us. ust. utr. v. va. val. var. varn. ven. ver. verb. vest. vezal. vic. vis. viv. viz. viš. vod. vok. vol. vpr.
vrst. vrstil. vs. vv. vzd. vzg. vzh. vzor. w. wed. wg. wk. x. y. z. zah. zaim. zak. zap. zasl. zavar. zač. zb.
združ. zg. zn. znan. znanstv. zoot. zun. zv. zvd. á. é. ć. č. čas. čet. čl. člen. čustv. đ. ľ. ł. ş. ŠT. š. šir.
škofl. škot. šol. št. števil. štud. ů. ű. žen. žival.
""".split()
for orth in abbrv:
_exc[orth] = [{ORTH: orth}]
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
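A hedged sketch of the new special cases (assumes a build with these Slovenian exceptions): the corporate abbreviation splits into its normed parts, and the domain abbreviations stay single tokens.

import spacy

nlp = spacy.blank("sl")
doc = nlp("Podjetje d.o.o. posluje naprej")
assert [t.text for t in doc[1:4]] == ["d.", "o.", "o."]  # special case splits the abbreviation
assert doc[1].norm_ == "družba"                          # ...and attaches the expanded norms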

View File

@ -29,7 +29,7 @@ class Ukrainian(Language):
assigns=["token.lemma"], assigns=["token.lemma"],
default_config={ default_config={
"model": None, "model": None,
"mode": "pymorphy2", "mode": "pymorphy3",
"overwrite": False, "overwrite": False,
"scorer": {"@scorers": "spacy.lemmatizer_scorer.v1"}, "scorer": {"@scorers": "spacy.lemmatizer_scorer.v1"},
}, },

View File

@ -14,11 +14,11 @@ class UkrainianLemmatizer(RussianLemmatizer):
model: Optional[Model], model: Optional[Model],
name: str = "lemmatizer", name: str = "lemmatizer",
*, *,
mode: str = "pymorphy2", mode: str = "pymorphy3",
overwrite: bool = False, overwrite: bool = False,
scorer: Optional[Callable] = lemmatizer_score, scorer: Optional[Callable] = lemmatizer_score,
) -> None: ) -> None:
if mode == "pymorphy2": if mode in {"pymorphy2", "pymorphy2_lookup"}:
try: try:
from pymorphy2 import MorphAnalyzer from pymorphy2 import MorphAnalyzer
except ImportError: except ImportError:
@ -29,6 +29,17 @@ class UkrainianLemmatizer(RussianLemmatizer):
) from None ) from None
if getattr(self, "_morph", None) is None: if getattr(self, "_morph", None) is None:
self._morph = MorphAnalyzer(lang="uk") self._morph = MorphAnalyzer(lang="uk")
elif mode == "pymorphy3":
try:
from pymorphy3 import MorphAnalyzer
except ImportError:
raise ImportError(
"The Ukrainian lemmatizer mode 'pymorphy3' requires the "
"pymorphy3 library and dictionaries. Install them with: "
"pip install pymorphy3 pymorphy3-dicts-uk"
) from None
if getattr(self, "_morph", None) is None:
self._morph = MorphAnalyzer(lang="uk")
super().__init__( super().__init__(
vocab, model, name, mode=mode, overwrite=overwrite, scorer=scorer vocab, model, name, mode=mode, overwrite=overwrite, scorer=scorer
) )
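The Ukrainian lemmatizer mirrors the Russian change; a hedged sketch of selecting the new mode (needs both the analyzer and the Ukrainian dictionaries):

import spacy

nlp = spacy.blank("uk")
nlp.add_pipe("lemmatizer", config={"mode": "pymorphy3"})  # pip install pymorphy3 pymorphy3-dicts-uk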

View File

@ -1,4 +1,4 @@
from typing import Iterator, Optional, Any, Dict, Callable, Iterable, Collection from typing import Iterator, Optional, Any, Dict, Callable, Iterable
from typing import Union, Tuple, List, Set, Pattern, Sequence from typing import Union, Tuple, List, Set, Pattern, Sequence
from typing import NoReturn, TYPE_CHECKING, TypeVar, cast, overload from typing import NoReturn, TYPE_CHECKING, TypeVar, cast, overload
@ -10,6 +10,7 @@ from contextlib import contextmanager
from copy import deepcopy from copy import deepcopy
from pathlib import Path from pathlib import Path
import warnings import warnings
from thinc.api import get_current_ops, Config, CupyOps, Optimizer from thinc.api import get_current_ops, Config, CupyOps, Optimizer
import srsly import srsly
import multiprocessing as mp import multiprocessing as mp
@ -24,7 +25,7 @@ from .pipe_analysis import validate_attrs, analyze_pipes, print_pipe_analysis
from .training import Example, validate_examples from .training import Example, validate_examples
from .training.initialize import init_vocab, init_tok2vec from .training.initialize import init_vocab, init_tok2vec
from .scorer import Scorer from .scorer import Scorer
from .util import registry, SimpleFrozenList, _pipe, raise_error from .util import registry, SimpleFrozenList, _pipe, raise_error, _DEFAULT_EMPTY_PIPES
from .util import SimpleFrozenDict, combine_score_weights, CONFIG_SECTION_ORDER from .util import SimpleFrozenDict, combine_score_weights, CONFIG_SECTION_ORDER
from .util import warn_if_jupyter_cupy from .util import warn_if_jupyter_cupy
from .lang.tokenizer_exceptions import URL_MATCH, BASE_EXCEPTIONS from .lang.tokenizer_exceptions import URL_MATCH, BASE_EXCEPTIONS
@ -465,6 +466,8 @@ class Language:
""" """
if not isinstance(name, str): if not isinstance(name, str):
raise ValueError(Errors.E963.format(decorator="factory")) raise ValueError(Errors.E963.format(decorator="factory"))
if "." in name:
raise ValueError(Errors.E853.format(name=name))
if not isinstance(default_config, dict): if not isinstance(default_config, dict):
err = Errors.E962.format( err = Errors.E962.format(
style="default config", name=name, cfg_type=type(default_config) style="default config", name=name, cfg_type=type(default_config)
@ -543,8 +546,11 @@ class Language:
DOCS: https://spacy.io/api/language#component DOCS: https://spacy.io/api/language#component
""" """
if name is not None and not isinstance(name, str): if name is not None:
raise ValueError(Errors.E963.format(decorator="component")) if not isinstance(name, str):
raise ValueError(Errors.E963.format(decorator="component"))
if "." in name:
raise ValueError(Errors.E853.format(name=name))
component_name = name if name is not None else util.get_object_name(func) component_name = name if name is not None else util.get_object_name(func)
def add_component(component_func: "Pipe") -> Callable: def add_component(component_func: "Pipe") -> Callable:
@ -700,13 +706,7 @@ class Language:
# Check source type # Check source type
if not isinstance(source, Language): if not isinstance(source, Language):
raise ValueError(Errors.E945.format(name=source_name, source=type(source))) raise ValueError(Errors.E945.format(name=source_name, source=type(source)))
# Check vectors, with faster checks first if self.vocab.vectors != source.vocab.vectors:
if (
self.vocab.vectors.shape != source.vocab.vectors.shape
or self.vocab.vectors.key2row != source.vocab.vectors.key2row
or self.vocab.vectors.to_bytes(exclude=["strings"])
!= source.vocab.vectors.to_bytes(exclude=["strings"])
):
warnings.warn(Warnings.W113.format(name=source_name)) warnings.warn(Warnings.W113.format(name=source_name))
if source_name not in source.component_names: if source_name not in source.component_names:
raise KeyError( raise KeyError(
@ -784,14 +784,6 @@ class Language:
factory_name, source, name=name factory_name, source, name=name
) )
else: else:
if not self.has_factory(factory_name):
err = Errors.E002.format(
name=factory_name,
opts=", ".join(self.factory_names),
method="add_pipe",
lang=util.get_object_name(self),
lang_code=self.lang,
)
pipe_component = self.create_pipe( pipe_component = self.create_pipe(
factory_name, factory_name,
name=name, name=name,
@ -1023,8 +1015,8 @@ class Language:
raise ValueError(Errors.E109.format(name=name)) from e raise ValueError(Errors.E109.format(name=name)) from e
except Exception as e: except Exception as e:
error_handler(name, proc, [doc], e) error_handler(name, proc, [doc], e)
if doc is None: if not isinstance(doc, Doc):
raise ValueError(Errors.E005.format(name=name)) raise ValueError(Errors.E005.format(name=name, returned_type=type(doc)))
return doc return doc
def disable_pipes(self, *names) -> "DisabledPipes": def disable_pipes(self, *names) -> "DisabledPipes":
@ -1058,7 +1050,7 @@ class Language:
""" """
if enable is None and disable is None: if enable is None and disable is None:
raise ValueError(Errors.E991) raise ValueError(Errors.E991)
if disable is not None and isinstance(disable, str): if isinstance(disable, str):
disable = [disable] disable = [disable]
if enable is not None: if enable is not None:
if isinstance(enable, str): if isinstance(enable, str):
@ -1693,9 +1685,9 @@ class Language:
config: Union[Dict[str, Any], Config] = {}, config: Union[Dict[str, Any], Config] = {},
*, *,
vocab: Union[Vocab, bool] = True, vocab: Union[Vocab, bool] = True,
disable: Iterable[str] = SimpleFrozenList(), disable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
enable: Iterable[str] = SimpleFrozenList(), enable: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
exclude: Iterable[str] = SimpleFrozenList(), exclude: Union[str, Iterable[str]] = _DEFAULT_EMPTY_PIPES,
meta: Dict[str, Any] = SimpleFrozenDict(), meta: Dict[str, Any] = SimpleFrozenDict(),
auto_fill: bool = True, auto_fill: bool = True,
validate: bool = True, validate: bool = True,
@ -1706,12 +1698,12 @@ class Language:
config (Dict[str, Any] / Config): The loaded config. config (Dict[str, Any] / Config): The loaded config.
vocab (Vocab): A Vocab object. If True, a vocab is created. vocab (Vocab): A Vocab object. If True, a vocab is created.
disable (Iterable[str]): Names of pipeline components to disable. disable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to disable.
Disabled pipes will be loaded but they won't be run unless you Disabled pipes will be loaded but they won't be run unless you
explicitly enable them by calling nlp.enable_pipe. explicitly enable them by calling nlp.enable_pipe.
enable (Iterable[str]): Names of pipeline components to enable. All other enable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to enable. All other
pipes will be disabled (and can be enabled using `nlp.enable_pipe`). pipes will be disabled (and can be enabled using `nlp.enable_pipe`).
exclude (Iterable[str]): Names of pipeline components to exclude. exclude (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to exclude.
Excluded components won't be loaded. Excluded components won't be loaded.
meta (Dict[str, Any]): Meta overrides for nlp.meta. meta (Dict[str, Any]): Meta overrides for nlp.meta.
auto_fill (bool): Automatically fill in missing values in config based auto_fill (bool): Automatically fill in missing values in config based
@ -1866,9 +1858,29 @@ class Language:
nlp.vocab.from_bytes(vocab_b) nlp.vocab.from_bytes(vocab_b)
# Resolve disabled/enabled settings. # Resolve disabled/enabled settings.
if isinstance(disable, str):
disable = [disable]
if isinstance(enable, str):
enable = [enable]
if isinstance(exclude, str):
exclude = [exclude]
# `enable` should not be merged with `enabled` (the opposite is true for `disable`/`disabled`). If the config
# specifies values for `enabled` not included in `enable`, emit warning.
if id(enable) != id(_DEFAULT_EMPTY_PIPES):
enabled = config["nlp"].get("enabled", [])
if len(enabled) and not set(enabled).issubset(enable):
warnings.warn(
Warnings.W123.format(
enable=enable,
enabled=enabled,
)
)
# Ensure sets of disabled/enabled pipe names are not contradictory.
disabled_pipes = cls._resolve_component_status( disabled_pipes = cls._resolve_component_status(
[*config["nlp"]["disabled"], *disable], list({*disable, *config["nlp"].get("disabled", [])}),
[*config["nlp"].get("enabled", []), *enable], enable,
config["nlp"]["pipeline"], config["nlp"]["pipeline"],
) )
nlp._disabled = set(p for p in disabled_pipes if p not in exclude) nlp._disabled = set(p for p in disabled_pipes if p not in exclude)
@ -2026,37 +2038,36 @@ class Language:
@staticmethod @staticmethod
def _resolve_component_status( def _resolve_component_status(
disable: Iterable[str], enable: Iterable[str], pipe_names: Collection[str] disable: Union[str, Iterable[str]],
enable: Union[str, Iterable[str]],
pipe_names: Iterable[str],
) -> Tuple[str, ...]: ) -> Tuple[str, ...]:
"""Derives whether (1) `disable` and `enable` values are consistent and (2) """Derives whether (1) `disable` and `enable` values are consistent and (2)
resolves those to a single set of disabled components. Raises an error in resolves those to a single set of disabled components. Raises an error in
case of inconsistency. case of inconsistency.
disable (Iterable[str]): Names of components or serialization fields to disable. disable (Union[str, Iterable[str]]): Name(s) of component(s) or serialization fields to disable.
enable (Iterable[str]): Names of pipeline components to enable. enable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to enable.
pipe_names (Iterable[str]): Names of all pipeline components. pipe_names (Iterable[str]): Names of all pipeline components.
RETURNS (Tuple[str, ...]): Names of components to exclude from pipeline w.r.t. RETURNS (Tuple[str, ...]): Names of components to exclude from pipeline w.r.t.
specified includes and excludes. specified includes and excludes.
""" """
if disable is not None and isinstance(disable, str): if isinstance(disable, str):
disable = [disable] disable = [disable]
to_disable = disable to_disable = disable
if enable: if enable:
to_disable = [ if isinstance(enable, str):
pipe_name for pipe_name in pipe_names if pipe_name not in enable enable = [enable]
] to_disable = {
if disable and disable != to_disable: *[pipe_name for pipe_name in pipe_names if pipe_name not in enable],
raise ValueError( *disable,
Errors.E1042.format( }
arg1="enable", # If any pipe to be enabled is in to_disable, the specification is inconsistent.
arg2="disable", if len(set(enable) & to_disable):
arg1_values=enable, raise ValueError(Errors.E1042.format(enable=enable, disable=disable))
arg2_values=disable,
)
)
return tuple(to_disable) return tuple(to_disable)
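With the signature changes above, disable/enable/exclude also accept a bare component name. A hedged sketch (the package name is illustrative, and it assumes spacy.load forwards these arguments to Language.from_config as before):

import spacy

nlp = spacy.load("en_core_web_sm", disable="ner")  # no longer needs to be ["ner"]
assert "ner" in nlp.disabled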

View File

@ -1,5 +1,6 @@
from .matcher import Matcher from .matcher import Matcher
from .phrasematcher import PhraseMatcher from .phrasematcher import PhraseMatcher
from .dependencymatcher import DependencyMatcher from .dependencymatcher import DependencyMatcher
from .levenshtein import levenshtein
__all__ = ["Matcher", "PhraseMatcher", "DependencyMatcher"] __all__ = ["Matcher", "PhraseMatcher", "DependencyMatcher", "levenshtein"]

View File

@ -82,6 +82,10 @@ cdef class DependencyMatcher:
"$-": self._imm_left_sib, "$-": self._imm_left_sib,
"$++": self._right_sib, "$++": self._right_sib,
"$--": self._left_sib, "$--": self._left_sib,
">++": self._right_child,
">--": self._left_child,
"<++": self._right_parent,
"<--": self._left_parent,
} }
def __reduce__(self): def __reduce__(self):
@ -423,6 +427,22 @@ cdef class DependencyMatcher:
def _left_sib(self, doc, node): def _left_sib(self, doc, node):
return [doc[child.i] for child in doc[node].head.children if child.i < node] return [doc[child.i] for child in doc[node].head.children if child.i < node]
def _right_child(self, doc, node):
return [doc[child.i] for child in doc[node].children if child.i > node]
def _left_child(self, doc, node):
return [doc[child.i] for child in doc[node].children if child.i < node]
def _right_parent(self, doc, node):
if doc[node].head.i > node:
return [doc[node].head]
return []
def _left_parent(self, doc, node):
if doc[node].head.i < node:
return [doc[node].head]
return []
def _normalize_key(self, key): def _normalize_key(self, key):
if isinstance(key, str): if isinstance(key, str):
return self.vocab.strings.add(key) return self.vocab.strings.add(key)
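A hedged usage sketch of one of the new directional operators: per _right_child above, ">++" anchors on a direct child of the left node that occurs to its right (the model name, text and attributes are illustrative):

import spacy
from spacy.matcher import DependencyMatcher

nlp = spacy.load("en_core_web_sm")
matcher = DependencyMatcher(nlp.vocab)
pattern = [
    {"RIGHT_ID": "verb", "RIGHT_ATTRS": {"POS": "VERB"}},
    # ">++": a direct child of "verb" positioned to the right of it
    {"LEFT_ID": "verb", "REL_OP": ">++", "RIGHT_ID": "object", "RIGHT_ATTRS": {"POS": "NOUN"}},
]
matcher.add("VERB_WITH_RIGHT_CHILD", [pattern])
matches = matcher(nlp("She bought flowers"))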

View File

@ -0,0 +1,15 @@
# cython: profile=True, binding=True, infer_types=True
from cpython.object cimport PyObject
from libc.stdint cimport int64_t
from typing import Optional
cdef extern from "polyleven.c":
int64_t polyleven(PyObject *o1, PyObject *o2, int64_t k)
cpdef int64_t levenshtein(a: str, b: str, k: Optional[int] = None):
if k is None:
k = -1
return polyleven(<PyObject*>a, <PyObject*>b, k)
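A hedged usage sketch of the helper (re-exported from spacy.matcher via the __init__ change above); per polyleven's bounded-search contract, passing an upper bound k stops the computation early and returns k + 1 once the true distance exceeds the bound:

from spacy.matcher import levenshtein

assert levenshtein("kitten", "sitting") == 3
assert levenshtein("kitten", "sitting", 1) == 2  # true distance (3) exceeds k=1, so k + 1 is returned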

View File

@ -1,5 +1,5 @@
# cython: infer_types=True, cython: profile=True # cython: infer_types=True, profile=True
from typing import List from typing import List, Iterable
from libcpp.vector cimport vector from libcpp.vector cimport vector
from libc.stdint cimport int32_t, int8_t from libc.stdint cimport int32_t, int8_t
@ -1012,20 +1012,27 @@ class _SetPredicate:
def __call__(self, Token token): def __call__(self, Token token):
if self.is_extension: if self.is_extension:
value = get_string_id(token._.get(self.attr)) value = token._.get(self.attr)
else: else:
value = get_token_attr_for_matcher(token.c, self.attr) value = get_token_attr_for_matcher(token.c, self.attr)
if self.predicate in ("IS_SUBSET", "IS_SUPERSET", "INTERSECTS"): if self.predicate in ("IN", "NOT_IN"):
if isinstance(value, (str, int)):
value = get_string_id(value)
else:
return False
elif self.predicate in ("IS_SUBSET", "IS_SUPERSET", "INTERSECTS"):
# ensure that all values are enclosed in a set
if self.attr == MORPH: if self.attr == MORPH:
# break up MORPH into individual Feat=Val values # break up MORPH into individual Feat=Val values
value = set(get_string_id(v) for v in MorphAnalysis.from_id(self.vocab, value)) value = set(get_string_id(v) for v in MorphAnalysis.from_id(self.vocab, value))
elif isinstance(value, (str, int)):
value = set((get_string_id(value),))
elif isinstance(value, Iterable) and all(isinstance(v, (str, int)) for v in value):
value = set(get_string_id(v) for v in value)
else: else:
# treat a single value as a list return False
if isinstance(value, (str, int)):
value = set([get_string_id(value)])
else:
value = set(get_string_id(v) for v in value)
if self.predicate == "IN": if self.predicate == "IN":
return value in self.value return value in self.value
elif self.predicate == "NOT_IN": elif self.predicate == "NOT_IN":

384
spacy/matcher/polyleven.c Normal file
View File

@ -0,0 +1,384 @@
/*
* Adapted from Polyleven (https://ceptord.net/)
*
* Source: https://github.com/fujimotos/polyleven/blob/c3f95a080626c5652f0151a2e449963288ccae84/polyleven.c
*
* Copyright (c) 2021 Fujimoto Seiji <fujimoto@ceptord.net>
* Copyright (c) 2021 Max Bachmann <kontakt@maxbachmann.de>
* Copyright (c) 2022 Nick Mazuk
* Copyright (c) 2022 Michael Weiss <code@mweiss.ch>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <Python.h>
#include <stdint.h>
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define CDIV(a,b) ((a) / (b) + ((a) % (b) > 0))
#define BIT(i,n) (((i) >> (n)) & 1)
#define FLIP(i,n) ((i) ^ ((uint64_t) 1 << (n)))
#define ISASCII(kd) ((kd) == PyUnicode_1BYTE_KIND)
/*
* Bare bone of PyUnicode
*/
struct strbuf {
void *ptr;
int kind;
int64_t len;
};
static void strbuf_init(struct strbuf *s, PyObject *o)
{
s->ptr = PyUnicode_DATA(o);
s->kind = PyUnicode_KIND(o);
s->len = PyUnicode_GET_LENGTH(o);
}
#define strbuf_read(s, i) PyUnicode_READ((s)->kind, (s)->ptr, (i))
/*
* An encoded mbleven model table.
*
 * Each 8-bit integer represents an edit sequence, using two
* bits for a single operation.
*
* 01 = DELETE, 10 = INSERT, 11 = REPLACE
*
* For example, 13 is '1101' in binary notation, so it means
* DELETE + REPLACE.
*/
static const uint8_t MBLEVEN_MATRIX[] = {
3, 0, 0, 0, 0, 0, 0, 0,
1, 0, 0, 0, 0, 0, 0, 0,
15, 9, 6, 0, 0, 0, 0, 0,
13, 7, 0, 0, 0, 0, 0, 0,
5, 0, 0, 0, 0, 0, 0, 0,
63, 39, 45, 57, 54, 30, 27, 0,
61, 55, 31, 37, 25, 22, 0, 0,
53, 29, 23, 0, 0, 0, 0, 0,
21, 0, 0, 0, 0, 0, 0, 0,
};
#define MBLEVEN_MATRIX_GET(k, d) ((((k) + (k) * (k)) / 2 - 1) + (d)) * 8
static int64_t mbleven_ascii(char *s1, int64_t len1,
char *s2, int64_t len2, int k)
{
int pos;
uint8_t m;
int64_t i, j, c, r;
pos = MBLEVEN_MATRIX_GET(k, len1 - len2);
r = k + 1;
while (MBLEVEN_MATRIX[pos]) {
m = MBLEVEN_MATRIX[pos++];
i = j = c = 0;
while (i < len1 && j < len2) {
if (s1[i] != s2[j]) {
c++;
if (!m) break;
if (m & 1) i++;
if (m & 2) j++;
m >>= 2;
} else {
i++;
j++;
}
}
c += (len1 - i) + (len2 - j);
r = MIN(r, c);
if (r < 2) {
return r;
}
}
return r;
}
static int64_t mbleven(PyObject *o1, PyObject *o2, int64_t k)
{
int pos;
uint8_t m;
int64_t i, j, c, r;
struct strbuf s1, s2;
strbuf_init(&s1, o1);
strbuf_init(&s2, o2);
if (s1.len < s2.len)
return mbleven(o2, o1, k);
if (k > 3)
return -1;
if (k < s1.len - s2.len)
return k + 1;
if (ISASCII(s1.kind) && ISASCII(s2.kind))
return mbleven_ascii(s1.ptr, s1.len, s2.ptr, s2.len, k);
pos = MBLEVEN_MATRIX_GET(k, s1.len - s2.len);
r = k + 1;
while (MBLEVEN_MATRIX[pos]) {
m = MBLEVEN_MATRIX[pos++];
i = j = c = 0;
while (i < s1.len && j < s2.len) {
if (strbuf_read(&s1, i) != strbuf_read(&s2, j)) {
c++;
if (!m) break;
if (m & 1) i++;
if (m & 2) j++;
m >>= 2;
} else {
i++;
j++;
}
}
c += (s1.len - i) + (s2.len - j);
r = MIN(r, c);
if (r < 2) {
return r;
}
}
return r;
}
/*
* Data structure to store Peq (equality bit-vector).
*/
struct blockmap_entry {
uint32_t key[128];
uint64_t val[128];
};
struct blockmap {
int64_t nr;
struct blockmap_entry *list;
};
#define blockmap_key(c) ((c) | 0x80000000U)
#define blockmap_hash(c) ((c) % 128)
static int blockmap_init(struct blockmap *map, struct strbuf *s)
{
int64_t i;
struct blockmap_entry *be;
uint32_t c, k;
uint8_t h;
map->nr = CDIV(s->len, 64);
map->list = calloc(1, map->nr * sizeof(struct blockmap_entry));
if (map->list == NULL) {
PyErr_NoMemory();
return -1;
}
for (i = 0; i < s->len; i++) {
be = &(map->list[i / 64]);
c = strbuf_read(s, i);
h = blockmap_hash(c);
k = blockmap_key(c);
while (be->key[h] && be->key[h] != k)
h = blockmap_hash(h + 1);
be->key[h] = k;
be->val[h] |= (uint64_t) 1 << (i % 64);
}
return 0;
}
static void blockmap_clear(struct blockmap *map)
{
if (map->list)
free(map->list);
map->list = NULL;
map->nr = 0;
}
static uint64_t blockmap_get(struct blockmap *map, int block, uint32_t c)
{
struct blockmap_entry *be;
uint8_t h;
uint32_t k;
h = blockmap_hash(c);
k = blockmap_key(c);
be = &(map->list[block]);
while (be->key[h] && be->key[h] != k)
h = blockmap_hash(h + 1);
return be->key[h] == k ? be->val[h] : 0;
}
/*
* Myers' bit-parallel algorithm
*
* See: G. Myers. "A fast bit-vector algorithm for approximate string
* matching based on dynamic programming." Journal of the ACM, 1999.
*/
static int64_t myers1999_block(struct strbuf *s1, struct strbuf *s2,
struct blockmap *map)
{
uint64_t Eq, Xv, Xh, Ph, Mh, Pv, Mv, Last;
uint64_t *Mhc, *Phc;
int64_t i, b, hsize, vsize, Score;
uint8_t Pb, Mb;
hsize = CDIV(s1->len, 64);
vsize = CDIV(s2->len, 64);
Score = s2->len;
Phc = malloc(hsize * 2 * sizeof(uint64_t));
if (Phc == NULL) {
PyErr_NoMemory();
return -1;
}
Mhc = Phc + hsize;
memset(Phc, -1, hsize * sizeof(uint64_t));
memset(Mhc, 0, hsize * sizeof(uint64_t));
Last = (uint64_t)1 << ((s2->len - 1) % 64);
for (b = 0; b < vsize; b++) {
Mv = 0;
Pv = (uint64_t) -1;
Score = s2->len;
for (i = 0; i < s1->len; i++) {
Eq = blockmap_get(map, b, strbuf_read(s1, i));
Pb = BIT(Phc[i / 64], i % 64);
Mb = BIT(Mhc[i / 64], i % 64);
Xv = Eq | Mv;
Xh = ((((Eq | Mb) & Pv) + Pv) ^ Pv) | Eq | Mb;
Ph = Mv | ~ (Xh | Pv);
Mh = Pv & Xh;
if (Ph & Last) Score++;
if (Mh & Last) Score--;
if ((Ph >> 63) ^ Pb)
Phc[i / 64] = FLIP(Phc[i / 64], i % 64);
if ((Mh >> 63) ^ Mb)
Mhc[i / 64] = FLIP(Mhc[i / 64], i % 64);
Ph = (Ph << 1) | Pb;
Mh = (Mh << 1) | Mb;
Pv = Mh | ~ (Xv | Ph);
Mv = Ph & Xv;
}
}
free(Phc);
return Score;
}
static int64_t myers1999_simple(uint8_t *s1, int64_t len1, uint8_t *s2, int64_t len2)
{
uint64_t Peq[256];
uint64_t Eq, Xv, Xh, Ph, Mh, Pv, Mv, Last;
int64_t i;
int64_t Score = len2;
memset(Peq, 0, sizeof(Peq));
for (i = 0; i < len2; i++)
Peq[s2[i]] |= (uint64_t) 1 << i;
Mv = 0;
Pv = (uint64_t) -1;
Last = (uint64_t) 1 << (len2 - 1);
for (i = 0; i < len1; i++) {
Eq = Peq[s1[i]];
Xv = Eq | Mv;
Xh = (((Eq & Pv) + Pv) ^ Pv) | Eq;
Ph = Mv | ~ (Xh | Pv);
Mh = Pv & Xh;
if (Ph & Last) Score++;
if (Mh & Last) Score--;
Ph = (Ph << 1) | 1;
Mh = (Mh << 1);
Pv = Mh | ~ (Xv | Ph);
Mv = Ph & Xv;
}
return Score;
}
static int64_t myers1999(PyObject *o1, PyObject *o2)
{
struct strbuf s1, s2;
struct blockmap map;
int64_t ret;
strbuf_init(&s1, o1);
strbuf_init(&s2, o2);
if (s1.len < s2.len)
return myers1999(o2, o1);
if (ISASCII(s1.kind) && ISASCII(s2.kind) && s2.len < 65)
return myers1999_simple(s1.ptr, s1.len, s2.ptr, s2.len);
if (blockmap_init(&map, &s2))
return -1;
ret = myers1999_block(&s1, &s2, &map);
blockmap_clear(&map);
return ret;
}
/*
* Interface functions
*/
static int64_t polyleven(PyObject *o1, PyObject *o2, int64_t k)
{
int64_t len1, len2;
len1 = PyUnicode_GET_LENGTH(o1);
len2 = PyUnicode_GET_LENGTH(o2);
if (len1 < len2)
return polyleven(o2, o1, k);
if (k == 0)
return PyUnicode_Compare(o1, o2) ? 1 : 0;
if (0 < k && k < len1 - len2)
return k + 1;
if (len2 == 0)
return len1;
if (0 < k && k < 4)
return mbleven(o1, o2, k);
return myers1999(o1, o2);
}
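To make the mbleven table above more concrete, here is a hedged Python sketch that indexes MBLEVEN_MATRIX the same way as MBLEVEN_MATRIX_GET and replays the encoded edit sequences on two strings. It mirrors the C routine for illustration only and is not part of the vendored file; the r < 2 early exit is omitted for brevity.

# Two bits per edit, read from the least significant end:
# 01 advances s1 (a deletion), 10 advances s2 (an insertion), 11 advances both (a replacement).
MBLEVEN_MATRIX = [
    3, 0, 0, 0, 0, 0, 0, 0,
    1, 0, 0, 0, 0, 0, 0, 0,
    15, 9, 6, 0, 0, 0, 0, 0,
    13, 7, 0, 0, 0, 0, 0, 0,
    5, 0, 0, 0, 0, 0, 0, 0,
    63, 39, 45, 57, 54, 30, 27, 0,
    61, 55, 31, 37, 25, 22, 0, 0,
    53, 29, 23, 0, 0, 0, 0, 0,
    21, 0, 0, 0, 0, 0, 0, 0,
]

def mbleven(s1: str, s2: str, k: int) -> int:
    """Bounded edit distance for k <= 3; returns k + 1 when the bound is exceeded."""
    if len(s1) < len(s2):
        s1, s2 = s2, s1
    if k < len(s1) - len(s2):
        return k + 1
    pos = ((k + k * k) // 2 - 1 + (len(s1) - len(s2))) * 8
    best = k + 1
    while MBLEVEN_MATRIX[pos]:
        m = MBLEVEN_MATRIX[pos]
        pos += 1
        i = j = cost = 0
        while i < len(s1) and j < len(s2):
            if s1[i] != s2[j]:
                cost += 1
                if not m:
                    break
                if m & 1:  # consume a character of s1
                    i += 1
                if m & 2:  # consume a character of s2
                    j += 1
                m >>= 2
            else:
                i += 1
                j += 1
        cost += (len(s1) - i) + (len(s2) - j)
        best = min(best, cost)
    return best

assert mbleven("kitten", "sitting", 3) == 3
assert mbleven("abcd", "wxyz", 3) == 4  # distance exceeds the bound k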

View File

@ -26,7 +26,11 @@ def forward(model, X, is_train):
Yf = model.ops.alloc2f(X.shape[0] + 1, nF * nO * nP, zeros=False) Yf = model.ops.alloc2f(X.shape[0] + 1, nF * nO * nP, zeros=False)
model.ops.gemm(X, W.reshape((nF * nO * nP, nI)), trans2=True, out=Yf[1:]) model.ops.gemm(X, W.reshape((nF * nO * nP, nI)), trans2=True, out=Yf[1:])
Yf = Yf.reshape((Yf.shape[0], nF, nO, nP)) Yf = Yf.reshape((Yf.shape[0], nF, nO, nP))
Yf[0] = model.get_param("pad")
# Set padding. Padding has shape (1, nF, nO, nP). Unfortunately, we cannot
# change its shape to (nF, nO, nP) without breaking existing models. So
# we'll squeeze the first dimension here.
Yf[0] = model.ops.xp.squeeze(model.get_param("pad"), 0)
def backward(dY_ids): def backward(dY_ids):
# This backprop is particularly tricky, because we get back a different # This backprop is particularly tricky, because we get back a different
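As a small aside, the squeeze above only drops the leading singleton dimension of the stored parameter; a minimal numpy illustration (not spaCy code):

import numpy as np

nF, nO, nP = 3, 4, 2
pad = np.zeros((1, nF, nO, nP))   # shape of the stored "pad" parameter
row = np.squeeze(pad, 0)          # shape (nF, nO, nP), assignable to Yf[0]
assert row.shape == (nF, nO, nP)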

View File

@ -89,11 +89,14 @@ def pipes_with_nvtx_range(
types.MethodType(nvtx_range_wrapper_for_pipe_method, pipe), func types.MethodType(nvtx_range_wrapper_for_pipe_method, pipe), func
) )
# Try to preserve the original function signature. # We need to preserve the original function signature so that
# the original parameters are passed to pydantic for validation downstream.
try: try:
wrapped_func.__signature__ = inspect.signature(func) # type: ignore wrapped_func.__signature__ = inspect.signature(func) # type: ignore
except: except:
pass # Can fail for Cython methods that do not have bindings.
warnings.warn(Warnings.W122.format(method=name, pipe=pipe.name))
continue
try: try:
setattr( setattr(

View File

@ -1,11 +1,12 @@
from pathlib import Path from pathlib import Path
from typing import Optional, Callable, Iterable, List, Tuple from typing import Optional, Callable, Iterable, List, Tuple
from thinc.types import Floats2d from thinc.types import Floats2d
from thinc.api import chain, clone, list2ragged, reduce_mean, residual from thinc.api import chain, list2ragged, reduce_mean, residual
from thinc.api import Model, Maxout, Linear, noop, tuplify, Ragged from thinc.api import Model, Maxout, Linear, tuplify, Ragged
from ...util import registry from ...util import registry
from ...kb import KnowledgeBase, Candidate, get_candidates from ...kb import KnowledgeBase, InMemoryLookupKB
from ...kb import Candidate, get_candidates, get_candidates_batch
from ...vocab import Vocab from ...vocab import Vocab
from ...tokens import Span, Doc from ...tokens import Span, Doc
from ..extract_spans import extract_spans from ..extract_spans import extract_spans
@ -70,17 +71,18 @@ def span_maker_forward(model, docs: List[Doc], is_train) -> Tuple[Ragged, Callab
cands.append((start_token, end_token)) cands.append((start_token, end_token))
candidates.append(ops.asarray2i(cands)) candidates.append(ops.asarray2i(cands))
candlens = ops.asarray1i([len(cands) for cands in candidates]) lengths = model.ops.asarray1i([len(cands) for cands in candidates])
candidates = ops.xp.concatenate(candidates) out = Ragged(model.ops.flatten(candidates), lengths)
outputs = Ragged(candidates, candlens)
# because this is just rearranging docs, the backprop does nothing # because this is just rearranging docs, the backprop does nothing
return outputs, lambda x: [] return out, lambda x: []
@registry.misc("spacy.KBFromFile.v1") @registry.misc("spacy.KBFromFile.v1")
def load_kb(kb_path: Path) -> Callable[[Vocab], KnowledgeBase]: def load_kb(
def kb_from_file(vocab): kb_path: Path,
kb = KnowledgeBase(vocab, entity_vector_length=1) ) -> Callable[[Vocab], KnowledgeBase]:
def kb_from_file(vocab: Vocab):
kb = InMemoryLookupKB(vocab, entity_vector_length=1)
kb.from_disk(kb_path) kb.from_disk(kb_path)
return kb return kb
@ -88,9 +90,11 @@ def load_kb(kb_path: Path) -> Callable[[Vocab], KnowledgeBase]:
@registry.misc("spacy.EmptyKB.v1") @registry.misc("spacy.EmptyKB.v1")
def empty_kb(entity_vector_length: int) -> Callable[[Vocab], KnowledgeBase]: def empty_kb(
def empty_kb_factory(vocab): entity_vector_length: int,
return KnowledgeBase(vocab=vocab, entity_vector_length=entity_vector_length) ) -> Callable[[Vocab], KnowledgeBase]:
def empty_kb_factory(vocab: Vocab):
return InMemoryLookupKB(vocab=vocab, entity_vector_length=entity_vector_length)
return empty_kb_factory return empty_kb_factory
@ -98,3 +102,10 @@ def empty_kb(entity_vector_length: int) -> Callable[[Vocab], KnowledgeBase]:
@registry.misc("spacy.CandidateGenerator.v1") @registry.misc("spacy.CandidateGenerator.v1")
def create_candidates() -> Callable[[KnowledgeBase, Span], Iterable[Candidate]]: def create_candidates() -> Callable[[KnowledgeBase, Span], Iterable[Candidate]]:
return get_candidates return get_candidates
@registry.misc("spacy.CandidateBatchGenerator.v1")
def create_candidates_batch() -> Callable[
[KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]
]:
return get_candidates_batch
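For reference, a hedged sketch of how a project could register its own batch candidate generator next to the built-in ones. Only the @registry.misc decorator and the function signatures are taken from the code above; the registry name and the trivial per-mention fallback are illustrative.

from typing import Callable, Iterable

from spacy.kb import Candidate, KnowledgeBase, get_candidates
from spacy.tokens import Span
from spacy.util import registry


@registry.misc("my_project.CandidateBatchGenerator.v1")
def create_my_candidates_batch() -> Callable[
    [KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]
]:
    def get_candidates_batch(kb: KnowledgeBase, mentions: Iterable[Span]):
        # Naive fallback: call the per-mention generator once per span.
        return [get_candidates(kb, mention) for mention in mentions]

    return get_candidates_batch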

View File

@ -441,7 +441,7 @@ cdef class precompute_hiddens:
cdef CBlas cblas cdef CBlas cblas
if isinstance(self.ops, CupyOps): if isinstance(self.ops, CupyOps):
cblas = get_ops("cpu").cblas() cblas = NUMPY_OPS.cblas()
else: else:
cblas = self.ops.cblas() cblas = self.ops.cblas()

View File

@ -1,7 +1,6 @@
from typing import cast, Any, Callable, Dict, Iterable, List, Optional from typing import cast, Any, Callable, Dict, Iterable, List, Optional
from typing import Sequence, Tuple, Union from typing import Tuple
from collections import Counter from collections import Counter
from copy import deepcopy
from itertools import islice from itertools import islice
import numpy as np import numpy as np
@ -149,9 +148,7 @@ class EditTreeLemmatizer(TrainablePipe):
if not any(len(doc) for doc in docs): if not any(len(doc) for doc in docs):
# Handle cases where there are no tokens in any docs. # Handle cases where there are no tokens in any docs.
n_labels = len(self.cfg["labels"]) n_labels = len(self.cfg["labels"])
guesses: List[Ints2d] = [ guesses: List[Ints2d] = [self.model.ops.alloc2i(0, n_labels) for _ in docs]
self.model.ops.alloc((0, n_labels), dtype="i") for doc in docs
]
assert len(guesses) == n_docs assert len(guesses) == n_docs
return guesses return guesses
scores = self.model.predict(docs) scores = self.model.predict(docs)

View File

@ -53,9 +53,12 @@ DEFAULT_NEL_MODEL = Config().from_str(default_model_config)["model"]
"incl_context": True, "incl_context": True,
"entity_vector_length": 64, "entity_vector_length": 64,
"get_candidates": {"@misc": "spacy.CandidateGenerator.v1"}, "get_candidates": {"@misc": "spacy.CandidateGenerator.v1"},
"get_candidates_batch": {"@misc": "spacy.CandidateBatchGenerator.v1"},
"overwrite": True, "overwrite": True,
"scorer": {"@scorers": "spacy.entity_linker_scorer.v1"}, "scorer": {"@scorers": "spacy.entity_linker_scorer.v1"},
"use_gold_ents": True, "use_gold_ents": True,
"candidates_batch_size": 1,
"threshold": None,
}, },
default_score_weights={ default_score_weights={
"nel_micro_f": 1.0, "nel_micro_f": 1.0,
@ -74,9 +77,14 @@ def make_entity_linker(
incl_context: bool, incl_context: bool,
entity_vector_length: int, entity_vector_length: int,
get_candidates: Callable[[KnowledgeBase, Span], Iterable[Candidate]], get_candidates: Callable[[KnowledgeBase, Span], Iterable[Candidate]],
get_candidates_batch: Callable[
[KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]
],
overwrite: bool, overwrite: bool,
scorer: Optional[Callable], scorer: Optional[Callable],
use_gold_ents: bool, use_gold_ents: bool,
candidates_batch_size: int,
threshold: Optional[float] = None,
): ):
"""Construct an EntityLinker component. """Construct an EntityLinker component.
@ -88,13 +96,21 @@ def make_entity_linker(
incl_prior (bool): Whether or not to include prior probabilities from the KB in the model. incl_prior (bool): Whether or not to include prior probabilities from the KB in the model.
incl_context (bool): Whether or not to include the local context in the model. incl_context (bool): Whether or not to include the local context in the model.
entity_vector_length (int): Size of encoding vectors in the KB. entity_vector_length (int): Size of encoding vectors in the KB.
get_candidates (Callable[[KnowledgeBase, "Span"], Iterable[Candidate]]): Function that get_candidates (Callable[[KnowledgeBase, Span], Iterable[Candidate]]): Function that
produces a list of candidates, given a certain knowledge base and a textual mention. produces a list of candidates, given a certain knowledge base and a textual mention.
get_candidates_batch (
Callable[[KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]], Iterable[Candidate]]
): Function that produces a list of candidates, given a certain knowledge base and several textual mentions.
scorer (Optional[Callable]): The scoring method. scorer (Optional[Callable]): The scoring method.
use_gold_ents (bool): Whether to copy entities from gold docs or not. If false, another
component must provide entity annotations.
candidates_batch_size (int): Size of batches for entity candidate generation.
threshold (Optional[float]): Confidence threshold for entity predictions. If confidence is below the threshold,
prediction is discarded. If None, predictions are not filtered by any threshold.
""" """
if not model.attrs.get("include_span_maker", False): if not model.attrs.get("include_span_maker", False):
# The only difference in arguments here is that use_gold_ents is not available # The only difference in arguments here is that use_gold_ents and threshold aren't available.
return EntityLinker_v1( return EntityLinker_v1(
nlp.vocab, nlp.vocab,
model, model,
@ -118,9 +134,12 @@ def make_entity_linker(
incl_context=incl_context, incl_context=incl_context,
entity_vector_length=entity_vector_length, entity_vector_length=entity_vector_length,
get_candidates=get_candidates, get_candidates=get_candidates,
get_candidates_batch=get_candidates_batch,
overwrite=overwrite, overwrite=overwrite,
scorer=scorer, scorer=scorer,
use_gold_ents=use_gold_ents, use_gold_ents=use_gold_ents,
candidates_batch_size=candidates_batch_size,
threshold=threshold,
) )
@ -153,9 +172,14 @@ class EntityLinker(TrainablePipe):
incl_context: bool, incl_context: bool,
entity_vector_length: int, entity_vector_length: int,
get_candidates: Callable[[KnowledgeBase, Span], Iterable[Candidate]], get_candidates: Callable[[KnowledgeBase, Span], Iterable[Candidate]],
get_candidates_batch: Callable[
[KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]
],
overwrite: bool = BACKWARD_OVERWRITE, overwrite: bool = BACKWARD_OVERWRITE,
scorer: Optional[Callable] = entity_linker_score, scorer: Optional[Callable] = entity_linker_score,
use_gold_ents: bool, use_gold_ents: bool,
candidates_batch_size: int,
threshold: Optional[float] = None,
) -> None: ) -> None:
"""Initialize an entity linker. """Initialize an entity linker.
@ -170,13 +194,28 @@ class EntityLinker(TrainablePipe):
entity_vector_length (int): Size of encoding vectors in the KB. entity_vector_length (int): Size of encoding vectors in the KB.
get_candidates (Callable[[KnowledgeBase, Span], Iterable[Candidate]]): Function that get_candidates (Callable[[KnowledgeBase, Span], Iterable[Candidate]]): Function that
produces a list of candidates, given a certain knowledge base and a textual mention. produces a list of candidates, given a certain knowledge base and a textual mention.
scorer (Optional[Callable]): The scoring method. Defaults to get_candidates_batch (
Scorer.score_links. Callable[[KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]],
Iterable[Candidate]]
): Function that produces a list of candidates, given a certain knowledge base and several textual mentions.
scorer (Optional[Callable]): The scoring method. Defaults to Scorer.score_links.
use_gold_ents (bool): Whether to copy entities from gold docs or not. If false, another use_gold_ents (bool): Whether to copy entities from gold docs or not. If false, another
component must provide entity annotations. component must provide entity annotations.
candidates_batch_size (int): Size of batches for entity candidate generation.
threshold (Optional[float]): Confidence threshold for entity predictions. If confidence is below the
threshold, prediction is discarded. If None, predictions are not filtered by any threshold.
DOCS: https://spacy.io/api/entitylinker#init DOCS: https://spacy.io/api/entitylinker#init
""" """
if threshold is not None and not (0 <= threshold <= 1):
raise ValueError(
Errors.E1043.format(
range_start=0,
range_end=1,
value=threshold,
)
)
self.vocab = vocab self.vocab = vocab
self.model = model self.model = model
self.name = name self.name = name
@ -185,13 +224,19 @@ class EntityLinker(TrainablePipe):
self.incl_prior = incl_prior self.incl_prior = incl_prior
self.incl_context = incl_context self.incl_context = incl_context
self.get_candidates = get_candidates self.get_candidates = get_candidates
self.get_candidates_batch = get_candidates_batch
self.cfg: Dict[str, Any] = {"overwrite": overwrite} self.cfg: Dict[str, Any] = {"overwrite": overwrite}
self.distance = CosineDistance(normalize=False) self.distance = CosineDistance(normalize=False)
# how many neighbour sentences to take into account # how many neighbour sentences to take into account
# create an empty KB by default. If you want to load a predefined one, specify it in 'initialize'. # create an empty KB by default
self.kb = empty_kb(entity_vector_length)(self.vocab) self.kb = empty_kb(entity_vector_length)(self.vocab)
self.scorer = scorer self.scorer = scorer
self.use_gold_ents = use_gold_ents self.use_gold_ents = use_gold_ents
self.candidates_batch_size = candidates_batch_size
self.threshold = threshold
if candidates_batch_size < 1:
raise ValueError(Errors.E1044)
def set_kb(self, kb_loader: Callable[[Vocab], KnowledgeBase]): def set_kb(self, kb_loader: Callable[[Vocab], KnowledgeBase]):
"""Define the KB of this pipe by providing a function that will """Define the KB of this pipe by providing a function that will
@ -199,7 +244,7 @@ class EntityLinker(TrainablePipe):
if not callable(kb_loader): if not callable(kb_loader):
raise ValueError(Errors.E885.format(arg_type=type(kb_loader))) raise ValueError(Errors.E885.format(arg_type=type(kb_loader)))
self.kb = kb_loader(self.vocab) self.kb = kb_loader(self.vocab) # type: ignore
def validate_kb(self) -> None: def validate_kb(self) -> None:
# Raise an error if the knowledge base is not initialized. # Raise an error if the knowledge base is not initialized.
@ -221,8 +266,8 @@ class EntityLinker(TrainablePipe):
get_examples (Callable[[], Iterable[Example]]): Function that get_examples (Callable[[], Iterable[Example]]): Function that
returns a representative sample of gold-standard Example objects. returns a representative sample of gold-standard Example objects.
nlp (Language): The current nlp object the component is part of. nlp (Language): The current nlp object the component is part of.
kb_loader (Callable[[Vocab], KnowledgeBase]): A function that creates a KnowledgeBase from a Vocab instance. kb_loader (Callable[[Vocab], KnowledgeBase]): A function that creates a KnowledgeBase from a Vocab
Note that providing this argument, will overwrite all data accumulated in the current KB. instance. Note that providing this argument will overwrite all data accumulated in the current KB.
Use this only when loading a KB as-such from file. Use this only when loading a KB as-such from file.
DOCS: https://spacy.io/api/entitylinker#initialize DOCS: https://spacy.io/api/entitylinker#initialize
@ -399,66 +444,93 @@ class EntityLinker(TrainablePipe):
if len(doc) == 0: if len(doc) == 0:
continue continue
sentences = [s for s in doc.sents] sentences = [s for s in doc.sents]
# Looping through each entity (TODO: rewrite)
for ent in doc.ents:
sent_index = sentences.index(ent.sent)
assert sent_index >= 0
if self.incl_context: # Loop over entities in batches.
# get n_neighbour sentences, clipped to the length of the document for ent_idx in range(0, len(doc.ents), self.candidates_batch_size):
start_sentence = max(0, sent_index - self.n_sents) ent_batch = doc.ents[ent_idx : ent_idx + self.candidates_batch_size]
end_sentence = min(len(sentences) - 1, sent_index + self.n_sents)
start_token = sentences[start_sentence].start # Look up candidate entities.
end_token = sentences[end_sentence].end valid_ent_idx = [
sent_doc = doc[start_token:end_token].as_doc() idx
# currently, the context is the same for each entity in a sentence (should be refined) for idx in range(len(ent_batch))
sentence_encoding = self.model.predict([sent_doc])[0] if ent_batch[idx].label_ not in self.labels_discard
sentence_encoding_t = sentence_encoding.T ]
sentence_norm = xp.linalg.norm(sentence_encoding_t)
entity_count += 1 batch_candidates = list(
if ent.label_ in self.labels_discard: self.get_candidates_batch(
# ignoring this entity - setting to NIL self.kb, [ent_batch[idx] for idx in valid_ent_idx]
final_kb_ids.append(self.NIL) )
else: if self.candidates_batch_size > 1
candidates = list(self.get_candidates(self.kb, ent)) else [
if not candidates: self.get_candidates(self.kb, ent_batch[idx])
# no prediction possible for this entity - setting to NIL for idx in valid_ent_idx
]
)
# Looping through each entity in batch (TODO: rewrite)
for j, ent in enumerate(ent_batch):
sent_index = sentences.index(ent.sent)
assert sent_index >= 0
if self.incl_context:
# get n_neighbour sentences, clipped to the length of the document
start_sentence = max(0, sent_index - self.n_sents)
end_sentence = min(
len(sentences) - 1, sent_index + self.n_sents
)
start_token = sentences[start_sentence].start
end_token = sentences[end_sentence].end
sent_doc = doc[start_token:end_token].as_doc()
# currently, the context is the same for each entity in a sentence (should be refined)
sentence_encoding = self.model.predict([sent_doc])[0]
sentence_encoding_t = sentence_encoding.T
sentence_norm = xp.linalg.norm(sentence_encoding_t)
entity_count += 1
if ent.label_ in self.labels_discard:
# ignoring this entity - setting to NIL
final_kb_ids.append(self.NIL) final_kb_ids.append(self.NIL)
elif len(candidates) == 1:
# shortcut for efficiency reasons: take the 1 candidate
# TODO: thresholding
final_kb_ids.append(candidates[0].entity_)
else: else:
random.shuffle(candidates) candidates = list(batch_candidates[j])
# set all prior probabilities to 0 if incl_prior=False if not candidates:
prior_probs = xp.asarray([c.prior_prob for c in candidates]) # no prediction possible for this entity - setting to NIL
if not self.incl_prior: final_kb_ids.append(self.NIL)
prior_probs = xp.asarray([0.0 for _ in candidates]) elif len(candidates) == 1 and self.threshold is None:
scores = prior_probs # shortcut for efficiency reasons: take the 1 candidate
# add in similarity from the context final_kb_ids.append(candidates[0].entity_)
if self.incl_context: else:
entity_encodings = xp.asarray( random.shuffle(candidates)
[c.entity_vector for c in candidates] # set all prior probabilities to 0 if incl_prior=False
) prior_probs = xp.asarray([c.prior_prob for c in candidates])
entity_norm = xp.linalg.norm(entity_encodings, axis=1) if not self.incl_prior:
if len(entity_encodings) != len(prior_probs): prior_probs = xp.asarray([0.0 for _ in candidates])
raise RuntimeError( scores = prior_probs
Errors.E147.format( # add in similarity from the context
method="predict", if self.incl_context:
msg="vectors not of equal length", entity_encodings = xp.asarray(
) [c.entity_vector for c in candidates]
) )
# cosine similarity entity_norm = xp.linalg.norm(entity_encodings, axis=1)
sims = xp.dot(entity_encodings, sentence_encoding_t) / ( if len(entity_encodings) != len(prior_probs):
sentence_norm * entity_norm raise RuntimeError(
Errors.E147.format(
method="predict",
msg="vectors not of equal length",
)
)
# cosine similarity
sims = xp.dot(entity_encodings, sentence_encoding_t) / (
sentence_norm * entity_norm
)
if sims.shape != prior_probs.shape:
raise ValueError(Errors.E161)
scores = prior_probs + sims - (prior_probs * sims)
final_kb_ids.append(
candidates[scores.argmax().item()].entity_
if self.threshold is None
or scores.max() >= self.threshold
else EntityLinker.NIL
) )
if sims.shape != prior_probs.shape:
raise ValueError(Errors.E161)
scores = prior_probs + sims - (prior_probs * sims)
# TODO: thresholding
best_index = scores.argmax().item()
best_candidate = candidates[best_index]
final_kb_ids.append(best_candidate.entity_)
if not (len(final_kb_ids) == entity_count): if not (len(final_kb_ids) == entity_count):
err = Errors.E147.format( err = Errors.E147.format(
method="predict", msg="result variables not of equal length" method="predict", msg="result variables not of equal length"

View File

@ -1,6 +1,5 @@
import warnings
from typing import Optional, Union, List, Dict, Tuple, Iterable, Any, Callable, Sequence from typing import Optional, Union, List, Dict, Tuple, Iterable, Any, Callable, Sequence
from typing import cast import warnings
from collections import defaultdict from collections import defaultdict
from pathlib import Path from pathlib import Path
import srsly import srsly
@ -317,7 +316,7 @@ class EntityRuler(Pipe):
phrase_pattern["id"] = ent_id phrase_pattern["id"] = ent_id
phrase_patterns.append(phrase_pattern) phrase_patterns.append(phrase_pattern)
for entry in token_patterns + phrase_patterns: # type: ignore[operator] for entry in token_patterns + phrase_patterns: # type: ignore[operator]
label = entry["label"] label = entry["label"] # type: ignore
if "id" in entry: if "id" in entry:
ent_label = label ent_label = label
label = self._create_label(label, entry["id"]) label = self._create_label(label, entry["id"])

View File

@ -7,7 +7,7 @@ from pathlib import Path
from itertools import islice from itertools import islice
import srsly import srsly
import random import random
from thinc.api import CosineDistance, Model, Optimizer, Config from thinc.api import CosineDistance, Model, Optimizer
from thinc.api import set_dropout_rate from thinc.api import set_dropout_rate
import warnings import warnings
@ -20,7 +20,7 @@ from ...language import Language
from ...vocab import Vocab from ...vocab import Vocab
from ...training import Example, validate_examples, validate_get_examples from ...training import Example, validate_examples, validate_get_examples
from ...errors import Errors, Warnings from ...errors import Errors, Warnings
from ...util import SimpleFrozenList, registry from ...util import SimpleFrozenList
from ... import util from ... import util
from ...scorer import Scorer from ...scorer import Scorer
@ -68,9 +68,7 @@ class EntityLinker_v1(TrainablePipe):
entity_vector_length (int): Size of encoding vectors in the KB. entity_vector_length (int): Size of encoding vectors in the KB.
get_candidates (Callable[[KnowledgeBase, Span], Iterable[Candidate]]): Function that get_candidates (Callable[[KnowledgeBase, Span], Iterable[Candidate]]): Function that
produces a list of candidates, given a certain knowledge base and a textual mention. produces a list of candidates, given a certain knowledge base and a textual mention.
scorer (Optional[Callable]): The scoring method. Defaults to scorer (Optional[Callable]): The scoring method. Defaults to Scorer.score_links.
Scorer.score_links.
DOCS: https://spacy.io/api/entitylinker#init DOCS: https://spacy.io/api/entitylinker#init
""" """
self.vocab = vocab self.vocab = vocab
@ -116,7 +114,7 @@ class EntityLinker_v1(TrainablePipe):
get_examples (Callable[[], Iterable[Example]]): Function that get_examples (Callable[[], Iterable[Example]]): Function that
returns a representative sample of gold-standard Example objects. returns a representative sample of gold-standard Example objects.
nlp (Language): The current nlp object the component is part of. nlp (Language): The current nlp object the component is part of.
kb_loader (Callable[[Vocab], KnowledgeBase]): A function that creates a KnowledgeBase from a Vocab instance. kb_loader (Callable[[Vocab], KnowledgeBase]): A function that creates an InMemoryLookupKB from a Vocab instance.
Note that providing this argument, will overwrite all data accumulated in the current KB. Note that providing this argument, will overwrite all data accumulated in the current KB.
Use this only when loading a KB as-such from file. Use this only when loading a KB as-such from file.
@ -272,7 +270,6 @@ class EntityLinker_v1(TrainablePipe):
final_kb_ids.append(self.NIL) final_kb_ids.append(self.NIL)
elif len(candidates) == 1: elif len(candidates) == 1:
# shortcut for efficiency reasons: take the 1 candidate # shortcut for efficiency reasons: take the 1 candidate
# TODO: thresholding
final_kb_ids.append(candidates[0].entity_) final_kb_ids.append(candidates[0].entity_)
else: else:
random.shuffle(candidates) random.shuffle(candidates)
@ -301,7 +298,6 @@ class EntityLinker_v1(TrainablePipe):
if sims.shape != prior_probs.shape: if sims.shape != prior_probs.shape:
raise ValueError(Errors.E161) raise ValueError(Errors.E161)
scores = prior_probs + sims - (prior_probs * sims) scores = prior_probs + sims - (prior_probs * sims)
# TODO: thresholding
best_index = scores.argmax().item() best_index = scores.argmax().item()
best_candidate = candidates[best_index] best_candidate = candidates[best_index]
final_kb_ids.append(best_candidate.entity_) final_kb_ids.append(best_candidate.entity_)

View File

@ -1,4 +1,4 @@
# cython: infer_types=True, profile=True # cython: infer_types=True, profile=True, binding=True
from typing import Optional, Tuple, Iterable, Iterator, Callable, Union, Dict from typing import Optional, Tuple, Iterable, Iterator, Callable, Union, Dict
import srsly import srsly
import warnings import warnings

View File

@ -26,17 +26,17 @@ scorer = {"@layers": "spacy.LinearLogistic.v1"}
hidden_size = 128 hidden_size = 128
[model.tok2vec] [model.tok2vec]
@architectures = "spacy.Tok2Vec.v1" @architectures = "spacy.Tok2Vec.v2"
[model.tok2vec.embed] [model.tok2vec.embed]
@architectures = "spacy.MultiHashEmbed.v1" @architectures = "spacy.MultiHashEmbed.v2"
width = 96 width = 96
rows = [5000, 2000, 1000, 1000] rows = [5000, 2000, 1000, 1000]
attrs = ["ORTH", "PREFIX", "SUFFIX", "SHAPE"] attrs = ["ORTH", "PREFIX", "SUFFIX", "SHAPE"]
include_static_vectors = false include_static_vectors = false
[model.tok2vec.encode] [model.tok2vec.encode]
@architectures = "spacy.MaxoutWindowEncoder.v1" @architectures = "spacy.MaxoutWindowEncoder.v2"
width = ${model.tok2vec.embed.width} width = ${model.tok2vec.embed.width}
window_size = 1 window_size = 1
maxout_pieces = 3 maxout_pieces = 3
@ -133,6 +133,9 @@ def make_spancat(
spans_key (str): Key of the doc.spans dict to save the spans under. During spans_key (str): Key of the doc.spans dict to save the spans under. During
initialization and training, the component will look for spans on the initialization and training, the component will look for spans on the
reference document under the same key. reference document under the same key.
scorer (Optional[Callable]): The scoring method. Defaults to
Scorer.score_spans for the Doc.spans[spans_key] with overlapping
spans allowed.
threshold (float): Minimum probability to consider a prediction positive. threshold (float): Minimum probability to consider a prediction positive.
Spans with a positive prediction will be saved on the Doc. Defaults to Spans with a positive prediction will be saved on the Doc. Defaults to
0.5. 0.5.

View File

@ -24,8 +24,8 @@ single_label_default_config = """
[model.tok2vec.embed] [model.tok2vec.embed]
@architectures = "spacy.MultiHashEmbed.v2" @architectures = "spacy.MultiHashEmbed.v2"
width = 64 width = 64
rows = [2000, 2000, 1000, 1000, 1000, 1000] rows = [2000, 2000, 500, 1000, 500]
attrs = ["ORTH", "LOWER", "PREFIX", "SUFFIX", "SHAPE", "ID"] attrs = ["NORM", "LOWER", "PREFIX", "SUFFIX", "SHAPE"]
include_static_vectors = false include_static_vectors = false
[model.tok2vec.encode] [model.tok2vec.encode]
@ -72,7 +72,7 @@ subword_features = true
"textcat", "textcat",
assigns=["doc.cats"], assigns=["doc.cats"],
default_config={ default_config={
"threshold": 0.5, "threshold": 0.0,
"model": DEFAULT_SINGLE_TEXTCAT_MODEL, "model": DEFAULT_SINGLE_TEXTCAT_MODEL,
"scorer": {"@scorers": "spacy.textcat_scorer.v1"}, "scorer": {"@scorers": "spacy.textcat_scorer.v1"},
}, },
@ -144,7 +144,8 @@ class TextCategorizer(TrainablePipe):
model (thinc.api.Model): The Thinc Model powering the pipeline component. model (thinc.api.Model): The Thinc Model powering the pipeline component.
name (str): The component instance name, used to add entries to the name (str): The component instance name, used to add entries to the
losses during training. losses during training.
threshold (float): Cutoff to consider a prediction "positive". threshold (float): Unused, not needed for single-label (exclusive
classes) classification.
scorer (Optional[Callable]): The scoring method. Defaults to scorer (Optional[Callable]): The scoring method. Defaults to
Scorer.score_cats for the attribute "cats". Scorer.score_cats for the attribute "cats".
@ -154,7 +155,11 @@ class TextCategorizer(TrainablePipe):
self.model = model self.model = model
self.name = name self.name = name
self._rehearsal_model = None self._rehearsal_model = None
cfg = {"labels": [], "threshold": threshold, "positive_label": None} cfg: Dict[str, Any] = {
"labels": [],
"threshold": threshold,
"positive_label": None,
}
self.cfg = dict(cfg) self.cfg = dict(cfg)
self.scorer = scorer self.scorer = scorer
@ -192,7 +197,7 @@ class TextCategorizer(TrainablePipe):
if not any(len(doc) for doc in docs): if not any(len(doc) for doc in docs):
# Handle cases where there are no tokens in any docs. # Handle cases where there are no tokens in any docs.
tensors = [doc.tensor for doc in docs] tensors = [doc.tensor for doc in docs]
xp = get_array_module(tensors) xp = self.model.ops.xp
scores = xp.zeros((len(list(docs)), len(self.labels))) scores = xp.zeros((len(list(docs)), len(self.labels)))
return scores return scores
scores = self.model.predict(docs) scores = self.model.predict(docs)
@ -396,5 +401,9 @@ class TextCategorizer(TrainablePipe):
def _validate_categories(self, examples: Iterable[Example]): def _validate_categories(self, examples: Iterable[Example]):
"""Check whether the provided examples all have single-label cats annotations.""" """Check whether the provided examples all have single-label cats annotations."""
for ex in examples: for ex in examples:
if list(ex.reference.cats.values()).count(1.0) > 1: vals = list(ex.reference.cats.values())
if vals.count(1.0) > 1:
raise ValueError(Errors.E895.format(value=ex.reference.cats)) raise ValueError(Errors.E895.format(value=ex.reference.cats))
for val in vals:
if not (val == 1.0 or val == 0.0):
raise ValueError(Errors.E851.format(val=val))
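The new E851 check means that annotations for the exclusive textcat must use hard 0.0/1.0 values; a minimal hedged sketch of a valid training example (label names are illustrative):

import spacy
from spacy.training import Example

nlp = spacy.blank("en")
textcat = nlp.add_pipe("textcat")
textcat.add_label("POSITIVE")
textcat.add_label("NEGATIVE")

doc = nlp.make_doc("This is great")
# Exactly one label is 1.0 and the rest are 0.0; soft scores such as 0.7 now raise E851.
example = Example.from_dict(doc, {"cats": {"POSITIVE": 1.0, "NEGATIVE": 0.0}})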

View File

@ -19,17 +19,17 @@ multi_label_default_config = """
@architectures = "spacy.TextCatEnsemble.v2" @architectures = "spacy.TextCatEnsemble.v2"
[model.tok2vec] [model.tok2vec]
@architectures = "spacy.Tok2Vec.v1" @architectures = "spacy.Tok2Vec.v2"
[model.tok2vec.embed] [model.tok2vec.embed]
@architectures = "spacy.MultiHashEmbed.v2" @architectures = "spacy.MultiHashEmbed.v2"
width = 64 width = 64
rows = [2000, 2000, 1000, 1000, 1000, 1000] rows = [2000, 2000, 500, 1000, 500]
attrs = ["ORTH", "LOWER", "PREFIX", "SUFFIX", "SHAPE", "ID"] attrs = ["NORM", "LOWER", "PREFIX", "SUFFIX", "SHAPE"]
include_static_vectors = false include_static_vectors = false
[model.tok2vec.encode] [model.tok2vec.encode]
@architectures = "spacy.MaxoutWindowEncoder.v1" @architectures = "spacy.MaxoutWindowEncoder.v2"
width = ${model.tok2vec.embed.width} width = ${model.tok2vec.embed.width}
window_size = 1 window_size = 1
maxout_pieces = 3 maxout_pieces = 3
@ -96,8 +96,8 @@ def make_multilabel_textcat(
model: Model[List[Doc], List[Floats2d]], model: Model[List[Doc], List[Floats2d]],
threshold: float, threshold: float,
scorer: Optional[Callable], scorer: Optional[Callable],
) -> "TextCategorizer": ) -> "MultiLabel_TextCategorizer":
"""Create a TextCategorizer component. The text categorizer predicts categories """Create a MultiLabel_TextCategorizer component. The text categorizer predicts categories
over a whole document. It can learn one or more labels, and the labels are considered over a whole document. It can learn one or more labels, and the labels are considered
to be non-mutually exclusive, which means that there can be zero or more labels to be non-mutually exclusive, which means that there can be zero or more labels
per doc). per doc).
@ -105,6 +105,7 @@ def make_multilabel_textcat(
model (Model[List[Doc], List[Floats2d]]): A model instance that predicts model (Model[List[Doc], List[Floats2d]]): A model instance that predicts
scores for each category. scores for each category.
threshold (float): Cutoff to consider a prediction "positive". threshold (float): Cutoff to consider a prediction "positive".
scorer (Optional[Callable]): The scoring method.
""" """
return MultiLabel_TextCategorizer( return MultiLabel_TextCategorizer(
nlp.vocab, model, name, threshold=threshold, scorer=scorer nlp.vocab, model, name, threshold=threshold, scorer=scorer
@ -147,6 +148,7 @@ class MultiLabel_TextCategorizer(TextCategorizer):
name (str): The component instance name, used to add entries to the name (str): The component instance name, used to add entries to the
losses during training. losses during training.
threshold (float): Cutoff to consider a prediction "positive". threshold (float): Cutoff to consider a prediction "positive".
scorer (Optional[Callable]): The scoring method.
DOCS: https://spacy.io/api/textcategorizer#init DOCS: https://spacy.io/api/textcategorizer#init
""" """
@ -190,6 +192,8 @@ class MultiLabel_TextCategorizer(TextCategorizer):
for label in labels: for label in labels:
self.add_label(label) self.add_label(label)
subbatch = list(islice(get_examples(), 10)) subbatch = list(islice(get_examples(), 10))
self._validate_categories(subbatch)
doc_sample = [eg.reference for eg in subbatch] doc_sample = [eg.reference for eg in subbatch]
label_sample, _ = self._examples_to_truth(subbatch) label_sample, _ = self._examples_to_truth(subbatch)
self._require_labels() self._require_labels()
@ -200,4 +204,8 @@ class MultiLabel_TextCategorizer(TextCategorizer):
def _validate_categories(self, examples: Iterable[Example]): def _validate_categories(self, examples: Iterable[Example]):
"""This component allows any type of single- or multi-label annotations. """This component allows any type of single- or multi-label annotations.
This method overwrites the more strict one from 'textcat'.""" This method overwrites the more strict one from 'textcat'."""
pass # check that annotation values are valid
for ex in examples:
for val in ex.reference.cats.values():
if not (val == 1.0 or val == 0.0):
raise ValueError(Errors.E851.format(val=val))

View File

@ -123,9 +123,6 @@ class Tok2Vec(TrainablePipe):
width = self.model.get_dim("nO") width = self.model.get_dim("nO")
return [self.model.ops.alloc((0, width)) for doc in docs] return [self.model.ops.alloc((0, width)) for doc in docs]
tokvecs = self.model.predict(docs) tokvecs = self.model.predict(docs)
batch_id = Tok2VecListener.get_batch_id(docs)
for listener in self.listeners:
listener.receive(batch_id, tokvecs, _empty_backprop)
return tokvecs return tokvecs
def set_annotations(self, docs: Sequence[Doc], tokvecses) -> None: def set_annotations(self, docs: Sequence[Doc], tokvecses) -> None:
@ -286,8 +283,19 @@ class Tok2VecListener(Model):
def forward(model: Tok2VecListener, inputs, is_train: bool): def forward(model: Tok2VecListener, inputs, is_train: bool):
"""Supply the outputs from the upstream Tok2Vec component.""" """Supply the outputs from the upstream Tok2Vec component."""
if is_train: if is_train:
model.verify_inputs(inputs) # This might occur during training when the tok2vec layer is frozen / hasn't been updated.
return model._outputs, model._backprop # In that case, it should be set to "annotating" so we can retrieve the embeddings from the doc.
if model._batch_id is None:
outputs = []
for doc in inputs:
if doc.tensor.size == 0:
raise ValueError(Errors.E203.format(name="tok2vec"))
else:
outputs.append(doc.tensor)
return outputs, _empty_backprop
else:
model.verify_inputs(inputs)
return model._outputs, model._backprop
else: else:
# This is pretty grim, but it's hard to do better :(. # This is pretty grim, but it's hard to do better :(.
# It's hard to avoid relying on the doc.tensor attribute, because the # It's hard to avoid relying on the doc.tensor attribute, because the
@ -306,7 +314,7 @@ def forward(model: Tok2VecListener, inputs, is_train: bool):
outputs.append(model.ops.alloc2f(len(doc), width)) outputs.append(model.ops.alloc2f(len(doc), width))
else: else:
outputs.append(doc.tensor) outputs.append(doc.tensor)
return outputs, lambda dX: [] return outputs, _empty_backprop
def _empty_backprop(dX): # for pickling def _empty_backprop(dX): # for pickling
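The frozen-tok2vec path above only works if the frozen component still runs as an annotating component during training, so that doc.tensor is populated for the listeners. A hedged sketch of the relevant training settings, expressed as config overrides (the keys match ConfigSchemaTraining further below; the pipeline name is illustrative):

# e.g. merged into the training config as overrides
overrides = {
    "training.frozen_components": ["tok2vec"],      # do not update its weights
    "training.annotating_components": ["tok2vec"],  # but still set doc.tensor
}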

View File

@ -1,4 +1,4 @@
# cython: infer_types=True, profile=True # cython: infer_types=True, profile=True, binding=True
from typing import Iterable, Iterator, Optional, Dict, Tuple, Callable from typing import Iterable, Iterator, Optional, Dict, Tuple, Callable
import srsly import srsly
from thinc.api import set_dropout_rate, Model, Optimizer from thinc.api import set_dropout_rate, Model, Optimizer

View File

@ -9,7 +9,7 @@ from libc.stdlib cimport calloc, free
import random import random
import srsly import srsly
from thinc.api import get_ops, set_dropout_rate, CupyOps from thinc.api import get_ops, set_dropout_rate, CupyOps, NumpyOps
from thinc.extra.search cimport Beam from thinc.extra.search cimport Beam
import numpy.random import numpy.random
import numpy import numpy
@ -30,6 +30,9 @@ from ..errors import Errors, Warnings
from .. import util from .. import util
NUMPY_OPS = NumpyOps()
cdef class Parser(TrainablePipe): cdef class Parser(TrainablePipe):
""" """
Base class of the DependencyParser and EntityRecognizer. Base class of the DependencyParser and EntityRecognizer.
@ -262,7 +265,7 @@ cdef class Parser(TrainablePipe):
ops = self.model.ops ops = self.model.ops
cdef CBlas cblas cdef CBlas cblas
if isinstance(ops, CupyOps): if isinstance(ops, CupyOps):
cblas = get_ops("cpu").cblas() cblas = NUMPY_OPS.cblas()
else: else:
cblas = ops.cblas() cblas = ops.cblas()
self._ensure_labels_are_added(docs) self._ensure_labels_are_added(docs)

View File

@ -181,12 +181,12 @@ class TokenPatternNumber(BaseModel):
IS_SUBSET: Optional[List[StrictInt]] = Field(None, alias="is_subset") IS_SUBSET: Optional[List[StrictInt]] = Field(None, alias="is_subset")
IS_SUPERSET: Optional[List[StrictInt]] = Field(None, alias="is_superset") IS_SUPERSET: Optional[List[StrictInt]] = Field(None, alias="is_superset")
INTERSECTS: Optional[List[StrictInt]] = Field(None, alias="intersects") INTERSECTS: Optional[List[StrictInt]] = Field(None, alias="intersects")
EQ: Union[StrictInt, StrictFloat] = Field(None, alias="==") EQ: Optional[Union[StrictInt, StrictFloat]] = Field(None, alias="==")
NEQ: Union[StrictInt, StrictFloat] = Field(None, alias="!=") NEQ: Optional[Union[StrictInt, StrictFloat]] = Field(None, alias="!=")
GEQ: Union[StrictInt, StrictFloat] = Field(None, alias=">=") GEQ: Optional[Union[StrictInt, StrictFloat]] = Field(None, alias=">=")
LEQ: Union[StrictInt, StrictFloat] = Field(None, alias="<=") LEQ: Optional[Union[StrictInt, StrictFloat]] = Field(None, alias="<=")
GT: Union[StrictInt, StrictFloat] = Field(None, alias=">") GT: Optional[Union[StrictInt, StrictFloat]] = Field(None, alias=">")
LT: Union[StrictInt, StrictFloat] = Field(None, alias="<") LT: Optional[Union[StrictInt, StrictFloat]] = Field(None, alias="<")
class Config: class Config:
extra = "forbid" extra = "forbid"
@ -209,7 +209,7 @@ class TokenPatternOperatorSimple(str, Enum):
class TokenPatternOperatorMinMax(ConstrainedStr): class TokenPatternOperatorMinMax(ConstrainedStr):
regex = re.compile("^({\d+}|{\d+,\d*}|{\d*,\d+})$") regex = re.compile(r"^({\d+}|{\d+,\d*}|{\d*,\d+})$")
TokenPatternOperator = Union[TokenPatternOperatorSimple, TokenPatternOperatorMinMax] TokenPatternOperator = Union[TokenPatternOperatorSimple, TokenPatternOperatorMinMax]
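The corrected raw-string regex above validates the curly-brace quantifier operators for token patterns; a hedged usage sketch with the Matcher (token texts are illustrative):

import spacy
from spacy.matcher import Matcher

nlp = spacy.blank("en")
matcher = Matcher(nlp.vocab)
# "{1,3}" means one to three occurrences; "{2}" exactly two; "{2,}" two or more.
matcher.add("VERY_GOOD", [[{"LOWER": "very", "OP": "{1,3}"}, {"LOWER": "good"}]])

doc = nlp("It was very very good.")
print([doc[start:end].text for _, start, end in matcher(doc)])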
@ -331,6 +331,7 @@ class ConfigSchemaTraining(BaseModel):
frozen_components: List[str] = Field(..., title="Pipeline components that shouldn't be updated during training") frozen_components: List[str] = Field(..., title="Pipeline components that shouldn't be updated during training")
annotating_components: List[str] = Field(..., title="Pipeline components that should set annotations during training") annotating_components: List[str] = Field(..., title="Pipeline components that should set annotations during training")
before_to_disk: Optional[Callable[["Language"], "Language"]] = Field(..., title="Optional callback to modify nlp object after training, before it's saved to disk") before_to_disk: Optional[Callable[["Language"], "Language"]] = Field(..., title="Optional callback to modify nlp object after training, before it's saved to disk")
before_update: Optional[Callable[["Language", Dict[str, Any]], None]] = Field(..., title="Optional callback that is invoked at the start of each training step")
# fmt: on # fmt: on
class Config: class Config:
@ -432,7 +433,7 @@ class ProjectConfigAssetURL(BaseModel):
# fmt: off # fmt: off
dest: StrictStr = Field(..., title="Destination of downloaded asset") dest: StrictStr = Field(..., title="Destination of downloaded asset")
url: Optional[StrictStr] = Field(None, title="URL of asset") url: Optional[StrictStr] = Field(None, title="URL of asset")
checksum: str = Field(None, title="MD5 hash of file", regex=r"([a-fA-F\d]{32})") checksum: Optional[str] = Field(None, title="MD5 hash of file", regex=r"([a-fA-F\d]{32})")
description: StrictStr = Field("", title="Description of asset") description: StrictStr = Field("", title="Description of asset")
# fmt: on # fmt: on
@ -440,7 +441,7 @@ class ProjectConfigAssetURL(BaseModel):
class ProjectConfigAssetGit(BaseModel): class ProjectConfigAssetGit(BaseModel):
# fmt: off # fmt: off
git: ProjectConfigAssetGitItem = Field(..., title="Git repo information") git: ProjectConfigAssetGitItem = Field(..., title="Git repo information")
checksum: str = Field(None, title="MD5 hash of file", regex=r"([a-fA-F\d]{32})") checksum: Optional[str] = Field(None, title="MD5 hash of file", regex=r"([a-fA-F\d]{32})")
description: Optional[StrictStr] = Field(None, title="Description of asset") description: Optional[StrictStr] = Field(None, title="Description of asset")
# fmt: on # fmt: on
@ -510,12 +511,20 @@ class DocJSONSchema(BaseModel):
None, title="Indices of sentences' start and end indices" None, title="Indices of sentences' start and end indices"
) )
text: StrictStr = Field(..., title="Document text") text: StrictStr = Field(..., title="Document text")
spans: Dict[StrictStr, List[Dict[StrictStr, Union[StrictStr, StrictInt]]]] = Field( spans: Optional[
None, title="Span information - end/start indices, label, KB ID" Dict[StrictStr, List[Dict[StrictStr, Union[StrictStr, StrictInt]]]]
) ] = Field(None, title="Span information - end/start indices, label, KB ID")
tokens: List[Dict[StrictStr, Union[StrictStr, StrictInt]]] = Field( tokens: List[Dict[StrictStr, Union[StrictStr, StrictInt]]] = Field(
..., title="Token information - ID, start, annotations" ..., title="Token information - ID, start, annotations"
) )
_: Optional[Dict[StrictStr, Any]] = Field( underscore_doc: Optional[Dict[StrictStr, Any]] = Field(
None, title="Any custom data stored in the document's _ attribute" None,
title="Any custom data stored in the document's _ attribute",
alias="_",
)
underscore_token: Optional[Dict[StrictStr, List[Dict[StrictStr, Any]]]] = Field(
None, title="Any custom data stored in the token's _ attribute"
)
underscore_span: Optional[Dict[StrictStr, List[Dict[StrictStr, Any]]]] = Field(
None, title="Any custom data stored in the span's _ attribute"
) )
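The renamed underscore fields mirror how custom extension data appears in Doc.to_json output; a short hedged sketch (the extension name is illustrative):

import spacy
from spacy.tokens import Doc

Doc.set_extension("review_id", default=None)

nlp = spacy.blank("en")
doc = nlp("A short review.")
doc._.review_id = "r-42"
# Doc-level extension data is serialized under the "_" key that DocJSONSchema aliases.
data = doc.to_json(underscore=["review_id"])
assert data["_"]["review_id"] == "r-42"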

View File

@ -446,7 +446,7 @@ class Scorer:
labels (Iterable[str]): The set of possible labels. Defaults to []. labels (Iterable[str]): The set of possible labels. Defaults to [].
multi_label (bool): Whether the attribute allows multiple labels. multi_label (bool): Whether the attribute allows multiple labels.
Defaults to True. When set to False (exclusive labels), missing Defaults to True. When set to False (exclusive labels), missing
gold labels are interpreted as 0.0. gold labels are interpreted as 0.0 and the threshold is set to 0.0.
positive_label (str): The positive label for a binary task with positive_label (str): The positive label for a binary task with
exclusive classes. Defaults to None. exclusive classes. Defaults to None.
threshold (float): Cutoff to consider a prediction "positive". Defaults threshold (float): Cutoff to consider a prediction "positive". Defaults
@ -471,6 +471,8 @@ class Scorer:
""" """
if threshold is None: if threshold is None:
threshold = 0.5 if multi_label else 0.0 threshold = 0.5 if multi_label else 0.0
if not multi_label:
threshold = 0.0
f_per_type = {label: PRFScore() for label in labels} f_per_type = {label: PRFScore() for label in labels}
auc_per_type = {label: ROCAUCScore() for label in labels} auc_per_type = {label: ROCAUCScore() for label in labels}
labels = set(labels) labels = set(labels)
@ -505,20 +507,18 @@ class Scorer:
# Get the highest-scoring for each. # Get the highest-scoring for each.
pred_label, pred_score = max(pred_cats.items(), key=lambda it: it[1]) pred_label, pred_score = max(pred_cats.items(), key=lambda it: it[1])
gold_label, gold_score = max(gold_cats.items(), key=lambda it: it[1]) gold_label, gold_score = max(gold_cats.items(), key=lambda it: it[1])
if pred_label == gold_label and pred_score >= threshold: if pred_label == gold_label:
f_per_type[pred_label].tp += 1 f_per_type[pred_label].tp += 1
else: else:
f_per_type[gold_label].fn += 1 f_per_type[gold_label].fn += 1
if pred_score >= threshold: f_per_type[pred_label].fp += 1
f_per_type[pred_label].fp += 1
elif gold_cats: elif gold_cats:
gold_label, gold_score = max(gold_cats, key=lambda it: it[1]) gold_label, gold_score = max(gold_cats, key=lambda it: it[1])
if gold_score > 0: if gold_score > 0:
f_per_type[gold_label].fn += 1 f_per_type[gold_label].fn += 1
elif pred_cats: elif pred_cats:
pred_label, pred_score = max(pred_cats.items(), key=lambda it: it[1]) pred_label, pred_score = max(pred_cats.items(), key=lambda it: it[1])
if pred_score >= threshold: f_per_type[pred_label].fp += 1
f_per_type[pred_label].fp += 1
micro_prf = PRFScore() micro_prf = PRFScore()
for label_prf in f_per_type.values(): for label_prf in f_per_type.values():
micro_prf.tp += label_prf.tp micro_prf.tp += label_prf.tp

View File

@ -26,4 +26,4 @@ cdef class StringStore:
cdef public PreshMap _map cdef public PreshMap _map
cdef const Utf8Str* intern_unicode(self, str py_string) cdef const Utf8Str* intern_unicode(self, str py_string)
cdef const Utf8Str* _intern_utf8(self, char* utf8_string, int length) cdef const Utf8Str* _intern_utf8(self, char* utf8_string, int length, hash_t* precalculated_hash)

View File

@ -14,6 +14,13 @@ from .symbols import NAMES as SYMBOLS_BY_INT
from .errors import Errors from .errors import Errors
from . import util from . import util
# Not particularly elegant, but this is faster than `isinstance(key, numbers.Integral)`
cdef inline bint _try_coerce_to_hash(object key, hash_t* out_hash):
try:
out_hash[0] = key
return True
except:
return False
def get_string_id(key): def get_string_id(key):
"""Get a string ID, handling the reserved symbols correctly. If the key is """Get a string ID, handling the reserved symbols correctly. If the key is
@ -22,15 +29,27 @@ def get_string_id(key):
This function optimises for convenience over performance, so shouldn't be This function optimises for convenience over performance, so shouldn't be
used in tight loops. used in tight loops.
""" """
if not isinstance(key, str): cdef hash_t str_hash
return key if isinstance(key, str):
elif key in SYMBOLS_BY_STR: if len(key) == 0:
return SYMBOLS_BY_STR[key] return 0
elif not key:
return 0 symbol = SYMBOLS_BY_STR.get(key, None)
if symbol is not None:
return symbol
else:
chars = key.encode("utf8")
return hash_utf8(chars, len(chars))
elif _try_coerce_to_hash(key, &str_hash):
# Coerce the integral key to the expected primitive hash type.
# This ensures that custom/overloaded "primitive" data types
# such as those implemented by numpy are not inadvertently used
# downstream (as these are internally implemented as custom PyObjects
# whose comparison operators can incur a significant overhead).
return str_hash
else: else:
chars = key.encode("utf8") # TODO: Raise an error instead
return hash_utf8(chars, len(chars)) return key
cpdef hash_t hash_string(str string) except 0: cpdef hash_t hash_string(str string) except 0:
@ -110,28 +129,36 @@ cdef class StringStore:
string_or_id (bytes, str or uint64): The value to encode. string_or_id (bytes, str or uint64): The value to encode.
Returns (str / uint64): The value to be retrieved. Returns (str / uint64): The value to be retrieved.
""" """
if isinstance(string_or_id, str) and len(string_or_id) == 0: cdef hash_t str_hash
return 0 cdef Utf8Str* utf8str = NULL
elif string_or_id == 0:
return ""
elif string_or_id in SYMBOLS_BY_STR:
return SYMBOLS_BY_STR[string_or_id]
cdef hash_t key
if isinstance(string_or_id, str): if isinstance(string_or_id, str):
key = hash_string(string_or_id) if len(string_or_id) == 0:
return key return 0
elif isinstance(string_or_id, bytes):
key = hash_utf8(string_or_id, len(string_or_id)) # Return early if the string is found in the symbols LUT.
return key symbol = SYMBOLS_BY_STR.get(string_or_id, None)
elif string_or_id < len(SYMBOLS_BY_INT): if symbol is not None:
return SYMBOLS_BY_INT[string_or_id] return symbol
else:
key = string_or_id
utf8str = <Utf8Str*>self._map.get(key)
if utf8str is NULL:
raise KeyError(Errors.E018.format(hash_value=string_or_id))
else: else:
return decode_Utf8Str(utf8str) return hash_string(string_or_id)
elif isinstance(string_or_id, bytes):
return hash_utf8(string_or_id, len(string_or_id))
elif _try_coerce_to_hash(string_or_id, &str_hash):
if str_hash == 0:
return ""
elif str_hash < len(SYMBOLS_BY_INT):
return SYMBOLS_BY_INT[str_hash]
else:
utf8str = <Utf8Str*>self._map.get(str_hash)
else:
# TODO: Raise an error instead
utf8str = <Utf8Str*>self._map.get(string_or_id)
if utf8str is NULL:
raise KeyError(Errors.E018.format(hash_value=string_or_id))
else:
return decode_Utf8Str(utf8str)
def as_int(self, key): def as_int(self, key):
"""If key is an int, return it; otherwise, get the int value.""" """If key is an int, return it; otherwise, get the int value."""
@ -153,19 +180,22 @@ cdef class StringStore:
string (str): The string to add. string (str): The string to add.
RETURNS (uint64): The string's hash value. RETURNS (uint64): The string's hash value.
""" """
cdef hash_t str_hash
if isinstance(string, str): if isinstance(string, str):
if string in SYMBOLS_BY_STR: if string in SYMBOLS_BY_STR:
return SYMBOLS_BY_STR[string] return SYMBOLS_BY_STR[string]
key = hash_string(string)
self.intern_unicode(string) string = string.encode("utf8")
str_hash = hash_utf8(string, len(string))
self._intern_utf8(string, len(string), &str_hash)
elif isinstance(string, bytes): elif isinstance(string, bytes):
if string in SYMBOLS_BY_STR: if string in SYMBOLS_BY_STR:
return SYMBOLS_BY_STR[string] return SYMBOLS_BY_STR[string]
key = hash_utf8(string, len(string)) str_hash = hash_utf8(string, len(string))
self._intern_utf8(string, len(string)) self._intern_utf8(string, len(string), &str_hash)
else: else:
raise TypeError(Errors.E017.format(value_type=type(string))) raise TypeError(Errors.E017.format(value_type=type(string)))
return key return str_hash
def __len__(self): def __len__(self):
"""The number of strings in the store. """The number of strings in the store.
@@ -174,30 +204,29 @@ cdef class StringStore:
         """
         return self.keys.size()

-    def __contains__(self, string not None):
-        """Check whether a string is in the store.
+    def __contains__(self, string_or_id not None):
+        """Check whether a string or ID is in the store.

-        string (str): The string to check.
+        string_or_id (str or int): The string to check.
         RETURNS (bool): Whether the store contains the string.
         """
-        cdef hash_t key
-        if isinstance(string, int) or isinstance(string, long):
-            if string == 0:
-                return True
-            key = string
-        elif len(string) == 0:
-            return True
-        elif string in SYMBOLS_BY_STR:
-            return True
-        elif isinstance(string, str):
-            key = hash_string(string)
+        cdef hash_t str_hash
+        if isinstance(string_or_id, str):
+            if len(string_or_id) == 0:
+                return True
+            elif string_or_id in SYMBOLS_BY_STR:
+                return True
+            str_hash = hash_string(string_or_id)
+        elif _try_coerce_to_hash(string_or_id, &str_hash):
+            pass
         else:
-            string = string.encode("utf8")
-            key = hash_utf8(string, len(string))
-        if key < len(SYMBOLS_BY_INT):
+            # TODO: Raise an error instead
+            return self._map.get(string_or_id) is not NULL
+
+        if str_hash < len(SYMBOLS_BY_INT):
             return True
         else:
-            return self._map.get(key) is not NULL
+            return self._map.get(str_hash) is not NULL

     def __iter__(self):
         """Iterate over the strings in the store, in order.
@@ -272,13 +301,13 @@ cdef class StringStore:
     cdef const Utf8Str* intern_unicode(self, str py_string):
         # 0 means missing, but we don't bother offsetting the index.
         cdef bytes byte_string = py_string.encode("utf8")
-        return self._intern_utf8(byte_string, len(byte_string))
+        return self._intern_utf8(byte_string, len(byte_string), NULL)

     @cython.final
-    cdef const Utf8Str* _intern_utf8(self, char* utf8_string, int length):
+    cdef const Utf8Str* _intern_utf8(self, char* utf8_string, int length, hash_t* precalculated_hash):
         # TODO: This function's API/behaviour is an unholy mess...
         # 0 means missing, but we don't bother offsetting the index.
-        cdef hash_t key = hash_utf8(utf8_string, length)
+        cdef hash_t key = precalculated_hash[0] if precalculated_hash is not NULL else hash_utf8(utf8_string, length)
         cdef Utf8Str* value = <Utf8Str*>self._map.get(key)
         if value is not NULL:
             return value
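
The new precalculated_hash argument lets callers such as add(), which have already hashed the UTF-8 bytes, avoid hashing them a second time. A generic pure-Python analogue of that pattern (not spaCy's internal API):

from typing import Dict, Optional

def intern_bytes(store: Dict[int, bytes], data: bytes,
                 precalculated_hash: Optional[int] = None) -> int:
    # Reuse the caller's hash when one is supplied; otherwise compute it here.
    key = precalculated_hash if precalculated_hash is not None else hash(data)
    store.setdefault(key, data)
    return key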

spacy/tests/conftest.py

@@ -1,5 +1,11 @@
 import pytest
 from spacy.util import get_lang_class
+from hypothesis import settings
+
+# Functionally disable deadline settings for tests
+# to prevent spurious test failures in CI builds.
+settings.register_profile("no_deadlines", deadline=2 * 60 * 1000)  # in ms
+settings.load_profile("no_deadlines")


 def pytest_addoption(parser):
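
For context, the profile above raises Hypothesis' default per-example deadline (normally 200 ms) to two minutes, effectively disabling it on slow CI workers. A sketch of the kind of property-based test this affects; the test body and the use of the en_tokenizer fixture are illustrative assumptions that rely on spaCy's non-destructive tokenization:

from hypothesis import given, strategies as st

@given(text=st.text())
def test_tokenization_preserves_text(en_tokenizer, text):
    # The first generated example can be slow to run; without the relaxed
    # deadline it could trip Hypothesis' DeadlineExceeded check.
    assert en_tokenizer(text).text == text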
@@ -250,11 +256,21 @@ def ko_tokenizer_tokenizer():
     return nlp.tokenizer


+@pytest.fixture(scope="module")
+def la_tokenizer():
+    return get_lang_class("la")().tokenizer
+
+
 @pytest.fixture(scope="session")
 def lb_tokenizer():
     return get_lang_class("lb")().tokenizer


+@pytest.fixture(scope="session")
+def lg_tokenizer():
+    return get_lang_class("lg")().tokenizer
+
+
 @pytest.fixture(scope="session")
 def lt_tokenizer():
     return get_lang_class("lt")().tokenizer
@@ -317,16 +333,24 @@ def ro_tokenizer():

 @pytest.fixture(scope="session")
 def ru_tokenizer():
-    pytest.importorskip("pymorphy2")
+    pytest.importorskip("pymorphy3")
     return get_lang_class("ru")().tokenizer


 @pytest.fixture
 def ru_lemmatizer():
-    pytest.importorskip("pymorphy2")
+    pytest.importorskip("pymorphy3")
     return get_lang_class("ru")().add_pipe("lemmatizer")


+@pytest.fixture
+def ru_lookup_lemmatizer():
+    pytest.importorskip("pymorphy2")
+    return get_lang_class("ru")().add_pipe(
+        "lemmatizer", config={"mode": "pymorphy2_lookup"}
+    )
+
+
 @pytest.fixture(scope="session")
 def sa_tokenizer():
     return get_lang_class("sa")().tokenizer
@@ -395,15 +419,24 @@ def ky_tokenizer():

 @pytest.fixture(scope="session")
 def uk_tokenizer():
-    pytest.importorskip("pymorphy2")
+    pytest.importorskip("pymorphy3")
     return get_lang_class("uk")().tokenizer


 @pytest.fixture
 def uk_lemmatizer():
+    pytest.importorskip("pymorphy3")
+    pytest.importorskip("pymorphy3_dicts_uk")
+    return get_lang_class("uk")().add_pipe("lemmatizer")
+
+
+@pytest.fixture
+def uk_lookup_lemmatizer():
     pytest.importorskip("pymorphy2")
     pytest.importorskip("pymorphy2_dicts_uk")
-    return get_lang_class("uk")().add_pipe("lemmatizer")
+    return get_lang_class("uk")().add_pipe(
+        "lemmatizer", config={"mode": "pymorphy2_lookup"}
+    )


 @pytest.fixture(scope="session")

Some files were not shown because too many files have changed in this diff.