Merge remote-tracking branch 'upstream/v4' into feature/refactor-parser

commit 4de76f0577 — Daniël de Kok, 2022-09-27 11:30:51 +02:00
218 changed files with 5716 additions and 2007 deletions

View File

@ -41,7 +41,7 @@ steps:
  - bash: |
      ${{ parameters.prefix }} SDIST=$(python -c "import os;print(os.listdir('./dist')[-1])" 2>&1)
-     ${{ parameters.prefix }} python -m pip install dist/$SDIST
+     ${{ parameters.prefix }} SPACY_NUM_BUILD_JOBS=2 python -m pip install dist/$SDIST
    displayName: "Install from sdist"
  - script: |
@ -55,12 +55,12 @@ steps:
    condition: eq(${{ parameters.gpu }}, true)
  - script: |
-     ${{ parameters.prefix }} python -m pytest --pyargs spacy
+     ${{ parameters.prefix }} python -m pytest --pyargs spacy -W error
    displayName: "Run CPU tests"
    condition: eq(${{ parameters.gpu }}, false)
  - script: |
-     ${{ parameters.prefix }} python -m pytest --pyargs spacy -p spacy.tests.enable_gpu
+     ${{ parameters.prefix }} python -m pytest --pyargs spacy -W error -p spacy.tests.enable_gpu
    displayName: "Run GPU tests"
    condition: eq(${{ parameters.gpu }}, true)
@ -114,7 +114,7 @@ steps:
    condition: eq(variables['python_version'], '3.8')
  - script: |
-     ${{ parameters.prefix }} python -m pip install thinc-apple-ops
+     ${{ parameters.prefix }} python -m pip install --pre thinc-apple-ops
      ${{ parameters.prefix }} python -m pytest --pyargs spacy
    displayName: "Run CPU tests with thinc-apple-ops"
-   condition: and(startsWith(variables['imageName'], 'macos'), eq(variables['python.version'], '3.9'))
+   condition: and(startsWith(variables['imageName'], 'macos'), eq(variables['python.version'], '3.10'))

.github/contributors/Lucaterre.md (new file, 106 lines)
View File

@ -0,0 +1,106 @@
# spaCy contributor agreement
This spaCy Contributor Agreement (**"SCA"**) is based on the
[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf).
The SCA applies to any contribution that you make to any product or project
managed by us (the **"project"**), and sets out the intellectual property rights
you grant to us in the contributed materials. The term **"us"** shall mean
[ExplosionAI GmbH](https://explosion.ai/legal). The term
**"you"** shall mean the person or entity identified below.
If you agree to be bound by these terms, fill in the information requested
below and include the filled-in version with your first pull request, under the
folder [`.github/contributors/`](/.github/contributors/). The name of the file
should be your GitHub username, with the extension `.md`. For example, the user
example_user would create the file `.github/contributors/example_user.md`.
Read this agreement carefully before signing. These terms and conditions
constitute a binding legal agreement.
## Contributor Agreement
1. The term "contribution" or "contributed materials" means any source code,
object code, patch, tool, sample, graphic, specification, manual,
documentation, or any other material posted or submitted by you to the project.
2. With respect to any worldwide copyrights, or copyright applications and
registrations, in your contribution:
* you hereby assign to us joint ownership, and to the extent that such
assignment is or becomes invalid, ineffective or unenforceable, you hereby
grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge,
royalty-free, unrestricted license to exercise all rights under those
copyrights. This includes, at our option, the right to sublicense these same
rights to third parties through multiple levels of sublicensees or other
licensing arrangements;
* you agree that each of us can do all things in relation to your
contribution as if each of us were the sole owners, and if one of us makes
a derivative work of your contribution, the one who makes the derivative
work (or has it made) will be the sole owner of that derivative work;
* you agree that you will not assert any moral rights in your contribution
against us, our licensees or transferees;
* you agree that we may register a copyright in your contribution and
exercise all ownership rights associated with it; and
* you agree that neither of us has any duty to consult with, obtain the
consent of, pay or render an accounting to the other for any use or
distribution of your contribution.
3. With respect to any patents you own, or that you can license without payment
to any third party, you hereby grant to us a perpetual, irrevocable,
non-exclusive, worldwide, no-charge, royalty-free license to:
* make, have made, use, sell, offer to sell, import, and otherwise transfer
your contribution in whole or in part, alone or in combination with or
included in any product, work or materials arising out of the project to
which your contribution was submitted, and
* at our option, to sublicense these same rights to third parties through
multiple levels of sublicensees or other licensing arrangements.
4. Except as set out above, you keep all right, title, and interest in your
contribution. The rights that you grant to us under these terms are effective
on the date you first submitted a contribution to us, even if your submission
took place before the date you sign these terms.
5. You covenant, represent, warrant and agree that:
* Each contribution that you submit is and shall be an original work of
authorship and you can legally grant the rights set out in this SCA;
* to the best of your knowledge, each contribution will not violate any
third party's copyrights, trademarks, patents, or other intellectual
property rights; and
* each contribution shall be in compliance with U.S. export control laws and
other applicable export and import laws. You agree to notify us if you
become aware of any circumstance which would make any of the foregoing
representations inaccurate in any respect. We may publicly disclose your
participation in the project, including the fact that you have signed the SCA.
6. This SCA is governed by the laws of the State of California and applicable
U.S. Federal law. Any choice of law rules will not apply.
7. Please place an “x” on one of the applicable statements below. Please do NOT
mark both statements:
* [x] I am signing on behalf of myself as an individual and no other person
or entity, including my employer, has or will have rights with respect to my
contributions.
* [ ] I am signing on behalf of my employer or a legal entity and I have the
actual authority to contractually bind that entity.
## Contributor Details
| Field | Entry |
|------------------------------- |---------------|
| Name | Lucas Terriel |
| Company name (if applicable) | |
| Title or role (if applicable) | |
| Date | 2022-06-20 |
| GitHub username | Lucaterre |
| Website (optional) | |

View File

@ -1,13 +0,0 @@
# Configuration for probot-no-response - https://github.com/probot/no-response
# Number of days of inactivity before an Issue is closed for lack of response
daysUntilClose: 14
# Label requiring a response
responseRequiredLabel: more-info-needed
# Comment to post when closing an Issue for lack of response. Set to `false` to disable
closeComment: >
This issue has been automatically closed because there has been no response
to a request for more information from the original author. With only the
information that is currently in the issue, there's not enough information
to take action. If you're the original author, feel free to reopen the issue
if you have or find the answers needed to investigate further.

.github/spacy_universe_alert.py (new file, 67 lines)
View File

@ -0,0 +1,67 @@
import os
import sys
import json
from datetime import datetime
from slack_sdk.web.client import WebClient
CHANNEL = "#alerts-universe"
SLACK_TOKEN = os.environ.get("SLACK_BOT_TOKEN", "ENV VAR not available!")
DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
client = WebClient(SLACK_TOKEN)
github_context = json.loads(sys.argv[1])
event = github_context['event']
pr_title = event['pull_request']["title"]
pr_link = event['pull_request']["patch_url"].replace(".patch", "")
pr_author_url = event['sender']["html_url"]
pr_author_name = pr_author_url.rsplit('/')[-1]
pr_created_at_dt = datetime.strptime(
event['pull_request']["created_at"],
DATETIME_FORMAT
)
pr_created_at = pr_created_at_dt.strftime("%c")
pr_updated_at_dt = datetime.strptime(
event['pull_request']["updated_at"],
DATETIME_FORMAT
)
pr_updated_at = pr_updated_at_dt.strftime("%c")
blocks = [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "📣 New spaCy Universe Project Alert ✨"
}
},
{
"type": "section",
"fields": [
{
"type": "mrkdwn",
"text": f"*Pull Request:*\n<{pr_link}|{pr_title}>"
},
{
"type": "mrkdwn",
"text": f"*Author:*\n<{pr_author_url}|{pr_author_name}>"
},
{
"type": "mrkdwn",
"text": f"*Created at:*\n {pr_created_at}"
},
{
"type": "mrkdwn",
"text": f"*Last Updated:*\n {pr_updated_at}"
}
]
}
]
client.chat_postMessage(
channel=CHANNEL,
text="spaCy universe project PR alert",
blocks=blocks
)

View File

@ -15,7 +15,7 @@ jobs:
  issue-manager:
    runs-on: ubuntu-latest
    steps:
-     - uses: tiangolo/issue-manager@0.2.1
+     - uses: tiangolo/issue-manager@0.4.0
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          config: >
@ -25,5 +25,11 @@ jobs:
            "message": "This issue has been automatically closed because it was answered and there was no follow-up discussion.",
            "remove_label_on_comment": true,
            "remove_label_on_close": true
+         },
+         "more-info-needed": {
+           "delay": "P7D",
+           "message": "This issue has been automatically closed because there has been no response to a request for more information from the original author. With only the information that is currently in the issue, there's not enough information to take action. If you're the original author, feel free to reopen the issue if you have or find the answers needed to investigate further.",
+           "remove_label_on_comment": true,
+           "remove_label_on_close": true
          }
        }

View File

@ -0,0 +1,30 @@
name: spaCy universe project alert
on:
pull_request_target:
paths:
- "website/meta/universe.json"
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Dump GitHub context
env:
GITHUB_CONTEXT: ${{ toJson(github) }}
PR_NUMBER: ${{github.event.number}}
run: |
echo "$GITHUB_CONTEXT"
- uses: actions/checkout@v1
- uses: actions/setup-python@v1
- name: Install Bernadette app dependency and send an alert
env:
SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
GITHUB_CONTEXT: ${{ toJson(github) }}
CHANNEL: "#alerts-universe"
run: |
pip install slack-sdk==3.17.2 aiohttp==3.8.1
echo "$CHANNEL"
python .github/spacy_universe_alert.py "$GITHUB_CONTEXT"

View File

@ -271,7 +271,8 @@ except: # noqa: E722
 ### Python conventions
-All Python code must be written **compatible with Python 3.6+**.
+All Python code must be written **compatible with Python 3.6+**. More detailed
+code conventions can be found in the [developer docs](https://github.com/explosion/spaCy/blob/master/extra/DEVELOPER_DOCS/Code%20Conventions.md).
 #### I/O and handling paths

View File

@ -16,7 +16,7 @@ production-ready [**training system**](https://spacy.io/usage/training) and easy
 model packaging, deployment and workflow management. spaCy is commercial
 open-source software, released under the MIT license.
-💫 **Version 3.3.1 out now!**
+💫 **Version 3.4.0 out now!**
 [Check out the release notes here.](https://github.com/explosion/spaCy/releases)
 [![Azure Pipelines](https://img.shields.io/azure-devops/build/explosion-ai/public/8/master.svg?logo=azure-pipelines&style=flat-square&label=build)](https://dev.azure.com/explosion-ai/public/_build?definitionId=8)

View File

@ -32,7 +32,7 @@ jobs:
      versionSpec: "3.7"
  - script: |
      pip install flake8==3.9.2
-     python -m flake8 spacy --count --select=E901,E999,F821,F822,F823 --show-source --statistics
+     python -m flake8 spacy --count --select=E901,E999,F821,F822,F823,W605 --show-source --statistics
    displayName: "flake8"
  - job: "Test"

View File

@ -1,6 +1,8 @@
 # build version constraints for use with wheelwright + multibuild
-numpy==1.15.0; python_version<='3.7'
-numpy==1.17.3; python_version=='3.8'
+numpy==1.15.0; python_version<='3.7' and platform_machine!='aarch64'
+numpy==1.19.2; python_version<='3.7' and platform_machine=='aarch64'
+numpy==1.17.3; python_version=='3.8' and platform_machine!='aarch64'
+numpy==1.19.2; python_version=='3.8' and platform_machine=='aarch64'
 numpy==1.19.3; python_version=='3.9'
 numpy==1.21.3; python_version=='3.10'
 numpy; python_version>='3.11'

View File

@ -191,6 +191,8 @@ def load_model(name: str) -> "Language":
     ...
 ```
+Note that we typically put the `from typing` import statements on the first line(s) of the Python module.
 ## Structuring logic
 ### Positional and keyword arguments
@ -275,6 +277,27 @@ If you have to use `try`/`except`, make sure to only include what's **absolutely
 + return [v.strip() for v in value.split(",")]
 ```
+### Numeric comparisons
+For numeric comparisons, as a general rule we always use `<` and `>=` and avoid the usage of `<=` and `>`. This is to ensure we consistently
+apply inclusive lower bounds and exclusive upper bounds, helping to prevent off-by-one errors.
+One exception to this rule is the ternary case. With a chain like
+```python
+if value >= 0 and value < max:
+    ...
+```
+it's fine to rewrite this to the shorter form
+```python
+if 0 <= value < max:
+    ...
+```
+even though this requires the usage of the `<=` operator.
 ### Iteration and comprehensions
 We generally avoid using built-in functions like `filter` or `map` in favor of list or generator comprehensions.
@ -451,10 +474,14 @@ spaCy uses the [`pytest`](http://doc.pytest.org/) framework for testing. Tests f
 When adding tests, make sure to use descriptive names and only test for one behavior at a time. Tests should be grouped into modules dedicated to the same type of functionality and some test modules are organized as directories of test files related to the same larger area of the library, e.g. `matcher` or `tokenizer`.
 Regression tests are tests that refer to bugs reported in specific issues. They should live in the relevant module of the test suite, named according to the issue number (e.g., `test_issue1234.py`), and [marked](https://docs.pytest.org/en/6.2.x/example/markers.html#working-with-custom-markers) appropriately (e.g. `@pytest.mark.issue(1234)`). This system allows us to relate tests for specific bugs back to the original reported issue, which is especially useful if we introduce a regression and a previously passing regression test suddenly fails again. When fixing a bug, it's often useful to create a regression test for it first.
 The test suite also provides [fixtures](https://github.com/explosion/spaCy/blob/master/spacy/tests/conftest.py) for different language tokenizers that can be used as function arguments of the same name and will be passed in automatically. Those should only be used for tests related to those specific languages. We also have [test utility functions](https://github.com/explosion/spaCy/blob/master/spacy/tests/util.py) for common operations, like creating a temporary file.
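To make the regression-test convention concrete, here is a minimal illustrative sketch (not taken from this commit): the issue number is made up, and the `en_tokenizer` fixture is assumed to be the one provided by `spacy/tests/conftest.py`, so the test only runs inside spaCy's own test suite.

```python
import pytest


@pytest.mark.issue(1234)
def test_issue1234(en_tokenizer):
    # Hypothetical regression test for a made-up tokenizer bug reported in
    # issue #1234: a plain two-word string should yield two tokens.
    doc = en_tokenizer("Hello world")
    assert len(doc) == 2
```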
### Testing Cython Code
If you're developing Cython code (`.pyx` files), those extensions will need to be built before the test runner can test that code - otherwise it's going to run the tests with stale code from the last time the extension was built. You can build the extensions locally with `python setup.py build_ext -i`.
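As a rough sketch of that workflow, assuming it is run from the root of a spaCy source checkout, the in-place build can be scripted before invoking the test suite:

```python
import subprocess
import sys

# Rebuild the Cython extensions in place so the tests don't run against
# stale compiled code, then run the spaCy test suite.
subprocess.run([sys.executable, "setup.py", "build_ext", "-i"], check=True)
subprocess.run([sys.executable, "-m", "pytest", "--pyargs", "spacy"], check=True)
```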
 ### Constructing objects and state
 Test functions usually follow the same simple structure: they set up some state, perform the operation you want to test and `assert` conditions that you expect to be true, usually before and after the operation.

View File

@ -16,21 +16,41 @@ To summon the robot, write a github comment on the issue/PR you wish to test. Th
 Some things to note:
-* The `@explosion-bot please` must be the beginning of the command - you cannot add anything in front of this or else the robot won't know how to parse it. Adding anything at the end aside from the test name will also confuse the robot, so keep it simple!
-* The command name (such as `test_gpu`) must be one of the tests that the bot knows how to run. The available commands are documented in the bot's [workflow config](https://github.com/explosion/spaCy/blob/master/.github/workflows/explosionbot.yml#L26) and must match exactly one of the commands listed there.
-* The robot can't do multiple things at once, so if you want it to run multiple tests, you'll have to summon it with one comment per test.
-* For the `test_gpu` command, you can specify an optional thinc branch (from the spaCy repo) or a spaCy branch (from the thinc repo) with either the `--thinc-branch` or `--spacy-branch` flags. By default, the bot will pull in the PR branch from the repo where the command was issued, and the main branch of the other repository. However, if you need to run against another branch, you can say (for example):
-```
-@explosion-bot please test_gpu --thinc-branch develop
-```
-You can also specify a branch from an unmerged PR:
-```
-@explosion-bot please test_gpu --thinc-branch refs/pull/633/head
-```
+- The `@explosion-bot please` must be the beginning of the command - you cannot add anything in front of this or else the robot won't know how to parse it. Adding anything at the end aside from the test name will also confuse the robot, so keep it simple!
+- The command name (such as `test_gpu`) must be one of the tests that the bot knows how to run. The available commands are documented in the bot's [workflow config](https://github.com/explosion/spaCy/blob/master/.github/workflows/explosionbot.yml#L26) and must match exactly one of the commands listed there.
+- The robot can't do multiple things at once, so if you want it to run multiple tests, you'll have to summon it with one comment per test.
+
+### Examples
+
+- Execute spaCy slow GPU tests with a custom thinc branch from a spaCy PR:
+
+  ```
+  @explosion-bot please test_slow_gpu --thinc-branch <branch_name>
+  ```
+
+  `branch_name` can either be a named branch, e.g: `develop`, or an unmerged PR, e.g: `refs/pull/<pr_number>/head`.
+
+- Execute spaCy Transformers GPU tests from a spaCy PR:
+
+  ```
+  @explosion-bot please test_gpu --run-on spacy-transformers --run-on-branch master --spacy-branch current_pr
+  ```
+
+  This will launch the GPU pipeline for the `spacy-transformers` repo on its `master` branch, using the current spaCy PR's branch to build spaCy. The name of the repository passed to `--run-on` is case-sensitive, e.g: use `spaCy` instead of `spacy`.
+
+- General info about supported commands.
+
+  ```
+  @explosion-bot please info
+  ```
+
+- Help text for a specific command
+
+  ```
+  @explosion-bot please <command> --help
+  ```
 ## Troubleshooting
 If the robot isn't responding to commands as expected, you can check its logs in the [Github Action](https://github.com/explosion/spaCy/actions/workflows/explosionbot.yml).
 For each command sent to the bot, there should be a run of the `explosion-bot` workflow. In the `Install and run explosion-bot` step, towards the ends of the logs you should see info about the configuration that the bot was run with, as well as any errors that the bot encountered.

View File

@ -5,8 +5,7 @@ requires = [
"cymem>=2.0.2,<2.1.0", "cymem>=2.0.2,<2.1.0",
"preshed>=3.0.2,<3.1.0", "preshed>=3.0.2,<3.1.0",
"murmurhash>=0.28.0,<1.1.0", "murmurhash>=0.28.0,<1.1.0",
"thinc>=8.1.0.dev3,<8.2.0", "thinc>=8.1.0,<8.2.0",
"pathy",
"numpy>=1.15.0", "numpy>=1.15.0",
] ]
build-backend = "setuptools.build_meta" build-backend = "setuptools.build_meta"

View File

@ -1,9 +1,9 @@
 # Our libraries
-spacy-legacy>=3.0.9,<3.1.0
+spacy-legacy>=3.0.10,<3.1.0
 spacy-loggers>=1.0.0,<2.0.0
 cymem>=2.0.2,<2.1.0
 preshed>=3.0.2,<3.1.0
-thinc>=8.1.0.dev3,<8.2.0
+thinc>=8.1.0,<8.2.0
 ml_datasets>=0.2.0,<0.3.0
 murmurhash>=0.28.0,<1.1.0
 wasabi>=0.9.1,<1.1.0
@ -30,8 +30,9 @@ pytest-timeout>=1.3.0,<2.0.0
 mock>=2.0.0,<3.0.0
 flake8>=3.8.0,<3.10.0
 hypothesis>=3.27.0,<7.0.0
-mypy>=0.910,<=0.960
+mypy>=0.910,<0.970; platform_machine!='aarch64'
 types-dataclasses>=0.1.3; python_version < "3.7"
 types-mock>=0.1.1
 types-requests
+types-setuptools>=57.0.0
 black>=22.0,<23.0

View File

@ -31,28 +31,20 @@ project_urls =
 zip_safe = false
 include_package_data = true
 python_requires = >=3.6
-setup_requires =
-    cython>=0.25,<3.0
-    numpy>=1.15.0
-    # We also need our Cython packages here to compile against
-    cymem>=2.0.2,<2.1.0
-    preshed>=3.0.2,<3.1.0
-    murmurhash>=0.28.0,<1.1.0
-    thinc>=8.1.0.dev3,<8.2.0
 install_requires =
     # Our libraries
-    spacy-legacy>=3.0.9,<3.1.0
+    spacy-legacy>=3.0.10,<3.1.0
     spacy-loggers>=1.0.0,<2.0.0
     murmurhash>=0.28.0,<1.1.0
     cymem>=2.0.2,<2.1.0
     preshed>=3.0.2,<3.1.0
-    thinc>=8.1.0.dev3,<8.2.0
+    thinc>=8.1.0,<8.2.0
     wasabi>=0.9.1,<1.1.0
     srsly>=2.4.3,<3.0.0
     catalogue>=2.0.6,<2.1.0
-    # Third-party dependencies
     typer>=0.3.0,<0.5.0
     pathy>=0.3.5
+    # Third-party dependencies
     tqdm>=4.38.0,<5.0.0
     numpy>=1.15.0
     requests>=2.13.0,<3.0.0
@ -103,14 +95,18 @@ cuda114 =
     cupy-cuda114>=5.0.0b4,<11.0.0
 cuda115 =
     cupy-cuda115>=5.0.0b4,<11.0.0
+cuda116 =
+    cupy-cuda116>=5.0.0b4,<11.0.0
+cuda117 =
+    cupy-cuda117>=5.0.0b4,<11.0.0
 apple =
-    thinc-apple-ops>=0.0.4,<1.0.0
+    thinc-apple-ops>=0.1.0.dev0,<1.0.0
 # Language tokenizers with external dependencies
 ja =
     sudachipy>=0.5.2,!=0.6.1
     sudachidict_core>=20211220
 ko =
-    natto-py==0.9.0
+    mecab-ko>=1.0.0
 th =
     pythainlp>=2.0

View File

@ -61,7 +61,7 @@ MOD_NAMES = [
"spacy.tokens.span_group", "spacy.tokens.span_group",
"spacy.tokens.graph", "spacy.tokens.graph",
"spacy.tokens.morphanalysis", "spacy.tokens.morphanalysis",
"spacy.tokens._retokenize", "spacy.tokens.retokenizer",
"spacy.matcher.matcher", "spacy.matcher.matcher",
"spacy.matcher.phrasematcher", "spacy.matcher.phrasematcher",
"spacy.matcher.dependencymatcher", "spacy.matcher.dependencymatcher",
@ -128,6 +128,8 @@ class build_ext_options:
class build_ext_subclass(build_ext, build_ext_options): class build_ext_subclass(build_ext, build_ext_options):
def build_extensions(self): def build_extensions(self):
if self.parallel is None and os.environ.get("SPACY_NUM_BUILD_JOBS") is not None:
self.parallel = int(os.environ.get("SPACY_NUM_BUILD_JOBS"))
build_ext_options.build_options(self) build_ext_options.build_options(self)
build_ext.build_extensions(self) build_ext.build_extensions(self)

View File

@ -31,25 +31,33 @@ def load(
     name: Union[str, Path],
     *,
     vocab: Union[Vocab, bool] = True,
-    disable: Iterable[str] = util.SimpleFrozenList(),
-    exclude: Iterable[str] = util.SimpleFrozenList(),
+    disable: Union[str, Iterable[str]] = util.SimpleFrozenList(),
+    enable: Union[str, Iterable[str]] = util.SimpleFrozenList(),
+    exclude: Union[str, Iterable[str]] = util.SimpleFrozenList(),
     config: Union[Dict[str, Any], Config] = util.SimpleFrozenDict(),
 ) -> Language:
     """Load a spaCy model from an installed package or a local path.
     name (str): Package name or model path.
     vocab (Vocab): A Vocab object. If True, a vocab is created.
-    disable (Iterable[str]): Names of pipeline components to disable. Disabled
+    disable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to disable. Disabled
         pipes will be loaded but they won't be run unless you explicitly
         enable them by calling nlp.enable_pipe.
-    exclude (Iterable[str]): Names of pipeline components to exclude. Excluded
+    enable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to enable. All other
+        pipes will be disabled (but can be enabled later using nlp.enable_pipe).
+    exclude (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to exclude. Excluded
         components won't be loaded.
     config (Dict[str, Any] / Config): Config overrides as nested dict or dict
         keyed by section values in dot notation.
     RETURNS (Language): The loaded nlp object.
     """
     return util.load_model(
-        name, vocab=vocab, disable=disable, exclude=exclude, config=config
+        name,
+        vocab=vocab,
+        disable=disable,
+        enable=enable,
+        exclude=exclude,
+        config=config,
     )
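For context, a minimal usage sketch of the new `enable` argument, assuming an installed pipeline package such as `en_core_web_sm`:

```python
import spacy

# Only the listed components stay enabled; everything else is loaded but
# disabled and can be switched back on later via nlp.enable_pipe(...).
nlp = spacy.load("en_core_web_sm", enable=["tok2vec", "ner"])
print(nlp.pipe_names)  # active components, e.g. ['tok2vec', 'ner']
print(nlp.disabled)    # components that were loaded but disabled
```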

View File

@ -1,6 +1,6 @@
 # fmt: off
 __title__ = "spacy"
-__version__ = "3.3.0"
+__version__ = "3.4.1"
 __download_url__ = "https://github.com/explosion/spacy-models/releases/download"
 __compatibility__ = "https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json"
 __projects__ = "https://github.com/explosion/projects"

View File

@ -1,98 +1,49 @@
-# Reserve 64 values for flag features
 from . cimport symbols

 cdef enum attr_id_t:
-    NULL_ATTR
-    IS_ALPHA
-    IS_ASCII
-    IS_DIGIT
-    IS_LOWER
-    IS_PUNCT
-    IS_SPACE
-    IS_TITLE
-    IS_UPPER
-    LIKE_URL
-    LIKE_NUM
-    LIKE_EMAIL
-    IS_STOP
-    IS_OOV_DEPRECATED
-    IS_BRACKET
-    IS_QUOTE
-    IS_LEFT_PUNCT
-    IS_RIGHT_PUNCT
-    IS_CURRENCY
-    FLAG19 = 19
-    FLAG20
-    FLAG21
-    FLAG22
-    FLAG23
-    FLAG24
-    FLAG25
-    FLAG26
-    FLAG27
-    FLAG28
-    FLAG29
-    FLAG30
-    FLAG31
-    FLAG32
-    FLAG33
-    FLAG34
-    FLAG35
-    FLAG36
-    FLAG37
-    FLAG38
-    FLAG39
-    FLAG40
-    FLAG41
-    FLAG42
-    FLAG43
-    FLAG44
-    FLAG45
-    FLAG46
-    FLAG47
-    FLAG48
-    FLAG49
-    FLAG50
-    FLAG51
-    FLAG52
-    FLAG53
-    FLAG54
-    FLAG55
-    FLAG56
-    FLAG57
-    FLAG58
-    FLAG59
-    FLAG60
-    FLAG61
-    FLAG62
-    FLAG63
-    ID
-    ORTH
-    LOWER
-    NORM
-    SHAPE
-    PREFIX
-    SUFFIX
-    LENGTH
-    CLUSTER
-    LEMMA
-    POS
-    TAG
-    DEP
-    ENT_IOB
-    ENT_TYPE
-    HEAD
-    SENT_START
-    SPACY
-    PROB
-    LANG
+    NULL_ATTR = 0
+    IS_ALPHA = symbols.IS_ALPHA
+    IS_ASCII = symbols.IS_ASCII
+    IS_DIGIT = symbols.IS_DIGIT
+    IS_LOWER = symbols.IS_LOWER
+    IS_PUNCT = symbols.IS_PUNCT
+    IS_SPACE = symbols.IS_SPACE
+    IS_TITLE = symbols.IS_TITLE
+    IS_UPPER = symbols.IS_UPPER
+    LIKE_URL = symbols.LIKE_URL
+    LIKE_NUM = symbols.LIKE_NUM
+    LIKE_EMAIL = symbols.LIKE_EMAIL
+    IS_STOP = symbols.IS_STOP
+    IS_BRACKET = symbols.IS_BRACKET
+    IS_QUOTE = symbols.IS_QUOTE
+    IS_LEFT_PUNCT = symbols.IS_LEFT_PUNCT
+    IS_RIGHT_PUNCT = symbols.IS_RIGHT_PUNCT
+    IS_CURRENCY = symbols.IS_CURRENCY
+    ID = symbols.ID
+    ORTH = symbols.ORTH
+    LOWER = symbols.LOWER
+    NORM = symbols.NORM
+    SHAPE = symbols.SHAPE
+    PREFIX = symbols.PREFIX
+    SUFFIX = symbols.SUFFIX
+    LENGTH = symbols.LENGTH
+    CLUSTER = symbols.CLUSTER
+    LEMMA = symbols.LEMMA
+    POS = symbols.POS
+    TAG = symbols.TAG
+    DEP = symbols.DEP
+    ENT_IOB = symbols.ENT_IOB
+    ENT_TYPE = symbols.ENT_TYPE
+    HEAD = symbols.HEAD
+    SENT_START = symbols.SENT_START
+    SPACY = symbols.SPACY
+    PROB = symbols.PROB
+    LANG = symbols.LANG
     ENT_KB_ID = symbols.ENT_KB_ID
-    MORPH
+    MORPH = symbols.MORPH
     ENT_ID = symbols.ENT_ID
-    IDX
-    SENT_END
+    IDX = symbols.IDX

View File

@ -16,57 +16,11 @@ IDS = {
"LIKE_NUM": LIKE_NUM, "LIKE_NUM": LIKE_NUM,
"LIKE_EMAIL": LIKE_EMAIL, "LIKE_EMAIL": LIKE_EMAIL,
"IS_STOP": IS_STOP, "IS_STOP": IS_STOP,
"IS_OOV_DEPRECATED": IS_OOV_DEPRECATED,
"IS_BRACKET": IS_BRACKET, "IS_BRACKET": IS_BRACKET,
"IS_QUOTE": IS_QUOTE, "IS_QUOTE": IS_QUOTE,
"IS_LEFT_PUNCT": IS_LEFT_PUNCT, "IS_LEFT_PUNCT": IS_LEFT_PUNCT,
"IS_RIGHT_PUNCT": IS_RIGHT_PUNCT, "IS_RIGHT_PUNCT": IS_RIGHT_PUNCT,
"IS_CURRENCY": IS_CURRENCY, "IS_CURRENCY": IS_CURRENCY,
"FLAG19": FLAG19,
"FLAG20": FLAG20,
"FLAG21": FLAG21,
"FLAG22": FLAG22,
"FLAG23": FLAG23,
"FLAG24": FLAG24,
"FLAG25": FLAG25,
"FLAG26": FLAG26,
"FLAG27": FLAG27,
"FLAG28": FLAG28,
"FLAG29": FLAG29,
"FLAG30": FLAG30,
"FLAG31": FLAG31,
"FLAG32": FLAG32,
"FLAG33": FLAG33,
"FLAG34": FLAG34,
"FLAG35": FLAG35,
"FLAG36": FLAG36,
"FLAG37": FLAG37,
"FLAG38": FLAG38,
"FLAG39": FLAG39,
"FLAG40": FLAG40,
"FLAG41": FLAG41,
"FLAG42": FLAG42,
"FLAG43": FLAG43,
"FLAG44": FLAG44,
"FLAG45": FLAG45,
"FLAG46": FLAG46,
"FLAG47": FLAG47,
"FLAG48": FLAG48,
"FLAG49": FLAG49,
"FLAG50": FLAG50,
"FLAG51": FLAG51,
"FLAG52": FLAG52,
"FLAG53": FLAG53,
"FLAG54": FLAG54,
"FLAG55": FLAG55,
"FLAG56": FLAG56,
"FLAG57": FLAG57,
"FLAG58": FLAG58,
"FLAG59": FLAG59,
"FLAG60": FLAG60,
"FLAG61": FLAG61,
"FLAG62": FLAG62,
"FLAG63": FLAG63,
"ID": ID, "ID": ID,
"ORTH": ORTH, "ORTH": ORTH,
"LOWER": LOWER, "LOWER": LOWER,
@ -92,12 +46,11 @@ IDS = {
} }
# ATTR IDs, in order of the symbol NAMES = {v: k for k, v in IDS.items()}
NAMES = [key for key, value in sorted(IDS.items(), key=lambda item: item[1])]
locals().update(IDS) locals().update(IDS)
-def intify_attrs(stringy_attrs, strings_map=None, _do_deprecated=False):
+def intify_attrs(stringy_attrs, strings_map=None):
     """
     Normalize a dictionary of attributes, converting them to ints.
@ -109,75 +62,6 @@ def intify_attrs(stringy_attrs, strings_map=None, _do_deprecated=False):
     converted to ints.
     """
     inty_attrs = {}
if _do_deprecated:
if "F" in stringy_attrs:
stringy_attrs["ORTH"] = stringy_attrs.pop("F")
if "L" in stringy_attrs:
stringy_attrs["LEMMA"] = stringy_attrs.pop("L")
if "pos" in stringy_attrs:
stringy_attrs["TAG"] = stringy_attrs.pop("pos")
if "morph" in stringy_attrs:
morphs = stringy_attrs.pop("morph")
if "number" in stringy_attrs:
stringy_attrs.pop("number")
if "tenspect" in stringy_attrs:
stringy_attrs.pop("tenspect")
morph_keys = [
"PunctType",
"PunctSide",
"Other",
"Degree",
"AdvType",
"Number",
"VerbForm",
"PronType",
"Aspect",
"Tense",
"PartType",
"Poss",
"Hyph",
"ConjType",
"NumType",
"Foreign",
"VerbType",
"NounType",
"Gender",
"Mood",
"Negative",
"Tense",
"Voice",
"Abbr",
"Derivation",
"Echo",
"Foreign",
"NameType",
"NounType",
"NumForm",
"NumValue",
"PartType",
"Polite",
"StyleVariant",
"PronType",
"AdjType",
"Person",
"Variant",
"AdpType",
"Reflex",
"Negative",
"Mood",
"Aspect",
"Case",
"Polarity",
"PrepCase",
"Animacy", # U20
]
for key in morph_keys:
if key in stringy_attrs:
stringy_attrs.pop(key)
elif key.lower() in stringy_attrs:
stringy_attrs.pop(key.lower())
elif key.upper() in stringy_attrs:
stringy_attrs.pop(key.upper())
     for name, value in stringy_attrs.items():
         int_key = intify_attr(name)
         if int_key is not None:

View File

@ -462,6 +462,23 @@ def git_sparse_checkout(repo, subpath, dest, branch):
     shutil.move(str(source_path), str(dest))
def git_repo_branch_exists(repo: str, branch: str) -> bool:
"""Uses 'git ls-remote' to check if a repository and branch exists
repo (str): URL to get repo.
branch (str): Branch on repo to check.
RETURNS (bool): True if repo:branch exists.
"""
get_git_version()
cmd = f"git ls-remote {repo} {branch}"
# We might be tempted to use `--exit-code` with `git ls-remote`, but
# `run_command` handles the `returncode` for us, so we'll rely on
# the fact that stdout returns '' if the requested branch doesn't exist
ret = run_command(cmd, capture=True)
exists = ret.stdout != ""
return exists
 def get_git_version(
     error: str = "Could not run 'git'. Make sure it's installed and the executable is available.",
 ) -> Tuple[int, int]:
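An illustrative sketch of how the new helper might be called, mirroring the branch fallback used by `spacy project clone` later in this diff. The import path assumes the function lives in `spacy/cli/_util.py`, and the call needs network access to run `git ls-remote`:

```python
from spacy.cli._util import git_repo_branch_exists

repo = "https://github.com/explosion/projects"
# Try the common default branch names in order, as the clone command does.
branch = next(
    (b for b in ("main", "master") if git_repo_branch_exists(repo, b)),
    None,
)
print(branch)
```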

View File

@ -10,7 +10,7 @@ import math
 from ._util import app, Arg, Opt, show_validation_error, parse_config_overrides
 from ._util import import_code, debug_cli
-from ..training import Example
+from ..training import Example, remove_bilu_prefix
 from ..training.initialize import get_sourced_components
 from ..schemas import ConfigSchemaTraining
 from ..pipeline._parser_internals import nonproj
@ -361,7 +361,7 @@ def debug_data(
             if label != "-"
         ]
         labels_with_counts = _format_labels(labels_with_counts, counts=True)
-        msg.text(f"Labels in train data: {_format_labels(labels)}", show=verbose)
+        msg.text(f"Labels in train data: {labels_with_counts}", show=verbose)
         missing_labels = model_labels - labels
         if missing_labels:
             msg.warn(
@ -758,9 +758,9 @@ def _compile_gold(
                 # "Illegal" whitespace entity
                 data["ws_ents"] += 1
             if label.startswith(("B-", "U-")):
-                combined_label = label.split("-")[1]
+                combined_label = remove_bilu_prefix(label)
                 data["ner"][combined_label] += 1
-            if sent_starts[i] == True and label.startswith(("I-", "L-")):
+            if sent_starts[i] and label.startswith(("I-", "L-")):
                 data["boundary_cross_ents"] += 1
             elif label == "-":
                 data["ner"]["-"] += 1
@ -908,7 +908,7 @@ def _get_examples_without_label(
     for eg in data:
         if component == "ner":
             labels = [
-                label.split("-")[1]
+                remove_bilu_prefix(label)
                 for label in eg.get_aligned_ner()
                 if label not in ("O", "-", None)
             ]
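A minimal sketch of the helper swapped in above, assuming `remove_bilu_prefix` is importable from `spacy.training` as the new import suggests:

```python
from spacy.training import remove_bilu_prefix

# Strips the BILU scheme prefix from an NER tag, leaving only the label.
assert remove_bilu_prefix("B-PERSON") == "PERSON"
assert remove_bilu_prefix("U-ORG") == "ORG"
```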

View File

@ -7,6 +7,7 @@ import typer
 from ._util import app, Arg, Opt, WHEEL_SUFFIX, SDIST_SUFFIX
 from .. import about
 from ..util import is_package, get_minor_version, run_command
+from ..util import is_prerelease_version
 from ..errors import OLD_MODEL_SHORTCUTS
@ -19,7 +20,7 @@ def download_cli(
     ctx: typer.Context,
     model: str = Arg(..., help="Name of pipeline package to download"),
     direct: bool = Opt(False, "--direct", "-d", "-D", help="Force direct download of name + version"),
-    sdist: bool = Opt(False, "--sdist", "-S", help="Download sdist (.tar.gz) archive instead of pre-built binary wheel")
+    sdist: bool = Opt(False, "--sdist", "-S", help="Download sdist (.tar.gz) archive instead of pre-built binary wheel"),
     # fmt: on
 ):
     """
@ -35,7 +36,12 @@ def download_cli(
     download(model, direct, sdist, *ctx.args)
-def download(model: str, direct: bool = False, sdist: bool = False, *pip_args) -> None:
+def download(
+    model: str,
+    direct: bool = False,
+    sdist: bool = False,
+    *pip_args,
+) -> None:
     if (
         not (is_package("spacy") or is_package("spacy-nightly"))
         and "--no-deps" not in pip_args
@ -49,13 +55,10 @@ def download(model: str, direct: bool = False, sdist: bool = False, *pip_args) -
"dependencies, you'll have to install them manually." "dependencies, you'll have to install them manually."
) )
pip_args = pip_args + ("--no-deps",) pip_args = pip_args + ("--no-deps",)
suffix = SDIST_SUFFIX if sdist else WHEEL_SUFFIX
dl_tpl = "{m}-{v}/{m}-{v}{s}#egg={m}=={v}"
if direct: if direct:
components = model.split("-") components = model.split("-")
model_name = "".join(components[:-1]) model_name = "".join(components[:-1])
version = components[-1] version = components[-1]
download_model(dl_tpl.format(m=model_name, v=version, s=suffix), pip_args)
else: else:
model_name = model model_name = model
if model in OLD_MODEL_SHORTCUTS: if model in OLD_MODEL_SHORTCUTS:
@ -66,15 +69,31 @@ def download(model: str, direct: bool = False, sdist: bool = False, *pip_args) -
model_name = OLD_MODEL_SHORTCUTS[model] model_name = OLD_MODEL_SHORTCUTS[model]
compatibility = get_compatibility() compatibility = get_compatibility()
version = get_version(model_name, compatibility) version = get_version(model_name, compatibility)
download_model(dl_tpl.format(m=model_name, v=version, s=suffix), pip_args)
filename = get_model_filename(model_name, version, sdist)
download_model(filename, pip_args)
msg.good( msg.good(
"Download and installation successful", "Download and installation successful",
f"You can now load the package via spacy.load('{model_name}')", f"You can now load the package via spacy.load('{model_name}')",
) )
def get_model_filename(model_name: str, version: str, sdist: bool = False) -> str:
dl_tpl = "{m}-{v}/{m}-{v}{s}"
egg_tpl = "#egg={m}=={v}"
suffix = SDIST_SUFFIX if sdist else WHEEL_SUFFIX
filename = dl_tpl.format(m=model_name, v=version, s=suffix)
if sdist:
filename += egg_tpl.format(m=model_name, v=version)
return filename
 def get_compatibility() -> dict:
-    version = get_minor_version(about.__version__)
+    if is_prerelease_version(about.__version__):
+        version: Optional[str] = about.__version__
+    else:
+        version = get_minor_version(about.__version__)
     r = requests.get(about.__compatibility__)
     if r.status_code != 200:
         msg.fail(
@ -101,6 +120,11 @@ def get_version(model: str, comp: dict) -> str:
     return comp[model][0]
def get_latest_version(model: str) -> str:
comp = get_compatibility()
return get_version(model, comp)
 def download_model(
     filename: str, user_pip_args: Optional[Sequence[str]] = None
 ) -> None:

View File

@ -1,10 +1,13 @@
 from typing import Optional, Dict, Any, Union, List
 import platform
+import pkg_resources
+import json
 from pathlib import Path
 from wasabi import Printer, MarkdownRenderer
 import srsly
 from ._util import app, Arg, Opt, string_to_list
+from .download import get_model_filename, get_latest_version
 from .. import util
 from .. import about
@ -16,6 +19,7 @@ def info_cli(
     markdown: bool = Opt(False, "--markdown", "-md", help="Generate Markdown for GitHub issues"),
     silent: bool = Opt(False, "--silent", "-s", "-S", help="Don't print anything (just return)"),
     exclude: str = Opt("labels", "--exclude", "-e", help="Comma-separated keys to exclude from the print-out"),
+    url: bool = Opt(False, "--url", "-u", help="Print the URL to download the most recent compatible version of the pipeline"),
     # fmt: on
 ):
     """
@ -23,10 +27,19 @@ def info_cli(
     print its meta information. Flag --markdown prints details in Markdown for easy
     copy-pasting to GitHub issues.
+    Flag --url prints only the download URL of the most recent compatible
+    version of the pipeline.
     DOCS: https://spacy.io/api/cli#info
     """
     exclude = string_to_list(exclude)
-    info(model, markdown=markdown, silent=silent, exclude=exclude)
+    info(
+        model,
+        markdown=markdown,
+        silent=silent,
+        exclude=exclude,
+        url=url,
+    )
 def info(
@ -35,11 +48,20 @@ def info(
     markdown: bool = False,
     silent: bool = True,
     exclude: Optional[List[str]] = None,
+    url: bool = False,
 ) -> Union[str, dict]:
     msg = Printer(no_print=silent, pretty=not silent)
     if not exclude:
         exclude = []
-    if model:
+    if url:
if model is not None:
title = f"Download info for pipeline '{model}'"
data = info_model_url(model)
print(data["download_url"])
return data
else:
msg.fail("--url option requires a pipeline name", exits=1)
elif model:
title = f"Info about pipeline '{model}'" title = f"Info about pipeline '{model}'"
data = info_model(model, silent=silent) data = info_model(model, silent=silent)
else: else:
@ -99,11 +121,43 @@ def info_model(model: str, *, silent: bool = True) -> Dict[str, Any]:
meta["source"] = str(model_path.resolve()) meta["source"] = str(model_path.resolve())
else: else:
meta["source"] = str(model_path) meta["source"] = str(model_path)
download_url = info_installed_model_url(model)
if download_url:
meta["download_url"] = download_url
return { return {
k: v for k, v in meta.items() if k not in ("accuracy", "performance", "speed") k: v for k, v in meta.items() if k not in ("accuracy", "performance", "speed")
} }
def info_installed_model_url(model: str) -> Optional[str]:
"""Given a pipeline name, get the download URL if available, otherwise
return None.
This is only available for pipelines installed as modules that have
dist-info available.
"""
try:
dist = pkg_resources.get_distribution(model)
data = json.loads(dist.get_metadata("direct_url.json"))
return data["url"]
except pkg_resources.DistributionNotFound:
# no such package
return None
except Exception:
# something else, like no file or invalid JSON
return None
def info_model_url(model: str) -> Dict[str, Any]:
"""Return the download URL for the latest version of a pipeline."""
version = get_latest_version(model)
filename = get_model_filename(model, version)
download_url = about.__download_url__ + "/" + filename
release_tpl = "https://github.com/explosion/spacy-models/releases/tag/{m}-{v}"
release_url = release_tpl.format(m=model, v=version)
return {"download_url": download_url, "release_url": release_url}
 def get_markdown(
     data: Dict[str, Any],
     title: Optional[str] = None,
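A hedged usage sketch for the URL helpers added above; it assumes the `spacy.cli.info` import path and needs network access to fetch the compatibility table:

```python
from spacy.cli.info import info_model_url

data = info_model_url("en_core_web_sm")
print(data["download_url"])  # download URL for the latest compatible version
print(data["release_url"])   # GitHub release page for that model version
```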

View File

@ -10,6 +10,7 @@ from jinja2 import Template
 from .. import util
 from ..language import DEFAULT_CONFIG_PRETRAIN_PATH
 from ..schemas import RecommendationSchema
+from ..util import SimpleFrozenList
 from ._util import init_cli, Arg, Opt, show_validation_error, COMMAND
 from ._util import string_to_list, import_code
@ -24,16 +25,30 @@ class Optimizations(str, Enum):
accuracy = "accuracy" accuracy = "accuracy"
class InitValues:
"""
Default values for initialization. Dedicated class to allow synchronized default values for init_config_cli() and
init_config(), i.e. initialization calls via CLI respectively Python.
"""
lang = "en"
pipeline = SimpleFrozenList(["tagger", "parser", "ner"])
optimize = Optimizations.efficiency
gpu = False
pretraining = False
force_overwrite = False
@init_cli.command("config") @init_cli.command("config")
def init_config_cli( def init_config_cli(
# fmt: off # fmt: off
output_file: Path = Arg(..., help="File to save the config to or - for stdout (will only output config and no additional logging info)", allow_dash=True), output_file: Path = Arg(..., help="File to save the config to or - for stdout (will only output config and no additional logging info)", allow_dash=True),
lang: str = Opt("en", "--lang", "-l", help="Two-letter code of the language to use"), lang: str = Opt(InitValues.lang, "--lang", "-l", help="Two-letter code of the language to use"),
pipeline: str = Opt("tagger,parser,ner", "--pipeline", "-p", help="Comma-separated names of trainable pipeline components to include (without 'tok2vec' or 'transformer')"), pipeline: str = Opt(",".join(InitValues.pipeline), "--pipeline", "-p", help="Comma-separated names of trainable pipeline components to include (without 'tok2vec' or 'transformer')"),
optimize: Optimizations = Opt(Optimizations.efficiency.value, "--optimize", "-o", help="Whether to optimize for efficiency (faster inference, smaller model, lower memory consumption) or higher accuracy (potentially larger and slower model). This will impact the choice of architecture, pretrained weights and related hyperparameters."), optimize: Optimizations = Opt(InitValues.optimize, "--optimize", "-o", help="Whether to optimize for efficiency (faster inference, smaller model, lower memory consumption) or higher accuracy (potentially larger and slower model). This will impact the choice of architecture, pretrained weights and related hyperparameters."),
gpu: bool = Opt(False, "--gpu", "-G", help="Whether the model can run on GPU. This will impact the choice of architecture, pretrained weights and related hyperparameters."), gpu: bool = Opt(InitValues.gpu, "--gpu", "-G", help="Whether the model can run on GPU. This will impact the choice of architecture, pretrained weights and related hyperparameters."),
pretraining: bool = Opt(False, "--pretraining", "-pt", help="Include config for pretraining (with 'spacy pretrain')"), pretraining: bool = Opt(InitValues.pretraining, "--pretraining", "-pt", help="Include config for pretraining (with 'spacy pretrain')"),
force_overwrite: bool = Opt(False, "--force", "-F", help="Force overwriting the output file"), force_overwrite: bool = Opt(InitValues.force_overwrite, "--force", "-F", help="Force overwriting the output file"),
# fmt: on # fmt: on
): ):
""" """
@ -133,11 +148,11 @@ def fill_config(
 def init_config(
     *,
-    lang: str,
-    pipeline: List[str],
-    optimize: str,
-    gpu: bool,
-    pretraining: bool = False,
+    lang: str = InitValues.lang,
+    pipeline: List[str] = InitValues.pipeline,
+    optimize: str = InitValues.optimize,
+    gpu: bool = InitValues.gpu,
+    pretraining: bool = InitValues.pretraining,
     silent: bool = True,
 ) -> Config:
     msg = Printer(no_print=silent)
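To illustrate the point of `InitValues` (shared defaults between the CLI and the Python API), a small sketch, assuming `init_config` is importable from `spacy.cli.init_config`:

```python
from spacy.cli.init_config import init_config

# With the synchronized defaults, calling init_config() with no arguments
# should mirror `spacy init config` run with its CLI defaults
# (lang "en", pipeline "tagger,parser,ner", optimized for efficiency).
config = init_config()
print(config["nlp"]["lang"])
print(config["nlp"]["pipeline"])
```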

View File

@ -61,7 +61,7 @@ def pretrain_cli(
         # TODO: What's the solution here? How do we handle optional blocks?
         msg.fail("The [pretraining] block in your config is empty", exits=1)
     if not output_dir.exists():
-        output_dir.mkdir()
+        output_dir.mkdir(parents=True)
         msg.good(f"Created output directory: {output_dir}")
     # Save non-interpolated config
     raw_config.to_disk(output_dir / "config.cfg")

View File

@ -7,11 +7,11 @@ import re
 from ... import about
 from ...util import ensure_path
 from .._util import project_cli, Arg, Opt, COMMAND, PROJECT_FILE
-from .._util import git_checkout, get_git_version
+from .._util import git_checkout, get_git_version, git_repo_branch_exists
 DEFAULT_REPO = about.__projects__
 DEFAULT_PROJECTS_BRANCH = about.__projects_branch__
-DEFAULT_BRANCH = "master"
+DEFAULT_BRANCHES = ["main", "master"]
 @project_cli.command("clone")
@ -20,7 +20,7 @@ def project_clone_cli(
     name: str = Arg(..., help="The name of the template to clone"),
     dest: Optional[Path] = Arg(None, help="Where to clone the project. Defaults to current working directory", exists=False),
     repo: str = Opt(DEFAULT_REPO, "--repo", "-r", help="The repository to clone from"),
-    branch: Optional[str] = Opt(None, "--branch", "-b", help="The branch to clone from"),
+    branch: Optional[str] = Opt(None, "--branch", "-b", help=f"The branch to clone from. If not provided, will attempt {', '.join(DEFAULT_BRANCHES)}"),
     sparse_checkout: bool = Opt(False, "--sparse", "-S", help="Use sparse Git checkout to only check out and clone the files needed. Requires Git v22.2+.")
     # fmt: on
 ):
@ -33,9 +33,25 @@ def project_clone_cli(
""" """
if dest is None: if dest is None:
dest = Path.cwd() / Path(name).parts[-1] dest = Path.cwd() / Path(name).parts[-1]
if repo == DEFAULT_REPO and branch is None:
branch = DEFAULT_PROJECTS_BRANCH
if branch is None: if branch is None:
# If it's a user repo, we want to default to other branch for default_branch in DEFAULT_BRANCHES:
branch = DEFAULT_PROJECTS_BRANCH if repo == DEFAULT_REPO else DEFAULT_BRANCH if git_repo_branch_exists(repo, default_branch):
branch = default_branch
break
if branch is None:
default_branches_msg = ", ".join(f"'{b}'" for b in DEFAULT_BRANCHES)
msg.fail(
"No branch provided and attempted default "
f"branches {default_branches_msg} do not exist.",
exits=1,
)
else:
if not git_repo_branch_exists(repo, branch):
msg.fail(f"repo: {repo} (branch: {branch}) does not exist.", exits=1)
assert isinstance(branch, str)
project_clone(name, dest, repo=repo, branch=branch, sparse_checkout=sparse_checkout) project_clone(name, dest, repo=repo, branch=branch, sparse_checkout=sparse_checkout)
@ -61,9 +77,9 @@ def project_clone(
     try:
         git_checkout(repo, name, dest, branch=branch, sparse=sparse_checkout)
     except subprocess.CalledProcessError:
-        err = f"Could not clone '{name}' from repo '{repo_name}'"
+        err = f"Could not clone '{name}' from repo '{repo_name}' (branch '{branch}')"
         msg.fail(err, exits=1)
-    msg.good(f"Cloned '{name}' from {repo_name}", project_dir)
+    msg.good(f"Cloned '{name}' from '{repo_name}' (branch '{branch}')", project_dir)
     if not (project_dir / PROJECT_FILE).exists():
         msg.warn(f"No {PROJECT_FILE} found in directory")
     else:

View File

@ -123,7 +123,8 @@ def app(environ, start_response):
 def parse_deps(orig_doc: Doc, options: Dict[str, Any] = {}) -> Dict[str, Any]:
     """Generate dependency parse in {'words': [], 'arcs': []} format.
-    doc (Doc): Document do parse.
+    orig_doc (Doc): Document to parse.
+    options (Dict[str, Any]): Dependency parse specific visualisation options.
     RETURNS (dict): Generated dependency parse keyed by words and arcs.
     """
     doc = Doc(orig_doc.vocab).from_bytes(
@ -209,7 +210,7 @@ def parse_ents(doc: Doc, options: Dict[str, Any] = {}) -> Dict[str, Any]:
 def parse_spans(doc: Doc, options: Dict[str, Any] = {}) -> Dict[str, Any]:
-    """Generate spans in [{start: i, end: i, label: 'label'}] format.
+    """Generate spans in [{start_token: i, end_token: i, label: 'label'}] format.
     doc (Doc): Document to parse.
     options (Dict[str, any]): Span-specific visualisation options.

View File

@ -64,8 +64,11 @@ class SpanRenderer:
         # Set up how the text and labels will be rendered
         self.direction = DEFAULT_DIR
         self.lang = DEFAULT_LANG
+        # These values are in px
         self.top_offset = options.get("top_offset", 40)
-        self.top_offset_step = options.get("top_offset_step", 17)
+        # This is how far under the top offset the span labels appear
+        self.span_label_offset = options.get("span_label_offset", 20)
+        self.offset_step = options.get("top_offset_step", 17)
         # Set up which templates will be used
         template = options.get("template")
@ -127,26 +130,56 @@ class SpanRenderer:
title (str / None): Document title set in Doc.user_data['title']. title (str / None): Document title set in Doc.user_data['title'].
""" """
per_token_info = [] per_token_info = []
# we must sort so that we can correctly describe when spans need to "stack"
# which is determined by their start token, then span length (longer spans on top),
# then break any remaining ties with the span label
spans = sorted(
spans,
key=lambda s: (
s["start_token"],
-(s["end_token"] - s["start_token"]),
s["label"],
),
)
for s in spans:
# this is the vertical 'slot' that the span will be rendered in
# vertical_position = span_label_offset + (offset_step * (slot - 1))
s["render_slot"] = 0
for idx, token in enumerate(tokens): for idx, token in enumerate(tokens):
# Identify if a token belongs to a Span (and which) and if it's a # Identify if a token belongs to a Span (and which) and if it's a
# start token of said Span. We'll use this for the final HTML render # start token of said Span. We'll use this for the final HTML render
token_markup: Dict[str, Any] = {} token_markup: Dict[str, Any] = {}
token_markup["text"] = token token_markup["text"] = token
concurrent_spans = 0
entities = [] entities = []
for span in spans: for span in spans:
ent = {} ent = {}
if span["start_token"] <= idx < span["end_token"]: if span["start_token"] <= idx < span["end_token"]:
concurrent_spans += 1
span_start = idx == span["start_token"]
ent["label"] = span["label"] ent["label"] = span["label"]
ent["is_start"] = True if idx == span["start_token"] else False ent["is_start"] = span_start
if span_start:
# When the span starts, we need to know how many other
# spans are on the 'span stack' and will be rendered.
# This value becomes the vertical render slot for this entire span
span["render_slot"] = concurrent_spans
ent["render_slot"] = span["render_slot"]
kb_id = span.get("kb_id", "") kb_id = span.get("kb_id", "")
kb_url = span.get("kb_url", "#") kb_url = span.get("kb_url", "#")
ent["kb_link"] = ( ent["kb_link"] = (
TPL_KB_LINK.format(kb_id=kb_id, kb_url=kb_url) if kb_id else "" TPL_KB_LINK.format(kb_id=kb_id, kb_url=kb_url) if kb_id else ""
) )
entities.append(ent) entities.append(ent)
else:
# We don't specifically need to do this since we loop
# over tokens and spans sorted by their start_token,
# so we'll never use a span again after the last token it appears in,
# but if we were to use these spans again we'd want to make sure
# this value was reset correctly.
span["render_slot"] = 0
token_markup["entities"] = entities token_markup["entities"] = entities
per_token_info.append(token_markup) per_token_info.append(token_markup)
markup = self._render_markup(per_token_info) markup = self._render_markup(per_token_info)
markup = TPL_SPANS.format(content=markup, dir=self.direction) markup = TPL_SPANS.format(content=markup, dir=self.direction)
if title: if title:
@ -157,12 +190,24 @@ class SpanRenderer:
"""Render the markup from per-token information""" """Render the markup from per-token information"""
markup = "" markup = ""
for token in per_token_info: for token in per_token_info:
entities = sorted(token["entities"], key=lambda d: d["label"]) entities = sorted(token["entities"], key=lambda d: d["render_slot"])
if entities: # Whitespace tokens disrupt the vertical space (no line height) so that the
# span indicators get misaligned. We don't render them as individual
# tokens anyway, so we'll just not display a span indicator either.
is_whitespace = token["text"].strip() == ""
if entities and not is_whitespace:
slices = self._get_span_slices(token["entities"]) slices = self._get_span_slices(token["entities"])
starts = self._get_span_starts(token["entities"]) starts = self._get_span_starts(token["entities"])
total_height = (
self.top_offset
+ self.span_label_offset
+ (self.offset_step * (len(entities) - 1))
)
markup += self.span_template.format( markup += self.span_template.format(
text=token["text"], span_slices=slices, span_starts=starts text=token["text"],
span_slices=slices,
span_starts=starts,
total_height=total_height,
) )
else: else:
markup += escape_html(token["text"] + " ") markup += escape_html(token["text"] + " ")
@ -171,10 +216,18 @@ class SpanRenderer:
def _get_span_slices(self, entities: List[Dict]) -> str: def _get_span_slices(self, entities: List[Dict]) -> str:
"""Get the rendered markup of all Span slices""" """Get the rendered markup of all Span slices"""
span_slices = [] span_slices = []
for entity, step in zip(entities, itertools.count(step=self.top_offset_step)): for entity in entities:
# rather than iterate over multiples of offset_step, we use entity['render_slot']
# to determine the vertical position, since that tells where
# the span starts vertically so we can extend it horizontally,
# past other spans that might have already ended
color = self.colors.get(entity["label"].upper(), self.default_color) color = self.colors.get(entity["label"].upper(), self.default_color)
top_offset = self.top_offset + (
self.offset_step * (entity["render_slot"] - 1)
)
span_slice = self.span_slice_template.format( span_slice = self.span_slice_template.format(
bg=color, top_offset=self.top_offset + step bg=color,
top_offset=top_offset,
) )
span_slices.append(span_slice) span_slices.append(span_slice)
return "".join(span_slices) return "".join(span_slices)
@ -182,12 +235,15 @@ class SpanRenderer:
def _get_span_starts(self, entities: List[Dict]) -> str: def _get_span_starts(self, entities: List[Dict]) -> str:
"""Get the rendered markup of all Span start tokens""" """Get the rendered markup of all Span start tokens"""
span_starts = [] span_starts = []
for entity, step in zip(entities, itertools.count(step=self.top_offset_step)): for entity in entities:
color = self.colors.get(entity["label"].upper(), self.default_color) color = self.colors.get(entity["label"].upper(), self.default_color)
top_offset = self.top_offset + (
self.offset_step * (entity["render_slot"] - 1)
)
span_start = ( span_start = (
self.span_start_template.format( self.span_start_template.format(
bg=color, bg=color,
top_offset=self.top_offset + step, top_offset=top_offset,
label=entity["label"], label=entity["label"],
kb_link=entity["kb_link"], kb_link=entity["kb_link"],
) )

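The per-span `render_slot` bookkeeping above replaces the old stepping through multiples of `top_offset_step`. A standalone sketch of the offset arithmetic, using the default pixel values from this diff (40/20/17), makes the layout easier to check:

TOP_OFFSET = 40         # default `top_offset`, in px
SPAN_LABEL_OFFSET = 20  # default `span_label_offset`, in px
OFFSET_STEP = 17        # default `top_offset_step`, in px


def slice_offset(render_slot: int) -> int:
    # Vertical position of a span slice or start marker in a given slot.
    return TOP_OFFSET + OFFSET_STEP * (render_slot - 1)


def token_height(n_entities: int) -> int:
    # Height reserved for a token covered by `n_entities` overlapping spans,
    # leaving room for the label row under the deepest slice.
    return TOP_OFFSET + SPAN_LABEL_OFFSET + OFFSET_STEP * (n_entities - 1)


assert slice_offset(1) == 40   # first span sits at the base offset
assert slice_offset(3) == 74   # a third concurrent span sits two steps lower
assert token_height(3) == 94   # and the token reserves space for all three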

@ -67,7 +67,7 @@ TPL_SPANS = """
""" """
TPL_SPAN = """ TPL_SPAN = """
<span style="font-weight: bold; display: inline-block; position: relative;"> <span style="font-weight: bold; display: inline-block; position: relative; height: {total_height}px;">
{text} {text}
{span_slices} {span_slices}
{span_starts} {span_starts}


@ -16,8 +16,8 @@ def setup_default_warnings():
filter_warning("ignore", error_msg="numpy.dtype size changed") # noqa filter_warning("ignore", error_msg="numpy.dtype size changed") # noqa
filter_warning("ignore", error_msg="numpy.ufunc size changed") # noqa filter_warning("ignore", error_msg="numpy.ufunc size changed") # noqa
# warn about entity_ruler & matcher having no patterns only once # warn about entity_ruler, span_ruler & matcher having no patterns only once
for pipe in ["matcher", "entity_ruler"]: for pipe in ["matcher", "entity_ruler", "span_ruler"]:
filter_warning("once", error_msg=Warnings.W036.format(name=pipe)) filter_warning("once", error_msg=Warnings.W036.format(name=pipe))
# warn once about lemmatizer without required POS # warn once about lemmatizer without required POS
@ -209,6 +209,9 @@ class Warnings(metaclass=ErrorsWithCodes):
"Only the last span group will be loaded under " "Only the last span group will be loaded under "
"Doc.spans['{group_name}']. Skipping span group with values: " "Doc.spans['{group_name}']. Skipping span group with values: "
"{group_values}") "{group_values}")
W121 = ("Attempting to trace non-existent method '{method}' in pipe '{pipe}'")
W122 = ("Couldn't trace method '{method}' in pipe '{pipe}'. This can happen if the pipe class "
"is a Cython extension type.")
class Errors(metaclass=ErrorsWithCodes): class Errors(metaclass=ErrorsWithCodes):
@ -227,8 +230,9 @@ class Errors(metaclass=ErrorsWithCodes):
"initialized component.") "initialized component.")
E004 = ("Can't set up pipeline component: a factory for '{name}' already " E004 = ("Can't set up pipeline component: a factory for '{name}' already "
"exists. Existing factory: {func}. New factory: {new_func}") "exists. Existing factory: {func}. New factory: {new_func}")
E005 = ("Pipeline component '{name}' returned None. If you're using a " E005 = ("Pipeline component '{name}' returned {returned_type} instead of a "
"custom component, maybe you forgot to return the processed Doc?") "Doc. If you're using a custom component, maybe you forgot to "
"return the processed Doc?")
E006 = ("Invalid constraints for adding pipeline component. You can only " E006 = ("Invalid constraints for adding pipeline component. You can only "
"set one of the following: before (component name or index), " "set one of the following: before (component name or index), "
"after (component name or index), first (True) or last (True). " "after (component name or index), first (True) or last (True). "
@ -386,7 +390,7 @@ class Errors(metaclass=ErrorsWithCodes):
"consider using doc.spans instead.") "consider using doc.spans instead.")
E106 = ("Can't find `doc._.{attr}` attribute specified in the underscore " E106 = ("Can't find `doc._.{attr}` attribute specified in the underscore "
"settings: {opts}") "settings: {opts}")
E107 = ("Value of `doc._.{attr}` is not JSON-serializable: {value}") E107 = ("Value of custom attribute `{attr}` is not JSON-serializable: {value}")
E109 = ("Component '{name}' could not be run. Did you forget to " E109 = ("Component '{name}' could not be run. Did you forget to "
"call `initialize()`?") "call `initialize()`?")
E110 = ("Invalid displaCy render wrapper. Expected callable, got: {obj}") E110 = ("Invalid displaCy render wrapper. Expected callable, got: {obj}")
@ -484,7 +488,7 @@ class Errors(metaclass=ErrorsWithCodes):
"Current DocBin: {current}\nOther DocBin: {other}") "Current DocBin: {current}\nOther DocBin: {other}")
E169 = ("Can't find module: {module}") E169 = ("Can't find module: {module}")
E170 = ("Cannot apply transition {name}: invalid for the current state.") E170 = ("Cannot apply transition {name}: invalid for the current state.")
E171 = ("Matcher.add received invalid 'on_match' callback argument: expected " E171 = ("{name}.add received invalid 'on_match' callback argument: expected "
"callable or None, but got: {arg_type}") "callable or None, but got: {arg_type}")
E175 = ("Can't remove rule for unknown match pattern ID: {key}") E175 = ("Can't remove rule for unknown match pattern ID: {key}")
E176 = ("Alias '{alias}' is not defined in the Knowledge Base.") E176 = ("Alias '{alias}' is not defined in the Knowledge Base.")
@ -532,11 +536,12 @@ class Errors(metaclass=ErrorsWithCodes):
E198 = ("Unable to return {n} most similar vectors for the current vectors " E198 = ("Unable to return {n} most similar vectors for the current vectors "
"table, which contains {n_rows} vectors.") "table, which contains {n_rows} vectors.")
E199 = ("Unable to merge 0-length span at `doc[{start}:{end}]`.") E199 = ("Unable to merge 0-length span at `doc[{start}:{end}]`.")
E200 = ("Can't yet set {attr} from Span. Vote for this feature on the " E200 = ("Can't set {attr} from Span.")
"issue tracker: http://github.com/explosion/spaCy/issues")
E202 = ("Unsupported {name} mode '{mode}'. Supported modes: {modes}.") E202 = ("Unsupported {name} mode '{mode}'. Supported modes: {modes}.")
# New errors added in v3.x # New errors added in v3.x
E853 = ("Unsupported component factory name '{name}'. The character '.' is "
"not permitted in factory names.")
E854 = ("Unable to set doc.ents. Check that the 'ents_filter' does not " E854 = ("Unable to set doc.ents. Check that the 'ents_filter' does not "
"permit overlapping spans.") "permit overlapping spans.")
E855 = ("Invalid {obj}: {obj} is not from the same doc.") E855 = ("Invalid {obj}: {obj} is not from the same doc.")
@ -734,7 +739,7 @@ class Errors(metaclass=ErrorsWithCodes):
"loaded nlp object, but got: {source}") "loaded nlp object, but got: {source}")
E947 = ("`Matcher.add` received invalid `greedy` argument: expected " E947 = ("`Matcher.add` received invalid `greedy` argument: expected "
"a string value from {expected} but got: '{arg}'") "a string value from {expected} but got: '{arg}'")
E948 = ("`Matcher.add` received invalid 'patterns' argument: expected " E948 = ("`{name}.add` received invalid 'patterns' argument: expected "
"a list, but got: {arg_type}") "a list, but got: {arg_type}")
E949 = ("Unable to align tokens for the predicted and reference docs. It " E949 = ("Unable to align tokens for the predicted and reference docs. It "
"is only possible to align the docs when both texts are the same " "is only possible to align the docs when both texts are the same "
@ -932,7 +937,14 @@ class Errors(metaclass=ErrorsWithCodes):
E1040 = ("Doc.from_json requires all tokens to have the same attributes. " E1040 = ("Doc.from_json requires all tokens to have the same attributes. "
"Some tokens do not contain annotation for: {partial_attrs}") "Some tokens do not contain annotation for: {partial_attrs}")
E1041 = ("Expected a string, Doc, or bytes as input, but got: {type}") E1041 = ("Expected a string, Doc, or bytes as input, but got: {type}")
E1042 = ("Backprop is not supported when is_train is not set.") E1042 = ("Function was called with `{arg1}`={arg1_values} and "
"`{arg2}`={arg2_values} but these arguments are conflicting.")
E1043 = ("Expected None or a value in range [{range_start}, {range_end}] for entity linker threshold, but got "
"{value}.")
# v4 error strings
E4000 = ("Expected a Doc as input, but got: '{type}'")
E4001 = ("Backprop is not supported when is_train is not set.")
# Deprecated model shortcuts, only used in errors and warnings # Deprecated model shortcuts, only used in errors and warnings


@ -93,14 +93,14 @@ cdef class KnowledgeBase:
self.vocab = vocab self.vocab = vocab
self._create_empty_vectors(dummy_hash=self.vocab.strings[""]) self._create_empty_vectors(dummy_hash=self.vocab.strings[""])
def initialize_entities(self, int64_t nr_entities): def _initialize_entities(self, int64_t nr_entities):
self._entry_index = PreshMap(nr_entities + 1) self._entry_index = PreshMap(nr_entities + 1)
self._entries = entry_vec(nr_entities + 1) self._entries = entry_vec(nr_entities + 1)
def initialize_vectors(self, int64_t nr_entities): def _initialize_vectors(self, int64_t nr_entities):
self._vectors_table = float_matrix(nr_entities + 1) self._vectors_table = float_matrix(nr_entities + 1)
def initialize_aliases(self, int64_t nr_aliases): def _initialize_aliases(self, int64_t nr_aliases):
self._alias_index = PreshMap(nr_aliases + 1) self._alias_index = PreshMap(nr_aliases + 1)
self._aliases_table = alias_vec(nr_aliases + 1) self._aliases_table = alias_vec(nr_aliases + 1)
@ -155,8 +155,8 @@ cdef class KnowledgeBase:
raise ValueError(Errors.E140) raise ValueError(Errors.E140)
nr_entities = len(set(entity_list)) nr_entities = len(set(entity_list))
self.initialize_entities(nr_entities) self._initialize_entities(nr_entities)
self.initialize_vectors(nr_entities) self._initialize_vectors(nr_entities)
i = 0 i = 0
cdef KBEntryC entry cdef KBEntryC entry
@ -388,9 +388,9 @@ cdef class KnowledgeBase:
nr_entities = header[0] nr_entities = header[0]
nr_aliases = header[1] nr_aliases = header[1]
entity_vector_length = header[2] entity_vector_length = header[2]
self.initialize_entities(nr_entities) self._initialize_entities(nr_entities)
self.initialize_vectors(nr_entities) self._initialize_vectors(nr_entities)
self.initialize_aliases(nr_aliases) self._initialize_aliases(nr_aliases)
self.entity_vector_length = entity_vector_length self.entity_vector_length = entity_vector_length
def deserialize_vectors(b): def deserialize_vectors(b):
@ -512,8 +512,8 @@ cdef class KnowledgeBase:
cdef int64_t entity_vector_length cdef int64_t entity_vector_length
reader.read_header(&nr_entities, &entity_vector_length) reader.read_header(&nr_entities, &entity_vector_length)
self.initialize_entities(nr_entities) self._initialize_entities(nr_entities)
self.initialize_vectors(nr_entities) self._initialize_vectors(nr_entities)
self.entity_vector_length = entity_vector_length self.entity_vector_length = entity_vector_length
# STEP 1: load entity vectors # STEP 1: load entity vectors
@ -552,7 +552,7 @@ cdef class KnowledgeBase:
# STEP 3: load aliases # STEP 3: load aliases
cdef int64_t nr_aliases cdef int64_t nr_aliases
reader.read_alias_length(&nr_aliases) reader.read_alias_length(&nr_aliases)
self.initialize_aliases(nr_aliases) self._initialize_aliases(nr_aliases)
cdef int64_t nr_candidates cdef int64_t nr_candidates
cdef vector[int64_t] entry_indices cdef vector[int64_t] entry_indices


@ -2,7 +2,8 @@ from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .lex_attrs import LEX_ATTRS from .lex_attrs import LEX_ATTRS
from ..tokenizer_exceptions import BASE_EXCEPTIONS from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ..punctuation import COMBINING_DIACRITICS_TOKENIZER_INFIXES
from ..punctuation import COMBINING_DIACRITICS_TOKENIZER_SUFFIXES
from ...language import Language, BaseDefaults from ...language import Language, BaseDefaults
from ...attrs import LANG from ...attrs import LANG
from ...util import update_exc from ...util import update_exc
@ -16,6 +17,8 @@ class BulgarianDefaults(BaseDefaults):
stop_words = STOP_WORDS stop_words = STOP_WORDS
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS) tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
suffixes = COMBINING_DIACRITICS_TOKENIZER_SUFFIXES
infixes = COMBINING_DIACRITICS_TOKENIZER_INFIXES
class Bulgarian(Language): class Bulgarian(Language):


@ -72,10 +72,10 @@ class CatalanLemmatizer(Lemmatizer):
oov_forms.append(form) oov_forms.append(form)
if not forms: if not forms:
forms.extend(oov_forms) forms.extend(oov_forms)
if not forms and string in lookup_table.keys():
forms.append(self.lookup_lemmatize(token)[0]) # use lookups, and fall back to the token itself
if not forms: if not forms:
forms.append(string) forms.append(lookup_table.get(string, [string])[0])
forms = list(dict.fromkeys(forms)) forms = list(dict.fromkeys(forms))
self.cache[cache_key] = forms self.cache[cache_key] = forms
return forms return forms

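The one-line fallback added here (the French lemmatizer below gets the same change) is easy to verify in isolation; the lookup table is a toy stand-in for the real lemma-lookup data:

lookup_table = {"cantava": ["cantar"]}  # toy table, not the real ca_lemma_lookup data


def fallback_lemma(string: str) -> str:
    # The new fallback in one expression: the first lookup lemma if the form
    # is in the table, otherwise the form itself.
    return lookup_table.get(string, [string])[0]


assert fallback_lemma("cantava") == "cantar"  # found in the lookup table
assert fallback_lemma("xyzzy") == "xyzzy"     # OOV: fall back to the token text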

@ -258,6 +258,10 @@ ALPHA = group_chars(
ALPHA_LOWER = group_chars(_lower + _uncased) ALPHA_LOWER = group_chars(_lower + _uncased)
ALPHA_UPPER = group_chars(_upper + _uncased) ALPHA_UPPER = group_chars(_upper + _uncased)
_combining_diacritics = r"\u0300-\u036f"
COMBINING_DIACRITICS = _combining_diacritics
_units = ( _units = (
"km km² km³ m m² m³ dm dm² dm³ cm cm² cm³ mm mm² mm³ ha µm nm yd in ft " "km km² km³ m m² m³ dm dm² dm³ cm cm² cm³ mm mm² mm³ ha µm nm yd in ft "
"kg g mg µg t lb oz m/s km/h kmh mph hPa Pa mbar mb MB kb KB gb GB tb " "kg g mg µg t lb oz m/s km/h kmh mph hPa Pa mbar mb MB kb KB gb GB tb "


@ -53,11 +53,16 @@ class FrenchLemmatizer(Lemmatizer):
rules = rules_table.get(univ_pos, []) rules = rules_table.get(univ_pos, [])
string = string.lower() string = string.lower()
forms = [] forms = []
# first try lookup in table based on upos
if string in index: if string in index:
forms.append(string) forms.append(string)
self.cache[cache_key] = forms self.cache[cache_key] = forms
return forms return forms
# then add anything in the exceptions table
forms.extend(exceptions.get(string, [])) forms.extend(exceptions.get(string, []))
# if nothing found yet, use the rules
oov_forms = [] oov_forms = []
if not forms: if not forms:
for old, new in rules: for old, new in rules:
@ -69,12 +74,14 @@ class FrenchLemmatizer(Lemmatizer):
forms.append(form) forms.append(form)
else: else:
oov_forms.append(form) oov_forms.append(form)
# if still nothing, add the oov forms from rules
if not forms: if not forms:
forms.extend(oov_forms) forms.extend(oov_forms)
if not forms and string in lookup_table.keys():
forms.append(self.lookup_lemmatize(token)[0]) # use lookups, which fall back to the token itself
if not forms: if not forms:
forms.append(string) forms.append(lookup_table.get(string, [string])[0])
forms = list(dict.fromkeys(forms)) forms = list(dict.fromkeys(forms))
self.cache[cache_key] = forms self.cache[cache_key] = forms
return forms return forms


@ -18,34 +18,23 @@ DEFAULT_CONFIG = """
[nlp.tokenizer] [nlp.tokenizer]
@tokenizers = "spacy.ko.KoreanTokenizer" @tokenizers = "spacy.ko.KoreanTokenizer"
mecab_args = ""
""" """
@registry.tokenizers("spacy.ko.KoreanTokenizer") @registry.tokenizers("spacy.ko.KoreanTokenizer")
def create_tokenizer(): def create_tokenizer(mecab_args: str):
def korean_tokenizer_factory(nlp): def korean_tokenizer_factory(nlp):
return KoreanTokenizer(nlp.vocab) return KoreanTokenizer(nlp.vocab, mecab_args=mecab_args)
return korean_tokenizer_factory return korean_tokenizer_factory
class KoreanTokenizer(DummyTokenizer): class KoreanTokenizer(DummyTokenizer):
def __init__(self, vocab: Vocab): def __init__(self, vocab: Vocab, *, mecab_args: str = ""):
self.vocab = vocab self.vocab = vocab
self._mecab = try_mecab_import() # type: ignore[func-returns-value] mecab = try_mecab_import()
self._mecab_tokenizer = None self.mecab_tokenizer = mecab.Tagger(mecab_args)
@property
def mecab_tokenizer(self):
# This is a property so that initializing a pipeline with blank:ko is
# possible without actually requiring mecab-ko, e.g. to run
# `spacy init vectors ko` for a pipeline that will have a different
# tokenizer in the end. The languages need to match for the vectors
# to be imported and there's no way to pass a custom config to
# `init vectors`.
if self._mecab_tokenizer is None:
self._mecab_tokenizer = self._mecab("-F%f[0],%f[7]")
return self._mecab_tokenizer
def __reduce__(self): def __reduce__(self):
return KoreanTokenizer, (self.vocab,) return KoreanTokenizer, (self.vocab,)
@ -68,13 +57,15 @@ class KoreanTokenizer(DummyTokenizer):
def detailed_tokens(self, text: str) -> Iterator[Dict[str, Any]]: def detailed_tokens(self, text: str) -> Iterator[Dict[str, Any]]:
# 품사 태그(POS)[0], 의미 부류(semantic class)[1], 종성 유무(jongseong)[2], 읽기(reading)[3], # 품사 태그(POS)[0], 의미 부류(semantic class)[1], 종성 유무(jongseong)[2], 읽기(reading)[3],
# 타입(type)[4], 첫번째 품사(start pos)[5], 마지막 품사(end pos)[6], 표현(expression)[7], * # 타입(type)[4], 첫번째 품사(start pos)[5], 마지막 품사(end pos)[6], 표현(expression)[7], *
for node in self.mecab_tokenizer.parse(text, as_nodes=True): for line in self.mecab_tokenizer.parse(text).split("\n"):
if node.is_eos(): if line == "EOS":
break break
surface = node.surface surface, _, expr = line.partition("\t")
feature = node.feature features = expr.split("/")[0].split(",")
tag, _, expr = feature.partition(",") tag = features[0]
lemma, _, remainder = expr.partition("/") lemma = "*"
if len(features) >= 8:
lemma = features[7]
if lemma == "*": if lemma == "*":
lemma = surface lemma = surface
yield {"surface": surface, "lemma": lemma, "tag": tag} yield {"surface": surface, "lemma": lemma, "tag": tag}
@ -97,20 +88,94 @@ class Korean(Language):
Defaults = KoreanDefaults Defaults = KoreanDefaults
def try_mecab_import() -> None: def try_mecab_import():
try: try:
from natto import MeCab import mecab_ko as MeCab
return MeCab return MeCab
except ImportError: except ImportError:
raise ImportError( raise ImportError(
'The Korean tokenizer ("spacy.ko.KoreanTokenizer") requires ' 'The Korean tokenizer ("spacy.ko.KoreanTokenizer") requires '
"[mecab-ko](https://bitbucket.org/eunjeon/mecab-ko/src/master/README.md), " "the python package `mecab-ko`: pip install mecab-ko"
"[mecab-ko-dic](https://bitbucket.org/eunjeon/mecab-ko-dic), "
"and [natto-py](https://github.com/buruzaemon/natto-py)"
) from None ) from None
@registry.tokenizers("spacy.KoreanNattoTokenizer.v1")
def create_natto_tokenizer():
def korean_natto_tokenizer_factory(nlp):
return KoreanNattoTokenizer(nlp.vocab)
return korean_natto_tokenizer_factory
class KoreanNattoTokenizer(DummyTokenizer):
def __init__(self, vocab: Vocab):
self.vocab = vocab
self._mecab = self._try_mecab_import() # type: ignore[func-returns-value]
self._mecab_tokenizer = None
@property
def mecab_tokenizer(self):
# This is a property so that initializing a pipeline with blank:ko is
# possible without actually requiring mecab-ko, e.g. to run
# `spacy init vectors ko` for a pipeline that will have a different
# tokenizer in the end. The languages need to match for the vectors
# to be imported and there's no way to pass a custom config to
# `init vectors`.
if self._mecab_tokenizer is None:
self._mecab_tokenizer = self._mecab("-F%f[0],%f[7]")
return self._mecab_tokenizer
def __reduce__(self):
return KoreanNattoTokenizer, (self.vocab,)
def __call__(self, text: str) -> Doc:
dtokens = list(self.detailed_tokens(text))
surfaces = [dt["surface"] for dt in dtokens]
doc = Doc(self.vocab, words=surfaces, spaces=list(check_spaces(text, surfaces)))
for token, dtoken in zip(doc, dtokens):
first_tag, sep, eomi_tags = dtoken["tag"].partition("+")
token.tag_ = first_tag # stem(어간) or pre-final(선어말 어미)
if token.tag_ in TAG_MAP:
token.pos = TAG_MAP[token.tag_][POS]
else:
token.pos = X
token.lemma_ = dtoken["lemma"]
doc.user_data["full_tags"] = [dt["tag"] for dt in dtokens]
return doc
def detailed_tokens(self, text: str) -> Iterator[Dict[str, Any]]:
# 품사 태그(POS)[0], 의미 부류(semantic class)[1], 종성 유무(jongseong)[2], 읽기(reading)[3],
# 타입(type)[4], 첫번째 품사(start pos)[5], 마지막 품사(end pos)[6], 표현(expression)[7], *
for node in self.mecab_tokenizer.parse(text, as_nodes=True):
if node.is_eos():
break
surface = node.surface
feature = node.feature
tag, _, expr = feature.partition(",")
lemma, _, remainder = expr.partition("/")
if lemma == "*" or lemma == "":
lemma = surface
yield {"surface": surface, "lemma": lemma, "tag": tag}
def score(self, examples):
validate_examples(examples, "KoreanTokenizer.score")
return Scorer.score_tokenization(examples)
def _try_mecab_import(self):
try:
from natto import MeCab
return MeCab
except ImportError:
raise ImportError(
'The Korean Natto tokenizer ("spacy.ko.KoreanNattoTokenizer") requires '
"[mecab-ko](https://bitbucket.org/eunjeon/mecab-ko/src/master/README.md), "
"[mecab-ko-dic](https://bitbucket.org/eunjeon/mecab-ko-dic), "
"and [natto-py](https://github.com/buruzaemon/natto-py)"
) from None
def check_spaces(text, tokens): def check_spaces(text, tokens):
prev_end = -1 prev_end = -1
start = 0 start = 0

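The new mecab-ko output parsing can be exercised without a real `mecab_ko.Tagger` by feeding it a canned analysis string in the same 'surface<TAB>features' format; the sample line below is illustrative rather than genuine tagger output:

# Hypothetical tagger output: one analysed token followed by the EOS marker.
canned = "안녕\tIC,*,T,안녕,*,*,*,*\nEOS\n"


def detailed_tokens(parsed: str):
    for line in parsed.split("\n"):
        if line == "EOS":
            break
        surface, _, expr = line.partition("\t")
        features = expr.split("/")[0].split(",")
        tag = features[0]
        lemma = "*"
        if len(features) >= 8:
            lemma = features[7]
        if lemma == "*":
            lemma = surface
        yield {"surface": surface, "lemma": lemma, "tag": tag}


assert list(detailed_tokens(canned)) == [
    {"surface": "안녕", "lemma": "안녕", "tag": "IC"}
]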

@ -3,7 +3,7 @@ from ..punctuation import TOKENIZER_INFIXES as BASE_TOKENIZER_INFIXES
_infixes = ( _infixes = (
["·", "ㆍ", "\(", "\)"] ["·", "ㆍ", r"\(", r"\)"]
+ [r"(?<=[0-9])~(?=[0-9-])"] + [r"(?<=[0-9])~(?=[0-9-])"]
+ LIST_QUOTES + LIST_QUOTES
+ BASE_TOKENIZER_INFIXES + BASE_TOKENIZER_INFIXES

spacy/lang/la/__init__.py Normal file

@ -0,0 +1,18 @@
from ...language import Language, BaseDefaults
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .stop_words import STOP_WORDS
from .lex_attrs import LEX_ATTRS
class LatinDefaults(BaseDefaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
stop_words = STOP_WORDS
lex_attr_getters = LEX_ATTRS
class Latin(Language):
lang = "la"
Defaults = LatinDefaults
__all__ = ["Latin"]


@ -0,0 +1,34 @@
from ...attrs import LIKE_NUM
import re
# cf. Goyvaerts/Levithan 2009; case-insensitive, allows up to four repeated numerals (e.g. IIII)
roman_numerals_compile = re.compile(
r"(?i)^(?=[MDCLXVI])M*(C[MD]|D?C{0,4})(X[CL]|L?X{0,4})(I[XV]|V?I{0,4})$"
)
_num_words = set(
"""
unus una unum duo duae tres tria quattuor quinque sex septem octo novem decem
""".split()
)
_ordinal_words = set(
"""
primus prima primum secundus secunda secundum tertius tertia tertium
""".split()
)
def like_num(text):
if text.isdigit():
return True
if roman_numerals_compile.match(text):
return True
if text.lower() in _num_words:
return True
if text.lower() in _ordinal_words:
return True
return False
LEX_ATTRS = {LIKE_NUM: like_num}

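A quick check of the Roman-numeral pattern, which is case-insensitive and tolerates up to four repeated numerals (e.g. the clock-face form "IIII"):

import re

roman_numerals_compile = re.compile(
    r"(?i)^(?=[MDCLXVI])M*(C[MD]|D?C{0,4})(X[CL]|L?X{0,4})(I[XV]|V?I{0,4})$"
)

for text in ["XVII", "mcmxciv", "IIII"]:
    assert roman_numerals_compile.match(text)
assert roman_numerals_compile.match("XIIIII") is None  # more than four repeats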

@ -0,0 +1,37 @@
# Corrected Perseus list, cf. https://wiki.digitalclassicist.org/Stopwords_for_Greek_and_Latin
STOP_WORDS = set(
"""
ab ac ad adhuc aliqui aliquis an ante apud at atque aut autem
cum cur
de deinde dum
ego enim ergo es est et etiam etsi ex
fio
haud hic
iam idem igitur ille in infra inter interim ipse is ita
magis modo mox
nam ne nec necque neque nisi non nos
o ob
per possum post pro
quae quam quare qui quia quicumque quidem quilibet quis quisnam quisquam quisque quisquis quo quoniam
sed si sic sive sub sui sum super suus
tam tamen trans tu tum
ubi uel uero
vel vero
""".split()
)


@ -0,0 +1,76 @@
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ...symbols import ORTH
from ...util import update_exc
## TODO: Look into systematically handling u/v
_exc = {
"mecum": [{ORTH: "me"}, {ORTH: "cum"}],
"tecum": [{ORTH: "te"}, {ORTH: "cum"}],
"nobiscum": [{ORTH: "nobis"}, {ORTH: "cum"}],
"vobiscum": [{ORTH: "vobis"}, {ORTH: "cum"}],
"uobiscum": [{ORTH: "uobis"}, {ORTH: "cum"}],
}
for orth in [
"A.",
"Agr.",
"Ap.",
"C.",
"Cn.",
"D.",
"F.",
"K.",
"L.",
"M'.",
"M.",
"Mam.",
"N.",
"Oct.",
"Opet.",
"P.",
"Paul.",
"Post.",
"Pro.",
"Q.",
"S.",
"Ser.",
"Sert.",
"Sex.",
"St.",
"Sta.",
"T.",
"Ti.",
"V.",
"Vol.",
"Vop.",
"U.",
"Uol.",
"Uop.",
"Ian.",
"Febr.",
"Mart.",
"Apr.",
"Mai.",
"Iun.",
"Iul.",
"Aug.",
"Sept.",
"Oct.",
"Nov.",
"Nou.",
"Dec.",
"Non.",
"Id.",
"A.D.",
"Coll.",
"Cos.",
"Ord.",
"Pl.",
"S.C.",
"Suff.",
"Trib.",
]:
_exc[orth] = [{ORTH: orth}]
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)

spacy/lang/lg/__init__.py Normal file

@ -0,0 +1,18 @@
from .stop_words import STOP_WORDS
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES
from ...language import Language, BaseDefaults
class LugandaDefaults(BaseDefaults):
lex_attr_getters = LEX_ATTRS
infixes = TOKENIZER_INFIXES
stop_words = STOP_WORDS
class Luganda(Language):
lang = "lg"
Defaults = LugandaDefaults
__all__ = ["Luganda"]

spacy/lang/lg/examples.py Normal file

@ -0,0 +1,17 @@
"""
Example sentences to test spaCy and its language models.
>>> from spacy.lang.lg.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Mpa ebyafaayo ku byalo Nakatu ne Nkajja",
"Okuyita Ttembo kitegeeza kugwa ddalu",
"Ekifumu kino kyali kya mulimu ki?",
"Ekkovu we liyise wayitibwa mukululo",
"Akola mulimu ki oguvaamu ssente?",
"Emisumaali egikomerera embaawo giyitibwa nninga",
"Abooluganda abemmamba ababiri",
"Ekisaawe ky'ebyenjigiriza kya mugaso nnyo",
]


@ -0,0 +1,95 @@
from ...attrs import LIKE_NUM
_num_words = [
"nnooti", # Zero
"zeero", # zero
"emu", # one
"bbiri", # two
"ssatu", # three
"nnya", # four
"ttaano", # five
"mukaaga", # six
"musanvu", # seven
"munaana", # eight
"mwenda", # nine
"kkumi", # ten
"kkumi n'emu", # eleven
"kkumi na bbiri", # twelve
"kkumi na ssatu", # thirteen
"kkumi na nnya", # forteen
"kkumi na ttaano", # fifteen
"kkumi na mukaaga", # sixteen
"kkumi na musanvu", # seventeen
"kkumi na munaana", # eighteen
"kkumi na mwenda", # nineteen
"amakumi abiri", # twenty
"amakumi asatu", # thirty
"amakumi ana", # forty
"amakumi ataano", # fifty
"nkaaga", # sixty
"nsanvu", # seventy
"kinaana", # eighty
"kyenda", # ninety
"kikumi", # hundred
"lukumi", # thousand
"kakadde", # million
"kawumbi", # billion
"kase", # trillion
"katabalika", # quadrillion
"keesedde", # gajillion
"kafukunya", # bazillion
"ekisooka", # first
"ekyokubiri", # second
"ekyokusatu", # third
"ekyokuna", # fourth
"ekyokutaano", # fifith
"ekyomukaaga", # sixth
"ekyomusanvu", # seventh
"eky'omunaana", # eighth
"ekyomwenda", # nineth
"ekyekkumi", # tenth
"ekyekkumi n'ekimu", # eleventh
"ekyekkumi n'ebibiri", # twelveth
"ekyekkumi n'ebisatu", # thirteenth
"ekyekkumi n'ebina", # fourteenth
"ekyekkumi n'ebitaano", # fifteenth
"ekyekkumi n'omukaaga", # sixteenth
"ekyekkumi n'omusanvu", # seventeenth
"ekyekkumi n'omunaana", # eigteenth
"ekyekkumi n'omwenda", # nineteenth
"ekyamakumi abiri", # twentieth
"ekyamakumi asatu", # thirtieth
"ekyamakumi ana", # fortieth
"ekyamakumi ataano", # fiftieth
"ekyenkaaga", # sixtieth
"ekyensanvu", # seventieth
"ekyekinaana", # eightieth
"ekyekyenda", # ninetieth
"ekyekikumi", # hundredth
"ekyolukumi", # thousandth
"ekyakakadde", # millionth
"ekyakawumbi", # billionth
"ekyakase", # trillionth
"ekyakatabalika", # quadrillionth
"ekyakeesedde", # gajillionth
"ekyakafukunya", # bazillionth
]
def like_num(text):
if text.startswith(("+", "-", "±", "~")):
text = text[1:]
text = text.replace(",", "").replace(".", "")
if text.isdigit():
return True
if text.count("/") == 1:
num, denom = text.split("/")
if num.isdigit() and denom.isdigit():
return True
text_lower = text.lower()
if text_lower in _num_words:
return True
return False
LEX_ATTRS = {LIKE_NUM: like_num}


@ -0,0 +1,19 @@
from ..char_classes import LIST_ELLIPSES, LIST_ICONS, HYPHENS
from ..char_classes import CONCAT_QUOTES, ALPHA_LOWER, ALPHA_UPPER, ALPHA
_infixes = (
LIST_ELLIPSES
+ LIST_ICONS
+ [
r"(?<=[0-9])[+\-\*^](?=[0-9-])",
r"(?<=[{al}{q}])\.(?=[{au}{q}])".format(
al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES
),
r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
r"(?<=[{a}0-9])(?:{h})(?=[{a}])".format(a=ALPHA, h=HYPHENS),
r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA),
]
)
TOKENIZER_INFIXES = _infixes


@ -0,0 +1,19 @@
STOP_WORDS = set(
"""
abadde abalala abamu abangi abava ajja ali alina ani anti ateekeddwa atewamu
atya awamu aweebwa ayinza ba baali babadde babalina bajja
bajjanewankubade bali balina bandi bangi bano bateekeddwa baweebwa bayina bebombi beera bibye
bimu bingi bino bo bokka bonna buli bulijjo bulungi bwabwe bwaffe bwayo bwe bwonna bya byabwe
byaffe byebimu byonna ddaa ddala ddi e ebimu ebiri ebweruobulungi ebyo edda ejja ekirala ekyo
endala engeri ennyo era erimu erina ffe ffenna ga gujja gumu gunno guno gwa gwe kaseera kati
kennyini ki kiki kikino kikye kikyo kino kirungi kki ku kubangabyombi kubangaolwokuba kudda
kuva kuwa kwegamba kyaffe kye kyekimuoyo kyekyo kyonna leero liryo lwa lwaki lyabwezaabwe
lyaffe lyange mbadde mingi mpozzi mu mulinaoyina munda mwegyabwe nolwekyo nabadde nabo nandiyagadde
nandiye nanti naye ne nedda neera nga nnyingi nnyini nnyinza nnyo nti nyinza nze oba ojja okudda
okugenda okuggyako okutuusa okuva okuwa oli olina oluvannyuma olwekyobuva omuli ono osobola otya
oyina oyo seetaaga si sinakindi singa talina tayina tebaali tebaalina tebayina terina tetulina
tetuteekeddwa tewali teyalina teyayina tolina tu tuyina tulina tuyina twafuna twetaaga wa wabula
wabweru wadde waggulunnina wakati waliwobangi waliyo wandi wange wano wansi weebwa yabadde yaffe
ye yenna yennyini yina yonna ziba zijja zonna
""".split()
)


@ -40,6 +40,7 @@ def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]:
span_label = doc.vocab.strings.add("NP") span_label = doc.vocab.strings.add("NP")
# Only NOUNS and PRONOUNS matter # Only NOUNS and PRONOUNS matter
end_span = -1
for i, word in enumerate(filter(lambda x: x.pos in [PRON, NOUN], doclike)): for i, word in enumerate(filter(lambda x: x.pos in [PRON, NOUN], doclike)):
# For NOUNS # For NOUNS
# Pick children from syntactic parse (only those with certain dependencies) # Pick children from syntactic parse (only those with certain dependencies)
@ -58,15 +59,17 @@ def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]:
children_i = [c.i for c in children] + [word.i] children_i = [c.i for c in children] + [word.i]
start_span = min(children_i) start_span = min(children_i)
end_span = max(children_i) + 1 if start_span >= end_span:
yield start_span, end_span, span_label end_span = max(children_i) + 1
yield start_span, end_span, span_label
# PRONOUNS only if it is the subject of a verb # PRONOUNS only if it is the subject of a verb
elif word.pos == PRON: elif word.pos == PRON:
if word.dep in pronoun_deps: if word.dep in pronoun_deps:
start_span = word.i start_span = word.i
end_span = word.i + 1 if start_span >= end_span:
yield start_span, end_span, span_label end_span = word.i + 1
yield start_span, end_span, span_label
SYNTAX_ITERATORS = {"noun_chunks": noun_chunks} SYNTAX_ITERATORS = {"noun_chunks": noun_chunks}

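The guard added above keeps noun chunks from overlapping; stripped of the dependency-parse details, the logic reduces to this sketch:

def non_overlapping(candidates):
    # `candidates` is an iterable of (start, end) pairs in document order; a
    # candidate is only yielded if it starts at or after the previous chunk's end.
    end_span = -1
    for start_span, candidate_end in candidates:
        if start_span >= end_span:
            end_span = candidate_end
            yield start_span, end_span


chunks = [(0, 3), (1, 2), (3, 5)]  # the second candidate overlaps the first
assert list(non_overlapping(chunks)) == [(0, 3), (3, 5)]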

@ -1,5 +1,5 @@
from .char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES, LIST_CURRENCY from .char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES, LIST_CURRENCY
from .char_classes import LIST_ICONS, HYPHENS, CURRENCY, UNITS from .char_classes import LIST_ICONS, HYPHENS, CURRENCY, UNITS, COMBINING_DIACRITICS
from .char_classes import CONCAT_QUOTES, ALPHA_LOWER, ALPHA_UPPER, ALPHA, PUNCT from .char_classes import CONCAT_QUOTES, ALPHA_LOWER, ALPHA_UPPER, ALPHA, PUNCT
@ -44,3 +44,23 @@ TOKENIZER_INFIXES = (
r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA), r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA),
] ]
) )
# Some languages e.g. written with the Cyrillic alphabet permit the use of diacritics
# to mark stressed syllables in words where stress is distinctive. Such languages
# should use the COMBINING_DIACRITICS... suffix and infix regex lists in
# place of the standard ones.
COMBINING_DIACRITICS_TOKENIZER_SUFFIXES = list(TOKENIZER_SUFFIXES) + [
r"(?<=[{a}][{d}])\.".format(a=ALPHA, d=COMBINING_DIACRITICS),
]
COMBINING_DIACRITICS_TOKENIZER_INFIXES = list(TOKENIZER_INFIXES) + [
r"(?<=[{al}][{d}])\.(?=[{au}{q}])".format(
al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES, d=COMBINING_DIACRITICS
),
r"(?<=[{a}][{d}]),(?=[{a}])".format(a=ALPHA, d=COMBINING_DIACRITICS),
r"(?<=[{a}][{d}])(?:{h})(?=[{a}])".format(
a=ALPHA, d=COMBINING_DIACRITICS, h=HYPHENS
),
r"(?<=[{a}][{d}])[:<>=/](?=[{a}])".format(a=ALPHA, d=COMBINING_DIACRITICS),
]

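A small check of what the extra suffix rule buys for languages that mark stress with combining diacritics; the `ALPHA` class is deliberately trimmed here, the real one covers many more scripts:

import re

ALPHA = r"A-Za-zА-Яа-я"            # simplified stand-in for the full ALPHA class
COMBINING_DIACRITICS = r"\u0300-\u036f"

# The extra suffix rule: split a trailing period even when the final letter
# carries a combining mark (e.g. a stress accent written with U+0301).
suffix = re.compile(r"(?<=[{a}][{d}])\.".format(a=ALPHA, d=COMBINING_DIACRITICS))

word = "слова" + "\u0301" + "."    # "слова́." with an explicit combining acute
assert suffix.search(word) is not None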

@ -5,6 +5,8 @@ from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .lex_attrs import LEX_ATTRS from .lex_attrs import LEX_ATTRS
from .lemmatizer import RussianLemmatizer from .lemmatizer import RussianLemmatizer
from ..punctuation import COMBINING_DIACRITICS_TOKENIZER_INFIXES
from ..punctuation import COMBINING_DIACRITICS_TOKENIZER_SUFFIXES
from ...language import Language, BaseDefaults from ...language import Language, BaseDefaults
@ -12,6 +14,8 @@ class RussianDefaults(BaseDefaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS tokenizer_exceptions = TOKENIZER_EXCEPTIONS
lex_attr_getters = LEX_ATTRS lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS stop_words = STOP_WORDS
suffixes = COMBINING_DIACRITICS_TOKENIZER_SUFFIXES
infixes = COMBINING_DIACRITICS_TOKENIZER_INFIXES
class Russian(Language): class Russian(Language):
@ -24,7 +28,7 @@ class Russian(Language):
assigns=["token.lemma"], assigns=["token.lemma"],
default_config={ default_config={
"model": None, "model": None,
"mode": "pymorphy2", "mode": "pymorphy3",
"overwrite": False, "overwrite": False,
"scorer": {"@scorers": "spacy.lemmatizer_scorer.v1"}, "scorer": {"@scorers": "spacy.lemmatizer_scorer.v1"},
}, },


@ -19,7 +19,7 @@ class RussianLemmatizer(Lemmatizer):
model: Optional[Model], model: Optional[Model],
name: str = "lemmatizer", name: str = "lemmatizer",
*, *,
mode: str = "pymorphy2", mode: str = "pymorphy3",
overwrite: bool = False, overwrite: bool = False,
scorer: Optional[Callable] = lemmatizer_score, scorer: Optional[Callable] = lemmatizer_score,
) -> None: ) -> None:
@ -33,6 +33,16 @@ class RussianLemmatizer(Lemmatizer):
) from None ) from None
if getattr(self, "_morph", None) is None: if getattr(self, "_morph", None) is None:
self._morph = MorphAnalyzer() self._morph = MorphAnalyzer()
elif mode == "pymorphy3":
try:
from pymorphy3 import MorphAnalyzer
except ImportError:
raise ImportError(
"The Russian lemmatizer mode 'pymorphy3' requires the "
"pymorphy3 library. Install it with: pip install pymorphy3"
) from None
if getattr(self, "_morph", None) is None:
self._morph = MorphAnalyzer()
super().__init__( super().__init__(
vocab, model, name, mode=mode, overwrite=overwrite, scorer=scorer vocab, model, name, mode=mode, overwrite=overwrite, scorer=scorer
) )
@ -104,6 +114,9 @@ class RussianLemmatizer(Lemmatizer):
return [analyses[0].normal_form] return [analyses[0].normal_form]
return [string] return [string]
def pymorphy3_lemmatize(self, token: Token) -> List[str]:
return self.pymorphy2_lemmatize(token)
def oc2ud(oc_tag: str) -> Tuple[str, Dict[str, str]]: def oc2ud(oc_tag: str) -> Tuple[str, Dict[str, str]]:
gram_map = { gram_map = {

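Usage sketch for the new default mode, assuming the `pymorphy3` package is installed. Note that the pymorphy-based lemmatizer reads `token.pos_`, so a trained pipeline with a tagger or morphologizer is needed before it produces non-trivial lemmas:

import spacy

nlp = spacy.blank("ru")
# The Russian-specific "lemmatizer" factory now defaults to pymorphy3;
# passing the mode explicitly makes the dependency obvious.
lemmatizer = nlp.add_pipe("lemmatizer", config={"mode": "pymorphy3"})
print(lemmatizer.mode)  # "pymorphy3"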

@ -1,9 +1,17 @@
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_INFIXES, TOKENIZER_SUFFIXES, TOKENIZER_PREFIXES
from .stop_words import STOP_WORDS from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from ...language import Language, BaseDefaults from ...language import Language, BaseDefaults
class SlovenianDefaults(BaseDefaults): class SlovenianDefaults(BaseDefaults):
stop_words = STOP_WORDS stop_words = STOP_WORDS
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
prefixes = TOKENIZER_PREFIXES
infixes = TOKENIZER_INFIXES
suffixes = TOKENIZER_SUFFIXES
lex_attr_getters = LEX_ATTRS
class Slovenian(Language): class Slovenian(Language):

spacy/lang/sl/lex_attrs.py Normal file

@ -0,0 +1,145 @@
from ...attrs import LIKE_NUM
from ...attrs import IS_CURRENCY
import unicodedata
_num_words = set(
"""
nula ničla nič ena dva tri štiri pet šest sedem osem
devet deset enajst dvanajst trinajst štirinajst petnajst
šestnajst sedemnajst osemnajst devetnajst dvajset trideset štirideset
petdeset šestdest sedemdeset osemdeset devedeset sto tisoč
milijon bilijon trilijon kvadrilijon nešteto
en eden enega enemu ennem enim enih enima enimi ene eni eno
dveh dvema dvem dvoje trije treh trem tremi troje štirje štirih štirim štirimi
petih petim petimi šestih šestim šestimi sedmih sedmim sedmimi osmih osmim osmimi
devetih devetim devetimi desetih desetim desetimi enajstih enajstim enajstimi
dvanajstih dvanajstim dvanajstimi trinajstih trinajstim trinajstimi
šestnajstih šestnajstim šestnajstimi petnajstih petnajstim petnajstimi
sedemnajstih sedemnajstim sedemnajstimi osemnajstih osemnajstim osemnajstimi
devetnajstih devetnajstim devetnajstimi dvajsetih dvajsetim dvajsetimi
""".split()
)
_ordinal_words = set(
"""
prvi drugi tretji četrti peti šesti sedmi osmi
deveti deseti enajsti dvanajsti trinajsti štirinajsti
petnajsti šestnajsti sedemnajsti osemnajsti devetnajsti
dvajseti trideseti štirideseti petdeseti šestdeseti sedemdeseti
osemdeseti devetdeseti stoti tisoči milijonti bilijonti
trilijonti kvadrilijonti nešteti
prva druga tretja četrta peta šesta sedma osma
deveta deseta enajsta dvanajsta trinajsta štirnajsta
petnajsta šestnajsta sedemnajsta osemnajsta devetnajsta
dvajseta trideseta štirideseta petdeseta šestdeseta sedemdeseta
osemdeseta devetdeseta stota tisoča milijonta bilijonta
trilijonta kvadrilijonta nešteta
prvo drugo tretje četrto peto šestro sedmo osmo
deveto deseto enajsto dvanajsto trinajsto štirnajsto
petnajsto šestnajsto sedemnajsto osemnajsto devetnajsto
dvajseto trideseto štirideseto petdeseto šestdeseto sedemdeseto
osemdeseto devetdeseto stoto tisočo milijonto bilijonto
trilijonto kvadrilijonto nešteto
prvega drugega tretjega četrtega petega šestega sedmega osmega
devega desetega enajstega dvanajstega trinajstega štirnajstega
petnajstega šestnajstega sedemnajstega osemnajstega devetnajstega
dvajsetega tridesetega štiridesetega petdesetega šestdesetega sedemdesetega
osemdesetega devetdesetega stotega tisočega milijontega bilijontega
trilijontega kvadrilijontega neštetega
prvemu drugemu tretjemu četrtemu petemu šestemu sedmemu osmemu devetemu desetemu
enajstemu dvanajstemu trinajstemu štirnajstemu petnajstemu šestnajstemu sedemnajstemu
osemnajstemu devetnajstemu dvajsetemu tridesetemu štiridesetemu petdesetemu šestdesetemu
sedemdesetemu osemdesetemu devetdesetemu stotemu tisočemu milijontemu bilijontemu
trilijontemu kvadrilijontemu neštetemu
prvem drugem tretjem četrtem petem šestem sedmem osmem devetem desetem
enajstem dvanajstem trinajstem štirnajstem petnajstem šestnajstem sedemnajstem
osemnajstem devetnajstem dvajsetem tridesetem štiridesetem petdesetem šestdesetem
sedemdesetem osemdesetem devetdesetem stotem tisočem milijontem bilijontem
trilijontem kvadrilijontem neštetem
prvim drugim tretjim četrtim petim šestim sedtim osmim devetim desetim
enajstim dvanajstim trinajstim štirnajstim petnajstim šestnajstim sedemnajstim
osemnajstim devetnajstim dvajsetim tridesetim štiridesetim petdesetim šestdesetim
sedemdesetim osemdesetim devetdesetim stotim tisočim milijontim bilijontim
trilijontim kvadrilijontim neštetim
prvih drugih tretjih četrthih petih šestih sedmih osmih deveth desetih
enajstih dvanajstih trinajstih štirnajstih petnajstih šestnajstih sedemnajstih
osemnajstih devetnajstih dvajsetih tridesetih štiridesetih petdesetih šestdesetih
sedemdesetih osemdesetih devetdesetih stotih tisočih milijontih bilijontih
trilijontih kvadrilijontih nešteth
prvima drugima tretjima četrtima petima šestima sedmima osmima devetima desetima
enajstima dvanajstima trinajstima štirnajstima petnajstima šestnajstima sedemnajstima
osemnajstima devetnajstima dvajsetima tridesetima štiridesetima petdesetima šestdesetima
sedemdesetima osemdesetima devetdesetima stotima tisočima milijontima bilijontima
trilijontima kvadrilijontima neštetima
prve druge četrte pete šeste sedme osme devete desete
enajste dvanajste trinajste štirnajste petnajste šestnajste sedemnajste
osemnajste devetnajste dvajsete tridesete štiridesete petdesete šestdesete
sedemdesete osemdesete devetdesete stote tisoče milijonte bilijonte
trilijonte kvadrilijonte neštete
prvimi drugimi tretjimi četrtimi petimi šestimi sedtimi osmimi devetimi desetimi
enajstimi dvanajstimi trinajstimi štirnajstimi petnajstimi šestnajstimi sedemnajstimi
osemnajstimi devetnajstimi dvajsetimi tridesetimi štiridesetimi petdesetimi šestdesetimi
sedemdesetimi osemdesetimi devetdesetimi stotimi tisočimi milijontimi bilijontimi
trilijontimi kvadrilijontimi neštetimi
""".split()
)
_currency_words = set(
"""
evro evra evru evrom evrov evroma evrih evrom evre evri evr eur
cent centa centu cenom centov centoma centih centom cente centi
dolar dolarja dolarji dolarju dolarjem dolarjev dolarjema dolarjih dolarje usd
tolar tolarja tolarji tolarju tolarjem tolarjev tolarjema tolarjih tolarje tol
dinar dinarja dinarji dinarju dinarjem dinarjev dinarjema dinarjih dinarje din
funt funta funti funtu funtom funtov funtoma funtih funte gpb
forint forinta forinti forintu forintom forintov forintoma forintih forinte
zlot zlota zloti zlotu zlotom zlotov zlotoma zlotih zlote
rupij rupija rupiji rupiju rupijem rupijev rupijema rupijih rupije
jen jena jeni jenu jenom jenov jenoma jenih jene
kuna kuni kune kuno kun kunama kunah kunam kunami
marka marki marke markama markah markami
""".split()
)
def like_num(text):
if text.startswith(("+", "-", "±", "~")):
text = text[1:]
text = text.replace(",", "").replace(".", "")
if text.isdigit():
return True
if text.count("/") == 1:
num, denom = text.split("/")
if num.isdigit() and denom.isdigit():
return True
text_lower = text.lower()
if text_lower in _num_words:
return True
if text_lower in _ordinal_words:
return True
return False
def is_currency(text):
text_lower = text.lower()
if text in _currency_words:
return True
for char in text:
if unicodedata.category(char) != "Sc":
return False
return True
LEX_ATTRS = {LIKE_NUM: like_num, IS_CURRENCY: is_currency}

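The currency check above combines a word list with a Unicode-category test; the symbol half of the test in isolation:

import unicodedata


def all_currency_symbols(text: str) -> bool:
    # True only if every character is in the Unicode "Symbol, currency" category.
    return all(unicodedata.category(char) == "Sc" for char in text)


assert all_currency_symbols("€")
assert all_currency_symbols("$£")
assert not all_currency_symbols("10€")  # mixed digits and symbols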

@ -0,0 +1,84 @@
from ..char_classes import (
LIST_ELLIPSES,
LIST_ICONS,
HYPHENS,
LIST_PUNCT,
LIST_QUOTES,
CURRENCY,
UNITS,
PUNCT,
LIST_CURRENCY,
CONCAT_QUOTES,
)
from ..char_classes import CONCAT_QUOTES, ALPHA_LOWER, ALPHA_UPPER, ALPHA
from ..char_classes import merge_chars
from ..punctuation import TOKENIZER_PREFIXES as BASE_TOKENIZER_PREFIXES
INCLUDE_SPECIAL = ["\\+", "\\/", "\\", "\\¯", "\\=", "\\×"] + HYPHENS.split("|")
_prefixes = INCLUDE_SPECIAL + BASE_TOKENIZER_PREFIXES
_suffixes = (
INCLUDE_SPECIAL
+ LIST_PUNCT
+ LIST_ELLIPSES
+ LIST_QUOTES
+ LIST_ICONS
+ [
r"(?<=°[FfCcKk])\.",
r"(?<=[0-9])(?:{c})".format(c=CURRENCY),
r"(?<=[0-9])(?:{u})".format(u=UNITS),
r"(?<=[{al}{e}{p}(?:{q})])\.".format(
al=ALPHA_LOWER, e=r"%²\-\+", q=CONCAT_QUOTES, p=PUNCT
),
r"(?<=[{au}][{au}])\.".format(au=ALPHA_UPPER),
# split initials like J.K. Rowling
r"(?<=[A-Z]\.)(?:[A-Z].)",
]
)
# a list of all suffixes following a hyphen that shouldn't be split off (e.g. BTC-jev)
# source: Obeliks tokenizer - https://github.com/clarinsi/obeliks/blob/master/obeliks/res/TokRulesPart1.txt
CONCAT_QUOTES = CONCAT_QUOTES.replace("'", "")
HYPHENS_PERMITTED = (
"((a)|(evemu)|(evskega)|(i)|(jevega)|(jevska)|(jevskimi)|(jinemu)|(oma)|(ovim)|"
"(ovski)|(e)|(evi)|(evskem)|(ih)|(jevem)|(jevske)|(jevsko)|(jini)|(ov)|(ovima)|"
"(ovskih)|(em)|(evih)|(evskemu)|(ja)|(jevemu)|(jevskega)|(ji)|(jinih)|(ova)|"
"(ovimi)|(ovskim)|(ema)|(evim)|(evski)|(je)|(jevi)|(jevskem)|(jih)|(jinim)|"
"(ove)|(ovo)|(ovskima)|(ev)|(evima)|(evskih)|(jem)|(jevih)|(jevskemu)|(jin)|"
"(jinima)|(ovega)|(ovska)|(ovskimi)|(eva)|(evimi)|(evskim)|(jema)|(jevim)|"
"(jevski)|(jina)|(jinimi)|(ovem)|(ovske)|(ovsko)|(eve)|(evo)|(evskima)|(jev)|"
"(jevima)|(jevskih)|(jine)|(jino)|(ovemu)|(ovskega)|(u)|(evega)|(evska)|"
"(evskimi)|(jeva)|(jevimi)|(jevskim)|(jinega)|(ju)|(ovi)|(ovskem)|(evem)|"
"(evske)|(evsko)|(jeve)|(jevo)|(jevskima)|(jinem)|(om)|(ovih)|(ovskemu)|"
"(ovec)|(ovca)|(ovcu)|(ovcem)|(ovcev)|(ovcema)|(ovcih)|(ovci)|(ovce)|(ovcimi)|"
"(evec)|(evca)|(evcu)|(evcem)|(evcev)|(evcema)|(evcih)|(evci)|(evce)|(evcimi)|"
"(jevec)|(jevca)|(jevcu)|(jevcem)|(jevcev)|(jevcema)|(jevcih)|(jevci)|(jevce)|"
"(jevcimi)|(ovka)|(ovke)|(ovki)|(ovko)|(ovk)|(ovkama)|(ovkah)|(ovkam)|(ovkami)|"
"(evka)|(evke)|(evki)|(evko)|(evk)|(evkama)|(evkah)|(evkam)|(evkami)|(jevka)|"
"(jevke)|(jevki)|(jevko)|(jevk)|(jevkama)|(jevkah)|(jevkam)|(jevkami)|(timi)|"
"(im)|(ima)|(a)|(imi)|(e)|(o)|(ega)|(ti)|(em)|(tih)|(emu)|(tim)|(i)|(tima)|"
"(ih)|(ta)|(te)|(to)|(tega)|(tem)|(temu))"
)
_infixes = (
LIST_ELLIPSES
+ LIST_ICONS
+ [
r"(?<=[0-9])[+\-\*^](?=[0-9-])",
r"(?<=[{al}{q}])\.(?=[{au}{q}])".format(
al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES
),
r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
r"(?<=[{a}0-9])(?:{h})(?!{hp}$)(?=[{a}])".format(
a=ALPHA, h=HYPHENS, hp=HYPHENS_PERMITTED
),
r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA),
]
)
TOKENIZER_PREFIXES = _prefixes
TOKENIZER_SUFFIXES = _suffixes
TOKENIZER_INFIXES = _infixes

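The negative lookahead in the hyphen infix rule is the interesting part: the hyphen only becomes a split point when what follows is not one of the permitted suffixes. A simplified, self-contained check (the character classes and the suffix alternation are heavily trimmed compared to the real ones):

import re

ALPHA = r"A-Za-z"                    # trimmed; the real ALPHA class is far larger
HYPHENS = r"-"                       # trimmed to a plain hyphen
HYPHENS_PERMITTED = r"((jev)|(ja))"  # two of the many permitted suffixes

infix = re.compile(
    r"(?<=[{a}0-9])(?:{h})(?!{hp}$)(?=[{a}])".format(
        a=ALPHA, h=HYPHENS, hp=HYPHENS_PERMITTED
    )
)

assert infix.search("BTC-jev") is None         # permitted suffix: keep one token
assert infix.search("nano-delci") is not None  # ordinary compound: split on "-"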

@ -1,326 +1,84 @@
# Source: https://github.com/stopwords-iso/stopwords-sl # Source: https://github.com/stopwords-iso/stopwords-sl
# Removed various words that are not normally considered stop words, such as months.
STOP_WORDS = set( STOP_WORDS = set(
""" """
a a ali
ali
b b bi bil bila bile bili bilo biti blizu bo bodo bojo bolj bom bomo
bi boste bova boš brez
bil
bila c cel cela celi celo
bile
bili č če često četrta četrtek četrti četrto čez čigav
bilo
biti d da daleč dan danes datum deset deseta deseti deseto devet
blizu deveta deveti deveto do dober dobra dobri dobro dokler dol dolg
bo dolga dolgi dovolj drug druga drugi drugo dva dve
bodo
bolj e eden en ena ene eni enkrat eno etc.
bom
bomo
boste
bova
boš
brez
c
cel
cela
celi
celo
d
da
daleč
dan
danes
do
dober
dobra
dobri
dobro
dokler
dol
dovolj
e
eden
en
ena
ene
eni
enkrat
eno
etc.
f f
g
g. g g. ga ga. gor gospa gospod
ga
ga. h halo
gor
gospa i idr. ii iii in iv ix iz
gospod
h j jaz je ji jih jim jo jutri
halo
i k kadarkoli kaj kajti kako kakor kamor kamorkoli kar karkoli
idr. katerikoli kdaj kdo kdorkoli ker ki kje kjer kjerkoli
ii ko koder koderkoli koga komu kot kratek kratka kratke kratki
iii
in l lahka lahke lahki lahko le lep lepa lepe lepi lepo leto
iv
ix m majhen majhna majhni malce malo manj me med medtem mene
iz mesec mi midva midve mnogo moj moja moje mora morajo moram
j moramo morate moraš morem mu
jaz
je n na nad naj najina najino najmanj naju največ nam narobe
ji nas nato nazaj naš naša naše ne nedavno nedelja nek neka
jih nekaj nekatere nekateri nekatero nekdo neke nekega neki
jim nekje neko nekoga nekoč ni nikamor nikdar nikjer nikoli
jo nič nje njega njegov njegova njegovo njej njemu njen
k njena njeno nji njih njihov njihova njihovo njiju njim
kadarkoli njo njun njuna njuno no nocoj npr.
kaj
kajti o ob oba obe oboje od odprt odprta odprti okoli on
kako onadva one oni onidve osem osma osmi osmo oz.
kakor
kamor p pa pet peta petek peti peto po pod pogosto poleg poln
kamorkoli polna polni polno ponavadi ponedeljek ponovno potem
kar povsod pozdravljen pozdravljeni prav prava prave pravi
karkoli pravo prazen prazna prazno prbl. precej pred prej preko
katerikoli pri pribl. približno primer pripravljen pripravljena
kdaj pripravljeni proti prva prvi prvo
kdo
kdorkoli r ravno redko res reč
ker
ki s saj sam sama same sami samo se sebe sebi sedaj sedem
kje sedma sedmi sedmo sem seveda si sicer skoraj skozi slab sm
kjer so sobota spet sreda srednja srednji sta ste stran stvar sva
kjerkoli
ko š šest šesta šesti šesto štiri
koderkoli
koga t ta tak taka take taki tako takoj tam te tebe tebi tega
komu težak težka težki težko ti tista tiste tisti tisto tj.
kot tja to toda torek tretja tretje tretji tri tu tudi tukaj
l tvoj tvoja tvoje
le
lep
lepa
lepe
lepi
lepo
m
manj
me
med
medtem
mene
mi
midva
midve
mnogo
moj
moja
moje
mora
morajo
moram
moramo
morate
moraš
morem
mu
n
na
nad
naj
najina
najino
najmanj
naju
največ
nam
nas
nato
nazaj
naš
naša
naše
ne
nedavno
nek
neka
nekaj
nekatere
nekateri
nekatero
nekdo
neke
nekega
neki
nekje
neko
nekoga
nekoč
ni
nikamor
nikdar
nikjer
nikoli
nič
nje
njega
njegov
njegova
njegovo
njej
njemu
njen
njena
njeno
nji
njih
njihov
njihova
njihovo
njiju
njim
njo
njun
njuna
njuno
no
nocoj
npr.
o
ob
oba
obe
oboje
od
okoli
on
onadva
one
oni
onidve
oz.
p
pa
po
pod
pogosto
poleg
ponavadi
ponovno
potem
povsod
prbl.
precej
pred
prej
preko
pri
pribl.
približno
proti
r
redko
res
s
saj
sam
sama
same
sami
samo
se
sebe
sebi
sedaj
sem
seveda
si
sicer
skoraj
skozi
smo
so
spet
sta
ste
sva
t
ta
tak
taka
take
taki
tako
takoj
tam
te
tebe
tebi
tega
ti
tista
tiste
tisti
tisto
tj.
tja
to
toda
tu
tudi
tukaj
tvoj
tvoja
tvoje
u u
v
vaju v vaju vam vas vaš vaša vaše ve vedno velik velika veliki
vam veliko vendar ves več vi vidva vii viii visok visoka visoke
vas visoki vsa vsaj vsak vsaka vsakdo vsake vsaki vsakomur vse
vaš vsega vsi vso včasih včeraj
vaša
vaše x
ve
vedno z za zadaj zadnji zakaj zaprta zaprti zaprto zdaj zelo zunaj
vendar
ves ž že
več
vi
vidva
vii
viii
vsa
vsaj
vsak
vsaka
vsakdo
vsake
vsaki
vsakomur
vse
vsega
vsi
vso
včasih
x
z
za
zadaj
zadnji
zakaj
zdaj
zelo
zunaj
č
če
često
čez
čigav
š
ž
že
""".split() """.split()
) )


@ -0,0 +1,272 @@
from typing import Dict, List
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ...symbols import ORTH, NORM
from ...util import update_exc
_exc: Dict[str, List[Dict]] = {}
_other_exc = {
"t.i.": [{ORTH: "t.", NORM: "tako"}, {ORTH: "i.", NORM: "imenovano"}],
"t.j.": [{ORTH: "t.", NORM: "to"}, {ORTH: "j.", NORM: "je"}],
"T.j.": [{ORTH: "T.", NORM: "to"}, {ORTH: "j.", NORM: "je"}],
"d.o.o.": [
{ORTH: "d.", NORM: "družba"},
{ORTH: "o.", NORM: "omejeno"},
{ORTH: "o.", NORM: "odgovornostjo"},
],
"D.O.O.": [
{ORTH: "D.", NORM: "družba"},
{ORTH: "O.", NORM: "omejeno"},
{ORTH: "O.", NORM: "odgovornostjo"},
],
"d.n.o.": [
{ORTH: "d.", NORM: "družba"},
{ORTH: "n.", NORM: "neomejeno"},
{ORTH: "o.", NORM: "odgovornostjo"},
],
"D.N.O.": [
{ORTH: "D.", NORM: "družba"},
{ORTH: "N.", NORM: "neomejeno"},
{ORTH: "O.", NORM: "odgovornostjo"},
],
"d.d.": [{ORTH: "d.", NORM: "delniška"}, {ORTH: "d.", NORM: "družba"}],
"D.D.": [{ORTH: "D.", NORM: "delniška"}, {ORTH: "D.", NORM: "družba"}],
"s.p.": [{ORTH: "s.", NORM: "samostojni"}, {ORTH: "p.", NORM: "podjetnik"}],
"S.P.": [{ORTH: "S.", NORM: "samostojni"}, {ORTH: "P.", NORM: "podjetnik"}],
"l.r.": [{ORTH: "l.", NORM: "lastno"}, {ORTH: "r.", NORM: "ročno"}],
"le-te": [{ORTH: "le"}, {ORTH: "-"}, {ORTH: "te"}],
"Le-te": [{ORTH: "Le"}, {ORTH: "-"}, {ORTH: "te"}],
"le-ti": [{ORTH: "le"}, {ORTH: "-"}, {ORTH: "ti"}],
"Le-ti": [{ORTH: "Le"}, {ORTH: "-"}, {ORTH: "ti"}],
"le-to": [{ORTH: "le"}, {ORTH: "-"}, {ORTH: "to"}],
"Le-to": [{ORTH: "Le"}, {ORTH: "-"}, {ORTH: "to"}],
"le-ta": [{ORTH: "le"}, {ORTH: "-"}, {ORTH: "ta"}],
"Le-ta": [{ORTH: "Le"}, {ORTH: "-"}, {ORTH: "ta"}],
"le-tega": [{ORTH: "le"}, {ORTH: "-"}, {ORTH: "tega"}],
"Le-tega": [{ORTH: "Le"}, {ORTH: "-"}, {ORTH: "tega"}],
}
_exc.update(_other_exc)
for exc_data in [
{ORTH: "adm.", NORM: "administracija"},
{ORTH: "aer.", NORM: "aeronavtika"},
{ORTH: "agr.", NORM: "agronomija"},
{ORTH: "amer.", NORM: "ameriško"},
{ORTH: "anat.", NORM: "anatomija"},
{ORTH: "angl.", NORM: "angleški"},
{ORTH: "ant.", NORM: "antonim"},
{ORTH: "antr.", NORM: "antropologija"},
{ORTH: "apr.", NORM: "april"},
{ORTH: "arab.", NORM: "arabsko"},
{ORTH: "arheol.", NORM: "arheologija"},
{ORTH: "arhit.", NORM: "arhitektura"},
{ORTH: "avg.", NORM: "avgust"},
{ORTH: "avstr.", NORM: "avstrijsko"},
{ORTH: "avt.", NORM: "avtomobilizem"},
{ORTH: "bibl.", NORM: "biblijsko"},
{ORTH: "biokem.", NORM: "biokemija"},
{ORTH: "biol.", NORM: "biologija"},
{ORTH: "bolg.", NORM: "bolgarski"},
{ORTH: "bot.", NORM: "botanika"},
{ORTH: "cit.", NORM: "citat"},
{ORTH: "daj.", NORM: "dajalnik"},
{ORTH: "del.", NORM: "deležnik"},
{ORTH: "ed.", NORM: "ednina"},
{ORTH: "etn.", NORM: "etnografija"},
{ORTH: "farm.", NORM: "farmacija"},
{ORTH: "filat.", NORM: "filatelija"},
{ORTH: "filoz.", NORM: "filozofija"},
{ORTH: "fin.", NORM: "finančništvo"},
{ORTH: "fiz.", NORM: "fizika"},
{ORTH: "fot.", NORM: "fotografija"},
{ORTH: "fr.", NORM: "francoski"},
{ORTH: "friz.", NORM: "frizerstvo"},
{ORTH: "gastr.", NORM: "gastronomija"},
{ORTH: "geogr.", NORM: "geografija"},
{ORTH: "geol.", NORM: "geologija"},
{ORTH: "geom.", NORM: "geometrija"},
{ORTH: "germ.", NORM: "germanski"},
{ORTH: "gl.", NORM: "glej"},
{ORTH: "glag.", NORM: "glagolski"},
{ORTH: "glasb.", NORM: "glasba"},
{ORTH: "gled.", NORM: "gledališče"},
{ORTH: "gost.", NORM: "gostinstvo"},
{ORTH: "gozd.", NORM: "gozdarstvo"},
{ORTH: "gr.", NORM: "grški"},
{ORTH: "grad.", NORM: "gradbeništvo"},
{ORTH: "hebr.", NORM: "hebrejsko"},
{ORTH: "hrv.", NORM: "hrvaško"},
{ORTH: "ide.", NORM: "indoevropsko"},
{ORTH: "igr.", NORM: "igre"},
{ORTH: "im.", NORM: "imenovalnik"},
{ORTH: "iron.", NORM: "ironično"},
{ORTH: "it.", NORM: "italijanski"},
{ORTH: "itd.", NORM: "in tako dalje"},
{ORTH: "itn.", NORM: "in tako naprej"},
{ORTH: "ipd.", NORM: "in podobno"},
{ORTH: "jap.", NORM: "japonsko"},
{ORTH: "jul.", NORM: "julij"},
{ORTH: "jun.", NORM: "junij"},
{ORTH: "kit.", NORM: "kitajsko"},
{ORTH: "knj.", NORM: "knjižno"},
{ORTH: "knjiž.", NORM: "knjižno"},
{ORTH: "kor.", NORM: "koreografija"},
{ORTH: "lat.", NORM: "latinski"},
{ORTH: "les.", NORM: "lesna stroka"},
{ORTH: "lingv.", NORM: "lingvistika"},
{ORTH: "lit.", NORM: "literarni"},
{ORTH: "ljubk.", NORM: "ljubkovalno"},
{ORTH: "lov.", NORM: "lovstvo"},
{ORTH: "m.", NORM: "moški"},
{ORTH: "mak.", NORM: "makedonski"},
{ORTH: "mar.", NORM: "marec"},
{ORTH: "mat.", NORM: "matematika"},
{ORTH: "med.", NORM: "medicina"},
{ORTH: "meh.", NORM: "mehiško"},
{ORTH: "mest.", NORM: "mestnik"},
{ORTH: "mdr.", NORM: "med drugim"},
{ORTH: "min.", NORM: "mineralogija"},
{ORTH: "mitol.", NORM: "mitologija"},
{ORTH: "mn.", NORM: "množina"},
{ORTH: "mont.", NORM: "montanistika"},
{ORTH: "muz.", NORM: "muzikologija"},
{ORTH: "nam.", NORM: "namenilnik"},
{ORTH: "nar.", NORM: "narečno"},
{ORTH: "nav.", NORM: "navadno"},
{ORTH: "nedol.", NORM: "nedoločnik"},
{ORTH: "nedov.", NORM: "nedovršni"},
{ORTH: "neprav.", NORM: "nepravilno"},
{ORTH: "nepreh.", NORM: "neprehodno"},
{ORTH: "neskl.", NORM: "nesklonljiv(o)"},
{ORTH: "nestrok.", NORM: "nestrokovno"},
{ORTH: "num.", NORM: "numizmatika"},
{ORTH: "npr.", NORM: "na primer"},
{ORTH: "obrt.", NORM: "obrtništvo"},
{ORTH: "okt.", NORM: "oktober"},
{ORTH: "or.", NORM: "orodnik"},
{ORTH: "os.", NORM: "oseba"},
{ORTH: "otr.", NORM: "otroško"},
{ORTH: "oz.", NORM: "oziroma"},
{ORTH: "pal.", NORM: "paleontologija"},
{ORTH: "papir.", NORM: "papirništvo"},
{ORTH: "ped.", NORM: "pedagogika"},
{ORTH: "pisar.", NORM: "pisarniško"},
{ORTH: "pog.", NORM: "pogovorno"},
{ORTH: "polit.", NORM: "politika"},
{ORTH: "polj.", NORM: "poljsko"},
{ORTH: "poljud.", NORM: "poljudno"},
{ORTH: "preg.", NORM: "pregovor"},
{ORTH: "preh.", NORM: "prehodno"},
{ORTH: "pren.", NORM: "preneseno"},
{ORTH: "prid.", NORM: "pridevnik"},
{ORTH: "prim.", NORM: "primerjaj"},
{ORTH: "prisl.", NORM: "prislov"},
{ORTH: "psih.", NORM: "psihologija"},
{ORTH: "psiht.", NORM: "psihiatrija"},
{ORTH: "rad.", NORM: "radiotehnika"},
{ORTH: "rač.", NORM: "računalništvo"},
{ORTH: "rib.", NORM: "ribištvo"},
{ORTH: "rod.", NORM: "rodilnik"},
{ORTH: "rus.", NORM: "rusko"},
{ORTH: "s.", NORM: "srednji"},
{ORTH: "sam.", NORM: "samostalniški"},
{ORTH: "sed.", NORM: "sedanjik"},
{ORTH: "sep.", NORM: "september"},
{ORTH: "slabš.", NORM: "slabšalno"},
{ORTH: "slovan.", NORM: "slovansko"},
{ORTH: "slovaš.", NORM: "slovaško"},
{ORTH: "srb.", NORM: "srbsko"},
{ORTH: "star.", NORM: "starinsko"},
{ORTH: "stil.", NORM: "stilno"},
{ORTH: "sv.", NORM: "svet(i)"},
{ORTH: "teh.", NORM: "tehnika"},
{ORTH: "tisk.", NORM: "tiskarstvo"},
{ORTH: "tj.", NORM: "to je"},
{ORTH: "tož.", NORM: "tožilnik"},
{ORTH: "trg.", NORM: "trgovina"},
{ORTH: "ukr.", NORM: "ukrajinski"},
{ORTH: "um.", NORM: "umetnost"},
{ORTH: "vel.", NORM: "velelnik"},
{ORTH: "vet.", NORM: "veterina"},
{ORTH: "vez.", NORM: "veznik"},
{ORTH: "vn.", NORM: "visokonemško"},
{ORTH: "voj.", NORM: "vojska"},
{ORTH: "vrtn.", NORM: "vrtnarstvo"},
{ORTH: "vulg.", NORM: "vulgarno"},
{ORTH: "vznes.", NORM: "vzneseno"},
{ORTH: "zal.", NORM: "založništvo"},
{ORTH: "zastar.", NORM: "zastarelo"},
{ORTH: "zgod.", NORM: "zgodovina"},
{ORTH: "zool.", NORM: "zoologija"},
{ORTH: "čeb.", NORM: "čebelarstvo"},
{ORTH: "češ.", NORM: "češki"},
{ORTH: "člov.", NORM: "človeškost"},
{ORTH: "šah.", NORM: "šahovski"},
{ORTH: "šalj.", NORM: "šaljivo"},
{ORTH: "šp.", NORM: "španski"},
{ORTH: "špan.", NORM: "špansko"},
{ORTH: "šport.", NORM: "športni"},
{ORTH: "štev.", NORM: "števnik"},
{ORTH: "šved.", NORM: "švedsko"},
{ORTH: "švic.", NORM: "švicarsko"},
{ORTH: "ž.", NORM: "ženski"},
{ORTH: "žarg.", NORM: "žargonsko"},
{ORTH: "žel.", NORM: "železnica"},
{ORTH: "živ.", NORM: "živost"},
]:
_exc[exc_data[ORTH]] = [exc_data]
abbrv = """
Co. Ch. DIPL. DR. Dr. Ev. Inc. Jr. Kr. Mag. M. MR. Mr. Mt. Murr. Npr. OZ.
Opr. Osn. Prim. Roj. ST. Sim. Sp. Sred. St. Sv. Škofl. Tel. UR. Zb.
a. aa. ab. abc. abit. abl. abs. abt. acc. accel. add. adj. adv. aet. afr. akad. al. alban. all. alleg.
alp. alt. alter. alžir. am. an. andr. ang. anh. anon. ans. antrop. apoc. app. approx. apt. ar. arc. arch.
arh. arr. as. asist. assist. assoc. asst. astr. attn. aug. avstral. az. b. bab. bal. bbl. bd. belg. bioinf.
biomed. bk. bl. bn. borg. bp. br. braz. brit. bros. broš. bt. bu. c. ca. cal. can. cand. cantab. cap. capt.
cat. cath. cc. cca. cd. cdr. cdre. cent. cerkv. cert. cf. cfr. ch. chap. chem. chr. chs. cic. circ. civ. cl.
cm. cmd. cnr. co. cod. col. coll. colo. com. comp. con. conc. cond. conn. cons. cont. coop. corr. cost. cp.
cpl. cr. crd. cres. cresc. ct. cu. d. dan. dat. davč. ddr. dec. ded. def. dem. dent. dept. dia. dip. dipl.
dir. disp. diss. div. do. doc. dok. dol. doo. dop. dott. dr. dram. druž. družb. drž. dt. duh. dur. dvr. dwt. e.
ea. ecc. eccl. eccles. econ. edn. egipt. egr. ekon. eksp. el. em. enc. eng. eo. ep. err. esp. esq. est.
et. etc. etnogr. etnol. ev. evfem. evr. ex. exc. excl. exp. expl. ext. exx. f. fa. facs. fak. faks. fas.
fasc. fco. fcp. feb. febr. fec. fed. fem. ff. fff. fid. fig. fil. film. fiziol. fiziot. flam. fm. fo. fol. folk.
frag. fran. franc. fsc. g. ga. gal. gdč. ge. gen. geod. geog. geotehnol. gg. gimn. glas. glav. gnr. go. gor.
gosp. gp. graf. gram. gren. grš. gs. h. hab. hf. hist. ho. hort. i. ia. ib. ibid. id. idr. idridr. ill. imen.
imp. impf. impr. in. inc. incl. ind. indus. inf. inform. ing. init. ins. int. inv. inšp. inštr. inž. is. islam.
ist. ital. iur. iz. izbr. izd. izg. izgr. izr. izv. j. jak. jam. jan. jav. je. jez. jr. jsl. jud. jug.
jugoslovan. jur. juž. jv. jz. k. kal. kan. kand. kat. kdo. kem. kip. kmet. kol. kom. komp. konf. kont. kost. kov.
kp. kpfw. kr. kraj. krat. kub. kult. kv. kval. l. la. lab. lb. ld. let. lib. lik. litt. lj. ljud. ll. loc. log.
loč. lt. ma. madž. mag. manag. manjš. masc. mass. mater. max. maxmax. mb. md. mech. medic. medij. medn.
mehč. mem. menedž. mes. mess. metal. meteor. meteorol. mex. mi. mikr. mil. minn. mio. misc. miss. mit. mk.
mkt. ml. mlad. mlle. mlr. mm. mme. množ. mo. moj. moš. možn. mr. mrd. mrs. ms. msc. msgr. mt. murr. mus. mut.
n. na. nad. nadalj. nadom. nagl. nakl. namer. nan. naniz. nasl. nat. navt. nač. ned. nem. nik. nizoz. nm. nn.
no. nom. norv. notr. nov. novogr. ns. o. ob. obd. obj. oblač. obl. oblik. obr. obraz. obs. obst. obt. obč. oc.
oct. od. odd. odg. odn. odst. odv. oec. off. ok. okla. okr. ont. oo. op. opis. opp. opr. orch. ord. ore. oreg.
org. orient. orig. ork. ort. oseb. osn. ot. ozir. ošk. p. pag. par. para. parc. parl. part. past. pat. pdk.
pen. perf. pert. perz. pesn. pet. pev. pf. pfc. ph. pharm. phil. pis. pl. po. pod. podr. podaljš. pogl. pogoj. pojm.
pok. pokr. pol. poljed. poljub. polu. pom. pomen. pon. ponov. pop. por. port. pos. posl. posn. pov. pp. ppl. pr.
praet. prav. pravopis. pravosl. preb. pred. predl. predm. predp. preds. pref. pregib. prel. prem. premen. prep.
pres. pret. prev. pribl. prih. pril. primerj. primor. prip. pripor. prir. prist. priv. proc. prof. prog. proiz.
prom. pron. prop. prot. protest. prov. ps. pss. pt. publ. pz. q. qld. qu. quad. que. r. racc. rastl. razgl.
razl. razv. rd. red. ref. reg. rel. relig. rep. repr. rer. resp. rest. ret. rev. revol. rež. rim. rist. rkp. rm.
roj. rom. romun. rp. rr. rt. rud. ruš. ry. sal. samogl. san. sc. scen. sci. scr. sdv. seg. sek. sen. sept. ser.
sev. sg. sgt. sh. sig. sigg. sign. sim. sin. sing. sinh. skand. skl. sklad. sklanj. sklep. skr. sl. slik. slov.
slovak. slovn. sn. so. sob. soc. sociol. sod. sopomen. sopr. sor. sov. sovj. sp. spec. spl. spr. spreg. sq. sr.
sre. sred. sredoz. srh. ss. ssp. st. sta. stan. stanstar. stcsl. ste. stim. stol. stom. str. stroj. strok. stsl.
stud. sup. supl. suppl. svet. sz. t. tab. tech. ted. tehn. tehnol. tek. teks. tekst. tel. temp. ten. teol. ter.
term. test. th. theol. tim. tip. tisočl. tit. tl. tol. tolmač. tom. tor. tov. tr. trad. traj. trans. tren.
trib. tril. trop. trp. trž. ts. tt. tu. tur. turiz. tvor. tvorb. . u. ul. umet. un. univ. up. upr. ur. urad.
us. ust. utr. v. va. val. var. varn. ven. ver. verb. vest. vezal. vic. vis. viv. viz. viš. vod. vok. vol. vpr.
vrst. vrstil. vs. vv. vzd. vzg. vzh. vzor. w. wed. wg. wk. x. y. z. zah. zaim. zak. zap. zasl. zavar. zač. zb.
združ. zg. zn. znan. znanstv. zoot. zun. zv. zvd. á. é. ć. č. čas. čet. čl. člen. čustv. đ. ľ. ł. ş. ŠT. š. šir.
škofl. škot. šol. št. števil. štud. ů. ű. žen. žival.
""".split()
for orth in abbrv:
_exc[orth] = [{ORTH: orth}]
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
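For illustration only (not part of the diff): a minimal sketch of how the new Slovenian exceptions surface at runtime, assuming a blank Slovenian pipeline built from this branch. The example sentence is made up.

import spacy

nlp = spacy.blank("sl")
doc = nlp("To je t.i. vzorec, npr. za neko d.o.o.")
# "t.i." should split into "t." + "i." with norms "tako" + "imenovano",
# while "npr." stays a single token whose norm expands to "na primer"
print([(t.text, t.norm_) for t in doc])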
@ -17,10 +17,6 @@ URL_PATTERN = (
r"(?:\S+(?::\S*)?@)?" r"(?:\S+(?::\S*)?@)?"
r"(?:" r"(?:"
# IP address exclusion # IP address exclusion
# private & local networks
r"(?!(?:10|127)(?:\.\d{1,3}){3})"
r"(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})"
r"(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})"
# IP address dotted notation octets # IP address dotted notation octets
# excludes loopback network 0.0.0.0 # excludes loopback network 0.0.0.0
# excludes reserved space >= 224.0.0.0 # excludes reserved space >= 224.0.0.0
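For context, the deleted lines were negative look-aheads that kept URLs on private and link-local addresses from being recognised by the tokenizer's URL pattern. A rough way to inspect the new behaviour (hypothetical snippet; the resulting token boundaries also depend on the surrounding prefix/suffix rules):

import spacy

nlp = spacy.blank("en")
# with the exclusions removed, an address in the 192.168.x.x range is no longer
# rejected outright by the URL pattern
print([t.text for t in nlp("Open http://192.168.0.1/admin to configure the router.")])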
@ -6,6 +6,8 @@ from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .stop_words import STOP_WORDS from .stop_words import STOP_WORDS
from .lex_attrs import LEX_ATTRS from .lex_attrs import LEX_ATTRS
from .lemmatizer import UkrainianLemmatizer from .lemmatizer import UkrainianLemmatizer
from ..punctuation import COMBINING_DIACRITICS_TOKENIZER_INFIXES
from ..punctuation import COMBINING_DIACRITICS_TOKENIZER_SUFFIXES
from ...language import Language, BaseDefaults from ...language import Language, BaseDefaults
@ -13,6 +15,8 @@ class UkrainianDefaults(BaseDefaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS tokenizer_exceptions = TOKENIZER_EXCEPTIONS
lex_attr_getters = LEX_ATTRS lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS stop_words = STOP_WORDS
suffixes = COMBINING_DIACRITICS_TOKENIZER_SUFFIXES
infixes = COMBINING_DIACRITICS_TOKENIZER_INFIXES
class Ukrainian(Language): class Ukrainian(Language):
@ -25,7 +29,7 @@ class Ukrainian(Language):
assigns=["token.lemma"], assigns=["token.lemma"],
default_config={ default_config={
"model": None, "model": None,
"mode": "pymorphy2", "mode": "pymorphy3",
"overwrite": False, "overwrite": False,
"scorer": {"@scorers": "spacy.lemmatizer_scorer.v1"}, "scorer": {"@scorers": "spacy.lemmatizer_scorer.v1"},
}, },
@ -14,7 +14,7 @@ class UkrainianLemmatizer(RussianLemmatizer):
model: Optional[Model], model: Optional[Model],
name: str = "lemmatizer", name: str = "lemmatizer",
*, *,
mode: str = "pymorphy2", mode: str = "pymorphy3",
overwrite: bool = False, overwrite: bool = False,
scorer: Optional[Callable] = lemmatizer_score, scorer: Optional[Callable] = lemmatizer_score,
) -> None: ) -> None:
@ -29,6 +29,17 @@ class UkrainianLemmatizer(RussianLemmatizer):
) from None ) from None
if getattr(self, "_morph", None) is None: if getattr(self, "_morph", None) is None:
self._morph = MorphAnalyzer(lang="uk") self._morph = MorphAnalyzer(lang="uk")
elif mode == "pymorphy3":
try:
from pymorphy3 import MorphAnalyzer
except ImportError:
raise ImportError(
"The Ukrainian lemmatizer mode 'pymorphy3' requires the "
"pymorphy3 library and dictionaries. Install them with: "
"pip install pymorphy3 pymorphy3-dicts-uk"
) from None
if getattr(self, "_morph", None) is None:
self._morph = MorphAnalyzer(lang="uk")
super().__init__( super().__init__(
vocab, model, name, mode=mode, overwrite=overwrite, scorer=scorer vocab, model, name, mode=mode, overwrite=overwrite, scorer=scorer
) )
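A small usage sketch for the new default mode; it assumes pymorphy3 and pymorphy3-dicts-uk are installed, as the error message above spells out.

import spacy

nlp = spacy.blank("uk")
# "pymorphy3" is now the default, so the config override is shown only for clarity
nlp.add_pipe("lemmatizer", config={"mode": "pymorphy3"})
doc = nlp("кішки сплять")
print([token.lemma_ for token in doc])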
@ -1,4 +1,4 @@
from typing import Iterator, Optional, Any, Dict, Callable, Iterable from typing import Iterator, Optional, Any, Dict, Callable, Iterable, Collection
from typing import Union, Tuple, List, Set, Pattern, Sequence from typing import Union, Tuple, List, Set, Pattern, Sequence
from typing import NoReturn, TYPE_CHECKING, TypeVar, cast, overload from typing import NoReturn, TYPE_CHECKING, TypeVar, cast, overload
@ -465,6 +465,8 @@ class Language:
""" """
if not isinstance(name, str): if not isinstance(name, str):
raise ValueError(Errors.E963.format(decorator="factory")) raise ValueError(Errors.E963.format(decorator="factory"))
if "." in name:
raise ValueError(Errors.E853.format(name=name))
if not isinstance(default_config, dict): if not isinstance(default_config, dict):
err = Errors.E962.format( err = Errors.E962.format(
style="default config", name=name, cfg_type=type(default_config) style="default config", name=name, cfg_type=type(default_config)
@ -543,8 +545,11 @@ class Language:
DOCS: https://spacy.io/api/language#component DOCS: https://spacy.io/api/language#component
""" """
if name is not None and not isinstance(name, str): if name is not None:
raise ValueError(Errors.E963.format(decorator="component")) if not isinstance(name, str):
raise ValueError(Errors.E963.format(decorator="component"))
if "." in name:
raise ValueError(Errors.E853.format(name=name))
component_name = name if name is not None else util.get_object_name(func) component_name = name if name is not None else util.get_object_name(func)
def add_component(component_func: "Pipe") -> Callable: def add_component(component_func: "Pipe") -> Callable:
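In practice the new check means that factory and component names may no longer contain a dot, since dots are reserved for nested config sections. A quick hypothetical illustration:

from spacy.language import Language

try:
    @Language.component("my.component")      # rejected: "." in the name (Errors.E853)
    def bad_component(doc):
        return doc
except ValueError as err:
    print(err)

@Language.component("my_component")          # fine
def good_component(doc):
    return doc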
@ -1023,8 +1028,8 @@ class Language:
raise ValueError(Errors.E109.format(name=name)) from e raise ValueError(Errors.E109.format(name=name)) from e
except Exception as e: except Exception as e:
error_handler(name, proc, [doc], e) error_handler(name, proc, [doc], e)
if doc is None: if not isinstance(doc, Doc):
raise ValueError(Errors.E005.format(name=name)) raise ValueError(Errors.E005.format(name=name, returned_type=type(doc)))
return doc return doc
def disable_pipes(self, *names) -> "DisabledPipes": def disable_pipes(self, *names) -> "DisabledPipes":
@ -1058,7 +1063,7 @@ class Language:
""" """
if enable is None and disable is None: if enable is None and disable is None:
raise ValueError(Errors.E991) raise ValueError(Errors.E991)
if disable is not None and isinstance(disable, str): if isinstance(disable, str):
disable = [disable] disable = [disable]
if enable is not None: if enable is not None:
if isinstance(enable, str): if isinstance(enable, str):
@ -1693,8 +1698,9 @@ class Language:
config: Union[Dict[str, Any], Config] = {}, config: Union[Dict[str, Any], Config] = {},
*, *,
vocab: Union[Vocab, bool] = True, vocab: Union[Vocab, bool] = True,
disable: Iterable[str] = SimpleFrozenList(), disable: Union[str, Iterable[str]] = SimpleFrozenList(),
exclude: Iterable[str] = SimpleFrozenList(), enable: Union[str, Iterable[str]] = SimpleFrozenList(),
exclude: Union[str, Iterable[str]] = SimpleFrozenList(),
meta: Dict[str, Any] = SimpleFrozenDict(), meta: Dict[str, Any] = SimpleFrozenDict(),
auto_fill: bool = True, auto_fill: bool = True,
validate: bool = True, validate: bool = True,
@ -1705,10 +1711,12 @@ class Language:
config (Dict[str, Any] / Config): The loaded config. config (Dict[str, Any] / Config): The loaded config.
vocab (Vocab): A Vocab object. If True, a vocab is created. vocab (Vocab): A Vocab object. If True, a vocab is created.
disable (Iterable[str]): Names of pipeline components to disable. disable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to disable.
Disabled pipes will be loaded but they won't be run unless you Disabled pipes will be loaded but they won't be run unless you
explicitly enable them by calling nlp.enable_pipe. explicitly enable them by calling nlp.enable_pipe.
exclude (Iterable[str]): Names of pipeline components to exclude. enable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to enable. All other
pipes will be disabled (and can be enabled using `nlp.enable_pipe`).
exclude (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to exclude.
Excluded components won't be loaded. Excluded components won't be loaded.
meta (Dict[str, Any]): Meta overrides for nlp.meta. meta (Dict[str, Any]): Meta overrides for nlp.meta.
auto_fill (bool): Automatically fill in missing values in config based auto_fill (bool): Automatically fill in missing values in config based
@ -1719,6 +1727,12 @@ class Language:
DOCS: https://spacy.io/api/language#from_config DOCS: https://spacy.io/api/language#from_config
""" """
if isinstance(disable, str):
disable = [disable]
if isinstance(enable, str):
enable = [enable]
if isinstance(exclude, str):
exclude = [exclude]
if auto_fill: if auto_fill:
config = Config( config = Config(
cls.default_config, section_order=CONFIG_SECTION_ORDER cls.default_config, section_order=CONFIG_SECTION_ORDER
@ -1861,8 +1875,15 @@ class Language:
# Restore the original vocab after sourcing if necessary # Restore the original vocab after sourcing if necessary
if vocab_b is not None: if vocab_b is not None:
nlp.vocab.from_bytes(vocab_b) nlp.vocab.from_bytes(vocab_b)
disabled_pipes = [*config["nlp"]["disabled"], *disable]
# Resolve disabled/enabled settings.
disabled_pipes = cls._resolve_component_status(
[*config["nlp"]["disabled"], *disable],
[*config["nlp"].get("enabled", []), *enable],
config["nlp"]["pipeline"],
)
nlp._disabled = set(p for p in disabled_pipes if p not in exclude) nlp._disabled = set(p for p in disabled_pipes if p not in exclude)
nlp.batch_size = config["nlp"]["batch_size"] nlp.batch_size = config["nlp"]["batch_size"]
nlp.config = filled if auto_fill else config nlp.config = filled if auto_fill else config
if after_pipeline_creation is not None: if after_pipeline_creation is not None:
@ -2014,6 +2035,46 @@ class Language:
serializers["vocab"] = lambda p: self.vocab.to_disk(p, exclude=exclude) serializers["vocab"] = lambda p: self.vocab.to_disk(p, exclude=exclude)
util.to_disk(path, serializers, exclude) util.to_disk(path, serializers, exclude)
@staticmethod
def _resolve_component_status(
disable: Union[str, Iterable[str]],
enable: Union[str, Iterable[str]],
pipe_names: Iterable[str],
) -> Tuple[str, ...]:
"""Derives whether (1) `disable` and `enable` values are consistent and (2)
resolves those to a single set of disabled components. Raises an error in
case of inconsistency.
disable (Union[str, Iterable[str]]): Name(s) of component(s) or serialization fields to disable.
enable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to enable.
pipe_names (Iterable[str]): Names of all pipeline components.
RETURNS (Tuple[str, ...]): Names of components to exclude from pipeline w.r.t.
specified includes and excludes.
"""
if isinstance(disable, str):
disable = [disable]
to_disable = disable
if enable:
if isinstance(enable, str):
enable = [enable]
to_disable = [
pipe_name for pipe_name in pipe_names if pipe_name not in enable
]
if disable and disable != to_disable:
raise ValueError(
Errors.E1042.format(
arg1="enable",
arg2="disable",
arg1_values=enable,
arg2_values=disable,
)
)
return tuple(to_disable)
def from_disk( def from_disk(
self, self,
path: Union[str, Path], path: Union[str, Path],
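Taken together, `disable`, `enable` and `exclude` now accept a single string as well as a list, and `enable` is reconciled against `disable` so that contradictory settings raise E1042. spacy.load exposes the same arguments; a hedged sketch, assuming the en_core_web_sm package is installed:

import spacy

# a single component name no longer needs to be wrapped in a list
nlp = spacy.load("en_core_web_sm", disable="parser")

# enabling only "ner" disables every other component; the disabled pipes are
# still loaded and can be switched back on later
nlp = spacy.load("en_core_web_sm", enable="ner")
nlp.enable_pipe("tagger")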
@ -82,6 +82,10 @@ cdef class DependencyMatcher:
"$-": self._imm_left_sib, "$-": self._imm_left_sib,
"$++": self._right_sib, "$++": self._right_sib,
"$--": self._left_sib, "$--": self._left_sib,
">++": self._right_child,
">--": self._left_child,
"<++": self._right_parent,
"<--": self._left_parent,
} }
def __reduce__(self): def __reduce__(self):
@ -161,9 +165,9 @@ cdef class DependencyMatcher:
on_match (callable): Optional callback executed on match. on_match (callable): Optional callback executed on match.
""" """
if on_match is not None and not hasattr(on_match, "__call__"): if on_match is not None and not hasattr(on_match, "__call__"):
raise ValueError(Errors.E171.format(arg_type=type(on_match))) raise ValueError(Errors.E171.format(name="DependencyMatcher", arg_type=type(on_match)))
if patterns is None or not isinstance(patterns, List): # old API if patterns is None or not isinstance(patterns, List):
raise ValueError(Errors.E948.format(arg_type=type(patterns))) raise ValueError(Errors.E948.format(name="DependencyMatcher", arg_type=type(patterns)))
for pattern in patterns: for pattern in patterns:
if len(pattern) == 0: if len(pattern) == 0:
raise ValueError(Errors.E012.format(key=key)) raise ValueError(Errors.E012.format(key=key))
@ -423,6 +427,22 @@ cdef class DependencyMatcher:
def _left_sib(self, doc, node): def _left_sib(self, doc, node):
return [doc[child.i] for child in doc[node].head.children if child.i < node] return [doc[child.i] for child in doc[node].head.children if child.i < node]
def _right_child(self, doc, node):
return [doc[child.i] for child in doc[node].children if child.i > node]
def _left_child(self, doc, node):
return [doc[child.i] for child in doc[node].children if child.i < node]
def _right_parent(self, doc, node):
if doc[node].head.i > node:
return [doc[node].head]
return []
def _left_parent(self, doc, node):
if doc[node].head.i < node:
return [doc[node].head]
return []
def _normalize_key(self, key): def _normalize_key(self, key):
if isinstance(key, str): if isinstance(key, str):
return self.vocab.strings.add(key) return self.vocab.strings.add(key)
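The four new relation operators make attachment direction matchable: ">++"/">--" select an immediate child to the right/left of the head, "<++"/"<--" select a head to the right/left of the child. A small sketch (the model name is an assumption; any pipeline with a parser works):

import spacy
from spacy.matcher import DependencyMatcher

nlp = spacy.load("en_core_web_sm")   # assumed installed
matcher = DependencyMatcher(nlp.vocab)
pattern = [
    {"RIGHT_ID": "verb", "RIGHT_ATTRS": {"POS": "VERB"}},
    # ">++": an immediate child of "verb" that sits to the verb's right
    {"LEFT_ID": "verb", "REL_OP": ">++", "RIGHT_ID": "object", "RIGHT_ATTRS": {"DEP": "dobj"}},
]
matcher.add("VERB_WITH_RIGHT_OBJECT", [pattern])
doc = nlp("She bought a house.")
print(matcher(doc))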
@ -1,5 +1,5 @@
# cython: infer_types=True, cython: profile=True # cython: infer_types=True, cython: profile=True
from typing import List from typing import List, Iterable
from libcpp.vector cimport vector from libcpp.vector cimport vector
from libc.stdint cimport int32_t, int8_t from libc.stdint cimport int32_t, int8_t
@ -86,10 +86,14 @@ cdef class Matcher:
is a dictionary mapping attribute IDs to values, and optionally a is a dictionary mapping attribute IDs to values, and optionally a
quantifier operator under the key "op". The available quantifiers are: quantifier operator under the key "op". The available quantifiers are:
'!': Negate the pattern, by requiring it to match exactly 0 times. '!': Negate the pattern, by requiring it to match exactly 0 times.
'?': Make the pattern optional, by allowing it to match 0 or 1 times. '?': Make the pattern optional, by allowing it to match 0 or 1 times.
'+': Require the pattern to match 1 or more times. '+': Require the pattern to match 1 or more times.
'*': Allow the pattern to zero or more times. '*': Allow the pattern to zero or more times.
'{n}': Require the pattern to match exactly _n_ times.
'{n,m}': Require the pattern to match at least _n_ but not more than _m_ times.
'{n,}': Require the pattern to match at least _n_ times.
'{,m}': Require the pattern to match at most _m_ times.
The + and * operators return all possible matches (not just the greedy The + and * operators return all possible matches (not just the greedy
ones). However, the "greedy" argument can filter the final matches ones). However, the "greedy" argument can filter the final matches
@ -106,9 +110,9 @@ cdef class Matcher:
""" """
errors = {} errors = {}
if on_match is not None and not hasattr(on_match, "__call__"): if on_match is not None and not hasattr(on_match, "__call__"):
raise ValueError(Errors.E171.format(arg_type=type(on_match))) raise ValueError(Errors.E171.format(name="Matcher", arg_type=type(on_match)))
if patterns is None or not isinstance(patterns, List): # old API if patterns is None or not isinstance(patterns, List):
raise ValueError(Errors.E948.format(arg_type=type(patterns))) raise ValueError(Errors.E948.format(name="Matcher", arg_type=type(patterns)))
if greedy is not None and greedy not in ["FIRST", "LONGEST"]: if greedy is not None and greedy not in ["FIRST", "LONGEST"]:
raise ValueError(Errors.E947.format(expected=["FIRST", "LONGEST"], arg=greedy)) raise ValueError(Errors.E947.format(expected=["FIRST", "LONGEST"], arg=greedy))
for i, pattern in enumerate(patterns): for i, pattern in enumerate(patterns):
@ -864,20 +868,27 @@ class _SetPredicate:
def __call__(self, Token token): def __call__(self, Token token):
if self.is_extension: if self.is_extension:
value = get_string_id(token._.get(self.attr)) value = token._.get(self.attr)
else: else:
value = get_token_attr_for_matcher(token.c, self.attr) value = get_token_attr_for_matcher(token.c, self.attr)
if self.predicate in ("IS_SUBSET", "IS_SUPERSET", "INTERSECTS"): if self.predicate in ("IN", "NOT_IN"):
if isinstance(value, (str, int)):
value = get_string_id(value)
else:
return False
elif self.predicate in ("IS_SUBSET", "IS_SUPERSET", "INTERSECTS"):
# ensure that all values are enclosed in a set
if self.attr == MORPH: if self.attr == MORPH:
# break up MORPH into individual Feat=Val values # break up MORPH into individual Feat=Val values
value = set(get_string_id(v) for v in MorphAnalysis.from_id(self.vocab, value)) value = set(get_string_id(v) for v in MorphAnalysis.from_id(self.vocab, value))
elif isinstance(value, (str, int)):
value = set((get_string_id(value),))
elif isinstance(value, Iterable) and all(isinstance(v, (str, int)) for v in value):
value = set(get_string_id(v) for v in value)
else: else:
# treat a single value as a list return False
if isinstance(value, (str, int)):
value = set([get_string_id(value)])
else:
value = set(get_string_id(v) for v in value)
if self.predicate == "IN": if self.predicate == "IN":
return value in self.value return value in self.value
elif self.predicate == "NOT_IN": elif self.predicate == "NOT_IN":
@ -1005,8 +1016,29 @@ def _get_operators(spec):
return (ONE,) return (ONE,)
elif spec["OP"] in lookup: elif spec["OP"] in lookup:
return lookup[spec["OP"]] return lookup[spec["OP"]]
#Min_max {n,m}
elif spec["OP"].startswith("{") and spec["OP"].endswith("}"):
# {n} --> {n,n} exactly n ONE,(n)
# {n,m}--> {n,m} min of n, max of m ONE,(n),ZERO_ONE,(m)
# {,m} --> {0,m} min of zero, max of m ZERO_ONE,(m)
# {n,} --> {n,∞} min of n, max of inf ONE,(n),ZERO_PLUS
min_max = spec["OP"][1:-1]
min_max = min_max if "," in min_max else f"{min_max},{min_max}"
n, m = min_max.split(",")
#1. Either n or m is a blank string and the other is numeric -->isdigit
#2. Both are numeric and n <= m
if (not n.isdecimal() and not m.isdecimal()) or (n.isdecimal() and m.isdecimal() and int(n) > int(m)):
keys = ", ".join(lookup.keys()) + ", {n}, {n,m}, {n,}, {,m} where n and m are integers and n <= m "
raise ValueError(Errors.E011.format(op=spec["OP"], opts=keys))
# if n is empty string, zero would be used
head = tuple(ONE for __ in range(int(n or 0)))
tail = tuple(ZERO_ONE for __ in range(int(m) - int(n or 0))) if m else (ZERO_PLUS,)
return head + tail
else: else:
keys = ", ".join(lookup.keys()) keys = ", ".join(lookup.keys()) + ", {n}, {n,m}, {n,}, {,m} where n and m are integers and n <= m "
raise ValueError(Errors.E011.format(op=spec["OP"], opts=keys)) raise ValueError(Errors.E011.format(op=spec["OP"], opts=keys))
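The curly-brace quantifiers behave like their regular-expression counterparts. A self-contained sketch:

import spacy
from spacy.matcher import Matcher

nlp = spacy.blank("en")
matcher = Matcher(nlp.vocab)
# require two to three consecutive "ha" tokens, followed by "!"
matcher.add("LAUGH", [[{"LOWER": "ha", "OP": "{2,3}"}, {"ORTH": "!"}]])
doc = nlp("ha ha ha!")
# like "+" and "*", the new operators return every candidate match
print(matcher(doc, as_spans=True))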
@ -20,6 +20,15 @@ class PhraseMatcher:
Callable[[Matcher, Doc, int, List[Tuple[Any, ...]]], Any] Callable[[Matcher, Doc, int, List[Tuple[Any, ...]]], Any]
] = ..., ] = ...,
) -> None: ... ) -> None: ...
def _add_from_arrays(
self,
key: str,
specs: List[List[int]],
*,
on_match: Optional[
Callable[[Matcher, Doc, int, List[Tuple[Any, ...]]], Any]
] = ...,
) -> None: ...
def remove(self, key: str) -> None: ... def remove(self, key: str) -> None: ...
@overload @overload
def __call__( def __call__(
@ -1,4 +1,6 @@
# cython: infer_types=True, profile=True # cython: infer_types=True, profile=True
from typing import List
from collections import defaultdict
from libc.stdint cimport uintptr_t from libc.stdint cimport uintptr_t
from preshed.maps cimport map_init, map_set, map_get, map_clear, map_iter from preshed.maps cimport map_init, map_set, map_get, map_clear, map_iter
@ -39,7 +41,7 @@ cdef class PhraseMatcher:
""" """
self.vocab = vocab self.vocab = vocab
self._callbacks = {} self._callbacks = {}
self._docs = {} self._docs = defaultdict(set)
self._validate = validate self._validate = validate
self.mem = Pool() self.mem = Pool()
@ -155,66 +157,24 @@ cdef class PhraseMatcher:
del self._callbacks[key] del self._callbacks[key]
del self._docs[key] del self._docs[key]
def add(self, key, docs, *_docs, on_match=None):
"""Add a match-rule to the phrase-matcher. A match-rule consists of: an ID
key, an on_match callback, and one or more patterns.
Since spaCy v2.2.2, PhraseMatcher.add takes a list of patterns as the def _add_from_arrays(self, key, specs, *, on_match=None):
second argument, with the on_match callback as an optional keyword """Add a preprocessed list of specs, with an optional callback.
argument.
key (str): The match ID. key (str): The match ID.
docs (list): List of `Doc` objects representing match patterns. specs (List[List[int]]): A list of lists of hashes to match.
on_match (callable): Callback executed on match. on_match (callable): Callback executed on match.
*_docs (Doc): For backwards compatibility: list of patterns to add
as variable arguments. Will be ignored if a list of patterns is
provided as the second argument.
DOCS: https://spacy.io/api/phrasematcher#add
""" """
if docs is None or hasattr(docs, "__call__"): # old API
on_match = docs
docs = _docs
_ = self.vocab[key]
self._callbacks[key] = on_match
self._docs.setdefault(key, set())
cdef MapStruct* current_node cdef MapStruct* current_node
cdef MapStruct* internal_node cdef MapStruct* internal_node
cdef void* result cdef void* result
if isinstance(docs, Doc): self._callbacks[key] = on_match
raise ValueError(Errors.E179.format(key=key)) for spec in specs:
for doc in docs: self._docs[key].add(tuple(spec))
if len(doc) == 0:
continue
if isinstance(doc, Doc):
attrs = (TAG, POS, MORPH, LEMMA, DEP)
has_annotation = {attr: doc.has_annotation(attr) for attr in attrs}
for attr in attrs:
if self.attr == attr and not has_annotation[attr]:
if attr == TAG:
pipe = "tagger"
elif attr in (POS, MORPH):
pipe = "morphologizer or tagger+attribute_ruler"
elif attr == LEMMA:
pipe = "lemmatizer"
elif attr == DEP:
pipe = "parser"
error_msg = Errors.E155.format(pipe=pipe, attr=self.vocab.strings.as_string(attr))
raise ValueError(error_msg)
if self._validate and any(has_annotation.values()) \
and self.attr not in attrs:
string_attr = self.vocab.strings[self.attr]
warnings.warn(Warnings.W012.format(key=key, attr=string_attr))
keyword = self._convert_to_array(doc)
else:
keyword = doc
self._docs[key].add(tuple(keyword))
current_node = self.c_map current_node = self.c_map
for token in keyword: for token in spec:
if token == self._terminal_hash: if token == self._terminal_hash:
warnings.warn(Warnings.W021) warnings.warn(Warnings.W021)
break break
@ -233,6 +193,57 @@ cdef class PhraseMatcher:
result = internal_node result = internal_node
map_set(self.mem, <MapStruct*>result, self.vocab.strings[key], NULL) map_set(self.mem, <MapStruct*>result, self.vocab.strings[key], NULL)
def add(self, key, docs, *, on_match=None):
"""Add a match-rule to the phrase-matcher. A match-rule consists of: an ID
key, a list of one or more patterns, and (optionally) an on_match callback.
key (str): The match ID.
docs (list): List of `Doc` objects representing match patterns.
on_match (callable): Callback executed on match.
If any of the input Docs are invalid, no internal state will be updated.
DOCS: https://spacy.io/api/phrasematcher#add
"""
if isinstance(docs, Doc):
raise ValueError(Errors.E179.format(key=key))
if docs is None or not isinstance(docs, List):
raise ValueError(Errors.E948.format(name="PhraseMatcher", arg_type=type(docs)))
if on_match is not None and not hasattr(on_match, "__call__"):
raise ValueError(Errors.E171.format(name="PhraseMatcher", arg_type=type(on_match)))
_ = self.vocab[key]
specs = []
for doc in docs:
if len(doc) == 0:
continue
if not isinstance(doc, Doc):
raise ValueError(Errors.E4000.format(type=type(doc)))
attrs = (TAG, POS, MORPH, LEMMA, DEP)
has_annotation = {attr: doc.has_annotation(attr) for attr in attrs}
for attr in attrs:
if self.attr == attr and not has_annotation[attr]:
if attr == TAG:
pipe = "tagger"
elif attr in (POS, MORPH):
pipe = "morphologizer or tagger+attribute_ruler"
elif attr == LEMMA:
pipe = "lemmatizer"
elif attr == DEP:
pipe = "parser"
error_msg = Errors.E155.format(pipe=pipe, attr=self.vocab.strings.as_string(attr))
raise ValueError(error_msg)
if self._validate and any(has_annotation.values()) \
and self.attr not in attrs:
string_attr = self.vocab.strings[self.attr]
warnings.warn(Warnings.W012.format(key=key, attr=string_attr))
specs.append(self._convert_to_array(doc))
self._add_from_arrays(key, specs, on_match=on_match)
def __call__(self, object doclike, *, as_spans=False): def __call__(self, object doclike, *, as_spans=False):
"""Find all sequences matching the supplied patterns on the `Doc`. """Find all sequences matching the supplied patterns on the `Doc`.
@ -345,7 +356,7 @@ def unpickle_matcher(vocab, docs, callbacks, attr):
matcher = PhraseMatcher(vocab, attr=attr) matcher = PhraseMatcher(vocab, attr=attr)
for key, specs in docs.items(): for key, specs in docs.items():
callback = callbacks.get(key, None) callback = callbacks.get(key, None)
matcher.add(key, specs, on_match=callback) matcher._add_from_arrays(key, specs, on_match=callback)
return matcher return matcher
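The public API is otherwise unchanged, but validation is stricter: the patterns must be a list of Doc objects and the old variadic/positional-callback form is rejected up front. For example:

import spacy
from spacy.matcher import PhraseMatcher

nlp = spacy.blank("en")
matcher = PhraseMatcher(nlp.vocab)
patterns = [nlp.make_doc(text) for text in ["Barack Obama", "Angela Merkel"]]
matcher.add("PERSON", patterns)      # a list of Docs; on_match is keyword-only
doc = nlp("Barack Obama visited Berlin.")
print(matcher(doc, as_spans=True))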
@ -1,9 +1,14 @@
from functools import partial from typing import Type, Callable, Dict, TYPE_CHECKING, List, Optional, Set
from typing import Type, Callable, TYPE_CHECKING import functools
import inspect
import types
import warnings
from thinc.layers import with_nvtx_range from thinc.layers import with_nvtx_range
from thinc.model import Model, wrap_model_recursive from thinc.model import Model, wrap_model_recursive
from thinc.util import use_nvtx_range
from ..errors import Warnings
from ..util import registry from ..util import registry
if TYPE_CHECKING: if TYPE_CHECKING:
@ -11,29 +16,106 @@ if TYPE_CHECKING:
from ..language import Language # noqa: F401 from ..language import Language # noqa: F401
@registry.callbacks("spacy.models_with_nvtx_range.v1") DEFAULT_NVTX_ANNOTATABLE_PIPE_METHODS = [
def create_models_with_nvtx_range( "pipe",
forward_color: int = -1, backprop_color: int = -1 "predict",
) -> Callable[["Language"], "Language"]: "set_annotations",
def models_with_nvtx_range(nlp): "update",
pipes = [ "rehearse",
pipe "get_loss",
for _, pipe in nlp.components "initialize",
if hasattr(pipe, "is_trainable") and pipe.is_trainable "begin_update",
] "finish_update",
"update",
]
# We need process all models jointly to avoid wrapping callbacks twice.
models = Model(
"wrap_with_nvtx_range",
forward=lambda model, X, is_train: ...,
layers=[pipe.model for pipe in pipes],
)
for node in models.walk(): def models_with_nvtx_range(nlp, forward_color: int, backprop_color: int):
pipes = [
pipe
for _, pipe in nlp.components
if hasattr(pipe, "is_trainable") and pipe.is_trainable
]
seen_models: Set[int] = set()
for pipe in pipes:
for node in pipe.model.walk():
if id(node) in seen_models:
continue
seen_models.add(id(node))
with_nvtx_range( with_nvtx_range(
node, forward_color=forward_color, backprop_color=backprop_color node, forward_color=forward_color, backprop_color=backprop_color
) )
return nlp
@registry.callbacks("spacy.models_with_nvtx_range.v1")
def create_models_with_nvtx_range(
forward_color: int = -1, backprop_color: int = -1
) -> Callable[["Language"], "Language"]:
return functools.partial(
models_with_nvtx_range,
forward_color=forward_color,
backprop_color=backprop_color,
)
def nvtx_range_wrapper_for_pipe_method(self, func, *args, **kwargs):
if isinstance(func, functools.partial):
return func(*args, **kwargs)
else:
with use_nvtx_range(f"{self.name} {func.__name__}"):
return func(*args, **kwargs)
def pipes_with_nvtx_range(
nlp, additional_pipe_functions: Optional[Dict[str, List[str]]]
):
for _, pipe in nlp.components:
if additional_pipe_functions:
extra_funcs = additional_pipe_functions.get(pipe.name, [])
else:
extra_funcs = []
for name in DEFAULT_NVTX_ANNOTATABLE_PIPE_METHODS + extra_funcs:
func = getattr(pipe, name, None)
if func is None:
if name in extra_funcs:
warnings.warn(Warnings.W121.format(method=name, pipe=pipe.name))
continue
wrapped_func = functools.partial(
types.MethodType(nvtx_range_wrapper_for_pipe_method, pipe), func
)
# Try to preserve the original function signature.
try:
wrapped_func.__signature__ = inspect.signature(func) # type: ignore
except:
pass
try:
setattr(
pipe,
name,
wrapped_func,
)
except AttributeError:
warnings.warn(Warnings.W122.format(method=name, pipe=pipe.name))
return nlp
@registry.callbacks("spacy.models_and_pipes_with_nvtx_range.v1")
def create_models_and_pipes_with_nvtx_range(
forward_color: int = -1,
backprop_color: int = -1,
additional_pipe_functions: Optional[Dict[str, List[str]]] = None,
) -> Callable[["Language"], "Language"]:
def inner(nlp):
nlp = models_with_nvtx_range(nlp, forward_color, backprop_color)
nlp = pipes_with_nvtx_range(nlp, additional_pipe_functions)
return nlp return nlp
return models_with_nvtx_range return inner
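A hedged sketch of how the new registered callback could be applied by hand; the registry lookup is standard, but whether you wire it up in code or via the [nlp.after_pipeline_creation] block of a config is up to the project, and NVTX ranges only become meaningful when profiling on a CUDA device.

import spacy
from spacy.util import registry

create_callback = registry.callbacks.get("spacy.models_and_pipes_with_nvtx_range.v1")
annotate = create_callback(forward_color=0, backprop_color=1)

nlp = spacy.blank("en")
nlp.add_pipe("tagger")
nlp = annotate(nlp)   # wraps the models and the default pipe methods in NVTX ranges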
@ -7,7 +7,7 @@ from thinc.api import expand_window, residual, Maxout, Mish, PyTorchLSTM
from ...tokens import Doc from ...tokens import Doc
from ...util import registry from ...util import registry
from ...errors import Errors from ...errors import Errors
from ...ml import _character_embed from ...ml import character_embed
from ..staticvectors import StaticVectors from ..staticvectors import StaticVectors
from ..featureextractor import FeatureExtractor from ..featureextractor import FeatureExtractor
from ...pipeline.tok2vec import Tok2VecListener from ...pipeline.tok2vec import Tok2VecListener
@ -226,7 +226,7 @@ def CharacterEmbed(
if feature is None: if feature is None:
raise ValueError(Errors.E911.format(feat=feature)) raise ValueError(Errors.E911.format(feat=feature))
char_embed = chain( char_embed = chain(
_character_embed.CharacterEmbed(nM=nM, nC=nC), character_embed.CharacterEmbed(nM=nM, nC=nC),
cast(Model[List[Floats2d], Ragged], list2ragged()), cast(Model[List[Floats2d], Ragged], list2ragged()),
) )
feature_extractor: Model[List[Doc], Ragged] = chain( feature_extractor: Model[List[Doc], Ragged] = chain(
@ -191,7 +191,7 @@ def _forward_greedy_cpu(model: Model, TransitionSystem moves, states: List[State
scores = _parse_batch(cblas, moves, &c_states[0], weights, sizes, actions=actions) scores = _parse_batch(cblas, moves, &c_states[0], weights, sizes, actions=actions)
def backprop(dY): def backprop(dY):
raise ValueError(Errors.E1042) raise ValueError(Errors.E4001)
return (states, scores), backprop return (states, scores), backprop
@ -1,23 +1,41 @@
from cymem.cymem cimport Pool
from preshed.maps cimport PreshMap
cimport numpy as np cimport numpy as np
from libc.stdint cimport uint64_t from libc.stdint cimport uint32_t, uint64_t
from libcpp.unordered_map cimport unordered_map
from libcpp.vector cimport vector
from libcpp.memory cimport shared_ptr
from .structs cimport MorphAnalysisC
from .strings cimport StringStore from .strings cimport StringStore
from .typedefs cimport attr_t, hash_t from .typedefs cimport attr_t, hash_t
cdef cppclass Feature:
hash_t field
hash_t value
__init__():
this.field = 0
this.value = 0
cdef cppclass MorphAnalysisC:
hash_t key
vector[Feature] features
__init__():
this.key = 0
cdef class Morphology: cdef class Morphology:
cdef readonly Pool mem
cdef readonly StringStore strings cdef readonly StringStore strings
cdef PreshMap tags # Keyed by hash, value is pointer to tag cdef unordered_map[hash_t, shared_ptr[MorphAnalysisC]] tags
cdef MorphAnalysisC create_morph_tag(self, field_feature_pairs) except * cdef shared_ptr[MorphAnalysisC] _lookup_tag(self, hash_t tag_hash)
cdef int insert(self, MorphAnalysisC tag) except -1 cdef void _intern_morph_tag(self, hash_t tag_key, feats)
cdef hash_t _add(self, features)
cdef str _normalize_features(self, features)
cdef str get_morph_str(self, hash_t morph_key)
cdef shared_ptr[MorphAnalysisC] get_morph_c(self, hash_t morph_key)
cdef int check_feature(const shared_ptr[MorphAnalysisC] morph, attr_t feature) nogil
cdef int check_feature(const MorphAnalysisC* morph, attr_t feature) nogil cdef list list_features(const shared_ptr[MorphAnalysisC] morph)
cdef list list_features(const MorphAnalysisC* morph) cdef np.ndarray get_by_field(const shared_ptr[MorphAnalysisC] morph, attr_t field)
cdef np.ndarray get_by_field(const MorphAnalysisC* morph, attr_t field) cdef int get_n_by_field(attr_t* results, const shared_ptr[MorphAnalysisC] morph, attr_t field) nogil
cdef int get_n_by_field(attr_t* results, const MorphAnalysisC* morph, attr_t field) nogil
@ -1,10 +1,10 @@
# cython: infer_types # cython: infer_types
import numpy import numpy
import warnings import warnings
from typing import Union, Tuple, List, Dict, Optional
from cython.operator cimport dereference as deref
from libcpp.memory cimport shared_ptr
from .attrs cimport POS
from .parts_of_speech import IDS as POS_IDS
from .errors import Warnings from .errors import Warnings
from . import symbols from . import symbols
@ -24,134 +24,187 @@ cdef class Morphology:
EMPTY_MORPH = symbols.NAMES[symbols._] EMPTY_MORPH = symbols.NAMES[symbols._]
def __init__(self, StringStore strings): def __init__(self, StringStore strings):
self.mem = Pool()
self.strings = strings self.strings = strings
self.tags = PreshMap()
def __reduce__(self): def __reduce__(self):
tags = set([self.get(self.strings[s]) for s in self.strings]) tags = set([self.get(self.strings[s]) for s in self.strings])
tags -= set([""]) tags -= set([""])
return (unpickle_morphology, (self.strings, sorted(tags)), None, None) return (unpickle_morphology, (self.strings, sorted(tags)), None, None)
def add(self, features): cdef shared_ptr[MorphAnalysisC] _lookup_tag(self, hash_t tag_hash):
match = self.tags.find(tag_hash)
if match != self.tags.const_end():
return deref(match).second
else:
return shared_ptr[MorphAnalysisC]()
def _normalize_attr(self, attr_key : Union[int, str], attr_value : Union[int, str]) -> Optional[Tuple[str, Union[str, List[str]]]]:
if isinstance(attr_key, (int, str)) and isinstance(attr_value, (int, str)):
attr_key = self.strings.as_string(attr_key)
attr_value = self.strings.as_string(attr_value)
# Preserve multiple values as a list
if self.VALUE_SEP in attr_value:
values = attr_value.split(self.VALUE_SEP)
values.sort()
attr_value = values
else:
warnings.warn(Warnings.W100.format(feature={attr_key: attr_value}))
return None
return attr_key, attr_value
def _str_to_normalized_feat_dict(self, feats: str) -> Dict[str, str]:
if not feats or feats == self.EMPTY_MORPH:
return {}
out = []
for feat in feats.split(self.FEATURE_SEP):
field, values = feat.split(self.FIELD_SEP, 1)
normalized_attr = self._normalize_attr(field, values)
if normalized_attr is None:
continue
out.append((normalized_attr[0], normalized_attr[1]))
out.sort(key=lambda x: x[0])
return dict(out)
def _dict_to_normalized_feat_dict(self, feats: Dict[Union[int, str], Union[int, str]]) -> Dict[str, str]:
out = []
for field, values in feats.items():
normalized_attr = self._normalize_attr(field, values)
if normalized_attr is None:
continue
out.append((normalized_attr[0], normalized_attr[1]))
out.sort(key=lambda x: x[0])
return dict(out)
def _normalized_feat_dict_to_str(self, feats: Dict[str, str]) -> str:
norm_feats_string = self.FEATURE_SEP.join([
self.FIELD_SEP.join([field, self.VALUE_SEP.join(values) if isinstance(values, list) else values])
for field, values in feats.items()
])
return norm_feats_string or self.EMPTY_MORPH
cdef hash_t _add(self, features):
"""Insert a morphological analysis in the morphology table, if not """Insert a morphological analysis in the morphology table, if not
already present. The morphological analysis may be provided in the UD already present. The morphological analysis may be provided in the UD
FEATS format as a string or in the tag map dict format. FEATS format as a string or in the tag map dict format.
Returns the hash of the new analysis. Returns the hash of the new analysis.
""" """
cdef MorphAnalysisC* tag_ptr cdef hash_t tag_hash = 0
cdef shared_ptr[MorphAnalysisC] tag
if isinstance(features, str): if isinstance(features, str):
if features == "": if features == "":
features = self.EMPTY_MORPH features = self.EMPTY_MORPH
tag_ptr = <MorphAnalysisC*>self.tags.get(<hash_t>self.strings[features])
if tag_ptr != NULL: tag_hash = self.strings[features]
return tag_ptr.key tag = self._lookup_tag(tag_hash)
features = self.feats_to_dict(features) if tag:
if not isinstance(features, dict): return deref(tag).key
features = self._str_to_normalized_feat_dict(features)
elif isinstance(features, dict):
features = self._dict_to_normalized_feat_dict(features)
else:
warnings.warn(Warnings.W100.format(feature=features)) warnings.warn(Warnings.W100.format(feature=features))
features = {} features = {}
string_features = {self.strings.as_string(field): self.strings.as_string(values) for field, values in features.items()}
# intified ("Field", "Field=Value") pairs
field_feature_pairs = []
for field in sorted(string_features):
values = string_features[field]
for value in values.split(self.VALUE_SEP):
field_feature_pairs.append((
self.strings.add(field),
self.strings.add(field + self.FIELD_SEP + value),
))
cdef MorphAnalysisC tag = self.create_morph_tag(field_feature_pairs)
# the hash key for the tag is either the hash of the normalized UFEATS # the hash key for the tag is either the hash of the normalized UFEATS
# string or the hash of an empty placeholder # string or the hash of an empty placeholder
norm_feats_string = self.normalize_features(features) norm_feats_string = self._normalized_feat_dict_to_str(features)
tag.key = self.strings.add(norm_feats_string) tag_hash = self.strings.add(norm_feats_string)
self.insert(tag) tag = self._lookup_tag(tag_hash)
return tag.key if tag:
return deref(tag).key
def normalize_features(self, features): self._intern_morph_tag(tag_hash, features)
return tag_hash
cdef void _intern_morph_tag(self, hash_t tag_key, feats):
# intified ("Field", "Field=Value") pairs where fields with multiple values have
# been split into individual tuples, e.g.:
# [("Field1", "Field1=Value1"), ("Field1", "Field1=Value2"),
# ("Field2", "Field2=Value3")]
field_feature_pairs = []
# Feat dict is normalized at this point.
for field, values in feats.items():
field_key = self.strings.add(field)
if isinstance(values, list):
for value in values:
value_key = self.strings.add(field + self.FIELD_SEP + value)
field_feature_pairs.append((field_key, value_key))
else:
# We could box scalar values into a list and use a common
# code path to generate features but that incurs a small
# but measurable allocation/iteration overhead (as this
# branch is taken often enough).
value_key = self.strings.add(field + self.FIELD_SEP + values)
field_feature_pairs.append((field_key, value_key))
num_features = len(field_feature_pairs)
cdef shared_ptr[MorphAnalysisC] tag = shared_ptr[MorphAnalysisC](new MorphAnalysisC())
deref(tag).key = tag_key
deref(tag).features.resize(num_features)
for i in range(num_features):
deref(tag).features[i].field = field_feature_pairs[i][0]
deref(tag).features[i].value = field_feature_pairs[i][1]
self.tags[tag_key] = tag
cdef str get_morph_str(self, hash_t morph_key):
cdef shared_ptr[MorphAnalysisC] tag = self._lookup_tag(morph_key)
if not tag:
return ""
else:
return self.strings[deref(tag).key]
cdef shared_ptr[MorphAnalysisC] get_morph_c(self, hash_t morph_key):
return self._lookup_tag(morph_key)
cdef str _normalize_features(self, features):
"""Create a normalized FEATS string from a features string or dict. """Create a normalized FEATS string from a features string or dict.
features (Union[dict, str]): Features as dict or UFEATS string. features (Union[dict, str]): Features as dict or UFEATS string.
RETURNS (str): Features as normalized UFEATS string. RETURNS (str): Features as normalized UFEATS string.
""" """
if isinstance(features, str): if isinstance(features, str):
features = self.feats_to_dict(features) features = self._str_to_normalized_feat_dict(features)
if not isinstance(features, dict): elif isinstance(features, dict):
features = self._dict_to_normalized_feat_dict(features)
else:
warnings.warn(Warnings.W100.format(feature=features)) warnings.warn(Warnings.W100.format(feature=features))
features = {} features = {}
features = self.normalize_attrs(features)
string_features = {self.strings.as_string(field): self.strings.as_string(values) for field, values in features.items()}
# normalized UFEATS string with sorted fields and values
norm_feats_string = self.FEATURE_SEP.join(sorted([
self.FIELD_SEP.join([field, values])
for field, values in string_features.items()
]))
return norm_feats_string or self.EMPTY_MORPH
def normalize_attrs(self, attrs): return self._normalized_feat_dict_to_str(features)
"""Convert attrs dict so that POS is always by ID, other features are
by string. Values separated by VALUE_SEP are sorted.
"""
out = {}
attrs = dict(attrs)
for key, value in attrs.items():
# convert POS value to ID
if key == POS or (isinstance(key, str) and key.upper() == "POS"):
if isinstance(value, str) and value.upper() in POS_IDS:
value = POS_IDS[value.upper()]
elif isinstance(value, int) and value not in POS_IDS.values():
warnings.warn(Warnings.W100.format(feature={key: value}))
continue
out[POS] = value
# accept any string or ID fields and values and convert to strings
elif isinstance(key, (int, str)) and isinstance(value, (int, str)):
key = self.strings.as_string(key)
value = self.strings.as_string(value)
# sort values
if self.VALUE_SEP in value:
value = self.VALUE_SEP.join(sorted(value.split(self.VALUE_SEP)))
out[key] = value
else:
warnings.warn(Warnings.W100.format(feature={key: value}))
return out
cdef MorphAnalysisC create_morph_tag(self, field_feature_pairs) except *: def add(self, features):
"""Creates a MorphAnalysisC from a list of intified return self._add(features)
("Field", "Field=Value") tuples where fields with multiple values have
been split into individual tuples, e.g.:
[("Field1", "Field1=Value1"), ("Field1", "Field1=Value2"),
("Field2", "Field2=Value3")]
"""
cdef MorphAnalysisC tag
tag.length = len(field_feature_pairs)
if tag.length > 0:
tag.fields = <attr_t*>self.mem.alloc(tag.length, sizeof(attr_t))
tag.features = <attr_t*>self.mem.alloc(tag.length, sizeof(attr_t))
for i, (field, feature) in enumerate(field_feature_pairs):
tag.fields[i] = field
tag.features[i] = feature
return tag
cdef int insert(self, MorphAnalysisC tag) except -1: def get(self, morph_key):
cdef hash_t key = tag.key return self.get_morph_str(morph_key)
if self.tags.get(key) == NULL:
tag_ptr = <MorphAnalysisC*>self.mem.alloc(1, sizeof(MorphAnalysisC))
tag_ptr[0] = tag
self.tags.set(key, <void*>tag_ptr)
def get(self, hash_t morph): def normalize_features(self, features):
tag = <MorphAnalysisC*>self.tags.get(morph) return self._normalize_features(features)
if tag == NULL:
return ""
else:
return self.strings[tag.key]
@staticmethod @staticmethod
def feats_to_dict(feats): def feats_to_dict(feats, *, sort_values=True):
if not feats or feats == Morphology.EMPTY_MORPH: if not feats or feats == Morphology.EMPTY_MORPH:
return {} return {}
return {field: Morphology.VALUE_SEP.join(sorted(values.split(Morphology.VALUE_SEP))) for field, values in
[feat.split(Morphology.FIELD_SEP) for feat in feats.split(Morphology.FEATURE_SEP)]} out = {}
for feat in feats.split(Morphology.FEATURE_SEP):
field, values = feat.split(Morphology.FIELD_SEP, 1)
if sort_values:
values = values.split(Morphology.VALUE_SEP)
values.sort()
values = Morphology.VALUE_SEP.join(values)
out[field] = values
return out
@staticmethod @staticmethod
def dict_to_feats(feats_dict): def dict_to_feats(feats_dict):
@ -160,34 +213,34 @@ cdef class Morphology:
return Morphology.FEATURE_SEP.join(sorted([Morphology.FIELD_SEP.join([field, Morphology.VALUE_SEP.join(sorted(values.split(Morphology.VALUE_SEP)))]) for field, values in feats_dict.items()])) return Morphology.FEATURE_SEP.join(sorted([Morphology.FIELD_SEP.join([field, Morphology.VALUE_SEP.join(sorted(values.split(Morphology.VALUE_SEP)))]) for field, values in feats_dict.items()]))
cdef int check_feature(const MorphAnalysisC* morph, attr_t feature) nogil: cdef int check_feature(const shared_ptr[MorphAnalysisC] morph, attr_t feature) nogil:
cdef int i cdef int i
for i in range(morph.length): for i in range(deref(morph).features.size()):
if morph.features[i] == feature: if deref(morph).features[i].value == feature:
return True return True
return False return False
cdef list list_features(const MorphAnalysisC* morph): cdef list list_features(const shared_ptr[MorphAnalysisC] morph):
cdef int i cdef int i
features = [] features = []
for i in range(morph.length): for i in range(deref(morph).features.size()):
features.append(morph.features[i]) features.append(deref(morph).features[i].value)
return features return features
cdef np.ndarray get_by_field(const MorphAnalysisC* morph, attr_t field): cdef np.ndarray get_by_field(const shared_ptr[MorphAnalysisC] morph, attr_t field):
cdef np.ndarray results = numpy.zeros((morph.length,), dtype="uint64") cdef np.ndarray results = numpy.zeros((deref(morph).features.size(),), dtype="uint64")
n = get_n_by_field(<uint64_t*>results.data, morph, field) n = get_n_by_field(<uint64_t*>results.data, morph, field)
return results[:n] return results[:n]
cdef int get_n_by_field(attr_t* results, const MorphAnalysisC* morph, attr_t field) nogil: cdef int get_n_by_field(attr_t* results, const shared_ptr[MorphAnalysisC] morph, attr_t field) nogil:
cdef int n_results = 0 cdef int n_results = 0
cdef int i cdef int i
for i in range(morph.length): for i in range(deref(morph).features.size()):
if morph.fields[i] == field: if deref(morph).features[i].field == field:
results[n_results] = morph.features[i] results[n_results] = deref(morph).features[i].value
n_results += 1 n_results += 1
return n_results return n_results
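The refactor changes the internal tag storage (shared_ptr-backed MorphAnalysisC instead of Pool allocations) but keeps the FEATS string helpers, so the existing round-trip behaviour should still hold. For reference:

from spacy.morphology import Morphology

feats = "Number=Sing|Case=Nom"
as_dict = Morphology.feats_to_dict(feats)
print(as_dict)                             # {'Number': 'Sing', 'Case': 'Nom'}
print(Morphology.dict_to_feats(as_dict))   # fields re-sorted: Case=Nom|Number=Sing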
@ -3,22 +3,22 @@ from . cimport symbols
cpdef enum univ_pos_t: cpdef enum univ_pos_t:
NO_TAG = 0 NO_TAG = 0
ADJ = symbols.ADJ ADJ = symbols.ADJ
ADP ADP = symbols.ADP
ADV ADV = symbols.ADV
AUX AUX = symbols.AUX
CONJ CONJ = symbols.CONJ
CCONJ # U20 CCONJ = symbols.CCONJ # U20
DET DET = symbols.DET
INTJ INTJ = symbols.INTJ
NOUN NOUN = symbols.NOUN
NUM NUM = symbols.NUM
PART PART = symbols.PART
PRON PRON = symbols.PRON
PROPN PROPN = symbols.PROPN
PUNCT PUNCT = symbols.PUNCT
SCONJ SCONJ = symbols.SCONJ
SYM SYM = symbols.SYM
VERB VERB = symbols.VERB
X X = symbols.X
EOL EOL = symbols.EOL
SPACE SPACE = symbols.SPACE
@ -1,9 +1,9 @@
from .attributeruler import AttributeRuler from .attribute_ruler import AttributeRuler
from .dep_parser import DependencyParser from .dep_parser import DependencyParser
from .edit_tree_lemmatizer import EditTreeLemmatizer from .edit_tree_lemmatizer import EditTreeLemmatizer
from .entity_linker import EntityLinker from .entity_linker import EntityLinker
from .ner import EntityRecognizer from .ner import EntityRecognizer
from .entityruler import EntityRuler from .entity_ruler import EntityRuler
from .lemmatizer import Lemmatizer from .lemmatizer import Lemmatizer
from .morphologizer import Morphologizer from .morphologizer import Morphologizer
from .pipe import Pipe from .pipe import Pipe


@ -10,6 +10,7 @@ from ...strings cimport hash_string
from ...structs cimport TokenC from ...structs cimport TokenC
from ...tokens.doc cimport Doc, set_children_from_heads from ...tokens.doc cimport Doc, set_children_from_heads
from ...tokens.token cimport MISSING_DEP from ...tokens.token cimport MISSING_DEP
from ...training import split_bilu_label
from ...training.example cimport Example from ...training.example cimport Example
from .stateclass cimport StateClass from .stateclass cimport StateClass
from ._state cimport StateC, ArcC from ._state cimport StateC, ArcC
@ -687,7 +688,7 @@ cdef class ArcEager(TransitionSystem):
return self.c[name_or_id] return self.c[name_or_id]
name = name_or_id name = name_or_id
if '-' in name: if '-' in name:
move_str, label_str = name.split('-', 1) move_str, label_str = split_bilu_label(name)
label = self.strings[label_str] label = self.strings[label_str]
else: else:
move_str = name move_str = name
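The `split_bilu_label` helper centralises what was previously an inline `name.split('-', 1)`; a sketch of the expected behaviour, not a definitive spec:

    from spacy.training import split_bilu_label

    # Splits a transition name into its move and label parts,
    # roughly equivalent to name.split("-", 1).
    move, label = split_bilu_label("B-PERSON")
    print(move, label)  # expected: B PERSON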


@ -14,6 +14,7 @@ from ...typedefs cimport weight_t, attr_t
from ...lexeme cimport Lexeme from ...lexeme cimport Lexeme
from ...attrs cimport IS_SPACE from ...attrs cimport IS_SPACE
from ...structs cimport TokenC, SpanC from ...structs cimport TokenC, SpanC
from ...training import split_bilu_label
from ...training.example cimport Example from ...training.example cimport Example
from .search cimport Beam from .search cimport Beam
from .stateclass cimport StateClass from .stateclass cimport StateClass
@ -180,7 +181,7 @@ cdef class BiluoPushDown(TransitionSystem):
if name == '-' or name == '' or name is None: if name == '-' or name == '' or name is None:
return Transition(clas=0, move=MISSING, label=0, score=0) return Transition(clas=0, move=MISSING, label=0, score=0)
elif '-' in name: elif '-' in name:
move_str, label_str = name.split('-', 1) move_str, label_str = split_bilu_label(name)
# Deprecated, hacky way to denote 'not this entity' # Deprecated, hacky way to denote 'not this entity'
if label_str.startswith('!'): if label_str.startswith('!'):
raise ValueError(Errors.E869.format(label=name)) raise ValueError(Errors.E869.format(label=name))


@ -11,7 +11,7 @@ from ..matcher import Matcher
from ..scorer import Scorer from ..scorer import Scorer
from ..symbols import IDS from ..symbols import IDS
from ..tokens import Doc, Span from ..tokens import Doc, Span
from ..tokens._retokenize import normalize_token_attrs, set_token_attrs from ..tokens.retokenizer import normalize_token_attrs, set_token_attrs
from ..vocab import Vocab from ..vocab import Vocab
from ..util import SimpleFrozenList, registry from ..util import SimpleFrozenList, registry
from .. import util from .. import util


@ -12,6 +12,7 @@ from ..language import Language
from ._parser_internals import nonproj from ._parser_internals import nonproj
from ._parser_internals.nonproj import DELIMITER from ._parser_internals.nonproj import DELIMITER
from ..scorer import Scorer from ..scorer import Scorer
from ..training import remove_bilu_prefix
from ..util import registry from ..util import registry
@ -318,7 +319,7 @@ class DependencyParser(Parser):
# Get the labels from the model by looking at the available moves # Get the labels from the model by looking at the available moves
for move in self.move_names: for move in self.move_names:
if "-" in move: if "-" in move:
label = move.split("-")[1] label = remove_bilu_prefix(move)
if DELIMITER in label: if DELIMITER in label:
label = label.split(DELIMITER)[1] label = label.split(DELIMITER)[1]
labels.add(label) labels.add(label)
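`remove_bilu_prefix` is the companion helper used where only the label part matters; a short sketch of how the parser's label extraction is expected to behave with it:

    from spacy.training import remove_bilu_prefix

    print(remove_bilu_prefix("L-nsubj"))   # expected: nsubj
    print(remove_bilu_prefix("U-PERSON"))  # expected: PERSON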


@ -7,7 +7,7 @@ import numpy as np
import srsly import srsly
from thinc.api import Config, Model, SequenceCategoricalCrossentropy from thinc.api import Config, Model, SequenceCategoricalCrossentropy
from thinc.types import Floats2d, Ints1d, Ints2d from thinc.types import ArrayXd, Floats2d, Ints1d
from ._edit_tree_internals.edit_trees import EditTrees from ._edit_tree_internals.edit_trees import EditTrees
from ._edit_tree_internals.schemas import validate_edit_tree from ._edit_tree_internals.schemas import validate_edit_tree
@ -21,6 +21,9 @@ from ..vocab import Vocab
from .. import util from .. import util
ActivationsT = Dict[str, Union[List[Floats2d], List[Ints1d]]]
default_model_config = """ default_model_config = """
[model] [model]
@architectures = "spacy.Tagger.v2" @architectures = "spacy.Tagger.v2"
@ -49,6 +52,7 @@ DEFAULT_EDIT_TREE_LEMMATIZER_MODEL = Config().from_str(default_model_config)["mo
"overwrite": False, "overwrite": False,
"top_k": 1, "top_k": 1,
"scorer": {"@scorers": "spacy.lemmatizer_scorer.v1"}, "scorer": {"@scorers": "spacy.lemmatizer_scorer.v1"},
"save_activations": False,
}, },
default_score_weights={"lemma_acc": 1.0}, default_score_weights={"lemma_acc": 1.0},
) )
@ -61,6 +65,7 @@ def make_edit_tree_lemmatizer(
overwrite: bool, overwrite: bool,
top_k: int, top_k: int,
scorer: Optional[Callable], scorer: Optional[Callable],
save_activations: bool,
): ):
"""Construct an EditTreeLemmatizer component.""" """Construct an EditTreeLemmatizer component."""
return EditTreeLemmatizer( return EditTreeLemmatizer(
@ -72,6 +77,7 @@ def make_edit_tree_lemmatizer(
overwrite=overwrite, overwrite=overwrite,
top_k=top_k, top_k=top_k,
scorer=scorer, scorer=scorer,
save_activations=save_activations,
) )
@ -91,6 +97,7 @@ class EditTreeLemmatizer(TrainablePipe):
overwrite: bool = False, overwrite: bool = False,
top_k: int = 1, top_k: int = 1,
scorer: Optional[Callable] = lemmatizer_score, scorer: Optional[Callable] = lemmatizer_score,
save_activations: bool = False,
): ):
""" """
Construct an edit tree lemmatizer. Construct an edit tree lemmatizer.
@ -102,6 +109,7 @@ class EditTreeLemmatizer(TrainablePipe):
frequency in the training data. frequency in the training data.
overwrite (bool): overwrite existing lemma annotations. overwrite (bool): overwrite existing lemma annotations.
top_k (int): try to apply at most the k most probable edit trees. top_k (int): try to apply at most the k most probable edit trees.
save_activations (bool): save model activations in Doc when annotating.
""" """
self.vocab = vocab self.vocab = vocab
self.model = model self.model = model
@ -116,6 +124,7 @@ class EditTreeLemmatizer(TrainablePipe):
self.cfg: Dict[str, Any] = {"labels": []} self.cfg: Dict[str, Any] = {"labels": []}
self.scorer = scorer self.scorer = scorer
self.save_activations = save_activations
def get_loss( def get_loss(
self, examples: Iterable[Example], scores: List[Floats2d] self, examples: Iterable[Example], scores: List[Floats2d]
@ -144,21 +153,24 @@ class EditTreeLemmatizer(TrainablePipe):
return float(loss), d_scores return float(loss), d_scores
def predict(self, docs: Iterable[Doc]) -> List[Ints2d]: def predict(self, docs: Iterable[Doc]) -> ActivationsT:
n_docs = len(list(docs)) n_docs = len(list(docs))
if not any(len(doc) for doc in docs): if not any(len(doc) for doc in docs):
# Handle cases where there are no tokens in any docs. # Handle cases where there are no tokens in any docs.
n_labels = len(self.cfg["labels"]) n_labels = len(self.cfg["labels"])
guesses: List[Ints2d] = [ guesses: List[Ints1d] = [
self.model.ops.alloc((0,), dtype="i") for doc in docs
]
scores: List[Floats2d] = [
self.model.ops.alloc((0, n_labels), dtype="i") for doc in docs self.model.ops.alloc((0, n_labels), dtype="i") for doc in docs
] ]
assert len(guesses) == n_docs assert len(guesses) == n_docs
return guesses return {"probabilities": scores, "tree_ids": guesses}
scores = self.model.predict(docs) scores = self.model.predict(docs)
assert len(scores) == n_docs assert len(scores) == n_docs
guesses = self._scores2guesses(docs, scores) guesses = self._scores2guesses(docs, scores)
assert len(guesses) == n_docs assert len(guesses) == n_docs
return guesses return {"probabilities": scores, "tree_ids": guesses}
def _scores2guesses(self, docs, scores): def _scores2guesses(self, docs, scores):
guesses = [] guesses = []
@ -186,8 +198,13 @@ class EditTreeLemmatizer(TrainablePipe):
return guesses return guesses
def set_annotations(self, docs: Iterable[Doc], batch_tree_ids): def set_annotations(self, docs: Iterable[Doc], activations: ActivationsT):
batch_tree_ids = activations["tree_ids"]
for i, doc in enumerate(docs): for i, doc in enumerate(docs):
if self.save_activations:
doc.activations[self.name] = {}
for act_name, acts in activations.items():
doc.activations[self.name][act_name] = acts[i]
doc_tree_ids = batch_tree_ids[i] doc_tree_ids = batch_tree_ids[i]
if hasattr(doc_tree_ids, "get"): if hasattr(doc_tree_ids, "get"):
doc_tree_ids = doc_tree_ids.get() doc_tree_ids = doc_tree_ids.get()
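With `save_activations` enabled, the component stores its raw outputs under `Doc.activations` (an attribute introduced on this branch) using the keys returned by `predict`, here "probabilities" and "tree_ids". A minimal sketch, assuming a hypothetical trained pipeline that contains a `trainable_lemmatizer`:

    import spacy

    nlp = spacy.load("my_pipeline")  # hypothetical pipeline name
    nlp.get_pipe("trainable_lemmatizer").save_activations = True
    doc = nlp("She was reading.")
    acts = doc.activations["trainable_lemmatizer"]
    # Per-token label probabilities and the edit tree ids that were chosen.
    print(acts["probabilities"].shape, acts["tree_ids"].shape)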


@ -1,5 +1,7 @@
from typing import Optional, Iterable, Callable, Dict, Union, List, Any from typing import Optional, Iterable, Callable, Dict, Sequence, Union, List, Any
from thinc.types import Floats2d from typing import cast
from numpy import dtype
from thinc.types import Floats1d, Floats2d, Ints1d, Ragged
from pathlib import Path from pathlib import Path
from itertools import islice from itertools import islice
import srsly import srsly
@ -21,6 +23,11 @@ from ..util import SimpleFrozenList, registry
from .. import util from .. import util
from ..scorer import Scorer from ..scorer import Scorer
ActivationsT = Dict[str, Union[List[Ragged], List[str]]]
KNOWLEDGE_BASE_IDS = "kb_ids"
# See #9050 # See #9050
BACKWARD_OVERWRITE = True BACKWARD_OVERWRITE = True
@ -56,6 +63,8 @@ DEFAULT_NEL_MODEL = Config().from_str(default_model_config)["model"]
"overwrite": True, "overwrite": True,
"scorer": {"@scorers": "spacy.entity_linker_scorer.v1"}, "scorer": {"@scorers": "spacy.entity_linker_scorer.v1"},
"use_gold_ents": True, "use_gold_ents": True,
"threshold": None,
"save_activations": False,
}, },
default_score_weights={ default_score_weights={
"nel_micro_f": 1.0, "nel_micro_f": 1.0,
@ -77,6 +86,8 @@ def make_entity_linker(
overwrite: bool, overwrite: bool,
scorer: Optional[Callable], scorer: Optional[Callable],
use_gold_ents: bool, use_gold_ents: bool,
threshold: Optional[float] = None,
save_activations: bool,
): ):
"""Construct an EntityLinker component. """Construct an EntityLinker component.
@ -91,6 +102,11 @@ def make_entity_linker(
get_candidates (Callable[[KnowledgeBase, "Span"], Iterable[Candidate]]): Function that get_candidates (Callable[[KnowledgeBase, "Span"], Iterable[Candidate]]): Function that
produces a list of candidates, given a certain knowledge base and a textual mention. produces a list of candidates, given a certain knowledge base and a textual mention.
scorer (Optional[Callable]): The scoring method. scorer (Optional[Callable]): The scoring method.
use_gold_ents (bool): Whether to copy entities from gold docs or not. If false, another
component must provide entity annotations.
threshold (Optional[float]): Confidence threshold for entity predictions. If confidence is below the threshold,
prediction is discarded. If None, predictions are not filtered by any threshold.
save_activations (bool): save model activations in Doc when annotating.
""" """
if not model.attrs.get("include_span_maker", False): if not model.attrs.get("include_span_maker", False):
@ -121,6 +137,8 @@ def make_entity_linker(
overwrite=overwrite, overwrite=overwrite,
scorer=scorer, scorer=scorer,
use_gold_ents=use_gold_ents, use_gold_ents=use_gold_ents,
threshold=threshold,
save_activations=save_activations,
) )
@ -156,6 +174,8 @@ class EntityLinker(TrainablePipe):
overwrite: bool = BACKWARD_OVERWRITE, overwrite: bool = BACKWARD_OVERWRITE,
scorer: Optional[Callable] = entity_linker_score, scorer: Optional[Callable] = entity_linker_score,
use_gold_ents: bool, use_gold_ents: bool,
threshold: Optional[float] = None,
save_activations: bool = False,
) -> None: ) -> None:
"""Initialize an entity linker. """Initialize an entity linker.
@ -174,9 +194,20 @@ class EntityLinker(TrainablePipe):
Scorer.score_links. Scorer.score_links.
use_gold_ents (bool): Whether to copy entities from gold docs or not. If false, another use_gold_ents (bool): Whether to copy entities from gold docs or not. If false, another
component must provide entity annotations. component must provide entity annotations.
threshold (Optional[float]): Confidence threshold for entity predictions. If confidence is below the
threshold, prediction is discarded. If None, predictions are not filtered by any threshold.
DOCS: https://spacy.io/api/entitylinker#init DOCS: https://spacy.io/api/entitylinker#init
""" """
if threshold is not None and not (0 <= threshold <= 1):
raise ValueError(
Errors.E1043.format(
range_start=0,
range_end=1,
value=threshold,
)
)
self.vocab = vocab self.vocab = vocab
self.model = model self.model = model
self.name = name self.name = name
@ -192,6 +223,8 @@ class EntityLinker(TrainablePipe):
self.kb = empty_kb(entity_vector_length)(self.vocab) self.kb = empty_kb(entity_vector_length)(self.vocab)
self.scorer = scorer self.scorer = scorer
self.use_gold_ents = use_gold_ents self.use_gold_ents = use_gold_ents
self.threshold = threshold
self.save_activations = save_activations
def set_kb(self, kb_loader: Callable[[Vocab], KnowledgeBase]): def set_kb(self, kb_loader: Callable[[Vocab], KnowledgeBase]):
"""Define the KB of this pipe by providing a function that will """Define the KB of this pipe by providing a function that will
@ -377,7 +410,7 @@ class EntityLinker(TrainablePipe):
loss = loss / len(entity_encodings) loss = loss / len(entity_encodings)
return float(loss), out return float(loss), out
def predict(self, docs: Iterable[Doc]) -> List[str]: def predict(self, docs: Iterable[Doc]) -> ActivationsT:
"""Apply the pipeline's model to a batch of docs, without modifying them. """Apply the pipeline's model to a batch of docs, without modifying them.
Returns the KB IDs for each entity in each doc, including NIL if there is Returns the KB IDs for each entity in each doc, including NIL if there is
no prediction. no prediction.
@ -390,13 +423,20 @@ class EntityLinker(TrainablePipe):
self.validate_kb() self.validate_kb()
entity_count = 0 entity_count = 0
final_kb_ids: List[str] = [] final_kb_ids: List[str] = []
xp = self.model.ops.xp ops = self.model.ops
xp = ops.xp
docs_ents: List[Ragged] = []
docs_scores: List[Ragged] = []
if not docs: if not docs:
return final_kb_ids return {KNOWLEDGE_BASE_IDS: final_kb_ids, "ents": docs_ents, "scores": docs_scores}
if isinstance(docs, Doc): if isinstance(docs, Doc):
docs = [docs] docs = [docs]
for i, doc in enumerate(docs): for doc in docs:
doc_ents: List[Ints1d] = []
doc_scores: List[Floats1d] = []
if len(doc) == 0: if len(doc) == 0:
docs_scores.append(Ragged(ops.alloc1f(0), ops.alloc1i(0)))
docs_ents.append(Ragged(xp.zeros(0, dtype="uint64"), ops.alloc1i(0)))
continue continue
sentences = [s for s in doc.sents] sentences = [s for s in doc.sents]
# Looping through each entity (TODO: rewrite) # Looping through each entity (TODO: rewrite)
@ -419,15 +459,32 @@ class EntityLinker(TrainablePipe):
if ent.label_ in self.labels_discard: if ent.label_ in self.labels_discard:
# ignoring this entity - setting to NIL # ignoring this entity - setting to NIL
final_kb_ids.append(self.NIL) final_kb_ids.append(self.NIL)
self._add_activations(
doc_scores=doc_scores,
doc_ents=doc_ents,
scores=[0.0],
ents=[0],
)
else: else:
candidates = list(self.get_candidates(self.kb, ent)) candidates = list(self.get_candidates(self.kb, ent))
if not candidates: if not candidates:
# no prediction possible for this entity - setting to NIL # no prediction possible for this entity - setting to NIL
final_kb_ids.append(self.NIL) final_kb_ids.append(self.NIL)
elif len(candidates) == 1: self._add_activations(
doc_scores=doc_scores,
doc_ents=doc_ents,
scores=[0.0],
ents=[0],
)
elif len(candidates) == 1 and self.threshold is None:
# shortcut for efficiency reasons: take the 1 candidate # shortcut for efficiency reasons: take the 1 candidate
# TODO: thresholding
final_kb_ids.append(candidates[0].entity_) final_kb_ids.append(candidates[0].entity_)
self._add_activations(
doc_scores=doc_scores,
doc_ents=doc_ents,
scores=[1.0],
ents=[candidates[0].entity_],
)
else: else:
random.shuffle(candidates) random.shuffle(candidates)
# set all prior probabilities to 0 if incl_prior=False # set all prior probabilities to 0 if incl_prior=False
@ -455,31 +512,53 @@ class EntityLinker(TrainablePipe):
if sims.shape != prior_probs.shape: if sims.shape != prior_probs.shape:
raise ValueError(Errors.E161) raise ValueError(Errors.E161)
scores = prior_probs + sims - (prior_probs * sims) scores = prior_probs + sims - (prior_probs * sims)
# TODO: thresholding final_kb_ids.append(
best_index = scores.argmax().item() candidates[scores.argmax().item()].entity_
best_candidate = candidates[best_index] if self.threshold is None or scores.max() >= self.threshold
final_kb_ids.append(best_candidate.entity_) else EntityLinker.NIL
)
self._add_activations(
doc_scores=doc_scores,
doc_ents=doc_ents,
scores=scores,
ents=[c.entity for c in candidates],
)
self._add_doc_activations(
docs_scores=docs_scores,
docs_ents=docs_ents,
doc_scores=doc_scores,
doc_ents=doc_ents,
)
if not (len(final_kb_ids) == entity_count): if not (len(final_kb_ids) == entity_count):
err = Errors.E147.format( err = Errors.E147.format(
method="predict", msg="result variables not of equal length" method="predict", msg="result variables not of equal length"
) )
raise RuntimeError(err) raise RuntimeError(err)
return final_kb_ids return {KNOWLEDGE_BASE_IDS: final_kb_ids, "ents": docs_ents, "scores": docs_scores}
def set_annotations(self, docs: Iterable[Doc], kb_ids: List[str]) -> None: def set_annotations(self, docs: Iterable[Doc], activations: ActivationsT) -> None:
"""Modify a batch of documents, using pre-computed scores. """Modify a batch of documents, using pre-computed scores.
docs (Iterable[Doc]): The documents to modify. docs (Iterable[Doc]): The documents to modify.
kb_ids (List[str]): The IDs to set, produced by EntityLinker.predict. activations (ActivationsT): The activations used for setting annotations, produced
by EntityLinker.predict.
DOCS: https://spacy.io/api/entitylinker#set_annotations DOCS: https://spacy.io/api/entitylinker#set_annotations
""" """
kb_ids = cast(List[str], activations[KNOWLEDGE_BASE_IDS])
count_ents = len([ent for doc in docs for ent in doc.ents]) count_ents = len([ent for doc in docs for ent in doc.ents])
if count_ents != len(kb_ids): if count_ents != len(kb_ids):
raise ValueError(Errors.E148.format(ents=count_ents, ids=len(kb_ids))) raise ValueError(Errors.E148.format(ents=count_ents, ids=len(kb_ids)))
i = 0 i = 0
overwrite = self.cfg["overwrite"] overwrite = self.cfg["overwrite"]
for doc in docs: for j, doc in enumerate(docs):
if self.save_activations:
doc.activations[self.name] = {}
for act_name, acts in activations.items():
if act_name != KNOWLEDGE_BASE_IDS:
# We only copy activations that are Ragged.
doc.activations[self.name][act_name] = cast(Ragged, acts[j])
for ent in doc.ents: for ent in doc.ents:
kb_id = kb_ids[i] kb_id = kb_ids[i]
i += 1 i += 1
@ -578,3 +657,32 @@ class EntityLinker(TrainablePipe):
def add_label(self, label): def add_label(self, label):
raise NotImplementedError raise NotImplementedError
def _add_doc_activations(
self,
*,
docs_scores: List[Ragged],
docs_ents: List[Ragged],
doc_scores: List[Floats1d],
doc_ents: List[Ints1d],
):
if not self.save_activations:
return
ops = self.model.ops
lengths = ops.asarray1i([s.shape[0] for s in doc_scores])
docs_scores.append(Ragged(ops.flatten(doc_scores), lengths))
docs_ents.append(Ragged(ops.flatten(doc_ents), lengths))
def _add_activations(
self,
*,
doc_scores: List[Floats1d],
doc_ents: List[Ints1d],
scores: Sequence[float],
ents: Sequence[int],
):
if not self.save_activations:
return
ops = self.model.ops
doc_scores.append(ops.asarray1f(scores))
doc_ents.append(ops.asarray1i(ents, dtype="uint64"))
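For the entity linker, only the Ragged activations ("ents" and "scores") are copied onto the doc, while the predicted KB ids are written to the entities as before, with NIL used when the confidence falls below `threshold`. A sketch, assuming a hypothetical pipeline trained with a populated knowledge base:

    import spacy

    nlp = spacy.load("my_nel_pipeline")  # hypothetical pipeline name
    linker = nlp.get_pipe("entity_linker")
    linker.save_activations = True  # threshold is set via the component config
    doc = nlp("Douglas Adams wrote the Guide.")
    acts = doc.activations["entity_linker"]
    # One ragged block per doc: candidate entity hashes and their scores per mention.
    print(acts["ents"].lengths, acts["scores"].lengths)
    print([ent.kb_id_ for ent in doc.ents])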


@ -7,7 +7,7 @@ from pathlib import Path
from itertools import islice from itertools import islice
import srsly import srsly
import random import random
from thinc.api import CosineDistance, Model, Optimizer, Config from thinc.api import CosineDistance, Model, Optimizer
from thinc.api import set_dropout_rate from thinc.api import set_dropout_rate
import warnings import warnings
@ -20,7 +20,7 @@ from ...language import Language
from ...vocab import Vocab from ...vocab import Vocab
from ...training import Example, validate_examples, validate_get_examples from ...training import Example, validate_examples, validate_get_examples
from ...errors import Errors, Warnings from ...errors import Errors, Warnings
from ...util import SimpleFrozenList, registry from ...util import SimpleFrozenList
from ... import util from ... import util
from ...scorer import Scorer from ...scorer import Scorer
@ -70,7 +70,6 @@ class EntityLinker_v1(TrainablePipe):
produces a list of candidates, given a certain knowledge base and a textual mention. produces a list of candidates, given a certain knowledge base and a textual mention.
scorer (Optional[Callable]): The scoring method. Defaults to scorer (Optional[Callable]): The scoring method. Defaults to
Scorer.score_links. Scorer.score_links.
DOCS: https://spacy.io/api/entitylinker#init DOCS: https://spacy.io/api/entitylinker#init
""" """
self.vocab = vocab self.vocab = vocab
@ -272,7 +271,6 @@ class EntityLinker_v1(TrainablePipe):
final_kb_ids.append(self.NIL) final_kb_ids.append(self.NIL)
elif len(candidates) == 1: elif len(candidates) == 1:
# shortcut for efficiency reasons: take the 1 candidate # shortcut for efficiency reasons: take the 1 candidate
# TODO: thresholding
final_kb_ids.append(candidates[0].entity_) final_kb_ids.append(candidates[0].entity_)
else: else:
random.shuffle(candidates) random.shuffle(candidates)
@ -301,7 +299,6 @@ class EntityLinker_v1(TrainablePipe):
if sims.shape != prior_probs.shape: if sims.shape != prior_probs.shape:
raise ValueError(Errors.E161) raise ValueError(Errors.E161)
scores = prior_probs + sims - (prior_probs * sims) scores = prior_probs + sims - (prior_probs * sims)
# TODO: thresholding
best_index = scores.argmax().item() best_index = scores.argmax().item()
best_candidate = candidates[best_index] best_candidate = candidates[best_index]
final_kb_ids.append(best_candidate.entity_) final_kb_ids.append(best_candidate.entity_)


@ -1,7 +1,8 @@
# cython: infer_types=True, profile=True, binding=True # cython: infer_types=True, profile=True, binding=True
from typing import Optional, Union, Dict, Callable from typing import Callable, Dict, Iterable, List, Optional, Union
import srsly import srsly
from thinc.api import SequenceCategoricalCrossentropy, Model, Config from thinc.api import SequenceCategoricalCrossentropy, Model, Config
from thinc.types import Floats2d, Ints1d
from itertools import islice from itertools import islice
from ..tokens.doc cimport Doc from ..tokens.doc cimport Doc
@ -13,7 +14,7 @@ from ..symbols import POS
from ..language import Language from ..language import Language
from ..errors import Errors from ..errors import Errors
from .pipe import deserialize_config from .pipe import deserialize_config
from .tagger import Tagger from .tagger import ActivationsT, Tagger
from .. import util from .. import util
from ..scorer import Scorer from ..scorer import Scorer
from ..training import validate_examples, validate_get_examples from ..training import validate_examples, validate_get_examples
@ -52,7 +53,13 @@ DEFAULT_MORPH_MODEL = Config().from_str(default_model_config)["model"]
@Language.factory( @Language.factory(
"morphologizer", "morphologizer",
assigns=["token.morph", "token.pos"], assigns=["token.morph", "token.pos"],
default_config={"model": DEFAULT_MORPH_MODEL, "overwrite": True, "extend": False, "scorer": {"@scorers": "spacy.morphologizer_scorer.v1"}}, default_config={
"model": DEFAULT_MORPH_MODEL,
"overwrite": True,
"extend": False,
"scorer": {"@scorers": "spacy.morphologizer_scorer.v1"},
"save_activations": False,
},
default_score_weights={"pos_acc": 0.5, "morph_acc": 0.5, "morph_per_feat": None}, default_score_weights={"pos_acc": 0.5, "morph_acc": 0.5, "morph_per_feat": None},
) )
def make_morphologizer( def make_morphologizer(
@ -62,8 +69,10 @@ def make_morphologizer(
overwrite: bool, overwrite: bool,
extend: bool, extend: bool,
scorer: Optional[Callable], scorer: Optional[Callable],
save_activations: bool,
): ):
return Morphologizer(nlp.vocab, model, name, overwrite=overwrite, extend=extend, scorer=scorer) return Morphologizer(nlp.vocab, model, name, overwrite=overwrite, extend=extend, scorer=scorer,
save_activations=save_activations)
def morphologizer_score(examples, **kwargs): def morphologizer_score(examples, **kwargs):
@ -95,6 +104,7 @@ class Morphologizer(Tagger):
overwrite: bool = BACKWARD_OVERWRITE, overwrite: bool = BACKWARD_OVERWRITE,
extend: bool = BACKWARD_EXTEND, extend: bool = BACKWARD_EXTEND,
scorer: Optional[Callable] = morphologizer_score, scorer: Optional[Callable] = morphologizer_score,
save_activations: bool = False,
): ):
"""Initialize a morphologizer. """Initialize a morphologizer.
@ -105,6 +115,7 @@ class Morphologizer(Tagger):
scorer (Optional[Callable]): The scoring method. Defaults to scorer (Optional[Callable]): The scoring method. Defaults to
Scorer.score_token_attr for the attributes "pos" and "morph" and Scorer.score_token_attr for the attributes "pos" and "morph" and
Scorer.score_token_attr_per_feat for the attribute "morph". Scorer.score_token_attr_per_feat for the attribute "morph".
save_activations (bool): save model activations in Doc when annotating.
DOCS: https://spacy.io/api/morphologizer#init DOCS: https://spacy.io/api/morphologizer#init
""" """
@ -124,11 +135,12 @@ class Morphologizer(Tagger):
} }
self.cfg = dict(sorted(cfg.items())) self.cfg = dict(sorted(cfg.items()))
self.scorer = scorer self.scorer = scorer
self.save_activations = save_activations
@property @property
def labels(self): def labels(self):
"""RETURNS (Tuple[str]): The labels currently added to the component.""" """RETURNS (Iterable[str]): The labels currently added to the component."""
return tuple(self.cfg["labels_morph"].keys()) return self.cfg["labels_morph"].keys()
@property @property
def label_data(self) -> Dict[str, Dict[str, Union[str, float, int, None]]]: def label_data(self) -> Dict[str, Dict[str, Union[str, float, int, None]]]:
@ -151,7 +163,7 @@ class Morphologizer(Tagger):
# normalize label # normalize label
norm_label = self.vocab.morphology.normalize_features(label) norm_label = self.vocab.morphology.normalize_features(label)
# extract separate POS and morph tags # extract separate POS and morph tags
label_dict = Morphology.feats_to_dict(label) label_dict = Morphology.feats_to_dict(label, sort_values=False)
pos = label_dict.get(self.POS_FEAT, "") pos = label_dict.get(self.POS_FEAT, "")
if self.POS_FEAT in label_dict: if self.POS_FEAT in label_dict:
label_dict.pop(self.POS_FEAT) label_dict.pop(self.POS_FEAT)
@ -189,7 +201,7 @@ class Morphologizer(Tagger):
continue continue
morph = str(token.morph) morph = str(token.morph)
# create and add the combined morph+POS label # create and add the combined morph+POS label
morph_dict = Morphology.feats_to_dict(morph) morph_dict = Morphology.feats_to_dict(morph, sort_values=False)
if pos: if pos:
morph_dict[self.POS_FEAT] = pos morph_dict[self.POS_FEAT] = pos
norm_label = self.vocab.strings[self.vocab.morphology.add(morph_dict)] norm_label = self.vocab.strings[self.vocab.morphology.add(morph_dict)]
@ -206,7 +218,7 @@ class Morphologizer(Tagger):
for i, token in enumerate(example.reference): for i, token in enumerate(example.reference):
pos = token.pos_ pos = token.pos_
morph = str(token.morph) morph = str(token.morph)
morph_dict = Morphology.feats_to_dict(morph) morph_dict = Morphology.feats_to_dict(morph, sort_values=False)
if pos: if pos:
morph_dict[self.POS_FEAT] = pos morph_dict[self.POS_FEAT] = pos
norm_label = self.vocab.strings[self.vocab.morphology.add(morph_dict)] norm_label = self.vocab.strings[self.vocab.morphology.add(morph_dict)]
@ -217,40 +229,48 @@ class Morphologizer(Tagger):
assert len(label_sample) > 0, Errors.E923.format(name=self.name) assert len(label_sample) > 0, Errors.E923.format(name=self.name)
self.model.initialize(X=doc_sample, Y=label_sample) self.model.initialize(X=doc_sample, Y=label_sample)
def set_annotations(self, docs, batch_tag_ids): def set_annotations(self, docs: Iterable[Doc], activations: ActivationsT):
"""Modify a batch of documents, using pre-computed scores. """Modify a batch of documents, using pre-computed scores.
docs (Iterable[Doc]): The documents to modify. docs (Iterable[Doc]): The documents to modify.
batch_tag_ids: The IDs to set, produced by Morphologizer.predict. activations (ActivationsT): The activations used for setting annotations, produced by Morphologizer.predict.
DOCS: https://spacy.io/api/morphologizer#set_annotations DOCS: https://spacy.io/api/morphologizer#set_annotations
""" """
batch_tag_ids = activations["label_ids"]
if isinstance(docs, Doc): if isinstance(docs, Doc):
docs = [docs] docs = [docs]
cdef Doc doc cdef Doc doc
cdef Vocab vocab = self.vocab cdef Vocab vocab = self.vocab
cdef bint overwrite = self.cfg["overwrite"] cdef bint overwrite = self.cfg["overwrite"]
cdef bint extend = self.cfg["extend"] cdef bint extend = self.cfg["extend"]
labels = self.labels
# We require random access for the upcoming ops, so we need
# to allocate a compatible container out of the iterable.
labels = tuple(self.labels)
for i, doc in enumerate(docs): for i, doc in enumerate(docs):
if self.save_activations:
doc.activations[self.name] = {}
for act_name, acts in activations.items():
doc.activations[self.name][act_name] = acts[i]
doc_tag_ids = batch_tag_ids[i] doc_tag_ids = batch_tag_ids[i]
if hasattr(doc_tag_ids, "get"): if hasattr(doc_tag_ids, "get"):
doc_tag_ids = doc_tag_ids.get() doc_tag_ids = doc_tag_ids.get()
for j, tag_id in enumerate(doc_tag_ids): for j, tag_id in enumerate(doc_tag_ids):
morph = labels[tag_id] morph = labels[int(tag_id)]
# set morph # set morph
if doc.c[j].morph == 0 or overwrite or extend: if doc.c[j].morph == 0 or overwrite or extend:
if overwrite and extend: if overwrite and extend:
# morphologizer morph overwrites any existing features # morphologizer morph overwrites any existing features
# while extending # while extending
extended_morph = Morphology.feats_to_dict(self.vocab.strings[doc.c[j].morph]) extended_morph = Morphology.feats_to_dict(self.vocab.strings[doc.c[j].morph], sort_values=False)
extended_morph.update(Morphology.feats_to_dict(self.cfg["labels_morph"].get(morph, 0))) extended_morph.update(Morphology.feats_to_dict(self.cfg["labels_morph"].get(morph, 0), sort_values=False))
doc.c[j].morph = self.vocab.morphology.add(extended_morph) doc.c[j].morph = self.vocab.morphology.add(extended_morph)
elif extend: elif extend:
# existing features are preserved and any new features # existing features are preserved and any new features
# are added # are added
extended_morph = Morphology.feats_to_dict(self.cfg["labels_morph"].get(morph, 0)) extended_morph = Morphology.feats_to_dict(self.cfg["labels_morph"].get(morph, 0), sort_values=False)
extended_morph.update(Morphology.feats_to_dict(self.vocab.strings[doc.c[j].morph])) extended_morph.update(Morphology.feats_to_dict(self.vocab.strings[doc.c[j].morph], sort_values=False))
doc.c[j].morph = self.vocab.morphology.add(extended_morph) doc.c[j].morph = self.vocab.morphology.add(extended_morph)
else: else:
# clobber # clobber
@ -270,7 +290,7 @@ class Morphologizer(Tagger):
DOCS: https://spacy.io/api/morphologizer#get_loss DOCS: https://spacy.io/api/morphologizer#get_loss
""" """
validate_examples(examples, "Morphologizer.get_loss") validate_examples(examples, "Morphologizer.get_loss")
loss_func = SequenceCategoricalCrossentropy(names=self.labels, normalize=False) loss_func = SequenceCategoricalCrossentropy(names=tuple(self.labels), normalize=False)
truths = [] truths = []
for eg in examples: for eg in examples:
eg_truths = [] eg_truths = []
@ -291,7 +311,7 @@ class Morphologizer(Tagger):
label = None label = None
# Otherwise, generate the combined label # Otherwise, generate the combined label
else: else:
label_dict = Morphology.feats_to_dict(morph) label_dict = Morphology.feats_to_dict(morph, sort_values=False)
if pos: if pos:
label_dict[self.POS_FEAT] = pos label_dict[self.POS_FEAT] = pos
label = self.vocab.strings[self.vocab.morphology.add(label_dict)] label = self.vocab.strings[self.vocab.morphology.add(label_dict)]
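Since `save_activations` is part of the factory defaults, it can be passed at construction time as well as toggled later; for the morphologizer the stored keys follow the shared tagger layout ("probabilities" and "label_ids"). A small sketch:

    import spacy

    nlp = spacy.blank("en")
    # Passing the setting through the component config at construction time.
    morphologizer = nlp.add_pipe("morphologizer", config={"save_activations": True})
    print(morphologizer.save_activations)  # True

The same key can also be set in a training config under the component block, e.g. `[components.morphologizer]`.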


@ -6,11 +6,11 @@ from thinc.api import Model, Config
from ._parser_internals.transition_system import TransitionSystem from ._parser_internals.transition_system import TransitionSystem
from .transition_parser import Parser from .transition_parser import Parser
from ._parser_internals.ner import BiluoPushDown from ._parser_internals.ner import BiluoPushDown
from ..language import Language from ..language import Language
from ..scorer import get_ner_prf, PRFScore from ..scorer import get_ner_prf, PRFScore
from ..training import validate_examples from ..training import validate_examples
from ..util import registry from ..util import registry
from ..training import remove_bilu_prefix
default_model_config = """ default_model_config = """
@ -252,11 +252,8 @@ class EntityRecognizer(Parser):
def labels(self): def labels(self):
# Get the labels from the model by looking at the available moves, e.g. # Get the labels from the model by looking at the available moves, e.g.
# B-PERSON, I-PERSON, L-PERSON, U-PERSON # B-PERSON, I-PERSON, L-PERSON, U-PERSON
labels = set( labels = set(remove_bilu_prefix(move) for move in self.move_names
move.split("-")[1] if move[0] in ("B", "I", "L", "U"))
for move in self.move_names
if move[0] in ("B", "I", "L", "U")
)
return tuple(sorted(labels)) return tuple(sorted(labels))
def scored_ents(self, beams): def scored_ents(self, beams):


@ -1,13 +1,14 @@
# cython: infer_types=True, profile=True, binding=True # cython: infer_types=True, profile=True, binding=True
from typing import Optional, Callable from typing import Dict, Iterable, Optional, Callable, List, Union
from itertools import islice from itertools import islice
import srsly import srsly
from thinc.api import Model, SequenceCategoricalCrossentropy, Config from thinc.api import Model, SequenceCategoricalCrossentropy, Config
from thinc.types import Floats2d, Ints1d
from ..tokens.doc cimport Doc from ..tokens.doc cimport Doc
from .tagger import Tagger from .tagger import ActivationsT, Tagger
from ..language import Language from ..language import Language
from ..errors import Errors from ..errors import Errors
from ..scorer import Scorer from ..scorer import Scorer
@ -38,11 +39,21 @@ DEFAULT_SENTER_MODEL = Config().from_str(default_model_config)["model"]
@Language.factory( @Language.factory(
"senter", "senter",
assigns=["token.is_sent_start"], assigns=["token.is_sent_start"],
default_config={"model": DEFAULT_SENTER_MODEL, "overwrite": False, "scorer": {"@scorers": "spacy.senter_scorer.v1"}}, default_config={
"model": DEFAULT_SENTER_MODEL,
"overwrite": False,
"scorer": {"@scorers": "spacy.senter_scorer.v1"},
"save_activations": False,
},
default_score_weights={"sents_f": 1.0, "sents_p": 0.0, "sents_r": 0.0}, default_score_weights={"sents_f": 1.0, "sents_p": 0.0, "sents_r": 0.0},
) )
def make_senter(nlp: Language, name: str, model: Model, overwrite: bool, scorer: Optional[Callable]): def make_senter(nlp: Language,
return SentenceRecognizer(nlp.vocab, model, name, overwrite=overwrite, scorer=scorer) name: str,
model: Model,
overwrite: bool,
scorer: Optional[Callable],
save_activations: bool):
return SentenceRecognizer(nlp.vocab, model, name, overwrite=overwrite, scorer=scorer, save_activations=save_activations)
def senter_score(examples, **kwargs): def senter_score(examples, **kwargs):
@ -72,6 +83,7 @@ class SentenceRecognizer(Tagger):
*, *,
overwrite=BACKWARD_OVERWRITE, overwrite=BACKWARD_OVERWRITE,
scorer=senter_score, scorer=senter_score,
save_activations: bool = False,
): ):
"""Initialize a sentence recognizer. """Initialize a sentence recognizer.
@ -81,6 +93,7 @@ class SentenceRecognizer(Tagger):
losses during training. losses during training.
scorer (Optional[Callable]): The scoring method. Defaults to scorer (Optional[Callable]): The scoring method. Defaults to
Scorer.score_spans for the attribute "sents". Scorer.score_spans for the attribute "sents".
save_activations (bool): save model activations in Doc when annotating.
DOCS: https://spacy.io/api/sentencerecognizer#init DOCS: https://spacy.io/api/sentencerecognizer#init
""" """
@ -90,6 +103,7 @@ class SentenceRecognizer(Tagger):
self._rehearsal_model = None self._rehearsal_model = None
self.cfg = {"overwrite": overwrite} self.cfg = {"overwrite": overwrite}
self.scorer = scorer self.scorer = scorer
self.save_activations = save_activations
@property @property
def labels(self): def labels(self):
@ -107,19 +121,24 @@ class SentenceRecognizer(Tagger):
def label_data(self): def label_data(self):
return None return None
def set_annotations(self, docs, batch_tag_ids): def set_annotations(self, docs: Iterable[Doc], activations: ActivationsT):
"""Modify a batch of documents, using pre-computed scores. """Modify a batch of documents, using pre-computed scores.
docs (Iterable[Doc]): The documents to modify. docs (Iterable[Doc]): The documents to modify.
batch_tag_ids: The IDs to set, produced by SentenceRecognizer.predict. activations (ActivationsT): The activations used for setting annotations, produced by SentenceRecognizer.predict.
DOCS: https://spacy.io/api/sentencerecognizer#set_annotations DOCS: https://spacy.io/api/sentencerecognizer#set_annotations
""" """
batch_tag_ids = activations["label_ids"]
if isinstance(docs, Doc): if isinstance(docs, Doc):
docs = [docs] docs = [docs]
cdef Doc doc cdef Doc doc
cdef bint overwrite = self.cfg["overwrite"] cdef bint overwrite = self.cfg["overwrite"]
for i, doc in enumerate(docs): for i, doc in enumerate(docs):
if self.save_activations:
doc.activations[self.name] = {}
for act_name, acts in activations.items():
doc.activations[self.name][act_name] = acts[i]
doc_tag_ids = batch_tag_ids[i] doc_tag_ids = batch_tag_ids[i]
if hasattr(doc_tag_ids, "get"): if hasattr(doc_tag_ids, "get"):
doc_tag_ids = doc_tag_ids.get() doc_tag_ids = doc_tag_ids.get()


@ -1,4 +1,5 @@
from typing import List, Dict, Callable, Tuple, Optional, Iterable, Any, cast from typing import List, Dict, Callable, Tuple, Optional, Iterable, Any, cast
from typing import Union
from thinc.api import Config, Model, get_current_ops, set_dropout_rate, Ops from thinc.api import Config, Model, get_current_ops, set_dropout_rate, Ops
from thinc.api import Optimizer from thinc.api import Optimizer
from thinc.types import Ragged, Ints2d, Floats2d, Ints1d from thinc.types import Ragged, Ints2d, Floats2d, Ints1d
@ -16,6 +17,9 @@ from ..errors import Errors
from ..util import registry from ..util import registry
ActivationsT = Dict[str, Union[Floats2d, Ragged]]
spancat_default_config = """ spancat_default_config = """
[model] [model]
@architectures = "spacy.SpanCategorizer.v1" @architectures = "spacy.SpanCategorizer.v1"
@ -106,6 +110,7 @@ def build_ngram_range_suggester(min_size: int, max_size: int) -> Suggester:
"model": DEFAULT_SPANCAT_MODEL, "model": DEFAULT_SPANCAT_MODEL,
"suggester": {"@misc": "spacy.ngram_suggester.v1", "sizes": [1, 2, 3]}, "suggester": {"@misc": "spacy.ngram_suggester.v1", "sizes": [1, 2, 3]},
"scorer": {"@scorers": "spacy.spancat_scorer.v1"}, "scorer": {"@scorers": "spacy.spancat_scorer.v1"},
"save_activations": False,
}, },
default_score_weights={"spans_sc_f": 1.0, "spans_sc_p": 0.0, "spans_sc_r": 0.0}, default_score_weights={"spans_sc_f": 1.0, "spans_sc_p": 0.0, "spans_sc_r": 0.0},
) )
@ -118,6 +123,7 @@ def make_spancat(
scorer: Optional[Callable], scorer: Optional[Callable],
threshold: float, threshold: float,
max_positive: Optional[int], max_positive: Optional[int],
save_activations: bool,
) -> "SpanCategorizer": ) -> "SpanCategorizer":
"""Create a SpanCategorizer component. The span categorizer consists of two """Create a SpanCategorizer component. The span categorizer consists of two
parts: a suggester function that proposes candidate spans, and a labeller parts: a suggester function that proposes candidate spans, and a labeller
@ -138,6 +144,7 @@ def make_spancat(
0.5. 0.5.
max_positive (Optional[int]): Maximum number of labels to consider positive max_positive (Optional[int]): Maximum number of labels to consider positive
per span. Defaults to None, indicating no limit. per span. Defaults to None, indicating no limit.
save_activations (bool): save model activations in Doc when annotating.
""" """
return SpanCategorizer( return SpanCategorizer(
nlp.vocab, nlp.vocab,
@ -148,6 +155,7 @@ def make_spancat(
max_positive=max_positive, max_positive=max_positive,
name=name, name=name,
scorer=scorer, scorer=scorer,
save_activations=save_activations,
) )
@ -186,6 +194,7 @@ class SpanCategorizer(TrainablePipe):
threshold: float = 0.5, threshold: float = 0.5,
max_positive: Optional[int] = None, max_positive: Optional[int] = None,
scorer: Optional[Callable] = spancat_score, scorer: Optional[Callable] = spancat_score,
save_activations: bool = False,
) -> None: ) -> None:
"""Initialize the span categorizer. """Initialize the span categorizer.
vocab (Vocab): The shared vocabulary. vocab (Vocab): The shared vocabulary.
@ -218,6 +227,7 @@ class SpanCategorizer(TrainablePipe):
self.model = model self.model = model
self.name = name self.name = name
self.scorer = scorer self.scorer = scorer
self.save_activations = save_activations
@property @property
def key(self) -> str: def key(self) -> str:
@ -260,7 +270,7 @@ class SpanCategorizer(TrainablePipe):
""" """
return list(self.labels) return list(self.labels)
def predict(self, docs: Iterable[Doc]): def predict(self, docs: Iterable[Doc]) -> ActivationsT:
"""Apply the pipeline's model to a batch of docs, without modifying them. """Apply the pipeline's model to a batch of docs, without modifying them.
docs (Iterable[Doc]): The documents to predict. docs (Iterable[Doc]): The documents to predict.
@ -270,7 +280,7 @@ class SpanCategorizer(TrainablePipe):
""" """
indices = self.suggester(docs, ops=self.model.ops) indices = self.suggester(docs, ops=self.model.ops)
scores = self.model.predict((docs, indices)) # type: ignore scores = self.model.predict((docs, indices)) # type: ignore
return indices, scores return {"indices": indices, "scores": scores}
def set_candidates( def set_candidates(
self, docs: Iterable[Doc], *, candidates_key: str = "candidates" self, docs: Iterable[Doc], *, candidates_key: str = "candidates"
@ -290,19 +300,29 @@ class SpanCategorizer(TrainablePipe):
for index in candidates.dataXd: for index in candidates.dataXd:
doc.spans[candidates_key].append(doc[index[0] : index[1]]) doc.spans[candidates_key].append(doc[index[0] : index[1]])
def set_annotations(self, docs: Iterable[Doc], indices_scores) -> None: def set_annotations(self, docs: Iterable[Doc], activations: ActivationsT) -> None:
"""Modify a batch of Doc objects, using pre-computed scores. """Modify a batch of Doc objects, using pre-computed scores.
docs (Iterable[Doc]): The documents to modify. docs (Iterable[Doc]): The documents to modify.
scores: The scores to set, produced by SpanCategorizer.predict. activations: ActivationsT: The activations, produced by SpanCategorizer.predict.
DOCS: https://spacy.io/api/spancategorizer#set_annotations DOCS: https://spacy.io/api/spancategorizer#set_annotations
""" """
labels = self.labels labels = self.labels
indices, scores = indices_scores
indices = activations["indices"]
assert isinstance(indices, Ragged)
scores = cast(Floats2d, activations["scores"])
offset = 0 offset = 0
for i, doc in enumerate(docs): for i, doc in enumerate(docs):
indices_i = indices[i].dataXd indices_i = indices[i].dataXd
if self.save_activations:
doc.activations[self.name] = {}
doc.activations[self.name]["indices"] = indices_i
doc.activations[self.name]["scores"] = scores[
offset : offset + indices.lengths[i]
]
doc.spans[self.key] = self._make_span_group( doc.spans[self.key] = self._make_span_group(
doc, indices_i, scores[offset : offset + indices.lengths[i]], labels # type: ignore[arg-type] doc, indices_i, scores[offset : offset + indices.lengths[i]], labels # type: ignore[arg-type]
) )
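For the span categorizer the stored "indices" are the suggested span offsets for the doc and "scores" the matching rows of label scores, sliced out of the batch as above. A sketch, assuming a hypothetical trained `spancat` component using the default spans key "sc":

    import spacy

    nlp = spacy.load("my_spancat_pipeline")  # hypothetical pipeline name
    nlp.get_pipe("spancat").save_activations = True
    doc = nlp("The quick brown fox jumped.")
    acts = doc.activations["spancat"]
    # One row of label scores per suggested span; indices hold the token offsets.
    print(acts["indices"].shape, acts["scores"].shape)
    print(doc.spans["sc"])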


@ -1,9 +1,9 @@
# cython: infer_types=True, profile=True, binding=True # cython: infer_types=True, profile=True, binding=True
from typing import Callable, Optional from typing import Callable, Dict, Iterable, List, Optional, Union
import numpy import numpy
import srsly import srsly
from thinc.api import Model, set_dropout_rate, SequenceCategoricalCrossentropy, Config from thinc.api import Model, set_dropout_rate, SequenceCategoricalCrossentropy, Config
from thinc.types import Floats2d from thinc.types import Floats2d, Ints1d
import warnings import warnings
from itertools import islice from itertools import islice
@ -22,6 +22,9 @@ from ..training import validate_examples, validate_get_examples
from ..util import registry from ..util import registry
from .. import util from .. import util
ActivationsT = Dict[str, Union[List[Floats2d], List[Ints1d]]]
# See #9050 # See #9050
BACKWARD_OVERWRITE = False BACKWARD_OVERWRITE = False
@ -45,7 +48,13 @@ DEFAULT_TAGGER_MODEL = Config().from_str(default_model_config)["model"]
@Language.factory( @Language.factory(
"tagger", "tagger",
assigns=["token.tag"], assigns=["token.tag"],
default_config={"model": DEFAULT_TAGGER_MODEL, "overwrite": False, "scorer": {"@scorers": "spacy.tagger_scorer.v1"}, "neg_prefix": "!"}, default_config={
"model": DEFAULT_TAGGER_MODEL,
"overwrite": False,
"scorer": {"@scorers": "spacy.tagger_scorer.v1"},
"neg_prefix": "!",
"save_activations": False,
},
default_score_weights={"tag_acc": 1.0}, default_score_weights={"tag_acc": 1.0},
) )
def make_tagger( def make_tagger(
@ -55,6 +64,7 @@ def make_tagger(
overwrite: bool, overwrite: bool,
scorer: Optional[Callable], scorer: Optional[Callable],
neg_prefix: str, neg_prefix: str,
save_activations: bool,
): ):
"""Construct a part-of-speech tagger component. """Construct a part-of-speech tagger component.
@ -63,7 +73,8 @@ def make_tagger(
in size, and be normalized as probabilities (all scores between 0 and 1, in size, and be normalized as probabilities (all scores between 0 and 1,
with the rows summing to 1). with the rows summing to 1).
""" """
return Tagger(nlp.vocab, model, name, overwrite=overwrite, scorer=scorer, neg_prefix=neg_prefix) return Tagger(nlp.vocab, model, name, overwrite=overwrite, scorer=scorer, neg_prefix=neg_prefix,
save_activations=save_activations)
def tagger_score(examples, **kwargs): def tagger_score(examples, **kwargs):
@ -89,6 +100,7 @@ class Tagger(TrainablePipe):
overwrite=BACKWARD_OVERWRITE, overwrite=BACKWARD_OVERWRITE,
scorer=tagger_score, scorer=tagger_score,
neg_prefix="!", neg_prefix="!",
save_activations: bool = False,
): ):
"""Initialize a part-of-speech tagger. """Initialize a part-of-speech tagger.
@ -98,6 +110,7 @@ class Tagger(TrainablePipe):
losses during training. losses during training.
scorer (Optional[Callable]): The scoring method. Defaults to scorer (Optional[Callable]): The scoring method. Defaults to
Scorer.score_token_attr for the attribute "tag". Scorer.score_token_attr for the attribute "tag".
save_activations (bool): save model activations in Doc when annotating.
DOCS: https://spacy.io/api/tagger#init DOCS: https://spacy.io/api/tagger#init
""" """
@ -108,6 +121,7 @@ class Tagger(TrainablePipe):
cfg = {"labels": [], "overwrite": overwrite, "neg_prefix": neg_prefix} cfg = {"labels": [], "overwrite": overwrite, "neg_prefix": neg_prefix}
self.cfg = dict(sorted(cfg.items())) self.cfg = dict(sorted(cfg.items()))
self.scorer = scorer self.scorer = scorer
self.save_activations = save_activations
@property @property
def labels(self): def labels(self):
@ -126,7 +140,7 @@ class Tagger(TrainablePipe):
"""Data about the labels currently added to the component.""" """Data about the labels currently added to the component."""
return tuple(self.cfg["labels"]) return tuple(self.cfg["labels"])
def predict(self, docs): def predict(self, docs) -> ActivationsT:
"""Apply the pipeline's model to a batch of docs, without modifying them. """Apply the pipeline's model to a batch of docs, without modifying them.
docs (Iterable[Doc]): The documents to predict. docs (Iterable[Doc]): The documents to predict.
@ -139,12 +153,12 @@ class Tagger(TrainablePipe):
n_labels = len(self.labels) n_labels = len(self.labels)
guesses = [self.model.ops.alloc((0, n_labels)) for doc in docs] guesses = [self.model.ops.alloc((0, n_labels)) for doc in docs]
assert len(guesses) == len(docs) assert len(guesses) == len(docs)
return guesses return {"probabilities": guesses, "label_ids": guesses}
scores = self.model.predict(docs) scores = self.model.predict(docs)
assert len(scores) == len(docs), (len(scores), len(docs)) assert len(scores) == len(docs), (len(scores), len(docs))
guesses = self._scores2guesses(scores) guesses = self._scores2guesses(scores)
assert len(guesses) == len(docs) assert len(guesses) == len(docs)
return guesses return {"probabilities": scores, "label_ids": guesses}
def _scores2guesses(self, scores): def _scores2guesses(self, scores):
guesses = [] guesses = []
@ -155,14 +169,15 @@ class Tagger(TrainablePipe):
guesses.append(doc_guesses) guesses.append(doc_guesses)
return guesses return guesses
def set_annotations(self, docs, batch_tag_ids): def set_annotations(self, docs: Iterable[Doc], activations: ActivationsT):
"""Modify a batch of documents, using pre-computed scores. """Modify a batch of documents, using pre-computed scores.
docs (Iterable[Doc]): The documents to modify. docs (Iterable[Doc]): The documents to modify.
batch_tag_ids: The IDs to set, produced by Tagger.predict. activations (ActivationsT): The activations used for setting annotations, produced by Tagger.predict.
DOCS: https://spacy.io/api/tagger#set_annotations DOCS: https://spacy.io/api/tagger#set_annotations
""" """
batch_tag_ids = activations["label_ids"]
if isinstance(docs, Doc): if isinstance(docs, Doc):
docs = [docs] docs = [docs]
cdef Doc doc cdef Doc doc
@ -170,6 +185,10 @@ class Tagger(TrainablePipe):
cdef bint overwrite = self.cfg["overwrite"] cdef bint overwrite = self.cfg["overwrite"]
labels = self.labels labels = self.labels
for i, doc in enumerate(docs): for i, doc in enumerate(docs):
if self.save_activations:
doc.activations[self.name] = {}
for act_name, acts in activations.items():
doc.activations[self.name][act_name] = acts[i]
doc_tag_ids = batch_tag_ids[i] doc_tag_ids = batch_tag_ids[i]
if hasattr(doc_tag_ids, "get"): if hasattr(doc_tag_ids, "get"):
doc_tag_ids = doc_tag_ids.get() doc_tag_ids = doc_tag_ids.get()
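For the tagger, "label_ids" index into the component's label set while "probabilities" keeps the full score rows; a sketch, assuming an installed English pipeline with a trained `tagger` component:

    import spacy

    nlp = spacy.load("en_core_web_sm")  # any pipeline with a trained tagger
    tagger = nlp.get_pipe("tagger")
    tagger.save_activations = True
    doc = nlp("This is a test.")
    acts = doc.activations["tagger"]
    tags = [tagger.labels[int(i)] for i in acts["label_ids"]]
    # These generally match the assigned tags, though later components
    # (e.g. the attribute ruler) may still adjust token.tag_.
    print(tags)
    print([t.tag_ for t in doc])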


@ -1,4 +1,4 @@
from typing import Iterable, Tuple, Optional, Dict, List, Callable, Any from typing import Iterable, Tuple, Optional, Dict, List, Callable, Any, Union
from thinc.api import get_array_module, Model, Optimizer, set_dropout_rate, Config from thinc.api import get_array_module, Model, Optimizer, set_dropout_rate, Config
from thinc.types import Floats2d from thinc.types import Floats2d
import numpy import numpy
@ -14,6 +14,9 @@ from ..util import registry
from ..vocab import Vocab from ..vocab import Vocab
ActivationsT = Dict[str, Floats2d]
single_label_default_config = """ single_label_default_config = """
[model] [model]
@architectures = "spacy.TextCatEnsemble.v2" @architectures = "spacy.TextCatEnsemble.v2"
@ -75,6 +78,7 @@ subword_features = true
"threshold": 0.5, "threshold": 0.5,
"model": DEFAULT_SINGLE_TEXTCAT_MODEL, "model": DEFAULT_SINGLE_TEXTCAT_MODEL,
"scorer": {"@scorers": "spacy.textcat_scorer.v1"}, "scorer": {"@scorers": "spacy.textcat_scorer.v1"},
"save_activations": False,
}, },
default_score_weights={ default_score_weights={
"cats_score": 1.0, "cats_score": 1.0,
@ -96,6 +100,7 @@ def make_textcat(
model: Model[List[Doc], List[Floats2d]], model: Model[List[Doc], List[Floats2d]],
threshold: float, threshold: float,
scorer: Optional[Callable], scorer: Optional[Callable],
save_activations: bool,
) -> "TextCategorizer": ) -> "TextCategorizer":
"""Create a TextCategorizer component. The text categorizer predicts categories """Create a TextCategorizer component. The text categorizer predicts categories
over a whole document. It can learn one or more labels, and the labels are considered over a whole document. It can learn one or more labels, and the labels are considered
@ -105,8 +110,16 @@ def make_textcat(
scores for each category. scores for each category.
threshold (float): Cutoff to consider a prediction "positive". threshold (float): Cutoff to consider a prediction "positive".
scorer (Optional[Callable]): The scoring method. scorer (Optional[Callable]): The scoring method.
save_activations (bool): save model activations in Doc when annotating.
""" """
return TextCategorizer(nlp.vocab, model, name, threshold=threshold, scorer=scorer) return TextCategorizer(
nlp.vocab,
model,
name,
threshold=threshold,
scorer=scorer,
save_activations=save_activations,
)
def textcat_score(examples: Iterable[Example], **kwargs) -> Dict[str, Any]: def textcat_score(examples: Iterable[Example], **kwargs) -> Dict[str, Any]:
@ -137,6 +150,7 @@ class TextCategorizer(TrainablePipe):
*, *,
threshold: float, threshold: float,
scorer: Optional[Callable] = textcat_score, scorer: Optional[Callable] = textcat_score,
save_activations: bool = False,
) -> None: ) -> None:
"""Initialize a text categorizer for single-label classification. """Initialize a text categorizer for single-label classification.
@ -157,6 +171,7 @@ class TextCategorizer(TrainablePipe):
cfg = {"labels": [], "threshold": threshold, "positive_label": None} cfg = {"labels": [], "threshold": threshold, "positive_label": None}
self.cfg = dict(cfg) self.cfg = dict(cfg)
self.scorer = scorer self.scorer = scorer
self.save_activations = save_activations
@property @property
def support_missing_values(self): def support_missing_values(self):
@ -181,7 +196,7 @@ class TextCategorizer(TrainablePipe):
""" """
return self.labels # type: ignore[return-value] return self.labels # type: ignore[return-value]
def predict(self, docs: Iterable[Doc]): def predict(self, docs: Iterable[Doc]) -> ActivationsT:
"""Apply the pipeline's model to a batch of docs, without modifying them. """Apply the pipeline's model to a batch of docs, without modifying them.
docs (Iterable[Doc]): The documents to predict. docs (Iterable[Doc]): The documents to predict.
@ -194,12 +209,12 @@ class TextCategorizer(TrainablePipe):
tensors = [doc.tensor for doc in docs] tensors = [doc.tensor for doc in docs]
xp = self.model.ops.xp xp = self.model.ops.xp
scores = xp.zeros((len(list(docs)), len(self.labels))) scores = xp.zeros((len(list(docs)), len(self.labels)))
return scores return {"probabilities": scores}
scores = self.model.predict(docs) scores = self.model.predict(docs)
scores = self.model.ops.asarray(scores) scores = self.model.ops.asarray(scores)
return scores return {"probabilities": scores}
def set_annotations(self, docs: Iterable[Doc], scores) -> None: def set_annotations(self, docs: Iterable[Doc], activations: ActivationsT) -> None:
"""Modify a batch of Doc objects, using pre-computed scores. """Modify a batch of Doc objects, using pre-computed scores.
docs (Iterable[Doc]): The documents to modify. docs (Iterable[Doc]): The documents to modify.
@ -207,9 +222,13 @@ class TextCategorizer(TrainablePipe):
DOCS: https://spacy.io/api/textcategorizer#set_annotations DOCS: https://spacy.io/api/textcategorizer#set_annotations
""" """
probs = activations["probabilities"]
for i, doc in enumerate(docs): for i, doc in enumerate(docs):
if self.save_activations:
doc.activations[self.name] = {}
doc.activations[self.name]["probabilities"] = probs[i]
for j, label in enumerate(self.labels): for j, label in enumerate(self.labels):
doc.cats[label] = float(scores[i, j]) doc.cats[label] = float(probs[i, j])
def update( def update(
self, self,
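The textcat activations mirror what is written to `doc.cats`: one probability per label, in the order of `textcat.labels`. A sketch, assuming a hypothetical pipeline trained with a `textcat` component:

    import spacy

    nlp = spacy.load("my_textcat_pipeline")  # hypothetical pipeline name
    textcat = nlp.get_pipe("textcat")
    textcat.save_activations = True
    doc = nlp("This was a great movie!")
    probs = doc.activations["textcat"]["probabilities"]
    for label, score in zip(textcat.labels, probs):
        print(label, float(score), doc.cats[label])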


@ -1,4 +1,4 @@
from typing import Iterable, Optional, Dict, List, Callable, Any from typing import Iterable, Optional, Dict, List, Callable, Any, Union
from thinc.types import Floats2d from thinc.types import Floats2d
from thinc.api import Model, Config from thinc.api import Model, Config
@ -75,6 +75,7 @@ subword_features = true
"threshold": 0.5, "threshold": 0.5,
"model": DEFAULT_MULTI_TEXTCAT_MODEL, "model": DEFAULT_MULTI_TEXTCAT_MODEL,
"scorer": {"@scorers": "spacy.textcat_multilabel_scorer.v1"}, "scorer": {"@scorers": "spacy.textcat_multilabel_scorer.v1"},
"save_activations": False,
}, },
default_score_weights={ default_score_weights={
"cats_score": 1.0, "cats_score": 1.0,
@ -96,6 +97,7 @@ def make_multilabel_textcat(
model: Model[List[Doc], List[Floats2d]], model: Model[List[Doc], List[Floats2d]],
threshold: float, threshold: float,
scorer: Optional[Callable], scorer: Optional[Callable],
save_activations: bool,
) -> "TextCategorizer": ) -> "TextCategorizer":
"""Create a TextCategorizer component. The text categorizer predicts categories """Create a TextCategorizer component. The text categorizer predicts categories
over a whole document. It can learn one or more labels, and the labels are considered over a whole document. It can learn one or more labels, and the labels are considered
@ -107,7 +109,12 @@ def make_multilabel_textcat(
threshold (float): Cutoff to consider a prediction "positive". threshold (float): Cutoff to consider a prediction "positive".
""" """
return MultiLabel_TextCategorizer( return MultiLabel_TextCategorizer(
nlp.vocab, model, name, threshold=threshold, scorer=scorer nlp.vocab,
model,
name,
threshold=threshold,
scorer=scorer,
save_activations=save_activations,
) )
@ -139,6 +146,7 @@ class MultiLabel_TextCategorizer(TextCategorizer):
*, *,
threshold: float, threshold: float,
scorer: Optional[Callable] = textcat_multilabel_score, scorer: Optional[Callable] = textcat_multilabel_score,
save_activations: bool = False,
) -> None: ) -> None:
"""Initialize a text categorizer for multi-label classification. """Initialize a text categorizer for multi-label classification.
@ -147,6 +155,7 @@ class MultiLabel_TextCategorizer(TextCategorizer):
name (str): The component instance name, used to add entries to the name (str): The component instance name, used to add entries to the
losses during training. losses during training.
threshold (float): Cutoff to consider a prediction "positive". threshold (float): Cutoff to consider a prediction "positive".
save_activations (bool): save model activations in Doc when annotating.
DOCS: https://spacy.io/api/textcategorizer#init DOCS: https://spacy.io/api/textcategorizer#init
""" """
@ -157,6 +166,7 @@ class MultiLabel_TextCategorizer(TextCategorizer):
cfg = {"labels": [], "threshold": threshold} cfg = {"labels": [], "threshold": threshold}
self.cfg = dict(cfg) self.cfg = dict(cfg)
self.scorer = scorer self.scorer = scorer
self.save_activations = save_activations
@property @property
def support_missing_values(self): def support_missing_values(self):
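
# The same flag can also be set through the factory config, matching the
# "save_activations": False default registered above. Sketch only: training and
# nlp.initialize() are omitted.
import spacy

nlp = spacy.blank("en")
textcat = nlp.add_pipe(
    "textcat_multilabel",
    config={"threshold": 0.5, "save_activations": True},
)
textcat.add_label("POSITIVE")
textcat.add_label("NEGATIVE")
# After initialization/training, processed docs expose
# doc.activations["textcat_multilabel"]["probabilities"] with one score per label.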

View File

@ -6,3 +6,4 @@ cdef class TrainablePipe(Pipe):
cdef public object model cdef public object model
cdef public object cfg cdef public object cfg
cdef public object scorer cdef public object scorer
cdef bint _save_activations

View File

@ -2,11 +2,12 @@
from typing import Iterable, Iterator, Optional, Dict, Tuple, Callable from typing import Iterable, Iterator, Optional, Dict, Tuple, Callable
import srsly import srsly
from thinc.api import set_dropout_rate, Model, Optimizer from thinc.api import set_dropout_rate, Model, Optimizer
import warnings
from ..tokens.doc cimport Doc from ..tokens.doc cimport Doc
from ..training import validate_examples from ..training import validate_examples
from ..errors import Errors from ..errors import Errors, Warnings
from .pipe import Pipe, deserialize_config from .pipe import Pipe, deserialize_config
from .. import util from .. import util
from ..vocab import Vocab from ..vocab import Vocab
@ -342,3 +343,11 @@ cdef class TrainablePipe(Pipe):
deserialize["model"] = load_model deserialize["model"] = load_model
util.from_disk(path, deserialize, exclude) util.from_disk(path, deserialize, exclude)
return self return self
@property
def save_activations(self):
return self._save_activations
@save_activations.setter
def save_activations(self, save_activations: bool):
self._save_activations = save_activations
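
# Because save_activations is now a plain property on TrainablePipe, it can be toggled
# generically after loading. A hedged sketch; the pipeline name is illustrative:
import spacy

nlp = spacy.load("my_pipeline")
for name, pipe in nlp.pipeline:
    if hasattr(pipe, "save_activations"):
        pipe.save_activations = True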

View File

@ -11,7 +11,9 @@ import random
import contextlib import contextlib
import srsly import srsly
from thinc.api import set_dropout_rate, CupyOps, get_array_module from thinc.api import get_ops, set_dropout_rate, CupyOps, NumpyOps
from thinc.api import get_array_module
from thinc.extra.search cimport Beam
from thinc.types import Ints1d from thinc.types import Ints1d
import numpy.random import numpy.random
import numpy import numpy
@ -20,7 +22,7 @@ import warnings
from ._parser_internals.stateclass cimport StateC, StateClass from ._parser_internals.stateclass cimport StateC, StateClass
from ._parser_internals.search cimport Beam from ._parser_internals.search cimport Beam
from ..tokens.doc cimport Doc from ..tokens.doc cimport Doc
from .trainable_pipe import TrainablePipe from .trainable_pipe cimport TrainablePipe
from ._parser_internals cimport _beam_utils from ._parser_internals cimport _beam_utils
from ._parser_internals import _beam_utils from ._parser_internals import _beam_utils
from ..vocab cimport Vocab from ..vocab cimport Vocab
@ -32,7 +34,10 @@ from ..errors import Errors, Warnings
from .. import util from .. import util
class Parser(TrainablePipe): NUMPY_OPS = NumpyOps()
cdef class Parser(TrainablePipe):
""" """
Base class of the DependencyParser and EntityRecognizer. Base class of the DependencyParser and EntityRecognizer.
""" """
@ -122,6 +127,7 @@ class Parser(TrainablePipe):
self._rehearsal_model = None self._rehearsal_model = None
self.scorer = scorer self.scorer = scorer
self._cpu_ops = get_ops("cpu") if isinstance(self.model.ops, CupyOps) else self.model.ops
def __getnewargs_ex__(self): def __getnewargs_ex__(self):
"""This allows pickling the Parser and its keyword-only init arguments""" """This allows pickling the Parser and its keyword-only init arguments"""

View File

@ -3,12 +3,13 @@ from typing import Iterable, TypeVar, TYPE_CHECKING
from .compat import Literal from .compat import Literal
from enum import Enum from enum import Enum
from pydantic import BaseModel, Field, ValidationError, validator, create_model from pydantic import BaseModel, Field, ValidationError, validator, create_model
from pydantic import StrictStr, StrictInt, StrictFloat, StrictBool from pydantic import StrictStr, StrictInt, StrictFloat, StrictBool, ConstrainedStr
from pydantic.main import ModelMetaclass from pydantic.main import ModelMetaclass
from thinc.api import Optimizer, ConfigValidationError, Model from thinc.api import Optimizer, ConfigValidationError, Model
from thinc.config import Promise from thinc.config import Promise
from collections import defaultdict from collections import defaultdict
import inspect import inspect
import re
from .attrs import NAMES from .attrs import NAMES
from .lookups import Lookups from .lookups import Lookups
@ -143,7 +144,7 @@ def validate_init_settings(
def validate_token_pattern(obj: list) -> List[str]: def validate_token_pattern(obj: list) -> List[str]:
# Try to convert non-string keys (e.g. {ORTH: "foo"} -> {"ORTH": "foo"}) # Try to convert non-string keys (e.g. {ORTH: "foo"} -> {"ORTH": "foo"})
get_key = lambda k: NAMES[k] if isinstance(k, int) and k < len(NAMES) else k get_key = lambda k: NAMES[k] if isinstance(k, int) and k in NAMES else k
if isinstance(obj, list): if isinstance(obj, list):
converted = [] converted = []
for pattern in obj: for pattern in obj:
@ -198,13 +199,18 @@ class TokenPatternNumber(BaseModel):
return v return v
class TokenPatternOperator(str, Enum): class TokenPatternOperatorSimple(str, Enum):
plus: StrictStr = StrictStr("+") plus: StrictStr = StrictStr("+")
start: StrictStr = StrictStr("*") star: StrictStr = StrictStr("*")
question: StrictStr = StrictStr("?") question: StrictStr = StrictStr("?")
exclamation: StrictStr = StrictStr("!") exclamation: StrictStr = StrictStr("!")
class TokenPatternOperatorMinMax(ConstrainedStr):
regex = re.compile(r"^({\d+}|{\d+,\d*}|{\d*,\d+})$")
TokenPatternOperator = Union[TokenPatternOperatorSimple, TokenPatternOperatorMinMax]
StringValue = Union[TokenPatternString, StrictStr] StringValue = Union[TokenPatternString, StrictStr]
NumberValue = Union[TokenPatternNumber, StrictInt, StrictFloat] NumberValue = Union[TokenPatternNumber, StrictInt, StrictFloat]
UnderscoreValue = Union[ UnderscoreValue = Union[
@ -508,6 +514,14 @@ class DocJSONSchema(BaseModel):
tokens: List[Dict[StrictStr, Union[StrictStr, StrictInt]]] = Field( tokens: List[Dict[StrictStr, Union[StrictStr, StrictInt]]] = Field(
..., title="Token information - ID, start, annotations" ..., title="Token information - ID, start, annotations"
) )
_: Optional[Dict[StrictStr, Any]] = Field( underscore_doc: Optional[Dict[StrictStr, Any]] = Field(
None, title="Any custom data stored in the document's _ attribute" None,
title="Any custom data stored in the document's _ attribute",
alias="_",
)
underscore_token: Optional[Dict[StrictStr, Dict[StrictStr, Any]]] = Field(
None, title="Any custom data stored in the token's _ attribute"
)
underscore_span: Optional[Dict[StrictStr, Dict[StrictStr, Any]]] = Field(
None, title="Any custom data stored in the span's _ attribute"
) )
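
# A hedged sketch of what the TokenPatternOperatorMinMax regex above admits in Matcher
# rules ("{n}", "{n,m}", "{n,}", "{,m}"), assuming the matcher itself supports these
# operators in this branch and not only the schema validation:
import spacy
from spacy.matcher import Matcher

nlp = spacy.blank("en")
matcher = Matcher(nlp.vocab)
# Two to four repetitions of "ha", followed by "!"
matcher.add("LAUGH", [[{"LOWER": "ha", "OP": "{2,4}"}, {"LOWER": "!"}]])
doc = nlp("ha ha ha !")
for match_id, start, end in matcher(doc):
    print(doc[start:end].text)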

View File

@ -26,4 +26,4 @@ cdef class StringStore:
cdef public PreshMap _map cdef public PreshMap _map
cdef const Utf8Str* intern_unicode(self, str py_string) cdef const Utf8Str* intern_unicode(self, str py_string)
cdef const Utf8Str* _intern_utf8(self, char* utf8_string, int length) cdef const Utf8Str* _intern_utf8(self, char* utf8_string, int length, hash_t* precalculated_hash)

View File

@ -14,6 +14,13 @@ from .symbols import NAMES as SYMBOLS_BY_INT
from .errors import Errors from .errors import Errors
from . import util from . import util
# Not particularly elegant, but this is faster than `isinstance(key, numbers.Integral)`
cdef inline bint _try_coerce_to_hash(object key, hash_t* out_hash):
try:
out_hash[0] = key
return True
except:
return False
def get_string_id(key): def get_string_id(key):
"""Get a string ID, handling the reserved symbols correctly. If the key is """Get a string ID, handling the reserved symbols correctly. If the key is
@ -22,15 +29,27 @@ def get_string_id(key):
This function optimises for convenience over performance, so shouldn't be This function optimises for convenience over performance, so shouldn't be
used in tight loops. used in tight loops.
""" """
if not isinstance(key, str): cdef hash_t str_hash
return key if isinstance(key, str):
elif key in SYMBOLS_BY_STR: if len(key) == 0:
return SYMBOLS_BY_STR[key] return 0
elif not key:
return 0 symbol = SYMBOLS_BY_STR.get(key, None)
if symbol is not None:
return symbol
else:
chars = key.encode("utf8")
return hash_utf8(chars, len(chars))
elif _try_coerce_to_hash(key, &str_hash):
# Coerce the integral key to the expected primitive hash type.
# This ensures that custom/overloaded "primitive" data types
# such as those implemented by numpy are not inadvertently used
# downstream (as these are internally implemented as custom PyObjects
# whose comparison operators can incur a significant overhead).
return str_hash
else: else:
chars = key.encode("utf8") # TODO: Raise an error instead
return hash_utf8(chars, len(chars)) return key
cpdef hash_t hash_string(str string) except 0: cpdef hash_t hash_string(str string) except 0:
@ -110,28 +129,36 @@ cdef class StringStore:
string_or_id (bytes, str or uint64): The value to encode. string_or_id (bytes, str or uint64): The value to encode.
Returns (str / uint64): The value to be retrieved. Returns (str / uint64): The value to be retrieved.
""" """
if isinstance(string_or_id, str) and len(string_or_id) == 0: cdef hash_t str_hash
return 0 cdef Utf8Str* utf8str = NULL
elif string_or_id == 0:
return ""
elif string_or_id in SYMBOLS_BY_STR:
return SYMBOLS_BY_STR[string_or_id]
cdef hash_t key
if isinstance(string_or_id, str): if isinstance(string_or_id, str):
key = hash_string(string_or_id) if len(string_or_id) == 0:
return key return 0
elif isinstance(string_or_id, bytes):
key = hash_utf8(string_or_id, len(string_or_id)) # Return early if the string is found in the symbols LUT.
return key symbol = SYMBOLS_BY_STR.get(string_or_id, None)
elif string_or_id < len(SYMBOLS_BY_INT): if symbol is not None:
return SYMBOLS_BY_INT[string_or_id] return symbol
else:
key = string_or_id
utf8str = <Utf8Str*>self._map.get(key)
if utf8str is NULL:
raise KeyError(Errors.E018.format(hash_value=string_or_id))
else: else:
return decode_Utf8Str(utf8str) return hash_string(string_or_id)
elif isinstance(string_or_id, bytes):
return hash_utf8(string_or_id, len(string_or_id))
elif _try_coerce_to_hash(string_or_id, &str_hash):
if str_hash == 0:
return ""
elif str_hash in SYMBOLS_BY_INT:
return SYMBOLS_BY_INT[str_hash]
else:
utf8str = <Utf8Str*>self._map.get(str_hash)
else:
# TODO: Raise an error instead
utf8str = <Utf8Str*>self._map.get(string_or_id)
if utf8str is NULL:
raise KeyError(Errors.E018.format(hash_value=string_or_id))
else:
return decode_Utf8Str(utf8str)
def as_int(self, key): def as_int(self, key):
"""If key is an int, return it; otherwise, get the int value.""" """If key is an int, return it; otherwise, get the int value."""
@ -153,19 +180,22 @@ cdef class StringStore:
string (str): The string to add. string (str): The string to add.
RETURNS (uint64): The string's hash value. RETURNS (uint64): The string's hash value.
""" """
cdef hash_t str_hash
if isinstance(string, str): if isinstance(string, str):
if string in SYMBOLS_BY_STR: if string in SYMBOLS_BY_STR:
return SYMBOLS_BY_STR[string] return SYMBOLS_BY_STR[string]
key = hash_string(string)
self.intern_unicode(string) string = string.encode("utf8")
str_hash = hash_utf8(string, len(string))
self._intern_utf8(string, len(string), &str_hash)
elif isinstance(string, bytes): elif isinstance(string, bytes):
if string in SYMBOLS_BY_STR: if string in SYMBOLS_BY_STR:
return SYMBOLS_BY_STR[string] return SYMBOLS_BY_STR[string]
key = hash_utf8(string, len(string)) str_hash = hash_utf8(string, len(string))
self._intern_utf8(string, len(string)) self._intern_utf8(string, len(string), &str_hash)
else: else:
raise TypeError(Errors.E017.format(value_type=type(string))) raise TypeError(Errors.E017.format(value_type=type(string)))
return key return str_hash
def __len__(self): def __len__(self):
"""The number of strings in the store. """The number of strings in the store.
@ -174,30 +204,29 @@ cdef class StringStore:
""" """
return self.keys.size() return self.keys.size()
def __contains__(self, string not None): def __contains__(self, string_or_id not None):
"""Check whether a string is in the store. """Check whether a string or ID is in the store.
string (str): The string to check. string_or_id (str or int): The string to check.
RETURNS (bool): Whether the store contains the string. RETURNS (bool): Whether the store contains the string.
""" """
cdef hash_t key cdef hash_t str_hash
if isinstance(string, int) or isinstance(string, long): if isinstance(string_or_id, str):
if string == 0: if len(string_or_id) == 0:
return True return True
key = string elif string_or_id in SYMBOLS_BY_STR:
elif len(string) == 0: return True
return True str_hash = hash_string(string_or_id)
elif string in SYMBOLS_BY_STR: elif _try_coerce_to_hash(string_or_id, &str_hash):
return True pass
elif isinstance(string, str):
key = hash_string(string)
else: else:
string = string.encode("utf8") # TODO: Raise an error instead
key = hash_utf8(string, len(string)) return self._map.get(string_or_id) is not NULL
if key < len(SYMBOLS_BY_INT):
if str_hash in SYMBOLS_BY_INT:
return True return True
else: else:
return self._map.get(key) is not NULL return self._map.get(str_hash) is not NULL
def __iter__(self): def __iter__(self):
"""Iterate over the strings in the store, in order. """Iterate over the strings in the store, in order.
@ -272,13 +301,13 @@ cdef class StringStore:
cdef const Utf8Str* intern_unicode(self, str py_string): cdef const Utf8Str* intern_unicode(self, str py_string):
# 0 means missing, but we don't bother offsetting the index. # 0 means missing, but we don't bother offsetting the index.
cdef bytes byte_string = py_string.encode("utf8") cdef bytes byte_string = py_string.encode("utf8")
return self._intern_utf8(byte_string, len(byte_string)) return self._intern_utf8(byte_string, len(byte_string), NULL)
@cython.final @cython.final
cdef const Utf8Str* _intern_utf8(self, char* utf8_string, int length): cdef const Utf8Str* _intern_utf8(self, char* utf8_string, int length, hash_t* precalculated_hash):
# TODO: This function's API/behaviour is an unholy mess... # TODO: This function's API/behaviour is an unholy mess...
# 0 means missing, but we don't bother offsetting the index. # 0 means missing, but we don't bother offsetting the index.
cdef hash_t key = hash_utf8(utf8_string, length) cdef hash_t key = precalculated_hash[0] if precalculated_hash is not NULL else hash_utf8(utf8_string, length)
cdef Utf8Str* value = <Utf8Str*>self._map.get(key) cdef Utf8Str* value = <Utf8Str*>self._map.get(key)
if value is not NULL: if value is not NULL:
return value return value
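
# A small usage sketch consistent with the refactored lookup paths above: strings,
# bytes and plain integer hashes all resolve to the same entry, and symbols keep
# their reserved IDs.
from spacy.strings import StringStore

ss = StringStore()
key = ss.add("coffee")         # 64-bit hash of the UTF-8 encoded string
assert ss[key] == "coffee"     # lookup by integer hash
assert "coffee" in ss          # membership by string
assert key in ss               # membership by coerced integer hash
assert ss[""] == 0 and ss[0] == ""   # the empty string and 0 map to each other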

View File

@ -58,14 +58,6 @@ cdef struct TokenC:
hash_t ent_id hash_t ent_id
cdef struct MorphAnalysisC:
hash_t key
int length
attr_t* fields
attr_t* features
# Internal struct, for storage and disambiguation of entities. # Internal struct, for storage and disambiguation of entities.
cdef struct KBEntryC: cdef struct KBEntryC:

View File

@ -1,5 +1,6 @@
# DO NOT EDIT! The symbols are frozen as of spaCy v3.0.0.
cdef enum symbol_t: cdef enum symbol_t:
NIL NIL = 0
IS_ALPHA IS_ALPHA
IS_ASCII IS_ASCII
IS_DIGIT IS_DIGIT
@ -65,7 +66,7 @@ cdef enum symbol_t:
FLAG62 FLAG62
FLAG63 FLAG63
ID ID = 64
ORTH ORTH
LOWER LOWER
NORM NORM
@ -385,7 +386,7 @@ cdef enum symbol_t:
DEPRECATED275 DEPRECATED275
DEPRECATED276 DEPRECATED276
PERSON PERSON = 380
NORP NORP
FACILITY FACILITY
ORG ORG
@ -405,7 +406,7 @@ cdef enum symbol_t:
ORDINAL ORDINAL
CARDINAL CARDINAL
acomp acomp = 398
advcl advcl
advmod advmod
agent agent
@ -458,12 +459,12 @@ cdef enum symbol_t:
rcmod rcmod
root root
xcomp xcomp
acl acl
ENT_KB_ID ENT_KB_ID = 452
MORPH MORPH
ENT_ID ENT_ID
IDX IDX
_ _ = 456
# DO NOT ADD ANY NEW SYMBOLS!

View File

@ -469,11 +469,7 @@ IDS = {
} }
def sort_nums(x): NAMES = {v: k for k, v in IDS.items()}
return x[1]
NAMES = [it[0] for it in sorted(IDS.items(), key=sort_nums)]
# Unfortunate hack here, to work around problem with long cpdef enum # Unfortunate hack here, to work around problem with long cpdef enum
# (which is generating an enormous amount of C++ in Cython 0.24+) # (which is generating an enormous amount of C++ in Cython 0.24+)
# We keep the enum cdef, and just make sure the names are available to Python # We keep the enum cdef, and just make sure the names are available to Python
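
# With NAMES rebuilt as a dict keyed by ID, both directions of the symbol mapping are
# simple lookups, and the pinned enum values above keep the IDs stable across releases:
from spacy.symbols import IDS, NAMES, ORTH

assert IDS["ORTH"] == ORTH
assert NAMES[ORTH] == "ORTH"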

View File

@ -4,6 +4,12 @@ import functools
import inspect import inspect
import importlib import importlib
import sys import sys
from hypothesis import settings
# Functionally disable deadline settings for tests
# to prevent spurious test failures in CI builds.
settings.register_profile("no_deadlines", deadline=2 * 60 * 1000) # in ms
settings.load_profile("no_deadlines")
def pytest_addoption(parser): def pytest_addoption(parser):
@ -264,7 +270,7 @@ def hsb_tokenizer():
@pytest.fixture(scope="session") @pytest.fixture(scope="session")
def ko_tokenizer(): def ko_tokenizer():
pytest.importorskip("natto") pytest.importorskip("mecab_ko")
return get_lang_class("ko")().tokenizer return get_lang_class("ko")().tokenizer
@ -281,11 +287,35 @@ def ko_tokenizer_tokenizer():
return nlp.tokenizer return nlp.tokenizer
@pytest.fixture(scope="module")
def la_tokenizer():
return get_lang_class("la")().tokenizer
@pytest.fixture(scope="session")
def ko_tokenizer_natto():
pytest.importorskip("natto")
config = {
"nlp": {
"tokenizer": {
"@tokenizers": "spacy.KoreanNattoTokenizer.v1",
}
}
}
nlp = get_lang_class("ko").from_config(config)
return nlp.tokenizer
@pytest.fixture(scope="session") @pytest.fixture(scope="session")
def lb_tokenizer(): def lb_tokenizer():
return get_lang_class("lb")().tokenizer return get_lang_class("lb")().tokenizer
@pytest.fixture(scope="session")
def lg_tokenizer():
return get_lang_class("lg")().tokenizer
@pytest.fixture(scope="session") @pytest.fixture(scope="session")
def lt_tokenizer(): def lt_tokenizer():
return get_lang_class("lt")().tokenizer return get_lang_class("lt")().tokenizer
@ -348,13 +378,13 @@ def ro_tokenizer():
@pytest.fixture(scope="session") @pytest.fixture(scope="session")
def ru_tokenizer(): def ru_tokenizer():
pytest.importorskip("pymorphy2") pytest.importorskip("pymorphy3")
return get_lang_class("ru")().tokenizer return get_lang_class("ru")().tokenizer
@pytest.fixture @pytest.fixture
def ru_lemmatizer(): def ru_lemmatizer():
pytest.importorskip("pymorphy2") pytest.importorskip("pymorphy3")
return get_lang_class("ru")().add_pipe("lemmatizer") return get_lang_class("ru")().add_pipe("lemmatizer")
@ -426,14 +456,14 @@ def ky_tokenizer():
@pytest.fixture(scope="session") @pytest.fixture(scope="session")
def uk_tokenizer(): def uk_tokenizer():
pytest.importorskip("pymorphy2") pytest.importorskip("pymorphy3")
return get_lang_class("uk")().tokenizer return get_lang_class("uk")().tokenizer
@pytest.fixture @pytest.fixture
def uk_lemmatizer(): def uk_lemmatizer():
pytest.importorskip("pymorphy2") pytest.importorskip("pymorphy3")
pytest.importorskip("pymorphy2_dicts_uk") pytest.importorskip("pymorphy3_dicts_uk")
return get_lang_class("uk")().add_pipe("lemmatizer") return get_lang_class("uk")().add_pipe("lemmatizer")

View File

@ -45,6 +45,33 @@ def test_ents_reset(en_vocab):
assert [t.ent_iob_ for t in doc] == orig_iobs assert [t.ent_iob_ for t in doc] == orig_iobs
def test_ents_clear(en_vocab):
"""Ensure that removing entities clears token attributes"""
text = ["Louisiana", "Office", "of", "Conservation"]
doc = Doc(en_vocab, words=text)
entity = Span(doc, 0, 4, label=391, span_id="TEST")
doc.ents = [entity]
doc.ents = []
for token in doc:
assert token.ent_iob == 2
assert token.ent_type == 0
assert token.ent_id == 0
assert token.ent_kb_id == 0
doc.ents = [entity]
doc.set_ents([], default="missing")
for token in doc:
assert token.ent_iob == 0
assert token.ent_type == 0
assert token.ent_id == 0
assert token.ent_kb_id == 0
doc.set_ents([], default="blocked")
for token in doc:
assert token.ent_iob == 3
assert token.ent_type == 0
assert token.ent_id == 0
assert token.ent_kb_id == 0
def test_add_overlapping_entities(en_vocab): def test_add_overlapping_entities(en_vocab):
text = ["Louisiana", "Office", "of", "Conservation"] text = ["Louisiana", "Office", "of", "Conservation"]
doc = Doc(en_vocab, words=text) doc = Doc(en_vocab, words=text)
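
# Standalone version of the "blocked" case exercised above, with the integer IOB codes
# spaCy uses (0 = unset/missing, 1 = "I", 2 = "O", 3 = "B"):
import spacy

nlp = spacy.blank("en")
doc = nlp("Louisiana Office of Conservation")
doc.set_ents([], default="blocked")
assert all(t.ent_iob == 3 and t.ent_type == 0 for t in doc)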

View File

@ -3,6 +3,7 @@ import weakref
import numpy import numpy
from numpy.testing import assert_array_equal from numpy.testing import assert_array_equal
import pytest import pytest
import warnings
from thinc.api import NumpyOps, get_current_ops from thinc.api import NumpyOps, get_current_ops
from spacy.attrs import DEP, ENT_IOB, ENT_TYPE, HEAD, IS_ALPHA, MORPH, POS from spacy.attrs import DEP, ENT_IOB, ENT_TYPE, HEAD, IS_ALPHA, MORPH, POS
@ -529,9 +530,9 @@ def test_doc_from_array_sent_starts(en_vocab):
# no warning using default attrs # no warning using default attrs
attrs = doc._get_array_attrs() attrs = doc._get_array_attrs()
arr = doc.to_array(attrs) arr = doc.to_array(attrs)
with pytest.warns(None) as record: with warnings.catch_warnings():
warnings.simplefilter("error")
new_doc.from_array(attrs, arr) new_doc.from_array(attrs, arr)
assert len(record) == 0
# only SENT_START uses SENT_START # only SENT_START uses SENT_START
attrs = [SENT_START] attrs = [SENT_START]
arr = doc.to_array(attrs) arr = doc.to_array(attrs)
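
# The warnings-as-errors pattern that replaces pytest.warns(None) above, wrapped as a
# hypothetical helper (not part of spaCy's test suite): any warning raised inside the
# block is escalated to an error, so unexpected warnings fail the test.
import warnings

def call_without_warnings(fn, *args, **kwargs):
    with warnings.catch_warnings():
        warnings.simplefilter("error")
        return fn(*args, **kwargs)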

View File

@ -1,12 +1,15 @@
import pytest import pytest
import spacy import spacy
from spacy import schemas from spacy import schemas
from spacy.tokens import Doc, Span from spacy.tokens import Doc, Span, Token
import srsly
from .test_underscore import clean_underscore # noqa: F401
@pytest.fixture() @pytest.fixture()
def doc(en_vocab): def doc(en_vocab):
words = ["c", "d", "e"] words = ["c", "d", "e"]
spaces = [True, True, True]
pos = ["VERB", "NOUN", "NOUN"] pos = ["VERB", "NOUN", "NOUN"]
tags = ["VBP", "NN", "NN"] tags = ["VBP", "NN", "NN"]
heads = [0, 0, 1] heads = [0, 0, 1]
@ -17,6 +20,7 @@ def doc(en_vocab):
return Doc( return Doc(
en_vocab, en_vocab,
words=words, words=words,
spaces=spaces,
pos=pos, pos=pos,
tags=tags, tags=tags,
heads=heads, heads=heads,
@ -45,6 +49,47 @@ def doc_without_deps(en_vocab):
) )
@pytest.fixture()
def doc_json():
return {
"text": "c d e ",
"ents": [{"start": 2, "end": 3, "label": "ORG"}],
"sents": [{"start": 0, "end": 5}],
"tokens": [
{
"id": 0,
"start": 0,
"end": 1,
"tag": "VBP",
"pos": "VERB",
"morph": "Feat1=A",
"dep": "ROOT",
"head": 0,
},
{
"id": 1,
"start": 2,
"end": 3,
"tag": "NN",
"pos": "NOUN",
"morph": "Feat1=B",
"dep": "dobj",
"head": 0,
},
{
"id": 2,
"start": 4,
"end": 5,
"tag": "NN",
"pos": "NOUN",
"morph": "Feat1=A|Feat2=D",
"dep": "dobj",
"head": 1,
},
],
}
def test_doc_to_json(doc): def test_doc_to_json(doc):
json_doc = doc.to_json() json_doc = doc.to_json()
assert json_doc["text"] == "c d e " assert json_doc["text"] == "c d e "
@ -56,7 +101,8 @@ def test_doc_to_json(doc):
assert json_doc["ents"][0]["start"] == 2 # character offset! assert json_doc["ents"][0]["start"] == 2 # character offset!
assert json_doc["ents"][0]["end"] == 3 # character offset! assert json_doc["ents"][0]["end"] == 3 # character offset!
assert json_doc["ents"][0]["label"] == "ORG" assert json_doc["ents"][0]["label"] == "ORG"
assert not schemas.validate(schemas.DocJSONSchema, json_doc) assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0
assert srsly.json_loads(srsly.json_dumps(json_doc)) == json_doc
def test_doc_to_json_underscore(doc): def test_doc_to_json_underscore(doc):
@ -64,11 +110,96 @@ def test_doc_to_json_underscore(doc):
Doc.set_extension("json_test2", default=False) Doc.set_extension("json_test2", default=False)
doc._.json_test1 = "hello world" doc._.json_test1 = "hello world"
doc._.json_test2 = [1, 2, 3] doc._.json_test2 = [1, 2, 3]
json_doc = doc.to_json(underscore=["json_test1", "json_test2"]) json_doc = doc.to_json(underscore=["json_test1", "json_test2"])
assert "_" in json_doc assert "_" in json_doc
assert json_doc["_"]["json_test1"] == "hello world" assert json_doc["_"]["json_test1"] == "hello world"
assert json_doc["_"]["json_test2"] == [1, 2, 3] assert json_doc["_"]["json_test2"] == [1, 2, 3]
assert not schemas.validate(schemas.DocJSONSchema, json_doc) assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0
assert srsly.json_loads(srsly.json_dumps(json_doc)) == json_doc
def test_doc_to_json_with_token_span_attributes(doc):
Doc.set_extension("json_test1", default=False)
Doc.set_extension("json_test2", default=False)
Token.set_extension("token_test", default=False)
Span.set_extension("span_test", default=False)
doc._.json_test1 = "hello world"
doc._.json_test2 = [1, 2, 3]
doc[0:1]._.span_test = "span_attribute"
doc[0]._.token_test = 117
doc.spans["span_group"] = [doc[0:1]]
json_doc = doc.to_json(
underscore=["json_test1", "json_test2", "token_test", "span_test"]
)
assert "_" in json_doc
assert json_doc["_"]["json_test1"] == "hello world"
assert json_doc["_"]["json_test2"] == [1, 2, 3]
assert "underscore_token" in json_doc
assert "underscore_span" in json_doc
assert json_doc["underscore_token"]["token_test"]["value"] == 117
assert json_doc["underscore_span"]["span_test"]["value"] == "span_attribute"
assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0
assert srsly.json_loads(srsly.json_dumps(json_doc)) == json_doc
def test_doc_to_json_with_custom_user_data(doc):
Doc.set_extension("json_test", default=False)
Token.set_extension("token_test", default=False)
Span.set_extension("span_test", default=False)
doc._.json_test = "hello world"
doc[0:1]._.span_test = "span_attribute"
doc[0]._.token_test = 117
json_doc = doc.to_json(underscore=["json_test", "token_test", "span_test"])
doc.user_data["user_data_test"] = 10
doc.user_data[("user_data_test2", True)] = 10
assert "_" in json_doc
assert json_doc["_"]["json_test"] == "hello world"
assert "underscore_token" in json_doc
assert "underscore_span" in json_doc
assert json_doc["underscore_token"]["token_test"]["value"] == 117
assert json_doc["underscore_span"]["span_test"]["value"] == "span_attribute"
assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0
assert srsly.json_loads(srsly.json_dumps(json_doc)) == json_doc
def test_doc_to_json_with_token_span_same_identifier(doc):
Doc.set_extension("my_ext", default=False)
Token.set_extension("my_ext", default=False)
Span.set_extension("my_ext", default=False)
doc._.my_ext = "hello world"
doc[0:1]._.my_ext = "span_attribute"
doc[0]._.my_ext = 117
json_doc = doc.to_json(underscore=["my_ext"])
assert "_" in json_doc
assert json_doc["_"]["my_ext"] == "hello world"
assert "underscore_token" in json_doc
assert "underscore_span" in json_doc
assert json_doc["underscore_token"]["my_ext"]["value"] == 117
assert json_doc["underscore_span"]["my_ext"]["value"] == "span_attribute"
assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0
assert srsly.json_loads(srsly.json_dumps(json_doc)) == json_doc
def test_doc_to_json_with_token_attributes_missing(doc):
Token.set_extension("token_test", default=False)
Span.set_extension("span_test", default=False)
doc[0:1]._.span_test = "span_attribute"
doc[0]._.token_test = 117
json_doc = doc.to_json(underscore=["span_test"])
assert "underscore_token" in json_doc
assert "underscore_span" in json_doc
assert json_doc["underscore_span"]["span_test"]["value"] == "span_attribute"
assert "token_test" not in json_doc["underscore_token"]
assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0
def test_doc_to_json_underscore_error_attr(doc): def test_doc_to_json_underscore_error_attr(doc):
@ -94,11 +225,29 @@ def test_doc_to_json_span(doc):
assert len(json_doc["spans"]) == 1 assert len(json_doc["spans"]) == 1
assert len(json_doc["spans"]["test"]) == 2 assert len(json_doc["spans"]["test"]) == 2
assert json_doc["spans"]["test"][0]["start"] == 0 assert json_doc["spans"]["test"][0]["start"] == 0
assert not schemas.validate(schemas.DocJSONSchema, json_doc) assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0
def test_json_to_doc(doc): def test_json_to_doc(doc):
new_doc = Doc(doc.vocab).from_json(doc.to_json(), validate=True) json_doc = doc.to_json()
json_doc = srsly.json_loads(srsly.json_dumps(json_doc))
new_doc = Doc(doc.vocab).from_json(json_doc, validate=True)
assert new_doc.text == doc.text == "c d e "
assert len(new_doc) == len(doc) == 3
assert new_doc[0].pos == doc[0].pos
assert new_doc[0].tag == doc[0].tag
assert new_doc[0].dep == doc[0].dep
assert new_doc[0].head.idx == doc[0].head.idx
assert new_doc[0].lemma == doc[0].lemma
assert len(new_doc.ents) == 1
assert new_doc.ents[0].start == 1
assert new_doc.ents[0].end == 2
assert new_doc.ents[0].label_ == "ORG"
assert doc.to_bytes() == new_doc.to_bytes()
def test_json_to_doc_compat(doc, doc_json):
new_doc = Doc(doc.vocab).from_json(doc_json, validate=True)
new_tokens = [token for token in new_doc] new_tokens = [token for token in new_doc]
assert new_doc.text == doc.text == "c d e " assert new_doc.text == doc.text == "c d e "
assert len(new_tokens) == len([token for token in doc]) == 3 assert len(new_tokens) == len([token for token in doc]) == 3
@ -114,11 +263,8 @@ def test_json_to_doc(doc):
def test_json_to_doc_underscore(doc): def test_json_to_doc_underscore(doc):
if not Doc.has_extension("json_test1"): Doc.set_extension("json_test1", default=False)
Doc.set_extension("json_test1", default=False) Doc.set_extension("json_test2", default=False)
if not Doc.has_extension("json_test2"):
Doc.set_extension("json_test2", default=False)
doc._.json_test1 = "hello world" doc._.json_test1 = "hello world"
doc._.json_test2 = [1, 2, 3] doc._.json_test2 = [1, 2, 3]
json_doc = doc.to_json(underscore=["json_test1", "json_test2"]) json_doc = doc.to_json(underscore=["json_test1", "json_test2"])
@ -126,6 +272,34 @@ def test_json_to_doc_underscore(doc):
assert all([new_doc.has_extension(f"json_test{i}") for i in range(1, 3)]) assert all([new_doc.has_extension(f"json_test{i}") for i in range(1, 3)])
assert new_doc._.json_test1 == "hello world" assert new_doc._.json_test1 == "hello world"
assert new_doc._.json_test2 == [1, 2, 3] assert new_doc._.json_test2 == [1, 2, 3]
assert doc.to_bytes() == new_doc.to_bytes()
def test_json_to_doc_with_token_span_attributes(doc):
Doc.set_extension("json_test1", default=False)
Doc.set_extension("json_test2", default=False)
Token.set_extension("token_test", default=False)
Span.set_extension("span_test", default=False)
doc._.json_test1 = "hello world"
doc._.json_test2 = [1, 2, 3]
doc[0:1]._.span_test = "span_attribute"
doc[0]._.token_test = 117
json_doc = doc.to_json(
underscore=["json_test1", "json_test2", "token_test", "span_test"]
)
json_doc = srsly.json_loads(srsly.json_dumps(json_doc))
new_doc = Doc(doc.vocab).from_json(json_doc, validate=True)
assert all([new_doc.has_extension(f"json_test{i}") for i in range(1, 3)])
assert new_doc._.json_test1 == "hello world"
assert new_doc._.json_test2 == [1, 2, 3]
assert new_doc[0]._.token_test == 117
assert new_doc[0:1]._.span_test == "span_attribute"
assert new_doc.user_data == doc.user_data
assert new_doc.to_bytes(exclude=["user_data"]) == doc.to_bytes(
exclude=["user_data"]
)
def test_json_to_doc_spans(doc): def test_json_to_doc_spans(doc):
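
# A condensed sketch of the round trip these tests exercise, assuming this branch's
# support for serializing token- and span-level underscore attributes; the extension
# names are illustrative.
import spacy
from spacy.tokens import Doc, Span, Token

nlp = spacy.blank("en")
Token.set_extension("token_test", default=False)
Span.set_extension("span_test", default=False)

doc = nlp("c d e")
doc[0]._.token_test = 117
doc[0:1]._.span_test = "span_attribute"

json_doc = doc.to_json(underscore=["token_test", "span_test"])
new_doc = Doc(nlp.vocab).from_json(json_doc, validate=True)
assert new_doc[0]._.token_test == 117
assert new_doc[0:1]._.span_test == "span_attribute"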

Some files were not shown because too many files have changed in this diff.