diff --git a/.github/azure-steps.yml b/.github/azure-steps.yml
index 2adb22648..6501a3d11 100644
--- a/.github/azure-steps.yml
+++ b/.github/azure-steps.yml
@@ -41,7 +41,7 @@ steps:
- bash: |
${{ parameters.prefix }} SDIST=$(python -c "import os;print(os.listdir('./dist')[-1])" 2>&1)
- ${{ parameters.prefix }} python -m pip install dist/$SDIST
+ ${{ parameters.prefix }} SPACY_NUM_BUILD_JOBS=2 python -m pip install dist/$SDIST
displayName: "Install from sdist"
- script: |
@@ -55,12 +55,12 @@ steps:
condition: eq(${{ parameters.gpu }}, true)
- script: |
- ${{ parameters.prefix }} python -m pytest --pyargs spacy
+ ${{ parameters.prefix }} python -m pytest --pyargs spacy -W error
displayName: "Run CPU tests"
condition: eq(${{ parameters.gpu }}, false)
- script: |
- ${{ parameters.prefix }} python -m pytest --pyargs spacy -p spacy.tests.enable_gpu
+ ${{ parameters.prefix }} python -m pytest --pyargs spacy -W error -p spacy.tests.enable_gpu
displayName: "Run GPU tests"
condition: eq(${{ parameters.gpu }}, true)
@@ -114,7 +114,7 @@ steps:
condition: eq(variables['python_version'], '3.8')
- script: |
- ${{ parameters.prefix }} python -m pip install thinc-apple-ops
+ ${{ parameters.prefix }} python -m pip install --pre thinc-apple-ops
${{ parameters.prefix }} python -m pytest --pyargs spacy
displayName: "Run CPU tests with thinc-apple-ops"
- condition: and(startsWith(variables['imageName'], 'macos'), eq(variables['python.version'], '3.9'))
+ condition: and(startsWith(variables['imageName'], 'macos'), eq(variables['python.version'], '3.10'))
diff --git a/.github/contributors/Lucaterre.md b/.github/contributors/Lucaterre.md
new file mode 100644
index 000000000..5da763b22
--- /dev/null
+++ b/.github/contributors/Lucaterre.md
@@ -0,0 +1,106 @@
+# spaCy contributor agreement
+
+This spaCy Contributor Agreement (**"SCA"**) is based on the
+[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf).
+The SCA applies to any contribution that you make to any product or project
+managed by us (the **"project"**), and sets out the intellectual property rights
+you grant to us in the contributed materials. The term **"us"** shall mean
+[ExplosionAI GmbH](https://explosion.ai/legal). The term
+**"you"** shall mean the person or entity identified below.
+
+If you agree to be bound by these terms, fill in the information requested
+below and include the filled-in version with your first pull request, under the
+folder [`.github/contributors/`](/.github/contributors/). The name of the file
+should be your GitHub username, with the extension `.md`. For example, the user
+example_user would create the file `.github/contributors/example_user.md`.
+
+Read this agreement carefully before signing. These terms and conditions
+constitute a binding legal agreement.
+
+## Contributor Agreement
+
+1. The term "contribution" or "contributed materials" means any source code,
+object code, patch, tool, sample, graphic, specification, manual,
+documentation, or any other material posted or submitted by you to the project.
+
+2. With respect to any worldwide copyrights, or copyright applications and
+registrations, in your contribution:
+
+ * you hereby assign to us joint ownership, and to the extent that such
+ assignment is or becomes invalid, ineffective or unenforceable, you hereby
+ grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge,
+ royalty-free, unrestricted license to exercise all rights under those
+ copyrights. This includes, at our option, the right to sublicense these same
+ rights to third parties through multiple levels of sublicensees or other
+ licensing arrangements;
+
+ * you agree that each of us can do all things in relation to your
+ contribution as if each of us were the sole owners, and if one of us makes
+ a derivative work of your contribution, the one who makes the derivative
+    work (or has it made) will be the sole owner of that derivative work;
+
+ * you agree that you will not assert any moral rights in your contribution
+ against us, our licensees or transferees;
+
+ * you agree that we may register a copyright in your contribution and
+ exercise all ownership rights associated with it; and
+
+ * you agree that neither of us has any duty to consult with, obtain the
+ consent of, pay or render an accounting to the other for any use or
+ distribution of your contribution.
+
+3. With respect to any patents you own, or that you can license without payment
+to any third party, you hereby grant to us a perpetual, irrevocable,
+non-exclusive, worldwide, no-charge, royalty-free license to:
+
+ * make, have made, use, sell, offer to sell, import, and otherwise transfer
+ your contribution in whole or in part, alone or in combination with or
+ included in any product, work or materials arising out of the project to
+ which your contribution was submitted, and
+
+ * at our option, to sublicense these same rights to third parties through
+ multiple levels of sublicensees or other licensing arrangements.
+
+4. Except as set out above, you keep all right, title, and interest in your
+contribution. The rights that you grant to us under these terms are effective
+on the date you first submitted a contribution to us, even if your submission
+took place before the date you sign these terms.
+
+5. You covenant, represent, warrant and agree that:
+
+ * Each contribution that you submit is and shall be an original work of
+ authorship and you can legally grant the rights set out in this SCA;
+
+ * to the best of your knowledge, each contribution will not violate any
+ third party's copyrights, trademarks, patents, or other intellectual
+ property rights; and
+
+ * each contribution shall be in compliance with U.S. export control laws and
+ other applicable export and import laws. You agree to notify us if you
+ become aware of any circumstance which would make any of the foregoing
+ representations inaccurate in any respect. We may publicly disclose your
+ participation in the project, including the fact that you have signed the SCA.
+
+6. This SCA is governed by the laws of the State of California and applicable
+U.S. Federal law. Any choice of law rules will not apply.
+
+7. Please place an “x” on one of the applicable statements below. Please do NOT
+mark both statements:
+
+ * [x] I am signing on behalf of myself as an individual and no other person
+ or entity, including my employer, has or will have rights with respect to my
+ contributions.
+
+ * [ ] I am signing on behalf of my employer or a legal entity and I have the
+ actual authority to contractually bind that entity.
+
+## Contributor Details
+
+| Field | Entry |
+|------------------------------- |---------------|
+| Name | Lucas Terriel |
+| Company name (if applicable) | |
+| Title or role (if applicable) | |
+| Date | 2022-06-20 |
+| GitHub username | Lucaterre |
+| Website (optional) | |
\ No newline at end of file
diff --git a/.github/no-response.yml b/.github/no-response.yml
deleted file mode 100644
index ea78104b9..000000000
--- a/.github/no-response.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-# Configuration for probot-no-response - https://github.com/probot/no-response
-
-# Number of days of inactivity before an Issue is closed for lack of response
-daysUntilClose: 14
-# Label requiring a response
-responseRequiredLabel: more-info-needed
-# Comment to post when closing an Issue for lack of response. Set to `false` to disable
-closeComment: >
- This issue has been automatically closed because there has been no response
- to a request for more information from the original author. With only the
- information that is currently in the issue, there's not enough information
- to take action. If you're the original author, feel free to reopen the issue
- if you have or find the answers needed to investigate further.
diff --git a/.github/spacy_universe_alert.py b/.github/spacy_universe_alert.py
new file mode 100644
index 000000000..99ffabe93
--- /dev/null
+++ b/.github/spacy_universe_alert.py
@@ -0,0 +1,67 @@
+import os
+import sys
+import json
+from datetime import datetime
+
+from slack_sdk.web.client import WebClient
+
+CHANNEL = "#alerts-universe"
+SLACK_TOKEN = os.environ.get("SLACK_BOT_TOKEN", "ENV VAR not available!")
+DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
+
+client = WebClient(SLACK_TOKEN)
+github_context = json.loads(sys.argv[1])
+
+event = github_context['event']
+pr_title = event['pull_request']["title"]
+pr_link = event['pull_request']["patch_url"].replace(".patch", "")
+pr_author_url = event['sender']["html_url"]
+pr_author_name = pr_author_url.rsplit('/')[-1]
+pr_created_at_dt = datetime.strptime(
+ event['pull_request']["created_at"],
+ DATETIME_FORMAT
+)
+pr_created_at = pr_created_at_dt.strftime("%c")
+pr_updated_at_dt = datetime.strptime(
+ event['pull_request']["updated_at"],
+ DATETIME_FORMAT
+)
+pr_updated_at = pr_updated_at_dt.strftime("%c")
+
+blocks = [
+ {
+ "type": "section",
+ "text": {
+ "type": "mrkdwn",
+ "text": "📣 New spaCy Universe Project Alert ✨"
+ }
+ },
+ {
+ "type": "section",
+ "fields": [
+ {
+ "type": "mrkdwn",
+ "text": f"*Pull Request:*\n<{pr_link}|{pr_title}>"
+ },
+ {
+ "type": "mrkdwn",
+ "text": f"*Author:*\n<{pr_author_url}|{pr_author_name}>"
+ },
+ {
+ "type": "mrkdwn",
+ "text": f"*Created at:*\n {pr_created_at}"
+ },
+ {
+ "type": "mrkdwn",
+ "text": f"*Last Updated:*\n {pr_updated_at}"
+ }
+ ]
+ }
+ ]
+
+
+client.chat_postMessage(
+ channel=CHANNEL,
+ text="spaCy universe project PR alert",
+ blocks=blocks
+)
diff --git a/.github/workflows/issue-manager.yml b/.github/workflows/issue-manager.yml
index 3fb42ed01..8f3a151ea 100644
--- a/.github/workflows/issue-manager.yml
+++ b/.github/workflows/issue-manager.yml
@@ -15,7 +15,7 @@ jobs:
issue-manager:
runs-on: ubuntu-latest
steps:
- - uses: tiangolo/issue-manager@0.2.1
+ - uses: tiangolo/issue-manager@0.4.0
with:
token: ${{ secrets.GITHUB_TOKEN }}
config: >
@@ -25,5 +25,11 @@ jobs:
"message": "This issue has been automatically closed because it was answered and there was no follow-up discussion.",
"remove_label_on_comment": true,
"remove_label_on_close": true
+ },
+ "more-info-needed": {
+ "delay": "P7D",
+ "message": "This issue has been automatically closed because there has been no response to a request for more information from the original author. With only the information that is currently in the issue, there's not enough information to take action. If you're the original author, feel free to reopen the issue if you have or find the answers needed to investigate further.",
+ "remove_label_on_comment": true,
+ "remove_label_on_close": true
}
}
diff --git a/.github/workflows/spacy_universe_alert.yml b/.github/workflows/spacy_universe_alert.yml
new file mode 100644
index 000000000..cbbf14c6e
--- /dev/null
+++ b/.github/workflows/spacy_universe_alert.yml
@@ -0,0 +1,30 @@
+name: spaCy universe project alert
+
+on:
+ pull_request_target:
+ paths:
+ - "website/meta/universe.json"
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Dump GitHub context
+ env:
+ GITHUB_CONTEXT: ${{ toJson(github) }}
+ PR_NUMBER: ${{github.event.number}}
+ run: |
+ echo "$GITHUB_CONTEXT"
+
+ - uses: actions/checkout@v1
+ - uses: actions/setup-python@v1
+ - name: Install Bernadette app dependency and send an alert
+ env:
+ SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
+ GITHUB_CONTEXT: ${{ toJson(github) }}
+ CHANNEL: "#alerts-universe"
+ run: |
+ pip install slack-sdk==3.17.2 aiohttp==3.8.1
+ echo "$CHANNEL"
+ python .github/spacy_universe_alert.py "$GITHUB_CONTEXT"
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index ddd833be1..1f396bd71 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -271,7 +271,8 @@ except: # noqa: E722
### Python conventions
-All Python code must be written **compatible with Python 3.6+**.
+All Python code must be written **compatible with Python 3.6+**. More detailed
+code conventions can be found in the [developer docs](https://github.com/explosion/spaCy/blob/master/extra/DEVELOPER_DOCS/Code%20Conventions.md).
#### I/O and handling paths
diff --git a/README.md b/README.md
index bcdf0f844..d9ef83e01 100644
--- a/README.md
+++ b/README.md
@@ -16,7 +16,7 @@ production-ready [**training system**](https://spacy.io/usage/training) and easy
model packaging, deployment and workflow management. spaCy is commercial
open-source software, released under the MIT license.
-💫 **Version 3.3.1 out now!**
+💫 **Version 3.4.0 out now!**
[Check out the release notes here.](https://github.com/explosion/spaCy/releases)
[](https://dev.azure.com/explosion-ai/public/_build?definitionId=8)
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 4624b2eb2..f475b7fdd 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -32,7 +32,7 @@ jobs:
versionSpec: "3.7"
- script: |
pip install flake8==3.9.2
- python -m flake8 spacy --count --select=E901,E999,F821,F822,F823 --show-source --statistics
+ python -m flake8 spacy --count --select=E901,E999,F821,F822,F823,W605 --show-source --statistics
displayName: "flake8"
- job: "Test"
diff --git a/build-constraints.txt b/build-constraints.txt
index cf5fe3284..956973abf 100644
--- a/build-constraints.txt
+++ b/build-constraints.txt
@@ -1,6 +1,8 @@
# build version constraints for use with wheelwright + multibuild
-numpy==1.15.0; python_version<='3.7'
-numpy==1.17.3; python_version=='3.8'
+numpy==1.15.0; python_version<='3.7' and platform_machine!='aarch64'
+numpy==1.19.2; python_version<='3.7' and platform_machine=='aarch64'
+numpy==1.17.3; python_version=='3.8' and platform_machine!='aarch64'
+numpy==1.19.2; python_version=='3.8' and platform_machine=='aarch64'
numpy==1.19.3; python_version=='3.9'
numpy==1.21.3; python_version=='3.10'
numpy; python_version>='3.11'
diff --git a/extra/DEVELOPER_DOCS/Code Conventions.md b/extra/DEVELOPER_DOCS/Code Conventions.md
index 37cd8ff27..7294ac38b 100644
--- a/extra/DEVELOPER_DOCS/Code Conventions.md
+++ b/extra/DEVELOPER_DOCS/Code Conventions.md
@@ -191,6 +191,8 @@ def load_model(name: str) -> "Language":
...
```
+Note that we typically put the `from typing` import statements on the first line(s) of the Python module.
+
## Structuring logic
### Positional and keyword arguments
@@ -275,6 +277,27 @@ If you have to use `try`/`except`, make sure to only include what's **absolutely
+ return [v.strip() for v in value.split(",")]
```
+### Numeric comparisons
+
+For numeric comparisons, as a general rule we always use `<` and `>=` and avoid the usage of `<=` and `>`. This is to ensure we consistently
+apply inclusive lower bounds and exclusive upper bounds, helping to prevent off-by-one errors.
+
+One exception to this rule is the case of chained comparisons. With a chain like
+
+```python
+if value >= 0 and value < max:
+ ...
+```
+
+it's fine to rewrite this to the shorter form
+
+```python
+if 0 <= value < max:
+ ...
+```
+
+even though this requires the usage of the `<=` operator.
+
### Iteration and comprehensions
We generally avoid using built-in functions like `filter` or `map` in favor of list or generator comprehensions.
@@ -451,10 +474,14 @@ spaCy uses the [`pytest`](http://doc.pytest.org/) framework for testing. Tests f
When adding tests, make sure to use descriptive names and only test for one behavior at a time. Tests should be grouped into modules dedicated to the same type of functionality and some test modules are organized as directories of test files related to the same larger area of the library, e.g. `matcher` or `tokenizer`.
-Regression tests are tests that refer to bugs reported in specific issues. They should live in the relevant module of the test suite, named according to the issue number (e.g., `test_issue1234.py`), and [marked](https://docs.pytest.org/en/6.2.x/example/markers.html#working-with-custom-markers) appropriately (e.g. `@pytest.mark.issue(1234)`). This system allows us to relate tests for specific bugs back to the original reported issue, which is especially useful if we introduce a regression and a previously passing regression tests suddenly fails again. When fixing a bug, it's often useful to create a regression test for it first.
+Regression tests are tests that refer to bugs reported in specific issues. They should live in the relevant module of the test suite, named according to the issue number (e.g., `test_issue1234.py`), and [marked](https://docs.pytest.org/en/6.2.x/example/markers.html#working-with-custom-markers) appropriately (e.g. `@pytest.mark.issue(1234)`). This system allows us to relate tests for specific bugs back to the original reported issue, which is especially useful if we introduce a regression and a previously passing regression test suddenly fails again. When fixing a bug, it's often useful to create a regression test for it first.
The test suite also provides [fixtures](https://github.com/explosion/spaCy/blob/master/spacy/tests/conftest.py) for different language tokenizers that can be used as function arguments of the same name and will be passed in automatically. Those should only be used for tests related to those specific languages. We also have [test utility functions](https://github.com/explosion/spaCy/blob/master/spacy/tests/util.py) for common operations, like creating a temporary file.
+### Testing Cython code
+
+If you're developing Cython code (`.pyx` files), those extensions will need to be built before the test runner can test that code - otherwise it's going to run the tests with stale code from the last time the extension was built. You can build the extensions locally with `python setup.py build_ext -i`.
+
### Constructing objects and state
Test functions usually follow the same simple structure: they set up some state, perform the operation you want to test and `assert` conditions that you expect to be true, usually before and after the operation.
diff --git a/extra/DEVELOPER_DOCS/ExplosionBot.md b/extra/DEVELOPER_DOCS/ExplosionBot.md
index eebec1a06..606fe93a0 100644
--- a/extra/DEVELOPER_DOCS/ExplosionBot.md
+++ b/extra/DEVELOPER_DOCS/ExplosionBot.md
@@ -16,21 +16,41 @@ To summon the robot, write a github comment on the issue/PR you wish to test. Th
Some things to note:
-* The `@explosion-bot please` must be the beginning of the command - you cannot add anything in front of this or else the robot won't know how to parse it. Adding anything at the end aside from the test name will also confuse the robot, so keep it simple!
-* The command name (such as `test_gpu`) must be one of the tests that the bot knows how to run. The available commands are documented in the bot's [workflow config](https://github.com/explosion/spaCy/blob/master/.github/workflows/explosionbot.yml#L26) and must match exactly one of the commands listed there.
-* The robot can't do multiple things at once, so if you want it to run multiple tests, you'll have to summon it with one comment per test.
-* For the `test_gpu` command, you can specify an optional thinc branch (from the spaCy repo) or a spaCy branch (from the thinc repo) with either the `--thinc-branch` or `--spacy-branch` flags. By default, the bot will pull in the PR branch from the repo where the command was issued, and the main branch of the other repository. However, if you need to run against another branch, you can say (for example):
+- The `@explosion-bot please` must be the beginning of the command - you cannot add anything in front of this or else the robot won't know how to parse it. Adding anything at the end aside from the test name will also confuse the robot, so keep it simple!
+- The command name (such as `test_gpu`) must be one of the tests that the bot knows how to run. The available commands are documented in the bot's [workflow config](https://github.com/explosion/spaCy/blob/master/.github/workflows/explosionbot.yml#L26) and must match exactly one of the commands listed there.
+- The robot can't do multiple things at once, so if you want it to run multiple tests, you'll have to summon it with one comment per test.
-```
-@explosion-bot please test_gpu --thinc-branch develop
-```
-You can also specify a branch from an unmerged PR:
-```
-@explosion-bot please test_gpu --thinc-branch refs/pull/633/head
-```
+### Examples
+
+- Execute spaCy slow GPU tests with a custom thinc branch from a spaCy PR:
+
+ ```
+  @explosion-bot please test_slow_gpu --thinc-branch <branch_name>
+ ```
+
+  `branch_name` can either be a named branch, e.g: `develop`, or an unmerged PR, e.g: `refs/pull/<pr_number>/head`.
+
+- Execute spaCy Transformers GPU tests from a spaCy PR:
+
+ ```
+ @explosion-bot please test_gpu --run-on spacy-transformers --run-on-branch master --spacy-branch current_pr
+ ```
+
+ This will launch the GPU pipeline for the `spacy-transformers` repo on its `master` branch, using the current spaCy PR's branch to build spaCy. The name of the repository passed to `--run-on` is case-sensitive, e.g: use `spaCy` instead of `spacy`.
+
+- General info about supported commands.
+
+ ```
+ @explosion-bot please info
+ ```
+
+- Help text for a specific command
+ ```
+  @explosion-bot please <command name> --help
+ ```
## Troubleshooting
-If the robot isn't responding to commands as expected, you can check its logs in the [Github Action](https://github.com/explosion/spaCy/actions/workflows/explosionbot.yml).
+If the robot isn't responding to commands as expected, you can check its logs in the [Github Action](https://github.com/explosion/spaCy/actions/workflows/explosionbot.yml).
For each command sent to the bot, there should be a run of the `explosion-bot` workflow. In the `Install and run explosion-bot` step, towards the ends of the logs you should see info about the configuration that the bot was run with, as well as any errors that the bot encountered.
diff --git a/pyproject.toml b/pyproject.toml
index 4e388e54f..7abd7a96f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -5,8 +5,7 @@ requires = [
"cymem>=2.0.2,<2.1.0",
"preshed>=3.0.2,<3.1.0",
"murmurhash>=0.28.0,<1.1.0",
- "thinc>=8.1.0.dev3,<8.2.0",
- "pathy",
+ "thinc>=8.1.0,<8.2.0",
"numpy>=1.15.0",
]
build-backend = "setuptools.build_meta"
diff --git a/requirements.txt b/requirements.txt
index 3b77140f6..3e8501b2f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,9 +1,9 @@
# Our libraries
-spacy-legacy>=3.0.9,<3.1.0
+spacy-legacy>=3.0.10,<3.1.0
spacy-loggers>=1.0.0,<2.0.0
cymem>=2.0.2,<2.1.0
preshed>=3.0.2,<3.1.0
-thinc>=8.1.0.dev3,<8.2.0
+thinc>=8.1.0,<8.2.0
ml_datasets>=0.2.0,<0.3.0
murmurhash>=0.28.0,<1.1.0
wasabi>=0.9.1,<1.1.0
@@ -30,8 +30,9 @@ pytest-timeout>=1.3.0,<2.0.0
mock>=2.0.0,<3.0.0
flake8>=3.8.0,<3.10.0
hypothesis>=3.27.0,<7.0.0
-mypy>=0.910,<=0.960
+mypy>=0.910,<0.970; platform_machine!='aarch64'
types-dataclasses>=0.1.3; python_version < "3.7"
types-mock>=0.1.1
types-requests
+types-setuptools>=57.0.0
black>=22.0,<23.0
diff --git a/setup.cfg b/setup.cfg
index 795d6d7d4..c76961181 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -31,28 +31,20 @@ project_urls =
zip_safe = false
include_package_data = true
python_requires = >=3.6
-setup_requires =
- cython>=0.25,<3.0
- numpy>=1.15.0
- # We also need our Cython packages here to compile against
- cymem>=2.0.2,<2.1.0
- preshed>=3.0.2,<3.1.0
- murmurhash>=0.28.0,<1.1.0
- thinc>=8.1.0.dev3,<8.2.0
install_requires =
# Our libraries
- spacy-legacy>=3.0.9,<3.1.0
+ spacy-legacy>=3.0.10,<3.1.0
spacy-loggers>=1.0.0,<2.0.0
murmurhash>=0.28.0,<1.1.0
cymem>=2.0.2,<2.1.0
preshed>=3.0.2,<3.1.0
- thinc>=8.1.0.dev3,<8.2.0
+ thinc>=8.1.0,<8.2.0
wasabi>=0.9.1,<1.1.0
srsly>=2.4.3,<3.0.0
catalogue>=2.0.6,<2.1.0
+ # Third-party dependencies
typer>=0.3.0,<0.5.0
pathy>=0.3.5
- # Third-party dependencies
tqdm>=4.38.0,<5.0.0
numpy>=1.15.0
requests>=2.13.0,<3.0.0
@@ -103,14 +95,18 @@ cuda114 =
cupy-cuda114>=5.0.0b4,<11.0.0
cuda115 =
cupy-cuda115>=5.0.0b4,<11.0.0
+cuda116 =
+ cupy-cuda116>=5.0.0b4,<11.0.0
+cuda117 =
+ cupy-cuda117>=5.0.0b4,<11.0.0
apple =
- thinc-apple-ops>=0.0.4,<1.0.0
+ thinc-apple-ops>=0.1.0.dev0,<1.0.0
# Language tokenizers with external dependencies
ja =
sudachipy>=0.5.2,!=0.6.1
sudachidict_core>=20211220
ko =
- natto-py==0.9.0
+ mecab-ko>=1.0.0
th =
pythainlp>=2.0
diff --git a/setup.py b/setup.py
index 316a58f47..3e02b156f 100755
--- a/setup.py
+++ b/setup.py
@@ -61,7 +61,7 @@ MOD_NAMES = [
"spacy.tokens.span_group",
"spacy.tokens.graph",
"spacy.tokens.morphanalysis",
- "spacy.tokens._retokenize",
+ "spacy.tokens.retokenizer",
"spacy.matcher.matcher",
"spacy.matcher.phrasematcher",
"spacy.matcher.dependencymatcher",
@@ -128,6 +128,8 @@ class build_ext_options:
class build_ext_subclass(build_ext, build_ext_options):
def build_extensions(self):
+ if self.parallel is None and os.environ.get("SPACY_NUM_BUILD_JOBS") is not None:
+ self.parallel = int(os.environ.get("SPACY_NUM_BUILD_JOBS"))
build_ext_options.build_options(self)
build_ext.build_extensions(self)
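This hook is what the `SPACY_NUM_BUILD_JOBS=2` line in the azure-steps.yml hunk at the top of this diff relies on. A minimal local sketch (assuming the current directory is a spaCy checkout; not part of this diff):

```python
import os
import subprocess

# Hypothetical local source build: cap parallel Cython compilation at two jobs.
# When --parallel/-j isn't passed explicitly, the build_ext_subclass hook above
# reads SPACY_NUM_BUILD_JOBS from the environment.
os.environ["SPACY_NUM_BUILD_JOBS"] = "2"
subprocess.run(["python", "-m", "pip", "install", "."], check=True)
```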
diff --git a/spacy/__init__.py b/spacy/__init__.py
index ca47edc94..d60f46b96 100644
--- a/spacy/__init__.py
+++ b/spacy/__init__.py
@@ -31,25 +31,33 @@ def load(
name: Union[str, Path],
*,
vocab: Union[Vocab, bool] = True,
- disable: Iterable[str] = util.SimpleFrozenList(),
- exclude: Iterable[str] = util.SimpleFrozenList(),
+ disable: Union[str, Iterable[str]] = util.SimpleFrozenList(),
+ enable: Union[str, Iterable[str]] = util.SimpleFrozenList(),
+ exclude: Union[str, Iterable[str]] = util.SimpleFrozenList(),
config: Union[Dict[str, Any], Config] = util.SimpleFrozenDict(),
) -> Language:
"""Load a spaCy model from an installed package or a local path.
name (str): Package name or model path.
vocab (Vocab): A Vocab object. If True, a vocab is created.
- disable (Iterable[str]): Names of pipeline components to disable. Disabled
+ disable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to disable. Disabled
pipes will be loaded but they won't be run unless you explicitly
enable them by calling nlp.enable_pipe.
- exclude (Iterable[str]): Names of pipeline components to exclude. Excluded
+ enable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to enable. All other
+ pipes will be disabled (but can be enabled later using nlp.enable_pipe).
+ exclude (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to exclude. Excluded
components won't be loaded.
config (Dict[str, Any] / Config): Config overrides as nested dict or dict
keyed by section values in dot notation.
RETURNS (Language): The loaded nlp object.
"""
return util.load_model(
- name, vocab=vocab, disable=disable, exclude=exclude, config=config
+ name,
+ vocab=vocab,
+ disable=disable,
+ enable=enable,
+ exclude=exclude,
+ config=config,
)
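A minimal usage sketch of the new `enable` argument, assuming an installed `en_core_web_sm` pipeline (the model and component names are illustrative):

```python
import spacy

# Only tok2vec and the tagger stay enabled; all other components are loaded
# but disabled, so the pipeline loads faster at runtime.
nlp = spacy.load("en_core_web_sm", enable=["tok2vec", "tagger"])
assert "parser" in nlp.disabled

# Disabled components can still be switched back on later.
nlp.enable_pipe("parser")
```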
diff --git a/spacy/about.py b/spacy/about.py
index 03eabc2e9..843c15aba 100644
--- a/spacy/about.py
+++ b/spacy/about.py
@@ -1,6 +1,6 @@
# fmt: off
__title__ = "spacy"
-__version__ = "3.3.0"
+__version__ = "3.4.1"
__download_url__ = "https://github.com/explosion/spacy-models/releases/download"
__compatibility__ = "https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json"
__projects__ = "https://github.com/explosion/projects"
diff --git a/spacy/attrs.pxd b/spacy/attrs.pxd
index 33d5372de..b8a7a1f08 100644
--- a/spacy/attrs.pxd
+++ b/spacy/attrs.pxd
@@ -1,98 +1,49 @@
-# Reserve 64 values for flag features
from . cimport symbols
cdef enum attr_id_t:
- NULL_ATTR
- IS_ALPHA
- IS_ASCII
- IS_DIGIT
- IS_LOWER
- IS_PUNCT
- IS_SPACE
- IS_TITLE
- IS_UPPER
- LIKE_URL
- LIKE_NUM
- LIKE_EMAIL
- IS_STOP
- IS_OOV_DEPRECATED
- IS_BRACKET
- IS_QUOTE
- IS_LEFT_PUNCT
- IS_RIGHT_PUNCT
- IS_CURRENCY
+ NULL_ATTR = 0
+ IS_ALPHA = symbols.IS_ALPHA
+ IS_ASCII = symbols.IS_ASCII
+ IS_DIGIT = symbols.IS_DIGIT
+ IS_LOWER = symbols.IS_LOWER
+ IS_PUNCT = symbols.IS_PUNCT
+ IS_SPACE = symbols.IS_SPACE
+ IS_TITLE = symbols.IS_TITLE
+ IS_UPPER = symbols.IS_UPPER
+ LIKE_URL = symbols.LIKE_URL
+ LIKE_NUM = symbols.LIKE_NUM
+ LIKE_EMAIL = symbols.LIKE_EMAIL
+ IS_STOP = symbols.IS_STOP
+ IS_BRACKET = symbols.IS_BRACKET
+ IS_QUOTE = symbols.IS_QUOTE
+ IS_LEFT_PUNCT = symbols.IS_LEFT_PUNCT
+ IS_RIGHT_PUNCT = symbols.IS_RIGHT_PUNCT
+ IS_CURRENCY = symbols.IS_CURRENCY
- FLAG19 = 19
- FLAG20
- FLAG21
- FLAG22
- FLAG23
- FLAG24
- FLAG25
- FLAG26
- FLAG27
- FLAG28
- FLAG29
- FLAG30
- FLAG31
- FLAG32
- FLAG33
- FLAG34
- FLAG35
- FLAG36
- FLAG37
- FLAG38
- FLAG39
- FLAG40
- FLAG41
- FLAG42
- FLAG43
- FLAG44
- FLAG45
- FLAG46
- FLAG47
- FLAG48
- FLAG49
- FLAG50
- FLAG51
- FLAG52
- FLAG53
- FLAG54
- FLAG55
- FLAG56
- FLAG57
- FLAG58
- FLAG59
- FLAG60
- FLAG61
- FLAG62
- FLAG63
+ ID = symbols.ID
+ ORTH = symbols.ORTH
+ LOWER = symbols.LOWER
+ NORM = symbols.NORM
+ SHAPE = symbols.SHAPE
+ PREFIX = symbols.PREFIX
+ SUFFIX = symbols.SUFFIX
- ID
- ORTH
- LOWER
- NORM
- SHAPE
- PREFIX
- SUFFIX
+ LENGTH = symbols.LENGTH
+ CLUSTER = symbols.CLUSTER
+ LEMMA = symbols.LEMMA
+ POS = symbols.POS
+ TAG = symbols.TAG
+ DEP = symbols.DEP
+ ENT_IOB = symbols.ENT_IOB
+ ENT_TYPE = symbols.ENT_TYPE
+ HEAD = symbols.HEAD
+ SENT_START = symbols.SENT_START
+ SPACY = symbols.SPACY
+ PROB = symbols.PROB
- LENGTH
- CLUSTER
- LEMMA
- POS
- TAG
- DEP
- ENT_IOB
- ENT_TYPE
- HEAD
- SENT_START
- SPACY
- PROB
-
- LANG
+ LANG = symbols.LANG
ENT_KB_ID = symbols.ENT_KB_ID
- MORPH
+ MORPH = symbols.MORPH
ENT_ID = symbols.ENT_ID
- IDX
- SENT_END
\ No newline at end of file
+ IDX = symbols.IDX
diff --git a/spacy/attrs.pyx b/spacy/attrs.pyx
index dc8eed7c3..9b0ae3400 100644
--- a/spacy/attrs.pyx
+++ b/spacy/attrs.pyx
@@ -16,57 +16,11 @@ IDS = {
"LIKE_NUM": LIKE_NUM,
"LIKE_EMAIL": LIKE_EMAIL,
"IS_STOP": IS_STOP,
- "IS_OOV_DEPRECATED": IS_OOV_DEPRECATED,
"IS_BRACKET": IS_BRACKET,
"IS_QUOTE": IS_QUOTE,
"IS_LEFT_PUNCT": IS_LEFT_PUNCT,
"IS_RIGHT_PUNCT": IS_RIGHT_PUNCT,
"IS_CURRENCY": IS_CURRENCY,
- "FLAG19": FLAG19,
- "FLAG20": FLAG20,
- "FLAG21": FLAG21,
- "FLAG22": FLAG22,
- "FLAG23": FLAG23,
- "FLAG24": FLAG24,
- "FLAG25": FLAG25,
- "FLAG26": FLAG26,
- "FLAG27": FLAG27,
- "FLAG28": FLAG28,
- "FLAG29": FLAG29,
- "FLAG30": FLAG30,
- "FLAG31": FLAG31,
- "FLAG32": FLAG32,
- "FLAG33": FLAG33,
- "FLAG34": FLAG34,
- "FLAG35": FLAG35,
- "FLAG36": FLAG36,
- "FLAG37": FLAG37,
- "FLAG38": FLAG38,
- "FLAG39": FLAG39,
- "FLAG40": FLAG40,
- "FLAG41": FLAG41,
- "FLAG42": FLAG42,
- "FLAG43": FLAG43,
- "FLAG44": FLAG44,
- "FLAG45": FLAG45,
- "FLAG46": FLAG46,
- "FLAG47": FLAG47,
- "FLAG48": FLAG48,
- "FLAG49": FLAG49,
- "FLAG50": FLAG50,
- "FLAG51": FLAG51,
- "FLAG52": FLAG52,
- "FLAG53": FLAG53,
- "FLAG54": FLAG54,
- "FLAG55": FLAG55,
- "FLAG56": FLAG56,
- "FLAG57": FLAG57,
- "FLAG58": FLAG58,
- "FLAG59": FLAG59,
- "FLAG60": FLAG60,
- "FLAG61": FLAG61,
- "FLAG62": FLAG62,
- "FLAG63": FLAG63,
"ID": ID,
"ORTH": ORTH,
"LOWER": LOWER,
@@ -92,12 +46,11 @@ IDS = {
}
-# ATTR IDs, in order of the symbol
-NAMES = [key for key, value in sorted(IDS.items(), key=lambda item: item[1])]
+NAMES = {v: k for k, v in IDS.items()}
locals().update(IDS)
-def intify_attrs(stringy_attrs, strings_map=None, _do_deprecated=False):
+def intify_attrs(stringy_attrs, strings_map=None):
"""
Normalize a dictionary of attributes, converting them to ints.
@@ -109,75 +62,6 @@ def intify_attrs(stringy_attrs, strings_map=None, _do_deprecated=False):
converted to ints.
"""
inty_attrs = {}
- if _do_deprecated:
- if "F" in stringy_attrs:
- stringy_attrs["ORTH"] = stringy_attrs.pop("F")
- if "L" in stringy_attrs:
- stringy_attrs["LEMMA"] = stringy_attrs.pop("L")
- if "pos" in stringy_attrs:
- stringy_attrs["TAG"] = stringy_attrs.pop("pos")
- if "morph" in stringy_attrs:
- morphs = stringy_attrs.pop("morph")
- if "number" in stringy_attrs:
- stringy_attrs.pop("number")
- if "tenspect" in stringy_attrs:
- stringy_attrs.pop("tenspect")
- morph_keys = [
- "PunctType",
- "PunctSide",
- "Other",
- "Degree",
- "AdvType",
- "Number",
- "VerbForm",
- "PronType",
- "Aspect",
- "Tense",
- "PartType",
- "Poss",
- "Hyph",
- "ConjType",
- "NumType",
- "Foreign",
- "VerbType",
- "NounType",
- "Gender",
- "Mood",
- "Negative",
- "Tense",
- "Voice",
- "Abbr",
- "Derivation",
- "Echo",
- "Foreign",
- "NameType",
- "NounType",
- "NumForm",
- "NumValue",
- "PartType",
- "Polite",
- "StyleVariant",
- "PronType",
- "AdjType",
- "Person",
- "Variant",
- "AdpType",
- "Reflex",
- "Negative",
- "Mood",
- "Aspect",
- "Case",
- "Polarity",
- "PrepCase",
- "Animacy", # U20
- ]
- for key in morph_keys:
- if key in stringy_attrs:
- stringy_attrs.pop(key)
- elif key.lower() in stringy_attrs:
- stringy_attrs.pop(key.lower())
- elif key.upper() in stringy_attrs:
- stringy_attrs.pop(key.upper())
for name, value in stringy_attrs.items():
int_key = intify_attr(name)
if int_key is not None:
diff --git a/spacy/cli/_util.py b/spacy/cli/_util.py
index bb7f2d352..ae43b991b 100644
--- a/spacy/cli/_util.py
+++ b/spacy/cli/_util.py
@@ -462,6 +462,23 @@ def git_sparse_checkout(repo, subpath, dest, branch):
shutil.move(str(source_path), str(dest))
+def git_repo_branch_exists(repo: str, branch: str) -> bool:
+    """Uses 'git ls-remote' to check if a repository and branch exist
+
+ repo (str): URL to get repo.
+ branch (str): Branch on repo to check.
+ RETURNS (bool): True if repo:branch exists.
+ """
+ get_git_version()
+ cmd = f"git ls-remote {repo} {branch}"
+ # We might be tempted to use `--exit-code` with `git ls-remote`, but
+ # `run_command` handles the `returncode` for us, so we'll rely on
+ # the fact that stdout returns '' if the requested branch doesn't exist
+ ret = run_command(cmd, capture=True)
+ exists = ret.stdout != ""
+ return exists
+
+
def get_git_version(
error: str = "Could not run 'git'. Make sure it's installed and the executable is available.",
) -> Tuple[int, int]:
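A sketch of how this helper is used by the `project clone` changes further down: probe the default branches in order and fail gracefully if none exists. Requires `git` and network access; the repository URL is illustrative.

```python
from spacy.cli._util import git_repo_branch_exists

repo = "https://github.com/explosion/spaCy"  # illustrative URL
branch = next(
    (b for b in ("main", "master") if git_repo_branch_exists(repo, b)),
    None,
)
if branch is None:
    raise SystemExit(f"No default branch found for {repo}")
print(f"Cloning from {repo} (branch '{branch}')")
```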
diff --git a/spacy/cli/debug_data.py b/spacy/cli/debug_data.py
index 0061515c6..bd05471b1 100644
--- a/spacy/cli/debug_data.py
+++ b/spacy/cli/debug_data.py
@@ -10,7 +10,7 @@ import math
from ._util import app, Arg, Opt, show_validation_error, parse_config_overrides
from ._util import import_code, debug_cli
-from ..training import Example
+from ..training import Example, remove_bilu_prefix
from ..training.initialize import get_sourced_components
from ..schemas import ConfigSchemaTraining
from ..pipeline._parser_internals import nonproj
@@ -361,7 +361,7 @@ def debug_data(
if label != "-"
]
labels_with_counts = _format_labels(labels_with_counts, counts=True)
- msg.text(f"Labels in train data: {_format_labels(labels)}", show=verbose)
+ msg.text(f"Labels in train data: {labels_with_counts}", show=verbose)
missing_labels = model_labels - labels
if missing_labels:
msg.warn(
@@ -758,9 +758,9 @@ def _compile_gold(
# "Illegal" whitespace entity
data["ws_ents"] += 1
if label.startswith(("B-", "U-")):
- combined_label = label.split("-")[1]
+ combined_label = remove_bilu_prefix(label)
data["ner"][combined_label] += 1
- if sent_starts[i] == True and label.startswith(("I-", "L-")):
+ if sent_starts[i] and label.startswith(("I-", "L-")):
data["boundary_cross_ents"] += 1
elif label == "-":
data["ner"]["-"] += 1
@@ -908,7 +908,7 @@ def _get_examples_without_label(
for eg in data:
if component == "ner":
labels = [
- label.split("-")[1]
+ remove_bilu_prefix(label)
for label in eg.get_aligned_ner()
if label not in ("O", "-", None)
]
diff --git a/spacy/cli/download.py b/spacy/cli/download.py
index 4ea9a8f0e..0c9a32b93 100644
--- a/spacy/cli/download.py
+++ b/spacy/cli/download.py
@@ -7,6 +7,7 @@ import typer
from ._util import app, Arg, Opt, WHEEL_SUFFIX, SDIST_SUFFIX
from .. import about
from ..util import is_package, get_minor_version, run_command
+from ..util import is_prerelease_version
from ..errors import OLD_MODEL_SHORTCUTS
@@ -19,7 +20,7 @@ def download_cli(
ctx: typer.Context,
model: str = Arg(..., help="Name of pipeline package to download"),
direct: bool = Opt(False, "--direct", "-d", "-D", help="Force direct download of name + version"),
- sdist: bool = Opt(False, "--sdist", "-S", help="Download sdist (.tar.gz) archive instead of pre-built binary wheel")
+ sdist: bool = Opt(False, "--sdist", "-S", help="Download sdist (.tar.gz) archive instead of pre-built binary wheel"),
# fmt: on
):
"""
@@ -35,7 +36,12 @@ def download_cli(
download(model, direct, sdist, *ctx.args)
-def download(model: str, direct: bool = False, sdist: bool = False, *pip_args) -> None:
+def download(
+ model: str,
+ direct: bool = False,
+ sdist: bool = False,
+ *pip_args,
+) -> None:
if (
not (is_package("spacy") or is_package("spacy-nightly"))
and "--no-deps" not in pip_args
@@ -49,13 +55,10 @@ def download(model: str, direct: bool = False, sdist: bool = False, *pip_args) -
"dependencies, you'll have to install them manually."
)
pip_args = pip_args + ("--no-deps",)
- suffix = SDIST_SUFFIX if sdist else WHEEL_SUFFIX
- dl_tpl = "{m}-{v}/{m}-{v}{s}#egg={m}=={v}"
if direct:
components = model.split("-")
model_name = "".join(components[:-1])
version = components[-1]
- download_model(dl_tpl.format(m=model_name, v=version, s=suffix), pip_args)
else:
model_name = model
if model in OLD_MODEL_SHORTCUTS:
@@ -66,15 +69,31 @@ def download(model: str, direct: bool = False, sdist: bool = False, *pip_args) -
model_name = OLD_MODEL_SHORTCUTS[model]
compatibility = get_compatibility()
version = get_version(model_name, compatibility)
- download_model(dl_tpl.format(m=model_name, v=version, s=suffix), pip_args)
+
+ filename = get_model_filename(model_name, version, sdist)
+
+ download_model(filename, pip_args)
msg.good(
"Download and installation successful",
f"You can now load the package via spacy.load('{model_name}')",
)
+def get_model_filename(model_name: str, version: str, sdist: bool = False) -> str:
+ dl_tpl = "{m}-{v}/{m}-{v}{s}"
+ egg_tpl = "#egg={m}=={v}"
+ suffix = SDIST_SUFFIX if sdist else WHEEL_SUFFIX
+ filename = dl_tpl.format(m=model_name, v=version, s=suffix)
+ if sdist:
+ filename += egg_tpl.format(m=model_name, v=version)
+ return filename
+
+
def get_compatibility() -> dict:
- version = get_minor_version(about.__version__)
+ if is_prerelease_version(about.__version__):
+ version: Optional[str] = about.__version__
+ else:
+ version = get_minor_version(about.__version__)
r = requests.get(about.__compatibility__)
if r.status_code != 200:
msg.fail(
@@ -101,6 +120,11 @@ def get_version(model: str, comp: dict) -> str:
return comp[model][0]
+def get_latest_version(model: str) -> str:
+ comp = get_compatibility()
+ return get_version(model, comp)
+
+
def download_model(
filename: str, user_pip_args: Optional[Sequence[str]] = None
) -> None:
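For orientation, a self-contained sketch of the filename template split out into `get_model_filename` above. The suffixes are assumed stand-ins for `WHEEL_SUFFIX`/`SDIST_SUFFIX` from `._util`, and the pipeline name and version are made up:

```python
WHEEL_SUFFIX = "-py3-none-any.whl"  # assumed value, see spacy.cli._util
SDIST_SUFFIX = ".tar.gz"            # assumed value, see spacy.cli._util

def model_filename(name: str, version: str, sdist: bool = False) -> str:
    # Mirrors get_model_filename(): wheels drop the #egg fragment, sdists keep it.
    suffix = SDIST_SUFFIX if sdist else WHEEL_SUFFIX
    filename = f"{name}-{version}/{name}-{version}{suffix}"
    if sdist:
        filename += f"#egg={name}=={version}"
    return filename

print(model_filename("en_core_web_sm", "3.4.0"))
# en_core_web_sm-3.4.0/en_core_web_sm-3.4.0-py3-none-any.whl
print(model_filename("en_core_web_sm", "3.4.0", sdist=True))
# en_core_web_sm-3.4.0/en_core_web_sm-3.4.0.tar.gz#egg=en_core_web_sm==3.4.0
```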
diff --git a/spacy/cli/info.py b/spacy/cli/info.py
index e6a1cb616..e6ac4270f 100644
--- a/spacy/cli/info.py
+++ b/spacy/cli/info.py
@@ -1,10 +1,13 @@
from typing import Optional, Dict, Any, Union, List
import platform
+import pkg_resources
+import json
from pathlib import Path
from wasabi import Printer, MarkdownRenderer
import srsly
from ._util import app, Arg, Opt, string_to_list
+from .download import get_model_filename, get_latest_version
from .. import util
from .. import about
@@ -16,6 +19,7 @@ def info_cli(
markdown: bool = Opt(False, "--markdown", "-md", help="Generate Markdown for GitHub issues"),
silent: bool = Opt(False, "--silent", "-s", "-S", help="Don't print anything (just return)"),
exclude: str = Opt("labels", "--exclude", "-e", help="Comma-separated keys to exclude from the print-out"),
+ url: bool = Opt(False, "--url", "-u", help="Print the URL to download the most recent compatible version of the pipeline"),
# fmt: on
):
"""
@@ -23,10 +27,19 @@ def info_cli(
print its meta information. Flag --markdown prints details in Markdown for easy
copy-pasting to GitHub issues.
+ Flag --url prints only the download URL of the most recent compatible
+ version of the pipeline.
+
DOCS: https://spacy.io/api/cli#info
"""
exclude = string_to_list(exclude)
- info(model, markdown=markdown, silent=silent, exclude=exclude)
+ info(
+ model,
+ markdown=markdown,
+ silent=silent,
+ exclude=exclude,
+ url=url,
+ )
def info(
@@ -35,11 +48,20 @@ def info(
markdown: bool = False,
silent: bool = True,
exclude: Optional[List[str]] = None,
+ url: bool = False,
) -> Union[str, dict]:
msg = Printer(no_print=silent, pretty=not silent)
if not exclude:
exclude = []
- if model:
+ if url:
+ if model is not None:
+ title = f"Download info for pipeline '{model}'"
+ data = info_model_url(model)
+ print(data["download_url"])
+ return data
+ else:
+ msg.fail("--url option requires a pipeline name", exits=1)
+ elif model:
title = f"Info about pipeline '{model}'"
data = info_model(model, silent=silent)
else:
@@ -99,11 +121,43 @@ def info_model(model: str, *, silent: bool = True) -> Dict[str, Any]:
meta["source"] = str(model_path.resolve())
else:
meta["source"] = str(model_path)
+ download_url = info_installed_model_url(model)
+ if download_url:
+ meta["download_url"] = download_url
return {
k: v for k, v in meta.items() if k not in ("accuracy", "performance", "speed")
}
+def info_installed_model_url(model: str) -> Optional[str]:
+ """Given a pipeline name, get the download URL if available, otherwise
+ return None.
+
+ This is only available for pipelines installed as modules that have
+ dist-info available.
+ """
+ try:
+ dist = pkg_resources.get_distribution(model)
+ data = json.loads(dist.get_metadata("direct_url.json"))
+ return data["url"]
+ except pkg_resources.DistributionNotFound:
+ # no such package
+ return None
+ except Exception:
+ # something else, like no file or invalid JSON
+ return None
+
+def info_model_url(model: str) -> Dict[str, Any]:
+ """Return the download URL for the latest version of a pipeline."""
+ version = get_latest_version(model)
+
+ filename = get_model_filename(model, version)
+ download_url = about.__download_url__ + "/" + filename
+ release_tpl = "https://github.com/explosion/spacy-models/releases/tag/{m}-{v}"
+ release_url = release_tpl.format(m=model, v=version)
+ return {"download_url": download_url, "release_url": release_url}
+
+
def get_markdown(
data: Dict[str, Any],
title: Optional[str] = None,
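The new flag makes `python -m spacy info en_core_web_sm --url` print just the download URL. A hedged sketch of the equivalent Python call (the pipeline name is illustrative, and the call needs network access to fetch the compatibility table):

```python
from spacy.cli.info import info

# Prints the download URL and returns {"download_url": ..., "release_url": ...}.
data = info("en_core_web_sm", url=True)
print(data["release_url"])
```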
diff --git a/spacy/cli/init_config.py b/spacy/cli/init_config.py
index d4cd939c2..b634caa4c 100644
--- a/spacy/cli/init_config.py
+++ b/spacy/cli/init_config.py
@@ -10,6 +10,7 @@ from jinja2 import Template
from .. import util
from ..language import DEFAULT_CONFIG_PRETRAIN_PATH
from ..schemas import RecommendationSchema
+from ..util import SimpleFrozenList
from ._util import init_cli, Arg, Opt, show_validation_error, COMMAND
from ._util import string_to_list, import_code
@@ -24,16 +25,30 @@ class Optimizations(str, Enum):
accuracy = "accuracy"
+class InitValues:
+ """
+ Default values for initialization. Dedicated class to allow synchronized default values for init_config_cli() and
+    init_config(), i.e. initialization calls via the CLI and Python, respectively.
+ """
+
+ lang = "en"
+ pipeline = SimpleFrozenList(["tagger", "parser", "ner"])
+ optimize = Optimizations.efficiency
+ gpu = False
+ pretraining = False
+ force_overwrite = False
+
+
@init_cli.command("config")
def init_config_cli(
# fmt: off
output_file: Path = Arg(..., help="File to save the config to or - for stdout (will only output config and no additional logging info)", allow_dash=True),
- lang: str = Opt("en", "--lang", "-l", help="Two-letter code of the language to use"),
- pipeline: str = Opt("tagger,parser,ner", "--pipeline", "-p", help="Comma-separated names of trainable pipeline components to include (without 'tok2vec' or 'transformer')"),
- optimize: Optimizations = Opt(Optimizations.efficiency.value, "--optimize", "-o", help="Whether to optimize for efficiency (faster inference, smaller model, lower memory consumption) or higher accuracy (potentially larger and slower model). This will impact the choice of architecture, pretrained weights and related hyperparameters."),
- gpu: bool = Opt(False, "--gpu", "-G", help="Whether the model can run on GPU. This will impact the choice of architecture, pretrained weights and related hyperparameters."),
- pretraining: bool = Opt(False, "--pretraining", "-pt", help="Include config for pretraining (with 'spacy pretrain')"),
- force_overwrite: bool = Opt(False, "--force", "-F", help="Force overwriting the output file"),
+ lang: str = Opt(InitValues.lang, "--lang", "-l", help="Two-letter code of the language to use"),
+ pipeline: str = Opt(",".join(InitValues.pipeline), "--pipeline", "-p", help="Comma-separated names of trainable pipeline components to include (without 'tok2vec' or 'transformer')"),
+ optimize: Optimizations = Opt(InitValues.optimize, "--optimize", "-o", help="Whether to optimize for efficiency (faster inference, smaller model, lower memory consumption) or higher accuracy (potentially larger and slower model). This will impact the choice of architecture, pretrained weights and related hyperparameters."),
+ gpu: bool = Opt(InitValues.gpu, "--gpu", "-G", help="Whether the model can run on GPU. This will impact the choice of architecture, pretrained weights and related hyperparameters."),
+ pretraining: bool = Opt(InitValues.pretraining, "--pretraining", "-pt", help="Include config for pretraining (with 'spacy pretrain')"),
+ force_overwrite: bool = Opt(InitValues.force_overwrite, "--force", "-F", help="Force overwriting the output file"),
# fmt: on
):
"""
@@ -133,11 +148,11 @@ def fill_config(
def init_config(
*,
- lang: str,
- pipeline: List[str],
- optimize: str,
- gpu: bool,
- pretraining: bool = False,
+ lang: str = InitValues.lang,
+ pipeline: List[str] = InitValues.pipeline,
+ optimize: str = InitValues.optimize,
+ gpu: bool = InitValues.gpu,
+ pretraining: bool = InitValues.pretraining,
silent: bool = True,
) -> Config:
msg = Printer(no_print=silent)
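With `InitValues` holding the shared defaults, `init_config()` can now be called from Python with no arguments and still match the CLI behaviour. A minimal sketch:

```python
from spacy.cli.init_config import init_config

# Falls back to InitValues: lang="en", pipeline=["tagger", "parser", "ner"],
# optimize="efficiency", gpu=False, pretraining=False.
config = init_config()
print(config["nlp"]["lang"])  # "en"
```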
diff --git a/spacy/cli/pretrain.py b/spacy/cli/pretrain.py
index fe3ce0dad..381d589cf 100644
--- a/spacy/cli/pretrain.py
+++ b/spacy/cli/pretrain.py
@@ -61,7 +61,7 @@ def pretrain_cli(
# TODO: What's the solution here? How do we handle optional blocks?
msg.fail("The [pretraining] block in your config is empty", exits=1)
if not output_dir.exists():
- output_dir.mkdir()
+ output_dir.mkdir(parents=True)
msg.good(f"Created output directory: {output_dir}")
# Save non-interpolated config
raw_config.to_disk(output_dir / "config.cfg")
diff --git a/spacy/cli/project/clone.py b/spacy/cli/project/clone.py
index 360ee3428..14b4ed9b5 100644
--- a/spacy/cli/project/clone.py
+++ b/spacy/cli/project/clone.py
@@ -7,11 +7,11 @@ import re
from ... import about
from ...util import ensure_path
from .._util import project_cli, Arg, Opt, COMMAND, PROJECT_FILE
-from .._util import git_checkout, get_git_version
+from .._util import git_checkout, get_git_version, git_repo_branch_exists
DEFAULT_REPO = about.__projects__
DEFAULT_PROJECTS_BRANCH = about.__projects_branch__
-DEFAULT_BRANCH = "master"
+DEFAULT_BRANCHES = ["main", "master"]
@project_cli.command("clone")
@@ -20,7 +20,7 @@ def project_clone_cli(
name: str = Arg(..., help="The name of the template to clone"),
dest: Optional[Path] = Arg(None, help="Where to clone the project. Defaults to current working directory", exists=False),
repo: str = Opt(DEFAULT_REPO, "--repo", "-r", help="The repository to clone from"),
- branch: Optional[str] = Opt(None, "--branch", "-b", help="The branch to clone from"),
+ branch: Optional[str] = Opt(None, "--branch", "-b", help=f"The branch to clone from. If not provided, will attempt {', '.join(DEFAULT_BRANCHES)}"),
sparse_checkout: bool = Opt(False, "--sparse", "-S", help="Use sparse Git checkout to only check out and clone the files needed. Requires Git v22.2+.")
# fmt: on
):
@@ -33,9 +33,25 @@ def project_clone_cli(
"""
if dest is None:
dest = Path.cwd() / Path(name).parts[-1]
+ if repo == DEFAULT_REPO and branch is None:
+ branch = DEFAULT_PROJECTS_BRANCH
+
if branch is None:
- # If it's a user repo, we want to default to other branch
- branch = DEFAULT_PROJECTS_BRANCH if repo == DEFAULT_REPO else DEFAULT_BRANCH
+ for default_branch in DEFAULT_BRANCHES:
+ if git_repo_branch_exists(repo, default_branch):
+ branch = default_branch
+ break
+ if branch is None:
+ default_branches_msg = ", ".join(f"'{b}'" for b in DEFAULT_BRANCHES)
+ msg.fail(
+ "No branch provided and attempted default "
+ f"branches {default_branches_msg} do not exist.",
+ exits=1,
+ )
+ else:
+ if not git_repo_branch_exists(repo, branch):
+ msg.fail(f"repo: {repo} (branch: {branch}) does not exist.", exits=1)
+ assert isinstance(branch, str)
project_clone(name, dest, repo=repo, branch=branch, sparse_checkout=sparse_checkout)
@@ -61,9 +77,9 @@ def project_clone(
try:
git_checkout(repo, name, dest, branch=branch, sparse=sparse_checkout)
except subprocess.CalledProcessError:
- err = f"Could not clone '{name}' from repo '{repo_name}'"
+ err = f"Could not clone '{name}' from repo '{repo_name}' (branch '{branch}')"
msg.fail(err, exits=1)
- msg.good(f"Cloned '{name}' from {repo_name}", project_dir)
+ msg.good(f"Cloned '{name}' from '{repo_name}' (branch '{branch}')", project_dir)
if not (project_dir / PROJECT_FILE).exists():
msg.warn(f"No {PROJECT_FILE} found in directory")
else:
diff --git a/spacy/displacy/__init__.py b/spacy/displacy/__init__.py
index 5d49b6eb7..7bb300afa 100644
--- a/spacy/displacy/__init__.py
+++ b/spacy/displacy/__init__.py
@@ -123,7 +123,8 @@ def app(environ, start_response):
def parse_deps(orig_doc: Doc, options: Dict[str, Any] = {}) -> Dict[str, Any]:
"""Generate dependency parse in {'words': [], 'arcs': []} format.
- doc (Doc): Document do parse.
+ orig_doc (Doc): Document to parse.
+ options (Dict[str, Any]): Dependency parse specific visualisation options.
RETURNS (dict): Generated dependency parse keyed by words and arcs.
"""
doc = Doc(orig_doc.vocab).from_bytes(
@@ -209,7 +210,7 @@ def parse_ents(doc: Doc, options: Dict[str, Any] = {}) -> Dict[str, Any]:
def parse_spans(doc: Doc, options: Dict[str, Any] = {}) -> Dict[str, Any]:
- """Generate spans in [{start: i, end: i, label: 'label'}] format.
+ """Generate spans in [{start_token: i, end_token: i, label: 'label'}] format.
doc (Doc): Document to parse.
options (Dict[str, any]): Span-specific visualisation options.
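The corrected docstring matches the token-based keys the span visualiser consumes. A small sketch of rendering that format manually (text, tokens and label are made up for illustration):

```python
from spacy import displacy

doc_data = {
    "text": "Welcome to the Bank of China.",
    "spans": [
        # start_token/end_token are indices into "tokens", not character offsets
        {"start_token": 3, "end_token": 6, "label": "ORG"},
    ],
    "tokens": ["Welcome", "to", "the", "Bank", "of", "China", "."],
}
html = displacy.render(doc_data, style="span", manual=True)
```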
diff --git a/spacy/displacy/render.py b/spacy/displacy/render.py
index 247ad996b..50dc3466c 100644
--- a/spacy/displacy/render.py
+++ b/spacy/displacy/render.py
@@ -64,8 +64,11 @@ class SpanRenderer:
# Set up how the text and labels will be rendered
self.direction = DEFAULT_DIR
self.lang = DEFAULT_LANG
+ # These values are in px
self.top_offset = options.get("top_offset", 40)
- self.top_offset_step = options.get("top_offset_step", 17)
+ # This is how far under the top offset the span labels appear
+ self.span_label_offset = options.get("span_label_offset", 20)
+ self.offset_step = options.get("top_offset_step", 17)
# Set up which templates will be used
template = options.get("template")
@@ -127,26 +130,56 @@ class SpanRenderer:
title (str / None): Document title set in Doc.user_data['title'].
"""
per_token_info = []
+ # we must sort so that we can correctly describe when spans need to "stack"
+ # which is determined by their start token, then span length (longer spans on top),
+ # then break any remaining ties with the span label
+ spans = sorted(
+ spans,
+ key=lambda s: (
+ s["start_token"],
+ -(s["end_token"] - s["start_token"]),
+ s["label"],
+ ),
+ )
+ for s in spans:
+ # this is the vertical 'slot' that the span will be rendered in
+ # vertical_position = span_label_offset + (offset_step * (slot - 1))
+ s["render_slot"] = 0
for idx, token in enumerate(tokens):
# Identify if a token belongs to a Span (and which) and if it's a
# start token of said Span. We'll use this for the final HTML render
token_markup: Dict[str, Any] = {}
token_markup["text"] = token
+ concurrent_spans = 0
entities = []
for span in spans:
ent = {}
if span["start_token"] <= idx < span["end_token"]:
+ concurrent_spans += 1
+ span_start = idx == span["start_token"]
ent["label"] = span["label"]
- ent["is_start"] = True if idx == span["start_token"] else False
+ ent["is_start"] = span_start
+ if span_start:
+ # When the span starts, we need to know how many other
+ # spans are on the 'span stack' and will be rendered.
+ # This value becomes the vertical render slot for this entire span
+ span["render_slot"] = concurrent_spans
+ ent["render_slot"] = span["render_slot"]
kb_id = span.get("kb_id", "")
kb_url = span.get("kb_url", "#")
ent["kb_link"] = (
TPL_KB_LINK.format(kb_id=kb_id, kb_url=kb_url) if kb_id else ""
)
entities.append(ent)
+ else:
+ # We don't specifically need to do this since we loop
+ # over tokens and spans sorted by their start_token,
+ # so we'll never use a span again after the last token it appears in,
+ # but if we were to use these spans again we'd want to make sure
+ # this value was reset correctly.
+ span["render_slot"] = 0
token_markup["entities"] = entities
per_token_info.append(token_markup)
-
markup = self._render_markup(per_token_info)
markup = TPL_SPANS.format(content=markup, dir=self.direction)
if title:
@@ -157,12 +190,24 @@ class SpanRenderer:
"""Render the markup from per-token information"""
markup = ""
for token in per_token_info:
- entities = sorted(token["entities"], key=lambda d: d["label"])
- if entities:
+ entities = sorted(token["entities"], key=lambda d: d["render_slot"])
+ # Whitespace tokens disrupt the vertical space (no line height) so that the
+ # span indicators get misaligned. We don't render them as individual
+ # tokens anyway, so we'll just not display a span indicator either.
+ is_whitespace = token["text"].strip() == ""
+ if entities and not is_whitespace:
slices = self._get_span_slices(token["entities"])
starts = self._get_span_starts(token["entities"])
+ total_height = (
+ self.top_offset
+ + self.span_label_offset
+ + (self.offset_step * (len(entities) - 1))
+ )
markup += self.span_template.format(
- text=token["text"], span_slices=slices, span_starts=starts
+ text=token["text"],
+ span_slices=slices,
+ span_starts=starts,
+ total_height=total_height,
)
else:
markup += escape_html(token["text"] + " ")
@@ -171,10 +216,18 @@ class SpanRenderer:
def _get_span_slices(self, entities: List[Dict]) -> str:
"""Get the rendered markup of all Span slices"""
span_slices = []
- for entity, step in zip(entities, itertools.count(step=self.top_offset_step)):
+ for entity in entities:
+ # rather than iterate over multiples of offset_step, we use entity['render_slot']
+ # to determine the vertical position, since that tells where
+ # the span starts vertically so we can extend it horizontally,
+ # past other spans that might have already ended
color = self.colors.get(entity["label"].upper(), self.default_color)
+ top_offset = self.top_offset + (
+ self.offset_step * (entity["render_slot"] - 1)
+ )
span_slice = self.span_slice_template.format(
- bg=color, top_offset=self.top_offset + step
+ bg=color,
+ top_offset=top_offset,
)
span_slices.append(span_slice)
return "".join(span_slices)
@@ -182,12 +235,15 @@ class SpanRenderer:
def _get_span_starts(self, entities: List[Dict]) -> str:
"""Get the rendered markup of all Span start tokens"""
span_starts = []
- for entity, step in zip(entities, itertools.count(step=self.top_offset_step)):
+ for entity in entities:
color = self.colors.get(entity["label"].upper(), self.default_color)
+ top_offset = self.top_offset + (
+ self.offset_step * (entity["render_slot"] - 1)
+ )
span_start = (
self.span_start_template.format(
bg=color,
- top_offset=self.top_offset + step,
+ top_offset=top_offset,
label=entity["label"],
kb_link=entity["kb_link"],
)
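To make the stacking behaviour above concrete, here is a standalone sketch of how two overlapping spans are assigned vertical render slots under the new sort order (the data is made up):

```python
# Spans are sorted by start token, then longest-first, then label; a span's
# slot is the number of spans covering its start token at the moment it begins.
spans = [
    {"start_token": 1, "end_token": 3, "label": "SHORT"},
    {"start_token": 0, "end_token": 4, "label": "LONG"},
]
spans = sorted(
    spans,
    key=lambda s: (s["start_token"], -(s["end_token"] - s["start_token"]), s["label"]),
)
for idx in range(4):  # token indices
    concurrent = 0
    for span in spans:
        if span["start_token"] <= idx < span["end_token"]:
            concurrent += 1
            if idx == span["start_token"]:
                span["render_slot"] = concurrent

# "LONG" starts first (and is longer), so it keeps slot 1 and is drawn closest
# to the text; "SHORT" stacks above it in slot 2.
assert [s["render_slot"] for s in spans] == [1, 2]
```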
diff --git a/spacy/displacy/templates.py b/spacy/displacy/templates.py
index ff81e7a1d..40f5376b1 100644
--- a/spacy/displacy/templates.py
+++ b/spacy/displacy/templates.py
@@ -67,7 +67,7 @@ TPL_SPANS = """
"""
TPL_SPAN = """
-
+
{text}
{span_slices}
{span_starts}
diff --git a/spacy/errors.py b/spacy/errors.py
index d7df8b4ca..ff5ddacf0 100644
--- a/spacy/errors.py
+++ b/spacy/errors.py
@@ -16,8 +16,8 @@ def setup_default_warnings():
filter_warning("ignore", error_msg="numpy.dtype size changed") # noqa
filter_warning("ignore", error_msg="numpy.ufunc size changed") # noqa
- # warn about entity_ruler & matcher having no patterns only once
- for pipe in ["matcher", "entity_ruler"]:
+ # warn about entity_ruler, span_ruler & matcher having no patterns only once
+ for pipe in ["matcher", "entity_ruler", "span_ruler"]:
filter_warning("once", error_msg=Warnings.W036.format(name=pipe))
# warn once about lemmatizer without required POS
@@ -209,6 +209,9 @@ class Warnings(metaclass=ErrorsWithCodes):
"Only the last span group will be loaded under "
"Doc.spans['{group_name}']. Skipping span group with values: "
"{group_values}")
+ W121 = ("Attempting to trace non-existent method '{method}' in pipe '{pipe}'")
+ W122 = ("Couldn't trace method '{method}' in pipe '{pipe}'. This can happen if the pipe class "
+ "is a Cython extension type.")
class Errors(metaclass=ErrorsWithCodes):
@@ -227,8 +230,9 @@ class Errors(metaclass=ErrorsWithCodes):
"initialized component.")
E004 = ("Can't set up pipeline component: a factory for '{name}' already "
"exists. Existing factory: {func}. New factory: {new_func}")
- E005 = ("Pipeline component '{name}' returned None. If you're using a "
- "custom component, maybe you forgot to return the processed Doc?")
+ E005 = ("Pipeline component '{name}' returned {returned_type} instead of a "
+ "Doc. If you're using a custom component, maybe you forgot to "
+ "return the processed Doc?")
E006 = ("Invalid constraints for adding pipeline component. You can only "
"set one of the following: before (component name or index), "
"after (component name or index), first (True) or last (True). "
@@ -386,7 +390,7 @@ class Errors(metaclass=ErrorsWithCodes):
"consider using doc.spans instead.")
E106 = ("Can't find `doc._.{attr}` attribute specified in the underscore "
"settings: {opts}")
- E107 = ("Value of `doc._.{attr}` is not JSON-serializable: {value}")
+ E107 = ("Value of custom attribute `{attr}` is not JSON-serializable: {value}")
E109 = ("Component '{name}' could not be run. Did you forget to "
"call `initialize()`?")
E110 = ("Invalid displaCy render wrapper. Expected callable, got: {obj}")
@@ -484,7 +488,7 @@ class Errors(metaclass=ErrorsWithCodes):
"Current DocBin: {current}\nOther DocBin: {other}")
E169 = ("Can't find module: {module}")
E170 = ("Cannot apply transition {name}: invalid for the current state.")
- E171 = ("Matcher.add received invalid 'on_match' callback argument: expected "
+ E171 = ("{name}.add received invalid 'on_match' callback argument: expected "
"callable or None, but got: {arg_type}")
E175 = ("Can't remove rule for unknown match pattern ID: {key}")
E176 = ("Alias '{alias}' is not defined in the Knowledge Base.")
@@ -532,11 +536,12 @@ class Errors(metaclass=ErrorsWithCodes):
E198 = ("Unable to return {n} most similar vectors for the current vectors "
"table, which contains {n_rows} vectors.")
E199 = ("Unable to merge 0-length span at `doc[{start}:{end}]`.")
- E200 = ("Can't yet set {attr} from Span. Vote for this feature on the "
- "issue tracker: http://github.com/explosion/spaCy/issues")
+ E200 = ("Can't set {attr} from Span.")
E202 = ("Unsupported {name} mode '{mode}'. Supported modes: {modes}.")
# New errors added in v3.x
+ E853 = ("Unsupported component factory name '{name}'. The character '.' is "
+ "not permitted in factory names.")
E854 = ("Unable to set doc.ents. Check that the 'ents_filter' does not "
"permit overlapping spans.")
E855 = ("Invalid {obj}: {obj} is not from the same doc.")
@@ -734,7 +739,7 @@ class Errors(metaclass=ErrorsWithCodes):
"loaded nlp object, but got: {source}")
E947 = ("`Matcher.add` received invalid `greedy` argument: expected "
"a string value from {expected} but got: '{arg}'")
- E948 = ("`Matcher.add` received invalid 'patterns' argument: expected "
+ E948 = ("`{name}.add` received invalid 'patterns' argument: expected "
"a list, but got: {arg_type}")
E949 = ("Unable to align tokens for the predicted and reference docs. It "
"is only possible to align the docs when both texts are the same "
@@ -932,7 +937,14 @@ class Errors(metaclass=ErrorsWithCodes):
E1040 = ("Doc.from_json requires all tokens to have the same attributes. "
"Some tokens do not contain annotation for: {partial_attrs}")
E1041 = ("Expected a string, Doc, or bytes as input, but got: {type}")
- E1042 = ("Backprop is not supported when is_train is not set.")
+ E1042 = ("Function was called with `{arg1}`={arg1_values} and "
+ "`{arg2}`={arg2_values} but these arguments are conflicting.")
+ E1043 = ("Expected None or a value in range [{range_start}, {range_end}] for entity linker threshold, but got "
+ "{value}.")
+
+ # v4 error strings
+ E4000 = ("Expected a Doc as input, but got: '{type}'")
+ E4001 = ("Backprop is not supported when is_train is not set.")
# Deprecated model shortcuts, only used in errors and warnings
diff --git a/spacy/kb.pyx b/spacy/kb.pyx
index 9a765c8e4..ae1983a8d 100644
--- a/spacy/kb.pyx
+++ b/spacy/kb.pyx
@@ -93,14 +93,14 @@ cdef class KnowledgeBase:
self.vocab = vocab
self._create_empty_vectors(dummy_hash=self.vocab.strings[""])
- def initialize_entities(self, int64_t nr_entities):
+ def _initialize_entities(self, int64_t nr_entities):
self._entry_index = PreshMap(nr_entities + 1)
self._entries = entry_vec(nr_entities + 1)
- def initialize_vectors(self, int64_t nr_entities):
+ def _initialize_vectors(self, int64_t nr_entities):
self._vectors_table = float_matrix(nr_entities + 1)
- def initialize_aliases(self, int64_t nr_aliases):
+ def _initialize_aliases(self, int64_t nr_aliases):
self._alias_index = PreshMap(nr_aliases + 1)
self._aliases_table = alias_vec(nr_aliases + 1)
@@ -155,8 +155,8 @@ cdef class KnowledgeBase:
raise ValueError(Errors.E140)
nr_entities = len(set(entity_list))
- self.initialize_entities(nr_entities)
- self.initialize_vectors(nr_entities)
+ self._initialize_entities(nr_entities)
+ self._initialize_vectors(nr_entities)
i = 0
cdef KBEntryC entry
@@ -388,9 +388,9 @@ cdef class KnowledgeBase:
nr_entities = header[0]
nr_aliases = header[1]
entity_vector_length = header[2]
- self.initialize_entities(nr_entities)
- self.initialize_vectors(nr_entities)
- self.initialize_aliases(nr_aliases)
+ self._initialize_entities(nr_entities)
+ self._initialize_vectors(nr_entities)
+ self._initialize_aliases(nr_aliases)
self.entity_vector_length = entity_vector_length
def deserialize_vectors(b):
@@ -512,8 +512,8 @@ cdef class KnowledgeBase:
cdef int64_t entity_vector_length
reader.read_header(&nr_entities, &entity_vector_length)
- self.initialize_entities(nr_entities)
- self.initialize_vectors(nr_entities)
+ self._initialize_entities(nr_entities)
+ self._initialize_vectors(nr_entities)
self.entity_vector_length = entity_vector_length
# STEP 1: load entity vectors
@@ -552,7 +552,7 @@ cdef class KnowledgeBase:
# STEP 3: load aliases
cdef int64_t nr_aliases
reader.read_alias_length(&nr_aliases)
- self.initialize_aliases(nr_aliases)
+ self._initialize_aliases(nr_aliases)
cdef int64_t nr_candidates
cdef vector[int64_t] entry_indices
diff --git a/spacy/lang/bg/__init__.py b/spacy/lang/bg/__init__.py
index 559cc34c4..c9176b946 100644
--- a/spacy/lang/bg/__init__.py
+++ b/spacy/lang/bg/__init__.py
@@ -2,7 +2,8 @@ from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .lex_attrs import LEX_ATTRS
from ..tokenizer_exceptions import BASE_EXCEPTIONS
-
+from ..punctuation import COMBINING_DIACRITICS_TOKENIZER_INFIXES
+from ..punctuation import COMBINING_DIACRITICS_TOKENIZER_SUFFIXES
from ...language import Language, BaseDefaults
from ...attrs import LANG
from ...util import update_exc
@@ -16,6 +17,8 @@ class BulgarianDefaults(BaseDefaults):
stop_words = STOP_WORDS
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
+ suffixes = COMBINING_DIACRITICS_TOKENIZER_SUFFIXES
+ infixes = COMBINING_DIACRITICS_TOKENIZER_INFIXES
class Bulgarian(Language):
diff --git a/spacy/lang/ca/lemmatizer.py b/spacy/lang/ca/lemmatizer.py
index 2fd012912..0f15e6e65 100644
--- a/spacy/lang/ca/lemmatizer.py
+++ b/spacy/lang/ca/lemmatizer.py
@@ -72,10 +72,10 @@ class CatalanLemmatizer(Lemmatizer):
oov_forms.append(form)
if not forms:
forms.extend(oov_forms)
- if not forms and string in lookup_table.keys():
- forms.append(self.lookup_lemmatize(token)[0])
+
+ # use lookups, and fall back to the token itself
if not forms:
- forms.append(string)
+ forms.append(lookup_table.get(string, [string])[0])
forms = list(dict.fromkeys(forms))
self.cache[cache_key] = forms
return forms
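
For context (not part of the diff), the replacement line is the usual dict.get fallback: look the form up in the lookup table and fall back to the surface form itself. A tiny sketch with a hypothetical table entry (real tables ship with spacy-lookups-data):

    # Hypothetical entry for illustration only.
    lookup_table = {"cantava": ["cantar"]}

    def lemma_with_fallback(string: str) -> str:
        # Table values are lists of lemmas; default to the surface form itself.
        return lookup_table.get(string, [string])[0]

    assert lemma_with_fallback("cantava") == "cantar"
    assert lemma_with_fallback("desconegut") == "desconegut"
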
diff --git a/spacy/lang/char_classes.py b/spacy/lang/char_classes.py
index b15bb3cf3..1d204c46c 100644
--- a/spacy/lang/char_classes.py
+++ b/spacy/lang/char_classes.py
@@ -258,6 +258,10 @@ ALPHA = group_chars(
ALPHA_LOWER = group_chars(_lower + _uncased)
ALPHA_UPPER = group_chars(_upper + _uncased)
+_combining_diacritics = r"\u0300-\u036f"
+
+COMBINING_DIACRITICS = _combining_diacritics
+
_units = (
"km km² km³ m m² m³ dm dm² dm³ cm cm² cm³ mm mm² mm³ ha µm nm yd in ft "
"kg g mg µg t lb oz m/s km/h kmh mph hPa Pa mbar mb MB kb KB gb GB tb "
diff --git a/spacy/lang/fr/lemmatizer.py b/spacy/lang/fr/lemmatizer.py
index c6422cf96..a7cbe0bcf 100644
--- a/spacy/lang/fr/lemmatizer.py
+++ b/spacy/lang/fr/lemmatizer.py
@@ -53,11 +53,16 @@ class FrenchLemmatizer(Lemmatizer):
rules = rules_table.get(univ_pos, [])
string = string.lower()
forms = []
+ # first try lookup in table based on upos
if string in index:
forms.append(string)
self.cache[cache_key] = forms
return forms
+
+ # then add anything in the exceptions table
forms.extend(exceptions.get(string, []))
+
+ # if nothing found yet, use the rules
oov_forms = []
if not forms:
for old, new in rules:
@@ -69,12 +74,14 @@ class FrenchLemmatizer(Lemmatizer):
forms.append(form)
else:
oov_forms.append(form)
+
+ # if still nothing, add the oov forms from rules
if not forms:
forms.extend(oov_forms)
- if not forms and string in lookup_table.keys():
- forms.append(self.lookup_lemmatize(token)[0])
+
+ # use lookups, and fall back to the token itself
if not forms:
- forms.append(string)
+ forms.append(lookup_table.get(string, [string])[0])
forms = list(dict.fromkeys(forms))
self.cache[cache_key] = forms
return forms
diff --git a/spacy/lang/ko/__init__.py b/spacy/lang/ko/__init__.py
index 0e02e4a2d..1220aa141 100644
--- a/spacy/lang/ko/__init__.py
+++ b/spacy/lang/ko/__init__.py
@@ -18,34 +18,23 @@ DEFAULT_CONFIG = """
[nlp.tokenizer]
@tokenizers = "spacy.ko.KoreanTokenizer"
+mecab_args = ""
"""
@registry.tokenizers("spacy.ko.KoreanTokenizer")
-def create_tokenizer():
+def create_tokenizer(mecab_args: str):
def korean_tokenizer_factory(nlp):
- return KoreanTokenizer(nlp.vocab)
+ return KoreanTokenizer(nlp.vocab, mecab_args=mecab_args)
return korean_tokenizer_factory
class KoreanTokenizer(DummyTokenizer):
- def __init__(self, vocab: Vocab):
+ def __init__(self, vocab: Vocab, *, mecab_args: str = ""):
self.vocab = vocab
- self._mecab = try_mecab_import() # type: ignore[func-returns-value]
- self._mecab_tokenizer = None
-
- @property
- def mecab_tokenizer(self):
- # This is a property so that initializing a pipeline with blank:ko is
- # possible without actually requiring mecab-ko, e.g. to run
- # `spacy init vectors ko` for a pipeline that will have a different
- # tokenizer in the end. The languages need to match for the vectors
- # to be imported and there's no way to pass a custom config to
- # `init vectors`.
- if self._mecab_tokenizer is None:
- self._mecab_tokenizer = self._mecab("-F%f[0],%f[7]")
- return self._mecab_tokenizer
+ mecab = try_mecab_import()
+ self.mecab_tokenizer = mecab.Tagger(mecab_args)
def __reduce__(self):
return KoreanTokenizer, (self.vocab,)
@@ -68,13 +57,15 @@ class KoreanTokenizer(DummyTokenizer):
def detailed_tokens(self, text: str) -> Iterator[Dict[str, Any]]:
# POS tag[0], semantic class[1], jongseong (final consonant) flag[2], reading[3],
# type[4], first POS (start pos)[5], last POS (end pos)[6], expression[7], *
- for node in self.mecab_tokenizer.parse(text, as_nodes=True):
- if node.is_eos():
+ for line in self.mecab_tokenizer.parse(text).split("\n"):
+ if line == "EOS":
break
- surface = node.surface
- feature = node.feature
- tag, _, expr = feature.partition(",")
- lemma, _, remainder = expr.partition("/")
+ surface, _, expr = line.partition("\t")
+ features = expr.split("/")[0].split(",")
+ tag = features[0]
+ lemma = "*"
+ if len(features) >= 8:
+ lemma = features[7]
if lemma == "*":
lemma = surface
yield {"surface": surface, "lemma": lemma, "tag": tag}
@@ -97,20 +88,94 @@ class Korean(Language):
Defaults = KoreanDefaults
-def try_mecab_import() -> None:
+def try_mecab_import():
try:
- from natto import MeCab
+ import mecab_ko as MeCab
return MeCab
except ImportError:
raise ImportError(
'The Korean tokenizer ("spacy.ko.KoreanTokenizer") requires '
- "[mecab-ko](https://bitbucket.org/eunjeon/mecab-ko/src/master/README.md), "
- "[mecab-ko-dic](https://bitbucket.org/eunjeon/mecab-ko-dic), "
- "and [natto-py](https://github.com/buruzaemon/natto-py)"
+ "the python package `mecab-ko`: pip install mecab-ko"
) from None
+@registry.tokenizers("spacy.KoreanNattoTokenizer.v1")
+def create_natto_tokenizer():
+ def korean_natto_tokenizer_factory(nlp):
+ return KoreanNattoTokenizer(nlp.vocab)
+
+ return korean_natto_tokenizer_factory
+
+
+class KoreanNattoTokenizer(DummyTokenizer):
+ def __init__(self, vocab: Vocab):
+ self.vocab = vocab
+ self._mecab = self._try_mecab_import() # type: ignore[func-returns-value]
+ self._mecab_tokenizer = None
+
+ @property
+ def mecab_tokenizer(self):
+ # This is a property so that initializing a pipeline with blank:ko is
+ # possible without actually requiring mecab-ko, e.g. to run
+ # `spacy init vectors ko` for a pipeline that will have a different
+ # tokenizer in the end. The languages need to match for the vectors
+ # to be imported and there's no way to pass a custom config to
+ # `init vectors`.
+ if self._mecab_tokenizer is None:
+ self._mecab_tokenizer = self._mecab("-F%f[0],%f[7]")
+ return self._mecab_tokenizer
+
+ def __reduce__(self):
+ return KoreanNattoTokenizer, (self.vocab,)
+
+ def __call__(self, text: str) -> Doc:
+ dtokens = list(self.detailed_tokens(text))
+ surfaces = [dt["surface"] for dt in dtokens]
+ doc = Doc(self.vocab, words=surfaces, spaces=list(check_spaces(text, surfaces)))
+ for token, dtoken in zip(doc, dtokens):
+ first_tag, sep, eomi_tags = dtoken["tag"].partition("+")
+ token.tag_ = first_tag # stem or pre-final ending
+ if token.tag_ in TAG_MAP:
+ token.pos = TAG_MAP[token.tag_][POS]
+ else:
+ token.pos = X
+ token.lemma_ = dtoken["lemma"]
+ doc.user_data["full_tags"] = [dt["tag"] for dt in dtokens]
+ return doc
+
+ def detailed_tokens(self, text: str) -> Iterator[Dict[str, Any]]:
+ # POS tag[0], semantic class[1], jongseong (final consonant) flag[2], reading[3],
+ # type[4], first POS (start pos)[5], last POS (end pos)[6], expression[7], *
+ for node in self.mecab_tokenizer.parse(text, as_nodes=True):
+ if node.is_eos():
+ break
+ surface = node.surface
+ feature = node.feature
+ tag, _, expr = feature.partition(",")
+ lemma, _, remainder = expr.partition("/")
+ if lemma == "*" or lemma == "":
+ lemma = surface
+ yield {"surface": surface, "lemma": lemma, "tag": tag}
+
+ def score(self, examples):
+ validate_examples(examples, "KoreanTokenizer.score")
+ return Scorer.score_tokenization(examples)
+
+ def _try_mecab_import(self):
+ try:
+ from natto import MeCab
+
+ return MeCab
+ except ImportError:
+ raise ImportError(
+ 'The Korean Natto tokenizer ("spacy.ko.KoreanNattoTokenizer") requires '
+ "[mecab-ko](https://bitbucket.org/eunjeon/mecab-ko/src/master/README.md), "
+ "[mecab-ko-dic](https://bitbucket.org/eunjeon/mecab-ko-dic), "
+ "and [natto-py](https://github.com/buruzaemon/natto-py)"
+ ) from None
+
+
def check_spaces(text, tokens):
prev_end = -1
start = 0
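
A usage sketch, not taken from the patch: with the new default tokenizer, mecab_args comes from the [nlp.tokenizer] config block and is forwarded to mecab.Tagger(); the natto-py based tokenizer stays available under the registered name shown above. The override dicts below are illustrative.

    import spacy

    # Default tokenizer (requires: pip install mecab-ko); mecab_args is passed
    # through to mecab.Tagger(), so extra MeCab flags can be supplied here.
    nlp = spacy.blank("ko", config={"nlp": {"tokenizer": {"mecab_args": ""}}})

    # Opt back in to the previous natto-py based tokenizer by its registered name.
    nlp_natto = spacy.blank(
        "ko",
        config={"nlp": {"tokenizer": {"@tokenizers": "spacy.KoreanNattoTokenizer.v1"}}},
    )
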
diff --git a/spacy/lang/ko/punctuation.py b/spacy/lang/ko/punctuation.py
index 7f7b40c5b..f5f1c51da 100644
--- a/spacy/lang/ko/punctuation.py
+++ b/spacy/lang/ko/punctuation.py
@@ -3,7 +3,7 @@ from ..punctuation import TOKENIZER_INFIXES as BASE_TOKENIZER_INFIXES
_infixes = (
- ["·", "ㆍ", "\(", "\)"]
+ ["·", "ㆍ", r"\(", r"\)"]
+ [r"(?<=[0-9])~(?=[0-9-])"]
+ LIST_QUOTES
+ BASE_TOKENIZER_INFIXES
diff --git a/spacy/lang/la/__init__.py b/spacy/lang/la/__init__.py
new file mode 100644
index 000000000..15b87c5b9
--- /dev/null
+++ b/spacy/lang/la/__init__.py
@@ -0,0 +1,18 @@
+from ...language import Language, BaseDefaults
+from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
+from .stop_words import STOP_WORDS
+from .lex_attrs import LEX_ATTRS
+
+
+class LatinDefaults(BaseDefaults):
+ tokenizer_exceptions = TOKENIZER_EXCEPTIONS
+ stop_words = STOP_WORDS
+ lex_attr_getters = LEX_ATTRS
+
+
+class Latin(Language):
+ lang = "la"
+ Defaults = LatinDefaults
+
+
+__all__ = ["Latin"]
diff --git a/spacy/lang/la/lex_attrs.py b/spacy/lang/la/lex_attrs.py
new file mode 100644
index 000000000..9efb4dd3c
--- /dev/null
+++ b/spacy/lang/la/lex_attrs.py
@@ -0,0 +1,34 @@
+from ...attrs import LIKE_NUM
+import re
+
+# cf. Goyvaerts/Levithan 2009; case-insensitive, allows up to 4 repeated numerals (e.g. IIII)
+roman_numerals_compile = re.compile(
+ r"(?i)^(?=[MDCLXVI])M*(C[MD]|D?C{0,4})(X[CL]|L?X{0,4})(I[XV]|V?I{0,4})$"
+)
+
+_num_words = set(
+ """
+unus una unum duo duae tres tria quattuor quinque sex septem octo novem decem
+""".split()
+)
+
+_ordinal_words = set(
+ """
+primus prima primum secundus secunda secundum tertius tertia tertium
+""".split()
+)
+
+
+def like_num(text):
+ if text.isdigit():
+ return True
+ if roman_numerals_compile.match(text):
+ return True
+ if text.lower() in _num_words:
+ return True
+ if text.lower() in _ordinal_words:
+ return True
+ return False
+
+
+LEX_ATTRS = {LIKE_NUM: like_num}
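
Not part of the diff: a quick illustration of what the new like_num accepts, assuming the module is importable as spacy.lang.la.lex_attrs.

    from spacy.lang.la.lex_attrs import like_num

    assert like_num("17")
    assert like_num("MMXXII")   # Roman numeral
    assert like_num("xvii")     # the regex is case-insensitive
    assert like_num("septem")   # cardinal from _num_words
    assert like_num("tertium")  # ordinal from _ordinal_words
    assert not like_num("lorem")
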
diff --git a/spacy/lang/la/stop_words.py b/spacy/lang/la/stop_words.py
new file mode 100644
index 000000000..8b590bb67
--- /dev/null
+++ b/spacy/lang/la/stop_words.py
@@ -0,0 +1,37 @@
+# Corrected Perseus list, cf. https://wiki.digitalclassicist.org/Stopwords_for_Greek_and_Latin
+
+STOP_WORDS = set(
+ """
+ab ac ad adhuc aliqui aliquis an ante apud at atque aut autem
+
+cum cur
+
+de deinde dum
+
+ego enim ergo es est et etiam etsi ex
+
+fio
+
+haud hic
+
+iam idem igitur ille in infra inter interim ipse is ita
+
+magis modo mox
+
+nam ne nec necque neque nisi non nos
+
+o ob
+
+per possum post pro
+
+quae quam quare qui quia quicumque quidem quilibet quis quisnam quisquam quisque quisquis quo quoniam
+
+sed si sic sive sub sui sum super suus
+
+tam tamen trans tu tum
+
+ubi uel uero
+
+vel vero
+""".split()
+)
diff --git a/spacy/lang/la/tokenizer_exceptions.py b/spacy/lang/la/tokenizer_exceptions.py
new file mode 100644
index 000000000..060f6e085
--- /dev/null
+++ b/spacy/lang/la/tokenizer_exceptions.py
@@ -0,0 +1,76 @@
+from ..tokenizer_exceptions import BASE_EXCEPTIONS
+from ...symbols import ORTH
+from ...util import update_exc
+
+
+## TODO: Look into systematically handling u/v
+_exc = {
+ "mecum": [{ORTH: "me"}, {ORTH: "cum"}],
+ "tecum": [{ORTH: "te"}, {ORTH: "cum"}],
+ "nobiscum": [{ORTH: "nobis"}, {ORTH: "cum"}],
+ "vobiscum": [{ORTH: "vobis"}, {ORTH: "cum"}],
+ "uobiscum": [{ORTH: "uobis"}, {ORTH: "cum"}],
+}
+
+for orth in [
+ "A.",
+ "Agr.",
+ "Ap.",
+ "C.",
+ "Cn.",
+ "D.",
+ "F.",
+ "K.",
+ "L.",
+ "M'.",
+ "M.",
+ "Mam.",
+ "N.",
+ "Oct.",
+ "Opet.",
+ "P.",
+ "Paul.",
+ "Post.",
+ "Pro.",
+ "Q.",
+ "S.",
+ "Ser.",
+ "Sert.",
+ "Sex.",
+ "St.",
+ "Sta.",
+ "T.",
+ "Ti.",
+ "V.",
+ "Vol.",
+ "Vop.",
+ "U.",
+ "Uol.",
+ "Uop.",
+ "Ian.",
+ "Febr.",
+ "Mart.",
+ "Apr.",
+ "Mai.",
+ "Iun.",
+ "Iul.",
+ "Aug.",
+ "Sept.",
+ "Oct.",
+ "Nov.",
+ "Nou.",
+ "Dec.",
+ "Non.",
+ "Id.",
+ "A.D.",
+ "Coll.",
+ "Cos.",
+ "Ord.",
+ "Pl.",
+ "S.C.",
+ "Suff.",
+ "Trib.",
+]:
+ _exc[orth] = [{ORTH: orth}]
+
+TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
diff --git a/spacy/lang/lg/__init__.py b/spacy/lang/lg/__init__.py
new file mode 100644
index 000000000..6f7153fce
--- /dev/null
+++ b/spacy/lang/lg/__init__.py
@@ -0,0 +1,18 @@
+from .stop_words import STOP_WORDS
+from .lex_attrs import LEX_ATTRS
+from .punctuation import TOKENIZER_INFIXES
+from ...language import Language, BaseDefaults
+
+
+class LugandaDefaults(BaseDefaults):
+ lex_attr_getters = LEX_ATTRS
+ infixes = TOKENIZER_INFIXES
+ stop_words = STOP_WORDS
+
+
+class Luganda(Language):
+ lang = "lg"
+ Defaults = LugandaDefaults
+
+
+__all__ = ["Luganda"]
diff --git a/spacy/lang/lg/examples.py b/spacy/lang/lg/examples.py
new file mode 100644
index 000000000..5450c5520
--- /dev/null
+++ b/spacy/lang/lg/examples.py
@@ -0,0 +1,17 @@
+"""
+Example sentences to test spaCy and its language models.
+
+>>> from spacy.lang.lg.examples import sentences
+>>> docs = nlp.pipe(sentences)
+"""
+
+sentences = [
+ "Mpa ebyafaayo ku byalo Nakatu ne Nkajja",
+ "Okuyita Ttembo kitegeeza kugwa ddalu",
+ "Ekifumu kino kyali kya mulimu ki?",
+ "Ekkovu we liyise wayitibwa mukululo",
+ "Akola mulimu ki oguvaamu ssente?",
+ "Emisumaali egikomerera embaawo giyitibwa nninga",
+ "Abooluganda ab’emmamba ababiri",
+ "Ekisaawe ky'ebyenjigiriza kya mugaso nnyo",
+]
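
Not in the diff: once these modules are in place, the two new languages can be instantiated as blank pipelines like any other, e.g. to add and train components on top.

    import spacy

    nlp_la = spacy.blank("la")  # Latin
    nlp_lg = spacy.blank("lg")  # Luganda

    doc = nlp_lg("Akola mulimu ki oguvaamu ssente?")
    print([t.text for t in doc])
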
diff --git a/spacy/lang/lg/lex_attrs.py b/spacy/lang/lg/lex_attrs.py
new file mode 100644
index 000000000..3c60e3d0e
--- /dev/null
+++ b/spacy/lang/lg/lex_attrs.py
@@ -0,0 +1,95 @@
+from ...attrs import LIKE_NUM
+
+_num_words = [
+ "nnooti", # Zero
+ "zeero", # zero
+ "emu", # one
+ "bbiri", # two
+ "ssatu", # three
+ "nnya", # four
+ "ttaano", # five
+ "mukaaga", # six
+ "musanvu", # seven
+ "munaana", # eight
+ "mwenda", # nine
+ "kkumi", # ten
+ "kkumi n'emu", # eleven
+ "kkumi na bbiri", # twelve
+ "kkumi na ssatu", # thirteen
+ "kkumi na nnya", # forteen
+ "kkumi na ttaano", # fifteen
+ "kkumi na mukaaga", # sixteen
+ "kkumi na musanvu", # seventeen
+ "kkumi na munaana", # eighteen
+ "kkumi na mwenda", # nineteen
+ "amakumi abiri", # twenty
+ "amakumi asatu", # thirty
+ "amakumi ana", # forty
+ "amakumi ataano", # fifty
+ "nkaaga", # sixty
+ "nsanvu", # seventy
+ "kinaana", # eighty
+ "kyenda", # ninety
+ "kikumi", # hundred
+ "lukumi", # thousand
+ "kakadde", # million
+ "kawumbi", # billion
+ "kase", # trillion
+ "katabalika", # quadrillion
+ "keesedde", # gajillion
+ "kafukunya", # bazillion
+ "ekisooka", # first
+ "ekyokubiri", # second
+ "ekyokusatu", # third
+ "ekyokuna", # fourth
+ "ekyokutaano", # fifith
+ "ekyomukaaga", # sixth
+ "ekyomusanvu", # seventh
+ "eky'omunaana", # eighth
+ "ekyomwenda", # nineth
+ "ekyekkumi", # tenth
+ "ekyekkumi n'ekimu", # eleventh
+ "ekyekkumi n'ebibiri", # twelveth
+ "ekyekkumi n'ebisatu", # thirteenth
+ "ekyekkumi n'ebina", # fourteenth
+ "ekyekkumi n'ebitaano", # fifteenth
+ "ekyekkumi n'omukaaga", # sixteenth
+ "ekyekkumi n'omusanvu", # seventeenth
+ "ekyekkumi n'omunaana", # eigteenth
+ "ekyekkumi n'omwenda", # nineteenth
+ "ekyamakumi abiri", # twentieth
+ "ekyamakumi asatu", # thirtieth
+ "ekyamakumi ana", # fortieth
+ "ekyamakumi ataano", # fiftieth
+ "ekyenkaaga", # sixtieth
+ "ekyensanvu", # seventieth
+ "ekyekinaana", # eightieth
+ "ekyekyenda", # ninetieth
+ "ekyekikumi", # hundredth
+ "ekyolukumi", # thousandth
+ "ekyakakadde", # millionth
+ "ekyakawumbi", # billionth
+ "ekyakase", # trillionth
+ "ekyakatabalika", # quadrillionth
+ "ekyakeesedde", # gajillionth
+ "ekyakafukunya", # bazillionth
+]
+
+
+def like_num(text):
+ if text.startswith(("+", "-", "±", "~")):
+ text = text[1:]
+ text = text.replace(",", "").replace(".", "")
+ if text.isdigit():
+ return True
+ if text.count("/") == 1:
+ num, denom = text.split("/")
+ if num.isdigit() and denom.isdigit():
+ return True
+ text_lower = text.lower()
+ if text_lower in _num_words:
+ return True
+ return False
+
+
+LEX_ATTRS = {LIKE_NUM: like_num}
diff --git a/spacy/lang/lg/punctuation.py b/spacy/lang/lg/punctuation.py
new file mode 100644
index 000000000..5d3eb792e
--- /dev/null
+++ b/spacy/lang/lg/punctuation.py
@@ -0,0 +1,19 @@
+from ..char_classes import LIST_ELLIPSES, LIST_ICONS, HYPHENS
+from ..char_classes import CONCAT_QUOTES, ALPHA_LOWER, ALPHA_UPPER, ALPHA
+
+_infixes = (
+ LIST_ELLIPSES
+ + LIST_ICONS
+ + [
+ r"(?<=[0-9])[+\-\*^](?=[0-9-])",
+ r"(?<=[{al}{q}])\.(?=[{au}{q}])".format(
+ al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES
+ ),
+ r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
+ r"(?<=[{a}0-9])(?:{h})(?=[{a}])".format(a=ALPHA, h=HYPHENS),
+ r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA),
+ ]
+)
+
+
+TOKENIZER_INFIXES = _infixes
diff --git a/spacy/lang/lg/stop_words.py b/spacy/lang/lg/stop_words.py
new file mode 100644
index 000000000..7bad59344
--- /dev/null
+++ b/spacy/lang/lg/stop_words.py
@@ -0,0 +1,19 @@
+STOP_WORDS = set(
+ """
+abadde abalala abamu abangi abava ajja ali alina ani anti ateekeddwa atewamu
+atya awamu aweebwa ayinza ba baali babadde babalina bajja
+bajjanewankubade bali balina bandi bangi bano bateekeddwa baweebwa bayina bebombi beera bibye
+bimu bingi bino bo bokka bonna buli bulijjo bulungi bwabwe bwaffe bwayo bwe bwonna bya byabwe
+byaffe byebimu byonna ddaa ddala ddi e ebimu ebiri ebweruobulungi ebyo edda ejja ekirala ekyo
+endala engeri ennyo era erimu erina ffe ffenna ga gujja gumu gunno guno gwa gwe kaseera kati
+kennyini ki kiki kikino kikye kikyo kino kirungi kki ku kubangabyombi kubangaolwokuba kudda
+kuva kuwa kwegamba kyaffe kye kyekimuoyo kyekyo kyonna leero liryo lwa lwaki lyabwezaabwe
+lyaffe lyange mbadde mingi mpozzi mu mulinaoyina munda mwegyabwe nolwekyo nabadde nabo nandiyagadde
+nandiye nanti naye ne nedda neera nga nnyingi nnyini nnyinza nnyo nti nyinza nze oba ojja okudda
+okugenda okuggyako okutuusa okuva okuwa oli olina oluvannyuma olwekyobuva omuli ono osobola otya
+oyina oyo seetaaga si sinakindi singa talina tayina tebaali tebaalina tebayina terina tetulina
+tetuteekeddwa tewali teyalina teyayina tolina tu tuyina tulina tuyina twafuna twetaaga wa wabula
+wabweru wadde waggulunnina wakati waliwobangi waliyo wandi wange wano wansi weebwa yabadde yaffe
+ye yenna yennyini yina yonna ziba zijja zonna
+""".split()
+)
diff --git a/spacy/lang/nl/syntax_iterators.py b/spacy/lang/nl/syntax_iterators.py
index 1ab5e7cff..be9beabe6 100644
--- a/spacy/lang/nl/syntax_iterators.py
+++ b/spacy/lang/nl/syntax_iterators.py
@@ -40,6 +40,7 @@ def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]:
span_label = doc.vocab.strings.add("NP")
# Only NOUNS and PRONOUNS matter
+ end_span = -1
for i, word in enumerate(filter(lambda x: x.pos in [PRON, NOUN], doclike)):
# For NOUNS
# Pick children from syntactic parse (only those with certain dependencies)
@@ -58,15 +59,17 @@ def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]:
children_i = [c.i for c in children] + [word.i]
start_span = min(children_i)
- end_span = max(children_i) + 1
- yield start_span, end_span, span_label
+ if start_span >= end_span:
+ end_span = max(children_i) + 1
+ yield start_span, end_span, span_label
# PRONOUNS only if it is the subject of a verb
elif word.pos == PRON:
if word.dep in pronoun_deps:
start_span = word.i
- end_span = word.i + 1
- yield start_span, end_span, span_label
+ if start_span >= end_span:
+ end_span = word.i + 1
+ yield start_span, end_span, span_label
SYNTAX_ITERATORS = {"noun_chunks": noun_chunks}
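
For readers of the hunk above (not part of the patch): end_span now remembers where the last yielded chunk stopped, and a new candidate is only yielded when its start is at or beyond that point, so the iterator can no longer produce overlapping or backward chunks. A stripped-down sketch of the same guard:

    def non_overlapping(candidates):
        # candidates: iterable of (start, end) pairs in document order
        end_span = -1
        for start_span, candidate_end in candidates:
            if start_span >= end_span:
                end_span = candidate_end
                yield start_span, end_span

    # The second candidate starts inside the first one and is dropped.
    print(list(non_overlapping([(0, 3), (1, 4), (5, 7)])))  # [(0, 3), (5, 7)]
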
diff --git a/spacy/lang/punctuation.py b/spacy/lang/punctuation.py
index e712e71d6..a1cfe6224 100644
--- a/spacy/lang/punctuation.py
+++ b/spacy/lang/punctuation.py
@@ -1,5 +1,5 @@
from .char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES, LIST_CURRENCY
-from .char_classes import LIST_ICONS, HYPHENS, CURRENCY, UNITS
+from .char_classes import LIST_ICONS, HYPHENS, CURRENCY, UNITS, COMBINING_DIACRITICS
from .char_classes import CONCAT_QUOTES, ALPHA_LOWER, ALPHA_UPPER, ALPHA, PUNCT
@@ -44,3 +44,23 @@ TOKENIZER_INFIXES = (
r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA),
]
)
+
+
+# Some languages, e.g. those written with the Cyrillic alphabet, permit the use of
+# combining diacritics to mark stressed syllables in words where stress is
+# distinctive. Such languages should use the COMBINING_DIACRITICS... suffix and
+# infix regex lists in place of the standard ones.
+COMBINING_DIACRITICS_TOKENIZER_SUFFIXES = list(TOKENIZER_SUFFIXES) + [
+ r"(?<=[{a}][{d}])\.".format(a=ALPHA, d=COMBINING_DIACRITICS),
+]
+
+COMBINING_DIACRITICS_TOKENIZER_INFIXES = list(TOKENIZER_INFIXES) + [
+ r"(?<=[{al}][{d}])\.(?=[{au}{q}])".format(
+ al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES, d=COMBINING_DIACRITICS
+ ),
+ r"(?<=[{a}][{d}]),(?=[{a}])".format(a=ALPHA, d=COMBINING_DIACRITICS),
+ r"(?<=[{a}][{d}])(?:{h})(?=[{a}])".format(
+ a=ALPHA, d=COMBINING_DIACRITICS, h=HYPHENS
+ ),
+ r"(?<=[{a}][{d}])[:<>=/](?=[{a}])".format(a=ALPHA, d=COMBINING_DIACRITICS),
+]
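
A minimal check, not part of the patch, of why the extra infix patterns are needed: when a stressed final vowel carries a combining accent (U+0301), the character immediately before the punctuation is a combining mark rather than a letter, so the base rules' lookbehinds do not fire. The ALPHA stand-in below is a simplification of spaCy's character classes.

    import re

    ALPHA = r"\w"  # simplified stand-in for spaCy's ALPHA character class
    COMBINING_DIACRITICS = r"\u0300-\u036f"

    base_comma_infix = r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA)
    diacritic_comma_infix = r"(?<=[{a}][{d}]),(?=[{a}])".format(
        a=ALPHA, d=COMBINING_DIACRITICS
    )

    # "слово" with a combining acute accent on the final vowel, then a comma.
    text = "слово\u0301,приклад"
    assert re.search(base_comma_infix, text) is None
    assert re.search(diacritic_comma_infix, text) is not None
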
diff --git a/spacy/lang/ru/__init__.py b/spacy/lang/ru/__init__.py
index 5d31d8ea2..7d17628c4 100644
--- a/spacy/lang/ru/__init__.py
+++ b/spacy/lang/ru/__init__.py
@@ -5,6 +5,8 @@ from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .lex_attrs import LEX_ATTRS
from .lemmatizer import RussianLemmatizer
+from ..punctuation import COMBINING_DIACRITICS_TOKENIZER_INFIXES
+from ..punctuation import COMBINING_DIACRITICS_TOKENIZER_SUFFIXES
from ...language import Language, BaseDefaults
@@ -12,6 +14,8 @@ class RussianDefaults(BaseDefaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
+ suffixes = COMBINING_DIACRITICS_TOKENIZER_SUFFIXES
+ infixes = COMBINING_DIACRITICS_TOKENIZER_INFIXES
class Russian(Language):
@@ -24,7 +28,7 @@ class Russian(Language):
assigns=["token.lemma"],
default_config={
"model": None,
- "mode": "pymorphy2",
+ "mode": "pymorphy3",
"overwrite": False,
"scorer": {"@scorers": "spacy.lemmatizer_scorer.v1"},
},
diff --git a/spacy/lang/ru/lemmatizer.py b/spacy/lang/ru/lemmatizer.py
index 85180b1e4..720d3a8cb 100644
--- a/spacy/lang/ru/lemmatizer.py
+++ b/spacy/lang/ru/lemmatizer.py
@@ -19,7 +19,7 @@ class RussianLemmatizer(Lemmatizer):
model: Optional[Model],
name: str = "lemmatizer",
*,
- mode: str = "pymorphy2",
+ mode: str = "pymorphy3",
overwrite: bool = False,
scorer: Optional[Callable] = lemmatizer_score,
) -> None:
@@ -33,6 +33,16 @@ class RussianLemmatizer(Lemmatizer):
) from None
if getattr(self, "_morph", None) is None:
self._morph = MorphAnalyzer()
+ elif mode == "pymorphy3":
+ try:
+ from pymorphy3 import MorphAnalyzer
+ except ImportError:
+ raise ImportError(
+ "The Russian lemmatizer mode 'pymorphy3' requires the "
+ "pymorphy3 library. Install it with: pip install pymorphy3"
+ ) from None
+ if getattr(self, "_morph", None) is None:
+ self._morph = MorphAnalyzer()
super().__init__(
vocab, model, name, mode=mode, overwrite=overwrite, scorer=scorer
)
@@ -104,6 +114,9 @@ class RussianLemmatizer(Lemmatizer):
return [analyses[0].normal_form]
return [string]
+ def pymorphy3_lemmatize(self, token: Token) -> List[str]:
+ return self.pymorphy2_lemmatize(token)
+
def oc2ud(oc_tag: str) -> Tuple[str, Dict[str, str]]:
gram_map = {
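
A usage sketch (not from the diff): pymorphy3 is now the default mode for the Russian and Ukrainian lemmatizers, with pymorphy2 still selectable through the mode setting. Assuming pymorphy3 (and, for Ukrainian, pymorphy3-dicts-uk) is installed:

    import spacy

    nlp = spacy.blank("ru")
    # "pymorphy3" is now the default mode; requires: pip install pymorphy3
    nlp.add_pipe("lemmatizer", config={"mode": "pymorphy3"})

    # The previous behaviour remains available:
    # nlp.add_pipe("lemmatizer", name="lemmatizer_pm2", config={"mode": "pymorphy2"})
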
diff --git a/spacy/lang/sl/__init__.py b/spacy/lang/sl/__init__.py
index 9ddd676bf..0070e9fa1 100644
--- a/spacy/lang/sl/__init__.py
+++ b/spacy/lang/sl/__init__.py
@@ -1,9 +1,17 @@
+from .lex_attrs import LEX_ATTRS
+from .punctuation import TOKENIZER_INFIXES, TOKENIZER_SUFFIXES, TOKENIZER_PREFIXES
from .stop_words import STOP_WORDS
+from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from ...language import Language, BaseDefaults
class SlovenianDefaults(BaseDefaults):
stop_words = STOP_WORDS
+ tokenizer_exceptions = TOKENIZER_EXCEPTIONS
+ prefixes = TOKENIZER_PREFIXES
+ infixes = TOKENIZER_INFIXES
+ suffixes = TOKENIZER_SUFFIXES
+ lex_attr_getters = LEX_ATTRS
class Slovenian(Language):
diff --git a/spacy/lang/sl/lex_attrs.py b/spacy/lang/sl/lex_attrs.py
new file mode 100644
index 000000000..958152e37
--- /dev/null
+++ b/spacy/lang/sl/lex_attrs.py
@@ -0,0 +1,145 @@
+from ...attrs import LIKE_NUM
+from ...attrs import IS_CURRENCY
+import unicodedata
+
+
+_num_words = set(
+ """
+ nula ničla nič ena dva tri štiri pet šest sedem osem
+ devet deset enajst dvanajst trinajst štirinajst petnajst
+ šestnajst sedemnajst osemnajst devetnajst dvajset trideset štirideset
+ petdeset šestdest sedemdeset osemdeset devedeset sto tisoč
+ milijon bilijon trilijon kvadrilijon nešteto
+
+ en eden enega enemu ennem enim enih enima enimi ene eni eno
+ dveh dvema dvem dvoje trije treh trem tremi troje štirje štirih štirim štirimi
+ petih petim petimi šestih šestim šestimi sedmih sedmim sedmimi osmih osmim osmimi
+ devetih devetim devetimi desetih desetim desetimi enajstih enajstim enajstimi
+ dvanajstih dvanajstim dvanajstimi trinajstih trinajstim trinajstimi
+ šestnajstih šestnajstim šestnajstimi petnajstih petnajstim petnajstimi
+ sedemnajstih sedemnajstim sedemnajstimi osemnajstih osemnajstim osemnajstimi
+ devetnajstih devetnajstim devetnajstimi dvajsetih dvajsetim dvajsetimi
+ """.split()
+)
+
+_ordinal_words = set(
+ """
+ prvi drugi tretji četrti peti šesti sedmi osmi
+ deveti deseti enajsti dvanajsti trinajsti štirinajsti
+ petnajsti šestnajsti sedemnajsti osemnajsti devetnajsti
+ dvajseti trideseti štirideseti petdeseti šestdeseti sedemdeseti
+ osemdeseti devetdeseti stoti tisoči milijonti bilijonti
+ trilijonti kvadrilijonti nešteti
+
+ prva druga tretja četrta peta šesta sedma osma
+ deveta deseta enajsta dvanajsta trinajsta štirnajsta
+ petnajsta šestnajsta sedemnajsta osemnajsta devetnajsta
+ dvajseta trideseta štirideseta petdeseta šestdeseta sedemdeseta
+ osemdeseta devetdeseta stota tisoča milijonta bilijonta
+ trilijonta kvadrilijonta nešteta
+
+ prvo drugo tretje četrto peto šestro sedmo osmo
+ deveto deseto enajsto dvanajsto trinajsto štirnajsto
+ petnajsto šestnajsto sedemnajsto osemnajsto devetnajsto
+ dvajseto trideseto štirideseto petdeseto šestdeseto sedemdeseto
+ osemdeseto devetdeseto stoto tisočo milijonto bilijonto
+ trilijonto kvadrilijonto nešteto
+
+ prvega drugega tretjega četrtega petega šestega sedmega osmega
+ devega desetega enajstega dvanajstega trinajstega štirnajstega
+ petnajstega šestnajstega sedemnajstega osemnajstega devetnajstega
+ dvajsetega tridesetega štiridesetega petdesetega šestdesetega sedemdesetega
+ osemdesetega devetdesetega stotega tisočega milijontega bilijontega
+ trilijontega kvadrilijontega neštetega
+
+ prvemu drugemu tretjemu četrtemu petemu šestemu sedmemu osmemu devetemu desetemu
+ enajstemu dvanajstemu trinajstemu štirnajstemu petnajstemu šestnajstemu sedemnajstemu
+ osemnajstemu devetnajstemu dvajsetemu tridesetemu štiridesetemu petdesetemu šestdesetemu
+ sedemdesetemu osemdesetemu devetdesetemu stotemu tisočemu milijontemu bilijontemu
+ trilijontemu kvadrilijontemu neštetemu
+
+ prvem drugem tretjem četrtem petem šestem sedmem osmem devetem desetem
+ enajstem dvanajstem trinajstem štirnajstem petnajstem šestnajstem sedemnajstem
+ osemnajstem devetnajstem dvajsetem tridesetem štiridesetem petdesetem šestdesetem
+ sedemdesetem osemdesetem devetdesetem stotem tisočem milijontem bilijontem
+ trilijontem kvadrilijontem neštetem
+
+ prvim drugim tretjim četrtim petim šestim sedtim osmim devetim desetim
+ enajstim dvanajstim trinajstim štirnajstim petnajstim šestnajstim sedemnajstim
+ osemnajstim devetnajstim dvajsetim tridesetim štiridesetim petdesetim šestdesetim
+ sedemdesetim osemdesetim devetdesetim stotim tisočim milijontim bilijontim
+ trilijontim kvadrilijontim neštetim
+
+ prvih drugih tretjih četrthih petih šestih sedmih osmih deveth desetih
+ enajstih dvanajstih trinajstih štirnajstih petnajstih šestnajstih sedemnajstih
+ osemnajstih devetnajstih dvajsetih tridesetih štiridesetih petdesetih šestdesetih
+ sedemdesetih osemdesetih devetdesetih stotih tisočih milijontih bilijontih
+ trilijontih kvadrilijontih nešteth
+
+ prvima drugima tretjima četrtima petima šestima sedmima osmima devetima desetima
+ enajstima dvanajstima trinajstima štirnajstima petnajstima šestnajstima sedemnajstima
+ osemnajstima devetnajstima dvajsetima tridesetima štiridesetima petdesetima šestdesetima
+ sedemdesetima osemdesetima devetdesetima stotima tisočima milijontima bilijontima
+ trilijontima kvadrilijontima neštetima
+
+ prve druge četrte pete šeste sedme osme devete desete
+ enajste dvanajste trinajste štirnajste petnajste šestnajste sedemnajste
+ osemnajste devetnajste dvajsete tridesete štiridesete petdesete šestdesete
+ sedemdesete osemdesete devetdesete stote tisoče milijonte bilijonte
+ trilijonte kvadrilijonte neštete
+
+ prvimi drugimi tretjimi četrtimi petimi šestimi sedtimi osmimi devetimi desetimi
+ enajstimi dvanajstimi trinajstimi štirnajstimi petnajstimi šestnajstimi sedemnajstimi
+ osemnajstimi devetnajstimi dvajsetimi tridesetimi štiridesetimi petdesetimi šestdesetimi
+ sedemdesetimi osemdesetimi devetdesetimi stotimi tisočimi milijontimi bilijontimi
+ trilijontimi kvadrilijontimi neštetimi
+ """.split()
+)
+
+_currency_words = set(
+ """
+ evro evra evru evrom evrov evroma evrih evrom evre evri evr eur
+ cent centa centu cenom centov centoma centih centom cente centi
+ dolar dolarja dolarji dolarju dolarjem dolarjev dolarjema dolarjih dolarje usd
+ tolar tolarja tolarji tolarju tolarjem tolarjev tolarjema tolarjih tolarje tol
+ dinar dinarja dinarji dinarju dinarjem dinarjev dinarjema dinarjih dinarje din
+ funt funta funti funtu funtom funtov funtoma funtih funte gpb
+ forint forinta forinti forintu forintom forintov forintoma forintih forinte
+ zlot zlota zloti zlotu zlotom zlotov zlotoma zlotih zlote
+ rupij rupija rupiji rupiju rupijem rupijev rupijema rupijih rupije
+ jen jena jeni jenu jenom jenov jenoma jenih jene
+ kuna kuni kune kuno kun kunama kunah kunam kunami
+ marka marki marke markama markah markami
+ """.split()
+)
+
+
+def like_num(text):
+ if text.startswith(("+", "-", "±", "~")):
+ text = text[1:]
+ text = text.replace(",", "").replace(".", "")
+ if text.isdigit():
+ return True
+ if text.count("/") == 1:
+ num, denom = text.split("/")
+ if num.isdigit() and denom.isdigit():
+ return True
+ text_lower = text.lower()
+ if text_lower in _num_words:
+ return True
+ if text_lower in _ordinal_words:
+ return True
+ return False
+
+
+def is_currency(text):
+ text_lower = text.lower()
+ if text in _currency_words:
+ return True
+ for char in text:
+ if unicodedata.category(char) != "Sc":
+ return False
+ return True
+
+
+LEX_ATTRS = {LIKE_NUM: like_num, IS_CURRENCY: is_currency}
diff --git a/spacy/lang/sl/punctuation.py b/spacy/lang/sl/punctuation.py
new file mode 100644
index 000000000..b6ca1830e
--- /dev/null
+++ b/spacy/lang/sl/punctuation.py
@@ -0,0 +1,84 @@
+from ..char_classes import (
+ LIST_ELLIPSES,
+ LIST_ICONS,
+ HYPHENS,
+ LIST_PUNCT,
+ LIST_QUOTES,
+ CURRENCY,
+ UNITS,
+ PUNCT,
+ LIST_CURRENCY,
+ CONCAT_QUOTES,
+)
+from ..char_classes import CONCAT_QUOTES, ALPHA_LOWER, ALPHA_UPPER, ALPHA
+from ..char_classes import merge_chars
+from ..punctuation import TOKENIZER_PREFIXES as BASE_TOKENIZER_PREFIXES
+
+
+INCLUDE_SPECIAL = ["\\+", "\\/", "\\•", "\\¯", "\\=", "\\×"] + HYPHENS.split("|")
+
+_prefixes = INCLUDE_SPECIAL + BASE_TOKENIZER_PREFIXES
+
+_suffixes = (
+ INCLUDE_SPECIAL
+ + LIST_PUNCT
+ + LIST_ELLIPSES
+ + LIST_QUOTES
+ + LIST_ICONS
+ + [
+ r"(?<=°[FfCcKk])\.",
+ r"(?<=[0-9])(?:{c})".format(c=CURRENCY),
+ r"(?<=[0-9])(?:{u})".format(u=UNITS),
+ r"(?<=[{al}{e}{p}(?:{q})])\.".format(
+ al=ALPHA_LOWER, e=r"%²\-\+", q=CONCAT_QUOTES, p=PUNCT
+ ),
+ r"(?<=[{au}][{au}])\.".format(au=ALPHA_UPPER),
+ # split initials like J.K. Rowling
+ r"(?<=[A-Z]\.)(?:[A-Z].)",
+ ]
+)
+
+# a list of all suffixes following a hyphen that shouldn't be split off (e.g. BTC-jev)
+# source: Obeliks tokenizer - https://github.com/clarinsi/obeliks/blob/master/obeliks/res/TokRulesPart1.txt
+CONCAT_QUOTES = CONCAT_QUOTES.replace("'", "")
+HYPHENS_PERMITTED = (
+ "((a)|(evemu)|(evskega)|(i)|(jevega)|(jevska)|(jevskimi)|(jinemu)|(oma)|(ovim)|"
+ "(ovski)|(e)|(evi)|(evskem)|(ih)|(jevem)|(jevske)|(jevsko)|(jini)|(ov)|(ovima)|"
+ "(ovskih)|(em)|(evih)|(evskemu)|(ja)|(jevemu)|(jevskega)|(ji)|(jinih)|(ova)|"
+ "(ovimi)|(ovskim)|(ema)|(evim)|(evski)|(je)|(jevi)|(jevskem)|(jih)|(jinim)|"
+ "(ove)|(ovo)|(ovskima)|(ev)|(evima)|(evskih)|(jem)|(jevih)|(jevskemu)|(jin)|"
+ "(jinima)|(ovega)|(ovska)|(ovskimi)|(eva)|(evimi)|(evskim)|(jema)|(jevim)|"
+ "(jevski)|(jina)|(jinimi)|(ovem)|(ovske)|(ovsko)|(eve)|(evo)|(evskima)|(jev)|"
+ "(jevima)|(jevskih)|(jine)|(jino)|(ovemu)|(ovskega)|(u)|(evega)|(evska)|"
+ "(evskimi)|(jeva)|(jevimi)|(jevskim)|(jinega)|(ju)|(ovi)|(ovskem)|(evem)|"
+ "(evske)|(evsko)|(jeve)|(jevo)|(jevskima)|(jinem)|(om)|(ovih)|(ovskemu)|"
+ "(ovec)|(ovca)|(ovcu)|(ovcem)|(ovcev)|(ovcema)|(ovcih)|(ovci)|(ovce)|(ovcimi)|"
+ "(evec)|(evca)|(evcu)|(evcem)|(evcev)|(evcema)|(evcih)|(evci)|(evce)|(evcimi)|"
+ "(jevec)|(jevca)|(jevcu)|(jevcem)|(jevcev)|(jevcema)|(jevcih)|(jevci)|(jevce)|"
+ "(jevcimi)|(ovka)|(ovke)|(ovki)|(ovko)|(ovk)|(ovkama)|(ovkah)|(ovkam)|(ovkami)|"
+ "(evka)|(evke)|(evki)|(evko)|(evk)|(evkama)|(evkah)|(evkam)|(evkami)|(jevka)|"
+ "(jevke)|(jevki)|(jevko)|(jevk)|(jevkama)|(jevkah)|(jevkam)|(jevkami)|(timi)|"
+ "(im)|(ima)|(a)|(imi)|(e)|(o)|(ega)|(ti)|(em)|(tih)|(emu)|(tim)|(i)|(tima)|"
+ "(ih)|(ta)|(te)|(to)|(tega)|(tem)|(temu))"
+)
+
+_infixes = (
+ LIST_ELLIPSES
+ + LIST_ICONS
+ + [
+ r"(?<=[0-9])[+\-\*^](?=[0-9-])",
+ r"(?<=[{al}{q}])\.(?=[{au}{q}])".format(
+ al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES
+ ),
+ r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
+ r"(?<=[{a}0-9])(?:{h})(?!{hp}$)(?=[{a}])".format(
+ a=ALPHA, h=HYPHENS, hp=HYPHENS_PERMITTED
+ ),
+ r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA),
+ ]
+)
+
+
+TOKENIZER_PREFIXES = _prefixes
+TOKENIZER_SUFFIXES = _suffixes
+TOKENIZER_INFIXES = _infixes
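
Not part of the patch: the negative lookahead means a hyphen followed by one of the permitted endings at the end of the token is not treated as an infix, so case endings attached with a hyphen stay on the token. Expected behaviour, assuming a blank Slovenian pipeline (untested sketch):

    import spacy

    nlp = spacy.blank("sl")
    # "-jev" is in HYPHENS_PERMITTED, so the token is expected to stay whole.
    print([t.text for t in nlp("BTC-jev")])          # expected: ['BTC-jev']
    # An ordinary hyphenated compound should still be split on the hyphen.
    print([t.text for t in nlp("severno-vzhodni")])  # expected: ['severno', '-', 'vzhodni']
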
diff --git a/spacy/lang/sl/stop_words.py b/spacy/lang/sl/stop_words.py
index c9004ed5d..8491efcb5 100644
--- a/spacy/lang/sl/stop_words.py
+++ b/spacy/lang/sl/stop_words.py
@@ -1,326 +1,84 @@
# Source: https://github.com/stopwords-iso/stopwords-sl
-# Removed various words that are not normally considered stop words, such as months.
STOP_WORDS = set(
"""
-a
-ali
-b
-bi
-bil
-bila
-bile
-bili
-bilo
-biti
-blizu
-bo
-bodo
-bolj
-bom
-bomo
-boste
-bova
-boš
-brez
-c
-cel
-cela
-celi
-celo
-d
-da
-daleč
-dan
-danes
-do
-dober
-dobra
-dobri
-dobro
-dokler
-dol
-dovolj
-e
-eden
-en
-ena
-ene
-eni
-enkrat
-eno
-etc.
+a ali
+
+b bi bil bila bile bili bilo biti blizu bo bodo bojo bolj bom bomo
+boste bova boš brez
+
+c cel cela celi celo
+
+č če često četrta četrtek četrti četrto čez čigav
+
+d da daleč dan danes datum deset deseta deseti deseto devet
+deveta deveti deveto do dober dobra dobri dobro dokler dol dolg
+dolga dolgi dovolj drug druga drugi drugo dva dve
+
+e eden en ena ene eni enkrat eno etc.
+
f
-g
-g.
-ga
-ga.
-gor
-gospa
-gospod
-h
-halo
-i
-idr.
-ii
-iii
-in
-iv
-ix
-iz
-j
-jaz
-je
-ji
-jih
-jim
-jo
-k
-kadarkoli
-kaj
-kajti
-kako
-kakor
-kamor
-kamorkoli
-kar
-karkoli
-katerikoli
-kdaj
-kdo
-kdorkoli
-ker
-ki
-kje
-kjer
-kjerkoli
-ko
-koderkoli
-koga
-komu
-kot
-l
-le
-lep
-lepa
-lepe
-lepi
-lepo
-m
-manj
-me
-med
-medtem
-mene
-mi
-midva
-midve
-mnogo
-moj
-moja
-moje
-mora
-morajo
-moram
-moramo
-morate
-moraš
-morem
-mu
-n
-na
-nad
-naj
-najina
-najino
-najmanj
-naju
-največ
-nam
-nas
-nato
-nazaj
-naš
-naša
-naše
-ne
-nedavno
-nek
-neka
-nekaj
-nekatere
-nekateri
-nekatero
-nekdo
-neke
-nekega
-neki
-nekje
-neko
-nekoga
-nekoč
-ni
-nikamor
-nikdar
-nikjer
-nikoli
-nič
-nje
-njega
-njegov
-njegova
-njegovo
-njej
-njemu
-njen
-njena
-njeno
-nji
-njih
-njihov
-njihova
-njihovo
-njiju
-njim
-njo
-njun
-njuna
-njuno
-no
-nocoj
-npr.
-o
-ob
-oba
-obe
-oboje
-od
-okoli
-on
-onadva
-one
-oni
-onidve
-oz.
-p
-pa
-po
-pod
-pogosto
-poleg
-ponavadi
-ponovno
-potem
-povsod
-prbl.
-precej
-pred
-prej
-preko
-pri
-pribl.
-približno
-proti
-r
-redko
-res
-s
-saj
-sam
-sama
-same
-sami
-samo
-se
-sebe
-sebi
-sedaj
-sem
-seveda
-si
-sicer
-skoraj
-skozi
-smo
-so
-spet
-sta
-ste
-sva
-t
-ta
-tak
-taka
-take
-taki
-tako
-takoj
-tam
-te
-tebe
-tebi
-tega
-ti
-tista
-tiste
-tisti
-tisto
-tj.
-tja
-to
-toda
-tu
-tudi
-tukaj
-tvoj
-tvoja
-tvoje
+
+g g. ga ga. gor gospa gospod
+
+h halo
+
+i idr. ii iii in iv ix iz
+
+j jaz je ji jih jim jo jutri
+
+k kadarkoli kaj kajti kako kakor kamor kamorkoli kar karkoli
+katerikoli kdaj kdo kdorkoli ker ki kje kjer kjerkoli
+ko koder koderkoli koga komu kot kratek kratka kratke kratki
+
+l lahka lahke lahki lahko le lep lepa lepe lepi lepo leto
+
+m majhen majhna majhni malce malo manj me med medtem mene
+mesec mi midva midve mnogo moj moja moje mora morajo moram
+moramo morate moraš morem mu
+
+n na nad naj najina najino najmanj naju največ nam narobe
+nas nato nazaj naš naša naše ne nedavno nedelja nek neka
+nekaj nekatere nekateri nekatero nekdo neke nekega neki
+nekje neko nekoga nekoč ni nikamor nikdar nikjer nikoli
+nič nje njega njegov njegova njegovo njej njemu njen
+njena njeno nji njih njihov njihova njihovo njiju njim
+njo njun njuna njuno no nocoj npr.
+
+o ob oba obe oboje od odprt odprta odprti okoli on
+onadva one oni onidve osem osma osmi osmo oz.
+
+p pa pet peta petek peti peto po pod pogosto poleg poln
+polna polni polno ponavadi ponedeljek ponovno potem
+povsod pozdravljen pozdravljeni prav prava prave pravi
+pravo prazen prazna prazno prbl. precej pred prej preko
+pri pribl. približno primer pripravljen pripravljena
+pripravljeni proti prva prvi prvo
+
+r ravno redko res reč
+
+s saj sam sama same sami samo se sebe sebi sedaj sedem
+sedma sedmi sedmo sem seveda si sicer skoraj skozi slab sm
+so sobota spet sreda srednja srednji sta ste stran stvar sva
+
+š šest šesta šesti šesto štiri
+
+t ta tak taka take taki tako takoj tam te tebe tebi tega
+težak težka težki težko ti tista tiste tisti tisto tj.
+tja to toda torek tretja tretje tretji tri tu tudi tukaj
+tvoj tvoja tvoje
+
u
-v
-vaju
-vam
-vas
-vaš
-vaša
-vaše
-ve
-vedno
-vendar
-ves
-več
-vi
-vidva
-vii
-viii
-vsa
-vsaj
-vsak
-vsaka
-vsakdo
-vsake
-vsaki
-vsakomur
-vse
-vsega
-vsi
-vso
-včasih
-x
-z
-za
-zadaj
-zadnji
-zakaj
-zdaj
-zelo
-zunaj
-č
-če
-često
-čez
-čigav
-š
-ž
-že
+
+v vaju vam vas vaš vaša vaše ve vedno velik velika veliki
+veliko vendar ves več vi vidva vii viii visok visoka visoke
+visoki vsa vsaj vsak vsaka vsakdo vsake vsaki vsakomur vse
+vsega vsi vso včasih včeraj
+
+x
+
+z za zadaj zadnji zakaj zaprta zaprti zaprto zdaj zelo zunaj
+
+ž že
""".split()
)
diff --git a/spacy/lang/sl/tokenizer_exceptions.py b/spacy/lang/sl/tokenizer_exceptions.py
new file mode 100644
index 000000000..3d4109228
--- /dev/null
+++ b/spacy/lang/sl/tokenizer_exceptions.py
@@ -0,0 +1,272 @@
+from typing import Dict, List
+from ..tokenizer_exceptions import BASE_EXCEPTIONS
+from ...symbols import ORTH, NORM
+from ...util import update_exc
+
+_exc: Dict[str, List[Dict]] = {}
+
+_other_exc = {
+ "t.i.": [{ORTH: "t.", NORM: "tako"}, {ORTH: "i.", NORM: "imenovano"}],
+ "t.j.": [{ORTH: "t.", NORM: "to"}, {ORTH: "j.", NORM: "je"}],
+ "T.j.": [{ORTH: "T.", NORM: "to"}, {ORTH: "j.", NORM: "je"}],
+ "d.o.o.": [
+ {ORTH: "d.", NORM: "družba"},
+ {ORTH: "o.", NORM: "omejeno"},
+ {ORTH: "o.", NORM: "odgovornostjo"},
+ ],
+ "D.O.O.": [
+ {ORTH: "D.", NORM: "družba"},
+ {ORTH: "O.", NORM: "omejeno"},
+ {ORTH: "O.", NORM: "odgovornostjo"},
+ ],
+ "d.n.o.": [
+ {ORTH: "d.", NORM: "družba"},
+ {ORTH: "n.", NORM: "neomejeno"},
+ {ORTH: "o.", NORM: "odgovornostjo"},
+ ],
+ "D.N.O.": [
+ {ORTH: "D.", NORM: "družba"},
+ {ORTH: "N.", NORM: "neomejeno"},
+ {ORTH: "O.", NORM: "odgovornostjo"},
+ ],
+ "d.d.": [{ORTH: "d.", NORM: "delniška"}, {ORTH: "d.", NORM: "družba"}],
+ "D.D.": [{ORTH: "D.", NORM: "delniška"}, {ORTH: "D.", NORM: "družba"}],
+ "s.p.": [{ORTH: "s.", NORM: "samostojni"}, {ORTH: "p.", NORM: "podjetnik"}],
+ "S.P.": [{ORTH: "S.", NORM: "samostojni"}, {ORTH: "P.", NORM: "podjetnik"}],
+ "l.r.": [{ORTH: "l.", NORM: "lastno"}, {ORTH: "r.", NORM: "ročno"}],
+ "le-te": [{ORTH: "le"}, {ORTH: "-"}, {ORTH: "te"}],
+ "Le-te": [{ORTH: "Le"}, {ORTH: "-"}, {ORTH: "te"}],
+ "le-ti": [{ORTH: "le"}, {ORTH: "-"}, {ORTH: "ti"}],
+ "Le-ti": [{ORTH: "Le"}, {ORTH: "-"}, {ORTH: "ti"}],
+ "le-to": [{ORTH: "le"}, {ORTH: "-"}, {ORTH: "to"}],
+ "Le-to": [{ORTH: "Le"}, {ORTH: "-"}, {ORTH: "to"}],
+ "le-ta": [{ORTH: "le"}, {ORTH: "-"}, {ORTH: "ta"}],
+ "Le-ta": [{ORTH: "Le"}, {ORTH: "-"}, {ORTH: "ta"}],
+ "le-tega": [{ORTH: "le"}, {ORTH: "-"}, {ORTH: "tega"}],
+ "Le-tega": [{ORTH: "Le"}, {ORTH: "-"}, {ORTH: "tega"}],
+}
+
+_exc.update(_other_exc)
+
+
+for exc_data in [
+ {ORTH: "adm.", NORM: "administracija"},
+ {ORTH: "aer.", NORM: "aeronavtika"},
+ {ORTH: "agr.", NORM: "agronomija"},
+ {ORTH: "amer.", NORM: "ameriško"},
+ {ORTH: "anat.", NORM: "anatomija"},
+ {ORTH: "angl.", NORM: "angleški"},
+ {ORTH: "ant.", NORM: "antonim"},
+ {ORTH: "antr.", NORM: "antropologija"},
+ {ORTH: "apr.", NORM: "april"},
+ {ORTH: "arab.", NORM: "arabsko"},
+ {ORTH: "arheol.", NORM: "arheologija"},
+ {ORTH: "arhit.", NORM: "arhitektura"},
+ {ORTH: "avg.", NORM: "avgust"},
+ {ORTH: "avstr.", NORM: "avstrijsko"},
+ {ORTH: "avt.", NORM: "avtomobilizem"},
+ {ORTH: "bibl.", NORM: "biblijsko"},
+ {ORTH: "biokem.", NORM: "biokemija"},
+ {ORTH: "biol.", NORM: "biologija"},
+ {ORTH: "bolg.", NORM: "bolgarski"},
+ {ORTH: "bot.", NORM: "botanika"},
+ {ORTH: "cit.", NORM: "citat"},
+ {ORTH: "daj.", NORM: "dajalnik"},
+ {ORTH: "del.", NORM: "deležnik"},
+ {ORTH: "ed.", NORM: "ednina"},
+ {ORTH: "etn.", NORM: "etnografija"},
+ {ORTH: "farm.", NORM: "farmacija"},
+ {ORTH: "filat.", NORM: "filatelija"},
+ {ORTH: "filoz.", NORM: "filozofija"},
+ {ORTH: "fin.", NORM: "finančništvo"},
+ {ORTH: "fiz.", NORM: "fizika"},
+ {ORTH: "fot.", NORM: "fotografija"},
+ {ORTH: "fr.", NORM: "francoski"},
+ {ORTH: "friz.", NORM: "frizerstvo"},
+ {ORTH: "gastr.", NORM: "gastronomija"},
+ {ORTH: "geogr.", NORM: "geografija"},
+ {ORTH: "geol.", NORM: "geologija"},
+ {ORTH: "geom.", NORM: "geometrija"},
+ {ORTH: "germ.", NORM: "germanski"},
+ {ORTH: "gl.", NORM: "glej"},
+ {ORTH: "glag.", NORM: "glagolski"},
+ {ORTH: "glasb.", NORM: "glasba"},
+ {ORTH: "gled.", NORM: "gledališče"},
+ {ORTH: "gost.", NORM: "gostinstvo"},
+ {ORTH: "gozd.", NORM: "gozdarstvo"},
+ {ORTH: "gr.", NORM: "grški"},
+ {ORTH: "grad.", NORM: "gradbeništvo"},
+ {ORTH: "hebr.", NORM: "hebrejsko"},
+ {ORTH: "hrv.", NORM: "hrvaško"},
+ {ORTH: "ide.", NORM: "indoevropsko"},
+ {ORTH: "igr.", NORM: "igre"},
+ {ORTH: "im.", NORM: "imenovalnik"},
+ {ORTH: "iron.", NORM: "ironično"},
+ {ORTH: "it.", NORM: "italijanski"},
+ {ORTH: "itd.", NORM: "in tako dalje"},
+ {ORTH: "itn.", NORM: "in tako naprej"},
+ {ORTH: "ipd.", NORM: "in podobno"},
+ {ORTH: "jap.", NORM: "japonsko"},
+ {ORTH: "jul.", NORM: "julij"},
+ {ORTH: "jun.", NORM: "junij"},
+ {ORTH: "kit.", NORM: "kitajsko"},
+ {ORTH: "knj.", NORM: "knjižno"},
+ {ORTH: "knjiž.", NORM: "knjižno"},
+ {ORTH: "kor.", NORM: "koreografija"},
+ {ORTH: "lat.", NORM: "latinski"},
+ {ORTH: "les.", NORM: "lesna stroka"},
+ {ORTH: "lingv.", NORM: "lingvistika"},
+ {ORTH: "lit.", NORM: "literarni"},
+ {ORTH: "ljubk.", NORM: "ljubkovalno"},
+ {ORTH: "lov.", NORM: "lovstvo"},
+ {ORTH: "m.", NORM: "moški"},
+ {ORTH: "mak.", NORM: "makedonski"},
+ {ORTH: "mar.", NORM: "marec"},
+ {ORTH: "mat.", NORM: "matematika"},
+ {ORTH: "med.", NORM: "medicina"},
+ {ORTH: "meh.", NORM: "mehiško"},
+ {ORTH: "mest.", NORM: "mestnik"},
+ {ORTH: "mdr.", NORM: "med drugim"},
+ {ORTH: "min.", NORM: "mineralogija"},
+ {ORTH: "mitol.", NORM: "mitologija"},
+ {ORTH: "mn.", NORM: "množina"},
+ {ORTH: "mont.", NORM: "montanistika"},
+ {ORTH: "muz.", NORM: "muzikologija"},
+ {ORTH: "nam.", NORM: "namenilnik"},
+ {ORTH: "nar.", NORM: "narečno"},
+ {ORTH: "nav.", NORM: "navadno"},
+ {ORTH: "nedol.", NORM: "nedoločnik"},
+ {ORTH: "nedov.", NORM: "nedovršni"},
+ {ORTH: "neprav.", NORM: "nepravilno"},
+ {ORTH: "nepreh.", NORM: "neprehodno"},
+ {ORTH: "neskl.", NORM: "nesklonljiv(o)"},
+ {ORTH: "nestrok.", NORM: "nestrokovno"},
+ {ORTH: "num.", NORM: "numizmatika"},
+ {ORTH: "npr.", NORM: "na primer"},
+ {ORTH: "obrt.", NORM: "obrtništvo"},
+ {ORTH: "okt.", NORM: "oktober"},
+ {ORTH: "or.", NORM: "orodnik"},
+ {ORTH: "os.", NORM: "oseba"},
+ {ORTH: "otr.", NORM: "otroško"},
+ {ORTH: "oz.", NORM: "oziroma"},
+ {ORTH: "pal.", NORM: "paleontologija"},
+ {ORTH: "papir.", NORM: "papirništvo"},
+ {ORTH: "ped.", NORM: "pedagogika"},
+ {ORTH: "pisar.", NORM: "pisarniško"},
+ {ORTH: "pog.", NORM: "pogovorno"},
+ {ORTH: "polit.", NORM: "politika"},
+ {ORTH: "polj.", NORM: "poljsko"},
+ {ORTH: "poljud.", NORM: "poljudno"},
+ {ORTH: "preg.", NORM: "pregovor"},
+ {ORTH: "preh.", NORM: "prehodno"},
+ {ORTH: "pren.", NORM: "preneseno"},
+ {ORTH: "prid.", NORM: "pridevnik"},
+ {ORTH: "prim.", NORM: "primerjaj"},
+ {ORTH: "prisl.", NORM: "prislov"},
+ {ORTH: "psih.", NORM: "psihologija"},
+ {ORTH: "psiht.", NORM: "psihiatrija"},
+ {ORTH: "rad.", NORM: "radiotehnika"},
+ {ORTH: "rač.", NORM: "računalništvo"},
+ {ORTH: "rib.", NORM: "ribištvo"},
+ {ORTH: "rod.", NORM: "rodilnik"},
+ {ORTH: "rus.", NORM: "rusko"},
+ {ORTH: "s.", NORM: "srednji"},
+ {ORTH: "sam.", NORM: "samostalniški"},
+ {ORTH: "sed.", NORM: "sedanjik"},
+ {ORTH: "sep.", NORM: "september"},
+ {ORTH: "slabš.", NORM: "slabšalno"},
+ {ORTH: "slovan.", NORM: "slovansko"},
+ {ORTH: "slovaš.", NORM: "slovaško"},
+ {ORTH: "srb.", NORM: "srbsko"},
+ {ORTH: "star.", NORM: "starinsko"},
+ {ORTH: "stil.", NORM: "stilno"},
+ {ORTH: "sv.", NORM: "svet(i)"},
+ {ORTH: "teh.", NORM: "tehnika"},
+ {ORTH: "tisk.", NORM: "tiskarstvo"},
+ {ORTH: "tj.", NORM: "to je"},
+ {ORTH: "tož.", NORM: "tožilnik"},
+ {ORTH: "trg.", NORM: "trgovina"},
+ {ORTH: "ukr.", NORM: "ukrajinski"},
+ {ORTH: "um.", NORM: "umetnost"},
+ {ORTH: "vel.", NORM: "velelnik"},
+ {ORTH: "vet.", NORM: "veterina"},
+ {ORTH: "vez.", NORM: "veznik"},
+ {ORTH: "vn.", NORM: "visokonemško"},
+ {ORTH: "voj.", NORM: "vojska"},
+ {ORTH: "vrtn.", NORM: "vrtnarstvo"},
+ {ORTH: "vulg.", NORM: "vulgarno"},
+ {ORTH: "vznes.", NORM: "vzneseno"},
+ {ORTH: "zal.", NORM: "založništvo"},
+ {ORTH: "zastar.", NORM: "zastarelo"},
+ {ORTH: "zgod.", NORM: "zgodovina"},
+ {ORTH: "zool.", NORM: "zoologija"},
+ {ORTH: "čeb.", NORM: "čebelarstvo"},
+ {ORTH: "češ.", NORM: "češki"},
+ {ORTH: "člov.", NORM: "človeškost"},
+ {ORTH: "šah.", NORM: "šahovski"},
+ {ORTH: "šalj.", NORM: "šaljivo"},
+ {ORTH: "šp.", NORM: "španski"},
+ {ORTH: "špan.", NORM: "špansko"},
+ {ORTH: "šport.", NORM: "športni"},
+ {ORTH: "štev.", NORM: "števnik"},
+ {ORTH: "šved.", NORM: "švedsko"},
+ {ORTH: "švic.", NORM: "švicarsko"},
+ {ORTH: "ž.", NORM: "ženski"},
+ {ORTH: "žarg.", NORM: "žargonsko"},
+ {ORTH: "žel.", NORM: "železnica"},
+ {ORTH: "živ.", NORM: "živost"},
+]:
+ _exc[exc_data[ORTH]] = [exc_data]
+
+
+abbrv = """
+Co. Ch. DIPL. DR. Dr. Ev. Inc. Jr. Kr. Mag. M. MR. Mr. Mt. Murr. Npr. OZ.
+Opr. Osn. Prim. Roj. ST. Sim. Sp. Sred. St. Sv. Škofl. Tel. UR. Zb.
+a. aa. ab. abc. abit. abl. abs. abt. acc. accel. add. adj. adv. aet. afr. akad. al. alban. all. alleg.
+alp. alt. alter. alžir. am. an. andr. ang. anh. anon. ans. antrop. apoc. app. approx. apt. ar. arc. arch.
+arh. arr. as. asist. assist. assoc. asst. astr. attn. aug. avstral. az. b. bab. bal. bbl. bd. belg. bioinf.
+biomed. bk. bl. bn. borg. bp. br. braz. brit. bros. broš. bt. bu. c. ca. cal. can. cand. cantab. cap. capt.
+cat. cath. cc. cca. cd. cdr. cdre. cent. cerkv. cert. cf. cfr. ch. chap. chem. chr. chs. cic. circ. civ. cl.
+cm. cmd. cnr. co. cod. col. coll. colo. com. comp. con. conc. cond. conn. cons. cont. coop. corr. cost. cp.
+cpl. cr. crd. cres. cresc. ct. cu. d. dan. dat. davč. ddr. dec. ded. def. dem. dent. dept. dia. dip. dipl.
+dir. disp. diss. div. do. doc. dok. dol. doo. dop. dott. dr. dram. druž. družb. drž. dt. duh. dur. dvr. dwt. e.
+ea. ecc. eccl. eccles. econ. edn. egipt. egr. ekon. eksp. el. em. enc. eng. eo. ep. err. esp. esq. est.
+et. etc. etnogr. etnol. ev. evfem. evr. ex. exc. excl. exp. expl. ext. exx. f. fa. facs. fak. faks. fas.
+fasc. fco. fcp. feb. febr. fec. fed. fem. ff. fff. fid. fig. fil. film. fiziol. fiziot. flam. fm. fo. fol. folk.
+frag. fran. franc. fsc. g. ga. gal. gdč. ge. gen. geod. geog. geotehnol. gg. gimn. glas. glav. gnr. go. gor.
+gosp. gp. graf. gram. gren. grš. gs. h. hab. hf. hist. ho. hort. i. ia. ib. ibid. id. idr. idridr. ill. imen.
+imp. impf. impr. in. inc. incl. ind. indus. inf. inform. ing. init. ins. int. inv. inšp. inštr. inž. is. islam.
+ist. ital. iur. iz. izbr. izd. izg. izgr. izr. izv. j. jak. jam. jan. jav. je. jez. jr. jsl. jud. jug.
+jugoslovan. jur. juž. jv. jz. k. kal. kan. kand. kat. kdo. kem. kip. kmet. kol. kom. komp. konf. kont. kost. kov.
+kp. kpfw. kr. kraj. krat. kub. kult. kv. kval. l. la. lab. lb. ld. let. lib. lik. litt. lj. ljud. ll. loc. log.
+loč. lt. ma. madž. mag. manag. manjš. masc. mass. mater. max. maxmax. mb. md. mech. medic. medij. medn.
+mehč. mem. menedž. mes. mess. metal. meteor. meteorol. mex. mi. mikr. mil. minn. mio. misc. miss. mit. mk.
+mkt. ml. mlad. mlle. mlr. mm. mme. množ. mo. moj. moš. možn. mr. mrd. mrs. ms. msc. msgr. mt. murr. mus. mut.
+n. na. nad. nadalj. nadom. nagl. nakl. namer. nan. naniz. nasl. nat. navt. nač. ned. nem. nik. nizoz. nm. nn.
+no. nom. norv. notr. nov. novogr. ns. o. ob. obd. obj. oblač. obl. oblik. obr. obraz. obs. obst. obt. obč. oc.
+oct. od. odd. odg. odn. odst. odv. oec. off. ok. okla. okr. ont. oo. op. opis. opp. opr. orch. ord. ore. oreg.
+org. orient. orig. ork. ort. oseb. osn. ot. ozir. ošk. p. pag. par. para. parc. parl. part. past. pat. pdk.
+pen. perf. pert. perz. pesn. pet. pev. pf. pfc. ph. pharm. phil. pis. pl. po. pod. podr. podaljš. pogl. pogoj. pojm.
+pok. pokr. pol. poljed. poljub. polu. pom. pomen. pon. ponov. pop. por. port. pos. posl. posn. pov. pp. ppl. pr.
+praet. prav. pravopis. pravosl. preb. pred. predl. predm. predp. preds. pref. pregib. prel. prem. premen. prep.
+pres. pret. prev. pribl. prih. pril. primerj. primor. prip. pripor. prir. prist. priv. proc. prof. prog. proiz.
+prom. pron. prop. prot. protest. prov. ps. pss. pt. publ. pz. q. qld. qu. quad. que. r. racc. rastl. razgl.
+razl. razv. rd. red. ref. reg. rel. relig. rep. repr. rer. resp. rest. ret. rev. revol. rež. rim. rist. rkp. rm.
+roj. rom. romun. rp. rr. rt. rud. ruš. ry. sal. samogl. san. sc. scen. sci. scr. sdv. seg. sek. sen. sept. ser.
+sev. sg. sgt. sh. sig. sigg. sign. sim. sin. sing. sinh. skand. skl. sklad. sklanj. sklep. skr. sl. slik. slov.
+slovak. slovn. sn. so. sob. soc. sociol. sod. sopomen. sopr. sor. sov. sovj. sp. spec. spl. spr. spreg. sq. sr.
+sre. sred. sredoz. srh. ss. ssp. st. sta. stan. stanstar. stcsl. ste. stim. stol. stom. str. stroj. strok. stsl.
+stud. sup. supl. suppl. svet. sz. t. tab. tech. ted. tehn. tehnol. tek. teks. tekst. tel. temp. ten. teol. ter.
+term. test. th. theol. tim. tip. tisočl. tit. tl. tol. tolmač. tom. tor. tov. tr. trad. traj. trans. tren.
+trib. tril. trop. trp. trž. ts. tt. tu. tur. turiz. tvor. tvorb. tč. u. ul. umet. un. univ. up. upr. ur. urad.
+us. ust. utr. v. va. val. var. varn. ven. ver. verb. vest. vezal. vic. vis. viv. viz. viš. vod. vok. vol. vpr.
+vrst. vrstil. vs. vv. vzd. vzg. vzh. vzor. w. wed. wg. wk. x. y. z. zah. zaim. zak. zap. zasl. zavar. zač. zb.
+združ. zg. zn. znan. znanstv. zoot. zun. zv. zvd. á. é. ć. č. čas. čet. čl. člen. čustv. đ. ľ. ł. ş. ŠT. š. šir.
+škofl. škot. šol. št. števil. štud. ů. ű. žen. žival.
+""".split()
+
+for orth in abbrv:
+ _exc[orth] = [{ORTH: orth}]
+
+
+TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
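The abbreviations above are registered as single-token exceptions for Slovenian. As a minimal sketch of the effect (assuming this language data is installed), a blank `sl` pipeline should keep a listed abbreviation together instead of splitting off the trailing period:

```python
import spacy

# Sketch: a blank Slovenian pipeline picks up TOKENIZER_EXCEPTIONS, so listed
# abbreviations such as "dr." and "št." are kept as single tokens.
nlp = spacy.blank("sl")
doc = nlp("Obiskal je dr. Novaka in št. 5 je ostala prazna.")
print([t.text for t in doc])
# "dr." and "št." should each come out as one token rather than two.
```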
diff --git a/spacy/lang/tokenizer_exceptions.py b/spacy/lang/tokenizer_exceptions.py
index d76fe4262..a5e388ca8 100644
--- a/spacy/lang/tokenizer_exceptions.py
+++ b/spacy/lang/tokenizer_exceptions.py
@@ -17,10 +17,6 @@ URL_PATTERN = (
r"(?:\S+(?::\S*)?@)?"
r"(?:"
# IP address exclusion
- # private & local networks
- r"(?!(?:10|127)(?:\.\d{1,3}){3})"
- r"(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})"
- r"(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})"
# IP address dotted notation octets
# excludes loopback network 0.0.0.0
# excludes reserved space >= 224.0.0.0
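Dropping the private/local-network lookaheads means intranet-style addresses such as `http://192.168.0.1` are now accepted by the URL pattern like any public URL. A quick check using the module's `URL_MATCH` helper (a sketch; the surrounding tokenizer behaviour also depends on the rest of the pattern):

```python
from spacy.lang.tokenizer_exceptions import URL_MATCH

# With the private-network negative lookaheads removed, addresses in the
# 10.x/127.x/169.254/192.168/172.16-31 ranges should now match as URLs too.
for url in ["http://192.168.0.1", "http://10.0.0.5:8080/status", "https://example.com"]:
    print(url, URL_MATCH(url) is not None)
```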
diff --git a/spacy/lang/uk/__init__.py b/spacy/lang/uk/__init__.py
index 21f9649f2..bfea9ff69 100644
--- a/spacy/lang/uk/__init__.py
+++ b/spacy/lang/uk/__init__.py
@@ -6,6 +6,8 @@ from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .stop_words import STOP_WORDS
from .lex_attrs import LEX_ATTRS
from .lemmatizer import UkrainianLemmatizer
+from ..punctuation import COMBINING_DIACRITICS_TOKENIZER_INFIXES
+from ..punctuation import COMBINING_DIACRITICS_TOKENIZER_SUFFIXES
from ...language import Language, BaseDefaults
@@ -13,6 +15,8 @@ class UkrainianDefaults(BaseDefaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
+ suffixes = COMBINING_DIACRITICS_TOKENIZER_SUFFIXES
+ infixes = COMBINING_DIACRITICS_TOKENIZER_INFIXES
class Ukrainian(Language):
@@ -25,7 +29,7 @@ class Ukrainian(Language):
assigns=["token.lemma"],
default_config={
"model": None,
- "mode": "pymorphy2",
+ "mode": "pymorphy3",
"overwrite": False,
"scorer": {"@scorers": "spacy.lemmatizer_scorer.v1"},
},
diff --git a/spacy/lang/uk/lemmatizer.py b/spacy/lang/uk/lemmatizer.py
index a8bc56057..97ee80479 100644
--- a/spacy/lang/uk/lemmatizer.py
+++ b/spacy/lang/uk/lemmatizer.py
@@ -14,7 +14,7 @@ class UkrainianLemmatizer(RussianLemmatizer):
model: Optional[Model],
name: str = "lemmatizer",
*,
- mode: str = "pymorphy2",
+ mode: str = "pymorphy3",
overwrite: bool = False,
scorer: Optional[Callable] = lemmatizer_score,
) -> None:
@@ -29,6 +29,17 @@ class UkrainianLemmatizer(RussianLemmatizer):
) from None
if getattr(self, "_morph", None) is None:
self._morph = MorphAnalyzer(lang="uk")
+ elif mode == "pymorphy3":
+ try:
+ from pymorphy3 import MorphAnalyzer
+ except ImportError:
+ raise ImportError(
+ "The Ukrainian lemmatizer mode 'pymorphy3' requires the "
+ "pymorphy3 library and dictionaries. Install them with: "
+ "pip install pymorphy3 pymorphy3-dicts-uk"
+ ) from None
+ if getattr(self, "_morph", None) is None:
+ self._morph = MorphAnalyzer(lang="uk")
super().__init__(
vocab, model, name, mode=mode, overwrite=overwrite, scorer=scorer
)
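With `pymorphy3` as the new default mode, constructing the Ukrainian lemmatizer requires the pymorphy3 packages, while the previous mode remains available by request; the combining-diacritics suffix and infix rules are picked up automatically by the Ukrainian defaults. A sketch, assuming `pip install pymorphy3 pymorphy3-dicts-uk`:

```python
import spacy

# The lemmatizer factory for Ukrainian now defaults to mode "pymorphy3",
# which needs pymorphy3 and pymorphy3-dicts-uk at construction time.
nlp = spacy.blank("uk")
nlp.add_pipe("lemmatizer", config={"mode": "pymorphy3"})

# The earlier behaviour can still be selected explicitly:
# nlp.add_pipe("lemmatizer", config={"mode": "pymorphy2"})
```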
diff --git a/spacy/language.py b/spacy/language.py
index 42847823f..34a06e576 100644
--- a/spacy/language.py
+++ b/spacy/language.py
@@ -1,4 +1,4 @@
-from typing import Iterator, Optional, Any, Dict, Callable, Iterable
+from typing import Iterator, Optional, Any, Dict, Callable, Iterable, Collection
from typing import Union, Tuple, List, Set, Pattern, Sequence
from typing import NoReturn, TYPE_CHECKING, TypeVar, cast, overload
@@ -465,6 +465,8 @@ class Language:
"""
if not isinstance(name, str):
raise ValueError(Errors.E963.format(decorator="factory"))
+ if "." in name:
+ raise ValueError(Errors.E853.format(name=name))
if not isinstance(default_config, dict):
err = Errors.E962.format(
style="default config", name=name, cfg_type=type(default_config)
@@ -543,8 +545,11 @@ class Language:
DOCS: https://spacy.io/api/language#component
"""
- if name is not None and not isinstance(name, str):
- raise ValueError(Errors.E963.format(decorator="component"))
+ if name is not None:
+ if not isinstance(name, str):
+ raise ValueError(Errors.E963.format(decorator="component"))
+ if "." in name:
+ raise ValueError(Errors.E853.format(name=name))
component_name = name if name is not None else util.get_object_name(func)
def add_component(component_func: "Pipe") -> Callable:
@@ -1023,8 +1028,8 @@ class Language:
raise ValueError(Errors.E109.format(name=name)) from e
except Exception as e:
error_handler(name, proc, [doc], e)
- if doc is None:
- raise ValueError(Errors.E005.format(name=name))
+ if not isinstance(doc, Doc):
+ raise ValueError(Errors.E005.format(name=name, returned_type=type(doc)))
return doc
def disable_pipes(self, *names) -> "DisabledPipes":
@@ -1058,7 +1063,7 @@ class Language:
"""
if enable is None and disable is None:
raise ValueError(Errors.E991)
- if disable is not None and isinstance(disable, str):
+ if isinstance(disable, str):
disable = [disable]
if enable is not None:
if isinstance(enable, str):
@@ -1693,8 +1698,9 @@ class Language:
config: Union[Dict[str, Any], Config] = {},
*,
vocab: Union[Vocab, bool] = True,
- disable: Iterable[str] = SimpleFrozenList(),
- exclude: Iterable[str] = SimpleFrozenList(),
+ disable: Union[str, Iterable[str]] = SimpleFrozenList(),
+ enable: Union[str, Iterable[str]] = SimpleFrozenList(),
+ exclude: Union[str, Iterable[str]] = SimpleFrozenList(),
meta: Dict[str, Any] = SimpleFrozenDict(),
auto_fill: bool = True,
validate: bool = True,
@@ -1705,10 +1711,12 @@ class Language:
config (Dict[str, Any] / Config): The loaded config.
vocab (Vocab): A Vocab object. If True, a vocab is created.
- disable (Iterable[str]): Names of pipeline components to disable.
+ disable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to disable.
Disabled pipes will be loaded but they won't be run unless you
explicitly enable them by calling nlp.enable_pipe.
- exclude (Iterable[str]): Names of pipeline components to exclude.
+ enable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to enable. All other
+ pipes will be disabled (and can be enabled using `nlp.enable_pipe`).
+ exclude (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to exclude.
Excluded components won't be loaded.
meta (Dict[str, Any]): Meta overrides for nlp.meta.
auto_fill (bool): Automatically fill in missing values in config based
@@ -1719,6 +1727,12 @@ class Language:
DOCS: https://spacy.io/api/language#from_config
"""
+ if isinstance(disable, str):
+ disable = [disable]
+ if isinstance(enable, str):
+ enable = [enable]
+ if isinstance(exclude, str):
+ exclude = [exclude]
if auto_fill:
config = Config(
cls.default_config, section_order=CONFIG_SECTION_ORDER
@@ -1861,8 +1875,15 @@ class Language:
# Restore the original vocab after sourcing if necessary
if vocab_b is not None:
nlp.vocab.from_bytes(vocab_b)
- disabled_pipes = [*config["nlp"]["disabled"], *disable]
+
+ # Resolve disabled/enabled settings.
+ disabled_pipes = cls._resolve_component_status(
+ [*config["nlp"]["disabled"], *disable],
+ [*config["nlp"].get("enabled", []), *enable],
+ config["nlp"]["pipeline"],
+ )
nlp._disabled = set(p for p in disabled_pipes if p not in exclude)
+
nlp.batch_size = config["nlp"]["batch_size"]
nlp.config = filled if auto_fill else config
if after_pipeline_creation is not None:
@@ -2014,6 +2035,46 @@ class Language:
serializers["vocab"] = lambda p: self.vocab.to_disk(p, exclude=exclude)
util.to_disk(path, serializers, exclude)
+ @staticmethod
+ def _resolve_component_status(
+ disable: Union[str, Iterable[str]],
+ enable: Union[str, Iterable[str]],
+ pipe_names: Iterable[str],
+ ) -> Tuple[str, ...]:
+ """Derives whether (1) `disable` and `enable` values are consistent and (2)
+ resolves those to a single set of disabled components. Raises an error in
+ case of inconsistency.
+
+ disable (Union[str, Iterable[str]]): Name(s) of component(s) or serialization fields to disable.
+ enable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to enable.
+ pipe_names (Iterable[str]): Names of all pipeline components.
+
+ RETURNS (Tuple[str, ...]): Names of components to disable, derived from the
+ specified `enable` and `disable` values.
+ """
+
+ if isinstance(disable, str):
+ disable = [disable]
+ to_disable = disable
+
+ if enable:
+ if isinstance(enable, str):
+ enable = [enable]
+ to_disable = [
+ pipe_name for pipe_name in pipe_names if pipe_name not in enable
+ ]
+ if disable and disable != to_disable:
+ raise ValueError(
+ Errors.E1042.format(
+ arg1="enable",
+ arg2="disable",
+ arg1_values=enable,
+ arg2_values=disable,
+ )
+ )
+
+ return tuple(to_disable)
+
def from_disk(
self,
path: Union[str, Path],
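Taken together, these changes let `disable`, `enable` and `exclude` be passed either as a single string or as any iterable, and check the enable/disable combination for consistency (raising `E1042` on conflict). A usage sketch, assuming the top-level `spacy.load` forwards these arguments the same way `Language.from_config` now does and that `en_core_web_sm` is installed:

```python
import spacy

# A single string is now accepted wherever a list of names was required before.
nlp = spacy.load("en_core_web_sm", enable="ner")          # every other pipe is disabled
nlp = spacy.load("en_core_web_sm", disable=["parser"])     # parser is loaded but not run
nlp = spacy.load("en_core_web_sm", exclude="lemmatizer")   # lemmatizer is not loaded at all

# Passing contradictory values, e.g. enable=["ner"] together with
# disable=["ner"], raises a ValueError (E1042).
```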
diff --git a/spacy/matcher/dependencymatcher.pyx b/spacy/matcher/dependencymatcher.pyx
index a602ba737..4c6004907 100644
--- a/spacy/matcher/dependencymatcher.pyx
+++ b/spacy/matcher/dependencymatcher.pyx
@@ -82,6 +82,10 @@ cdef class DependencyMatcher:
"$-": self._imm_left_sib,
"$++": self._right_sib,
"$--": self._left_sib,
+ ">++": self._right_child,
+ ">--": self._left_child,
+ "<++": self._right_parent,
+ "<--": self._left_parent,
}
def __reduce__(self):
@@ -161,9 +165,9 @@ cdef class DependencyMatcher:
on_match (callable): Optional callback executed on match.
"""
if on_match is not None and not hasattr(on_match, "__call__"):
- raise ValueError(Errors.E171.format(arg_type=type(on_match)))
- if patterns is None or not isinstance(patterns, List): # old API
- raise ValueError(Errors.E948.format(arg_type=type(patterns)))
+ raise ValueError(Errors.E171.format(name="DependencyMatcher", arg_type=type(on_match)))
+ if patterns is None or not isinstance(patterns, List):
+ raise ValueError(Errors.E948.format(name="DependencyMatcher", arg_type=type(patterns)))
for pattern in patterns:
if len(pattern) == 0:
raise ValueError(Errors.E012.format(key=key))
@@ -423,6 +427,22 @@ cdef class DependencyMatcher:
def _left_sib(self, doc, node):
return [doc[child.i] for child in doc[node].head.children if child.i < node]
+ def _right_child(self, doc, node):
+ return [doc[child.i] for child in doc[node].children if child.i > node]
+
+ def _left_child(self, doc, node):
+ return [doc[child.i] for child in doc[node].children if child.i < node]
+
+ def _right_parent(self, doc, node):
+ if doc[node].head.i > node:
+ return [doc[node].head]
+ return []
+
+ def _left_parent(self, doc, node):
+ if doc[node].head.i < node:
+ return [doc[node].head]
+ return []
+
def _normalize_key(self, key):
if isinstance(key, str):
return self.vocab.strings.add(key)
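The four new semgrex-style operators constrain the linear direction of a child or parent relation: `>++`/`>--` select a child to the right/left of the head, `<++`/`<--` select a parent to the right/left of the node. A minimal sketch (assumes a pipeline with a parser, e.g. `en_core_web_sm`):

```python
import spacy
from spacy.matcher import DependencyMatcher

nlp = spacy.load("en_core_web_sm")
matcher = DependencyMatcher(nlp.vocab)

# ">++": the "object" node must be a syntactic child of "verb" AND appear to
# its right in the sentence; ">--", "<++" and "<--" cover the other combinations.
pattern = [
    {"RIGHT_ID": "verb", "RIGHT_ATTRS": {"POS": "VERB"}},
    {"LEFT_ID": "verb", "REL_OP": ">++", "RIGHT_ID": "object",
     "RIGHT_ATTRS": {"DEP": "dobj"}},
]
matcher.add("VERB_OBJECT", [pattern])

doc = nlp("She bought a new car.")
for match_id, token_ids in matcher(doc):
    print([doc[i].text for i in token_ids])
# Expected (roughly): ['bought', 'car']
```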
diff --git a/spacy/matcher/matcher.pyx b/spacy/matcher/matcher.pyx
index 28f6c16c4..865e7594e 100644
--- a/spacy/matcher/matcher.pyx
+++ b/spacy/matcher/matcher.pyx
@@ -1,5 +1,5 @@
# cython: infer_types=True, cython: profile=True
-from typing import List
+from typing import List, Iterable
from libcpp.vector cimport vector
from libc.stdint cimport int32_t, int8_t
@@ -86,10 +86,14 @@ cdef class Matcher:
is a dictionary mapping attribute IDs to values, and optionally a
quantifier operator under the key "op". The available quantifiers are:
- '!': Negate the pattern, by requiring it to match exactly 0 times.
- '?': Make the pattern optional, by allowing it to match 0 or 1 times.
- '+': Require the pattern to match 1 or more times.
- '*': Allow the pattern to zero or more times.
+ '!': Negate the pattern, by requiring it to match exactly 0 times.
+ '?': Make the pattern optional, by allowing it to match 0 or 1 times.
+ '+': Require the pattern to match 1 or more times.
+ '*': Allow the pattern to match zero or more times.
+ '{n}': Require the pattern to match exactly _n_ times.
+ '{n,m}': Require the pattern to match at least _n_ but not more than _m_ times.
+ '{n,}': Require the pattern to match at least _n_ times.
+ '{,m}': Require the pattern to match at most _m_ times.
The + and * operators return all possible matches (not just the greedy
ones). However, the "greedy" argument can filter the final matches
@@ -106,9 +110,9 @@ cdef class Matcher:
"""
errors = {}
if on_match is not None and not hasattr(on_match, "__call__"):
- raise ValueError(Errors.E171.format(arg_type=type(on_match)))
- if patterns is None or not isinstance(patterns, List): # old API
- raise ValueError(Errors.E948.format(arg_type=type(patterns)))
+ raise ValueError(Errors.E171.format(name="Matcher", arg_type=type(on_match)))
+ if patterns is None or not isinstance(patterns, List):
+ raise ValueError(Errors.E948.format(name="Matcher", arg_type=type(patterns)))
if greedy is not None and greedy not in ["FIRST", "LONGEST"]:
raise ValueError(Errors.E947.format(expected=["FIRST", "LONGEST"], arg=greedy))
for i, pattern in enumerate(patterns):
@@ -864,20 +868,27 @@ class _SetPredicate:
def __call__(self, Token token):
if self.is_extension:
- value = get_string_id(token._.get(self.attr))
+ value = token._.get(self.attr)
else:
value = get_token_attr_for_matcher(token.c, self.attr)
- if self.predicate in ("IS_SUBSET", "IS_SUPERSET", "INTERSECTS"):
+ if self.predicate in ("IN", "NOT_IN"):
+ if isinstance(value, (str, int)):
+ value = get_string_id(value)
+ else:
+ return False
+ elif self.predicate in ("IS_SUBSET", "IS_SUPERSET", "INTERSECTS"):
+ # ensure that all values are enclosed in a set
if self.attr == MORPH:
# break up MORPH into individual Feat=Val values
value = set(get_string_id(v) for v in MorphAnalysis.from_id(self.vocab, value))
+ elif isinstance(value, (str, int)):
+ value = set((get_string_id(value),))
+ elif isinstance(value, Iterable) and all(isinstance(v, (str, int)) for v in value):
+ value = set(get_string_id(v) for v in value)
else:
- # treat a single value as a list
- if isinstance(value, (str, int)):
- value = set([get_string_id(value)])
- else:
- value = set(get_string_id(v) for v in value)
+ return False
+
if self.predicate == "IN":
return value in self.value
elif self.predicate == "NOT_IN":
@@ -1005,8 +1016,29 @@ def _get_operators(spec):
return (ONE,)
elif spec["OP"] in lookup:
return lookup[spec["OP"]]
+ # Min/max quantifiers {n}, {n,m}, {n,}, {,m}
+ elif spec["OP"].startswith("{") and spec["OP"].endswith("}"):
+ # {n} --> {n,n} exactly n ONE,(n)
+ # {n,m}--> {n,m} min of n, max of m ONE,(n),ZERO_ONE,(m)
+ # {,m} --> {0,m} min of zero, max of m ZERO_ONE,(m)
+ # {n,} --> {n,∞} min of n, max of inf ONE,(n),ZERO_PLUS
+
+ min_max = spec["OP"][1:-1]
+ min_max = min_max if "," in min_max else f"{min_max},{min_max}"
+ n, m = min_max.split(",")
+
+ # Valid forms: (1) either n or m is blank and the other is numeric, or
+ # (2) both are numeric with n <= m.
+ if (not n.isdecimal() and not m.isdecimal()) or (n.isdecimal() and m.isdecimal() and int(n) > int(m)):
+ keys = ", ".join(lookup.keys()) + ", {n}, {n,m}, {n,}, {,m} where n and m are integers and n <= m "
+ raise ValueError(Errors.E011.format(op=spec["OP"], opts=keys))
+
+ # if n is an empty string, zero is used
+ head = tuple(ONE for __ in range(int(n or 0)))
+ tail = tuple(ZERO_ONE for __ in range(int(m) - int(n or 0))) if m else (ZERO_PLUS,)
+ return head + tail
else:
- keys = ", ".join(lookup.keys())
+ keys = ", ".join(lookup.keys()) + ", {n}, {n,m}, {n,}, {,m} where n and m are integers and n <= m "
raise ValueError(Errors.E011.format(op=spec["OP"], opts=keys))
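The new curly-brace quantifiers are expanded into sequences of required and optional token specs, so `{2,4}` becomes two required tokens followed by two optional ones. A small sketch of the pattern syntax on a blank pipeline:

```python
import spacy
from spacy.matcher import Matcher

nlp = spacy.blank("en")
matcher = Matcher(nlp.vocab)

# {n}, {n,m}, {n,} and {,m} are now accepted as OP values.
# Here: "very" repeated one to three times, followed by "good".
pattern = [
    {"LOWER": "very", "OP": "{1,3}"},
    {"LOWER": "good"},
]
matcher.add("VERY_GOOD", [pattern])

doc = nlp("It was very very good, not just very good.")
for match_id, start, end in matcher(doc):
    print(doc[start:end].text)
# Like "+" and "*", the bounded quantifiers return all possible matches,
# so both "very good" and "very very good" spans are reported.
```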
diff --git a/spacy/matcher/phrasematcher.pyi b/spacy/matcher/phrasematcher.pyi
index 68e3386e4..670c87409 100644
--- a/spacy/matcher/phrasematcher.pyi
+++ b/spacy/matcher/phrasematcher.pyi
@@ -20,6 +20,15 @@ class PhraseMatcher:
Callable[[Matcher, Doc, int, List[Tuple[Any, ...]]], Any]
] = ...,
) -> None: ...
+ def _add_from_arrays(
+ self,
+ key: str,
+ specs: List[List[int]],
+ *,
+ on_match: Optional[
+ Callable[[Matcher, Doc, int, List[Tuple[Any, ...]]], Any]
+ ] = ...,
+ ) -> None: ...
def remove(self, key: str) -> None: ...
@overload
def __call__(
diff --git a/spacy/matcher/phrasematcher.pyx b/spacy/matcher/phrasematcher.pyx
index 382029872..ebe1213c7 100644
--- a/spacy/matcher/phrasematcher.pyx
+++ b/spacy/matcher/phrasematcher.pyx
@@ -1,4 +1,6 @@
# cython: infer_types=True, profile=True
+from typing import List
+from collections import defaultdict
from libc.stdint cimport uintptr_t
from preshed.maps cimport map_init, map_set, map_get, map_clear, map_iter
@@ -39,7 +41,7 @@ cdef class PhraseMatcher:
"""
self.vocab = vocab
self._callbacks = {}
- self._docs = {}
+ self._docs = defaultdict(set)
self._validate = validate
self.mem = Pool()
@@ -155,66 +157,24 @@ cdef class PhraseMatcher:
del self._callbacks[key]
del self._docs[key]
- def add(self, key, docs, *_docs, on_match=None):
- """Add a match-rule to the phrase-matcher. A match-rule consists of: an ID
- key, an on_match callback, and one or more patterns.
- Since spaCy v2.2.2, PhraseMatcher.add takes a list of patterns as the
- second argument, with the on_match callback as an optional keyword
- argument.
+ def _add_from_arrays(self, key, specs, *, on_match=None):
+ """Add a preprocessed list of specs, with an optional callback.
key (str): The match ID.
- docs (list): List of `Doc` objects representing match patterns.
+ specs (List[List[int]]): A list of lists of hashes to match.
on_match (callable): Callback executed on match.
- *_docs (Doc): For backwards compatibility: list of patterns to add
- as variable arguments. Will be ignored if a list of patterns is
- provided as the second argument.
-
- DOCS: https://spacy.io/api/phrasematcher#add
"""
- if docs is None or hasattr(docs, "__call__"): # old API
- on_match = docs
- docs = _docs
-
- _ = self.vocab[key]
- self._callbacks[key] = on_match
- self._docs.setdefault(key, set())
-
cdef MapStruct* current_node
cdef MapStruct* internal_node
cdef void* result
- if isinstance(docs, Doc):
- raise ValueError(Errors.E179.format(key=key))
- for doc in docs:
- if len(doc) == 0:
- continue
- if isinstance(doc, Doc):
- attrs = (TAG, POS, MORPH, LEMMA, DEP)
- has_annotation = {attr: doc.has_annotation(attr) for attr in attrs}
- for attr in attrs:
- if self.attr == attr and not has_annotation[attr]:
- if attr == TAG:
- pipe = "tagger"
- elif attr in (POS, MORPH):
- pipe = "morphologizer or tagger+attribute_ruler"
- elif attr == LEMMA:
- pipe = "lemmatizer"
- elif attr == DEP:
- pipe = "parser"
- error_msg = Errors.E155.format(pipe=pipe, attr=self.vocab.strings.as_string(attr))
- raise ValueError(error_msg)
- if self._validate and any(has_annotation.values()) \
- and self.attr not in attrs:
- string_attr = self.vocab.strings[self.attr]
- warnings.warn(Warnings.W012.format(key=key, attr=string_attr))
- keyword = self._convert_to_array(doc)
- else:
- keyword = doc
- self._docs[key].add(tuple(keyword))
+ self._callbacks[key] = on_match
+ for spec in specs:
+ self._docs[key].add(tuple(spec))
current_node = self.c_map
- for token in keyword:
+ for token in spec:
if token == self._terminal_hash:
warnings.warn(Warnings.W021)
break
@@ -233,6 +193,57 @@ cdef class PhraseMatcher:
result = internal_node
map_set(self.mem, result, self.vocab.strings[key], NULL)
+
+ def add(self, key, docs, *, on_match=None):
+ """Add a match-rule to the phrase-matcher. A match-rule consists of: an ID
+ key, a list of one or more patterns, and (optionally) an on_match callback.
+
+ key (str): The match ID.
+ docs (list): List of `Doc` objects representing match patterns.
+ on_match (callable): Callback executed on match.
+
+ If any of the input Docs are invalid, no internal state will be updated.
+
+ DOCS: https://spacy.io/api/phrasematcher#add
+ """
+ if isinstance(docs, Doc):
+ raise ValueError(Errors.E179.format(key=key))
+ if docs is None or not isinstance(docs, List):
+ raise ValueError(Errors.E948.format(name="PhraseMatcher", arg_type=type(docs)))
+ if on_match is not None and not hasattr(on_match, "__call__"):
+ raise ValueError(Errors.E171.format(name="PhraseMatcher", arg_type=type(on_match)))
+
+ _ = self.vocab[key]
+ specs = []
+
+ for doc in docs:
+ if len(doc) == 0:
+ continue
+ if not isinstance(doc, Doc):
+ raise ValueError(Errors.E4000.format(type=type(doc)))
+
+ attrs = (TAG, POS, MORPH, LEMMA, DEP)
+ has_annotation = {attr: doc.has_annotation(attr) for attr in attrs}
+ for attr in attrs:
+ if self.attr == attr and not has_annotation[attr]:
+ if attr == TAG:
+ pipe = "tagger"
+ elif attr in (POS, MORPH):
+ pipe = "morphologizer or tagger+attribute_ruler"
+ elif attr == LEMMA:
+ pipe = "lemmatizer"
+ elif attr == DEP:
+ pipe = "parser"
+ error_msg = Errors.E155.format(pipe=pipe, attr=self.vocab.strings.as_string(attr))
+ raise ValueError(error_msg)
+ if self._validate and any(has_annotation.values()) \
+ and self.attr not in attrs:
+ string_attr = self.vocab.strings[self.attr]
+ warnings.warn(Warnings.W012.format(key=key, attr=string_attr))
+ specs.append(self._convert_to_array(doc))
+
+ self._add_from_arrays(key, specs, on_match=on_match)
+
def __call__(self, object doclike, *, as_spans=False):
"""Find all sequences matching the supplied patterns on the `Doc`.
@@ -345,7 +356,7 @@ def unpickle_matcher(vocab, docs, callbacks, attr):
matcher = PhraseMatcher(vocab, attr=attr)
for key, specs in docs.items():
callback = callbacks.get(key, None)
- matcher.add(key, specs, on_match=callback)
+ matcher._add_from_arrays(key, specs, on_match=callback)
return matcher
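After this change the public `add` takes the pattern list as a required second argument with `on_match` keyword-only, while `_add_from_arrays` is an internal fast path (used e.g. when unpickling). Usage stays as in this sketch:

```python
import spacy
from spacy.matcher import PhraseMatcher

nlp = spacy.blank("en")
matcher = PhraseMatcher(nlp.vocab)

# Patterns must be passed as a list of Doc objects; the callback is keyword-only.
patterns = [nlp.make_doc(text) for text in ["Barack Obama", "Angela Merkel"]]
matcher.add("POLITICIANS", patterns, on_match=None)

doc = nlp("Barack Obama met Angela Merkel in Berlin.")
print([doc[start:end].text for _, start, end in matcher(doc)])
```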
diff --git a/spacy/ml/callbacks.py b/spacy/ml/callbacks.py
index b0d088182..18290b947 100644
--- a/spacy/ml/callbacks.py
+++ b/spacy/ml/callbacks.py
@@ -1,9 +1,14 @@
-from functools import partial
-from typing import Type, Callable, TYPE_CHECKING
+from typing import Type, Callable, Dict, TYPE_CHECKING, List, Optional, Set
+import functools
+import inspect
+import types
+import warnings
from thinc.layers import with_nvtx_range
from thinc.model import Model, wrap_model_recursive
+from thinc.util import use_nvtx_range
+from ..errors import Warnings
from ..util import registry
if TYPE_CHECKING:
@@ -11,29 +16,106 @@ if TYPE_CHECKING:
from ..language import Language # noqa: F401
-@registry.callbacks("spacy.models_with_nvtx_range.v1")
-def create_models_with_nvtx_range(
- forward_color: int = -1, backprop_color: int = -1
-) -> Callable[["Language"], "Language"]:
- def models_with_nvtx_range(nlp):
- pipes = [
- pipe
- for _, pipe in nlp.components
- if hasattr(pipe, "is_trainable") and pipe.is_trainable
- ]
+DEFAULT_NVTX_ANNOTATABLE_PIPE_METHODS = [
+ "pipe",
+ "predict",
+ "set_annotations",
+ "update",
+ "rehearse",
+ "get_loss",
+ "initialize",
+ "begin_update",
+ "finish_update",
+ "update",
+]
- # We need process all models jointly to avoid wrapping callbacks twice.
- models = Model(
- "wrap_with_nvtx_range",
- forward=lambda model, X, is_train: ...,
- layers=[pipe.model for pipe in pipes],
- )
- for node in models.walk():
+def models_with_nvtx_range(nlp, forward_color: int, backprop_color: int):
+ pipes = [
+ pipe
+ for _, pipe in nlp.components
+ if hasattr(pipe, "is_trainable") and pipe.is_trainable
+ ]
+
+ seen_models: Set[int] = set()
+ for pipe in pipes:
+ for node in pipe.model.walk():
+ if id(node) in seen_models:
+ continue
+ seen_models.add(id(node))
with_nvtx_range(
node, forward_color=forward_color, backprop_color=backprop_color
)
+ return nlp
+
+
+@registry.callbacks("spacy.models_with_nvtx_range.v1")
+def create_models_with_nvtx_range(
+ forward_color: int = -1, backprop_color: int = -1
+) -> Callable[["Language"], "Language"]:
+ return functools.partial(
+ models_with_nvtx_range,
+ forward_color=forward_color,
+ backprop_color=backprop_color,
+ )
+
+
+def nvtx_range_wrapper_for_pipe_method(self, func, *args, **kwargs):
+ if isinstance(func, functools.partial):
+ return func(*args, **kwargs)
+ else:
+ with use_nvtx_range(f"{self.name} {func.__name__}"):
+ return func(*args, **kwargs)
+
+
+def pipes_with_nvtx_range(
+ nlp, additional_pipe_functions: Optional[Dict[str, List[str]]]
+):
+ for _, pipe in nlp.components:
+ if additional_pipe_functions:
+ extra_funcs = additional_pipe_functions.get(pipe.name, [])
+ else:
+ extra_funcs = []
+
+ for name in DEFAULT_NVTX_ANNOTATABLE_PIPE_METHODS + extra_funcs:
+ func = getattr(pipe, name, None)
+ if func is None:
+ if name in extra_funcs:
+ warnings.warn(Warnings.W121.format(method=name, pipe=pipe.name))
+ continue
+
+ wrapped_func = functools.partial(
+ types.MethodType(nvtx_range_wrapper_for_pipe_method, pipe), func
+ )
+
+ # Try to preserve the original function signature.
+ try:
+ wrapped_func.__signature__ = inspect.signature(func) # type: ignore
+ except:
+ pass
+
+ try:
+ setattr(
+ pipe,
+ name,
+ wrapped_func,
+ )
+ except AttributeError:
+ warnings.warn(Warnings.W122.format(method=name, pipe=pipe.name))
+
+ return nlp
+
+
+@registry.callbacks("spacy.models_and_pipes_with_nvtx_range.v1")
+def create_models_and_pipes_with_nvtx_range(
+ forward_color: int = -1,
+ backprop_color: int = -1,
+ additional_pipe_functions: Optional[Dict[str, List[str]]] = None,
+) -> Callable[["Language"], "Language"]:
+ def inner(nlp):
+ nlp = models_with_nvtx_range(nlp, forward_color, backprop_color)
+ nlp = pipes_with_nvtx_range(nlp, additional_pipe_functions)
return nlp
- return models_with_nvtx_range
+ return inner
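The new `spacy.models_and_pipes_with_nvtx_range.v1` callback wraps both model forward/backward passes and selected pipe methods in NVTX ranges for profiling. A sketch of applying it to an existing pipeline (assumes a CUDA/NVTX-capable environment; the `additional_pipe_functions` entry and the pipeline name are illustrative):

```python
import spacy

nlp = spacy.load("en_core_web_sm")

# Resolve the registered callback and apply it to the pipeline: model nodes are
# wrapped with NVTX ranges, and the default pipe methods (predict, pipe, update,
# ...) plus any extra per-pipe methods are annotated as well.
make_callback = spacy.registry.callbacks.get("spacy.models_and_pipes_with_nvtx_range.v1")
callback = make_callback(
    forward_color=-1,
    backprop_color=-1,
    additional_pipe_functions={"ner": ["beam_parse"]},  # hypothetical extra method
)
nlp = callback(nlp)
```

In a training config the same callback would normally be referenced from `[nlp.after_pipeline_creation]` rather than applied by hand.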
diff --git a/spacy/ml/_character_embed.py b/spacy/ml/character_embed.py
similarity index 100%
rename from spacy/ml/_character_embed.py
rename to spacy/ml/character_embed.py
diff --git a/spacy/ml/models/tok2vec.py b/spacy/ml/models/tok2vec.py
index 30c7360ff..79772ad80 100644
--- a/spacy/ml/models/tok2vec.py
+++ b/spacy/ml/models/tok2vec.py
@@ -7,7 +7,7 @@ from thinc.api import expand_window, residual, Maxout, Mish, PyTorchLSTM
from ...tokens import Doc
from ...util import registry
from ...errors import Errors
-from ...ml import _character_embed
+from ...ml import character_embed
from ..staticvectors import StaticVectors
from ..featureextractor import FeatureExtractor
from ...pipeline.tok2vec import Tok2VecListener
@@ -226,7 +226,7 @@ def CharacterEmbed(
if feature is None:
raise ValueError(Errors.E911.format(feat=feature))
char_embed = chain(
- _character_embed.CharacterEmbed(nM=nM, nC=nC),
+ character_embed.CharacterEmbed(nM=nM, nC=nC),
cast(Model[List[Floats2d], Ragged], list2ragged()),
)
feature_extractor: Model[List[Doc], Ragged] = chain(
diff --git a/spacy/ml/tb_framework.pyx b/spacy/ml/tb_framework.pyx
index d6cd5be2b..b5ef2a489 100644
--- a/spacy/ml/tb_framework.pyx
+++ b/spacy/ml/tb_framework.pyx
@@ -191,7 +191,7 @@ def _forward_greedy_cpu(model: Model, TransitionSystem moves, states: List[State
scores = _parse_batch(cblas, moves, &c_states[0], weights, sizes, actions=actions)
def backprop(dY):
- raise ValueError(Errors.E1042)
+ raise ValueError(Errors.E4001)
return (states, scores), backprop
diff --git a/spacy/morphology.pxd b/spacy/morphology.pxd
index 8d449d065..63faab5be 100644
--- a/spacy/morphology.pxd
+++ b/spacy/morphology.pxd
@@ -1,23 +1,41 @@
-from cymem.cymem cimport Pool
-from preshed.maps cimport PreshMap
cimport numpy as np
-from libc.stdint cimport uint64_t
+from libc.stdint cimport uint32_t, uint64_t
+from libcpp.unordered_map cimport unordered_map
+from libcpp.vector cimport vector
+from libcpp.memory cimport shared_ptr
-from .structs cimport MorphAnalysisC
from .strings cimport StringStore
from .typedefs cimport attr_t, hash_t
+cdef cppclass Feature:
+ hash_t field
+ hash_t value
+
+ __init__():
+ this.field = 0
+ this.value = 0
+
+
+cdef cppclass MorphAnalysisC:
+ hash_t key
+ vector[Feature] features
+
+ __init__():
+ this.key = 0
+
cdef class Morphology:
- cdef readonly Pool mem
cdef readonly StringStore strings
- cdef PreshMap tags # Keyed by hash, value is pointer to tag
+ cdef unordered_map[hash_t, shared_ptr[MorphAnalysisC]] tags
- cdef MorphAnalysisC create_morph_tag(self, field_feature_pairs) except *
- cdef int insert(self, MorphAnalysisC tag) except -1
+ cdef shared_ptr[MorphAnalysisC] _lookup_tag(self, hash_t tag_hash)
+ cdef void _intern_morph_tag(self, hash_t tag_key, feats)
+ cdef hash_t _add(self, features)
+ cdef str _normalize_features(self, features)
+ cdef str get_morph_str(self, hash_t morph_key)
+ cdef shared_ptr[MorphAnalysisC] get_morph_c(self, hash_t morph_key)
-
-cdef int check_feature(const MorphAnalysisC* morph, attr_t feature) nogil
-cdef list list_features(const MorphAnalysisC* morph)
-cdef np.ndarray get_by_field(const MorphAnalysisC* morph, attr_t field)
-cdef int get_n_by_field(attr_t* results, const MorphAnalysisC* morph, attr_t field) nogil
+cdef int check_feature(const shared_ptr[MorphAnalysisC] morph, attr_t feature) nogil
+cdef list list_features(const shared_ptr[MorphAnalysisC] morph)
+cdef np.ndarray get_by_field(const shared_ptr[MorphAnalysisC] morph, attr_t field)
+cdef int get_n_by_field(attr_t* results, const shared_ptr[MorphAnalysisC] morph, attr_t field) nogil
diff --git a/spacy/morphology.pyx b/spacy/morphology.pyx
index c3ffc46a1..2c3be7b46 100644
--- a/spacy/morphology.pyx
+++ b/spacy/morphology.pyx
@@ -1,10 +1,10 @@
# cython: infer_types
import numpy
import warnings
+from typing import Union, Tuple, List, Dict, Optional
+from cython.operator cimport dereference as deref
+from libcpp.memory cimport shared_ptr
-from .attrs cimport POS
-
-from .parts_of_speech import IDS as POS_IDS
from .errors import Warnings
from . import symbols
@@ -24,134 +24,187 @@ cdef class Morphology:
EMPTY_MORPH = symbols.NAMES[symbols._]
def __init__(self, StringStore strings):
- self.mem = Pool()
self.strings = strings
- self.tags = PreshMap()
def __reduce__(self):
tags = set([self.get(self.strings[s]) for s in self.strings])
tags -= set([""])
return (unpickle_morphology, (self.strings, sorted(tags)), None, None)
- def add(self, features):
+ cdef shared_ptr[MorphAnalysisC] _lookup_tag(self, hash_t tag_hash):
+ match = self.tags.find(tag_hash)
+ if match != self.tags.const_end():
+ return deref(match).second
+ else:
+ return shared_ptr[MorphAnalysisC]()
+
+ def _normalize_attr(self, attr_key : Union[int, str], attr_value : Union[int, str]) -> Optional[Tuple[str, Union[str, List[str]]]]:
+ if isinstance(attr_key, (int, str)) and isinstance(attr_value, (int, str)):
+ attr_key = self.strings.as_string(attr_key)
+ attr_value = self.strings.as_string(attr_value)
+
+ # Preserve multiple values as a list
+ if self.VALUE_SEP in attr_value:
+ values = attr_value.split(self.VALUE_SEP)
+ values.sort()
+ attr_value = values
+ else:
+ warnings.warn(Warnings.W100.format(feature={attr_key: attr_value}))
+ return None
+
+ return attr_key, attr_value
+
+ def _str_to_normalized_feat_dict(self, feats: str) -> Dict[str, str]:
+ if not feats or feats == self.EMPTY_MORPH:
+ return {}
+
+ out = []
+ for feat in feats.split(self.FEATURE_SEP):
+ field, values = feat.split(self.FIELD_SEP, 1)
+ normalized_attr = self._normalize_attr(field, values)
+ if normalized_attr is None:
+ continue
+ out.append((normalized_attr[0], normalized_attr[1]))
+ out.sort(key=lambda x: x[0])
+ return dict(out)
+
+ def _dict_to_normalized_feat_dict(self, feats: Dict[Union[int, str], Union[int, str]]) -> Dict[str, str]:
+ out = []
+ for field, values in feats.items():
+ normalized_attr = self._normalize_attr(field, values)
+ if normalized_attr is None:
+ continue
+ out.append((normalized_attr[0], normalized_attr[1]))
+ out.sort(key=lambda x: x[0])
+ return dict(out)
+
+
+ def _normalized_feat_dict_to_str(self, feats: Dict[str, str]) -> str:
+ norm_feats_string = self.FEATURE_SEP.join([
+ self.FIELD_SEP.join([field, self.VALUE_SEP.join(values) if isinstance(values, list) else values])
+ for field, values in feats.items()
+ ])
+ return norm_feats_string or self.EMPTY_MORPH
+
+
+ cdef hash_t _add(self, features):
"""Insert a morphological analysis in the morphology table, if not
already present. The morphological analysis may be provided in the UD
FEATS format as a string or in the tag map dict format.
Returns the hash of the new analysis.
"""
- cdef MorphAnalysisC* tag_ptr
+ cdef hash_t tag_hash = 0
+ cdef shared_ptr[MorphAnalysisC] tag
if isinstance(features, str):
if features == "":
features = self.EMPTY_MORPH
- tag_ptr = self.tags.get(self.strings[features])
- if tag_ptr != NULL:
- return tag_ptr.key
- features = self.feats_to_dict(features)
- if not isinstance(features, dict):
+
+ tag_hash = self.strings[features]
+ tag = self._lookup_tag(tag_hash)
+ if tag:
+ return deref(tag).key
+
+ features = self._str_to_normalized_feat_dict(features)
+ elif isinstance(features, dict):
+ features = self._dict_to_normalized_feat_dict(features)
+ else:
warnings.warn(Warnings.W100.format(feature=features))
features = {}
- string_features = {self.strings.as_string(field): self.strings.as_string(values) for field, values in features.items()}
- # intified ("Field", "Field=Value") pairs
- field_feature_pairs = []
- for field in sorted(string_features):
- values = string_features[field]
- for value in values.split(self.VALUE_SEP):
- field_feature_pairs.append((
- self.strings.add(field),
- self.strings.add(field + self.FIELD_SEP + value),
- ))
- cdef MorphAnalysisC tag = self.create_morph_tag(field_feature_pairs)
+
# the hash key for the tag is either the hash of the normalized UFEATS
# string or the hash of an empty placeholder
- norm_feats_string = self.normalize_features(features)
- tag.key = self.strings.add(norm_feats_string)
- self.insert(tag)
- return tag.key
+ norm_feats_string = self._normalized_feat_dict_to_str(features)
+ tag_hash = self.strings.add(norm_feats_string)
+ tag = self._lookup_tag(tag_hash)
+ if tag:
+ return deref(tag).key
- def normalize_features(self, features):
+ self._intern_morph_tag(tag_hash, features)
+ return tag_hash
+
+ cdef void _intern_morph_tag(self, hash_t tag_key, feats):
+ # intified ("Field", "Field=Value") pairs where fields with multiple values have
+ # been split into individual tuples, e.g.:
+ # [("Field1", "Field1=Value1"), ("Field1", "Field1=Value2"),
+ # ("Field2", "Field2=Value3")]
+ field_feature_pairs = []
+
+ # Feat dict is normalized at this point.
+ for field, values in feats.items():
+ field_key = self.strings.add(field)
+ if isinstance(values, list):
+ for value in values:
+ value_key = self.strings.add(field + self.FIELD_SEP + value)
+ field_feature_pairs.append((field_key, value_key))
+ else:
+ # We could box scalar values into a list and use a common
+ # code path to generate features but that incurs a small
+ # but measurable allocation/iteration overhead (as this
+ # branch is taken often enough).
+ value_key = self.strings.add(field + self.FIELD_SEP + values)
+ field_feature_pairs.append((field_key, value_key))
+
+ num_features = len(field_feature_pairs)
+ cdef shared_ptr[MorphAnalysisC] tag = shared_ptr[MorphAnalysisC](new MorphAnalysisC())
+ deref(tag).key = tag_key
+ deref(tag).features.resize(num_features)
+
+ for i in range(num_features):
+ deref(tag).features[i].field = field_feature_pairs[i][0]
+ deref(tag).features[i].value = field_feature_pairs[i][1]
+
+ self.tags[tag_key] = tag
+
+ cdef str get_morph_str(self, hash_t morph_key):
+ cdef shared_ptr[MorphAnalysisC] tag = self._lookup_tag(morph_key)
+ if not tag:
+ return ""
+ else:
+ return self.strings[deref(tag).key]
+
+ cdef shared_ptr[MorphAnalysisC] get_morph_c(self, hash_t morph_key):
+ return self._lookup_tag(morph_key)
+
+ cdef str _normalize_features(self, features):
"""Create a normalized FEATS string from a features string or dict.
features (Union[dict, str]): Features as dict or UFEATS string.
RETURNS (str): Features as normalized UFEATS string.
"""
if isinstance(features, str):
- features = self.feats_to_dict(features)
- if not isinstance(features, dict):
+ features = self._str_to_normalized_feat_dict(features)
+ elif isinstance(features, dict):
+ features = self._dict_to_normalized_feat_dict(features)
+ else:
warnings.warn(Warnings.W100.format(feature=features))
features = {}
- features = self.normalize_attrs(features)
- string_features = {self.strings.as_string(field): self.strings.as_string(values) for field, values in features.items()}
- # normalized UFEATS string with sorted fields and values
- norm_feats_string = self.FEATURE_SEP.join(sorted([
- self.FIELD_SEP.join([field, values])
- for field, values in string_features.items()
- ]))
- return norm_feats_string or self.EMPTY_MORPH
- def normalize_attrs(self, attrs):
- """Convert attrs dict so that POS is always by ID, other features are
- by string. Values separated by VALUE_SEP are sorted.
- """
- out = {}
- attrs = dict(attrs)
- for key, value in attrs.items():
- # convert POS value to ID
- if key == POS or (isinstance(key, str) and key.upper() == "POS"):
- if isinstance(value, str) and value.upper() in POS_IDS:
- value = POS_IDS[value.upper()]
- elif isinstance(value, int) and value not in POS_IDS.values():
- warnings.warn(Warnings.W100.format(feature={key: value}))
- continue
- out[POS] = value
- # accept any string or ID fields and values and convert to strings
- elif isinstance(key, (int, str)) and isinstance(value, (int, str)):
- key = self.strings.as_string(key)
- value = self.strings.as_string(value)
- # sort values
- if self.VALUE_SEP in value:
- value = self.VALUE_SEP.join(sorted(value.split(self.VALUE_SEP)))
- out[key] = value
- else:
- warnings.warn(Warnings.W100.format(feature={key: value}))
- return out
+ return self._normalized_feat_dict_to_str(features)
- cdef MorphAnalysisC create_morph_tag(self, field_feature_pairs) except *:
- """Creates a MorphAnalysisC from a list of intified
- ("Field", "Field=Value") tuples where fields with multiple values have
- been split into individual tuples, e.g.:
- [("Field1", "Field1=Value1"), ("Field1", "Field1=Value2"),
- ("Field2", "Field2=Value3")]
- """
- cdef MorphAnalysisC tag
- tag.length = len(field_feature_pairs)
- if tag.length > 0:
- tag.fields = self.mem.alloc(tag.length, sizeof(attr_t))
- tag.features = self.mem.alloc(tag.length, sizeof(attr_t))
- for i, (field, feature) in enumerate(field_feature_pairs):
- tag.fields[i] = field
- tag.features[i] = feature
- return tag
+ def add(self, features):
+ return self._add(features)
- cdef int insert(self, MorphAnalysisC tag) except -1:
- cdef hash_t key = tag.key
- if self.tags.get(key) == NULL:
- tag_ptr = self.mem.alloc(1, sizeof(MorphAnalysisC))
- tag_ptr[0] = tag
- self.tags.set(key, tag_ptr)
+ def get(self, morph_key):
+ return self.get_morph_str(morph_key)
- def get(self, hash_t morph):
- tag = self.tags.get(morph)
- if tag == NULL:
- return ""
- else:
- return self.strings[tag.key]
+ def normalize_features(self, features):
+ return self._normalize_features(features)
@staticmethod
- def feats_to_dict(feats):
+ def feats_to_dict(feats, *, sort_values=True):
if not feats or feats == Morphology.EMPTY_MORPH:
return {}
- return {field: Morphology.VALUE_SEP.join(sorted(values.split(Morphology.VALUE_SEP))) for field, values in
- [feat.split(Morphology.FIELD_SEP) for feat in feats.split(Morphology.FEATURE_SEP)]}
+
+ out = {}
+ for feat in feats.split(Morphology.FEATURE_SEP):
+ field, values = feat.split(Morphology.FIELD_SEP, 1)
+ if sort_values:
+ values = values.split(Morphology.VALUE_SEP)
+ values.sort()
+ values = Morphology.VALUE_SEP.join(values)
+
+ out[field] = values
+ return out
@staticmethod
def dict_to_feats(feats_dict):
@@ -160,34 +213,34 @@ cdef class Morphology:
return Morphology.FEATURE_SEP.join(sorted([Morphology.FIELD_SEP.join([field, Morphology.VALUE_SEP.join(sorted(values.split(Morphology.VALUE_SEP)))]) for field, values in feats_dict.items()]))
-cdef int check_feature(const MorphAnalysisC* morph, attr_t feature) nogil:
+cdef int check_feature(const shared_ptr[MorphAnalysisC] morph, attr_t feature) nogil:
cdef int i
- for i in range(morph.length):
- if morph.features[i] == feature:
+ for i in range(deref(morph).features.size()):
+ if deref(morph).features[i].value == feature:
return True
return False
-cdef list list_features(const MorphAnalysisC* morph):
+cdef list list_features(const shared_ptr[MorphAnalysisC] morph):
cdef int i
features = []
- for i in range(morph.length):
- features.append(morph.features[i])
+ for i in range(deref(morph).features.size()):
+ features.append(deref(morph).features[i].value)
return features
-cdef np.ndarray get_by_field(const MorphAnalysisC* morph, attr_t field):
- cdef np.ndarray results = numpy.zeros((morph.length,), dtype="uint64")
+cdef np.ndarray get_by_field(const shared_ptr[MorphAnalysisC] morph, attr_t field):
+ cdef np.ndarray results = numpy.zeros((deref(morph).features.size(),), dtype="uint64")
n = get_n_by_field(results.data, morph, field)
return results[:n]
-cdef int get_n_by_field(attr_t* results, const MorphAnalysisC* morph, attr_t field) nogil:
+cdef int get_n_by_field(attr_t* results, const shared_ptr[MorphAnalysisC] morph, attr_t field) nogil:
cdef int n_results = 0
cdef int i
- for i in range(morph.length):
- if morph.fields[i] == field:
- results[n_results] = morph.features[i]
+ for i in range(deref(morph).features.size()):
+ if deref(morph).features[i].field == field:
+ results[n_results] = deref(morph).features[i].value
n_results += 1
return n_results
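Internally the morphology table now stores `shared_ptr`-wrapped `MorphAnalysisC` structs keyed by the hash of the normalized FEATS string, but the Python-facing behaviour of `add`, `get` and `feats_to_dict` is unchanged. A sketch of the round trip:

```python
from spacy.strings import StringStore
from spacy.morphology import Morphology

morphology = Morphology(StringStore())

# `add` normalizes the FEATS string (fields and values sorted) and returns its
# hash; `get` recovers the normalized string from that hash.
key = morphology.add("Number=Sing|Case=Nom")
print(morphology.get(key))                        # "Case=Nom|Number=Sing"
print(Morphology.feats_to_dict("Case=Nom|Number=Sing"))
# {"Case": "Nom", "Number": "Sing"}
```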
diff --git a/spacy/parts_of_speech.pxd b/spacy/parts_of_speech.pxd
index 0bf5b4789..67390ad63 100644
--- a/spacy/parts_of_speech.pxd
+++ b/spacy/parts_of_speech.pxd
@@ -3,22 +3,22 @@ from . cimport symbols
cpdef enum univ_pos_t:
NO_TAG = 0
ADJ = symbols.ADJ
- ADP
- ADV
- AUX
- CONJ
- CCONJ # U20
- DET
- INTJ
- NOUN
- NUM
- PART
- PRON
- PROPN
- PUNCT
- SCONJ
- SYM
- VERB
- X
- EOL
- SPACE
+ ADP = symbols.ADP
+ ADV = symbols.ADV
+ AUX = symbols.AUX
+ CONJ = symbols.CONJ
+ CCONJ = symbols.CCONJ # U20
+ DET = symbols.DET
+ INTJ = symbols.INTJ
+ NOUN = symbols.NOUN
+ NUM = symbols.NUM
+ PART = symbols.PART
+ PRON = symbols.PRON
+ PROPN = symbols.PROPN
+ PUNCT = symbols.PUNCT
+ SCONJ = symbols.SCONJ
+ SYM = symbols.SYM
+ VERB = symbols.VERB
+ X = symbols.X
+ EOL = symbols.EOL
+ SPACE = symbols.SPACE
diff --git a/spacy/pipeline/__init__.py b/spacy/pipeline/__init__.py
index 26931606b..4744a989b 100644
--- a/spacy/pipeline/__init__.py
+++ b/spacy/pipeline/__init__.py
@@ -1,9 +1,9 @@
-from .attributeruler import AttributeRuler
+from .attribute_ruler import AttributeRuler
from .dep_parser import DependencyParser
from .edit_tree_lemmatizer import EditTreeLemmatizer
from .entity_linker import EntityLinker
from .ner import EntityRecognizer
-from .entityruler import EntityRuler
+from .entity_ruler import EntityRuler
from .lemmatizer import Lemmatizer
from .morphologizer import Morphologizer
from .pipe import Pipe
diff --git a/spacy/pipeline/_parser_internals/arc_eager.pyx b/spacy/pipeline/_parser_internals/arc_eager.pyx
index a487292ca..9c358475a 100644
--- a/spacy/pipeline/_parser_internals/arc_eager.pyx
+++ b/spacy/pipeline/_parser_internals/arc_eager.pyx
@@ -10,6 +10,7 @@ from ...strings cimport hash_string
from ...structs cimport TokenC
from ...tokens.doc cimport Doc, set_children_from_heads
from ...tokens.token cimport MISSING_DEP
+from ...training import split_bilu_label
from ...training.example cimport Example
from .stateclass cimport StateClass
from ._state cimport StateC, ArcC
@@ -687,7 +688,7 @@ cdef class ArcEager(TransitionSystem):
return self.c[name_or_id]
name = name_or_id
if '-' in name:
- move_str, label_str = name.split('-', 1)
+ move_str, label_str = split_bilu_label(name)
label = self.strings[label_str]
else:
move_str = name
diff --git a/spacy/pipeline/_parser_internals/ner.pyx b/spacy/pipeline/_parser_internals/ner.pyx
index e411081c5..d4d564dc7 100644
--- a/spacy/pipeline/_parser_internals/ner.pyx
+++ b/spacy/pipeline/_parser_internals/ner.pyx
@@ -14,6 +14,7 @@ from ...typedefs cimport weight_t, attr_t
from ...lexeme cimport Lexeme
from ...attrs cimport IS_SPACE
from ...structs cimport TokenC, SpanC
+from ...training import split_bilu_label
from ...training.example cimport Example
from .search cimport Beam
from .stateclass cimport StateClass
@@ -180,7 +181,7 @@ cdef class BiluoPushDown(TransitionSystem):
if name == '-' or name == '' or name is None:
return Transition(clas=0, move=MISSING, label=0, score=0)
elif '-' in name:
- move_str, label_str = name.split('-', 1)
+ move_str, label_str = split_bilu_label(name)
# Deprecated, hacky way to denote 'not this entity'
if label_str.startswith('!'):
raise ValueError(Errors.E869.format(label=name))
diff --git a/spacy/pipeline/attributeruler.py b/spacy/pipeline/attribute_ruler.py
similarity index 99%
rename from spacy/pipeline/attributeruler.py
rename to spacy/pipeline/attribute_ruler.py
index 0d9494865..ac998a61d 100644
--- a/spacy/pipeline/attributeruler.py
+++ b/spacy/pipeline/attribute_ruler.py
@@ -11,7 +11,7 @@ from ..matcher import Matcher
from ..scorer import Scorer
from ..symbols import IDS
from ..tokens import Doc, Span
-from ..tokens._retokenize import normalize_token_attrs, set_token_attrs
+from ..tokens.retokenizer import normalize_token_attrs, set_token_attrs
from ..vocab import Vocab
from ..util import SimpleFrozenList, registry
from .. import util
diff --git a/spacy/pipeline/dep_parser.py b/spacy/pipeline/dep_parser.py
index 7cf11de64..f6689e017 100644
--- a/spacy/pipeline/dep_parser.py
+++ b/spacy/pipeline/dep_parser.py
@@ -12,6 +12,7 @@ from ..language import Language
from ._parser_internals import nonproj
from ._parser_internals.nonproj import DELIMITER
from ..scorer import Scorer
+from ..training import remove_bilu_prefix
from ..util import registry
@@ -318,7 +319,7 @@ class DependencyParser(Parser):
# Get the labels from the model by looking at the available moves
for move in self.move_names:
if "-" in move:
- label = move.split("-")[1]
+ label = remove_bilu_prefix(move)
if DELIMITER in label:
label = label.split(DELIMITER)[1]
labels.add(label)
diff --git a/spacy/pipeline/edit_tree_lemmatizer.py b/spacy/pipeline/edit_tree_lemmatizer.py
index b7d615f6d..37aa9663b 100644
--- a/spacy/pipeline/edit_tree_lemmatizer.py
+++ b/spacy/pipeline/edit_tree_lemmatizer.py
@@ -7,7 +7,7 @@ import numpy as np
import srsly
from thinc.api import Config, Model, SequenceCategoricalCrossentropy
-from thinc.types import Floats2d, Ints1d, Ints2d
+from thinc.types import ArrayXd, Floats2d, Ints1d
from ._edit_tree_internals.edit_trees import EditTrees
from ._edit_tree_internals.schemas import validate_edit_tree
@@ -21,6 +21,9 @@ from ..vocab import Vocab
from .. import util
+ActivationsT = Dict[str, Union[List[Floats2d], List[Ints1d]]]
+
+
default_model_config = """
[model]
@architectures = "spacy.Tagger.v2"
@@ -49,6 +52,7 @@ DEFAULT_EDIT_TREE_LEMMATIZER_MODEL = Config().from_str(default_model_config)["mo
"overwrite": False,
"top_k": 1,
"scorer": {"@scorers": "spacy.lemmatizer_scorer.v1"},
+ "save_activations": False,
},
default_score_weights={"lemma_acc": 1.0},
)
@@ -61,6 +65,7 @@ def make_edit_tree_lemmatizer(
overwrite: bool,
top_k: int,
scorer: Optional[Callable],
+ save_activations: bool,
):
"""Construct an EditTreeLemmatizer component."""
return EditTreeLemmatizer(
@@ -72,6 +77,7 @@ def make_edit_tree_lemmatizer(
overwrite=overwrite,
top_k=top_k,
scorer=scorer,
+ save_activations=save_activations,
)
@@ -91,6 +97,7 @@ class EditTreeLemmatizer(TrainablePipe):
overwrite: bool = False,
top_k: int = 1,
scorer: Optional[Callable] = lemmatizer_score,
+ save_activations: bool = False,
):
"""
Construct an edit tree lemmatizer.
@@ -102,6 +109,7 @@ class EditTreeLemmatizer(TrainablePipe):
frequency in the training data.
overwrite (bool): overwrite existing lemma annotations.
top_k (int): try to apply at most the k most probable edit trees.
+ save_activations (bool): save model activations in Doc when annotating.
"""
self.vocab = vocab
self.model = model
@@ -116,6 +124,7 @@ class EditTreeLemmatizer(TrainablePipe):
self.cfg: Dict[str, Any] = {"labels": []}
self.scorer = scorer
+ self.save_activations = save_activations
def get_loss(
self, examples: Iterable[Example], scores: List[Floats2d]
@@ -144,21 +153,24 @@ class EditTreeLemmatizer(TrainablePipe):
return float(loss), d_scores
- def predict(self, docs: Iterable[Doc]) -> List[Ints2d]:
+ def predict(self, docs: Iterable[Doc]) -> ActivationsT:
n_docs = len(list(docs))
if not any(len(doc) for doc in docs):
# Handle cases where there are no tokens in any docs.
n_labels = len(self.cfg["labels"])
- guesses: List[Ints2d] = [
+ guesses: List[Ints1d] = [
+ self.model.ops.alloc((0,), dtype="i") for doc in docs
+ ]
+ scores: List[Floats2d] = [
self.model.ops.alloc((0, n_labels), dtype="i") for doc in docs
]
assert len(guesses) == n_docs
- return guesses
+ return {"probabilities": scores, "tree_ids": guesses}
scores = self.model.predict(docs)
assert len(scores) == n_docs
guesses = self._scores2guesses(docs, scores)
assert len(guesses) == n_docs
- return guesses
+ return {"probabilities": scores, "tree_ids": guesses}
def _scores2guesses(self, docs, scores):
guesses = []
@@ -186,8 +198,13 @@ class EditTreeLemmatizer(TrainablePipe):
return guesses
- def set_annotations(self, docs: Iterable[Doc], batch_tree_ids):
+ def set_annotations(self, docs: Iterable[Doc], activations: ActivationsT):
+ batch_tree_ids = activations["tree_ids"]
for i, doc in enumerate(docs):
+ if self.save_activations:
+ doc.activations[self.name] = {}
+ for act_name, acts in activations.items():
+ doc.activations[self.name][act_name] = acts[i]
doc_tree_ids = batch_tree_ids[i]
if hasattr(doc_tree_ids, "get"):
doc_tree_ids = doc_tree_ids.get()
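When `save_activations` is enabled, the predicted tree IDs and the per-label probabilities are stored per document under `doc.activations`. A sketch, assuming a trained pipeline (here called `my_pipeline`, a placeholder) that contains a `trainable_lemmatizer` component:

```python
import spacy

nlp = spacy.load("my_pipeline")  # hypothetical pipeline with a trainable_lemmatizer
nlp.get_pipe("trainable_lemmatizer").save_activations = True

doc = nlp("The dogs were running.")
acts = doc.activations["trainable_lemmatizer"]
print(acts["tree_ids"].shape, acts["probabilities"].shape)
# one tree ID per token, and one probability row per token
```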
diff --git a/spacy/pipeline/entity_linker.py b/spacy/pipeline/entity_linker.py
index aa7985a9c..ac05cb840 100644
--- a/spacy/pipeline/entity_linker.py
+++ b/spacy/pipeline/entity_linker.py
@@ -1,5 +1,7 @@
-from typing import Optional, Iterable, Callable, Dict, Union, List, Any
-from thinc.types import Floats2d
+from typing import Optional, Iterable, Callable, Dict, Sequence, Union, List, Any
+from typing import cast
+from numpy import dtype
+from thinc.types import Floats1d, Floats2d, Ints1d, Ragged
from pathlib import Path
from itertools import islice
import srsly
@@ -21,6 +23,11 @@ from ..util import SimpleFrozenList, registry
from .. import util
from ..scorer import Scorer
+
+ActivationsT = Dict[str, Union[List[Ragged], List[str]]]
+
+KNOWLEDGE_BASE_IDS = "kb_ids"
+
# See #9050
BACKWARD_OVERWRITE = True
@@ -56,6 +63,8 @@ DEFAULT_NEL_MODEL = Config().from_str(default_model_config)["model"]
"overwrite": True,
"scorer": {"@scorers": "spacy.entity_linker_scorer.v1"},
"use_gold_ents": True,
+ "threshold": None,
+ "save_activations": False,
},
default_score_weights={
"nel_micro_f": 1.0,
@@ -77,6 +86,8 @@ def make_entity_linker(
overwrite: bool,
scorer: Optional[Callable],
use_gold_ents: bool,
+ threshold: Optional[float] = None,
+ save_activations: bool,
):
"""Construct an EntityLinker component.
@@ -91,6 +102,11 @@ def make_entity_linker(
get_candidates (Callable[[KnowledgeBase, "Span"], Iterable[Candidate]]): Function that
produces a list of candidates, given a certain knowledge base and a textual mention.
scorer (Optional[Callable]): The scoring method.
+ use_gold_ents (bool): Whether to copy entities from gold docs or not. If false, another
+ component must provide entity annotations.
+ threshold (Optional[float]): Confidence threshold for entity predictions. If confidence is below the threshold,
+ prediction is discarded. If None, predictions are not filtered by any threshold.
+ save_activations (bool): save model activations in Doc when annotating.
"""
if not model.attrs.get("include_span_maker", False):
@@ -121,6 +137,8 @@ def make_entity_linker(
overwrite=overwrite,
scorer=scorer,
use_gold_ents=use_gold_ents,
+ threshold=threshold,
+ save_activations=save_activations,
)
@@ -156,6 +174,8 @@ class EntityLinker(TrainablePipe):
overwrite: bool = BACKWARD_OVERWRITE,
scorer: Optional[Callable] = entity_linker_score,
use_gold_ents: bool,
+ threshold: Optional[float] = None,
+ save_activations: bool = False,
) -> None:
"""Initialize an entity linker.
@@ -174,9 +194,20 @@ class EntityLinker(TrainablePipe):
Scorer.score_links.
use_gold_ents (bool): Whether to copy entities from gold docs or not. If false, another
component must provide entity annotations.
-
+ threshold (Optional[float]): Confidence threshold for entity predictions. If confidence is below the
+ threshold, prediction is discarded. If None, predictions are not filtered by any threshold.
DOCS: https://spacy.io/api/entitylinker#init
"""
+
+ if threshold is not None and not (0 <= threshold <= 1):
+ raise ValueError(
+ Errors.E1043.format(
+ range_start=0,
+ range_end=1,
+ value=threshold,
+ )
+ )
+
self.vocab = vocab
self.model = model
self.name = name
@@ -192,6 +223,8 @@ class EntityLinker(TrainablePipe):
self.kb = empty_kb(entity_vector_length)(self.vocab)
self.scorer = scorer
self.use_gold_ents = use_gold_ents
+ self.threshold = threshold
+ self.save_activations = save_activations
def set_kb(self, kb_loader: Callable[[Vocab], KnowledgeBase]):
"""Define the KB of this pipe by providing a function that will
@@ -377,7 +410,7 @@ class EntityLinker(TrainablePipe):
loss = loss / len(entity_encodings)
return float(loss), out
- def predict(self, docs: Iterable[Doc]) -> List[str]:
+ def predict(self, docs: Iterable[Doc]) -> ActivationsT:
"""Apply the pipeline's model to a batch of docs, without modifying them.
Returns the KB IDs for each entity in each doc, including NIL if there is
no prediction.
@@ -390,13 +423,20 @@ class EntityLinker(TrainablePipe):
self.validate_kb()
entity_count = 0
final_kb_ids: List[str] = []
- xp = self.model.ops.xp
+ ops = self.model.ops
+ xp = ops.xp
+ docs_ents: List[Ragged] = []
+ docs_scores: List[Ragged] = []
if not docs:
- return final_kb_ids
+ return {KNOWLEDGE_BASE_IDS: final_kb_ids, "ents": docs_ents, "scores": docs_scores}
if isinstance(docs, Doc):
docs = [docs]
- for i, doc in enumerate(docs):
+ for doc in docs:
+ doc_ents: List[Ints1d] = []
+ doc_scores: List[Floats1d] = []
if len(doc) == 0:
+ docs_scores.append(Ragged(ops.alloc1f(0), ops.alloc1i(0)))
+ docs_ents.append(Ragged(xp.zeros(0, dtype="uint64"), ops.alloc1i(0)))
continue
sentences = [s for s in doc.sents]
# Looping through each entity (TODO: rewrite)
@@ -419,15 +459,32 @@ class EntityLinker(TrainablePipe):
if ent.label_ in self.labels_discard:
# ignoring this entity - setting to NIL
final_kb_ids.append(self.NIL)
+ self._add_activations(
+ doc_scores=doc_scores,
+ doc_ents=doc_ents,
+ scores=[0.0],
+ ents=[0],
+ )
else:
candidates = list(self.get_candidates(self.kb, ent))
if not candidates:
# no prediction possible for this entity - setting to NIL
final_kb_ids.append(self.NIL)
- elif len(candidates) == 1:
+ self._add_activations(
+ doc_scores=doc_scores,
+ doc_ents=doc_ents,
+ scores=[0.0],
+ ents=[0],
+ )
+ elif len(candidates) == 1 and self.threshold is None:
# shortcut for efficiency reasons: take the 1 candidate
- # TODO: thresholding
final_kb_ids.append(candidates[0].entity_)
+ self._add_activations(
+ doc_scores=doc_scores,
+ doc_ents=doc_ents,
+ scores=[1.0],
+ ents=[candidates[0].entity_],
+ )
else:
random.shuffle(candidates)
# set all prior probabilities to 0 if incl_prior=False
@@ -455,31 +512,53 @@ class EntityLinker(TrainablePipe):
if sims.shape != prior_probs.shape:
raise ValueError(Errors.E161)
scores = prior_probs + sims - (prior_probs * sims)
- # TODO: thresholding
- best_index = scores.argmax().item()
- best_candidate = candidates[best_index]
- final_kb_ids.append(best_candidate.entity_)
+ final_kb_ids.append(
+ candidates[scores.argmax().item()].entity_
+ if self.threshold is None or scores.max() >= self.threshold
+ else EntityLinker.NIL
+ )
+ self._add_activations(
+ doc_scores=doc_scores,
+ doc_ents=doc_ents,
+ scores=scores,
+ ents=[c.entity for c in candidates],
+ )
+ self._add_doc_activations(
+ docs_scores=docs_scores,
+ docs_ents=docs_ents,
+ doc_scores=doc_scores,
+ doc_ents=doc_ents,
+ )
if not (len(final_kb_ids) == entity_count):
err = Errors.E147.format(
method="predict", msg="result variables not of equal length"
)
raise RuntimeError(err)
- return final_kb_ids
+ return {KNOWLEDGE_BASE_IDS: final_kb_ids, "ents": docs_ents, "scores": docs_scores}
- def set_annotations(self, docs: Iterable[Doc], kb_ids: List[str]) -> None:
+ def set_annotations(self, docs: Iterable[Doc], activations: ActivationsT) -> None:
"""Modify a batch of documents, using pre-computed scores.
docs (Iterable[Doc]): The documents to modify.
- kb_ids (List[str]): The IDs to set, produced by EntityLinker.predict.
+ activations (ActivationsT): The activations used for setting annotations, produced
+ by EntityLinker.predict.
DOCS: https://spacy.io/api/entitylinker#set_annotations
"""
+ kb_ids = cast(List[str], activations[KNOWLEDGE_BASE_IDS])
count_ents = len([ent for doc in docs for ent in doc.ents])
if count_ents != len(kb_ids):
raise ValueError(Errors.E148.format(ents=count_ents, ids=len(kb_ids)))
i = 0
overwrite = self.cfg["overwrite"]
- for doc in docs:
+ for j, doc in enumerate(docs):
+ if self.save_activations:
+ doc.activations[self.name] = {}
+ for act_name, acts in activations.items():
+ if act_name != KNOWLEDGE_BASE_IDS:
+ # We only copy activations that are Ragged.
+ doc.activations[self.name][act_name] = cast(Ragged, acts[j])
+
for ent in doc.ents:
kb_id = kb_ids[i]
i += 1
@@ -578,3 +657,32 @@ class EntityLinker(TrainablePipe):
def add_label(self, label):
raise NotImplementedError
+
+ def _add_doc_activations(
+ self,
+ *,
+ docs_scores: List[Ragged],
+ docs_ents: List[Ragged],
+ doc_scores: List[Floats1d],
+ doc_ents: List[Ints1d],
+ ):
+ if not self.save_activations:
+ return
+ ops = self.model.ops
+ lengths = ops.asarray1i([s.shape[0] for s in doc_scores])
+ docs_scores.append(Ragged(ops.flatten(doc_scores), lengths))
+ docs_ents.append(Ragged(ops.flatten(doc_ents), lengths))
+
+ def _add_activations(
+ self,
+ *,
+ doc_scores: List[Floats1d],
+ doc_ents: List[Ints1d],
+ scores: Sequence[float],
+ ents: Sequence[int],
+ ):
+ if not self.save_activations:
+ return
+ ops = self.model.ops
+ doc_scores.append(ops.asarray1f(scores))
+ doc_ents.append(ops.asarray1i(ents, dtype="uint64"))
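For reference, a minimal sketch (not part of this diff) of how the new `save_activations` option on the entity linker could be consumed once these changes land; the pipeline name and text are placeholders:

    import spacy

    # Hypothetical pipeline that contains a trained entity_linker component.
    nlp = spacy.load("my_el_pipeline")
    linker = nlp.get_pipe("entity_linker")
    linker.save_activations = True  # store candidate KB ids and scores on the Doc

    doc = nlp("Douglas Adams wrote the guide.")
    acts = doc.activations["entity_linker"]
    # "ents" and "scores" are Ragged arrays with one row of candidates per entity;
    # the lengths line up with the number of candidates considered per mention.
    print(acts["ents"].lengths, acts["scores"].dataXd)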
diff --git a/spacy/pipeline/entityruler.py b/spacy/pipeline/entity_ruler.py
similarity index 100%
rename from spacy/pipeline/entityruler.py
rename to spacy/pipeline/entity_ruler.py
diff --git a/spacy/pipeline/legacy/entity_linker.py b/spacy/pipeline/legacy/entity_linker.py
index d723bdbe5..2f8a1f8ea 100644
--- a/spacy/pipeline/legacy/entity_linker.py
+++ b/spacy/pipeline/legacy/entity_linker.py
@@ -7,7 +7,7 @@ from pathlib import Path
from itertools import islice
import srsly
import random
-from thinc.api import CosineDistance, Model, Optimizer, Config
+from thinc.api import CosineDistance, Model, Optimizer
from thinc.api import set_dropout_rate
import warnings
@@ -20,7 +20,7 @@ from ...language import Language
from ...vocab import Vocab
from ...training import Example, validate_examples, validate_get_examples
from ...errors import Errors, Warnings
-from ...util import SimpleFrozenList, registry
+from ...util import SimpleFrozenList
from ... import util
from ...scorer import Scorer
@@ -70,7 +70,6 @@ class EntityLinker_v1(TrainablePipe):
produces a list of candidates, given a certain knowledge base and a textual mention.
scorer (Optional[Callable]): The scoring method. Defaults to
Scorer.score_links.
-
DOCS: https://spacy.io/api/entitylinker#init
"""
self.vocab = vocab
@@ -272,7 +271,6 @@ class EntityLinker_v1(TrainablePipe):
final_kb_ids.append(self.NIL)
elif len(candidates) == 1:
# shortcut for efficiency reasons: take the 1 candidate
- # TODO: thresholding
final_kb_ids.append(candidates[0].entity_)
else:
random.shuffle(candidates)
@@ -301,7 +299,6 @@ class EntityLinker_v1(TrainablePipe):
if sims.shape != prior_probs.shape:
raise ValueError(Errors.E161)
scores = prior_probs + sims - (prior_probs * sims)
- # TODO: thresholding
best_index = scores.argmax().item()
best_candidate = candidates[best_index]
final_kb_ids.append(best_candidate.entity_)
diff --git a/spacy/pipeline/morphologizer.pyx b/spacy/pipeline/morphologizer.pyx
index 24f98508f..782a1dabe 100644
--- a/spacy/pipeline/morphologizer.pyx
+++ b/spacy/pipeline/morphologizer.pyx
@@ -1,7 +1,8 @@
# cython: infer_types=True, profile=True, binding=True
-from typing import Optional, Union, Dict, Callable
+from typing import Callable, Dict, Iterable, List, Optional, Union
import srsly
from thinc.api import SequenceCategoricalCrossentropy, Model, Config
+from thinc.types import Floats2d, Ints1d
from itertools import islice
from ..tokens.doc cimport Doc
@@ -13,7 +14,7 @@ from ..symbols import POS
from ..language import Language
from ..errors import Errors
from .pipe import deserialize_config
-from .tagger import Tagger
+from .tagger import ActivationsT, Tagger
from .. import util
from ..scorer import Scorer
from ..training import validate_examples, validate_get_examples
@@ -52,7 +53,13 @@ DEFAULT_MORPH_MODEL = Config().from_str(default_model_config)["model"]
@Language.factory(
"morphologizer",
assigns=["token.morph", "token.pos"],
- default_config={"model": DEFAULT_MORPH_MODEL, "overwrite": True, "extend": False, "scorer": {"@scorers": "spacy.morphologizer_scorer.v1"}},
+ default_config={
+ "model": DEFAULT_MORPH_MODEL,
+ "overwrite": True,
+ "extend": False,
+ "scorer": {"@scorers": "spacy.morphologizer_scorer.v1"},
+ "save_activations": False,
+ },
default_score_weights={"pos_acc": 0.5, "morph_acc": 0.5, "morph_per_feat": None},
)
def make_morphologizer(
@@ -62,8 +69,10 @@ def make_morphologizer(
overwrite: bool,
extend: bool,
scorer: Optional[Callable],
+ save_activations: bool,
):
- return Morphologizer(nlp.vocab, model, name, overwrite=overwrite, extend=extend, scorer=scorer)
+ return Morphologizer(nlp.vocab, model, name, overwrite=overwrite, extend=extend, scorer=scorer,
+ save_activations=save_activations)
def morphologizer_score(examples, **kwargs):
@@ -95,6 +104,7 @@ class Morphologizer(Tagger):
overwrite: bool = BACKWARD_OVERWRITE,
extend: bool = BACKWARD_EXTEND,
scorer: Optional[Callable] = morphologizer_score,
+ save_activations: bool = False,
):
"""Initialize a morphologizer.
@@ -105,6 +115,7 @@ class Morphologizer(Tagger):
scorer (Optional[Callable]): The scoring method. Defaults to
Scorer.score_token_attr for the attributes "pos" and "morph" and
Scorer.score_token_attr_per_feat for the attribute "morph".
+ save_activations (bool): save model activations in Doc when annotating.
DOCS: https://spacy.io/api/morphologizer#init
"""
@@ -124,11 +135,12 @@ class Morphologizer(Tagger):
}
self.cfg = dict(sorted(cfg.items()))
self.scorer = scorer
+ self.save_activations = save_activations
@property
def labels(self):
- """RETURNS (Tuple[str]): The labels currently added to the component."""
- return tuple(self.cfg["labels_morph"].keys())
+ """RETURNS (Iterable[str]): The labels currently added to the component."""
+ return self.cfg["labels_morph"].keys()
@property
def label_data(self) -> Dict[str, Dict[str, Union[str, float, int, None]]]:
@@ -151,7 +163,7 @@ class Morphologizer(Tagger):
# normalize label
norm_label = self.vocab.morphology.normalize_features(label)
# extract separate POS and morph tags
- label_dict = Morphology.feats_to_dict(label)
+ label_dict = Morphology.feats_to_dict(label, sort_values=False)
pos = label_dict.get(self.POS_FEAT, "")
if self.POS_FEAT in label_dict:
label_dict.pop(self.POS_FEAT)
@@ -189,7 +201,7 @@ class Morphologizer(Tagger):
continue
morph = str(token.morph)
# create and add the combined morph+POS label
- morph_dict = Morphology.feats_to_dict(morph)
+ morph_dict = Morphology.feats_to_dict(morph, sort_values=False)
if pos:
morph_dict[self.POS_FEAT] = pos
norm_label = self.vocab.strings[self.vocab.morphology.add(morph_dict)]
@@ -206,7 +218,7 @@ class Morphologizer(Tagger):
for i, token in enumerate(example.reference):
pos = token.pos_
morph = str(token.morph)
- morph_dict = Morphology.feats_to_dict(morph)
+ morph_dict = Morphology.feats_to_dict(morph, sort_values=False)
if pos:
morph_dict[self.POS_FEAT] = pos
norm_label = self.vocab.strings[self.vocab.morphology.add(morph_dict)]
@@ -217,40 +229,48 @@ class Morphologizer(Tagger):
assert len(label_sample) > 0, Errors.E923.format(name=self.name)
self.model.initialize(X=doc_sample, Y=label_sample)
- def set_annotations(self, docs, batch_tag_ids):
+ def set_annotations(self, docs: Iterable[Doc], activations: ActivationsT):
"""Modify a batch of documents, using pre-computed scores.
docs (Iterable[Doc]): The documents to modify.
- batch_tag_ids: The IDs to set, produced by Morphologizer.predict.
+ activations (ActivationsT): The activations used for setting annotations, produced by Morphologizer.predict.
DOCS: https://spacy.io/api/morphologizer#set_annotations
"""
+ batch_tag_ids = activations["label_ids"]
if isinstance(docs, Doc):
docs = [docs]
cdef Doc doc
cdef Vocab vocab = self.vocab
cdef bint overwrite = self.cfg["overwrite"]
cdef bint extend = self.cfg["extend"]
- labels = self.labels
+
+ # We require random access for the upcoming ops, so we need
+ # to allocate a compatible container out of the iterable.
+ labels = tuple(self.labels)
for i, doc in enumerate(docs):
+ if self.save_activations:
+ doc.activations[self.name] = {}
+ for act_name, acts in activations.items():
+ doc.activations[self.name][act_name] = acts[i]
doc_tag_ids = batch_tag_ids[i]
if hasattr(doc_tag_ids, "get"):
doc_tag_ids = doc_tag_ids.get()
for j, tag_id in enumerate(doc_tag_ids):
- morph = labels[tag_id]
+ morph = labels[int(tag_id)]
# set morph
if doc.c[j].morph == 0 or overwrite or extend:
if overwrite and extend:
# morphologizer morph overwrites any existing features
# while extending
- extended_morph = Morphology.feats_to_dict(self.vocab.strings[doc.c[j].morph])
- extended_morph.update(Morphology.feats_to_dict(self.cfg["labels_morph"].get(morph, 0)))
+ extended_morph = Morphology.feats_to_dict(self.vocab.strings[doc.c[j].morph], sort_values=False)
+ extended_morph.update(Morphology.feats_to_dict(self.cfg["labels_morph"].get(morph, 0), sort_values=False))
doc.c[j].morph = self.vocab.morphology.add(extended_morph)
elif extend:
# existing features are preserved and any new features
# are added
- extended_morph = Morphology.feats_to_dict(self.cfg["labels_morph"].get(morph, 0))
- extended_morph.update(Morphology.feats_to_dict(self.vocab.strings[doc.c[j].morph]))
+ extended_morph = Morphology.feats_to_dict(self.cfg["labels_morph"].get(morph, 0), sort_values=False)
+ extended_morph.update(Morphology.feats_to_dict(self.vocab.strings[doc.c[j].morph], sort_values=False))
doc.c[j].morph = self.vocab.morphology.add(extended_morph)
else:
# clobber
@@ -270,7 +290,7 @@ class Morphologizer(Tagger):
DOCS: https://spacy.io/api/morphologizer#get_loss
"""
validate_examples(examples, "Morphologizer.get_loss")
- loss_func = SequenceCategoricalCrossentropy(names=self.labels, normalize=False)
+ loss_func = SequenceCategoricalCrossentropy(names=tuple(self.labels), normalize=False)
truths = []
for eg in examples:
eg_truths = []
@@ -291,7 +311,7 @@ class Morphologizer(Tagger):
label = None
# Otherwise, generate the combined label
else:
- label_dict = Morphology.feats_to_dict(morph)
+ label_dict = Morphology.feats_to_dict(morph, sort_values=False)
if pos:
label_dict[self.POS_FEAT] = pos
label = self.vocab.strings[self.vocab.morphology.add(label_dict)]
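As a usage sketch (illustrative, not part of the diff), the new `save_activations` flag is wired through the morphologizer factory config like the existing settings:

    import spacy

    nlp = spacy.blank("en")
    # The factory config now accepts save_activations next to overwrite/extend/scorer.
    morphologizer = nlp.add_pipe("morphologizer", config={"save_activations": True})
    assert morphologizer.save_activations is True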
diff --git a/spacy/pipeline/ner.py b/spacy/pipeline/ner.py
index c446748ac..651a0b3e3 100644
--- a/spacy/pipeline/ner.py
+++ b/spacy/pipeline/ner.py
@@ -6,11 +6,11 @@ from thinc.api import Model, Config
from ._parser_internals.transition_system import TransitionSystem
from .transition_parser import Parser
from ._parser_internals.ner import BiluoPushDown
-
from ..language import Language
from ..scorer import get_ner_prf, PRFScore
from ..training import validate_examples
from ..util import registry
+from ..training import remove_bilu_prefix
default_model_config = """
@@ -252,11 +252,8 @@ class EntityRecognizer(Parser):
def labels(self):
# Get the labels from the model by looking at the available moves, e.g.
# B-PERSON, I-PERSON, L-PERSON, U-PERSON
- labels = set(
- move.split("-")[1]
- for move in self.move_names
- if move[0] in ("B", "I", "L", "U")
- )
+ labels = set(remove_bilu_prefix(move) for move in self.move_names
+ if move[0] in ("B", "I", "L", "U"))
return tuple(sorted(labels))
def scored_ents(self, beams):
diff --git a/spacy/pipeline/senter.pyx b/spacy/pipeline/senter.pyx
index 6808fe70e..93a7ee796 100644
--- a/spacy/pipeline/senter.pyx
+++ b/spacy/pipeline/senter.pyx
@@ -1,13 +1,14 @@
# cython: infer_types=True, profile=True, binding=True
-from typing import Optional, Callable
+from typing import Dict, Iterable, Optional, Callable, List, Union
from itertools import islice
import srsly
from thinc.api import Model, SequenceCategoricalCrossentropy, Config
+from thinc.types import Floats2d, Ints1d
from ..tokens.doc cimport Doc
-from .tagger import Tagger
+from .tagger import ActivationsT, Tagger
from ..language import Language
from ..errors import Errors
from ..scorer import Scorer
@@ -38,11 +39,21 @@ DEFAULT_SENTER_MODEL = Config().from_str(default_model_config)["model"]
@Language.factory(
"senter",
assigns=["token.is_sent_start"],
- default_config={"model": DEFAULT_SENTER_MODEL, "overwrite": False, "scorer": {"@scorers": "spacy.senter_scorer.v1"}},
+ default_config={
+ "model": DEFAULT_SENTER_MODEL,
+ "overwrite": False,
+ "scorer": {"@scorers": "spacy.senter_scorer.v1"},
+ "save_activations": False,
+ },
default_score_weights={"sents_f": 1.0, "sents_p": 0.0, "sents_r": 0.0},
)
-def make_senter(nlp: Language, name: str, model: Model, overwrite: bool, scorer: Optional[Callable]):
- return SentenceRecognizer(nlp.vocab, model, name, overwrite=overwrite, scorer=scorer)
+def make_senter(nlp: Language,
+ name: str,
+ model: Model,
+ overwrite: bool,
+ scorer: Optional[Callable],
+ save_activations: bool):
+ return SentenceRecognizer(nlp.vocab, model, name, overwrite=overwrite, scorer=scorer, save_activations=save_activations)
def senter_score(examples, **kwargs):
@@ -72,6 +83,7 @@ class SentenceRecognizer(Tagger):
*,
overwrite=BACKWARD_OVERWRITE,
scorer=senter_score,
+ save_activations: bool = False,
):
"""Initialize a sentence recognizer.
@@ -81,6 +93,7 @@ class SentenceRecognizer(Tagger):
losses during training.
scorer (Optional[Callable]): The scoring method. Defaults to
Scorer.score_spans for the attribute "sents".
+ save_activations (bool): save model activations in Doc when annotating.
DOCS: https://spacy.io/api/sentencerecognizer#init
"""
@@ -90,6 +103,7 @@ class SentenceRecognizer(Tagger):
self._rehearsal_model = None
self.cfg = {"overwrite": overwrite}
self.scorer = scorer
+ self.save_activations = save_activations
@property
def labels(self):
@@ -107,19 +121,24 @@ class SentenceRecognizer(Tagger):
def label_data(self):
return None
- def set_annotations(self, docs, batch_tag_ids):
+ def set_annotations(self, docs: Iterable[Doc], activations: ActivationsT):
"""Modify a batch of documents, using pre-computed scores.
docs (Iterable[Doc]): The documents to modify.
- batch_tag_ids: The IDs to set, produced by SentenceRecognizer.predict.
+ activations (ActivationsT): The activations used for setting annotations, produced by SentenceRecognizer.predict.
DOCS: https://spacy.io/api/sentencerecognizer#set_annotations
"""
+ batch_tag_ids = activations["label_ids"]
if isinstance(docs, Doc):
docs = [docs]
cdef Doc doc
cdef bint overwrite = self.cfg["overwrite"]
for i, doc in enumerate(docs):
+ if self.save_activations:
+ doc.activations[self.name] = {}
+ for act_name, acts in activations.items():
+ doc.activations[self.name][act_name] = acts[i]
doc_tag_ids = batch_tag_ids[i]
if hasattr(doc_tag_ids, "get"):
doc_tag_ids = doc_tag_ids.get()
diff --git a/spacy/pipeline/spancat.py b/spacy/pipeline/spancat.py
index 1b7a9eecb..c517991f5 100644
--- a/spacy/pipeline/spancat.py
+++ b/spacy/pipeline/spancat.py
@@ -1,4 +1,5 @@
from typing import List, Dict, Callable, Tuple, Optional, Iterable, Any, cast
+from typing import Union
from thinc.api import Config, Model, get_current_ops, set_dropout_rate, Ops
from thinc.api import Optimizer
from thinc.types import Ragged, Ints2d, Floats2d, Ints1d
@@ -16,6 +17,9 @@ from ..errors import Errors
from ..util import registry
+ActivationsT = Dict[str, Union[Floats2d, Ragged]]
+
+
spancat_default_config = """
[model]
@architectures = "spacy.SpanCategorizer.v1"
@@ -106,6 +110,7 @@ def build_ngram_range_suggester(min_size: int, max_size: int) -> Suggester:
"model": DEFAULT_SPANCAT_MODEL,
"suggester": {"@misc": "spacy.ngram_suggester.v1", "sizes": [1, 2, 3]},
"scorer": {"@scorers": "spacy.spancat_scorer.v1"},
+ "save_activations": False,
},
default_score_weights={"spans_sc_f": 1.0, "spans_sc_p": 0.0, "spans_sc_r": 0.0},
)
@@ -118,6 +123,7 @@ def make_spancat(
scorer: Optional[Callable],
threshold: float,
max_positive: Optional[int],
+ save_activations: bool,
) -> "SpanCategorizer":
"""Create a SpanCategorizer component. The span categorizer consists of two
parts: a suggester function that proposes candidate spans, and a labeller
@@ -138,6 +144,7 @@ def make_spancat(
0.5.
max_positive (Optional[int]): Maximum number of labels to consider positive
per span. Defaults to None, indicating no limit.
+ save_activations (bool): save model activations in Doc when annotating.
"""
return SpanCategorizer(
nlp.vocab,
@@ -148,6 +155,7 @@ def make_spancat(
max_positive=max_positive,
name=name,
scorer=scorer,
+ save_activations=save_activations,
)
@@ -186,6 +194,7 @@ class SpanCategorizer(TrainablePipe):
threshold: float = 0.5,
max_positive: Optional[int] = None,
scorer: Optional[Callable] = spancat_score,
+ save_activations: bool = False,
) -> None:
"""Initialize the span categorizer.
vocab (Vocab): The shared vocabulary.
@@ -218,6 +227,7 @@ class SpanCategorizer(TrainablePipe):
self.model = model
self.name = name
self.scorer = scorer
+ self.save_activations = save_activations
@property
def key(self) -> str:
@@ -260,7 +270,7 @@ class SpanCategorizer(TrainablePipe):
"""
return list(self.labels)
- def predict(self, docs: Iterable[Doc]):
+ def predict(self, docs: Iterable[Doc]) -> ActivationsT:
"""Apply the pipeline's model to a batch of docs, without modifying them.
docs (Iterable[Doc]): The documents to predict.
@@ -270,7 +280,7 @@ class SpanCategorizer(TrainablePipe):
"""
indices = self.suggester(docs, ops=self.model.ops)
scores = self.model.predict((docs, indices)) # type: ignore
- return indices, scores
+ return {"indices": indices, "scores": scores}
def set_candidates(
self, docs: Iterable[Doc], *, candidates_key: str = "candidates"
@@ -290,19 +300,29 @@ class SpanCategorizer(TrainablePipe):
for index in candidates.dataXd:
doc.spans[candidates_key].append(doc[index[0] : index[1]])
- def set_annotations(self, docs: Iterable[Doc], indices_scores) -> None:
+ def set_annotations(self, docs: Iterable[Doc], activations: ActivationsT) -> None:
"""Modify a batch of Doc objects, using pre-computed scores.
docs (Iterable[Doc]): The documents to modify.
- scores: The scores to set, produced by SpanCategorizer.predict.
+        activations (ActivationsT): The activations, produced by SpanCategorizer.predict.
DOCS: https://spacy.io/api/spancategorizer#set_annotations
"""
labels = self.labels
- indices, scores = indices_scores
+
+ indices = activations["indices"]
+ assert isinstance(indices, Ragged)
+ scores = cast(Floats2d, activations["scores"])
+
offset = 0
for i, doc in enumerate(docs):
indices_i = indices[i].dataXd
+ if self.save_activations:
+ doc.activations[self.name] = {}
+ doc.activations[self.name]["indices"] = indices_i
+ doc.activations[self.name]["scores"] = scores[
+ offset : offset + indices.lengths[i]
+ ]
doc.spans[self.key] = self._make_span_group(
doc, indices_i, scores[offset : offset + indices.lengths[i]], labels # type: ignore[arg-type]
)
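A short sketch (placeholder pipeline and text) of reading the dict-shaped activations that `SpanCategorizer.predict` now returns via `Doc.activations`:

    import spacy

    # Hypothetical pipeline with a trained spancat component.
    nlp = spacy.load("my_spancat_pipeline")
    spancat = nlp.get_pipe("spancat")
    spancat.save_activations = True
    doc = nlp("Some text with candidate spans.")
    acts = doc.activations["spancat"]
    # "indices" holds the suggested token offsets for this doc,
    # "scores" the per-label scores for each suggested span.
    print(acts["indices"].shape, acts["scores"].shape)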
diff --git a/spacy/pipeline/tagger.pyx b/spacy/pipeline/tagger.pyx
index d6ecbf084..3b4715ce5 100644
--- a/spacy/pipeline/tagger.pyx
+++ b/spacy/pipeline/tagger.pyx
@@ -1,9 +1,9 @@
# cython: infer_types=True, profile=True, binding=True
-from typing import Callable, Optional
+from typing import Callable, Dict, Iterable, List, Optional, Union
import numpy
import srsly
from thinc.api import Model, set_dropout_rate, SequenceCategoricalCrossentropy, Config
-from thinc.types import Floats2d
+from thinc.types import Floats2d, Ints1d
import warnings
from itertools import islice
@@ -22,6 +22,9 @@ from ..training import validate_examples, validate_get_examples
from ..util import registry
from .. import util
+
+ActivationsT = Dict[str, Union[List[Floats2d], List[Ints1d]]]
+
# See #9050
BACKWARD_OVERWRITE = False
@@ -45,7 +48,13 @@ DEFAULT_TAGGER_MODEL = Config().from_str(default_model_config)["model"]
@Language.factory(
"tagger",
assigns=["token.tag"],
- default_config={"model": DEFAULT_TAGGER_MODEL, "overwrite": False, "scorer": {"@scorers": "spacy.tagger_scorer.v1"}, "neg_prefix": "!"},
+ default_config={
+ "model": DEFAULT_TAGGER_MODEL,
+ "overwrite": False,
+ "scorer": {"@scorers": "spacy.tagger_scorer.v1"},
+ "neg_prefix": "!",
+ "save_activations": False,
+ },
default_score_weights={"tag_acc": 1.0},
)
def make_tagger(
@@ -55,6 +64,7 @@ def make_tagger(
overwrite: bool,
scorer: Optional[Callable],
neg_prefix: str,
+ save_activations: bool,
):
"""Construct a part-of-speech tagger component.
@@ -63,7 +73,8 @@ def make_tagger(
in size, and be normalized as probabilities (all scores between 0 and 1,
with the rows summing to 1).
"""
- return Tagger(nlp.vocab, model, name, overwrite=overwrite, scorer=scorer, neg_prefix=neg_prefix)
+ return Tagger(nlp.vocab, model, name, overwrite=overwrite, scorer=scorer, neg_prefix=neg_prefix,
+ save_activations=save_activations)
def tagger_score(examples, **kwargs):
@@ -89,6 +100,7 @@ class Tagger(TrainablePipe):
overwrite=BACKWARD_OVERWRITE,
scorer=tagger_score,
neg_prefix="!",
+ save_activations: bool = False,
):
"""Initialize a part-of-speech tagger.
@@ -98,6 +110,7 @@ class Tagger(TrainablePipe):
losses during training.
scorer (Optional[Callable]): The scoring method. Defaults to
Scorer.score_token_attr for the attribute "tag".
+ save_activations (bool): save model activations in Doc when annotating.
DOCS: https://spacy.io/api/tagger#init
"""
@@ -108,6 +121,7 @@ class Tagger(TrainablePipe):
cfg = {"labels": [], "overwrite": overwrite, "neg_prefix": neg_prefix}
self.cfg = dict(sorted(cfg.items()))
self.scorer = scorer
+ self.save_activations = save_activations
@property
def labels(self):
@@ -126,7 +140,7 @@ class Tagger(TrainablePipe):
"""Data about the labels currently added to the component."""
return tuple(self.cfg["labels"])
- def predict(self, docs):
+ def predict(self, docs) -> ActivationsT:
"""Apply the pipeline's model to a batch of docs, without modifying them.
docs (Iterable[Doc]): The documents to predict.
@@ -139,12 +153,12 @@ class Tagger(TrainablePipe):
n_labels = len(self.labels)
guesses = [self.model.ops.alloc((0, n_labels)) for doc in docs]
assert len(guesses) == len(docs)
- return guesses
+ return {"probabilities": guesses, "label_ids": guesses}
scores = self.model.predict(docs)
assert len(scores) == len(docs), (len(scores), len(docs))
guesses = self._scores2guesses(scores)
assert len(guesses) == len(docs)
- return guesses
+ return {"probabilities": scores, "label_ids": guesses}
def _scores2guesses(self, scores):
guesses = []
@@ -155,14 +169,15 @@ class Tagger(TrainablePipe):
guesses.append(doc_guesses)
return guesses
- def set_annotations(self, docs, batch_tag_ids):
+ def set_annotations(self, docs: Iterable[Doc], activations: ActivationsT):
"""Modify a batch of documents, using pre-computed scores.
docs (Iterable[Doc]): The documents to modify.
- batch_tag_ids: The IDs to set, produced by Tagger.predict.
+ activations (ActivationsT): The activations used for setting annotations, produced by Tagger.predict.
DOCS: https://spacy.io/api/tagger#set_annotations
"""
+ batch_tag_ids = activations["label_ids"]
if isinstance(docs, Doc):
docs = [docs]
cdef Doc doc
@@ -170,6 +185,10 @@ class Tagger(TrainablePipe):
cdef bint overwrite = self.cfg["overwrite"]
labels = self.labels
for i, doc in enumerate(docs):
+ if self.save_activations:
+ doc.activations[self.name] = {}
+ for act_name, acts in activations.items():
+ doc.activations[self.name][act_name] = acts[i]
doc_tag_ids = batch_tag_ids[i]
if hasattr(doc_tag_ids, "get"):
doc_tag_ids = doc_tag_ids.get()
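For illustration, the equivalent for the tagger; the model name is a placeholder for any pipeline with a trained tagger:

    import spacy

    nlp = spacy.load("en_core_web_sm")  # placeholder: any pipeline with a tagger
    tagger = nlp.get_pipe("tagger")
    tagger.save_activations = True
    doc = nlp("This is a sentence.")
    acts = doc.activations["tagger"]
    # "probabilities" is the per-token score matrix, "label_ids" the argmax tag ids.
    assert acts["probabilities"].shape == (len(doc), len(tagger.labels))
    assert acts["label_ids"].shape[0] == len(doc)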
diff --git a/spacy/pipeline/textcat.py b/spacy/pipeline/textcat.py
index c45f819fc..506cdb61c 100644
--- a/spacy/pipeline/textcat.py
+++ b/spacy/pipeline/textcat.py
@@ -1,4 +1,4 @@
-from typing import Iterable, Tuple, Optional, Dict, List, Callable, Any
+from typing import Iterable, Tuple, Optional, Dict, List, Callable, Any, Union
from thinc.api import get_array_module, Model, Optimizer, set_dropout_rate, Config
from thinc.types import Floats2d
import numpy
@@ -14,6 +14,9 @@ from ..util import registry
from ..vocab import Vocab
+ActivationsT = Dict[str, Floats2d]
+
+
single_label_default_config = """
[model]
@architectures = "spacy.TextCatEnsemble.v2"
@@ -75,6 +78,7 @@ subword_features = true
"threshold": 0.5,
"model": DEFAULT_SINGLE_TEXTCAT_MODEL,
"scorer": {"@scorers": "spacy.textcat_scorer.v1"},
+ "save_activations": False,
},
default_score_weights={
"cats_score": 1.0,
@@ -96,6 +100,7 @@ def make_textcat(
model: Model[List[Doc], List[Floats2d]],
threshold: float,
scorer: Optional[Callable],
+ save_activations: bool,
) -> "TextCategorizer":
"""Create a TextCategorizer component. The text categorizer predicts categories
over a whole document. It can learn one or more labels, and the labels are considered
@@ -105,8 +110,16 @@ def make_textcat(
scores for each category.
threshold (float): Cutoff to consider a prediction "positive".
scorer (Optional[Callable]): The scoring method.
+ save_activations (bool): save model activations in Doc when annotating.
"""
- return TextCategorizer(nlp.vocab, model, name, threshold=threshold, scorer=scorer)
+ return TextCategorizer(
+ nlp.vocab,
+ model,
+ name,
+ threshold=threshold,
+ scorer=scorer,
+ save_activations=save_activations,
+ )
def textcat_score(examples: Iterable[Example], **kwargs) -> Dict[str, Any]:
@@ -137,6 +150,7 @@ class TextCategorizer(TrainablePipe):
*,
threshold: float,
scorer: Optional[Callable] = textcat_score,
+ save_activations: bool = False,
) -> None:
"""Initialize a text categorizer for single-label classification.
@@ -157,6 +171,7 @@ class TextCategorizer(TrainablePipe):
cfg = {"labels": [], "threshold": threshold, "positive_label": None}
self.cfg = dict(cfg)
self.scorer = scorer
+ self.save_activations = save_activations
@property
def support_missing_values(self):
@@ -181,7 +196,7 @@ class TextCategorizer(TrainablePipe):
"""
return self.labels # type: ignore[return-value]
- def predict(self, docs: Iterable[Doc]):
+ def predict(self, docs: Iterable[Doc]) -> ActivationsT:
"""Apply the pipeline's model to a batch of docs, without modifying them.
docs (Iterable[Doc]): The documents to predict.
@@ -194,12 +209,12 @@ class TextCategorizer(TrainablePipe):
tensors = [doc.tensor for doc in docs]
xp = self.model.ops.xp
scores = xp.zeros((len(list(docs)), len(self.labels)))
- return scores
+ return {"probabilities": scores}
scores = self.model.predict(docs)
scores = self.model.ops.asarray(scores)
- return scores
+ return {"probabilities": scores}
- def set_annotations(self, docs: Iterable[Doc], scores) -> None:
+ def set_annotations(self, docs: Iterable[Doc], activations: ActivationsT) -> None:
"""Modify a batch of Doc objects, using pre-computed scores.
docs (Iterable[Doc]): The documents to modify.
@@ -207,9 +222,13 @@ class TextCategorizer(TrainablePipe):
DOCS: https://spacy.io/api/textcategorizer#set_annotations
"""
+ probs = activations["probabilities"]
for i, doc in enumerate(docs):
+ if self.save_activations:
+ doc.activations[self.name] = {}
+ doc.activations[self.name]["probabilities"] = probs[i]
for j, label in enumerate(self.labels):
- doc.cats[label] = float(scores[i, j])
+ doc.cats[label] = float(probs[i, j])
def update(
self,
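Analogously, a sketch of reading the text categorizer's stored probabilities; the pipeline name and labels are placeholders:

    import spacy

    nlp = spacy.load("my_textcat_pipeline")  # placeholder: any pipeline with a textcat
    textcat = nlp.get_pipe("textcat")
    textcat.save_activations = True
    doc = nlp("A review text to classify.")
    probs = doc.activations["textcat"]["probabilities"]
    # One score per label, in the same order as textcat.labels.
    print(dict(zip(textcat.labels, probs.tolist())))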
diff --git a/spacy/pipeline/textcat_multilabel.py b/spacy/pipeline/textcat_multilabel.py
index e33a885f8..3a6dd0b0c 100644
--- a/spacy/pipeline/textcat_multilabel.py
+++ b/spacy/pipeline/textcat_multilabel.py
@@ -1,4 +1,4 @@
-from typing import Iterable, Optional, Dict, List, Callable, Any
+from typing import Iterable, Optional, Dict, List, Callable, Any, Union
from thinc.types import Floats2d
from thinc.api import Model, Config
@@ -75,6 +75,7 @@ subword_features = true
"threshold": 0.5,
"model": DEFAULT_MULTI_TEXTCAT_MODEL,
"scorer": {"@scorers": "spacy.textcat_multilabel_scorer.v1"},
+ "save_activations": False,
},
default_score_weights={
"cats_score": 1.0,
@@ -96,6 +97,7 @@ def make_multilabel_textcat(
model: Model[List[Doc], List[Floats2d]],
threshold: float,
scorer: Optional[Callable],
+ save_activations: bool,
) -> "TextCategorizer":
"""Create a TextCategorizer component. The text categorizer predicts categories
over a whole document. It can learn one or more labels, and the labels are considered
@@ -107,7 +109,12 @@ def make_multilabel_textcat(
threshold (float): Cutoff to consider a prediction "positive".
"""
return MultiLabel_TextCategorizer(
- nlp.vocab, model, name, threshold=threshold, scorer=scorer
+ nlp.vocab,
+ model,
+ name,
+ threshold=threshold,
+ scorer=scorer,
+ save_activations=save_activations,
)
@@ -139,6 +146,7 @@ class MultiLabel_TextCategorizer(TextCategorizer):
*,
threshold: float,
scorer: Optional[Callable] = textcat_multilabel_score,
+ save_activations: bool = False,
) -> None:
"""Initialize a text categorizer for multi-label classification.
@@ -147,6 +155,7 @@ class MultiLabel_TextCategorizer(TextCategorizer):
name (str): The component instance name, used to add entries to the
losses during training.
threshold (float): Cutoff to consider a prediction "positive".
+ save_activations (bool): save model activations in Doc when annotating.
DOCS: https://spacy.io/api/textcategorizer#init
"""
@@ -157,6 +166,7 @@ class MultiLabel_TextCategorizer(TextCategorizer):
cfg = {"labels": [], "threshold": threshold}
self.cfg = dict(cfg)
self.scorer = scorer
+ self.save_activations = save_activations
@property
def support_missing_values(self):
diff --git a/spacy/pipeline/trainable_pipe.pxd b/spacy/pipeline/trainable_pipe.pxd
index 65daa8b22..180f86f45 100644
--- a/spacy/pipeline/trainable_pipe.pxd
+++ b/spacy/pipeline/trainable_pipe.pxd
@@ -6,3 +6,4 @@ cdef class TrainablePipe(Pipe):
cdef public object model
cdef public object cfg
cdef public object scorer
+ cdef bint _save_activations
diff --git a/spacy/pipeline/trainable_pipe.pyx b/spacy/pipeline/trainable_pipe.pyx
index 76b0733cf..c82f2830c 100644
--- a/spacy/pipeline/trainable_pipe.pyx
+++ b/spacy/pipeline/trainable_pipe.pyx
@@ -2,11 +2,12 @@
from typing import Iterable, Iterator, Optional, Dict, Tuple, Callable
import srsly
from thinc.api import set_dropout_rate, Model, Optimizer
+import warnings
from ..tokens.doc cimport Doc
from ..training import validate_examples
-from ..errors import Errors
+from ..errors import Errors, Warnings
from .pipe import Pipe, deserialize_config
from .. import util
from ..vocab import Vocab
@@ -342,3 +343,11 @@ cdef class TrainablePipe(Pipe):
deserialize["model"] = load_model
util.from_disk(path, deserialize, exclude)
return self
+
+ @property
+ def save_activations(self):
+ return self._save_activations
+
+ @save_activations.setter
+ def save_activations(self, save_activations: bool):
+ self._save_activations = save_activations
diff --git a/spacy/pipeline/transition_parser.pyx b/spacy/pipeline/transition_parser.pyx
index 2f6902c50..10219c2ad 100644
--- a/spacy/pipeline/transition_parser.pyx
+++ b/spacy/pipeline/transition_parser.pyx
@@ -11,7 +11,9 @@ import random
import contextlib
import srsly
-from thinc.api import set_dropout_rate, CupyOps, get_array_module
+from thinc.api import get_ops, set_dropout_rate, CupyOps, NumpyOps
+from thinc.api import get_array_module
+from thinc.extra.search cimport Beam
from thinc.types import Ints1d
import numpy.random
import numpy
@@ -20,7 +22,7 @@ import warnings
from ._parser_internals.stateclass cimport StateC, StateClass
from ._parser_internals.search cimport Beam
from ..tokens.doc cimport Doc
-from .trainable_pipe import TrainablePipe
+from .trainable_pipe cimport TrainablePipe
from ._parser_internals cimport _beam_utils
from ._parser_internals import _beam_utils
from ..vocab cimport Vocab
@@ -32,7 +34,10 @@ from ..errors import Errors, Warnings
from .. import util
-class Parser(TrainablePipe):
+NUMPY_OPS = NumpyOps()
+
+
+cdef class Parser(TrainablePipe):
"""
Base class of the DependencyParser and EntityRecognizer.
"""
@@ -122,6 +127,7 @@ class Parser(TrainablePipe):
self._rehearsal_model = None
self.scorer = scorer
+ self._cpu_ops = get_ops("cpu") if isinstance(self.model.ops, CupyOps) else self.model.ops
def __getnewargs_ex__(self):
"""This allows pickling the Parser and its keyword-only init arguments"""
diff --git a/spacy/schemas.py b/spacy/schemas.py
index b284b82e5..a38421fa0 100644
--- a/spacy/schemas.py
+++ b/spacy/schemas.py
@@ -3,12 +3,13 @@ from typing import Iterable, TypeVar, TYPE_CHECKING
from .compat import Literal
from enum import Enum
from pydantic import BaseModel, Field, ValidationError, validator, create_model
-from pydantic import StrictStr, StrictInt, StrictFloat, StrictBool
+from pydantic import StrictStr, StrictInt, StrictFloat, StrictBool, ConstrainedStr
from pydantic.main import ModelMetaclass
from thinc.api import Optimizer, ConfigValidationError, Model
from thinc.config import Promise
from collections import defaultdict
import inspect
+import re
from .attrs import NAMES
from .lookups import Lookups
@@ -143,7 +144,7 @@ def validate_init_settings(
def validate_token_pattern(obj: list) -> List[str]:
# Try to convert non-string keys (e.g. {ORTH: "foo"} -> {"ORTH": "foo"})
- get_key = lambda k: NAMES[k] if isinstance(k, int) and k < len(NAMES) else k
+ get_key = lambda k: NAMES[k] if isinstance(k, int) and k in NAMES else k
if isinstance(obj, list):
converted = []
for pattern in obj:
@@ -198,13 +199,18 @@ class TokenPatternNumber(BaseModel):
return v
-class TokenPatternOperator(str, Enum):
+class TokenPatternOperatorSimple(str, Enum):
plus: StrictStr = StrictStr("+")
- start: StrictStr = StrictStr("*")
+ star: StrictStr = StrictStr("*")
question: StrictStr = StrictStr("?")
exclamation: StrictStr = StrictStr("!")
+class TokenPatternOperatorMinMax(ConstrainedStr):
+ regex = re.compile(r"^({\d+}|{\d+,\d*}|{\d*,\d+})$")
+
+
+TokenPatternOperator = Union[TokenPatternOperatorSimple, TokenPatternOperatorMinMax]
StringValue = Union[TokenPatternString, StrictStr]
NumberValue = Union[TokenPatternNumber, StrictInt, StrictFloat]
UnderscoreValue = Union[
@@ -508,6 +514,14 @@ class DocJSONSchema(BaseModel):
tokens: List[Dict[StrictStr, Union[StrictStr, StrictInt]]] = Field(
..., title="Token information - ID, start, annotations"
)
- _: Optional[Dict[StrictStr, Any]] = Field(
- None, title="Any custom data stored in the document's _ attribute"
+ underscore_doc: Optional[Dict[StrictStr, Any]] = Field(
+ None,
+ title="Any custom data stored in the document's _ attribute",
+ alias="_",
+ )
+ underscore_token: Optional[Dict[StrictStr, Dict[StrictStr, Any]]] = Field(
+ None, title="Any custom data stored in the token's _ attribute"
+ )
+ underscore_span: Optional[Dict[StrictStr, Dict[StrictStr, Any]]] = Field(
+ None, title="Any custom data stored in the span's _ attribute"
)
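The new TokenPatternOperatorMinMax admits curly-brace quantifiers in token patterns; a small sketch, assuming the Matcher itself accepts these operators (this hunk only covers schema validation):

    from spacy.lang.en import English
    from spacy.matcher import Matcher

    nlp = English()
    matcher = Matcher(nlp.vocab)
    # "{2,3}" is accepted by the new regex ^({\d+}|{\d+,\d*}|{\d*,\d+})$
    matcher.add("AAA", [[{"LOWER": "a", "OP": "{2,3}"}]])
    doc = nlp("a a a b")
    print([doc[start:end].text for _, start, end in matcher(doc)])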
diff --git a/spacy/strings.pxd b/spacy/strings.pxd
index 370180135..5f03a9a28 100644
--- a/spacy/strings.pxd
+++ b/spacy/strings.pxd
@@ -26,4 +26,4 @@ cdef class StringStore:
cdef public PreshMap _map
cdef const Utf8Str* intern_unicode(self, str py_string)
- cdef const Utf8Str* _intern_utf8(self, char* utf8_string, int length)
+ cdef const Utf8Str* _intern_utf8(self, char* utf8_string, int length, hash_t* precalculated_hash)
diff --git a/spacy/strings.pyx b/spacy/strings.pyx
index 39fc441e9..e86682733 100644
--- a/spacy/strings.pyx
+++ b/spacy/strings.pyx
@@ -14,6 +14,13 @@ from .symbols import NAMES as SYMBOLS_BY_INT
from .errors import Errors
from . import util
+# Not particularly elegant, but this is faster than `isinstance(key, numbers.Integral)`
+cdef inline bint _try_coerce_to_hash(object key, hash_t* out_hash):
+ try:
+ out_hash[0] = key
+ return True
+ except:
+ return False
def get_string_id(key):
"""Get a string ID, handling the reserved symbols correctly. If the key is
@@ -22,15 +29,27 @@ def get_string_id(key):
This function optimises for convenience over performance, so shouldn't be
used in tight loops.
"""
- if not isinstance(key, str):
- return key
- elif key in SYMBOLS_BY_STR:
- return SYMBOLS_BY_STR[key]
- elif not key:
- return 0
+ cdef hash_t str_hash
+ if isinstance(key, str):
+ if len(key) == 0:
+ return 0
+
+ symbol = SYMBOLS_BY_STR.get(key, None)
+ if symbol is not None:
+ return symbol
+ else:
+ chars = key.encode("utf8")
+ return hash_utf8(chars, len(chars))
+ elif _try_coerce_to_hash(key, &str_hash):
+ # Coerce the integral key to the expected primitive hash type.
+ # This ensures that custom/overloaded "primitive" data types
+ # such as those implemented by numpy are not inadvertently used
+        # downstream (as these are internally implemented as custom PyObjects
+ # whose comparison operators can incur a significant overhead).
+ return str_hash
else:
- chars = key.encode("utf8")
- return hash_utf8(chars, len(chars))
+ # TODO: Raise an error instead
+ return key
cpdef hash_t hash_string(str string) except 0:
@@ -110,28 +129,36 @@ cdef class StringStore:
string_or_id (bytes, str or uint64): The value to encode.
Returns (str / uint64): The value to be retrieved.
"""
- if isinstance(string_or_id, str) and len(string_or_id) == 0:
- return 0
- elif string_or_id == 0:
- return ""
- elif string_or_id in SYMBOLS_BY_STR:
- return SYMBOLS_BY_STR[string_or_id]
- cdef hash_t key
+ cdef hash_t str_hash
+ cdef Utf8Str* utf8str = NULL
+
if isinstance(string_or_id, str):
- key = hash_string(string_or_id)
- return key
- elif isinstance(string_or_id, bytes):
- key = hash_utf8(string_or_id, len(string_or_id))
- return key
- elif string_or_id < len(SYMBOLS_BY_INT):
- return SYMBOLS_BY_INT[string_or_id]
- else:
- key = string_or_id
- utf8str = self._map.get(key)
- if utf8str is NULL:
- raise KeyError(Errors.E018.format(hash_value=string_or_id))
+ if len(string_or_id) == 0:
+ return 0
+
+ # Return early if the string is found in the symbols LUT.
+ symbol = SYMBOLS_BY_STR.get(string_or_id, None)
+ if symbol is not None:
+ return symbol
else:
- return decode_Utf8Str(utf8str)
+ return hash_string(string_or_id)
+ elif isinstance(string_or_id, bytes):
+ return hash_utf8(string_or_id, len(string_or_id))
+ elif _try_coerce_to_hash(string_or_id, &str_hash):
+ if str_hash == 0:
+ return ""
+ elif str_hash in SYMBOLS_BY_INT:
+ return SYMBOLS_BY_INT[str_hash]
+ else:
+ utf8str = self._map.get(str_hash)
+ else:
+ # TODO: Raise an error instead
+ utf8str = self._map.get(string_or_id)
+
+ if utf8str is NULL:
+ raise KeyError(Errors.E018.format(hash_value=string_or_id))
+ else:
+ return decode_Utf8Str(utf8str)
def as_int(self, key):
"""If key is an int, return it; otherwise, get the int value."""
@@ -153,19 +180,22 @@ cdef class StringStore:
string (str): The string to add.
RETURNS (uint64): The string's hash value.
"""
+ cdef hash_t str_hash
if isinstance(string, str):
if string in SYMBOLS_BY_STR:
return SYMBOLS_BY_STR[string]
- key = hash_string(string)
- self.intern_unicode(string)
+
+ string = string.encode("utf8")
+ str_hash = hash_utf8(string, len(string))
+ self._intern_utf8(string, len(string), &str_hash)
elif isinstance(string, bytes):
if string in SYMBOLS_BY_STR:
return SYMBOLS_BY_STR[string]
- key = hash_utf8(string, len(string))
- self._intern_utf8(string, len(string))
+ str_hash = hash_utf8(string, len(string))
+ self._intern_utf8(string, len(string), &str_hash)
else:
raise TypeError(Errors.E017.format(value_type=type(string)))
- return key
+ return str_hash
def __len__(self):
"""The number of strings in the store.
@@ -174,30 +204,29 @@ cdef class StringStore:
"""
return self.keys.size()
- def __contains__(self, string not None):
- """Check whether a string is in the store.
+ def __contains__(self, string_or_id not None):
+ """Check whether a string or ID is in the store.
- string (str): The string to check.
+    string_or_id (str or int): The string or ID to check.
RETURNS (bool): Whether the store contains the string.
"""
- cdef hash_t key
- if isinstance(string, int) or isinstance(string, long):
- if string == 0:
+ cdef hash_t str_hash
+ if isinstance(string_or_id, str):
+ if len(string_or_id) == 0:
return True
- key = string
- elif len(string) == 0:
- return True
- elif string in SYMBOLS_BY_STR:
- return True
- elif isinstance(string, str):
- key = hash_string(string)
+ elif string_or_id in SYMBOLS_BY_STR:
+ return True
+ str_hash = hash_string(string_or_id)
+ elif _try_coerce_to_hash(string_or_id, &str_hash):
+ pass
else:
- string = string.encode("utf8")
- key = hash_utf8(string, len(string))
- if key < len(SYMBOLS_BY_INT):
+ # TODO: Raise an error instead
+ return self._map.get(string_or_id) is not NULL
+
+ if str_hash in SYMBOLS_BY_INT:
return True
else:
- return self._map.get(key) is not NULL
+ return self._map.get(str_hash) is not NULL
def __iter__(self):
"""Iterate over the strings in the store, in order.
@@ -272,13 +301,13 @@ cdef class StringStore:
cdef const Utf8Str* intern_unicode(self, str py_string):
# 0 means missing, but we don't bother offsetting the index.
cdef bytes byte_string = py_string.encode("utf8")
- return self._intern_utf8(byte_string, len(byte_string))
+ return self._intern_utf8(byte_string, len(byte_string), NULL)
@cython.final
- cdef const Utf8Str* _intern_utf8(self, char* utf8_string, int length):
+ cdef const Utf8Str* _intern_utf8(self, char* utf8_string, int length, hash_t* precalculated_hash):
# TODO: This function's API/behaviour is an unholy mess...
# 0 means missing, but we don't bother offsetting the index.
- cdef hash_t key = hash_utf8(utf8_string, length)
+ cdef hash_t key = precalculated_hash[0] if precalculated_hash is not NULL else hash_utf8(utf8_string, length)
cdef Utf8Str* value = self._map.get(key)
if value is not NULL:
return value
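A quick sketch of the StringStore round-trip these hunks preserve, with integral keys now coerced through _try_coerce_to_hash:

    from spacy.strings import StringStore

    store = StringStore()
    key = store.add("apple")       # returns the uint64 hash of "apple"
    assert key == store["apple"]   # a string key resolves to its hash
    assert store[key] == "apple"   # a hash key resolves back to the string
    assert "apple" in store and key in store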
diff --git a/spacy/structs.pxd b/spacy/structs.pxd
index 86d5b67ed..b9b6f6ba8 100644
--- a/spacy/structs.pxd
+++ b/spacy/structs.pxd
@@ -58,14 +58,6 @@ cdef struct TokenC:
hash_t ent_id
-cdef struct MorphAnalysisC:
- hash_t key
- int length
-
- attr_t* fields
- attr_t* features
-
-
# Internal struct, for storage and disambiguation of entities.
cdef struct KBEntryC:
diff --git a/spacy/symbols.pxd b/spacy/symbols.pxd
index bc15d9b80..f5d7784dc 100644
--- a/spacy/symbols.pxd
+++ b/spacy/symbols.pxd
@@ -1,5 +1,6 @@
+# DO NOT EDIT! The symbols are frozen as of spaCy v3.0.0.
cdef enum symbol_t:
- NIL
+ NIL = 0
IS_ALPHA
IS_ASCII
IS_DIGIT
@@ -65,7 +66,7 @@ cdef enum symbol_t:
FLAG62
FLAG63
- ID
+ ID = 64
ORTH
LOWER
NORM
@@ -385,7 +386,7 @@ cdef enum symbol_t:
DEPRECATED275
DEPRECATED276
- PERSON
+ PERSON = 380
NORP
FACILITY
ORG
@@ -405,7 +406,7 @@ cdef enum symbol_t:
ORDINAL
CARDINAL
- acomp
+ acomp = 398
advcl
advmod
agent
@@ -458,12 +459,12 @@ cdef enum symbol_t:
rcmod
root
xcomp
-
acl
- ENT_KB_ID
+ ENT_KB_ID = 452
MORPH
ENT_ID
IDX
- _
+ _ = 456
+ # DO NOT ADD ANY NEW SYMBOLS!
diff --git a/spacy/symbols.pyx b/spacy/symbols.pyx
index b0345c710..fbfc6f10d 100644
--- a/spacy/symbols.pyx
+++ b/spacy/symbols.pyx
@@ -469,11 +469,7 @@ IDS = {
}
-def sort_nums(x):
- return x[1]
-
-
-NAMES = [it[0] for it in sorted(IDS.items(), key=sort_nums)]
+NAMES = {v: k for k, v in IDS.items()}
# Unfortunate hack here, to work around problem with long cpdef enum
# (which is generating an enormous amount of C++ in Cython 0.24+)
# We keep the enum cdef, and just make sure the names are available to Python
diff --git a/spacy/tests/conftest.py b/spacy/tests/conftest.py
index edbdeb838..ee78b64a5 100644
--- a/spacy/tests/conftest.py
+++ b/spacy/tests/conftest.py
@@ -4,6 +4,12 @@ import functools
import inspect
import importlib
import sys
+from hypothesis import settings
+
+# Functionally disable deadline settings for tests
+# to prevent spurious test failures in CI builds.
+settings.register_profile("no_deadlines", deadline=2 * 60 * 1000) # in ms
+settings.load_profile("no_deadlines")
def pytest_addoption(parser):
@@ -264,7 +270,7 @@ def hsb_tokenizer():
@pytest.fixture(scope="session")
def ko_tokenizer():
- pytest.importorskip("natto")
+ pytest.importorskip("mecab_ko")
return get_lang_class("ko")().tokenizer
@@ -281,11 +287,35 @@ def ko_tokenizer_tokenizer():
return nlp.tokenizer
+@pytest.fixture(scope="module")
+def la_tokenizer():
+ return get_lang_class("la")().tokenizer
+
+
+@pytest.fixture(scope="session")
+def ko_tokenizer_natto():
+ pytest.importorskip("natto")
+ config = {
+ "nlp": {
+ "tokenizer": {
+ "@tokenizers": "spacy.KoreanNattoTokenizer.v1",
+ }
+ }
+ }
+ nlp = get_lang_class("ko").from_config(config)
+ return nlp.tokenizer
+
+
@pytest.fixture(scope="session")
def lb_tokenizer():
return get_lang_class("lb")().tokenizer
+@pytest.fixture(scope="session")
+def lg_tokenizer():
+ return get_lang_class("lg")().tokenizer
+
+
@pytest.fixture(scope="session")
def lt_tokenizer():
return get_lang_class("lt")().tokenizer
@@ -348,13 +378,13 @@ def ro_tokenizer():
@pytest.fixture(scope="session")
def ru_tokenizer():
- pytest.importorskip("pymorphy2")
+ pytest.importorskip("pymorphy3")
return get_lang_class("ru")().tokenizer
@pytest.fixture
def ru_lemmatizer():
- pytest.importorskip("pymorphy2")
+ pytest.importorskip("pymorphy3")
return get_lang_class("ru")().add_pipe("lemmatizer")
@@ -426,14 +456,14 @@ def ky_tokenizer():
@pytest.fixture(scope="session")
def uk_tokenizer():
- pytest.importorskip("pymorphy2")
+ pytest.importorskip("pymorphy3")
return get_lang_class("uk")().tokenizer
@pytest.fixture
def uk_lemmatizer():
- pytest.importorskip("pymorphy2")
- pytest.importorskip("pymorphy2_dicts_uk")
+ pytest.importorskip("pymorphy3")
+ pytest.importorskip("pymorphy3_dicts_uk")
return get_lang_class("uk")().add_pipe("lemmatizer")
diff --git a/spacy/tests/doc/test_add_entities.py b/spacy/tests/doc/test_add_entities.py
index 231b7c2a8..30d66115f 100644
--- a/spacy/tests/doc/test_add_entities.py
+++ b/spacy/tests/doc/test_add_entities.py
@@ -45,6 +45,33 @@ def test_ents_reset(en_vocab):
assert [t.ent_iob_ for t in doc] == orig_iobs
+def test_ents_clear(en_vocab):
+ """Ensure that removing entities clears token attributes"""
+ text = ["Louisiana", "Office", "of", "Conservation"]
+ doc = Doc(en_vocab, words=text)
+ entity = Span(doc, 0, 4, label=391, span_id="TEST")
+ doc.ents = [entity]
+ doc.ents = []
+ for token in doc:
+ assert token.ent_iob == 2
+ assert token.ent_type == 0
+ assert token.ent_id == 0
+ assert token.ent_kb_id == 0
+ doc.ents = [entity]
+ doc.set_ents([], default="missing")
+ for token in doc:
+ assert token.ent_iob == 0
+ assert token.ent_type == 0
+ assert token.ent_id == 0
+ assert token.ent_kb_id == 0
+ doc.set_ents([], default="blocked")
+ for token in doc:
+ assert token.ent_iob == 3
+ assert token.ent_type == 0
+ assert token.ent_id == 0
+ assert token.ent_kb_id == 0
+
+
def test_add_overlapping_entities(en_vocab):
text = ["Louisiana", "Office", "of", "Conservation"]
doc = Doc(en_vocab, words=text)
diff --git a/spacy/tests/doc/test_doc_api.py b/spacy/tests/doc/test_doc_api.py
index dd4942989..a64ab2ba8 100644
--- a/spacy/tests/doc/test_doc_api.py
+++ b/spacy/tests/doc/test_doc_api.py
@@ -3,6 +3,7 @@ import weakref
import numpy
from numpy.testing import assert_array_equal
import pytest
+import warnings
from thinc.api import NumpyOps, get_current_ops
from spacy.attrs import DEP, ENT_IOB, ENT_TYPE, HEAD, IS_ALPHA, MORPH, POS
@@ -529,9 +530,9 @@ def test_doc_from_array_sent_starts(en_vocab):
# no warning using default attrs
attrs = doc._get_array_attrs()
arr = doc.to_array(attrs)
- with pytest.warns(None) as record:
+ with warnings.catch_warnings():
+ warnings.simplefilter("error")
new_doc.from_array(attrs, arr)
- assert len(record) == 0
# only SENT_START uses SENT_START
attrs = [SENT_START]
arr = doc.to_array(attrs)
diff --git a/spacy/tests/doc/test_json_doc_conversion.py b/spacy/tests/doc/test_json_doc_conversion.py
index 85e4def29..0d7c061c9 100644
--- a/spacy/tests/doc/test_json_doc_conversion.py
+++ b/spacy/tests/doc/test_json_doc_conversion.py
@@ -1,12 +1,15 @@
import pytest
import spacy
from spacy import schemas
-from spacy.tokens import Doc, Span
+from spacy.tokens import Doc, Span, Token
+import srsly
+from .test_underscore import clean_underscore # noqa: F401
@pytest.fixture()
def doc(en_vocab):
words = ["c", "d", "e"]
+ spaces = [True, True, True]
pos = ["VERB", "NOUN", "NOUN"]
tags = ["VBP", "NN", "NN"]
heads = [0, 0, 1]
@@ -17,6 +20,7 @@ def doc(en_vocab):
return Doc(
en_vocab,
words=words,
+ spaces=spaces,
pos=pos,
tags=tags,
heads=heads,
@@ -45,6 +49,47 @@ def doc_without_deps(en_vocab):
)
+@pytest.fixture()
+def doc_json():
+ return {
+ "text": "c d e ",
+ "ents": [{"start": 2, "end": 3, "label": "ORG"}],
+ "sents": [{"start": 0, "end": 5}],
+ "tokens": [
+ {
+ "id": 0,
+ "start": 0,
+ "end": 1,
+ "tag": "VBP",
+ "pos": "VERB",
+ "morph": "Feat1=A",
+ "dep": "ROOT",
+ "head": 0,
+ },
+ {
+ "id": 1,
+ "start": 2,
+ "end": 3,
+ "tag": "NN",
+ "pos": "NOUN",
+ "morph": "Feat1=B",
+ "dep": "dobj",
+ "head": 0,
+ },
+ {
+ "id": 2,
+ "start": 4,
+ "end": 5,
+ "tag": "NN",
+ "pos": "NOUN",
+ "morph": "Feat1=A|Feat2=D",
+ "dep": "dobj",
+ "head": 1,
+ },
+ ],
+ }
+
+
def test_doc_to_json(doc):
json_doc = doc.to_json()
assert json_doc["text"] == "c d e "
@@ -56,7 +101,8 @@ def test_doc_to_json(doc):
assert json_doc["ents"][0]["start"] == 2 # character offset!
assert json_doc["ents"][0]["end"] == 3 # character offset!
assert json_doc["ents"][0]["label"] == "ORG"
- assert not schemas.validate(schemas.DocJSONSchema, json_doc)
+ assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0
+ assert srsly.json_loads(srsly.json_dumps(json_doc)) == json_doc
def test_doc_to_json_underscore(doc):
@@ -64,11 +110,96 @@ def test_doc_to_json_underscore(doc):
Doc.set_extension("json_test2", default=False)
doc._.json_test1 = "hello world"
doc._.json_test2 = [1, 2, 3]
+
json_doc = doc.to_json(underscore=["json_test1", "json_test2"])
assert "_" in json_doc
assert json_doc["_"]["json_test1"] == "hello world"
assert json_doc["_"]["json_test2"] == [1, 2, 3]
- assert not schemas.validate(schemas.DocJSONSchema, json_doc)
+ assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0
+ assert srsly.json_loads(srsly.json_dumps(json_doc)) == json_doc
+
+
+def test_doc_to_json_with_token_span_attributes(doc):
+ Doc.set_extension("json_test1", default=False)
+ Doc.set_extension("json_test2", default=False)
+ Token.set_extension("token_test", default=False)
+ Span.set_extension("span_test", default=False)
+
+ doc._.json_test1 = "hello world"
+ doc._.json_test2 = [1, 2, 3]
+ doc[0:1]._.span_test = "span_attribute"
+ doc[0]._.token_test = 117
+ doc.spans["span_group"] = [doc[0:1]]
+ json_doc = doc.to_json(
+ underscore=["json_test1", "json_test2", "token_test", "span_test"]
+ )
+
+ assert "_" in json_doc
+ assert json_doc["_"]["json_test1"] == "hello world"
+ assert json_doc["_"]["json_test2"] == [1, 2, 3]
+ assert "underscore_token" in json_doc
+ assert "underscore_span" in json_doc
+ assert json_doc["underscore_token"]["token_test"]["value"] == 117
+ assert json_doc["underscore_span"]["span_test"]["value"] == "span_attribute"
+ assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0
+ assert srsly.json_loads(srsly.json_dumps(json_doc)) == json_doc
+
+
+def test_doc_to_json_with_custom_user_data(doc):
+ Doc.set_extension("json_test", default=False)
+ Token.set_extension("token_test", default=False)
+ Span.set_extension("span_test", default=False)
+
+ doc._.json_test = "hello world"
+ doc[0:1]._.span_test = "span_attribute"
+ doc[0]._.token_test = 117
+ json_doc = doc.to_json(underscore=["json_test", "token_test", "span_test"])
+ doc.user_data["user_data_test"] = 10
+ doc.user_data[("user_data_test2", True)] = 10
+
+ assert "_" in json_doc
+ assert json_doc["_"]["json_test"] == "hello world"
+ assert "underscore_token" in json_doc
+ assert "underscore_span" in json_doc
+ assert json_doc["underscore_token"]["token_test"]["value"] == 117
+ assert json_doc["underscore_span"]["span_test"]["value"] == "span_attribute"
+ assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0
+ assert srsly.json_loads(srsly.json_dumps(json_doc)) == json_doc
+
+
+def test_doc_to_json_with_token_span_same_identifier(doc):
+ Doc.set_extension("my_ext", default=False)
+ Token.set_extension("my_ext", default=False)
+ Span.set_extension("my_ext", default=False)
+
+ doc._.my_ext = "hello world"
+ doc[0:1]._.my_ext = "span_attribute"
+ doc[0]._.my_ext = 117
+ json_doc = doc.to_json(underscore=["my_ext"])
+
+ assert "_" in json_doc
+ assert json_doc["_"]["my_ext"] == "hello world"
+ assert "underscore_token" in json_doc
+ assert "underscore_span" in json_doc
+ assert json_doc["underscore_token"]["my_ext"]["value"] == 117
+ assert json_doc["underscore_span"]["my_ext"]["value"] == "span_attribute"
+ assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0
+ assert srsly.json_loads(srsly.json_dumps(json_doc)) == json_doc
+
+
+def test_doc_to_json_with_token_attributes_missing(doc):
+ Token.set_extension("token_test", default=False)
+ Span.set_extension("span_test", default=False)
+
+ doc[0:1]._.span_test = "span_attribute"
+ doc[0]._.token_test = 117
+ json_doc = doc.to_json(underscore=["span_test"])
+
+ assert "underscore_token" in json_doc
+ assert "underscore_span" in json_doc
+ assert json_doc["underscore_span"]["span_test"]["value"] == "span_attribute"
+ assert "token_test" not in json_doc["underscore_token"]
+ assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0
def test_doc_to_json_underscore_error_attr(doc):
@@ -94,11 +225,29 @@ def test_doc_to_json_span(doc):
assert len(json_doc["spans"]) == 1
assert len(json_doc["spans"]["test"]) == 2
assert json_doc["spans"]["test"][0]["start"] == 0
- assert not schemas.validate(schemas.DocJSONSchema, json_doc)
+ assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0
def test_json_to_doc(doc):
- new_doc = Doc(doc.vocab).from_json(doc.to_json(), validate=True)
+ json_doc = doc.to_json()
+ json_doc = srsly.json_loads(srsly.json_dumps(json_doc))
+ new_doc = Doc(doc.vocab).from_json(json_doc, validate=True)
+ assert new_doc.text == doc.text == "c d e "
+ assert len(new_doc) == len(doc) == 3
+ assert new_doc[0].pos == doc[0].pos
+ assert new_doc[0].tag == doc[0].tag
+ assert new_doc[0].dep == doc[0].dep
+ assert new_doc[0].head.idx == doc[0].head.idx
+ assert new_doc[0].lemma == doc[0].lemma
+ assert len(new_doc.ents) == 1
+ assert new_doc.ents[0].start == 1
+ assert new_doc.ents[0].end == 2
+ assert new_doc.ents[0].label_ == "ORG"
+ assert doc.to_bytes() == new_doc.to_bytes()
+
+
+def test_json_to_doc_compat(doc, doc_json):
+ new_doc = Doc(doc.vocab).from_json(doc_json, validate=True)
new_tokens = [token for token in new_doc]
assert new_doc.text == doc.text == "c d e "
assert len(new_tokens) == len([token for token in doc]) == 3
@@ -114,11 +263,8 @@ def test_json_to_doc(doc):
def test_json_to_doc_underscore(doc):
- if not Doc.has_extension("json_test1"):
- Doc.set_extension("json_test1", default=False)
- if not Doc.has_extension("json_test2"):
- Doc.set_extension("json_test2", default=False)
-
+ Doc.set_extension("json_test1", default=False)
+ Doc.set_extension("json_test2", default=False)
doc._.json_test1 = "hello world"
doc._.json_test2 = [1, 2, 3]
json_doc = doc.to_json(underscore=["json_test1", "json_test2"])
@@ -126,6 +272,34 @@ def test_json_to_doc_underscore(doc):
assert all([new_doc.has_extension(f"json_test{i}") for i in range(1, 3)])
assert new_doc._.json_test1 == "hello world"
assert new_doc._.json_test2 == [1, 2, 3]
+ assert doc.to_bytes() == new_doc.to_bytes()
+
+
+def test_json_to_doc_with_token_span_attributes(doc):
+ Doc.set_extension("json_test1", default=False)
+ Doc.set_extension("json_test2", default=False)
+ Token.set_extension("token_test", default=False)
+ Span.set_extension("span_test", default=False)
+ doc._.json_test1 = "hello world"
+ doc._.json_test2 = [1, 2, 3]
+ doc[0:1]._.span_test = "span_attribute"
+ doc[0]._.token_test = 117
+
+ json_doc = doc.to_json(
+ underscore=["json_test1", "json_test2", "token_test", "span_test"]
+ )
+ json_doc = srsly.json_loads(srsly.json_dumps(json_doc))
+ new_doc = Doc(doc.vocab).from_json(json_doc, validate=True)
+
+ assert all([new_doc.has_extension(f"json_test{i}") for i in range(1, 3)])
+ assert new_doc._.json_test1 == "hello world"
+ assert new_doc._.json_test2 == [1, 2, 3]
+ assert new_doc[0]._.token_test == 117
+ assert new_doc[0:1]._.span_test == "span_attribute"
+ assert new_doc.user_data == doc.user_data
+ assert new_doc.to_bytes(exclude=["user_data"]) == doc.to_bytes(
+ exclude=["user_data"]
+ )
def test_json_to_doc_spans(doc):
diff --git a/spacy/tests/doc/test_span.py b/spacy/tests/doc/test_span.py
index c6303c52d..1a2f3cdcd 100644
--- a/spacy/tests/doc/test_span.py
+++ b/spacy/tests/doc/test_span.py
@@ -692,3 +692,23 @@ def test_span_group_copy(doc):
assert len(doc.spans["test"]) == 3
# check that the copy spans were not modified and this is an isolated doc
assert len(doc_copy.spans["test"]) == 2
+
+
+@pytest.mark.issue(11113)
+def test_span_ent_id(en_tokenizer):
+ doc = en_tokenizer("a b c d")
+ doc.ents = [Span(doc, 1, 3, label="A", span_id="ID0")]
+ span = doc.ents[0]
+ assert doc[1].ent_id_ == "ID0"
+
+ # setting Span.id sets Token.ent_id
+ span.id_ = "ID1"
+ doc.ents = [span]
+ assert doc.ents[0].ent_id_ == "ID1"
+ assert doc[1].ent_id_ == "ID1"
+
+ # Span.ent_id is an alias of Span.id
+ span.ent_id_ = "ID2"
+ doc.ents = [span]
+ assert doc.ents[0].ent_id_ == "ID2"
+ assert doc[1].ent_id_ == "ID2"
diff --git a/spacy/tests/lang/bg/test_tokenizer.py b/spacy/tests/lang/bg/test_tokenizer.py
new file mode 100644
index 000000000..2e2c45001
--- /dev/null
+++ b/spacy/tests/lang/bg/test_tokenizer.py
@@ -0,0 +1,8 @@
+import pytest
+
+
+def test_bg_tokenizer_handles_final_diacritics(bg_tokenizer):
+ text = "Ня̀маше яйца̀. Ня̀маше яйца̀."
+ tokens = bg_tokenizer(text)
+ assert tokens[1].text == "яйца̀"
+ assert tokens[2].text == "."
diff --git a/spacy/tests/lang/ko/test_lemmatization.py b/spacy/tests/lang/ko/test_lemmatization.py
index 7782ca4bc..0c389b9ce 100644
--- a/spacy/tests/lang/ko/test_lemmatization.py
+++ b/spacy/tests/lang/ko/test_lemmatization.py
@@ -7,3 +7,11 @@ import pytest
def test_ko_lemmatizer_assigns(ko_tokenizer, word, lemma):
test_lemma = ko_tokenizer(word)[0].lemma_
assert test_lemma == lemma
+
+
+@pytest.mark.parametrize(
+ "word,lemma", [("새로운", "새롭"), ("빨간", "빨갛"), ("클수록", "크"), ("뭡니까", "뭣"), ("됐다", "되")]
+)
+def test_ko_lemmatizer_natto_assigns(ko_tokenizer_natto, word, lemma):
+ test_lemma = ko_tokenizer_natto(word)[0].lemma_
+ assert test_lemma == lemma
diff --git a/spacy/tests/lang/ko/test_serialize.py b/spacy/tests/lang/ko/test_serialize.py
index 75288fcc5..35d28d42a 100644
--- a/spacy/tests/lang/ko/test_serialize.py
+++ b/spacy/tests/lang/ko/test_serialize.py
@@ -22,3 +22,23 @@ def test_ko_tokenizer_pickle(ko_tokenizer):
b = pickle.dumps(ko_tokenizer)
ko_tokenizer_re = pickle.loads(b)
assert ko_tokenizer.to_bytes() == ko_tokenizer_re.to_bytes()
+
+
+def test_ko_tokenizer_natto_serialize(ko_tokenizer_natto):
+ tokenizer_bytes = ko_tokenizer_natto.to_bytes()
+ nlp = Korean()
+ nlp.tokenizer.from_bytes(tokenizer_bytes)
+ assert tokenizer_bytes == nlp.tokenizer.to_bytes()
+
+ with make_tempdir() as d:
+ file_path = d / "tokenizer"
+ ko_tokenizer_natto.to_disk(file_path)
+ nlp = Korean()
+ nlp.tokenizer.from_disk(file_path)
+ assert tokenizer_bytes == nlp.tokenizer.to_bytes()
+
+
+def test_ko_tokenizer_natto_pickle(ko_tokenizer_natto):
+ b = pickle.dumps(ko_tokenizer_natto)
+ ko_tokenizer_natto_re = pickle.loads(b)
+ assert ko_tokenizer_natto.to_bytes() == ko_tokenizer_natto_re.to_bytes()
diff --git a/spacy/tests/lang/ko/test_tokenizer.py b/spacy/tests/lang/ko/test_tokenizer.py
index 6e06e405e..e7f8a5c0d 100644
--- a/spacy/tests/lang/ko/test_tokenizer.py
+++ b/spacy/tests/lang/ko/test_tokenizer.py
@@ -19,6 +19,8 @@ POS_TESTS = [("서울 타워 근처에 살고 있습니다.",
"PROPN ADP VERB X NOUN ADV VERB AUX X PUNCT")]
# fmt: on
+# tests for ko_tokenizer (default KoreanTokenizer)
+
@pytest.mark.parametrize("text,expected_tokens", TOKENIZER_TESTS)
def test_ko_tokenizer(ko_tokenizer, text, expected_tokens):
@@ -44,7 +46,7 @@ def test_ko_tokenizer_pos(ko_tokenizer, text, expected_pos):
assert pos == expected_pos.split()
-def test_ko_empty_doc(ko_tokenizer):
+def test_ko_tokenizer_empty_doc(ko_tokenizer):
tokens = ko_tokenizer("")
assert len(tokens) == 0
@@ -55,6 +57,44 @@ def test_ko_tokenizer_unknown_tag(ko_tokenizer):
assert tokens[1].pos_ == "X"
+# same tests for ko_tokenizer_natto (KoreanNattoTokenizer)
+
+
+@pytest.mark.parametrize("text,expected_tokens", TOKENIZER_TESTS)
+def test_ko_tokenizer_natto(ko_tokenizer_natto, text, expected_tokens):
+ tokens = [token.text for token in ko_tokenizer_natto(text)]
+ assert tokens == expected_tokens.split()
+
+
+@pytest.mark.parametrize("text,expected_tags", TAG_TESTS)
+def test_ko_tokenizer_natto_tags(ko_tokenizer_natto, text, expected_tags):
+ tags = [token.tag_ for token in ko_tokenizer_natto(text)]
+ assert tags == expected_tags.split()
+
+
+@pytest.mark.parametrize("text,expected_tags", FULL_TAG_TESTS)
+def test_ko_tokenizer_natto_full_tags(ko_tokenizer_natto, text, expected_tags):
+ tags = ko_tokenizer_natto(text).user_data["full_tags"]
+ assert tags == expected_tags.split()
+
+
+@pytest.mark.parametrize("text,expected_pos", POS_TESTS)
+def test_ko_tokenizer_natto_pos(ko_tokenizer_natto, text, expected_pos):
+ pos = [token.pos_ for token in ko_tokenizer_natto(text)]
+ assert pos == expected_pos.split()
+
+
+def test_ko_tokenizer_natto_empty_doc(ko_tokenizer_natto):
+ tokens = ko_tokenizer_natto("")
+ assert len(tokens) == 0
+
+
+@pytest.mark.issue(10535)
+def test_ko_tokenizer_natto_unknown_tag(ko_tokenizer_natto):
+ tokens = ko_tokenizer_natto("미닛 리피터")
+ assert tokens[1].pos_ == "X"
+
+
# fmt: off
SPACY_TOKENIZER_TESTS = [
("있다.", "있다 ."),
diff --git a/spacy/tests/lang/la/__init__.py b/spacy/tests/lang/la/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/spacy/tests/lang/la/test_exception.py b/spacy/tests/lang/la/test_exception.py
new file mode 100644
index 000000000..966ae22cf
--- /dev/null
+++ b/spacy/tests/lang/la/test_exception.py
@@ -0,0 +1,8 @@
+import pytest
+
+
+def test_la_tokenizer_handles_exc_in_text(la_tokenizer):
+ text = "scio te omnia facturum, ut nobiscum quam primum sis"
+ tokens = la_tokenizer(text)
+ assert len(tokens) == 11
+ assert tokens[6].text == "nobis"
diff --git a/spacy/tests/lang/la/test_text.py b/spacy/tests/lang/la/test_text.py
new file mode 100644
index 000000000..48e7359a4
--- /dev/null
+++ b/spacy/tests/lang/la/test_text.py
@@ -0,0 +1,35 @@
+import pytest
+from spacy.lang.la.lex_attrs import like_num
+
+
+@pytest.mark.parametrize(
+ "text,match",
+ [
+ ("IIII", True),
+ ("VI", True),
+ ("vi", True),
+ ("IV", True),
+ ("iv", True),
+ ("IX", True),
+ ("ix", True),
+ ("MMXXII", True),
+ ("0", True),
+ ("1", True),
+ ("quattuor", True),
+ ("decem", True),
+ ("tertius", True),
+ ("canis", False),
+ ("MMXX11", False),
+ (",", False),
+ ],
+)
+def test_lex_attrs_like_number(la_tokenizer, text, match):
+ tokens = la_tokenizer(text)
+ assert len(tokens) == 1
+ assert tokens[0].like_num == match
+
+
+@pytest.mark.parametrize("word", ["quinque"])
+def test_la_lex_attrs_capitals(word):
+ assert like_num(word)
+ assert like_num(word.upper())
diff --git a/spacy/tests/lang/lg/__init__.py b/spacy/tests/lang/lg/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/spacy/tests/lang/lg/test_tokenizer.py b/spacy/tests/lang/lg/test_tokenizer.py
new file mode 100644
index 000000000..958385a77
--- /dev/null
+++ b/spacy/tests/lang/lg/test_tokenizer.py
@@ -0,0 +1,15 @@
+import pytest
+
+LG_BASIC_TOKENIZATION_TESTS = [
+ (
+ "Abooluganda ab’emmamba ababiri",
+ ["Abooluganda", "ab’emmamba", "ababiri"],
+ ),
+]
+
+
+@pytest.mark.parametrize("text,expected_tokens", LG_BASIC_TOKENIZATION_TESTS)
+def test_lg_tokenizer_basic(lg_tokenizer, text, expected_tokens):
+ tokens = lg_tokenizer(text)
+ token_list = [token.text for token in tokens if not token.is_space]
+ assert expected_tokens == token_list
diff --git a/spacy/tests/lang/nl/test_noun_chunks.py b/spacy/tests/lang/nl/test_noun_chunks.py
index 73b501e4a..8962e3b75 100644
--- a/spacy/tests/lang/nl/test_noun_chunks.py
+++ b/spacy/tests/lang/nl/test_noun_chunks.py
@@ -1,5 +1,6 @@
-from spacy.tokens import Doc
import pytest
+from spacy.tokens import Doc
+from spacy.util import filter_spans
@pytest.fixture
@@ -207,3 +208,18 @@ def test_chunking(nl_sample, nl_reference_chunking):
"""
chunks = [s.text.lower() for s in nl_sample.noun_chunks]
assert chunks == nl_reference_chunking
+
+
+@pytest.mark.issue(10846)
+def test_no_overlapping_chunks(nl_vocab):
+ # fmt: off
+ doc = Doc(
+ nl_vocab,
+ words=["Dit", "programma", "wordt", "beschouwd", "als", "'s", "werelds", "eerste", "computerprogramma"],
+ deps=["det", "nsubj:pass", "aux:pass", "ROOT", "mark", "det", "fixed", "amod", "xcomp"],
+ heads=[1, 3, 3, 3, 8, 8, 5, 8, 3],
+ pos=["DET", "NOUN", "AUX", "VERB", "SCONJ", "DET", "NOUN", "ADJ", "NOUN"],
+ )
+ # fmt: on
+ chunks = list(doc.noun_chunks)
+ assert filter_spans(chunks) == chunks
diff --git a/spacy/tests/lang/ru/test_lemmatizer.py b/spacy/tests/lang/ru/test_lemmatizer.py
index 3810323bf..9ca7f441b 100644
--- a/spacy/tests/lang/ru/test_lemmatizer.py
+++ b/spacy/tests/lang/ru/test_lemmatizer.py
@@ -2,6 +2,9 @@ import pytest
from spacy.tokens import Doc
+pytestmark = pytest.mark.filterwarnings("ignore::DeprecationWarning")
+
+
def test_ru_doc_lemmatization(ru_lemmatizer):
words = ["мама", "мыла", "раму"]
pos = ["NOUN", "VERB", "NOUN"]
diff --git a/spacy/tests/lang/ru/test_tokenizer.py b/spacy/tests/lang/ru/test_tokenizer.py
index 1cfdc50ee..083b55a09 100644
--- a/spacy/tests/lang/ru/test_tokenizer.py
+++ b/spacy/tests/lang/ru/test_tokenizer.py
@@ -1,3 +1,4 @@
+from string import punctuation
import pytest
@@ -122,3 +123,36 @@ def test_ru_tokenizer_splits_bracket_period(ru_tokenizer):
text = "(Раз, два, три, проверка)."
tokens = ru_tokenizer(text)
assert tokens[len(tokens) - 1].text == "."
+
+
+@pytest.mark.parametrize(
+ "text",
+ [
+ "рекоменду́я подда́ть жару́. Самого́ Баргамота",
+ "РЕКОМЕНДУ́Я ПОДДА́ТЬ ЖАРУ́. САМОГО́ БАРГАМОТА",
+ "рекоменду̍я подда̍ть жару̍.Самого̍ Баргамота",
+ "рекоменду̍я подда̍ть жару̍.'Самого̍ Баргамота",
+ "рекоменду̍я подда̍ть жару̍,самого̍ Баргамота",
+ "рекоменду̍я подда̍ть жару̍:самого̍ Баргамота",
+ "рекоменду̍я подда̍ть жару̍. самого̍ Баргамота",
+ "рекоменду̍я подда̍ть жару̍, самого̍ Баргамота",
+ "рекоменду̍я подда̍ть жару̍: самого̍ Баргамота",
+ "рекоменду̍я подда̍ть жару̍-самого̍ Баргамота",
+ ],
+)
+def test_ru_tokenizer_handles_final_diacritics(ru_tokenizer, text):
+ tokens = ru_tokenizer(text)
+ assert tokens[2].text in ("жару́", "ЖАРУ́", "жару̍")
+ assert tokens[3].text in punctuation
+
+
+@pytest.mark.parametrize(
+ "text",
+ [
+ "РЕКОМЕНДУ́Я ПОДДА́ТЬ ЖАРУ́.САМОГО́ БАРГАМОТА",
+ "рекоменду̍я подда̍ть жару́.самого́ Баргамота",
+ ],
+)
+def test_ru_tokenizer_handles_final_diacritic_and_period(ru_tokenizer, text):
+ tokens = ru_tokenizer(text)
+ assert tokens[2].text.lower() == "жару́.самого́"
diff --git a/spacy/tests/lang/sl/test_text.py b/spacy/tests/lang/sl/test_text.py
index ddc5b6b5d..a2a932077 100644
--- a/spacy/tests/lang/sl/test_text.py
+++ b/spacy/tests/lang/sl/test_text.py
@@ -20,7 +20,6 @@ od katerih so te svoboščine odvisne,
assert len(tokens) == 116
-@pytest.mark.xfail
def test_ordinal_number(sl_tokenizer):
text = "10. decembra 1948"
tokens = sl_tokenizer(text)
diff --git a/spacy/tests/lang/test_attrs.py b/spacy/tests/lang/test_attrs.py
index 1c27c1744..1e1bae08c 100644
--- a/spacy/tests/lang/test_attrs.py
+++ b/spacy/tests/lang/test_attrs.py
@@ -26,14 +26,6 @@ def test_attrs_idempotence(text):
assert intify_attrs(int_attrs) == {LEMMA: 10, IS_ALPHA: True}
-@pytest.mark.parametrize("text", ["dog"])
-def test_attrs_do_deprecated(text):
- int_attrs = intify_attrs(
- {"F": text, "is_alpha": True}, strings_map={text: 10}, _do_deprecated=True
- )
- assert int_attrs == {ORTH: 10, IS_ALPHA: True}
-
-
def test_attrs_ent_iob_intify():
int_attrs = intify_attrs({"ENT_IOB": ""})
assert int_attrs == {ENT_IOB: 0}
diff --git a/spacy/tests/lang/uk/test_lemmatizer.py b/spacy/tests/lang/uk/test_lemmatizer.py
index 4a787b2a6..57dd4198a 100644
--- a/spacy/tests/lang/uk/test_lemmatizer.py
+++ b/spacy/tests/lang/uk/test_lemmatizer.py
@@ -1,6 +1,10 @@
+import pytest
from spacy.tokens import Doc
+pytestmark = pytest.mark.filterwarnings("ignore::DeprecationWarning")
+
+
def test_uk_lemmatizer(uk_lemmatizer):
"""Check that the default uk lemmatizer runs."""
doc = Doc(uk_lemmatizer.vocab, words=["a", "b", "c"])
diff --git a/spacy/tests/lang/uk/test_tokenizer.py b/spacy/tests/lang/uk/test_tokenizer.py
index 3d6e87301..6596f490a 100644
--- a/spacy/tests/lang/uk/test_tokenizer.py
+++ b/spacy/tests/lang/uk/test_tokenizer.py
@@ -140,3 +140,10 @@ def test_uk_tokenizer_splits_bracket_period(uk_tokenizer):
text = "(Раз, два, три, проверка)."
tokens = uk_tokenizer(text)
assert tokens[len(tokens) - 1].text == "."
+
+
+def test_uk_tokenizer_handles_final_diacritics(uk_tokenizer):
+ text = "Хлібі́в не було́. Хлібі́в не було́."
+ tokens = uk_tokenizer(text)
+ assert tokens[2].text == "було́"
+ assert tokens[3].text == "."
diff --git a/spacy/tests/matcher/test_dependency_matcher.py b/spacy/tests/matcher/test_dependency_matcher.py
index 1728c82af..b4e19d69d 100644
--- a/spacy/tests/matcher/test_dependency_matcher.py
+++ b/spacy/tests/matcher/test_dependency_matcher.py
@@ -316,6 +316,20 @@ def test_dependency_matcher_precedence_ops(en_vocab, op, num_matches):
("the", "brown", "$--", 0),
("brown", "the", "$--", 1),
("brown", "brown", "$--", 0),
+ ("quick", "fox", "<++", 1),
+ ("quick", "over", "<++", 0),
+ ("over", "jumped", "<++", 0),
+ ("the", "fox", "<++", 2),
+ ("brown", "fox", "<--", 0),
+ ("fox", "jumped", "<--", 0),
+ ("fox", "over", "<--", 1),
+ ("jumped", "over", ">++", 1),
+ ("fox", "lazy", ">++", 0),
+ ("over", "the", ">++", 0),
+ ("brown", "fox", ">--", 0),
+ ("fox", "brown", ">--", 1),
+ ("jumped", "fox", ">--", 1),
+ ("fox", "the", ">--", 2),
],
)
def test_dependency_matcher_ops(en_vocab, doc, left, right, op, num_matches):
diff --git a/spacy/tests/matcher/test_matcher_api.py b/spacy/tests/matcher/test_matcher_api.py
index 82abe0914..8a594ed7e 100644
--- a/spacy/tests/matcher/test_matcher_api.py
+++ b/spacy/tests/matcher/test_matcher_api.py
@@ -368,6 +368,16 @@ def test_matcher_intersect_value_operator(en_vocab):
doc[0]._.ext = ["A", "B"]
assert len(matcher(doc)) == 1
+ # INTERSECTS matches nothing for iterables that aren't all str or int
+ matcher = Matcher(en_vocab)
+ pattern = [{"_": {"ext": {"INTERSECTS": ["Abx", "C"]}}}]
+ matcher.add("M", [pattern])
+ doc = Doc(en_vocab, words=["a", "b", "c"])
+ doc[0]._.ext = [["Abx"], "B"]
+ assert len(matcher(doc)) == 0
+ doc[0]._.ext = ["Abx", "B"]
+ assert len(matcher(doc)) == 1
+
# INTERSECTS with an empty pattern list matches nothing
matcher = Matcher(en_vocab)
pattern = [{"_": {"ext": {"INTERSECTS": []}}}]
@@ -476,6 +486,25 @@ def test_matcher_extension_set_membership(en_vocab):
assert len(matches) == 0
+def test_matcher_extension_in_set_predicate(en_vocab):
+ matcher = Matcher(en_vocab)
+ Token.set_extension("ext", default=[])
+ pattern = [{"_": {"ext": {"IN": ["A", "C"]}}}]
+ matcher.add("M", [pattern])
+ doc = Doc(en_vocab, words=["a", "b", "c"])
+
+ # The IN predicate expects an exact match between the
+ # extension value and one of the pattern's values.
+ doc[0]._.ext = ["A", "B"]
+ assert len(matcher(doc)) == 0
+
+ doc[0]._.ext = ["A"]
+ assert len(matcher(doc)) == 0
+
+ doc[0]._.ext = "A"
+ assert len(matcher(doc)) == 1
+
+
def test_matcher_basic_check(en_vocab):
matcher = Matcher(en_vocab)
# Potential mistake: pass in pattern instead of list of patterns
@@ -676,3 +705,38 @@ def test_matcher_ent_iob_key(en_vocab):
assert matches[0] == "Maria"
assert matches[1] == "Maria Esperanza"
assert matches[2] == "Esperanza"
+
+
+def test_matcher_min_max_operator(en_vocab):
+ # Exactly n matches {n}
+ doc = Doc(
+ en_vocab,
+ words=["foo", "bar", "foo", "foo", "bar", "foo", "foo", "foo", "bar", "bar"],
+ )
+ matcher = Matcher(en_vocab)
+ pattern = [{"ORTH": "foo", "OP": "{3}"}]
+ matcher.add("TEST", [pattern])
+
+ matches1 = [doc[start:end].text for _, start, end in matcher(doc)]
+ assert len(matches1) == 1
+
+ # At least n matches {n,}
+ matcher = Matcher(en_vocab)
+ pattern = [{"ORTH": "foo", "OP": "{2,}"}]
+ matcher.add("TEST", [pattern])
+ matches2 = [doc[start:end].text for _, start, end in matcher(doc)]
+ assert len(matches2) == 4
+
+ # At most m matches {,m}
+ matcher = Matcher(en_vocab)
+ pattern = [{"ORTH": "foo", "OP": "{,2}"}]
+ matcher.add("TEST", [pattern])
+ matches3 = [doc[start:end].text for _, start, end in matcher(doc)]
+ assert len(matches3) == 9
+
+ # At least n and at most m matches {n,m}
+ matcher = Matcher(en_vocab)
+ pattern = [{"ORTH": "foo", "OP": "{2,3}"}]
+ matcher.add("TEST", [pattern])
+ matches4 = [doc[start:end].text for _, start, end in matcher(doc)]
+ assert len(matches4) == 4
diff --git a/spacy/tests/matcher/test_matcher_logic.py b/spacy/tests/matcher/test_matcher_logic.py
index 3649b07ed..3b65fee23 100644
--- a/spacy/tests/matcher/test_matcher_logic.py
+++ b/spacy/tests/matcher/test_matcher_logic.py
@@ -699,6 +699,10 @@ def test_matcher_with_alignments_greedy_longest(en_vocab):
("aaaa", "a a a a a?", [0, 1, 2, 3]),
("aaab", "a+ a b", [0, 0, 1, 2]),
("aaab", "a+ a+ b", [0, 0, 1, 2]),
+ ("aaab", "a{2,} b", [0, 0, 0, 1]),
+ ("aaab", "a{,3} b", [0, 0, 0, 1]),
+ ("aaab", "a{2} b", [0, 0, 1]),
+ ("aaab", "a{2,3} b", [0, 0, 0, 1]),
]
for string, pattern_str, result in cases:
matcher = Matcher(en_vocab)
@@ -711,6 +715,8 @@ def test_matcher_with_alignments_greedy_longest(en_vocab):
pattern.append({"ORTH": part[0], "OP": "*"})
elif part.endswith("?"):
pattern.append({"ORTH": part[0], "OP": "?"})
+ elif part.endswith("}"):
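+ # curly-brace quantifier such as "a{2,3}": the first character is the token text, the rest becomes the OP value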
+ pattern.append({"ORTH": part[0], "OP": part[1:]})
else:
pattern.append({"ORTH": part})
matcher.add("PATTERN", [pattern], greedy="LONGEST")
@@ -722,7 +728,7 @@ def test_matcher_with_alignments_greedy_longest(en_vocab):
assert expected == result, (string, pattern_str, s, e, n_matches)
-def test_matcher_with_alignments_nongreedy(en_vocab):
+def test_matcher_with_alignments_non_greedy(en_vocab):
cases = [
(0, "aaab", "a* b", [[0, 1], [0, 0, 1], [0, 0, 0, 1], [1]]),
(1, "baab", "b a* b", [[0, 1, 1, 2]]),
@@ -752,6 +758,10 @@ def test_matcher_with_alignments_nongreedy(en_vocab):
(15, "aaaa", "a a a a a?", [[0, 1, 2, 3]]),
(16, "aaab", "a+ a b", [[0, 1, 2], [0, 0, 1, 2]]),
(17, "aaab", "a+ a+ b", [[0, 1, 2], [0, 0, 1, 2]]),
+ (18, "aaab", "a{2,} b", [[0, 0, 1], [0, 0, 0, 1]]),
+ (19, "aaab", "a{3} b", [[0, 0, 0, 1]]),
+ (20, "aaab", "a{2} b", [[0, 0, 1]]),
+ (21, "aaab", "a{2,3} b", [[0, 0, 1], [0, 0, 0, 1]]),
]
for case_id, string, pattern_str, results in cases:
matcher = Matcher(en_vocab)
@@ -764,6 +774,8 @@ def test_matcher_with_alignments_nongreedy(en_vocab):
pattern.append({"ORTH": part[0], "OP": "*"})
elif part.endswith("?"):
pattern.append({"ORTH": part[0], "OP": "?"})
+ elif part.endswith("}"):
+ pattern.append({"ORTH": part[0], "OP": part[1:]})
else:
pattern.append({"ORTH": part})
diff --git a/spacy/tests/matcher/test_pattern_validation.py b/spacy/tests/matcher/test_pattern_validation.py
index 8c265785c..e7eced02c 100644
--- a/spacy/tests/matcher/test_pattern_validation.py
+++ b/spacy/tests/matcher/test_pattern_validation.py
@@ -14,6 +14,14 @@ TEST_PATTERNS = [
('[{"TEXT": "foo"}, {"LOWER": "bar"}]', 1, 1),
([{"ENT_IOB": "foo"}], 1, 1),
([1, 2, 3], 3, 1),
+ ([{"TEXT": "foo", "OP": "{,}"}], 1, 1),
+ ([{"TEXT": "foo", "OP": "{,4}4"}], 1, 1),
+ ([{"TEXT": "foo", "OP": "{a,3}"}], 1, 1),
+ ([{"TEXT": "foo", "OP": "{a}"}], 1, 1),
+ ([{"TEXT": "foo", "OP": "{,a}"}], 1, 1),
+ ([{"TEXT": "foo", "OP": "{1,2,3}"}], 1, 1),
+ ([{"TEXT": "foo", "OP": "{1, 3}"}], 1, 1),
+ ([{"TEXT": "foo", "OP": "{-2}"}], 1, 1),
# Bad patterns flagged outside of Matcher
([{"_": {"foo": "bar", "baz": {"IN": "foo"}}}], 2, 0), # prev: (1, 0)
# Bad patterns not flagged with minimal checks
@@ -38,6 +46,7 @@ TEST_PATTERNS = [
([{"SENT_START": True}], 0, 0),
([{"ENT_ID": "STRING"}], 0, 0),
([{"ENT_KB_ID": "STRING"}], 0, 0),
+ ([{"TEXT": "ha", "OP": "{3}"}], 0, 0),
]
diff --git a/spacy/tests/matcher/test_phrase_matcher.py b/spacy/tests/matcher/test_phrase_matcher.py
index 3b24f3ba8..b462b1878 100644
--- a/spacy/tests/matcher/test_phrase_matcher.py
+++ b/spacy/tests/matcher/test_phrase_matcher.py
@@ -1,4 +1,5 @@
import pytest
+import warnings
import srsly
from mock import Mock
@@ -197,28 +198,6 @@ def test_phrase_matcher_contains(en_vocab):
assert "TEST2" not in matcher
-def test_phrase_matcher_add_new_api(en_vocab):
- doc = Doc(en_vocab, words=["a", "b"])
- patterns = [Doc(en_vocab, words=["a"]), Doc(en_vocab, words=["a", "b"])]
- matcher = PhraseMatcher(en_vocab)
- matcher.add("OLD_API", None, *patterns)
- assert len(matcher(doc)) == 2
- matcher = PhraseMatcher(en_vocab)
- on_match = Mock()
- matcher.add("OLD_API_CALLBACK", on_match, *patterns)
- assert len(matcher(doc)) == 2
- assert on_match.call_count == 2
- # New API: add(key: str, patterns: List[List[dict]], on_match: Callable)
- matcher = PhraseMatcher(en_vocab)
- matcher.add("NEW_API", patterns)
- assert len(matcher(doc)) == 2
- matcher = PhraseMatcher(en_vocab)
- on_match = Mock()
- matcher.add("NEW_API_CALLBACK", patterns, on_match=on_match)
- assert len(matcher(doc)) == 2
- assert on_match.call_count == 2
-
-
def test_phrase_matcher_repeated_add(en_vocab):
matcher = PhraseMatcher(en_vocab)
# match ID only gets added once
@@ -344,13 +323,13 @@ def test_phrase_matcher_validation(en_vocab):
matcher.add("TEST1", [doc1])
with pytest.warns(UserWarning):
matcher.add("TEST2", [doc2])
- with pytest.warns(None) as record:
+ with warnings.catch_warnings():
+ warnings.simplefilter("error")
matcher.add("TEST3", [doc3])
- assert not record.list
matcher = PhraseMatcher(en_vocab, attr="POS", validate=True)
- with pytest.warns(None) as record:
+ with warnings.catch_warnings():
+ warnings.simplefilter("error")
matcher.add("TEST4", [doc2])
- assert not record.list
def test_attr_validation(en_vocab):
@@ -467,6 +446,13 @@ def test_phrase_matcher_deprecated(en_vocab):
assert "spaCy v3.0" in str(record.list[0].message)
+def test_phrase_matcher_non_doc(en_vocab):
+ matcher = PhraseMatcher(en_vocab)
+ doc = Doc(en_vocab, words=["hello", "world"])
+ with pytest.raises(ValueError):
+ matcher.add("TEST", [doc, "junk"])
+
+
@pytest.mark.parametrize("attr", ["SENT_START", "IS_SENT_START"])
def test_phrase_matcher_sent_start(en_vocab, attr):
_ = PhraseMatcher(en_vocab, attr=attr) # noqa: F841
diff --git a/spacy/tests/package/test_requirements.py b/spacy/tests/package/test_requirements.py
index e20227455..94dffd7ce 100644
--- a/spacy/tests/package/test_requirements.py
+++ b/spacy/tests/package/test_requirements.py
@@ -4,8 +4,8 @@ from pathlib import Path
def test_build_dependencies():
# Check that library requirements are pinned exactly the same across different setup files.
- # TODO: correct checks for numpy rather than ignoring
libs_ignore_requirements = [
+ "cython",
"pytest",
"pytest-timeout",
"mock",
@@ -17,11 +17,12 @@ def test_build_dependencies():
"types-dataclasses",
"types-mock",
"types-requests",
+ "types-setuptools",
]
# ignore language-specific packages that shouldn't be installed by all
libs_ignore_setup = [
"fugashi",
- "natto-py",
+ "mecab-ko",
"pythainlp",
"sudachipy",
"sudachidict_core",
diff --git a/spacy/tests/parser/test_ner.py b/spacy/tests/parser/test_ner.py
index 1e9f178ff..5434a2fe7 100644
--- a/spacy/tests/parser/test_ner.py
+++ b/spacy/tests/parser/test_ner.py
@@ -10,7 +10,7 @@ from spacy.lang.it import Italian
from spacy.language import Language
from spacy.lookups import Lookups
from spacy.pipeline._parser_internals.ner import BiluoPushDown
-from spacy.training import Example, iob_to_biluo
+from spacy.training import Example, iob_to_biluo, split_bilu_label
from spacy.tokens import Doc, Span
from spacy.vocab import Vocab
from thinc.api import fix_random_seed
@@ -111,6 +111,9 @@ def test_issue2385():
# maintain support for iob2 format
tags3 = ("B-PERSON", "I-PERSON", "B-PERSON")
assert iob_to_biluo(tags3) == ["B-PERSON", "L-PERSON", "U-PERSON"]
+ # ensure it works with hyphens in the name
+ tags4 = ("B-MULTI-PERSON", "I-MULTI-PERSON", "B-MULTI-PERSON")
+ assert iob_to_biluo(tags4) == ["B-MULTI-PERSON", "L-MULTI-PERSON", "U-MULTI-PERSON"]
@pytest.mark.issue(2800)
@@ -155,6 +158,24 @@ def test_issue3209():
assert ner2.move_names == move_names
+def test_labels_from_BILUO():
+ """Test that labels are inferred correctly when there's a - in label."""
+ nlp = English()
+ ner = nlp.add_pipe("ner")
+ ner.add_label("LARGE-ANIMAL")
+ nlp.initialize()
+ move_names = [
+ "O",
+ "B-LARGE-ANIMAL",
+ "I-LARGE-ANIMAL",
+ "L-LARGE-ANIMAL",
+ "U-LARGE-ANIMAL",
+ ]
+ labels = {"LARGE-ANIMAL"}
+ assert ner.move_names == move_names
+ assert set(ner.labels) == labels
+
+
@pytest.mark.issue(4267)
def test_issue4267():
"""Test that running an entity_ruler after ner gives consistent results"""
@@ -299,7 +320,7 @@ def test_oracle_moves_missing_B(en_vocab):
elif tag == "O":
moves.add_action(move_types.index("O"), "")
else:
- action, label = tag.split("-")
+ action, label = split_bilu_label(tag)
moves.add_action(move_types.index("B"), label)
moves.add_action(move_types.index("I"), label)
moves.add_action(move_types.index("L"), label)
@@ -325,7 +346,7 @@ def test_oracle_moves_whitespace(en_vocab):
elif tag == "O":
moves.add_action(move_types.index("O"), "")
else:
- action, label = tag.split("-")
+ action, label = split_bilu_label(tag)
moves.add_action(move_types.index(action), label)
moves.get_oracle_sequence(example)
diff --git a/spacy/tests/parser/test_nonproj.py b/spacy/tests/parser/test_nonproj.py
index b420c300f..051d0ef0c 100644
--- a/spacy/tests/parser/test_nonproj.py
+++ b/spacy/tests/parser/test_nonproj.py
@@ -49,7 +49,9 @@ def test_parser_contains_cycle(tree, cyclic_tree, partial_tree, multirooted_tree
assert contains_cycle(multirooted_tree) is None
-def test_parser_is_nonproj_arc(cyclic_tree, nonproj_tree, partial_tree, multirooted_tree):
+def test_parser_is_nonproj_arc(
+ cyclic_tree, nonproj_tree, partial_tree, multirooted_tree
+):
assert is_nonproj_arc(0, nonproj_tree) is False
assert is_nonproj_arc(1, nonproj_tree) is False
assert is_nonproj_arc(2, nonproj_tree) is False
@@ -62,7 +64,9 @@ def test_parser_is_nonproj_arc(cyclic_tree, nonproj_tree, partial_tree, multiroo
assert is_nonproj_arc(7, partial_tree) is False
assert is_nonproj_arc(17, multirooted_tree) is False
assert is_nonproj_arc(16, multirooted_tree) is True
- with pytest.raises(ValueError, match=r'Found cycle in dependency graph: \[1, 2, 2, 4, 5, 3, 2\]'):
+ with pytest.raises(
+ ValueError, match=r"Found cycle in dependency graph: \[1, 2, 2, 4, 5, 3, 2\]"
+ ):
is_nonproj_arc(6, cyclic_tree)
@@ -73,7 +77,9 @@ def test_parser_is_nonproj_tree(
assert is_nonproj_tree(nonproj_tree) is True
assert is_nonproj_tree(partial_tree) is False
assert is_nonproj_tree(multirooted_tree) is True
- with pytest.raises(ValueError, match=r'Found cycle in dependency graph: \[1, 2, 2, 4, 5, 3, 2\]'):
+ with pytest.raises(
+ ValueError, match=r"Found cycle in dependency graph: \[1, 2, 2, 4, 5, 3, 2\]"
+ ):
is_nonproj_tree(cyclic_tree)
diff --git a/spacy/tests/pipeline/test_edit_tree_lemmatizer.py b/spacy/tests/pipeline/test_edit_tree_lemmatizer.py
index cf541e301..ad2e56729 100644
--- a/spacy/tests/pipeline/test_edit_tree_lemmatizer.py
+++ b/spacy/tests/pipeline/test_edit_tree_lemmatizer.py
@@ -1,3 +1,4 @@
+from typing import cast
import pickle
import pytest
from hypothesis import given
@@ -6,6 +7,7 @@ from spacy import util
from spacy.lang.en import English
from spacy.language import Language
from spacy.pipeline._edit_tree_internals.edit_trees import EditTrees
+from spacy.pipeline.trainable_pipe import TrainablePipe
from spacy.training import Example
from spacy.strings import StringStore
from spacy.util import make_tempdir
@@ -278,3 +280,26 @@ def test_empty_strings():
no_change = trees.add("xyz", "xyz")
empty = trees.add("", "")
assert no_change == empty
+
+
+def test_save_activations():
+ nlp = English()
+ lemmatizer = cast(TrainablePipe, nlp.add_pipe("trainable_lemmatizer"))
+ lemmatizer.min_tree_freq = 1
+ train_examples = []
+ for t in TRAIN_DATA:
+ train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
+ nlp.initialize(get_examples=lambda: train_examples)
+ nO = lemmatizer.model.get_dim("nO")
+
+ doc = nlp("This is a test.")
+ assert "trainable_lemmatizer" not in doc.activations
+
+ lemmatizer.save_activations = True
+ doc = nlp("This is a test.")
+ assert list(doc.activations["trainable_lemmatizer"].keys()) == [
+ "probabilities",
+ "tree_ids",
+ ]
+ assert doc.activations["trainable_lemmatizer"]["probabilities"].shape == (5, nO)
+ assert doc.activations["trainable_lemmatizer"]["tree_ids"].shape == (5,)
diff --git a/spacy/tests/pipeline/test_entity_linker.py b/spacy/tests/pipeline/test_entity_linker.py
index a6cfead77..75d1feea5 100644
--- a/spacy/tests/pipeline/test_entity_linker.py
+++ b/spacy/tests/pipeline/test_entity_linker.py
@@ -1,7 +1,8 @@
-from typing import Callable, Iterable
+from typing import Callable, Iterable, Dict, Any, cast
import pytest
from numpy.testing import assert_equal
+from thinc.types import Ragged
from spacy import registry, util
from spacy.attrs import ENT_KB_ID
@@ -9,7 +10,7 @@ from spacy.compat import pickle
from spacy.kb import Candidate, KnowledgeBase, get_candidates
from spacy.lang.en import English
from spacy.ml import load_kb
-from spacy.pipeline import EntityLinker
+from spacy.pipeline import EntityLinker, TrainablePipe
from spacy.pipeline.legacy import EntityLinker_v1
from spacy.pipeline.tok2vec import DEFAULT_TOK2VEC_MODEL
from spacy.scorer import Scorer
@@ -207,7 +208,7 @@ def test_no_entities():
nlp.add_pipe("sentencizer", first=True)
# this will run the pipeline on the examples and shouldn't crash
- results = nlp.evaluate(train_examples)
+ nlp.evaluate(train_examples)
def test_partial_links():
@@ -1048,6 +1049,10 @@ def test_no_gold_ents(patterns):
for eg in train_examples:
eg.predicted = ruler(eg.predicted)
+ # Entity ruler is no longer needed (initialization below wipes out the
+ # patterns and causes warnings)
+ nlp.remove_pipe("entity_ruler")
+
def create_kb(vocab):
# create artificial KB
mykb = KnowledgeBase(vocab, entity_vector_length=vector_length)
@@ -1063,7 +1068,7 @@ def test_no_gold_ents(patterns):
"entity_linker", config={"use_gold_ents": False}, last=True
)
entity_linker.set_kb(create_kb)
- assert entity_linker.use_gold_ents == False
+ assert entity_linker.use_gold_ents is False
optimizer = nlp.initialize(get_examples=lambda: train_examples)
for i in range(2):
@@ -1074,7 +1079,7 @@ def test_no_gold_ents(patterns):
nlp.add_pipe("sentencizer", first=True)
# this will run the pipeline on the examples and shouldn't crash
- results = nlp.evaluate(train_examples)
+ nlp.evaluate(train_examples)
@pytest.mark.issue(9575)
@@ -1114,4 +1119,124 @@ def test_tokenization_mismatch():
nlp.update(train_examples, sgd=optimizer, losses=losses)
nlp.add_pipe("sentencizer", first=True)
- results = nlp.evaluate(train_examples)
+ nlp.evaluate(train_examples)
+
+
+# fmt: off
+@pytest.mark.parametrize(
+ "meet_threshold,config",
+ [
+ (False, {"@architectures": "spacy.EntityLinker.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL}),
+ (True, {"@architectures": "spacy.EntityLinker.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL}),
+ ],
+)
+# fmt: on
+def test_threshold(meet_threshold: bool, config: Dict[str, Any]):
+ """Tests abstention threshold.
+ meet_threshold (bool): Whether to configure NEL setup so that confidence threshold is met.
+ config (Dict[str, Any]): NEL architecture config.
+ """
+ nlp = English()
+ nlp.add_pipe("sentencizer")
+ text = "Mahler's Symphony No. 8 was beautiful."
+ entities = [(0, 6, "PERSON")]
+ links = {(0, 6): {"Q7304": 1.0}}
+ sent_starts = [1, -1, 0, 0, 0, 0, 0, 0, 0]
+ entity_id = "Q7304"
+ doc = nlp(text)
+ train_examples = [
+ Example.from_dict(
+ doc, {"entities": entities, "links": links, "sent_starts": sent_starts}
+ )
+ ]
+
+ def create_kb(vocab):
+ # create artificial KB
+ mykb = KnowledgeBase(vocab, entity_vector_length=3)
+ mykb.add_entity(entity=entity_id, freq=12, entity_vector=[6, -4, 3])
+ mykb.add_alias(
+ alias="Mahler",
+ entities=[entity_id],
+ probabilities=[1 if meet_threshold else 0.01],
+ )
+ return mykb
+
+ # Create the Entity Linker component and add it to the pipeline
+ entity_linker = nlp.add_pipe(
+ "entity_linker",
+ last=True,
+ config={"threshold": 0.99, "model": config},
+ )
+ entity_linker.set_kb(create_kb) # type: ignore
+ nlp.initialize(get_examples=lambda: train_examples)
+
+ # Add a custom rule-based component to mimic NER
+ ruler = nlp.add_pipe("entity_ruler", before="entity_linker")
+ ruler.add_patterns([{"label": "PERSON", "pattern": [{"LOWER": "mahler"}]}]) # type: ignore
+ doc = nlp(text)
+
+ assert len(doc.ents) == 1
+ # parenthesized so that the comparison, not the conditional expression, is asserted
+ assert doc.ents[0].kb_id_ == (entity_id if meet_threshold else EntityLinker.NIL)
+
+
+def test_save_activations():
+ nlp = English()
+ vector_length = 3
+ assert "Q2146908" not in nlp.vocab.strings
+
+ # Convert the texts to docs to make sure we have doc.ents set for the training examples
+ train_examples = []
+ for text, annotation in TRAIN_DATA:
+ doc = nlp(text)
+ train_examples.append(Example.from_dict(doc, annotation))
+
+ def create_kb(vocab):
+ # create artificial KB - assign the same prior weight to the two Russ Cochran entities
+ # Q2146908 (Russ Cochran): American golfer
+ # Q7381115 (Russ Cochran): publisher
+ mykb = KnowledgeBase(vocab, entity_vector_length=vector_length)
+ mykb.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3])
+ mykb.add_entity(entity="Q7381115", freq=12, entity_vector=[9, 1, -7])
+ mykb.add_alias(
+ alias="Russ Cochran",
+ entities=["Q2146908", "Q7381115"],
+ probabilities=[0.5, 0.5],
+ )
+ return mykb
+
+ # Create the Entity Linker component and add it to the pipeline
+ entity_linker = cast(TrainablePipe, nlp.add_pipe("entity_linker", last=True))
+ assert isinstance(entity_linker, EntityLinker)
+ entity_linker.set_kb(create_kb)
+ assert "Q2146908" in entity_linker.vocab.strings
+ assert "Q2146908" in entity_linker.kb.vocab.strings
+
+ # initialize the NEL pipe
+ nlp.initialize(get_examples=lambda: train_examples)
+
+ nO = entity_linker.model.get_dim("nO")
+
+ nlp.add_pipe("sentencizer", first=True)
+ patterns = [
+ {"label": "PERSON", "pattern": [{"LOWER": "russ"}, {"LOWER": "cochran"}]},
+ {"label": "ORG", "pattern": [{"LOWER": "ec"}, {"LOWER": "comics"}]},
+ ]
+ ruler = nlp.add_pipe("entity_ruler", before="entity_linker")
+ ruler.add_patterns(patterns)
+
+ doc = nlp("Russ Cochran was a publisher")
+ assert "entity_linker" not in doc.activations
+
+ entity_linker.save_activations = True
+ doc = nlp("Russ Cochran was a publisher")
+ assert set(doc.activations["entity_linker"].keys()) == {"ents", "scores"}
+ ents = doc.activations["entity_linker"]["ents"]
+ assert isinstance(ents, Ragged)
+ assert ents.data.shape == (2, 1)
+ assert ents.data.dtype == "uint64"
+ assert ents.lengths.shape == (1,)
+ scores = doc.activations["entity_linker"]["scores"]
+ assert isinstance(scores, Ragged)
+ assert scores.data.shape == (2, 1)
+ assert scores.data.dtype == "float32"
+ assert scores.lengths.shape == (1,)
diff --git a/spacy/tests/pipeline/test_models.py b/spacy/tests/pipeline/test_models.py
index e3fd28d0f..50ad94422 100644
--- a/spacy/tests/pipeline/test_models.py
+++ b/spacy/tests/pipeline/test_models.py
@@ -9,7 +9,7 @@ from thinc.types import Array2d, Ragged
from spacy.lang.en import English
from spacy.ml import FeatureExtractor, StaticVectors
-from spacy.ml._character_embed import CharacterEmbed
+from spacy.ml.character_embed import CharacterEmbed
from spacy.tokens import Doc
diff --git a/spacy/tests/pipeline/test_morphologizer.py b/spacy/tests/pipeline/test_morphologizer.py
index 33696bfd8..70fc77304 100644
--- a/spacy/tests/pipeline/test_morphologizer.py
+++ b/spacy/tests/pipeline/test_morphologizer.py
@@ -1,3 +1,4 @@
+from typing import cast
import pytest
from numpy.testing import assert_equal
@@ -7,6 +8,7 @@ from spacy.lang.en import English
from spacy.language import Language
from spacy.tests.util import make_tempdir
from spacy.morphology import Morphology
+from spacy.pipeline import TrainablePipe
from spacy.attrs import MORPH
from spacy.tokens import Doc
@@ -197,3 +199,25 @@ def test_overfitting_IO():
gold_pos_tags = ["NOUN", "NOUN", "NOUN", "NOUN"]
assert [str(t.morph) for t in doc] == gold_morphs
assert [t.pos_ for t in doc] == gold_pos_tags
+
+
+def test_save_activations():
+ nlp = English()
+ morphologizer = cast(TrainablePipe, nlp.add_pipe("morphologizer"))
+ train_examples = []
+ for inst in TRAIN_DATA:
+ train_examples.append(Example.from_dict(nlp.make_doc(inst[0]), inst[1]))
+ nlp.initialize(get_examples=lambda: train_examples)
+
+ doc = nlp("This is a test.")
+ assert "morphologizer" not in doc.activations
+
+ morphologizer.save_activations = True
+ doc = nlp("This is a test.")
+ assert "morphologizer" in doc.activations
+ assert set(doc.activations["morphologizer"].keys()) == {
+ "label_ids",
+ "probabilities",
+ }
+ assert doc.activations["morphologizer"]["probabilities"].shape == (5, 6)
+ assert doc.activations["morphologizer"]["label_ids"].shape == (5,)
diff --git a/spacy/tests/pipeline/test_pipe_methods.py b/spacy/tests/pipeline/test_pipe_methods.py
index 4b8fb8ebc..b946061f6 100644
--- a/spacy/tests/pipeline/test_pipe_methods.py
+++ b/spacy/tests/pipeline/test_pipe_methods.py
@@ -4,13 +4,14 @@ import numpy
import pytest
from thinc.api import get_current_ops
+import spacy
from spacy.lang.en import English
from spacy.lang.en.syntax_iterators import noun_chunks
from spacy.language import Language
from spacy.pipeline import TrainablePipe
from spacy.tokens import Doc
from spacy.training import Example
-from spacy.util import SimpleFrozenList, get_arg_names
+from spacy.util import SimpleFrozenList, get_arg_names, make_tempdir
from spacy.vocab import Vocab
@@ -602,3 +603,63 @@ def test_update_with_annotates():
assert results[component] == "".join(eg.predicted.text for eg in examples)
for component in components - set(components_to_annotate):
assert results[component] == ""
+
+
+def test_load_disable_enable() -> None:
+ """
+ Tests spacy.load() with dis-/enabling components.
+ """
+
+ base_nlp = English()
+ for pipe in ("sentencizer", "tagger", "parser"):
+ base_nlp.add_pipe(pipe)
+
+ with make_tempdir() as tmp_dir:
+ base_nlp.to_disk(tmp_dir)
+ to_disable = ["parser", "tagger"]
+ to_enable = ["tagger", "parser"]
+ single_str = "tagger"
+
+ # Setting only `disable`.
+ nlp = spacy.load(tmp_dir, disable=to_disable)
+ assert all([comp_name in nlp.disabled for comp_name in to_disable])
+
+ # Setting only `enable`.
+ nlp = spacy.load(tmp_dir, enable=to_enable)
+ assert all(
+ [
+ (comp_name in nlp.disabled) is (comp_name not in to_enable)
+ for comp_name in nlp.component_names
+ ]
+ )
+
+ # Loading with a string representing one component
+ nlp = spacy.load(tmp_dir, exclude=single_str)
+ assert single_str not in nlp.component_names
+
+ nlp = spacy.load(tmp_dir, disable=single_str)
+ assert single_str in nlp.component_names
+ assert single_str not in nlp.pipe_names
+ assert nlp._disabled == {single_str}
+ assert nlp.disabled == [single_str]
+
+ # Testing consistent enable/disable combination.
+ nlp = spacy.load(
+ tmp_dir,
+ enable=to_enable,
+ disable=[
+ comp_name
+ for comp_name in nlp.component_names
+ if comp_name not in to_enable
+ ],
+ )
+ assert all(
+ [
+ (comp_name in nlp.disabled) is (comp_name not in to_enable)
+ for comp_name in nlp.component_names
+ ]
+ )
+
+ # Inconsistent enable/disable combination.
+ with pytest.raises(ValueError):
+ spacy.load(tmp_dir, enable=to_enable, disable=["parser"])
diff --git a/spacy/tests/pipeline/test_senter.py b/spacy/tests/pipeline/test_senter.py
index 047f59bef..3deac9e9a 100644
--- a/spacy/tests/pipeline/test_senter.py
+++ b/spacy/tests/pipeline/test_senter.py
@@ -1,3 +1,4 @@
+from typing import cast
import pytest
from numpy.testing import assert_equal
from spacy.attrs import SENT_START
@@ -6,6 +7,7 @@ from spacy import util
from spacy.training import Example
from spacy.lang.en import English
from spacy.language import Language
+from spacy.pipeline import TrainablePipe
from spacy.tests.util import make_tempdir
@@ -101,3 +103,26 @@ def test_overfitting_IO():
# test internal pipe labels vs. Language.pipe_labels with hidden labels
assert nlp.get_pipe("senter").labels == ("I", "S")
assert "senter" not in nlp.pipe_labels
+
+
+def test_save_activations():
+ # Test if activations are correctly added to Doc when requested.
+ nlp = English()
+ senter = cast(TrainablePipe, nlp.add_pipe("senter"))
+
+ train_examples = []
+ for t in TRAIN_DATA:
+ train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
+
+ nlp.initialize(get_examples=lambda: train_examples)
+ nO = senter.model.get_dim("nO")
+
+ doc = nlp("This is a test.")
+ assert "senter" not in doc.activations
+
+ senter.save_activations = True
+ doc = nlp("This is a test.")
+ assert "senter" in doc.activations
+ assert set(doc.activations["senter"].keys()) == {"label_ids", "probabilities"}
+ assert doc.activations["senter"]["probabilities"].shape == (5, nO)
+ assert doc.activations["senter"]["label_ids"].shape == (5,)
diff --git a/spacy/tests/pipeline/test_spancat.py b/spacy/tests/pipeline/test_spancat.py
index 15256a763..4fb26c7e7 100644
--- a/spacy/tests/pipeline/test_spancat.py
+++ b/spacy/tests/pipeline/test_spancat.py
@@ -7,7 +7,7 @@ from spacy import util
from spacy.lang.en import English
from spacy.language import Language
from spacy.tokens import SpanGroup
-from spacy.tokens._dict_proxies import SpanGroups
+from spacy.tokens.span_groups import SpanGroups
from spacy.training import Example
from spacy.util import fix_random_seed, registry, make_tempdir
@@ -419,3 +419,23 @@ def test_set_candidates():
assert len(docs[0].spans["candidates"]) == 9
assert docs[0].spans["candidates"][0].text == "Just"
assert docs[0].spans["candidates"][4].text == "Just a"
+
+
+def test_save_activations():
+ # Test if activations are correctly added to Doc when requested.
+ nlp = English()
+ spancat = nlp.add_pipe("spancat", config={"spans_key": SPAN_KEY})
+ train_examples = make_examples(nlp)
+ nlp.initialize(get_examples=lambda: train_examples)
+ nO = spancat.model.get_dim("nO")
+ assert nO == 2
+ assert set(spancat.labels) == {"LOC", "PERSON"}
+
+ doc = nlp("This is a test.")
+ assert "spancat" not in doc.activations
+
+ spancat.save_activations = True
+ doc = nlp("This is a test.")
+ assert set(doc.activations["spancat"].keys()) == {"indices", "scores"}
+ assert doc.activations["spancat"]["indices"].shape == (12, 2)
+ assert doc.activations["spancat"]["scores"].shape == (12, nO)
diff --git a/spacy/tests/pipeline/test_tagger.py b/spacy/tests/pipeline/test_tagger.py
index 96e75851e..a0c71198e 100644
--- a/spacy/tests/pipeline/test_tagger.py
+++ b/spacy/tests/pipeline/test_tagger.py
@@ -1,3 +1,4 @@
+from typing import cast
import pytest
from numpy.testing import assert_equal
from spacy.attrs import TAG
@@ -6,6 +7,7 @@ from spacy import util
from spacy.training import Example
from spacy.lang.en import English
from spacy.language import Language
+from spacy.pipeline import TrainablePipe
from thinc.api import compounding
from ..util import make_tempdir
@@ -211,6 +213,26 @@ def test_overfitting_IO():
assert doc3[0].tag_ != "N"
+def test_save_activations():
+ # Test if activations are correctly added to Doc when requested.
+ nlp = English()
+ tagger = cast(TrainablePipe, nlp.add_pipe("tagger"))
+ train_examples = []
+ for t in TRAIN_DATA:
+ train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
+ nlp.initialize(get_examples=lambda: train_examples)
+
+ doc = nlp("This is a test.")
+ assert "tagger" not in doc.activations
+
+ tagger.save_activations = True
+ doc = nlp("This is a test.")
+ assert "tagger" in doc.activations
+ assert set(doc.activations["tagger"].keys()) == {"label_ids", "probabilities"}
+ assert doc.activations["tagger"]["probabilities"].shape == (5, len(TAGS))
+ assert doc.activations["tagger"]["label_ids"].shape == (5,)
+
+
def test_tagger_requires_labels():
nlp = English()
nlp.add_pipe("tagger")
diff --git a/spacy/tests/pipeline/test_textcat.py b/spacy/tests/pipeline/test_textcat.py
index 0bb036a33..c1f61a3c0 100644
--- a/spacy/tests/pipeline/test_textcat.py
+++ b/spacy/tests/pipeline/test_textcat.py
@@ -1,3 +1,4 @@
+from typing import cast
import random
import numpy.random
@@ -11,7 +12,7 @@ from spacy import util
from spacy.cli.evaluate import print_prf_per_type, print_textcats_auc_per_cat
from spacy.lang.en import English
from spacy.language import Language
-from spacy.pipeline import TextCategorizer
+from spacy.pipeline import TextCategorizer, TrainablePipe
from spacy.pipeline.textcat import single_label_bow_config
from spacy.pipeline.textcat import single_label_cnn_config
from spacy.pipeline.textcat import single_label_default_config
@@ -285,7 +286,7 @@ def test_issue9904():
nlp.initialize(get_examples)
examples = get_examples()
- scores = textcat.predict([eg.predicted for eg in examples])
+ scores = textcat.predict([eg.predicted for eg in examples])["probabilities"]
loss = textcat.get_loss(examples, scores)[0]
loss_double_bs = textcat.get_loss(examples * 2, scores.repeat(2, axis=0))[0]
@@ -871,3 +872,41 @@ def test_textcat_multi_threshold():
scores = nlp.evaluate(train_examples, scorer_cfg={"threshold": 0})
assert scores["cats_f_per_type"]["POSITIVE"]["r"] == 1.0
+
+
+def test_save_activations():
+ nlp = English()
+ textcat = cast(TrainablePipe, nlp.add_pipe("textcat"))
+
+ train_examples = []
+ for text, annotations in TRAIN_DATA_SINGLE_LABEL:
+ train_examples.append(Example.from_dict(nlp.make_doc(text), annotations))
+ nlp.initialize(get_examples=lambda: train_examples)
+ nO = textcat.model.get_dim("nO")
+
+ doc = nlp("This is a test.")
+ assert "textcat" not in doc.activations
+
+ textcat.save_activations = True
+ doc = nlp("This is a test.")
+ assert list(doc.activations["textcat"].keys()) == ["probabilities"]
+ assert doc.activations["textcat"]["probabilities"].shape == (nO,)
+
+
+def test_save_activations_multi():
+ nlp = English()
+ textcat = cast(TrainablePipe, nlp.add_pipe("textcat_multilabel"))
+
+ train_examples = []
+ for text, annotations in TRAIN_DATA_MULTI_LABEL:
+ train_examples.append(Example.from_dict(nlp.make_doc(text), annotations))
+ nlp.initialize(get_examples=lambda: train_examples)
+ nO = textcat.model.get_dim("nO")
+
+ doc = nlp("This is a test.")
+ assert "textcat_multilabel" not in doc.activations
+
+ textcat.save_activations = True
+ doc = nlp("This is a test.")
+ assert list(doc.activations["textcat_multilabel"].keys()) == ["probabilities"]
+ assert doc.activations["textcat_multilabel"]["probabilities"].shape == (nO,)
diff --git a/spacy/tests/serialize/test_serialize_span_groups.py b/spacy/tests/serialize/test_serialize_span_groups.py
index 85313fcdc..c1c910fa1 100644
--- a/spacy/tests/serialize/test_serialize_span_groups.py
+++ b/spacy/tests/serialize/test_serialize_span_groups.py
@@ -1,7 +1,7 @@
import pytest
from spacy.tokens import Span, SpanGroup
-from spacy.tokens._dict_proxies import SpanGroups
+from spacy.tokens.span_groups import SpanGroups
@pytest.mark.issue(10685)
diff --git a/spacy/tests/test_language.py b/spacy/tests/test_language.py
index c5fdc8eb0..03a98d32f 100644
--- a/spacy/tests/test_language.py
+++ b/spacy/tests/test_language.py
@@ -659,3 +659,36 @@ def test_multiprocessing_gpu_warning(nlp2, texts):
# Trigger multi-processing.
for _ in docs:
pass
+
+
+def test_dot_in_factory_names(nlp):
+ Language.component("my_evil_component", func=evil_component)
+ nlp.add_pipe("my_evil_component")
+
+ with pytest.raises(ValueError, match="not permitted"):
+ Language.component("my.evil.component.v1", func=evil_component)
+
+ with pytest.raises(ValueError, match="not permitted"):
+ Language.factory("my.evil.component.v1", func=evil_component)
+
+
+def test_component_return():
+ """Test that an error is raised if components return a type other than a
+ doc."""
+ nlp = English()
+
+ @Language.component("test_component_good_pipe")
+ def good_pipe(doc):
+ return doc
+
+ nlp.add_pipe("test_component_good_pipe")
+ nlp("text")
+ nlp.remove_pipe("test_component_good_pipe")
+
+ @Language.component("test_component_bad_pipe")
+ def bad_pipe(doc):
+ return doc.text
+
+ nlp.add_pipe("test_component_bad_pipe")
+ with pytest.raises(ValueError, match="instead of a Doc"):
+ nlp("text")
diff --git a/spacy/tests/test_misc.py b/spacy/tests/test_misc.py
index 8e9c80ae6..b7853243a 100644
--- a/spacy/tests/test_misc.py
+++ b/spacy/tests/test_misc.py
@@ -7,7 +7,8 @@ from spacy import util
from spacy import prefer_gpu, require_gpu, require_cpu
from spacy.util import dot_to_object, SimpleFrozenList, import_file, to_ternary_int
from thinc.api import Config, Optimizer, ConfigValidationError
-from thinc.api import set_current_ops
+from thinc.api import get_current_ops, set_current_ops, NumpyOps, CupyOps, MPSOps
+from thinc.compat import has_cupy_gpu, has_torch_mps_gpu
from spacy.training.batchers import minibatch_by_words
from spacy.lang.en import English
from spacy.lang.nl import Dutch
@@ -15,7 +16,6 @@ from spacy.language import DEFAULT_CONFIG_PATH
from spacy.schemas import ConfigSchemaTraining, TokenPattern, TokenPatternSchema
from pydantic import ValidationError
-from thinc.api import get_current_ops, NumpyOps, CupyOps
from .util import get_random_doc, make_tempdir
@@ -80,26 +80,25 @@ def test_util_get_package_path(package):
def test_prefer_gpu():
current_ops = get_current_ops()
- try:
- import cupy # noqa: F401
-
- prefer_gpu()
+ if has_cupy_gpu:
+ assert prefer_gpu()
assert isinstance(get_current_ops(), CupyOps)
- except ImportError:
+ elif has_torch_mps_gpu:
+ assert prefer_gpu()
+ assert isinstance(get_current_ops(), MPSOps)
+ else:
assert not prefer_gpu()
set_current_ops(current_ops)
def test_require_gpu():
current_ops = get_current_ops()
- try:
- import cupy # noqa: F401
-
+ if has_cupy_gpu:
require_gpu()
assert isinstance(get_current_ops(), CupyOps)
- except ImportError:
- with pytest.raises(ValueError):
- require_gpu()
+ elif has_torch_mps_gpu:
+ require_gpu()
+ assert isinstance(get_current_ops(), MPSOps)
set_current_ops(current_ops)
diff --git a/spacy/tests/test_symbols.py b/spacy/tests/test_symbols.py
new file mode 100644
index 000000000..fb034acca
--- /dev/null
+++ b/spacy/tests/test_symbols.py
@@ -0,0 +1,467 @@
+import pytest
+from spacy.symbols import IDS, NAMES
+
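+# Snapshot of the v3 symbol-to-ID table; test_frozen_symbols below checks that the current table still matches it exactly.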
+V3_SYMBOLS = {
+ "": 0,
+ "IS_ALPHA": 1,
+ "IS_ASCII": 2,
+ "IS_DIGIT": 3,
+ "IS_LOWER": 4,
+ "IS_PUNCT": 5,
+ "IS_SPACE": 6,
+ "IS_TITLE": 7,
+ "IS_UPPER": 8,
+ "LIKE_URL": 9,
+ "LIKE_NUM": 10,
+ "LIKE_EMAIL": 11,
+ "IS_STOP": 12,
+ "IS_OOV_DEPRECATED": 13,
+ "IS_BRACKET": 14,
+ "IS_QUOTE": 15,
+ "IS_LEFT_PUNCT": 16,
+ "IS_RIGHT_PUNCT": 17,
+ "IS_CURRENCY": 18,
+ "FLAG19": 19,
+ "FLAG20": 20,
+ "FLAG21": 21,
+ "FLAG22": 22,
+ "FLAG23": 23,
+ "FLAG24": 24,
+ "FLAG25": 25,
+ "FLAG26": 26,
+ "FLAG27": 27,
+ "FLAG28": 28,
+ "FLAG29": 29,
+ "FLAG30": 30,
+ "FLAG31": 31,
+ "FLAG32": 32,
+ "FLAG33": 33,
+ "FLAG34": 34,
+ "FLAG35": 35,
+ "FLAG36": 36,
+ "FLAG37": 37,
+ "FLAG38": 38,
+ "FLAG39": 39,
+ "FLAG40": 40,
+ "FLAG41": 41,
+ "FLAG42": 42,
+ "FLAG43": 43,
+ "FLAG44": 44,
+ "FLAG45": 45,
+ "FLAG46": 46,
+ "FLAG47": 47,
+ "FLAG48": 48,
+ "FLAG49": 49,
+ "FLAG50": 50,
+ "FLAG51": 51,
+ "FLAG52": 52,
+ "FLAG53": 53,
+ "FLAG54": 54,
+ "FLAG55": 55,
+ "FLAG56": 56,
+ "FLAG57": 57,
+ "FLAG58": 58,
+ "FLAG59": 59,
+ "FLAG60": 60,
+ "FLAG61": 61,
+ "FLAG62": 62,
+ "FLAG63": 63,
+ "ID": 64,
+ "ORTH": 65,
+ "LOWER": 66,
+ "NORM": 67,
+ "SHAPE": 68,
+ "PREFIX": 69,
+ "SUFFIX": 70,
+ "LENGTH": 71,
+ "CLUSTER": 72,
+ "LEMMA": 73,
+ "POS": 74,
+ "TAG": 75,
+ "DEP": 76,
+ "ENT_IOB": 77,
+ "ENT_TYPE": 78,
+ "ENT_ID": 454,
+ "ENT_KB_ID": 452,
+ "HEAD": 79,
+ "SENT_START": 80,
+ "SPACY": 81,
+ "PROB": 82,
+ "LANG": 83,
+ "IDX": 455,
+ "ADJ": 84,
+ "ADP": 85,
+ "ADV": 86,
+ "AUX": 87,
+ "CONJ": 88,
+ "CCONJ": 89,
+ "DET": 90,
+ "INTJ": 91,
+ "NOUN": 92,
+ "NUM": 93,
+ "PART": 94,
+ "PRON": 95,
+ "PROPN": 96,
+ "PUNCT": 97,
+ "SCONJ": 98,
+ "SYM": 99,
+ "VERB": 100,
+ "X": 101,
+ "EOL": 102,
+ "SPACE": 103,
+ "DEPRECATED001": 104,
+ "DEPRECATED002": 105,
+ "DEPRECATED003": 106,
+ "DEPRECATED004": 107,
+ "DEPRECATED005": 108,
+ "DEPRECATED006": 109,
+ "DEPRECATED007": 110,
+ "DEPRECATED008": 111,
+ "DEPRECATED009": 112,
+ "DEPRECATED010": 113,
+ "DEPRECATED011": 114,
+ "DEPRECATED012": 115,
+ "DEPRECATED013": 116,
+ "DEPRECATED014": 117,
+ "DEPRECATED015": 118,
+ "DEPRECATED016": 119,
+ "DEPRECATED017": 120,
+ "DEPRECATED018": 121,
+ "DEPRECATED019": 122,
+ "DEPRECATED020": 123,
+ "DEPRECATED021": 124,
+ "DEPRECATED022": 125,
+ "DEPRECATED023": 126,
+ "DEPRECATED024": 127,
+ "DEPRECATED025": 128,
+ "DEPRECATED026": 129,
+ "DEPRECATED027": 130,
+ "DEPRECATED028": 131,
+ "DEPRECATED029": 132,
+ "DEPRECATED030": 133,
+ "DEPRECATED031": 134,
+ "DEPRECATED032": 135,
+ "DEPRECATED033": 136,
+ "DEPRECATED034": 137,
+ "DEPRECATED035": 138,
+ "DEPRECATED036": 139,
+ "DEPRECATED037": 140,
+ "DEPRECATED038": 141,
+ "DEPRECATED039": 142,
+ "DEPRECATED040": 143,
+ "DEPRECATED041": 144,
+ "DEPRECATED042": 145,
+ "DEPRECATED043": 146,
+ "DEPRECATED044": 147,
+ "DEPRECATED045": 148,
+ "DEPRECATED046": 149,
+ "DEPRECATED047": 150,
+ "DEPRECATED048": 151,
+ "DEPRECATED049": 152,
+ "DEPRECATED050": 153,
+ "DEPRECATED051": 154,
+ "DEPRECATED052": 155,
+ "DEPRECATED053": 156,
+ "DEPRECATED054": 157,
+ "DEPRECATED055": 158,
+ "DEPRECATED056": 159,
+ "DEPRECATED057": 160,
+ "DEPRECATED058": 161,
+ "DEPRECATED059": 162,
+ "DEPRECATED060": 163,
+ "DEPRECATED061": 164,
+ "DEPRECATED062": 165,
+ "DEPRECATED063": 166,
+ "DEPRECATED064": 167,
+ "DEPRECATED065": 168,
+ "DEPRECATED066": 169,
+ "DEPRECATED067": 170,
+ "DEPRECATED068": 171,
+ "DEPRECATED069": 172,
+ "DEPRECATED070": 173,
+ "DEPRECATED071": 174,
+ "DEPRECATED072": 175,
+ "DEPRECATED073": 176,
+ "DEPRECATED074": 177,
+ "DEPRECATED075": 178,
+ "DEPRECATED076": 179,
+ "DEPRECATED077": 180,
+ "DEPRECATED078": 181,
+ "DEPRECATED079": 182,
+ "DEPRECATED080": 183,
+ "DEPRECATED081": 184,
+ "DEPRECATED082": 185,
+ "DEPRECATED083": 186,
+ "DEPRECATED084": 187,
+ "DEPRECATED085": 188,
+ "DEPRECATED086": 189,
+ "DEPRECATED087": 190,
+ "DEPRECATED088": 191,
+ "DEPRECATED089": 192,
+ "DEPRECATED090": 193,
+ "DEPRECATED091": 194,
+ "DEPRECATED092": 195,
+ "DEPRECATED093": 196,
+ "DEPRECATED094": 197,
+ "DEPRECATED095": 198,
+ "DEPRECATED096": 199,
+ "DEPRECATED097": 200,
+ "DEPRECATED098": 201,
+ "DEPRECATED099": 202,
+ "DEPRECATED100": 203,
+ "DEPRECATED101": 204,
+ "DEPRECATED102": 205,
+ "DEPRECATED103": 206,
+ "DEPRECATED104": 207,
+ "DEPRECATED105": 208,
+ "DEPRECATED106": 209,
+ "DEPRECATED107": 210,
+ "DEPRECATED108": 211,
+ "DEPRECATED109": 212,
+ "DEPRECATED110": 213,
+ "DEPRECATED111": 214,
+ "DEPRECATED112": 215,
+ "DEPRECATED113": 216,
+ "DEPRECATED114": 217,
+ "DEPRECATED115": 218,
+ "DEPRECATED116": 219,
+ "DEPRECATED117": 220,
+ "DEPRECATED118": 221,
+ "DEPRECATED119": 222,
+ "DEPRECATED120": 223,
+ "DEPRECATED121": 224,
+ "DEPRECATED122": 225,
+ "DEPRECATED123": 226,
+ "DEPRECATED124": 227,
+ "DEPRECATED125": 228,
+ "DEPRECATED126": 229,
+ "DEPRECATED127": 230,
+ "DEPRECATED128": 231,
+ "DEPRECATED129": 232,
+ "DEPRECATED130": 233,
+ "DEPRECATED131": 234,
+ "DEPRECATED132": 235,
+ "DEPRECATED133": 236,
+ "DEPRECATED134": 237,
+ "DEPRECATED135": 238,
+ "DEPRECATED136": 239,
+ "DEPRECATED137": 240,
+ "DEPRECATED138": 241,
+ "DEPRECATED139": 242,
+ "DEPRECATED140": 243,
+ "DEPRECATED141": 244,
+ "DEPRECATED142": 245,
+ "DEPRECATED143": 246,
+ "DEPRECATED144": 247,
+ "DEPRECATED145": 248,
+ "DEPRECATED146": 249,
+ "DEPRECATED147": 250,
+ "DEPRECATED148": 251,
+ "DEPRECATED149": 252,
+ "DEPRECATED150": 253,
+ "DEPRECATED151": 254,
+ "DEPRECATED152": 255,
+ "DEPRECATED153": 256,
+ "DEPRECATED154": 257,
+ "DEPRECATED155": 258,
+ "DEPRECATED156": 259,
+ "DEPRECATED157": 260,
+ "DEPRECATED158": 261,
+ "DEPRECATED159": 262,
+ "DEPRECATED160": 263,
+ "DEPRECATED161": 264,
+ "DEPRECATED162": 265,
+ "DEPRECATED163": 266,
+ "DEPRECATED164": 267,
+ "DEPRECATED165": 268,
+ "DEPRECATED166": 269,
+ "DEPRECATED167": 270,
+ "DEPRECATED168": 271,
+ "DEPRECATED169": 272,
+ "DEPRECATED170": 273,
+ "DEPRECATED171": 274,
+ "DEPRECATED172": 275,
+ "DEPRECATED173": 276,
+ "DEPRECATED174": 277,
+ "DEPRECATED175": 278,
+ "DEPRECATED176": 279,
+ "DEPRECATED177": 280,
+ "DEPRECATED178": 281,
+ "DEPRECATED179": 282,
+ "DEPRECATED180": 283,
+ "DEPRECATED181": 284,
+ "DEPRECATED182": 285,
+ "DEPRECATED183": 286,
+ "DEPRECATED184": 287,
+ "DEPRECATED185": 288,
+ "DEPRECATED186": 289,
+ "DEPRECATED187": 290,
+ "DEPRECATED188": 291,
+ "DEPRECATED189": 292,
+ "DEPRECATED190": 293,
+ "DEPRECATED191": 294,
+ "DEPRECATED192": 295,
+ "DEPRECATED193": 296,
+ "DEPRECATED194": 297,
+ "DEPRECATED195": 298,
+ "DEPRECATED196": 299,
+ "DEPRECATED197": 300,
+ "DEPRECATED198": 301,
+ "DEPRECATED199": 302,
+ "DEPRECATED200": 303,
+ "DEPRECATED201": 304,
+ "DEPRECATED202": 305,
+ "DEPRECATED203": 306,
+ "DEPRECATED204": 307,
+ "DEPRECATED205": 308,
+ "DEPRECATED206": 309,
+ "DEPRECATED207": 310,
+ "DEPRECATED208": 311,
+ "DEPRECATED209": 312,
+ "DEPRECATED210": 313,
+ "DEPRECATED211": 314,
+ "DEPRECATED212": 315,
+ "DEPRECATED213": 316,
+ "DEPRECATED214": 317,
+ "DEPRECATED215": 318,
+ "DEPRECATED216": 319,
+ "DEPRECATED217": 320,
+ "DEPRECATED218": 321,
+ "DEPRECATED219": 322,
+ "DEPRECATED220": 323,
+ "DEPRECATED221": 324,
+ "DEPRECATED222": 325,
+ "DEPRECATED223": 326,
+ "DEPRECATED224": 327,
+ "DEPRECATED225": 328,
+ "DEPRECATED226": 329,
+ "DEPRECATED227": 330,
+ "DEPRECATED228": 331,
+ "DEPRECATED229": 332,
+ "DEPRECATED230": 333,
+ "DEPRECATED231": 334,
+ "DEPRECATED232": 335,
+ "DEPRECATED233": 336,
+ "DEPRECATED234": 337,
+ "DEPRECATED235": 338,
+ "DEPRECATED236": 339,
+ "DEPRECATED237": 340,
+ "DEPRECATED238": 341,
+ "DEPRECATED239": 342,
+ "DEPRECATED240": 343,
+ "DEPRECATED241": 344,
+ "DEPRECATED242": 345,
+ "DEPRECATED243": 346,
+ "DEPRECATED244": 347,
+ "DEPRECATED245": 348,
+ "DEPRECATED246": 349,
+ "DEPRECATED247": 350,
+ "DEPRECATED248": 351,
+ "DEPRECATED249": 352,
+ "DEPRECATED250": 353,
+ "DEPRECATED251": 354,
+ "DEPRECATED252": 355,
+ "DEPRECATED253": 356,
+ "DEPRECATED254": 357,
+ "DEPRECATED255": 358,
+ "DEPRECATED256": 359,
+ "DEPRECATED257": 360,
+ "DEPRECATED258": 361,
+ "DEPRECATED259": 362,
+ "DEPRECATED260": 363,
+ "DEPRECATED261": 364,
+ "DEPRECATED262": 365,
+ "DEPRECATED263": 366,
+ "DEPRECATED264": 367,
+ "DEPRECATED265": 368,
+ "DEPRECATED266": 369,
+ "DEPRECATED267": 370,
+ "DEPRECATED268": 371,
+ "DEPRECATED269": 372,
+ "DEPRECATED270": 373,
+ "DEPRECATED271": 374,
+ "DEPRECATED272": 375,
+ "DEPRECATED273": 376,
+ "DEPRECATED274": 377,
+ "DEPRECATED275": 378,
+ "DEPRECATED276": 379,
+ "PERSON": 380,
+ "NORP": 381,
+ "FACILITY": 382,
+ "ORG": 383,
+ "GPE": 384,
+ "LOC": 385,
+ "PRODUCT": 386,
+ "EVENT": 387,
+ "WORK_OF_ART": 388,
+ "LANGUAGE": 389,
+ "DATE": 391,
+ "TIME": 392,
+ "PERCENT": 393,
+ "MONEY": 394,
+ "QUANTITY": 395,
+ "ORDINAL": 396,
+ "CARDINAL": 397,
+ "acomp": 398,
+ "advcl": 399,
+ "advmod": 400,
+ "agent": 401,
+ "amod": 402,
+ "appos": 403,
+ "attr": 404,
+ "aux": 405,
+ "auxpass": 406,
+ "cc": 407,
+ "ccomp": 408,
+ "complm": 409,
+ "conj": 410,
+ "cop": 411,
+ "csubj": 412,
+ "csubjpass": 413,
+ "dep": 414,
+ "det": 415,
+ "dobj": 416,
+ "expl": 417,
+ "hmod": 418,
+ "hyph": 419,
+ "infmod": 420,
+ "intj": 421,
+ "iobj": 422,
+ "mark": 423,
+ "meta": 424,
+ "neg": 425,
+ "nmod": 426,
+ "nn": 427,
+ "npadvmod": 428,
+ "nsubj": 429,
+ "nsubjpass": 430,
+ "num": 431,
+ "number": 432,
+ "oprd": 433,
+ "obj": 434,
+ "obl": 435,
+ "parataxis": 436,
+ "partmod": 437,
+ "pcomp": 438,
+ "pobj": 439,
+ "poss": 440,
+ "possessive": 441,
+ "preconj": 442,
+ "prep": 443,
+ "prt": 444,
+ "punct": 445,
+ "quantmod": 446,
+ "rcmod": 448,
+ "relcl": 447,
+ "root": 449,
+ "xcomp": 450,
+ "acl": 451,
+ "LAW": 390,
+ "MORPH": 453,
+ "_": 456,
+}
+
+
+def test_frozen_symbols():
+ assert IDS == V3_SYMBOLS
+ assert NAMES == {v: k for k, v in IDS.items()}
diff --git a/spacy/tests/tokenizer/test_urls.py b/spacy/tests/tokenizer/test_urls.py
index 57e970f87..3d8c7b085 100644
--- a/spacy/tests/tokenizer/test_urls.py
+++ b/spacy/tests/tokenizer/test_urls.py
@@ -33,6 +33,9 @@ URLS_SHOULD_MATCH = [
"http://userid:password@example.com/",
"http://142.42.1.1/",
"http://142.42.1.1:8080/",
+ "http://10.140.12.13/foo",
+ "http://10.140.12.13/foo/bar?arg1=baz&arg2=taz",
+ "http://10.1.1.1",
"http://foo.com/blah_(wikipedia)#cite-1",
"http://foo.com/blah_(wikipedia)_blah#cite-1",
"http://foo.com/unicode_(✪)_in_parens",
@@ -94,6 +97,7 @@ URLS_SHOULD_NOT_MATCH = [
"http://foo.bar/foo(bar)baz quux",
"http://-error-.invalid/",
"http://a.b-.co",
+ # Loopback and broadcast addresses should be excluded
"http://0.0.0.0",
"http://10.1.1.0",
"http://10.1.1.255",
@@ -102,7 +106,6 @@ URLS_SHOULD_NOT_MATCH = [
"http://3628126748",
"http://.www.foo.bar/",
"http://.www.foo.bar./",
- "http://10.1.1.1",
"NASDAQ:GOOG",
"http://-a.b.co",
pytest.param("foo.com", marks=pytest.mark.xfail()),
diff --git a/spacy/tests/training/test_logger.py b/spacy/tests/training/test_logger.py
new file mode 100644
index 000000000..0dfd0cbf4
--- /dev/null
+++ b/spacy/tests/training/test_logger.py
@@ -0,0 +1,30 @@
+import pytest
+import spacy
+
+from spacy.training import loggers
+
+
+@pytest.fixture()
+def nlp():
+ nlp = spacy.blank("en")
+ nlp.add_pipe("ner")
+ return nlp
+
+
+@pytest.fixture()
+def info():
+ return {
+ "losses": {"ner": 100},
+ "other_scores": {"ENTS_F": 0.85, "ENTS_P": 0.90, "ENTS_R": 0.80},
+ "epoch": 100,
+ "step": 125,
+ "score": 85,
+ }
+
+
+def test_console_logger(nlp, info):
+ console_logger = loggers.console_logger(
+ progress_bar=True, console_output=True, output_file=None
+ )
+ log_step, finalize = console_logger(nlp)
+ log_step(info)
diff --git a/spacy/tests/training/test_new_example.py b/spacy/tests/training/test_new_example.py
index a39d40ded..6b15603b3 100644
--- a/spacy/tests/training/test_new_example.py
+++ b/spacy/tests/training/test_new_example.py
@@ -431,3 +431,41 @@ def test_Example_aligned_whitespace(en_vocab):
example = Example(predicted, reference)
assert example.get_aligned("TAG", as_string=True) == tags
+
+
+@pytest.mark.issue("11260")
+def test_issue11260():
+ annots = {
+ "words": ["I", "like", "New", "York", "."],
+ "spans": {
+ "cities": [(7, 15, "LOC", "")],
+ "people": [(0, 1, "PERSON", "")],
+ },
+ }
+ vocab = Vocab()
+ predicted = Doc(vocab, words=annots["words"])
+ example = Example.from_dict(predicted, annots)
+ assert len(example.reference.spans["cities"]) == 1
+ assert len(example.reference.spans["people"]) == 1
+
+ output_dict = example.to_dict()
+ assert "spans" in output_dict["doc_annotation"]
+ assert output_dict["doc_annotation"]["spans"]["cities"] == annots["spans"]["cities"]
+ assert output_dict["doc_annotation"]["spans"]["people"] == annots["spans"]["people"]
+
+ output_example = Example.from_dict(predicted, output_dict)
+
+ assert len(output_example.reference.spans["cities"]) == len(
+ example.reference.spans["cities"]
+ )
+ assert len(output_example.reference.spans["people"]) == len(
+ example.reference.spans["people"]
+ )
+ for span in example.reference.spans["cities"]:
+ assert span.label_ == "LOC"
+ assert span.text == "New York"
+ assert span.start_char == 7
+ for span in example.reference.spans["people"]:
+ assert span.label_ == "PERSON"
+ assert span.text == "I"
+ assert span.start_char == 0
diff --git a/spacy/tests/training/test_training.py b/spacy/tests/training/test_training.py
index 31bf7e07b..4384a796d 100644
--- a/spacy/tests/training/test_training.py
+++ b/spacy/tests/training/test_training.py
@@ -679,6 +679,31 @@ def test_projectivize(en_tokenizer):
assert proj_heads == [3, 2, 3, 3, 3]
assert nonproj_heads == [3, 2, 3, 3, 2]
+ # Test single token documents
+ doc = en_tokenizer("Conrail")
+ heads = [0]
+ deps = ["dep"]
+ example = Example.from_dict(doc, {"heads": heads, "deps": deps})
+ proj_heads, proj_labels = example.get_aligned_parse(projectivize=True)
+ assert proj_heads == heads
+ assert proj_labels == deps
+
+ # Test documents with no alignments
+ doc_a = Doc(
+ doc.vocab, words=["Double-Jointed"], spaces=[False], deps=["ROOT"], heads=[0]
+ )
+ doc_b = Doc(
+ doc.vocab,
+ words=["Double", "-", "Jointed"],
+ spaces=[True, True, True],
+ deps=["amod", "punct", "ROOT"],
+ heads=[2, 2, 2],
+ )
+ example = Example(doc_a, doc_b)
+ proj_heads, proj_deps = example.get_aligned_parse(projectivize=True)
+ assert proj_heads == [None]
+ assert proj_deps == [None]
+
def test_iob_to_biluo():
good_iob = ["O", "O", "B-LOC", "I-LOC", "O", "B-PERSON"]
diff --git a/spacy/tests/util.py b/spacy/tests/util.py
index 365ea4349..d5f3c39ff 100644
--- a/spacy/tests/util.py
+++ b/spacy/tests/util.py
@@ -5,6 +5,7 @@ import srsly
from spacy.tokens import Doc
from spacy.vocab import Vocab
from spacy.util import make_tempdir # noqa: F401
+from spacy.training import split_bilu_label
from thinc.api import get_current_ops
@@ -40,7 +41,7 @@ def apply_transition_sequence(parser, doc, sequence):
desired state."""
for action_name in sequence:
if "-" in action_name:
- move, label = action_name.split("-")
+ move, label = split_bilu_label(action_name)
parser.add_label(label)
with parser.step_through(doc) as stepwise:
for transition in sequence:
diff --git a/spacy/tests/vocab_vectors/test_similarity.py b/spacy/tests/vocab_vectors/test_similarity.py
index 47cd1f060..1efcdd81e 100644
--- a/spacy/tests/vocab_vectors/test_similarity.py
+++ b/spacy/tests/vocab_vectors/test_similarity.py
@@ -1,6 +1,7 @@
import pytest
import numpy
from spacy.tokens import Doc
+from spacy.vocab import Vocab
from ..util import get_cosine, add_vecs_to_vocab
@@ -71,19 +72,17 @@ def test_vectors_similarity_DD(vocab, vectors):
def test_vectors_similarity_TD(vocab, vectors):
[(word1, vec1), (word2, vec2)] = vectors
doc = Doc(vocab, words=[word1, word2])
- with pytest.warns(UserWarning):
- assert isinstance(doc.similarity(doc[0]), float)
- assert isinstance(doc[0].similarity(doc), float)
- assert doc.similarity(doc[0]) == doc[0].similarity(doc)
+ assert isinstance(doc.similarity(doc[0]), float)
+ assert isinstance(doc[0].similarity(doc), float)
+ assert doc.similarity(doc[0]) == doc[0].similarity(doc)
def test_vectors_similarity_TS(vocab, vectors):
[(word1, vec1), (word2, vec2)] = vectors
doc = Doc(vocab, words=[word1, word2])
- with pytest.warns(UserWarning):
- assert isinstance(doc[:2].similarity(doc[0]), float)
- assert isinstance(doc[0].similarity(doc[-2]), float)
- assert doc[:2].similarity(doc[0]) == doc[0].similarity(doc[:2])
+ assert isinstance(doc[:2].similarity(doc[0]), float)
+ assert isinstance(doc[0].similarity(doc[:2]), float)
+ assert doc[:2].similarity(doc[0]) == doc[0].similarity(doc[:2])
def test_vectors_similarity_DS(vocab, vectors):
@@ -91,3 +90,21 @@ def test_vectors_similarity_DS(vocab, vectors):
doc = Doc(vocab, words=[word1, word2])
assert isinstance(doc.similarity(doc[:2]), float)
assert doc.similarity(doc[:2]) == doc[:2].similarity(doc)
+
+
+def test_vectors_similarity_no_vectors():
+ vocab = Vocab()
+ doc1 = Doc(vocab, words=["a", "b"])
+ doc2 = Doc(vocab, words=["c", "d", "e"])
+ with pytest.warns(UserWarning):
+ doc1.similarity(doc2)
+ with pytest.warns(UserWarning):
+ doc1.similarity(doc2[1])
+ with pytest.warns(UserWarning):
+ doc1.similarity(doc2[:2])
+ with pytest.warns(UserWarning):
+ doc2.similarity(doc1)
+ with pytest.warns(UserWarning):
+ doc2[1].similarity(doc1)
+ with pytest.warns(UserWarning):
+ doc2[:2].similarity(doc1)
diff --git a/spacy/tests/vocab_vectors/test_vectors.py b/spacy/tests/vocab_vectors/test_vectors.py
index e3ad206f4..dd2cfc596 100644
--- a/spacy/tests/vocab_vectors/test_vectors.py
+++ b/spacy/tests/vocab_vectors/test_vectors.py
@@ -318,17 +318,15 @@ def test_vectors_lexeme_doc_similarity(vocab, text):
@pytest.mark.parametrize("text", [["apple", "orange", "juice"]])
def test_vectors_span_span_similarity(vocab, text):
doc = Doc(vocab, words=text)
- with pytest.warns(UserWarning):
- assert doc[0:2].similarity(doc[1:3]) == doc[1:3].similarity(doc[0:2])
- assert -1.0 < doc[0:2].similarity(doc[1:3]) < 1.0
+ assert doc[0:2].similarity(doc[1:3]) == doc[1:3].similarity(doc[0:2])
+ assert -1.0 < doc[0:2].similarity(doc[1:3]) < 1.0
@pytest.mark.parametrize("text", [["apple", "orange", "juice"]])
def test_vectors_span_doc_similarity(vocab, text):
doc = Doc(vocab, words=text)
- with pytest.warns(UserWarning):
- assert doc[0:2].similarity(doc) == doc.similarity(doc[0:2])
- assert -1.0 < doc[0:2].similarity(doc) < 1.0
+ assert doc[0:2].similarity(doc) == doc.similarity(doc[0:2])
+ assert -1.0 < doc[0:2].similarity(doc) < 1.0
@pytest.mark.parametrize(
diff --git a/spacy/tokenizer.pxd b/spacy/tokenizer.pxd
index e6a072053..86e62ddbf 100644
--- a/spacy/tokenizer.pxd
+++ b/spacy/tokenizer.pxd
@@ -23,11 +23,7 @@ cdef class Tokenizer:
cdef object _infix_finditer
cdef object _rules
cdef PhraseMatcher _special_matcher
- # TODO convert to bool in v4
- cdef int _faster_heuristics
- # TODO next one is unused and should be removed in v4
- # https://github.com/explosion/spaCy/pull/9150
- cdef int _unused_int2
+ cdef bint _faster_heuristics
cdef Doc _tokenize_affixes(self, str string, bint with_special_cases)
cdef int _apply_special_cases(self, Doc doc) except -1
diff --git a/spacy/tokenizer.pyx b/spacy/tokenizer.pyx
index 0e75b5f7a..ff8d85ac7 100644
--- a/spacy/tokenizer.pyx
+++ b/spacy/tokenizer.pyx
@@ -8,7 +8,6 @@ from preshed.maps cimport PreshMap
cimport cython
import re
-import warnings
from .tokens.doc cimport Doc
from .strings cimport hash_string
@@ -16,9 +15,9 @@ from .lexeme cimport EMPTY_LEXEME
from .attrs import intify_attrs
from .symbols import ORTH, NORM
-from .errors import Errors, Warnings
+from .errors import Errors
from . import util
-from .util import registry, get_words_and_spaces
+from .util import get_words_and_spaces
from .attrs import intify_attrs
from .symbols import ORTH
from .scorer import Scorer
@@ -128,10 +127,10 @@ cdef class Tokenizer:
property faster_heuristics:
def __get__(self):
- return bool(self._faster_heuristics)
+ return self._faster_heuristics
def __set__(self, faster_heuristics):
- self._faster_heuristics = bool(faster_heuristics)
+ self._faster_heuristics = faster_heuristics
self._reload_special_cases()
def __reduce__(self):
@@ -582,7 +581,7 @@ cdef class Tokenizer:
substrings (iterable): A sequence of dicts, where each dict describes
a token and its attributes.
"""
- attrs = [intify_attrs(spec, _do_deprecated=True) for spec in substrings]
+ attrs = [intify_attrs(spec) for spec in substrings]
orth = "".join([spec[ORTH] for spec in attrs])
if chunk != orth:
raise ValueError(Errors.E997.format(chunk=chunk, orth=orth, token_attrs=substrings))
@@ -615,7 +614,7 @@ cdef class Tokenizer:
self._rules[string] = substrings
self._flush_cache()
if not self.faster_heuristics or self.find_prefix(string) or self.find_infix(string) or self.find_suffix(string) or " " in string:
- self._special_matcher.add(string, None, self._tokenize_affixes(string, False))
+ self._special_matcher.add(string, [self._tokenize_affixes(string, False)])
def _reload_special_cases(self):
self._flush_cache()
@@ -650,7 +649,7 @@ cdef class Tokenizer:
url_match = re.compile("a^").match
special_cases = {}
for orth, special_tokens in self.rules.items():
- special_cases[orth] = [intify_attrs(special_token, strings_map=self.vocab.strings, _do_deprecated=True) for special_token in special_tokens]
+ special_cases[orth] = [intify_attrs(special_token, strings_map=self.vocab.strings) for special_token in special_tokens]
tokens = []
for substring in text.split():
suffixes = []
diff --git a/spacy/tokens/__init__.py b/spacy/tokens/__init__.py
index 64090925d..cb0911283 100644
--- a/spacy/tokens/__init__.py
+++ b/spacy/tokens/__init__.py
@@ -2,7 +2,7 @@ from .doc import Doc
from .token import Token
from .span import Span
from .span_group import SpanGroup
-from ._serialize import DocBin
+from .doc_bin import DocBin
from .morphanalysis import MorphAnalysis
__all__ = ["Doc", "Token", "Span", "SpanGroup", "DocBin", "MorphAnalysis"]
diff --git a/spacy/tokens/doc.pxd b/spacy/tokens/doc.pxd
index 57d087958..83a940cbb 100644
--- a/spacy/tokens/doc.pxd
+++ b/spacy/tokens/doc.pxd
@@ -50,6 +50,8 @@ cdef class Doc:
cdef public float sentiment
+ cdef public dict activations
+
cdef public dict user_hooks
cdef public dict user_token_hooks
cdef public dict user_span_hooks
diff --git a/spacy/tokens/doc.pyi b/spacy/tokens/doc.pyi
index a40fa74aa..763c1fd2f 100644
--- a/spacy/tokens/doc.pyi
+++ b/spacy/tokens/doc.pyi
@@ -1,11 +1,11 @@
from typing import Callable, Protocol, Iterable, Iterator, Optional
from typing import Union, Tuple, List, Dict, Any, overload
from cymem.cymem import Pool
-from thinc.types import Floats1d, Floats2d, Ints2d
+from thinc.types import ArrayXd, Floats1d, Floats2d, Ints2d, Ragged
from .span import Span
from .token import Token
-from ._dict_proxies import SpanGroups
-from ._retokenize import Retokenizer
+from .span_groups import SpanGroups
+from .retokenizer import Retokenizer
from ..lexeme import Lexeme
from ..vocab import Vocab
from .underscore import Underscore
@@ -22,6 +22,7 @@ class Doc:
max_length: int
length: int
sentiment: float
+ activations: Dict[str, Dict[str, Union[ArrayXd, Ragged]]]
cats: Dict[str, float]
user_hooks: Dict[str, Callable[..., Any]]
user_token_hooks: Dict[str, Callable[..., Any]]
diff --git a/spacy/tokens/doc.pyx b/spacy/tokens/doc.pyx
index e38de02b4..6969515c3 100644
--- a/spacy/tokens/doc.pyx
+++ b/spacy/tokens/doc.pyx
@@ -19,7 +19,7 @@ import warnings
from .span cimport Span
from .token cimport MISSING_DEP
-from ._dict_proxies import SpanGroups
+from .span_groups import SpanGroups
from .token cimport Token
from ..lexeme cimport Lexeme, EMPTY_LEXEME
from ..typedefs cimport attr_t, flags_t
@@ -35,8 +35,8 @@ from .. import util
from .. import parts_of_speech
from .. import schemas
from .underscore import Underscore, get_ext_args
-from ._retokenize import Retokenizer
-from ._serialize import ALL_ATTRS as DOCBIN_ALL_ATTRS
+from .retokenizer import Retokenizer
+from .doc_bin import ALL_ATTRS as DOCBIN_ALL_ATTRS
from ..util import get_words_and_spaces
DEF PADDING = 5
@@ -245,6 +245,7 @@ cdef class Doc:
self.length = 0
self.sentiment = 0.0
self.cats = {}
+ self.activations = {}
self.user_hooks = {}
self.user_token_hooks = {}
self.user_span_hooks = {}
@@ -607,7 +608,8 @@ cdef class Doc:
if self.vocab.vectors.n_keys == 0:
warnings.warn(Warnings.W007.format(obj="Doc"))
if self.vector_norm == 0 or other.vector_norm == 0:
- warnings.warn(Warnings.W008.format(obj="Doc"))
+ if not self.has_vector or not other.has_vector:
+ warnings.warn(Warnings.W008.format(obj="Doc"))
return 0.0
vector = self.vector
xp = get_array_module(vector)
@@ -627,7 +629,7 @@ cdef class Doc:
if "has_vector" in self.user_hooks:
return self.user_hooks["has_vector"](self)
elif self.vocab.vectors.size:
- return True
+ return any(token.has_vector for token in self)
elif self.tensor.size:
return True
else:
@@ -807,27 +809,33 @@ cdef class Doc:
self.c[i].ent_iob = 1
self.c[i].ent_type = span.label
self.c[i].ent_kb_id = span.kb_id
- # for backwards compatibility in v3, only set ent_id from
- # span.id if it's set, otherwise don't override
- self.c[i].ent_id = span.id if span.id else self.c[i].ent_id
+ self.c[i].ent_id = span.id
for span in blocked:
for i in range(span.start, span.end):
self.c[i].ent_iob = 3
self.c[i].ent_type = 0
+ self.c[i].ent_kb_id = 0
+ self.c[i].ent_id = 0
for span in missing:
for i in range(span.start, span.end):
self.c[i].ent_iob = 0
self.c[i].ent_type = 0
+ self.c[i].ent_kb_id = 0
+ self.c[i].ent_id = 0
for span in outside:
for i in range(span.start, span.end):
self.c[i].ent_iob = 2
self.c[i].ent_type = 0
+ self.c[i].ent_kb_id = 0
+ self.c[i].ent_id = 0
# Set tokens outside of all provided spans
if default != SetEntsDefault.unmodified:
for i in range(self.length):
if i not in seen_tokens:
self.c[i].ent_type = 0
+ self.c[i].ent_kb_id = 0
+ self.c[i].ent_id = 0
if default == SetEntsDefault.outside:
self.c[i].ent_iob = 2
elif default == SetEntsDefault.missing:
@@ -967,22 +975,26 @@ cdef class Doc:
py_attr_ids = [(IDS[id_.upper()] if hasattr(id_, "upper") else id_)
for id_ in py_attr_ids]
except KeyError as msg:
- keys = [k for k in IDS.keys() if not k.startswith("FLAG")]
+ keys = list(IDS.keys())
raise KeyError(Errors.E983.format(dict="IDS", key=msg, keys=keys)) from None
# Make an array from the attributes --- otherwise our inner loop is
# Python dict iteration.
- cdef np.ndarray attr_ids = numpy.asarray(py_attr_ids, dtype="i")
- output = numpy.ndarray(shape=(self.length, len(attr_ids)), dtype=numpy.uint64)
+ cdef Pool mem = Pool()
+ cdef int n_attrs = len(py_attr_ids)
+ cdef attr_id_t* c_attr_ids
+ if n_attrs > 0:
+ c_attr_ids = mem.alloc(n_attrs, sizeof(attr_id_t))
+ for i, attr_id in enumerate(py_attr_ids):
+ c_attr_ids[i] = attr_id
+ output = numpy.ndarray(shape=(self.length, n_attrs), dtype=numpy.uint64)
c_output = output.data
- c_attr_ids = attr_ids.data
cdef TokenC* token
- cdef int nr_attr = attr_ids.shape[0]
for i in range(self.length):
token = &self.c[i]
- for j in range(nr_attr):
- c_output[i*nr_attr + j] = get_token_attr(token, c_attr_ids[j])
+ for j in range(n_attrs):
+ c_output[i*n_attrs + j] = get_token_attr(token, c_attr_ids[j])
# Handle 1d case
- return output if len(attr_ids) >= 2 else output.reshape((self.length,))
+ return output if n_attrs >= 2 else output.reshape((self.length,))
def count_by(self, attr_id_t attr_id, exclude=None, object counts=None):
"""Count the frequencies of a given attribute. Produces a dict of
@@ -1601,13 +1613,30 @@ cdef class Doc:
ents.append(char_span)
self.ents = ents
- # Add custom attributes. Note that only Doc extensions are currently considered, Token and Span extensions are
- # not yet supported.
+ # Add custom attributes for the whole Doc object.
for attr in doc_json.get("_", {}):
if not Doc.has_extension(attr):
Doc.set_extension(attr)
self._.set(attr, doc_json["_"][attr])
+ if doc_json.get("underscore_token", {}):
+ for token_attr in doc_json["underscore_token"]:
+ token_start = doc_json["underscore_token"][token_attr]["token_start"]
+ value = doc_json["underscore_token"][token_attr]["value"]
+
+ if not Token.has_extension(token_attr):
+ Token.set_extension(token_attr)
+ self[token_start]._.set(token_attr, value)
+
+ if doc_json.get("underscore_span", {}):
+ for span_attr in doc_json["underscore_span"]:
+ token_start = doc_json["underscore_span"][span_attr]["token_start"]
+ token_end = doc_json["underscore_span"][span_attr]["token_end"]
+ value = doc_json["underscore_span"][span_attr]["value"]
+
+ if not Span.has_extension(span_attr):
+ Span.set_extension(span_attr)
+ self[token_start:token_end]._.set(span_attr, value)
return self
def to_json(self, underscore=None):
@@ -1649,20 +1678,40 @@ cdef class Doc:
for span_group in self.spans:
data["spans"][span_group] = []
for span in self.spans[span_group]:
- span_data = {
- "start": span.start_char, "end": span.end_char, "label": span.label_, "kb_id": span.kb_id_
- }
+ span_data = {"start": span.start_char, "end": span.end_char, "label": span.label_, "kb_id": span.kb_id_}
data["spans"][span_group].append(span_data)
if underscore:
- data["_"] = {}
+ user_keys = set()
+ if self.user_data:
+ data["_"] = {}
+ data["underscore_token"] = {}
+ data["underscore_span"] = {}
+ for data_key in self.user_data:
+ if type(data_key) == tuple and len(data_key) >= 4 and data_key[0] == "._.":
+ attr = data_key[1]
+ start = data_key[2]
+ end = data_key[3]
+ if attr in underscore:
+ user_keys.add(attr)
+ value = self.user_data[data_key]
+ if not srsly.is_json_serializable(value):
+ raise ValueError(Errors.E107.format(attr=attr, value=repr(value)))
+ # Check if doc attribute
+ if start is None:
+ data["_"][attr] = value
+ # Check if token attribute
+ elif end is None:
+ if attr not in data["underscore_token"]:
+ data["underscore_token"][attr] = {"token_start": start, "value": value}
+ # Else span attribute
+ else:
+ if attr not in data["underscore_span"]:
+ data["underscore_span"][attr] = {"token_start": start, "token_end": end, "value": value}
+
for attr in underscore:
- if not self.has_extension(attr):
+ if attr not in user_keys:
raise ValueError(Errors.E106.format(attr=attr, opts=underscore))
- value = self._.get(attr)
- if not srsly.is_json_serializable(value):
- raise ValueError(Errors.E107.format(attr=attr, value=repr(value)))
- data["_"][attr] = value
return data
def to_utf8_array(self, int nr_char=-1):
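
For illustration, a minimal sketch of what the extended `to_json` output contains once custom extensions are set; the extension names below are invented for the example and are not part of spaCy:

```python
import spacy
from spacy.tokens import Doc, Span, Token

nlp = spacy.blank("en")
doc = nlp("I like New York.")

# Illustrative extension names (not part of spaCy itself).
Doc.set_extension("doc_note", default=None)
Token.set_extension("token_flag", default=False)
Span.set_extension("span_tag", default=None)

doc._.doc_note = "example"
doc[0]._.token_flag = True
doc[2:4]._.span_tag = "city"

# Doc-level values are written under "_", token- and span-level values under
# the new "underscore_token" / "underscore_span" keys, which from_json reads back.
data = doc.to_json(underscore=["doc_note", "token_flag", "span_tag"])
assert data["_"]["doc_note"] == "example"
assert "token_flag" in data["underscore_token"]
assert "span_tag" in data["underscore_span"]
```
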
diff --git a/spacy/tokens/_serialize.py b/spacy/tokens/doc_bin.py
similarity index 99%
rename from spacy/tokens/_serialize.py
rename to spacy/tokens/doc_bin.py
index c4e8f26f4..c107aa25d 100644
--- a/spacy/tokens/_serialize.py
+++ b/spacy/tokens/doc_bin.py
@@ -12,7 +12,7 @@ from ..compat import copy_reg
from ..attrs import SPACY, ORTH, intify_attr, IDS
from ..errors import Errors
from ..util import ensure_path, SimpleFrozenList
-from ._dict_proxies import SpanGroups
+from .span_groups import SpanGroups
# fmt: off
ALL_ATTRS = ("ORTH", "NORM", "TAG", "HEAD", "DEP", "ENT_IOB", "ENT_TYPE", "ENT_KB_ID", "ENT_ID", "LEMMA", "MORPH", "POS", "SENT_START")
diff --git a/spacy/tokens/morphanalysis.pxd b/spacy/tokens/morphanalysis.pxd
index 9510875c9..f866488ec 100644
--- a/spacy/tokens/morphanalysis.pxd
+++ b/spacy/tokens/morphanalysis.pxd
@@ -1,9 +1,12 @@
from ..vocab cimport Vocab
from ..typedefs cimport hash_t
-from ..structs cimport MorphAnalysisC
+from ..morphology cimport MorphAnalysisC
+from libcpp.memory cimport shared_ptr
cdef class MorphAnalysis:
cdef readonly Vocab vocab
cdef readonly hash_t key
- cdef MorphAnalysisC c
+ cdef shared_ptr[MorphAnalysisC] c
+
+ cdef void _init_c(self, hash_t key)
diff --git a/spacy/tokens/morphanalysis.pyx b/spacy/tokens/morphanalysis.pyx
index a7d1f2e44..af0067f4e 100644
--- a/spacy/tokens/morphanalysis.pyx
+++ b/spacy/tokens/morphanalysis.pyx
@@ -5,7 +5,12 @@ from ..errors import Errors
from ..morphology import Morphology
from ..vocab cimport Vocab
from ..typedefs cimport hash_t, attr_t
-from ..morphology cimport list_features, check_feature, get_by_field
+from ..morphology cimport list_features, check_feature, get_by_field, MorphAnalysisC
+from libcpp.memory cimport shared_ptr
+from cython.operator cimport dereference as deref
+
+
+cdef shared_ptr[MorphAnalysisC] EMPTY_MORPH_TAG = shared_ptr[MorphAnalysisC](new MorphAnalysisC())
cdef class MorphAnalysis:
@@ -13,39 +18,38 @@ cdef class MorphAnalysis:
def __init__(self, Vocab vocab, features=dict()):
self.vocab = vocab
self.key = self.vocab.morphology.add(features)
- analysis = self.vocab.morphology.tags.get(self.key)
- if analysis is not NULL:
- self.c = analysis[0]
+ self._init_c(self.key)
+
+ cdef void _init_c(self, hash_t key):
+ cdef shared_ptr[MorphAnalysisC] analysis = self.vocab.morphology.get_morph_c(key)
+ if analysis:
+ self.c = analysis
else:
- memset(&self.c, 0, sizeof(self.c))
+ self.c = EMPTY_MORPH_TAG
@classmethod
def from_id(cls, Vocab vocab, hash_t key):
"""Create a morphological analysis from a given ID."""
- cdef MorphAnalysis morph = MorphAnalysis.__new__(MorphAnalysis, vocab)
+ cdef MorphAnalysis morph = MorphAnalysis(vocab)
morph.vocab = vocab
morph.key = key
- analysis = vocab.morphology.tags.get(key)
- if analysis is not NULL:
- morph.c = analysis[0]
- else:
- memset(&morph.c, 0, sizeof(morph.c))
+ morph._init_c(key)
return morph
def __contains__(self, feature):
"""Test whether the morphological analysis contains some feature."""
cdef attr_t feat_id = self.vocab.strings.as_int(feature)
- return check_feature(&self.c, feat_id)
+ return check_feature(self.c, feat_id)
def __iter__(self):
"""Iterate over the features in the analysis."""
cdef attr_t feature
- for feature in list_features(&self.c):
+ for feature in list_features(self.c):
yield self.vocab.strings[feature]
def __len__(self):
"""The number of features in the analysis."""
- return self.c.length
+ return deref(self.c).features.size()
def __hash__(self):
return self.key
@@ -61,7 +65,7 @@ cdef class MorphAnalysis:
def get(self, field):
"""Retrieve feature values by field."""
cdef attr_t field_id = self.vocab.strings.as_int(field)
- cdef np.ndarray results = get_by_field(&self.c, field_id)
+ cdef np.ndarray results = get_by_field(self.c, field_id)
features = [self.vocab.strings[result] for result in results]
return [f.split(Morphology.FIELD_SEP)[1] for f in features]
@@ -69,7 +73,7 @@ cdef class MorphAnalysis:
"""Produce a json serializable representation as a UD FEATS-style
string.
"""
- morph_string = self.vocab.strings[self.c.key]
+ morph_string = self.vocab.strings[deref(self.c).key]
if morph_string == self.vocab.morphology.EMPTY_MORPH:
return ""
return morph_string
diff --git a/spacy/tokens/_retokenize.pyi b/spacy/tokens/retokenizer.pyi
similarity index 100%
rename from spacy/tokens/_retokenize.pyi
rename to spacy/tokens/retokenizer.pyi
diff --git a/spacy/tokens/_retokenize.pyx b/spacy/tokens/retokenizer.pyx
similarity index 100%
rename from spacy/tokens/_retokenize.pyx
rename to spacy/tokens/retokenizer.pyx
diff --git a/spacy/tokens/span.pyi b/spacy/tokens/span.pyi
index 4a4149652..28b627c32 100644
--- a/spacy/tokens/span.pyi
+++ b/spacy/tokens/span.pyi
@@ -115,13 +115,23 @@ class Span:
end: int
start_char: int
end_char: int
- label: int
- kb_id: int
- ent_id: int
- ent_id_: str
+ @property
+ def label(self) -> int: ...
+ @property
+ def kb_id(self) -> int: ...
+ @property
+ def id(self) -> int: ...
+ @property
+ def ent_id(self) -> int: ...
@property
def orth_(self) -> str: ...
@property
def lemma_(self) -> str: ...
- label_: str
- kb_id_: str
+ @property
+ def label_(self) -> str: ...
+ @property
+ def kb_id_(self) -> str: ...
+ @property
+ def id_(self) -> str: ...
+ @property
+ def ent_id_(self) -> str: ...
diff --git a/spacy/tokens/span.pyx b/spacy/tokens/span.pyx
index b66a7ada3..89d9727e9 100644
--- a/spacy/tokens/span.pyx
+++ b/spacy/tokens/span.pyx
@@ -365,7 +365,8 @@ cdef class Span:
if self.vocab.vectors.n_keys == 0:
warnings.warn(Warnings.W007.format(obj="Span"))
if self.vector_norm == 0.0 or other.vector_norm == 0.0:
- warnings.warn(Warnings.W008.format(obj="Span"))
+ if not self.has_vector or not other.has_vector:
+ warnings.warn(Warnings.W008.format(obj="Span"))
return 0.0
vector = self.vector
xp = get_array_module(vector)
@@ -801,28 +802,18 @@ cdef class Span:
property id:
def __get__(self):
- cdef SpanC* span_c = self.span_c()
- return span_c.id
+ return self.span_c().id
def __set__(self, attr_t id):
- cdef SpanC* span_c = self.span_c()
- span_c.id = id
+ self.span_c().id = id
property ent_id:
- """RETURNS (uint64): The entity ID."""
+ """Alias for the span's ID."""
def __get__(self):
- return self.root.ent_id
+ return self.id
- def __set__(self, hash_t key):
- raise NotImplementedError(Errors.E200.format(attr="ent_id"))
-
- property ent_id_:
- """RETURNS (str): The (string) entity ID."""
- def __get__(self):
- return self.root.ent_id_
-
- def __set__(self, str key):
- raise NotImplementedError(Errors.E200.format(attr="ent_id_"))
+ def __set__(self, attr_t ent_id):
+ self.id = ent_id
@property
def orth_(self):
@@ -838,7 +829,7 @@ cdef class Span:
return "".join([t.lemma_ + t.whitespace_ for t in self]).strip()
property label_:
- """RETURNS (str): The span's label."""
+ """The span's label."""
def __get__(self):
return self.doc.vocab.strings[self.label]
@@ -846,7 +837,7 @@ cdef class Span:
self.label = self.doc.vocab.strings.add(label_)
property kb_id_:
- """RETURNS (str): The span's KB ID."""
+ """The span's KB ID."""
def __get__(self):
return self.doc.vocab.strings[self.kb_id]
@@ -854,13 +845,22 @@ cdef class Span:
self.kb_id = self.doc.vocab.strings.add(kb_id_)
property id_:
- """RETURNS (str): The span's ID."""
+ """The span's ID."""
def __get__(self):
return self.doc.vocab.strings[self.id]
def __set__(self, str id_):
self.id = self.doc.vocab.strings.add(id_)
+ property ent_id_:
+ """Alias for the span's ID."""
+ def __get__(self):
+ return self.id_
+
+ def __set__(self, str ent_id_):
+ self.id_ = ent_id_
+
+
cdef int _count_words_to_root(const TokenC* token, int sent_length) except -1:
# Don't allow spaces to be the root, if there are
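
As a quick illustration of the property change above, `ent_id_` is now an alias for the span's `id_` (a small sketch; the text and values are arbitrary):

```python
import spacy

nlp = spacy.blank("en")
doc = nlp("San Francisco is foggy.")
span = doc[0:2]

# id_/ent_id_ are now two names for the same underlying value.
span.id_ = "sf"
assert span.ent_id_ == "sf"
span.ent_id_ = "sfo"
assert span.id_ == "sfo"
```
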
diff --git a/spacy/tokens/span_group.pyi b/spacy/tokens/span_group.pyi
index 245eb4dbe..21cd124ab 100644
--- a/spacy/tokens/span_group.pyi
+++ b/spacy/tokens/span_group.pyi
@@ -1,4 +1,4 @@
-from typing import Any, Dict, Iterable
+from typing import Any, Dict, Iterable, Optional
from .doc import Doc
from .span import Span
@@ -24,4 +24,4 @@ class SpanGroup:
def __getitem__(self, i: int) -> Span: ...
def to_bytes(self) -> bytes: ...
def from_bytes(self, bytes_data: bytes) -> SpanGroup: ...
- def copy(self) -> SpanGroup: ...
+ def copy(self, doc: Optional[Doc] = ...) -> SpanGroup: ...
diff --git a/spacy/tokens/span_group.pyx b/spacy/tokens/span_group.pyx
index 7d4d7a248..7caa01ee7 100644
--- a/spacy/tokens/span_group.pyx
+++ b/spacy/tokens/span_group.pyx
@@ -244,15 +244,18 @@ cdef class SpanGroup:
cdef void push_back(self, const shared_ptr[SpanC] &span):
self.c.push_back(span)
- def copy(self) -> SpanGroup:
+ def copy(self, doc: Optional["Doc"] = None) -> SpanGroup:
"""Clones the span group.
+ doc (Doc): New reference document to which the copy is bound.
RETURNS (SpanGroup): A copy of the span group.
DOCS: https://spacy.io/api/spangroup#copy
"""
+ if doc is None:
+ doc = self.doc
return SpanGroup(
- self.doc,
+ doc,
name=self.name,
attrs=deepcopy(self.attrs),
spans=list(self),
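
A small usage sketch of `SpanGroup.copy`: without an argument it behaves as before, while the new `doc` argument rebinds the copy to another `Doc`:

```python
import spacy
from spacy.tokens import Span

nlp = spacy.blank("en")
doc = nlp("I like New York.")
doc.spans["cities"] = [Span(doc, 2, 4, label="GPE")]

# copy() without arguments stays bound to the same Doc, exactly as before;
# passing doc=... would bind the copied group to a different Doc object.
group_copy = doc.spans["cities"].copy()
assert group_copy.doc is doc
assert group_copy[0].text == "New York"
```
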
diff --git a/spacy/tokens/_dict_proxies.py b/spacy/tokens/span_groups.py
similarity index 97%
rename from spacy/tokens/_dict_proxies.py
rename to spacy/tokens/span_groups.py
index 9630da261..6edcce13d 100644
--- a/spacy/tokens/_dict_proxies.py
+++ b/spacy/tokens/span_groups.py
@@ -42,7 +42,8 @@ class SpanGroups(UserDict):
def copy(self, doc: Optional["Doc"] = None) -> "SpanGroups":
if doc is None:
doc = self._ensure_doc()
- return SpanGroups(doc).from_bytes(self.to_bytes())
+ data_copy = ((k, v.copy(doc=doc)) for k, v in self.items())
+ return SpanGroups(doc, items=data_copy)
def setdefault(self, key, default=None):
if not isinstance(default, SpanGroup):
diff --git a/spacy/tokens/token.pyx b/spacy/tokens/token.pyx
index d14930348..cee903f48 100644
--- a/spacy/tokens/token.pyx
+++ b/spacy/tokens/token.pyx
@@ -22,6 +22,7 @@ from .. import parts_of_speech
from ..errors import Errors, Warnings
from ..attrs import IOB_STRINGS
from .underscore import Underscore, get_ext_args
+from cython.operator cimport dereference as deref
cdef class Token:
@@ -206,7 +207,8 @@ cdef class Token:
if self.vocab.vectors.n_keys == 0:
warnings.warn(Warnings.W007.format(obj="Token"))
if self.vector_norm == 0 or other.vector_norm == 0:
- warnings.warn(Warnings.W008.format(obj="Token"))
+ if not self.has_vector or not other.has_vector:
+ warnings.warn(Warnings.W008.format(obj="Token"))
return 0.0
vector = self.vector
xp = get_array_module(vector)
@@ -230,7 +232,7 @@ cdef class Token:
# Check that the morph has the same vocab
if self.vocab != morph.vocab:
raise ValueError(Errors.E1013)
- self.c.morph = morph.c.key
+ self.c.morph = deref(morph.c).key
def set_morph(self, features):
cdef hash_t key
diff --git a/spacy/training/__init__.py b/spacy/training/__init__.py
index a4feb01f4..71d1fa775 100644
--- a/spacy/training/__init__.py
+++ b/spacy/training/__init__.py
@@ -5,6 +5,7 @@ from .augment import dont_augment, orth_variants_augmenter # noqa: F401
from .iob_utils import iob_to_biluo, biluo_to_iob # noqa: F401
from .iob_utils import offsets_to_biluo_tags, biluo_tags_to_offsets # noqa: F401
from .iob_utils import biluo_tags_to_spans, tags_to_entities # noqa: F401
+from .iob_utils import split_bilu_label, remove_bilu_prefix # noqa: F401
from .gold_io import docs_to_json, read_json_file # noqa: F401
from .batchers import minibatch_by_padded_size, minibatch_by_words # noqa: F401
from .loggers import console_logger # noqa: F401
diff --git a/spacy/training/augment.py b/spacy/training/augment.py
index 59a39c7ee..55d780ba4 100644
--- a/spacy/training/augment.py
+++ b/spacy/training/augment.py
@@ -3,10 +3,10 @@ from typing import Optional
import random
import itertools
from functools import partial
-from pydantic import BaseModel, StrictStr
from ..util import registry
from .example import Example
+from .iob_utils import split_bilu_label
if TYPE_CHECKING:
from ..language import Language # noqa: F401
@@ -278,10 +278,8 @@ def make_whitespace_variant(
ent_prev = doc_dict["entities"][position - 1]
ent_next = doc_dict["entities"][position]
if "-" in ent_prev and "-" in ent_next:
- ent_iob_prev = ent_prev.split("-")[0]
- ent_type_prev = ent_prev.split("-", 1)[1]
- ent_iob_next = ent_next.split("-")[0]
- ent_type_next = ent_next.split("-", 1)[1]
+ ent_iob_prev, ent_type_prev = split_bilu_label(ent_prev)
+ ent_iob_next, ent_type_next = split_bilu_label(ent_next)
if (
ent_iob_prev in ("B", "I")
and ent_iob_next in ("I", "L")
diff --git a/spacy/training/example.pyx b/spacy/training/example.pyx
index fa44191d0..f6fc3a48d 100644
--- a/spacy/training/example.pyx
+++ b/spacy/training/example.pyx
@@ -8,7 +8,7 @@ from ..tokens.span import Span
from ..attrs import IDS
from .alignment import Alignment
from .iob_utils import biluo_to_iob, offsets_to_biluo_tags, doc_to_biluo_tags
-from .iob_utils import biluo_tags_to_spans
+from .iob_utils import biluo_tags_to_spans, remove_bilu_prefix
from ..errors import Errors, Warnings
from ..pipeline._parser_internals import nonproj
from ..tokens.token cimport MISSING_DEP
@@ -248,9 +248,9 @@ cdef class Example:
# Fetch all aligned gold token indices.
if c2g_single_toks.shape == cand_to_gold.lengths.shape:
# This is the most likely case.
- gold_i = cand_to_gold[:].squeeze()
+ gold_i = cand_to_gold[:]
else:
- gold_i = numpy.vectorize(lambda x: cand_to_gold[int(x)][0])(c2g_single_toks).squeeze()
+ gold_i = numpy.vectorize(lambda x: cand_to_gold[int(x)][0], otypes='i')(c2g_single_toks)
# Fetch indices of all gold heads for the aligned gold tokens.
heads = numpy.asarray(heads, dtype='i')
@@ -260,7 +260,7 @@ cdef class Example:
# gold tokens (and are aligned to a single candidate token).
g2c_len_heads = gold_to_cand.lengths[gold_head_i]
g2c_len_heads = numpy.where(g2c_len_heads == 1)[0]
- g2c_i = numpy.vectorize(lambda x: gold_to_cand[int(x)][0])(gold_head_i[g2c_len_heads]).squeeze()
+ g2c_i = numpy.vectorize(lambda x: gold_to_cand[int(x)][0], otypes='i')(gold_head_i[g2c_len_heads]).squeeze()
# Update head/dep alignments with the above.
aligned_heads = numpy.full((self.x.length), None)
@@ -360,6 +360,7 @@ cdef class Example:
"doc_annotation": {
"cats": dict(self.reference.cats),
"entities": doc_to_biluo_tags(self.reference),
+ "spans": self._spans_to_dict(),
"links": self._links_to_dict()
},
"token_annotation": {
@@ -375,6 +376,18 @@ cdef class Example:
}
}
+ def _spans_to_dict(self):
+ span_dict = {}
+ for key in self.reference.spans:
+ span_tuples = []
+ for span in self.reference.spans[key]:
+ span_tuple = (span.start_char, span.end_char, span.label_, span.kb_id_)
+ span_tuples.append(span_tuple)
+ span_dict[key] = span_tuples
+
+ return span_dict
+
+
def _links_to_dict(self):
links = {}
for ent in self.reference.ents:
@@ -595,7 +608,7 @@ def _parse_ner_tags(biluo_or_offsets, vocab, words, spaces):
else:
ent_iobs.append(iob_tag.split("-")[0])
if iob_tag.startswith("I") or iob_tag.startswith("B"):
- ent_types.append(iob_tag.split("-", 1)[1])
+ ent_types.append(remove_bilu_prefix(iob_tag))
else:
ent_types.append("")
return ent_iobs, ent_types
diff --git a/spacy/training/initialize.py b/spacy/training/initialize.py
index 48ff7b589..6304e4a84 100644
--- a/spacy/training/initialize.py
+++ b/spacy/training/initialize.py
@@ -337,3 +337,5 @@ def ensure_shape(vectors_loc):
# store all the results in a list in memory
lines2 = open_file(vectors_loc)
yield from lines2
+ lines2.close()
+ lines.close()
diff --git a/spacy/training/iob_utils.py b/spacy/training/iob_utils.py
index 64492c2bc..61f83a1c3 100644
--- a/spacy/training/iob_utils.py
+++ b/spacy/training/iob_utils.py
@@ -1,4 +1,4 @@
-from typing import List, Dict, Tuple, Iterable, Union, Iterator
+from typing import List, Dict, Tuple, Iterable, Union, Iterator, cast
import warnings
from ..errors import Errors, Warnings
@@ -218,6 +218,14 @@ def tags_to_entities(tags: Iterable[str]) -> List[Tuple[str, int, int]]:
return entities
+def split_bilu_label(label: str) -> Tuple[str, str]:
+ return cast(Tuple[str, str], label.split("-", 1))
+
+
+def remove_bilu_prefix(label: str) -> str:
+ return label.split("-", 1)[1]
+
+
# Fallbacks to make backwards-compat easier
offsets_from_biluo_tags = biluo_tags_to_offsets
spans_from_biluo_tags = biluo_tags_to_spans
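
A quick sketch of how the two new helpers behave (both wrap `str.split` with a maxsplit of 1, so only the first dash is split on):

```python
from spacy.training import split_bilu_label, remove_bilu_prefix

# Split a BILUO tag into its action prefix and its entity type.
prefix, ent_type = split_bilu_label("B-PERSON")
assert prefix == "B" and ent_type == "PERSON"

# remove_bilu_prefix keeps just the entity type.
assert remove_bilu_prefix("U-WORK_OF_ART") == "WORK_OF_ART"
```
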
diff --git a/spacy/training/loggers.py b/spacy/training/loggers.py
index edd0f1959..408ea7140 100644
--- a/spacy/training/loggers.py
+++ b/spacy/training/loggers.py
@@ -1,10 +1,13 @@
-from typing import TYPE_CHECKING, Dict, Any, Tuple, Callable, List, Optional, IO
+from typing import TYPE_CHECKING, Dict, Any, Tuple, Callable, List, Optional, IO, Union
from wasabi import Printer
+from pathlib import Path
import tqdm
import sys
+import srsly
from ..util import registry
from ..errors import Errors
+from .. import util
if TYPE_CHECKING:
from ..language import Language # noqa: F401
@@ -23,13 +26,44 @@ def setup_table(
return final_cols, final_widths, ["r" for _ in final_widths]
-@registry.loggers("spacy.ConsoleLogger.v1")
-def console_logger(progress_bar: bool = False):
+@registry.loggers("spacy.ConsoleLogger.v2")
+def console_logger(
+ progress_bar: bool = False,
+ console_output: bool = True,
+ output_file: Optional[Union[str, Path]] = None,
+):
+ """The ConsoleLogger.v2 prints out training logs in the console and/or saves them to a jsonl file.
+ progress_bar (bool): Whether the logger should print the progress bar.
+ console_output (bool): Whether the logger should print the logs on the console.
+ output_file (Optional[Union[str, Path]]): The file to save the training logs to.
+ """
+ _log_exist = False
+ if output_file:
+ output_file = util.ensure_path(output_file) # type: ignore
+ if output_file.exists(): # type: ignore
+ _log_exist = True
+ if not output_file.parents[0].exists(): # type: ignore
+ output_file.parents[0].mkdir(parents=True) # type: ignore
+
def setup_printer(
nlp: "Language", stdout: IO = sys.stdout, stderr: IO = sys.stderr
) -> Tuple[Callable[[Optional[Dict[str, Any]]], None], Callable[[], None]]:
write = lambda text: print(text, file=stdout, flush=True)
msg = Printer(no_print=True)
+
+ nonlocal output_file
+ output_stream = None
+ if _log_exist:
+ write(
+ msg.warn(
+ f"Saving logs is disabled because {output_file} already exists."
+ )
+ )
+ output_file = None
+ elif output_file:
+ write(msg.info(f"Saving results to {output_file}"))
+ output_stream = open(output_file, "w", encoding="utf-8")
+
# ensure that only trainable components are logged
logged_pipes = [
name
@@ -40,13 +74,15 @@ def console_logger(progress_bar: bool = False):
score_weights = nlp.config["training"]["score_weights"]
score_cols = [col for col, value in score_weights.items() if value is not None]
loss_cols = [f"Loss {pipe}" for pipe in logged_pipes]
- spacing = 2
- table_header, table_widths, table_aligns = setup_table(
- cols=["E", "#"] + loss_cols + score_cols + ["Score"],
- widths=[3, 6] + [8 for _ in loss_cols] + [6 for _ in score_cols] + [6],
- )
- write(msg.row(table_header, widths=table_widths, spacing=spacing))
- write(msg.row(["-" * width for width in table_widths], spacing=spacing))
+
+ if console_output:
+ spacing = 2
+ table_header, table_widths, table_aligns = setup_table(
+ cols=["E", "#"] + loss_cols + score_cols + ["Score"],
+ widths=[3, 6] + [8 for _ in loss_cols] + [6 for _ in score_cols] + [6],
+ )
+ write(msg.row(table_header, widths=table_widths, spacing=spacing))
+ write(msg.row(["-" * width for width in table_widths], spacing=spacing))
progress = None
def log_step(info: Optional[Dict[str, Any]]) -> None:
@@ -57,12 +93,15 @@ def console_logger(progress_bar: bool = False):
if progress is not None:
progress.update(1)
return
- losses = [
- "{0:.2f}".format(float(info["losses"][pipe_name]))
- for pipe_name in logged_pipes
- ]
+
+ losses = []
+ log_losses = {}
+ for pipe_name in logged_pipes:
+ losses.append("{0:.2f}".format(float(info["losses"][pipe_name])))
+ log_losses[pipe_name] = float(info["losses"][pipe_name])
scores = []
+ log_scores = {}
for col in score_cols:
score = info["other_scores"].get(col, 0.0)
try:
@@ -73,6 +112,7 @@ def console_logger(progress_bar: bool = False):
if col != "speed":
score *= 100
scores.append("{0:.2f}".format(score))
+ log_scores[str(col)] = score
data = (
[info["epoch"], info["step"]]
@@ -80,20 +120,36 @@ def console_logger(progress_bar: bool = False):
+ scores
+ ["{0:.2f}".format(float(info["score"]))]
)
+
+ if output_stream:
+ # Write to log file per log_step
+ log_data = {
+ "epoch": info["epoch"],
+ "step": info["step"],
+ "losses": log_losses,
+ "scores": log_scores,
+ "score": float(info["score"]),
+ }
+ output_stream.write(srsly.json_dumps(log_data) + "\n")
+
if progress is not None:
progress.close()
- write(
- msg.row(data, widths=table_widths, aligns=table_aligns, spacing=spacing)
- )
- if progress_bar:
- # Set disable=None, so that it disables on non-TTY
- progress = tqdm.tqdm(
- total=eval_frequency, disable=None, leave=False, file=stderr
+ if console_output:
+ write(
+ msg.row(
+ data, widths=table_widths, aligns=table_aligns, spacing=spacing
+ )
)
- progress.set_description(f"Epoch {info['epoch']+1}")
+ if progress_bar:
+ # Set disable=None, so that it disables on non-TTY
+ progress = tqdm.tqdm(
+ total=eval_frequency, disable=None, leave=False, file=stderr
+ )
+ progress.set_description(f"Epoch {info['epoch']+1}")
def finalize() -> None:
- pass
+ if output_stream:
+ output_stream.close()
return log_step, finalize
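
Mirroring the new `test_console_logger` test, the v2 logger can also be constructed directly; the output file name below is an arbitrary example:

```python
import spacy
from spacy.training import loggers

nlp = spacy.blank("en")
nlp.add_pipe("ner")

# Console table plus a JSONL copy of every logged step.
console_logger = loggers.console_logger(
    progress_bar=False, console_output=True, output_file="training_log.jsonl"
)
log_step, finalize = console_logger(nlp)
log_step({
    "losses": {"ner": 12.5},
    "other_scores": {"ENTS_F": 0.85, "ENTS_P": 0.90, "ENTS_R": 0.80},
    "epoch": 1,
    "step": 100,
    "score": 0.85,
})
finalize()  # closes training_log.jsonl
```
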
diff --git a/spacy/util.py b/spacy/util.py
index dfa35daaa..4e1a62d05 100644
--- a/spacy/util.py
+++ b/spacy/util.py
@@ -1,6 +1,6 @@
from typing import List, Mapping, NoReturn, Union, Dict, Any, Set, cast
from typing import Optional, Iterable, Callable, Tuple, Type
-from typing import Iterator, Type, Pattern, Generator, TYPE_CHECKING
+from typing import Iterator, Pattern, Generator, TYPE_CHECKING
from types import ModuleType
import os
import importlib
@@ -12,7 +12,6 @@ from thinc.api import NumpyOps, get_current_ops, Adam, Config, Optimizer
from thinc.api import ConfigValidationError, Model
import functools
import itertools
-import numpy.random
import numpy
import srsly
import catalogue
@@ -399,8 +398,9 @@ def load_model(
name: Union[str, Path],
*,
vocab: Union["Vocab", bool] = True,
- disable: Iterable[str] = SimpleFrozenList(),
- exclude: Iterable[str] = SimpleFrozenList(),
+ disable: Union[str, Iterable[str]] = SimpleFrozenList(),
+ enable: Union[str, Iterable[str]] = SimpleFrozenList(),
+ exclude: Union[str, Iterable[str]] = SimpleFrozenList(),
config: Union[Dict[str, Any], Config] = SimpleFrozenDict(),
) -> "Language":
"""Load a model from a package or data path.
@@ -408,12 +408,20 @@ def load_model(
name (str): Package name or model path.
vocab (Vocab / True): Optional vocab to pass in on initialization. If True,
a new Vocab object will be created.
- disable (Iterable[str]): Names of pipeline components to disable.
+ disable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to disable.
+ enable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to enable. All others will be disabled.
+ exclude (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to exclude.
config (Dict[str, Any] / Config): Config overrides as nested dict or dict
keyed by section values in dot notation.
RETURNS (Language): The loaded nlp object.
"""
- kwargs = {"vocab": vocab, "disable": disable, "exclude": exclude, "config": config}
+ kwargs = {
+ "vocab": vocab,
+ "disable": disable,
+ "enable": enable,
+ "exclude": exclude,
+ "config": config,
+ }
if isinstance(name, str): # name or string path
if name.startswith("blank:"): # shortcut for blank model
return get_lang_class(name.replace("blank:", ""))()
@@ -432,8 +440,9 @@ def load_model_from_package(
name: str,
*,
vocab: Union["Vocab", bool] = True,
- disable: Iterable[str] = SimpleFrozenList(),
- exclude: Iterable[str] = SimpleFrozenList(),
+ disable: Union[str, Iterable[str]] = SimpleFrozenList(),
+ enable: Union[str, Iterable[str]] = SimpleFrozenList(),
+ exclude: Union[str, Iterable[str]] = SimpleFrozenList(),
config: Union[Dict[str, Any], Config] = SimpleFrozenDict(),
) -> "Language":
"""Load a model from an installed package.
@@ -441,17 +450,19 @@ def load_model_from_package(
name (str): The package name.
vocab (Vocab / True): Optional vocab to pass in on initialization. If True,
a new Vocab object will be created.
- disable (Iterable[str]): Names of pipeline components to disable. Disabled
+ disable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to disable. Disabled
pipes will be loaded but they won't be run unless you explicitly
enable them by calling nlp.enable_pipe.
- exclude (Iterable[str]): Names of pipeline components to exclude. Excluded
+ enable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to enable. All other
+ pipes will be disabled (and can be enabled using `nlp.enable_pipe`).
+ exclude (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to exclude. Excluded
components won't be loaded.
config (Dict[str, Any] / Config): Config overrides as nested dict or dict
keyed by section values in dot notation.
RETURNS (Language): The loaded nlp object.
"""
cls = importlib.import_module(name)
- return cls.load(vocab=vocab, disable=disable, exclude=exclude, config=config) # type: ignore[attr-defined]
+ return cls.load(vocab=vocab, disable=disable, enable=enable, exclude=exclude, config=config) # type: ignore[attr-defined]
def load_model_from_path(
@@ -459,8 +470,9 @@ def load_model_from_path(
*,
meta: Optional[Dict[str, Any]] = None,
vocab: Union["Vocab", bool] = True,
- disable: Iterable[str] = SimpleFrozenList(),
- exclude: Iterable[str] = SimpleFrozenList(),
+ disable: Union[str, Iterable[str]] = SimpleFrozenList(),
+ enable: Union[str, Iterable[str]] = SimpleFrozenList(),
+ exclude: Union[str, Iterable[str]] = SimpleFrozenList(),
config: Union[Dict[str, Any], Config] = SimpleFrozenDict(),
) -> "Language":
"""Load a model from a data directory path. Creates Language class with
@@ -470,10 +482,12 @@ def load_model_from_path(
meta (Dict[str, Any]): Optional model meta.
vocab (Vocab / True): Optional vocab to pass in on initialization. If True,
a new Vocab object will be created.
- disable (Iterable[str]): Names of pipeline components to disable. Disabled
+ disable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to disable. Disabled
pipes will be loaded but they won't be run unless you explicitly
enable them by calling nlp.enable_pipe.
- exclude (Iterable[str]): Names of pipeline components to exclude. Excluded
+ enable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to enable. All other
+ pipes will be disabled (and can be enabled using `nlp.enable_pipe`).
+ exclude (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to exclude. Excluded
components won't be loaded.
config (Dict[str, Any] / Config): Config overrides as nested dict or dict
keyed by section values in dot notation.
@@ -487,7 +501,12 @@ def load_model_from_path(
overrides = dict_to_dot(config)
config = load_config(config_path, overrides=overrides)
nlp = load_model_from_config(
- config, vocab=vocab, disable=disable, exclude=exclude, meta=meta
+ config,
+ vocab=vocab,
+ disable=disable,
+ enable=enable,
+ exclude=exclude,
+ meta=meta,
)
return nlp.from_disk(model_path, exclude=exclude, overrides=overrides)
@@ -497,8 +516,9 @@ def load_model_from_config(
*,
meta: Dict[str, Any] = SimpleFrozenDict(),
vocab: Union["Vocab", bool] = True,
- disable: Iterable[str] = SimpleFrozenList(),
- exclude: Iterable[str] = SimpleFrozenList(),
+ disable: Union[str, Iterable[str]] = SimpleFrozenList(),
+ enable: Union[str, Iterable[str]] = SimpleFrozenList(),
+ exclude: Union[str, Iterable[str]] = SimpleFrozenList(),
auto_fill: bool = False,
validate: bool = True,
) -> "Language":
@@ -509,10 +529,12 @@ def load_model_from_config(
meta (Dict[str, Any]): Optional model meta.
vocab (Vocab / True): Optional vocab to pass in on initialization. If True,
a new Vocab object will be created.
- disable (Iterable[str]): Names of pipeline components to disable. Disabled
+ disable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to disable. Disabled
pipes will be loaded but they won't be run unless you explicitly
enable them by calling nlp.enable_pipe.
- exclude (Iterable[str]): Names of pipeline components to exclude. Excluded
+ enable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to enable. All other
+ pipes will be disabled (and can be enabled using `nlp.enable_pipe`).
+ exclude (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to exclude. Excluded
components won't be loaded.
auto_fill (bool): Whether to auto-fill config with missing defaults.
validate (bool): Whether to show config validation errors.
@@ -530,6 +552,7 @@ def load_model_from_config(
config,
vocab=vocab,
disable=disable,
+ enable=enable,
exclude=exclude,
auto_fill=auto_fill,
validate=validate,
@@ -593,8 +616,9 @@ def load_model_from_init_py(
init_file: Union[Path, str],
*,
vocab: Union["Vocab", bool] = True,
- disable: Iterable[str] = SimpleFrozenList(),
- exclude: Iterable[str] = SimpleFrozenList(),
+ disable: Union[str, Iterable[str]] = SimpleFrozenList(),
+ enable: Union[str, Iterable[str]] = SimpleFrozenList(),
+ exclude: Union[str, Iterable[str]] = SimpleFrozenList(),
config: Union[Dict[str, Any], Config] = SimpleFrozenDict(),
) -> "Language":
"""Helper function to use in the `load()` method of a model package's
@@ -602,10 +626,12 @@ def load_model_from_init_py(
vocab (Vocab / True): Optional vocab to pass in on initialization. If True,
a new Vocab object will be created.
- disable (Iterable[str]): Names of pipeline components to disable. Disabled
+ disable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to disable. Disabled
pipes will be loaded but they won't be run unless you explicitly
enable them by calling nlp.enable_pipe.
- exclude (Iterable[str]): Names of pipeline components to exclude. Excluded
+ enable (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to enable. All other
+ pipes will be disabled (and can be enabled using `nlp.enable_pipe`).
+ exclude (Union[str, Iterable[str]]): Name(s) of pipeline component(s) to exclude. Excluded
components won't be loaded.
config (Dict[str, Any] / Config): Config overrides as nested dict or dict
keyed by section values in dot notation.
@@ -622,6 +648,7 @@ def load_model_from_init_py(
vocab=vocab,
meta=meta,
disable=disable,
+ enable=enable,
exclude=exclude,
config=config,
)
@@ -768,6 +795,15 @@ def get_model_lower_version(constraint: str) -> Optional[str]:
return None
+def is_prerelease_version(version: str) -> bool:
+ """Check whether a version is a prerelease version.
+
+ version (str): The version, e.g. "3.0.0.dev1".
+ RETURNS (bool): Whether the version is a prerelease version.
+ """
+ return Version(version).is_prerelease
+
+
def get_base_version(version: str) -> str:
"""Generate the base version without any prerelease identifiers.
diff --git a/spacy/vectors.pyx b/spacy/vectors.pyx
index bcba9d03f..8300220c1 100644
--- a/spacy/vectors.pyx
+++ b/spacy/vectors.pyx
@@ -336,10 +336,10 @@ cdef class Vectors:
xp = get_array_module(self.data)
if key is not None:
key = get_string_id(key)
- return self.key2row.get(key, -1)
+ return self.key2row.get(int(key), -1)
elif keys is not None:
keys = [get_string_id(key) for key in keys]
- rows = [self.key2row.get(key, -1.) for key in keys]
+ rows = [self.key2row.get(int(key), -1) for key in keys]
return xp.asarray(rows, dtype="i")
else:
row2key = {row: key for key, row in self.key2row.items()}
diff --git a/spacy/vocab.pxd b/spacy/vocab.pxd
index 9c951b2b7..815de0765 100644
--- a/spacy/vocab.pxd
+++ b/spacy/vocab.pxd
@@ -32,7 +32,6 @@ cdef class Vocab:
cdef public object writing_system
cdef public object get_noun_chunks
cdef readonly int length
- cdef public object _unused_object # TODO remove in v4, see #9150
cdef public object lex_attr_getters
cdef public object cfg
diff --git a/spacy/vocab.pyi b/spacy/vocab.pyi
index 4cc359c47..41964703b 100644
--- a/spacy/vocab.pyi
+++ b/spacy/vocab.pyi
@@ -72,7 +72,6 @@ def unpickle_vocab(
sstore: StringStore,
vectors: Any,
morphology: Any,
- _unused_object: Any,
lex_attr_getters: Any,
lookups: Any,
get_noun_chunks: Any,
diff --git a/spacy/vocab.pyx b/spacy/vocab.pyx
index 428cadd82..d780dec0d 100644
--- a/spacy/vocab.pyx
+++ b/spacy/vocab.pyx
@@ -268,8 +268,7 @@ cdef class Vocab:
cdef int i
tokens = self.mem.alloc(len(substrings) + 1, sizeof(TokenC))
for i, props in enumerate(substrings):
- props = intify_attrs(props, strings_map=self.strings,
- _do_deprecated=True)
+ props = intify_attrs(props, strings_map=self.strings)
token = &tokens[i]
# Set the special tokens up to have arbitrary attributes
lex = self.get_by_orth(self.mem, props[ORTH])
@@ -559,21 +558,18 @@ def pickle_vocab(vocab):
sstore = vocab.strings
vectors = vocab.vectors
morph = vocab.morphology
- _unused_object = vocab._unused_object
lex_attr_getters = srsly.pickle_dumps(vocab.lex_attr_getters)
lookups = vocab.lookups
get_noun_chunks = vocab.get_noun_chunks
return (unpickle_vocab,
- (sstore, vectors, morph, _unused_object, lex_attr_getters, lookups, get_noun_chunks))
+ (sstore, vectors, morph, lex_attr_getters, lookups, get_noun_chunks))
-def unpickle_vocab(sstore, vectors, morphology, _unused_object,
- lex_attr_getters, lookups, get_noun_chunks):
+def unpickle_vocab(sstore, vectors, morphology, lex_attr_getters, lookups, get_noun_chunks):
cdef Vocab vocab = Vocab()
vocab.vectors = vectors
vocab.strings = sstore
vocab.morphology = morphology
- vocab._unused_object = _unused_object
vocab.lex_attr_getters = srsly.pickle_loads(lex_attr_getters)
vocab.lookups = lookups
vocab.get_noun_chunks = get_noun_chunks
diff --git a/website/docs/api/architectures.md b/website/docs/api/architectures.md
index a7499f8e2..213aa7455 100644
--- a/website/docs/api/architectures.md
+++ b/website/docs/api/architectures.md
@@ -586,7 +586,7 @@ consists of either two or three subnetworks:
run once for each batch.
- **lower**: Construct a feature-specific vector for each `(token, feature)`
pair. This is also run once for each batch. Constructing the state
- representation is then simply a matter of summing the component features and
+ representation is then a matter of summing the component features and
applying the non-linearity.
- **upper** (optional): A feed-forward network that predicts scores from the
state representation. If not present, the output from the lower model is used
@@ -619,7 +619,7 @@ consists of either two or three subnetworks:
> ```
Build a tagger model, using a provided token-to-vector component. The tagger
-model simply adds a linear layer with softmax activation to predict scores given
+model adds a linear layer with softmax activation to predict scores given
the token vectors.
| Name | Description |
@@ -911,5 +911,5 @@ A function that reads an existing `KnowledgeBase` from file.
A function that takes as input a [`KnowledgeBase`](/api/kb) and a
[`Span`](/api/span) object denoting a named entity, and returns a list of
plausible [`Candidate`](/api/kb/#candidate) objects. The default
-`CandidateGenerator` simply uses the text of a mention to find its potential
+`CandidateGenerator` uses the text of a mention to find its potential
aliases in the `KnowledgeBase`. Note that this function is case-dependent.
diff --git a/website/docs/api/attributeruler.md b/website/docs/api/attributeruler.md
index 965bffbcc..f56e15b29 100644
--- a/website/docs/api/attributeruler.md
+++ b/website/docs/api/attributeruler.md
@@ -1,7 +1,7 @@
---
title: AttributeRuler
tag: class
-source: spacy/pipeline/attributeruler.py
+source: spacy/pipeline/attribute_ruler.py
new: 3
teaser: 'Pipeline component for rule-based token attribute assignment'
api_string_name: attribute_ruler
@@ -34,7 +34,7 @@ how the component should be configured. You can override its settings via the
| `validate` | Whether patterns should be validated (passed to the `Matcher`). Defaults to `False`. ~~bool~~ |
```python
-%%GITHUB_SPACY/spacy/pipeline/attributeruler.py
+%%GITHUB_SPACY/spacy/pipeline/attribute_ruler.py
```
## AttributeRuler.\_\_init\_\_ {#init tag="method"}
diff --git a/website/docs/api/attributes.md b/website/docs/api/attributes.md
new file mode 100644
index 000000000..adacd3898
--- /dev/null
+++ b/website/docs/api/attributes.md
@@ -0,0 +1,78 @@
+---
+title: Attributes
+teaser: Token attributes
+source: spacy/attrs.pyx
+---
+
+[Token](/api/token) attributes are specified using internal IDs in many places
+including:
+
+- [`Matcher` patterns](/api/matcher#patterns)
+- [`Doc.to_array`](/api/doc#to_array) and
+ [`Doc.from_array`](/api/doc#from_array)
+- [`Doc.has_annotation`](/api/doc#has_annotation)
+- [`MultiHashEmbed`](/api/architectures#MultiHashEmbed) Tok2Vec architecture
+ `attrs`
+
+> ```python
+> import spacy
+> from spacy.attrs import DEP
+>
+> nlp = spacy.blank("en")
+> doc = nlp("There are many attributes.")
+>
+> # DEP always has the same internal value
+> assert DEP == 76
+>
+> # "DEP" is automatically converted to DEP
+> assert DEP == nlp.vocab.strings["DEP"]
+> assert doc.has_annotation(DEP) == doc.has_annotation("DEP")
+>
+> # look up IDs in spacy.attrs.IDS
+> from spacy.attrs import IDS
+> assert IDS["DEP"] == DEP
+> ```
+
+All methods automatically convert between the string version of an ID (`"DEP"`)
+and the internal integer symbols (`DEP`). The internal IDs can be imported from
+`spacy.attrs` or retrieved from the [`StringStore`](/api/stringstore). A map
+from string attribute names to internal attribute IDs is stored in
+`spacy.attrs.IDS`.
+
+The corresponding [`Token` object attributes](/api/token#attributes) can be
+accessed using the same names in lowercase, e.g. `token.orth` or `token.length`.
+For attributes that represent string values, the internal integer ID is
+accessed as `Token.attr`, e.g. `token.dep`, while the string value can be
+retrieved by appending `_` as in `token.dep_`.
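+
+For example, `token.orth` is the integer ID of the verbatim token text, while
+`token.orth_` is the corresponding string:
+
+```python
+import spacy
+
+nlp = spacy.blank("en")
+doc = nlp("many attributes")
+token = doc[0]
+
+# the integer ID and the string refer to the same entry in the string store
+assert token.orth == nlp.vocab.strings["many"]
+assert token.orth_ == "many"
+```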
+
+
+| Attribute | Description |
+| ------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `DEP` | The token's dependency label. ~~str~~ |
+| `ENT_ID` | The token's entity ID (`ent_id`). ~~str~~ |
+| `ENT_IOB`    | The IOB part of the token's entity tag. Uses custom integer values rather than the string store: unset is `0`, `I` is `1`, `O` is `2`, and `B` is `3`. ~~str~~ |
+| `ENT_KB_ID` | The token's entity knowledge base ID. ~~str~~ |
+| `ENT_TYPE` | The token's entity label. ~~str~~ |
+| `IS_ALPHA` | Token text consists of alphabetic characters. ~~bool~~ |
+| `IS_ASCII` | Token text consists of ASCII characters. ~~bool~~ |
+| `IS_DIGIT` | Token text consists of digits. ~~bool~~ |
+| `IS_LOWER` | Token text is in lowercase. ~~bool~~ |
+| `IS_PUNCT` | Token is punctuation. ~~bool~~ |
+| `IS_SPACE` | Token is whitespace. ~~bool~~ |
+| `IS_STOP` | Token is a stop word. ~~bool~~ |
+| `IS_TITLE` | Token text is in titlecase. ~~bool~~ |
+| `IS_UPPER` | Token text is in uppercase. ~~bool~~ |
+| `LEMMA` | The token's lemma. ~~str~~ |
+| `LENGTH` | The length of the token text. ~~int~~ |
+| `LIKE_EMAIL` | Token text resembles an email address. ~~bool~~ |
+| `LIKE_NUM` | Token text resembles a number. ~~bool~~ |
+| `LIKE_URL` | Token text resembles a URL. ~~bool~~ |
+| `LOWER` | The lowercase form of the token text. ~~str~~ |
+| `MORPH` | The token's morphological analysis. ~~MorphAnalysis~~ |
+| `NORM` | The normalized form of the token text. ~~str~~ |
+| `ORTH` | The exact verbatim text of a token. ~~str~~ |
+| `POS` | The token's universal part of speech (UPOS). ~~str~~ |
+| `SENT_START` | Token is start of sentence. ~~bool~~ |
+| `SHAPE` | The token's shape. ~~str~~ |
+| `SPACY` | Token has a trailing space. ~~bool~~ |
+| `TAG` | The token's fine-grained part of speech. ~~str~~ |
diff --git a/website/docs/api/cli.md b/website/docs/api/cli.md
index 7ad83de9c..8621719b9 100644
--- a/website/docs/api/cli.md
+++ b/website/docs/api/cli.md
@@ -77,14 +77,15 @@ $ python -m spacy info [--markdown] [--silent] [--exclude]
$ python -m spacy info [model] [--markdown] [--silent] [--exclude]
```
-| Name | Description |
-| ------------------------------------------------ | --------------------------------------------------------------------------------------------- |
-| `model` | A trained pipeline, i.e. package name or path (optional). ~~Optional[str] \(option)~~ |
-| `--markdown`, `-md` | Print information as Markdown. ~~bool (flag)~~ |
-| `--silent`, `-s` 2.0.12 | Don't print anything, just return the values. ~~bool (flag)~~ |
-| `--exclude`, `-e` | Comma-separated keys to exclude from the print-out. Defaults to `"labels"`. ~~Optional[str]~~ |
-| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
-| **PRINTS** | Information about your spaCy installation. |
+| Name | Description |
+| ------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------- |
+| `model` | A trained pipeline, i.e. package name or path (optional). ~~Optional[str] \(option)~~ |
+| `--markdown`, `-md` | Print information as Markdown. ~~bool (flag)~~ |
+| `--silent`, `-s` 2.0.12 | Don't print anything, just return the values. ~~bool (flag)~~ |
+| `--exclude`, `-e` | Comma-separated keys to exclude from the print-out. Defaults to `"labels"`. ~~Optional[str]~~ |
+| `--url`, `-u` 3.5.0 | Print the URL to download the most recent compatible version of the pipeline. Requires a pipeline name. ~~bool (flag)~~ |
+| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
+| **PRINTS** | Information about your spaCy installation. |
## validate {#validate new="2" tag="command"}
diff --git a/website/docs/api/data-formats.md b/website/docs/api/data-formats.md
index b7aedc511..ce06c4ea8 100644
--- a/website/docs/api/data-formats.md
+++ b/website/docs/api/data-formats.md
@@ -395,12 +395,13 @@ file to keep track of your settings and hyperparameters and your own
> "pos": List[str],
> "morphs": List[str],
> "sent_starts": List[Optional[bool]],
-> "deps": List[string],
+> "deps": List[str],
> "heads": List[int],
> "entities": List[str],
> "entities": List[(int, int, str)],
> "cats": Dict[str, float],
> "links": Dict[(int, int), dict],
+> "spans": Dict[str, List[Tuple]],
> }
> ```
@@ -417,9 +418,10 @@ file to keep track of your settings and hyperparameters and your own
| `deps` | List of string values indicating the [dependency relation](/usage/linguistic-features#dependency-parse) of a token to its head. ~~List[str]~~ |
| `heads` | List of integer values indicating the dependency head of each token, referring to the absolute index of each token in the text. ~~List[int]~~ |
| `entities` | **Option 1:** List of [BILUO tags](/usage/linguistic-features#accessing-ner) per token of the format `"{action}-{label}"`, or `None` for unannotated tokens. ~~List[str]~~ |
-| `entities` | **Option 2:** List of `"(start, end, label)"` tuples defining all entities in the text. ~~List[Tuple[int, int, str]]~~ |
+| `entities` | **Option 2:** List of `(start_char, end_char, label)` tuples defining all entities in the text. ~~List[Tuple[int, int, str]]~~ |
| `cats` | Dictionary of `label`/`value` pairs indicating how relevant a certain [text category](/api/textcategorizer) is for the text. ~~Dict[str, float]~~ |
| `links` | Dictionary of `offset`/`dict` pairs defining [named entity links](/usage/linguistic-features#entity-linking). The character offsets are linked to a dictionary of relevant knowledge base IDs. ~~Dict[Tuple[int, int], Dict]~~ |
+| `spans` | Dictionary of `spans_key`/`List[Tuple]` pairs defining the spans for each spans key as `(start_char, end_char, label, kb_id)` tuples. ~~Dict[str, List[Tuple[int, int, str, str]]]~~ |
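+
+For example, span annotations for a single spans key (here, the illustrative
+key `"sc"` with made-up offsets and labels) could look like this:
+
+```python
+{
+    "text": "Welcome to the Bank of China.",
+    # one spans key, two (start_char, end_char, label, kb_id) tuples
+    "spans": {"sc": [(15, 28, "BANK", ""), (15, 28, "ORG", "")]},
+}
+```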
diff --git a/website/docs/api/dependencymatcher.md b/website/docs/api/dependencymatcher.md
index 356adcda7..cae4221bf 100644
--- a/website/docs/api/dependencymatcher.md
+++ b/website/docs/api/dependencymatcher.md
@@ -62,7 +62,7 @@ of relations, see the usage guide on
-### Operators
+### Operators {#operators}
The following operators are supported by the `DependencyMatcher`, most of which
come directly from
@@ -82,6 +82,11 @@ come directly from
| `A $- B` | `B` is a left immediate sibling of `A`, i.e. `A` and `B` have the same parent and `A.i == B.i + 1`. |
| `A $++ B` | `B` is a right sibling of `A`, i.e. `A` and `B` have the same parent and `A.i < B.i`. |
| `A $-- B` | `B` is a left sibling of `A`, i.e. `A` and `B` have the same parent and `A.i > B.i`. |
+| `A >++ B` | `B` is a right child of `A`, i.e. `A` is a parent of `B` and `A.i < B.i` _(not in Semgrex)_. |
+| `A >-- B` | `B` is a left child of `A`, i.e. `A` is a parent of `B` and `A.i > B.i` _(not in Semgrex)_. |
+| `A <++ B` | `B` is a right parent of `A`, i.e. `A` is a child of `B` and `A.i < B.i` _(not in Semgrex)_. |
+| `A <-- B` | `B` is a left parent of `A`, i.e. `A` is a child of `B` and `A.i > B.i` _(not in Semgrex)_. |
+
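+For example, a pattern using the new `>++` operator to match a verb together
+with a direct object that appears to its right could look like this:
+
+```python
+pattern = [
+    {"RIGHT_ID": "verb", "RIGHT_ATTRS": {"POS": "VERB"}},
+    # "object" must be a child of "verb" that occurs to the right of it
+    {
+        "LEFT_ID": "verb",
+        "REL_OP": ">++",
+        "RIGHT_ID": "object",
+        "RIGHT_ATTRS": {"DEP": "dobj"},
+    },
+]
+```
+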
## DependencyMatcher.\_\_init\_\_ {#init tag="method"}
diff --git a/website/docs/api/dependencyparser.md b/website/docs/api/dependencyparser.md
index 103e0826e..27e315592 100644
--- a/website/docs/api/dependencyparser.md
+++ b/website/docs/api/dependencyparser.md
@@ -158,10 +158,10 @@ applied to the `Doc` in order. Both [`__call__`](/api/dependencyparser#call) and
## DependencyParser.initialize {#initialize tag="method" new="3"}
Initialize the component for training. `get_examples` should be a function that
-returns an iterable of [`Example`](/api/example) objects. The data examples are
-used to **initialize the model** of the component and can either be the full
-training data or a representative sample. Initialization includes validating the
-network,
+returns an iterable of [`Example`](/api/example) objects. **At least one example
+should be supplied.** The data examples are used to **initialize the model** of
+the component and can either be the full training data or a representative
+sample. Initialization includes validating the network,
[inferring missing shapes](https://thinc.ai/docs/usage-models#validation) and
setting up the label scheme based on the data. This method is typically called
by [`Language.initialize`](/api/language#initialize) and lets you customize
@@ -179,7 +179,7 @@ This method was previously called `begin_training`.
>
> ```python
> parser = nlp.add_pipe("parser")
-> parser.initialize(lambda: [], nlp=nlp)
+> parser.initialize(lambda: examples, nlp=nlp)
> ```
>
> ```ini
@@ -193,7 +193,7 @@ This method was previously called `begin_training`.
| Name | Description |
| -------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `get_examples` | Function that returns gold-standard annotations in the form of [`Example`](/api/example) objects. ~~Callable[[], Iterable[Example]]~~ |
+| `get_examples` | Function that returns gold-standard annotations in the form of [`Example`](/api/example) objects. Must contain at least one `Example`. ~~Callable[[], Iterable[Example]]~~ |
| _keyword-only_ | |
| `nlp` | The current `nlp` object. Defaults to `None`. ~~Optional[Language]~~ |
| `labels` | The label information to add to the component, as provided by the [`label_data`](#label_data) property after initialization. To generate a reusable JSON file from your data, you should run the [`init labels`](/api/cli#init-labels) command. If no labels are provided, the `get_examples` callback is used to extract the labels from the data, which may be a lot slower. ~~Optional[Dict[str, Dict[str, int]]]~~ |
diff --git a/website/docs/api/doc.md b/website/docs/api/doc.md
index f97f4ad83..136e7785d 100644
--- a/website/docs/api/doc.md
+++ b/website/docs/api/doc.md
@@ -751,22 +751,23 @@ The L2 norm of the document's vector representation.
## Attributes {#attributes}
-| Name | Description |
-| ------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------- |
-| `text` | A string representation of the document text. ~~str~~ |
-| `text_with_ws` | An alias of `Doc.text`, provided for duck-type compatibility with `Span` and `Token`. ~~str~~ |
-| `mem` | The document's local memory heap, for all C data it owns. ~~cymem.Pool~~ |
-| `vocab` | The store of lexical types. ~~Vocab~~ |
-| `tensor` 2 | Container for dense vector representations. ~~numpy.ndarray~~ |
-| `user_data` | A generic storage area, for user custom data. ~~Dict[str, Any]~~ |
-| `lang` 2.1 | Language of the document's vocabulary. ~~int~~ |
-| `lang_` 2.1 | Language of the document's vocabulary. ~~str~~ |
-| `sentiment` | The document's positivity/negativity score, if available. ~~float~~ |
-| `user_hooks` | A dictionary that allows customization of the `Doc`'s properties. ~~Dict[str, Callable]~~ |
-| `user_token_hooks` | A dictionary that allows customization of properties of `Token` children. ~~Dict[str, Callable]~~ |
-| `user_span_hooks` | A dictionary that allows customization of properties of `Span` children. ~~Dict[str, Callable]~~ |
-| `has_unknown_spaces` | Whether the document was constructed without known spacing between tokens (typically when created from gold tokenization). ~~bool~~ |
-| `_` | User space for adding custom [attribute extensions](/usage/processing-pipelines#custom-components-attributes). ~~Underscore~~ |
+| Name | Description |
+| ------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------- |
+| `text` | A string representation of the document text. ~~str~~ |
+| `text_with_ws` | An alias of `Doc.text`, provided for duck-type compatibility with `Span` and `Token`. ~~str~~ |
+| `mem` | The document's local memory heap, for all C data it owns. ~~cymem.Pool~~ |
+| `vocab` | The store of lexical types. ~~Vocab~~ |
+| `tensor` 2 | Container for dense vector representations. ~~numpy.ndarray~~ |
+| `user_data` | A generic storage area, for user custom data. ~~Dict[str, Any]~~ |
+| `lang` 2.1 | Language of the document's vocabulary. ~~int~~ |
+| `lang_` 2.1 | Language of the document's vocabulary. ~~str~~ |
+| `sentiment` | The document's positivity/negativity score, if available. ~~float~~ |
+| `user_hooks` | A dictionary that allows customization of the `Doc`'s properties. ~~Dict[str, Callable]~~ |
+| `user_token_hooks` | A dictionary that allows customization of properties of `Token` children. ~~Dict[str, Callable]~~ |
+| `user_span_hooks` | A dictionary that allows customization of properties of `Span` children. ~~Dict[str, Callable]~~ |
+| `has_unknown_spaces` | Whether the document was constructed without known spacing between tokens (typically when created from gold tokenization). ~~bool~~ |
+| `_` | User space for adding custom [attribute extensions](/usage/processing-pipelines#custom-components-attributes). ~~Underscore~~ |
+| `activations` 4.0 | A dictionary of activations per trainable pipe (available when the `save_activations` option of a pipe is enabled). ~~Dict[str, Optional[Any]]~~ |
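+
+For example, assuming a pipeline whose morphologizer was configured with
+`save_activations` enabled (and that the dictionary is keyed by pipe name), the
+stored activations could be read back like this:
+
+```python
+doc = nlp("This is a sentence.")
+# for the morphologizer, the saved activations are "probabilities" and "label_ids"
+probs = doc.activations["morphologizer"]["probabilities"]
+```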
## Serialization fields {#serialization-fields}
diff --git a/website/docs/api/edittreelemmatizer.md b/website/docs/api/edittreelemmatizer.md
index 99a705f5e..8bee74316 100644
--- a/website/docs/api/edittreelemmatizer.md
+++ b/website/docs/api/edittreelemmatizer.md
@@ -44,14 +44,15 @@ architectures and their arguments and hyperparameters.
> nlp.add_pipe("trainable_lemmatizer", config=config, name="lemmatizer")
> ```
-| Setting | Description |
-| --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
-| `model` | A model instance that predicts the edit tree probabilities. The output vectors should match the number of edit trees in size, and be normalized as probabilities (all scores between 0 and 1, with the rows summing to `1`). Defaults to [Tagger](/api/architectures#Tagger). ~~Model[List[Doc], List[Floats2d]]~~ |
-| `backoff` | ~~Token~~ attribute to use when no applicable edit tree is found. Defaults to `orth`. ~~str~~ |
-| `min_tree_freq` | Minimum frequency of an edit tree in the training set to be used. Defaults to `3`. ~~int~~ |
-| `overwrite` | Whether existing annotation is overwritten. Defaults to `False`. ~~bool~~ |
-| `top_k` | The number of most probable edit trees to try before resorting to `backoff`. Defaults to `1`. ~~int~~ |
-| `scorer` | The scoring method. Defaults to [`Scorer.score_token_attr`](/api/scorer#score_token_attr) for the attribute `"lemma"`. ~~Optional[Callable]~~ |
+| Setting | Description |
+| ----------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| `model` | A model instance that predicts the edit tree probabilities. The output vectors should match the number of edit trees in size, and be normalized as probabilities (all scores between 0 and 1, with the rows summing to `1`). Defaults to [Tagger](/api/architectures#Tagger). ~~Model[List[Doc], List[Floats2d]]~~ |
+| `backoff` | ~~Token~~ attribute to use when no applicable edit tree is found. Defaults to `orth`. ~~str~~ |
+| `min_tree_freq` | Minimum frequency of an edit tree in the training set to be used. Defaults to `3`. ~~int~~ |
+| `overwrite` | Whether existing annotation is overwritten. Defaults to `False`. ~~bool~~ |
+| `top_k` | The number of most probable edit trees to try before resorting to `backoff`. Defaults to `1`. ~~int~~ |
+| `scorer` | The scoring method. Defaults to [`Scorer.score_token_attr`](/api/scorer#score_token_attr) for the attribute `"lemma"`. ~~Optional[Callable]~~ |
+| `save_activations` 4.0 | Save activations in `Doc` when annotating. Saved activations are `"probabilities"` and `"tree_ids"`. ~~Union[bool, list[str]]~~ |
```python
%%GITHUB_SPACY/spacy/pipeline/edit_tree_lemmatizer.py
@@ -141,10 +142,10 @@ and [`pipe`](/api/edittreelemmatizer#pipe) delegate to the
## EditTreeLemmatizer.initialize {#initialize tag="method" new="3"}
Initialize the component for training. `get_examples` should be a function that
-returns an iterable of [`Example`](/api/example) objects. The data examples are
-used to **initialize the model** of the component and can either be the full
-training data or a representative sample. Initialization includes validating the
-network,
+returns an iterable of [`Example`](/api/example) objects. **At least one example
+should be supplied.** The data examples are used to **initialize the model** of
+the component and can either be the full training data or a representative
+sample. Initialization includes validating the network,
[inferring missing shapes](https://thinc.ai/docs/usage-models#validation) and
setting up the label scheme based on the data. This method is typically called
by [`Language.initialize`](/api/language#initialize) and lets you customize
@@ -156,7 +157,7 @@ config.
>
> ```python
> lemmatizer = nlp.add_pipe("trainable_lemmatizer", name="lemmatizer")
-> lemmatizer.initialize(lambda: [], nlp=nlp)
+> lemmatizer.initialize(lambda: examples, nlp=nlp)
> ```
>
> ```ini
@@ -170,7 +171,7 @@ config.
| Name | Description |
| -------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `get_examples` | Function that returns gold-standard annotations in the form of [`Example`](/api/example) objects. ~~Callable[[], Iterable[Example]]~~ |
+| `get_examples` | Function that returns gold-standard annotations in the form of [`Example`](/api/example) objects. Must contain at least one `Example`. ~~Callable[[], Iterable[Example]]~~ |
| _keyword-only_ | |
| `nlp` | The current `nlp` object. Defaults to `None`. ~~Optional[Language]~~ |
| `labels` | The label information to add to the component, as provided by the [`label_data`](#label_data) property after initialization. To generate a reusable JSON file from your data, you should run the [`init labels`](/api/cli#init-labels) command. If no labels are provided, the `get_examples` callback is used to extract the labels from the data, which may be a lot slower. ~~Optional[Iterable[str]]~~ |
diff --git a/website/docs/api/entitylinker.md b/website/docs/api/entitylinker.md
index 8e0d6087a..07dd02634 100644
--- a/website/docs/api/entitylinker.md
+++ b/website/docs/api/entitylinker.md
@@ -47,22 +47,25 @@ architectures and their arguments and hyperparameters.
> "model": DEFAULT_NEL_MODEL,
> "entity_vector_length": 64,
> "get_candidates": {'@misc': 'spacy.CandidateGenerator.v1'},
+> "threshold": None,
> }
> nlp.add_pipe("entity_linker", config=config)
> ```
-| Setting | Description |
-| ---------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
-| `labels_discard` | NER labels that will automatically get a "NIL" prediction. Defaults to `[]`. ~~Iterable[str]~~ |
-| `n_sents` | The number of neighbouring sentences to take into account. Defaults to 0. ~~int~~ |
-| `incl_prior` | Whether or not to include prior probabilities from the KB in the model. Defaults to `True`. ~~bool~~ |
-| `incl_context` | Whether or not to include the local context in the model. Defaults to `True`. ~~bool~~ |
-| `model` | The [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. Defaults to [EntityLinker](/api/architectures#EntityLinker). ~~Model~~ |
-| `entity_vector_length` | Size of encoding vectors in the KB. Defaults to `64`. ~~int~~ |
-| `use_gold_ents` | Whether to copy entities from the gold docs or not. Defaults to `True`. If `False`, entities must be set in the training data or by an annotating component in the pipeline. ~~int~~ |
-| `get_candidates` | Function that generates plausible candidates for a given `Span` object. Defaults to [CandidateGenerator](/api/architectures#CandidateGenerator), a function looking up exact, case-dependent aliases in the KB. ~~Callable[[KnowledgeBase, Span], Iterable[Candidate]]~~ |
-| `overwrite` 3.2 | Whether existing annotation is overwritten. Defaults to `True`. ~~bool~~ |
-| `scorer` 3.2 | The scoring method. Defaults to [`Scorer.score_links`](/api/scorer#score_links). ~~Optional[Callable]~~ |
+| Setting | Description |
+| ----------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `labels_discard` | NER labels that will automatically get a "NIL" prediction. Defaults to `[]`. ~~Iterable[str]~~ |
+| `n_sents` | The number of neighbouring sentences to take into account. Defaults to 0. ~~int~~ |
+| `incl_prior` | Whether or not to include prior probabilities from the KB in the model. Defaults to `True`. ~~bool~~ |
+| `incl_context` | Whether or not to include the local context in the model. Defaults to `True`. ~~bool~~ |
+| `model` | The [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. Defaults to [EntityLinker](/api/architectures#EntityLinker). ~~Model~~ |
+| `entity_vector_length` | Size of encoding vectors in the KB. Defaults to `64`. ~~int~~ |
+| `use_gold_ents`                                 | Whether to copy entities from the gold docs or not. Defaults to `True`. If `False`, entities must be set in the training data or by an annotating component in the pipeline. ~~bool~~ |
+| `get_candidates` | Function that generates plausible candidates for a given `Span` object. Defaults to [CandidateGenerator](/api/architectures#CandidateGenerator), a function looking up exact, case-dependent aliases in the KB. ~~Callable[[KnowledgeBase, Span], Iterable[Candidate]]~~ |
+| `overwrite` 3.2 | Whether existing annotation is overwritten. Defaults to `True`. ~~bool~~ |
+| `scorer` 3.2 | The scoring method. Defaults to [`Scorer.score_links`](/api/scorer#score_links). ~~Optional[Callable]~~ |
+| `save_activations` 4.0 | Save activations in `Doc` when annotating. Saved activations are `"ents"` and `"scores"`. ~~Union[bool, list[str]]~~ |
+| `threshold` 3.4 | Confidence threshold for entity predictions. The default of `None` implies that all predictions are accepted; otherwise, those with a score beneath the threshold are discarded. If there are no predictions with scores above the threshold, the linked entity is `NIL`. ~~Optional[float]~~ |
```python
%%GITHUB_SPACY/spacy/pipeline/entity_linker.py
@@ -95,20 +98,21 @@ custom knowledge base, you should either call
[`set_kb`](/api/entitylinker#set_kb) or provide a `kb_loader` in the
[`initialize`](/api/entitylinker#initialize) call.
-| Name | Description |
-| ---------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- |
-| `vocab` | The shared vocabulary. ~~Vocab~~ |
-| `model` | The [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. ~~Model~~ |
-| `name` | String name of the component instance. Used to add entries to the `losses` during training. ~~str~~ |
-| _keyword-only_ | |
-| `entity_vector_length` | Size of encoding vectors in the KB. ~~int~~ |
-| `get_candidates` | Function that generates plausible candidates for a given `Span` object. ~~Callable[[KnowledgeBase, Span], Iterable[Candidate]]~~ |
-| `labels_discard` | NER labels that will automatically get a `"NIL"` prediction. ~~Iterable[str]~~ |
-| `n_sents` | The number of neighbouring sentences to take into account. ~~int~~ |
-| `incl_prior` | Whether or not to include prior probabilities from the KB in the model. ~~bool~~ |
-| `incl_context` | Whether or not to include the local context in the model. ~~bool~~ |
-| `overwrite` 3.2 | Whether existing annotation is overwritten. Defaults to `True`. ~~bool~~ |
-| `scorer` 3.2 | The scoring method. Defaults to [`Scorer.score_links`](/api/scorer#score_links). ~~Optional[Callable]~~ |
+| Name | Description |
+| ---------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `vocab` | The shared vocabulary. ~~Vocab~~ |
+| `model` | The [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. ~~Model~~ |
+| `name` | String name of the component instance. Used to add entries to the `losses` during training. ~~str~~ |
+| _keyword-only_ | |
+| `entity_vector_length` | Size of encoding vectors in the KB. ~~int~~ |
+| `get_candidates` | Function that generates plausible candidates for a given `Span` object. ~~Callable[[KnowledgeBase, Span], Iterable[Candidate]]~~ |
+| `labels_discard` | NER labels that will automatically get a `"NIL"` prediction. ~~Iterable[str]~~ |
+| `n_sents` | The number of neighbouring sentences to take into account. ~~int~~ |
+| `incl_prior` | Whether or not to include prior probabilities from the KB in the model. ~~bool~~ |
+| `incl_context` | Whether or not to include the local context in the model. ~~bool~~ |
+| `overwrite` 3.2 | Whether existing annotation is overwritten. Defaults to `True`. ~~bool~~ |
+| `scorer` 3.2 | The scoring method. Defaults to [`Scorer.score_links`](/api/scorer#score_links). ~~Optional[Callable]~~ |
+| `threshold` 3.4 | Confidence threshold for entity predictions. The default of `None` implies that all predictions are accepted; otherwise, those with a score beneath the threshold are discarded. If there are no predictions with scores above the threshold, the linked entity is `NIL`. ~~Optional[float]~~ |
## EntityLinker.\_\_call\_\_ {#call tag="method"}
@@ -182,10 +186,10 @@ with the current vocab.
## EntityLinker.initialize {#initialize tag="method" new="3"}
Initialize the component for training. `get_examples` should be a function that
-returns an iterable of [`Example`](/api/example) objects. The data examples are
-used to **initialize the model** of the component and can either be the full
-training data or a representative sample. Initialization includes validating the
-network,
+returns an iterable of [`Example`](/api/example) objects. **At least one example
+should be supplied.** The data examples are used to **initialize the model** of
+the component and can either be the full training data or a representative
+sample. Initialization includes validating the network,
[inferring missing shapes](https://thinc.ai/docs/usage-models#validation) and
setting up the label scheme based on the data. This method is typically called
by [`Language.initialize`](/api/language#initialize).
@@ -205,15 +209,15 @@ This method was previously called `begin_training`.
>
> ```python
> entity_linker = nlp.add_pipe("entity_linker")
-> entity_linker.initialize(lambda: [], nlp=nlp, kb_loader=my_kb)
+> entity_linker.initialize(lambda: examples, nlp=nlp, kb_loader=my_kb)
> ```
-| Name | Description |
-| -------------- | ------------------------------------------------------------------------------------------------------------------------------------- |
-| `get_examples` | Function that returns gold-standard annotations in the form of [`Example`](/api/example) objects. ~~Callable[[], Iterable[Example]]~~ |
-| _keyword-only_ | |
-| `nlp` | The current `nlp` object. Defaults to `None`. ~~Optional[Language]~~ |
-| `kb_loader` | Function that creates a [`KnowledgeBase`](/api/kb) from a `Vocab` instance. ~~Callable[[Vocab], KnowledgeBase]~~ |
+| Name | Description |
+| -------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `get_examples` | Function that returns gold-standard annotations in the form of [`Example`](/api/example) objects. Must contain at least one `Example`. ~~Callable[[], Iterable[Example]]~~ |
+| _keyword-only_ | |
+| `nlp` | The current `nlp` object. Defaults to `None`. ~~Optional[Language]~~ |
+| `kb_loader` | Function that creates a [`KnowledgeBase`](/api/kb) from a `Vocab` instance. ~~Callable[[Vocab], KnowledgeBase]~~ |
## EntityLinker.predict {#predict tag="method"}
diff --git a/website/docs/api/entityrecognizer.md b/website/docs/api/entityrecognizer.md
index 7c153f064..a535e8316 100644
--- a/website/docs/api/entityrecognizer.md
+++ b/website/docs/api/entityrecognizer.md
@@ -154,10 +154,10 @@ applied to the `Doc` in order. Both [`__call__`](/api/entityrecognizer#call) and
## EntityRecognizer.initialize {#initialize tag="method" new="3"}
Initialize the component for training. `get_examples` should be a function that
-returns an iterable of [`Example`](/api/example) objects. The data examples are
-used to **initialize the model** of the component and can either be the full
-training data or a representative sample. Initialization includes validating the
-network,
+returns an iterable of [`Example`](/api/example) objects. **At least one example
+should be supplied.** The data examples are used to **initialize the model** of
+the component and can either be the full training data or a representative
+sample. Initialization includes validating the network,
[inferring missing shapes](https://thinc.ai/docs/usage-models#validation) and
setting up the label scheme based on the data. This method is typically called
by [`Language.initialize`](/api/language#initialize) and lets you customize
@@ -175,7 +175,7 @@ This method was previously called `begin_training`.
>
> ```python
> ner = nlp.add_pipe("ner")
-> ner.initialize(lambda: [], nlp=nlp)
+> ner.initialize(lambda: examples, nlp=nlp)
> ```
>
> ```ini
@@ -189,7 +189,7 @@ This method was previously called `begin_training`.
| Name | Description |
| -------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `get_examples` | Function that returns gold-standard annotations in the form of [`Example`](/api/example) objects. ~~Callable[[], Iterable[Example]]~~ |
+| `get_examples` | Function that returns gold-standard annotations in the form of [`Example`](/api/example) objects. Must contain at least one `Example`. ~~Callable[[], Iterable[Example]]~~ |
| _keyword-only_ | |
| `nlp` | The current `nlp` object. Defaults to `None`. ~~Optional[Language]~~ |
| `labels` | The label information to add to the component, as provided by the [`label_data`](#label_data) property after initialization. To generate a reusable JSON file from your data, you should run the [`init labels`](/api/cli#init-labels) command. If no labels are provided, the `get_examples` callback is used to extract the labels from the data, which may be a lot slower. ~~Optional[Dict[str, Dict[str, int]]]~~ |
diff --git a/website/docs/api/entityruler.md b/website/docs/api/entityruler.md
index c2ba33f01..ef7acbbf1 100644
--- a/website/docs/api/entityruler.md
+++ b/website/docs/api/entityruler.md
@@ -1,7 +1,7 @@
---
title: EntityRuler
tag: class
-source: spacy/pipeline/entityruler.py
+source: spacy/pipeline/entity_ruler.py
new: 2.1
teaser: 'Pipeline component for rule-based named entity recognition'
api_string_name: entity_ruler
@@ -64,7 +64,7 @@ how the component should be configured. You can override its settings via the
| `scorer` | The scoring method. Defaults to [`spacy.scorer.get_ner_prf`](/api/scorer#get_ner_prf). ~~Optional[Callable]~~ |
```python
-%%GITHUB_SPACY/spacy/pipeline/entityruler.py
+%%GITHUB_SPACY/spacy/pipeline/entity_ruler.py
```
## EntityRuler.\_\_init\_\_ {#init tag="method"}
diff --git a/website/docs/api/language.md b/website/docs/api/language.md
index 9a413efaf..ed763e36a 100644
--- a/website/docs/api/language.md
+++ b/website/docs/api/language.md
@@ -63,17 +63,18 @@ spaCy loads a model under the hood based on its
> nlp = Language.from_config(config)
> ```
-| Name | Description |
-| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
-| `config` | The loaded config. ~~Union[Dict[str, Any], Config]~~ |
-| _keyword-only_ | |
-| `vocab` | A `Vocab` object. If `True`, a vocab is created using the default language data settings. ~~Vocab~~ |
-| `disable` | Names of pipeline components to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [`nlp.enable_pipe`](/api/language#enable_pipe). ~~List[str]~~ |
-| `exclude` | Names of pipeline components to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~List[str]~~ |
-| `meta` | [Meta data](/api/data-formats#meta) overrides. ~~Dict[str, Any]~~ |
-| `auto_fill` | Whether to automatically fill in missing values in the config, based on defaults and function argument annotations. Defaults to `True`. ~~bool~~ |
-| `validate` | Whether to validate the component config and arguments against the types expected by the factory. Defaults to `True`. ~~bool~~ |
-| **RETURNS** | The initialized object. ~~Language~~ |
+| Name | Description |
+| ------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `config` | The loaded config. ~~Union[Dict[str, Any], Config]~~ |
+| _keyword-only_ | |
+| `vocab` | A `Vocab` object. If `True`, a vocab is created using the default language data settings. ~~Vocab~~ |
+| `disable` | Name(s) of pipeline component(s) to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [`nlp.enable_pipe`](/api/language#enable_pipe). ~~Union[str, Iterable[str]]~~ |
+| `enable` 3.4 | Name(s) of pipeline component(s) to [enable](/usage/processing-pipelines#disabling). All other pipes will be disabled, but can be enabled again using [`nlp.enable_pipe`](/api/language#enable_pipe). ~~Union[str, Iterable[str]]~~ |
+| `exclude` | Name(s) of pipeline component(s) to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~Union[str, Iterable[str]]~~ |
+| `meta` | [Meta data](/api/data-formats#meta) overrides. ~~Dict[str, Any]~~ |
+| `auto_fill` | Whether to automatically fill in missing values in the config, based on defaults and function argument annotations. Defaults to `True`. ~~bool~~ |
+| `validate` | Whether to validate the component config and arguments against the types expected by the factory. Defaults to `True`. ~~bool~~ |
+| **RETURNS** | The initialized object. ~~Language~~ |
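+
+For example, assuming a loaded `config`, the following would create the
+pipeline with only the listed components enabled (all others are disabled, but
+can be turned back on with [`nlp.enable_pipe`](/api/language#enable_pipe)):
+
+```python
+nlp = Language.from_config(config, enable=["tok2vec", "tagger"])
+```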
## Language.component {#component tag="classmethod" new="3"}
@@ -695,8 +696,8 @@ As of spaCy v3.0, the `disable_pipes` method has been renamed to `select_pipes`:
| Name | Description |
| -------------- | ------------------------------------------------------------------------------------------------------ |
| _keyword-only_ | |
-| `disable` | Name(s) of pipeline components to disable. ~~Optional[Union[str, Iterable[str]]]~~ |
-| `enable` | Name(s) of pipeline components that will not be disabled. ~~Optional[Union[str, Iterable[str]]]~~ |
+| `disable` | Name(s) of pipeline component(s) to disable. ~~Optional[Union[str, Iterable[str]]]~~ |
+| `enable` | Name(s) of pipeline component(s) that will not be disabled. ~~Optional[Union[str, Iterable[str]]]~~ |
| **RETURNS** | The disabled pipes that can be restored by calling the object's `.restore()` method. ~~DisabledPipes~~ |
## Language.get_factory_meta {#get_factory_meta tag="classmethod" new="3"}
diff --git a/website/docs/api/legacy.md b/website/docs/api/legacy.md
index b93bdde0e..d4e1c47c2 100644
--- a/website/docs/api/legacy.md
+++ b/website/docs/api/legacy.md
@@ -242,6 +242,59 @@ added to an existing vectors table. See more details in
## Loggers {#loggers}
+These functions are available from `@spacy.registry.loggers`.
+
+### spacy.ConsoleLogger.v1 {#ConsoleLogger_v1}
+
+> #### Example config
+>
+> ```ini
+> [training.logger]
+> @loggers = "spacy.ConsoleLogger.v1"
+> progress_bar = true
+> ```
+
+Writes the results of a training step to the console in a tabular format.
+
+
+
+```cli
+$ python -m spacy train config.cfg
+```
+
+```
+ℹ Using CPU
+ℹ Loading config and nlp from: config.cfg
+ℹ Pipeline: ['tok2vec', 'tagger']
+ℹ Start training
+ℹ Training. Initial learn rate: 0.0
+
+E # LOSS TOK2VEC LOSS TAGGER TAG_ACC SCORE
+--- ------ ------------ ----------- ------- ------
+ 0 0 0.00 86.20 0.22 0.00
+ 0 200 3.08 18968.78 34.00 0.34
+ 0 400 31.81 22539.06 33.64 0.34
+ 0 600 92.13 22794.91 43.80 0.44
+ 0 800 183.62 21541.39 56.05 0.56
+ 0 1000 352.49 25461.82 65.15 0.65
+ 0 1200 422.87 23708.82 71.84 0.72
+ 0 1400 601.92 24994.79 76.57 0.77
+ 0 1600 662.57 22268.02 80.20 0.80
+ 0 1800 1101.50 28413.77 82.56 0.83
+ 0 2000 1253.43 28736.36 85.00 0.85
+ 0 2200 1411.02 28237.53 87.42 0.87
+ 0 2400 1605.35 28439.95 88.70 0.89
+```
+
+Note that the cumulative loss keeps increasing within one epoch, but should
+start decreasing across epochs.
+
+
+
+| Name | Description |
+| -------------- | --------------------------------------------------------- |
+| `progress_bar` | Whether the logger should print the progress bar. ~~bool~~ |
+
Logging utilities for spaCy are implemented in the
[`spacy-loggers`](https://github.com/explosion/spacy-loggers) repo, and the
functions are typically available from `@spacy.registry.loggers`.
diff --git a/website/docs/api/lemmatizer.md b/website/docs/api/lemmatizer.md
index 75387305a..905096338 100644
--- a/website/docs/api/lemmatizer.md
+++ b/website/docs/api/lemmatizer.md
@@ -70,7 +70,7 @@ lemmatizer is available. The lemmatizer modes `rule` and `pos_lookup` require
[`token.pos`](/api/token) from a previous pipeline component (see example
pipeline configurations in the
[pretrained pipeline design details](/models#design-cnn)) or rely on third-party
-libraries (`pymorphy2`).
+libraries (`pymorphy3`).
| Language | Default Mode |
| -------- | ------------ |
@@ -86,9 +86,9 @@ libraries (`pymorphy2`).
| `nb` | `rule` |
| `nl` | `rule` |
| `pl` | `pos_lookup` |
-| `ru` | `pymorphy2` |
+| `ru` | `pymorphy3` |
| `sv` | `rule` |
-| `uk` | `pymorphy2` |
+| `uk` | `pymorphy3` |
```python
%%GITHUB_SPACY/spacy/pipeline/lemmatizer.py
@@ -118,7 +118,7 @@ shortcut for this and instantiate the component using its string name and
| `name` | String name of the component instance. Used to add entries to the `losses` during training. ~~str~~ |
| _keyword-only_ | |
| mode | The lemmatizer mode, e.g. `"lookup"` or `"rule"`. Defaults to `"lookup"`. ~~str~~ |
-| overwrite | Whether to overwrite existing lemmas. ~~bool~ |
+| overwrite | Whether to overwrite existing lemmas. ~~bool~~ |
## Lemmatizer.\_\_call\_\_ {#call tag="method"}
diff --git a/website/docs/api/matcher.md b/website/docs/api/matcher.md
index 9daa0658d..ff6923cf2 100644
--- a/website/docs/api/matcher.md
+++ b/website/docs/api/matcher.md
@@ -59,15 +59,20 @@ matched:
> [
> {"POS": "ADJ", "OP": "*"},
> {"POS": "NOUN", "OP": "+"}
+> {"POS": "PROPN", "OP": "{2}"}
> ]
> ```
-| OP | Description |
-| --- | ---------------------------------------------------------------- |
-| `!` | Negate the pattern, by requiring it to match exactly 0 times. |
-| `?` | Make the pattern optional, by allowing it to match 0 or 1 times. |
-| `+` | Require the pattern to match 1 or more times. |
-| `*` | Allow the pattern to match 0 or more times. |
+| OP | Description |
+| ------- | ---------------------------------------------------------------------- |
+| `!` | Negate the pattern, by requiring it to match exactly 0 times. |
+| `?` | Make the pattern optional, by allowing it to match 0 or 1 times. |
+| `+` | Require the pattern to match 1 or more times. |
+| `*` | Allow the pattern to match 0 or more times. |
+| `{n}` | Require the pattern to match exactly _n_ times. |
+| `{n,m}` | Require the pattern to match at least _n_ but not more than _m_ times. |
+| `{n,}` | Require the pattern to match at least _n_ times. |
+| `{,m}` | Require the pattern to match at most _m_ times. |
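+
+For example, the following pattern combines the existing `?` operator with the
+new curly-brace quantifiers to match an optional determiner, one to three
+adjectives and exactly two proper nouns:
+
+```python
+pattern = [
+    {"POS": "DET", "OP": "?"},
+    {"POS": "ADJ", "OP": "{1,3}"},
+    {"POS": "PROPN", "OP": "{2}"},
+]
+```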
Token patterns can also map to a **dictionary of properties** instead of a
single value to indicate whether the expected value is a member of a list or how
@@ -194,25 +199,11 @@ will be overwritten.
> [{"LOWER": "hello"}, {"LOWER": "world"}],
> [{"ORTH": "Google"}, {"ORTH": "Maps"}]
> ]
-> matcher.add("TEST_PATTERNS", patterns)
+> matcher.add("TEST_PATTERNS", patterns, on_match=on_match)
> doc = nlp("HELLO WORLD on Google Maps.")
> matches = matcher(doc)
> ```
-
-
-As of spaCy v3.0, `Matcher.add` takes a list of patterns as the second argument
-(instead of a variable number of arguments). The `on_match` callback becomes an
-optional keyword argument.
-
-```diff
-patterns = [[{"TEXT": "Google"}, {"TEXT": "Now"}], [{"TEXT": "GoogleNow"}]]
-- matcher.add("GoogleNow", on_match, *patterns)
-+ matcher.add("GoogleNow", patterns, on_match=on_match)
-```
-
-
-
| Name | Description |
| ----------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `match_id` | An ID for the thing you're matching. ~~str~~ |
diff --git a/website/docs/api/morphologizer.md b/website/docs/api/morphologizer.md
index 434c56833..97444b157 100644
--- a/website/docs/api/morphologizer.md
+++ b/website/docs/api/morphologizer.md
@@ -42,12 +42,13 @@ architectures and their arguments and hyperparameters.
> nlp.add_pipe("morphologizer", config=config)
> ```
-| Setting | Description |
-| ---------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `model` | The model to use. Defaults to [Tagger](/api/architectures#Tagger). ~~Model[List[Doc], List[Floats2d]]~~ |
-| `overwrite` 3.2 | Whether the values of existing features are overwritten. Defaults to `True`. ~~bool~~ |
-| `extend` 3.2 | Whether existing feature types (whose values may or may not be overwritten depending on `overwrite`) are preserved. Defaults to `False`. ~~bool~~ |
-| `scorer` 3.2 | The scoring method. Defaults to [`Scorer.score_token_attr`](/api/scorer#score_token_attr) for the attributes `"pos"` and `"morph"` and [`Scorer.score_token_attr_per_feat`](/api/scorer#score_token_attr_per_feat) for the attribute `"morph"`. ~~Optional[Callable]~~ |
+| Setting | Description |
+| ----------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `model` | The model to use. Defaults to [Tagger](/api/architectures#Tagger). ~~Model[List[Doc], List[Floats2d]]~~ |
+| `overwrite` 3.2 | Whether the values of existing features are overwritten. Defaults to `True`. ~~bool~~ |
+| `extend` 3.2 | Whether existing feature types (whose values may or may not be overwritten depending on `overwrite`) are preserved. Defaults to `False`. ~~bool~~ |
+| `scorer` 3.2 | The scoring method. Defaults to [`Scorer.score_token_attr`](/api/scorer#score_token_attr) for the attributes `"pos"` and `"morph"` and [`Scorer.score_token_attr_per_feat`](/api/scorer#score_token_attr_per_feat) for the attribute `"morph"`. ~~Optional[Callable]~~ |
+| `save_activations` 4.0 | Save activations in `Doc` when annotating. Saved activations are `"probabilities"` and `"label_ids"`. ~~Union[bool, list[str]]~~ |
```python
%%GITHUB_SPACY/spacy/pipeline/morphologizer.pyx
@@ -147,10 +148,10 @@ applied to the `Doc` in order. Both [`__call__`](/api/morphologizer#call) and
## Morphologizer.initialize {#initialize tag="method"}
Initialize the component for training. `get_examples` should be a function that
-returns an iterable of [`Example`](/api/example) objects. The data examples are
-used to **initialize the model** of the component and can either be the full
-training data or a representative sample. Initialization includes validating the
-network,
+returns an iterable of [`Example`](/api/example) objects. **At least one example
+should be supplied.** The data examples are used to **initialize the model** of
+the component and can either be the full training data or a representative
+sample. Initialization includes validating the network,
[inferring missing shapes](https://thinc.ai/docs/usage-models#validation) and
setting up the label scheme based on the data. This method is typically called
by [`Language.initialize`](/api/language#initialize) and lets you customize
@@ -162,7 +163,7 @@ config.
>
> ```python
> morphologizer = nlp.add_pipe("morphologizer")
-> morphologizer.initialize(lambda: [], nlp=nlp)
+> morphologizer.initialize(lambda: examples, nlp=nlp)
> ```
>
> ```ini
@@ -176,7 +177,7 @@ config.
| Name | Description |
| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `get_examples` | Function that returns gold-standard annotations in the form of [`Example`](/api/example) objects. ~~Callable[[], Iterable[Example]]~~ |
+| `get_examples` | Function that returns gold-standard annotations in the form of [`Example`](/api/example) objects. Must contain at least one `Example`. ~~Callable[[], Iterable[Example]]~~ |
| _keyword-only_ | |
| `nlp` | The current `nlp` object. Defaults to `None`. ~~Optional[Language]~~ |
| `labels` | The label information to add to the component, as provided by the [`label_data`](#label_data) property after initialization. To generate a reusable JSON file from your data, you should run the [`init labels`](/api/cli#init-labels) command. If no labels are provided, the `get_examples` callback is used to extract the labels from the data, which may be a lot slower. ~~Optional[dict]~~ |
@@ -399,9 +400,9 @@ coarse-grained POS as the feature `POS`.
> assert "Mood=Ind|POS=VERB|Tense=Past|VerbForm=Fin" in morphologizer.labels
> ```
-| Name | Description |
-| ----------- | ------------------------------------------------------ |
-| **RETURNS** | The labels added to the component. ~~Tuple[str, ...]~~ |
+| Name | Description |
+| ----------- | --------------------------------------------------------- |
+| **RETURNS** | The labels added to the component. ~~Iterable[str]~~ |
## Morphologizer.label_data {#label_data tag="property" new="3"}
diff --git a/website/docs/api/phrasematcher.md b/website/docs/api/phrasematcher.md
index 2cef9ac2a..b06198916 100644
--- a/website/docs/api/phrasematcher.md
+++ b/website/docs/api/phrasematcher.md
@@ -116,10 +116,10 @@ Check whether the matcher contains rules for a match ID.
## PhraseMatcher.add {#add tag="method"}
Add a rule to the matcher, consisting of an ID key, one or more patterns, and a
-callback function to act on the matches. The callback function will receive the
-arguments `matcher`, `doc`, `i` and `matches`. If a pattern already exists for
-the given ID, the patterns will be extended. An `on_match` callback will be
-overwritten.
+optional callback function to act on the matches. The callback function will
+receive the arguments `matcher`, `doc`, `i` and `matches`. If a pattern already
+exists for the given ID, the patterns will be extended. An `on_match` callback
+will be overwritten.
> #### Example
>
@@ -134,20 +134,6 @@ overwritten.
> matches = matcher(doc)
> ```
-
-
-As of spaCy v3.0, `PhraseMatcher.add` takes a list of patterns as the second
-argument (instead of a variable number of arguments). The `on_match` callback
-becomes an optional keyword argument.
-
-```diff
-patterns = [nlp("health care reform"), nlp("healthcare reform")]
-- matcher.add("HEALTH", on_match, *patterns)
-+ matcher.add("HEALTH", patterns, on_match=on_match)
-```
-
-
-
| Name | Description |
| -------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `key` | An ID for the thing you're matching. ~~str~~ |
diff --git a/website/docs/api/sentencerecognizer.md b/website/docs/api/sentencerecognizer.md
index 29bf10393..03744e1b5 100644
--- a/website/docs/api/sentencerecognizer.md
+++ b/website/docs/api/sentencerecognizer.md
@@ -39,11 +39,12 @@ architectures and their arguments and hyperparameters.
> nlp.add_pipe("senter", config=config)
> ```
-| Setting | Description |
-| ---------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `model` | The [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. Defaults to [Tagger](/api/architectures#Tagger). ~~Model[List[Doc], List[Floats2d]]~~ |
-| `overwrite` 3.2 | Whether existing annotation is overwritten. Defaults to `False`. ~~bool~~ |
-| `scorer` 3.2 | The scoring method. Defaults to [`Scorer.score_spans`](/api/scorer#score_spans) for the attribute `"sents"`. ~~Optional[Callable]~~ |
+| Setting | Description |
+| ----------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `model` | The [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. Defaults to [Tagger](/api/architectures#Tagger). ~~Model[List[Doc], List[Floats2d]]~~ |
+| `overwrite` 3.2 | Whether existing annotation is overwritten. Defaults to `False`. ~~bool~~ |
+| `scorer` 3.2 | The scoring method. Defaults to [`Scorer.score_spans`](/api/scorer#score_spans) for the attribute `"sents"`. ~~Optional[Callable]~~ |
+| `save_activations` 4.0 | Save activations in `Doc` when annotating. Saved activations are `"probabilities"` and `"label_ids"`. ~~Union[bool, list[str]]~~ |
```python
%%GITHUB_SPACY/spacy/pipeline/senter.pyx
@@ -132,10 +133,10 @@ and [`pipe`](/api/sentencerecognizer#pipe) delegate to the
## SentenceRecognizer.initialize {#initialize tag="method"}
Initialize the component for training. `get_examples` should be a function that
-returns an iterable of [`Example`](/api/example) objects. The data examples are
-used to **initialize the model** of the component and can either be the full
-training data or a representative sample. Initialization includes validating the
-network,
+returns an iterable of [`Example`](/api/example) objects. **At least one example
+should be supplied.** The data examples are used to **initialize the model** of
+the component and can either be the full training data or a representative
+sample. Initialization includes validating the network,
[inferring missing shapes](https://thinc.ai/docs/usage-models#validation) and
setting up the label scheme based on the data. This method is typically called
by [`Language.initialize`](/api/language#initialize).
@@ -144,14 +145,14 @@ by [`Language.initialize`](/api/language#initialize).
>
> ```python
> senter = nlp.add_pipe("senter")
-> senter.initialize(lambda: [], nlp=nlp)
+> senter.initialize(lambda: examples, nlp=nlp)
> ```
-| Name | Description |
-| -------------- | ------------------------------------------------------------------------------------------------------------------------------------- |
-| `get_examples` | Function that returns gold-standard annotations in the form of [`Example`](/api/example) objects. ~~Callable[[], Iterable[Example]]~~ |
-| _keyword-only_ | |
-| `nlp` | The current `nlp` object. Defaults to `None`. ~~Optional[Language]~~ |
+| Name | Description |
+| -------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `get_examples` | Function that returns gold-standard annotations in the form of [`Example`](/api/example) objects. Must contain at least one `Example`. ~~Callable[[], Iterable[Example]]~~ |
+| _keyword-only_ | |
+| `nlp` | The current `nlp` object. Defaults to `None`. ~~Optional[Language]~~ |
## SentenceRecognizer.predict {#predict tag="method"}
diff --git a/website/docs/api/span.md b/website/docs/api/span.md
index 89f608994..be522c31f 100644
--- a/website/docs/api/span.md
+++ b/website/docs/api/span.md
@@ -561,8 +561,8 @@ overlaps with will be returned.
| `lemma_` | The span's lemma. Equivalent to `"".join(token.text_with_ws for token in span)`. ~~str~~ |
| `kb_id` | The hash value of the knowledge base ID referred to by the span. ~~int~~ |
| `kb_id_` | The knowledge base ID referred to by the span. ~~str~~ |
-| `ent_id` | The hash value of the named entity the root token is an instance of. ~~int~~ |
-| `ent_id_` | The string ID of the named entity the root token is an instance of. ~~str~~ |
+| `ent_id` | Alias for `id`: the hash value of the span's ID. ~~int~~ |
+| `ent_id_` | Alias for `id_`: the span's ID. ~~str~~ |
| `id` | The hash value of the span's ID. ~~int~~ |
| `id_` | The span's ID. ~~str~~ |
| `sentiment` | A scalar value indicating the positivity or negativity of the span. ~~float~~ |
diff --git a/website/docs/api/spancategorizer.md b/website/docs/api/spancategorizer.md
index f09ac8bdb..e07ad3577 100644
--- a/website/docs/api/spancategorizer.md
+++ b/website/docs/api/spancategorizer.md
@@ -52,14 +52,15 @@ architectures and their arguments and hyperparameters.
> nlp.add_pipe("spancat", config=config)
> ```
-| Setting | Description |
-| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `suggester` | A function that [suggests spans](#suggesters). Spans are returned as a ragged array with two integer columns, for the start and end positions. Defaults to [`ngram_suggester`](#ngram_suggester). ~~Callable[[Iterable[Doc], Optional[Ops]], Ragged]~~ |
-| `model` | A model instance that is given a a list of documents and `(start, end)` indices representing candidate span offsets. The model predicts a probability for each category for each span. Defaults to [SpanCategorizer](/api/architectures#SpanCategorizer). ~~Model[Tuple[List[Doc], Ragged], Floats2d]~~ |
-| `spans_key` | Key of the [`Doc.spans`](/api/doc#spans) dict to save the spans under. During initialization and training, the component will look for spans on the reference document under the same key. Defaults to `"sc"`. ~~str~~ |
-| `threshold` | Minimum probability to consider a prediction positive. Spans with a positive prediction will be saved on the Doc. Defaults to `0.5`. ~~float~~ |
-| `max_positive` | Maximum number of labels to consider positive per span. Defaults to `None`, indicating no limit. ~~Optional[int]~~ |
-| `scorer` | The scoring method. Defaults to [`Scorer.score_spans`](/api/scorer#score_spans) for `Doc.spans[spans_key]` with overlapping spans allowed. ~~Optional[Callable]~~ |
+| Setting | Description |
+| ----------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `suggester` | A function that [suggests spans](#suggesters). Spans are returned as a ragged array with two integer columns, for the start and end positions. Defaults to [`ngram_suggester`](#ngram_suggester). ~~Callable[[Iterable[Doc], Optional[Ops]], Ragged]~~ |
+| `model`                                         | A model instance that is given a list of documents and `(start, end)` indices representing candidate span offsets. The model predicts a probability for each category for each span. Defaults to [SpanCategorizer](/api/architectures#SpanCategorizer). ~~Model[Tuple[List[Doc], Ragged], Floats2d]~~                   |
+| `spans_key` | Key of the [`Doc.spans`](/api/doc#spans) dict to save the spans under. During initialization and training, the component will look for spans on the reference document under the same key. Defaults to `"sc"`. ~~str~~ |
+| `threshold` | Minimum probability to consider a prediction positive. Spans with a positive prediction will be saved on the Doc. Defaults to `0.5`. ~~float~~ |
+| `max_positive` | Maximum number of labels to consider positive per span. Defaults to `None`, indicating no limit. ~~Optional[int]~~ |
+| `scorer` | The scoring method. Defaults to [`Scorer.score_spans`](/api/scorer#score_spans) for `Doc.spans[spans_key]` with overlapping spans allowed. ~~Optional[Callable]~~ |
+| `save_activations` 4.0 | Save activations in `Doc` when annotating. Saved activations are `"indices"` and `"scores"`. ~~Union[bool, list[str]]~~ |
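+
+A minimal sketch of requesting only a subset of activations, assuming the
+setting accepts a list of activation names as the type above suggests:
+
+```python
+import spacy
+
+nlp = spacy.blank("en")
+# Hypothetical: save only the span scores, not the suggested indices
+spancat = nlp.add_pipe("spancat", config={"save_activations": ["scores"]})
+```
+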
```python
%%GITHUB_SPACY/spacy/pipeline/spancat.py
@@ -93,7 +94,7 @@ shortcut for this and instantiate the component using its string name and
| `suggester` | A function that [suggests spans](#suggesters). Spans are returned as a ragged array with two integer columns, for the start and end positions. ~~Callable[[Iterable[Doc], Optional[Ops]], Ragged]~~ |
| `name` | String name of the component instance. Used to add entries to the `losses` during training. ~~str~~ |
| _keyword-only_ | |
-| `spans_key` | Key of the [`Doc.spans`](/api/doc#sans) dict to save the spans under. During initialization and training, the component will look for spans on the reference document under the same key. Defaults to `"sc"`. ~~str~~ |
+| `spans_key`    | Key of the [`Doc.spans`](/api/doc#spans) dict to save the spans under. During initialization and training, the component will look for spans on the reference document under the same key. Defaults to `"sc"`. ~~str~~                                            |
| `threshold` | Minimum probability to consider a prediction positive. Spans with a positive prediction will be saved on the Doc. Defaults to `0.5`. ~~float~~ |
| `max_positive` | Maximum number of labels to consider positive per span. Defaults to `None`, indicating no limit. ~~Optional[int]~~ |
@@ -147,10 +148,10 @@ applied to the `Doc` in order. Both [`__call__`](/api/spancategorizer#call) and
## SpanCategorizer.initialize {#initialize tag="method"}
Initialize the component for training. `get_examples` should be a function that
-returns an iterable of [`Example`](/api/example) objects. The data examples are
-used to **initialize the model** of the component and can either be the full
-training data or a representative sample. Initialization includes validating the
-network,
+returns an iterable of [`Example`](/api/example) objects. **At least one example
+should be supplied.** The data examples are used to **initialize the model** of
+the component and can either be the full training data or a representative
+sample. Initialization includes validating the network,
[inferring missing shapes](https://thinc.ai/docs/usage-models#validation) and
setting up the label scheme based on the data. This method is typically called
by [`Language.initialize`](/api/language#initialize) and lets you customize
@@ -162,7 +163,7 @@ config.
>
> ```python
> spancat = nlp.add_pipe("spancat")
-> spancat.initialize(lambda: [], nlp=nlp)
+> spancat.initialize(lambda: examples, nlp=nlp)
> ```
>
> ```ini
@@ -176,7 +177,7 @@ config.
| Name | Description |
| -------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `get_examples` | Function that returns gold-standard annotations in the form of [`Example`](/api/example) objects. ~~Callable[[], Iterable[Example]]~~ |
+| `get_examples` | Function that returns gold-standard annotations in the form of [`Example`](/api/example) objects. Must contain at least one `Example`. ~~Callable[[], Iterable[Example]]~~ |
| _keyword-only_ | |
| `nlp` | The current `nlp` object. Defaults to `None`. ~~Optional[Language]~~ |
| `labels` | The label information to add to the component, as provided by the [`label_data`](#label_data) property after initialization. To generate a reusable JSON file from your data, you should run the [`init labels`](/api/cli#init-labels) command. If no labels are provided, the `get_examples` callback is used to extract the labels from the data, which may be a lot slower. ~~Optional[Iterable[str]]~~ |
diff --git a/website/docs/api/spangroup.md b/website/docs/api/spangroup.md
index 8dbdefc01..2d1cf73c4 100644
--- a/website/docs/api/spangroup.md
+++ b/website/docs/api/spangroup.md
@@ -255,9 +255,10 @@ Return a copy of the span group.
> new_group = doc.spans["errors"].copy()
> ```
-| Name | Description |
-| ----------- | ----------------------------------------------- |
-| **RETURNS** | A copy of the `SpanGroup` object. ~~SpanGroup~~ |
+| Name | Description |
+| ----------- | -------------------------------------------------------------------------------------------------- |
+| `doc` | The document to which the copy is bound. Defaults to `None` for the current doc. ~~Optional[Doc]~~ |
+| **RETURNS** | A copy of the `SpanGroup` object. ~~SpanGroup~~ |
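+
+A minimal sketch of the new `doc` argument (assuming the target `Doc` shares
+the same tokenization as the original):
+
+```python
+import spacy
+
+nlp = spacy.blank("en")
+doc = nlp("Their goi ng home")
+doc.spans["errors"] = [doc[0:1], doc[1:3]]
+other_doc = nlp("Their goi ng home")
+# Copy the span group and bind the copy to a different Doc
+copied_group = doc.spans["errors"].copy(doc=other_doc)
+```
+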
## SpanGroup.to_bytes {#to_bytes tag="method"}
diff --git a/website/docs/api/spanruler.md b/website/docs/api/spanruler.md
index a1c222714..b573f7c58 100644
--- a/website/docs/api/spanruler.md
+++ b/website/docs/api/spanruler.md
@@ -2,7 +2,7 @@
title: SpanRuler
tag: class
source: spacy/pipeline/span_ruler.py
-new: 3.3.1
+new: 3.3
teaser: 'Pipeline component for rule-based span and named entity recognition'
api_string_name: span_ruler
api_trainable: false
diff --git a/website/docs/api/tagger.md b/website/docs/api/tagger.md
index b51864d3a..0d77d9bf4 100644
--- a/website/docs/api/tagger.md
+++ b/website/docs/api/tagger.md
@@ -40,12 +40,13 @@ architectures and their arguments and hyperparameters.
> nlp.add_pipe("tagger", config=config)
> ```
-| Setting | Description |
-| ------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
-| `model` | A model instance that predicts the tag probabilities. The output vectors should match the number of tags in size, and be normalized as probabilities (all scores between 0 and 1, with the rows summing to `1`). Defaults to [Tagger](/api/architectures#Tagger). ~~Model[List[Doc], List[Floats2d]]~~ |
-| `overwrite` 3.2 | Whether existing annotation is overwritten. Defaults to `False`. ~~bool~~ |
-| `scorer` 3.2 | The scoring method. Defaults to [`Scorer.score_token_attr`](/api/scorer#score_token_attr) for the attribute `"tag"`. ~~Optional[Callable]~~ |
-| `neg_prefix` 3.2.1 | The prefix used to specify incorrect tags while training. The tagger will learn not to predict exactly this tag. Defaults to `!`. ~~str~~ |
+| Setting | Description |
+| ----------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| `model` | A model instance that predicts the tag probabilities. The output vectors should match the number of tags in size, and be normalized as probabilities (all scores between 0 and 1, with the rows summing to `1`). Defaults to [Tagger](/api/architectures#Tagger). ~~Model[List[Doc], List[Floats2d]]~~ |
+| `overwrite` 3.2 | Whether existing annotation is overwritten. Defaults to `False`. ~~bool~~ |
+| `scorer` 3.2 | The scoring method. Defaults to [`Scorer.score_token_attr`](/api/scorer#score_token_attr) for the attribute `"tag"`. ~~Optional[Callable]~~ |
+| `neg_prefix` 3.2.1 | The prefix used to specify incorrect tags while training. The tagger will learn not to predict exactly this tag. Defaults to `!`. ~~str~~ |
+| `save_activations` 4.0 | Save activations in `Doc` when annotating. Saved activations are `"probabilities"` and `"label_ids"`. ~~Union[bool, list[str]]~~ |
```python
%%GITHUB_SPACY/spacy/pipeline/tagger.pyx
@@ -130,10 +131,10 @@ applied to the `Doc` in order. Both [`__call__`](/api/tagger#call) and
## Tagger.initialize {#initialize tag="method" new="3"}
Initialize the component for training. `get_examples` should be a function that
-returns an iterable of [`Example`](/api/example) objects. The data examples are
-used to **initialize the model** of the component and can either be the full
-training data or a representative sample. Initialization includes validating the
-network,
+returns an iterable of [`Example`](/api/example) objects. **At least one example
+should be supplied.** The data examples are used to **initialize the model** of
+the component and can either be the full training data or a representative
+sample. Initialization includes validating the network,
[inferring missing shapes](https://thinc.ai/docs/usage-models#validation) and
setting up the label scheme based on the data. This method is typically called
by [`Language.initialize`](/api/language#initialize) and lets you customize
@@ -151,7 +152,7 @@ This method was previously called `begin_training`.
>
> ```python
> tagger = nlp.add_pipe("tagger")
-> tagger.initialize(lambda: [], nlp=nlp)
+> tagger.initialize(lambda: examples, nlp=nlp)
> ```
>
> ```ini
@@ -165,7 +166,7 @@ This method was previously called `begin_training`.
| Name | Description |
| -------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `get_examples` | Function that returns gold-standard annotations in the form of [`Example`](/api/example) objects. ~~Callable[[], Iterable[Example]]~~ |
+| `get_examples` | Function that returns gold-standard annotations in the form of [`Example`](/api/example) objects. Must contain at least one `Example`. ~~Callable[[], Iterable[Example]]~~ |
| _keyword-only_ | |
| `nlp` | The current `nlp` object. Defaults to `None`. ~~Optional[Language]~~ |
| `labels` | The label information to add to the component, as provided by the [`label_data`](#label_data) property after initialization. To generate a reusable JSON file from your data, you should run the [`init labels`](/api/cli#init-labels) command. If no labels are provided, the `get_examples` callback is used to extract the labels from the data, which may be a lot slower. ~~Optional[Iterable[str]]~~ |
diff --git a/website/docs/api/textcategorizer.md b/website/docs/api/textcategorizer.md
index 2ff569bad..d8a609693 100644
--- a/website/docs/api/textcategorizer.md
+++ b/website/docs/api/textcategorizer.md
@@ -84,6 +84,7 @@ architectures and their arguments and hyperparameters.
| ----------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `threshold` | Cutoff to consider a prediction "positive", relevant when printing accuracy results. ~~float~~ |
| `model` | A model instance that predicts scores for each category. Defaults to [TextCatEnsemble](/api/architectures#TextCatEnsemble). ~~Model[List[Doc], List[Floats2d]]~~ |
+| `scorer` | The scoring method. Defaults to [`Scorer.score_cats`](/api/scorer#score_cats) for the attribute `"cats"`. ~~Optional[Callable]~~ |
```python
%%GITHUB_SPACY/spacy/pipeline/textcat.py
@@ -116,14 +117,15 @@ Create a new pipeline instance. In your application, you would normally use a
shortcut for this and instantiate the component using its string name and
[`nlp.add_pipe`](/api/language#create_pipe).
-| Name | Description |
-| -------------- | -------------------------------------------------------------------------------------------------------------------------------- |
-| `vocab` | The shared vocabulary. ~~Vocab~~ |
-| `model` | The Thinc [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. ~~Model[List[Doc], List[Floats2d]]~~ |
-| `name` | String name of the component instance. Used to add entries to the `losses` during training. ~~str~~ |
-| _keyword-only_ | |
-| `threshold` | Cutoff to consider a prediction "positive", relevant when printing accuracy results. ~~float~~ |
-| `scorer` | The scoring method. Defaults to [`Scorer.score_cats`](/api/scorer#score_cats) for the attribute `"cats"`. ~~Optional[Callable]~~ |
+| Name | Description |
+| ----------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- |
+| `vocab` | The shared vocabulary. ~~Vocab~~ |
+| `model` | The Thinc [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. ~~Model[List[Doc], List[Floats2d]]~~ |
+| `name` | String name of the component instance. Used to add entries to the `losses` during training. ~~str~~ |
+| _keyword-only_ | |
+| `threshold` | Cutoff to consider a prediction "positive", relevant when printing accuracy results. ~~float~~ |
+| `scorer` | The scoring method. Defaults to [`Scorer.score_cats`](/api/scorer#score_cats) for the attribute `"cats"`. ~~Optional[Callable]~~ |
+| `save_activations` 4.0 | Save activations in `Doc` when annotating. The only supported activation is `"probabilities"`. ~~Union[bool, list[str]]~~  |
## TextCategorizer.\_\_call\_\_ {#call tag="method"}
@@ -175,10 +177,10 @@ applied to the `Doc` in order. Both [`__call__`](/api/textcategorizer#call) and
## TextCategorizer.initialize {#initialize tag="method" new="3"}
Initialize the component for training. `get_examples` should be a function that
-returns an iterable of [`Example`](/api/example) objects. The data examples are
-used to **initialize the model** of the component and can either be the full
-training data or a representative sample. Initialization includes validating the
-network,
+returns an iterable of [`Example`](/api/example) objects. **At least one example
+should be supplied.** The data examples are used to **initialize the model** of
+the component and can either be the full training data or a representative
+sample. Initialization includes validating the network,
[inferring missing shapes](https://thinc.ai/docs/usage-models#validation) and
setting up the label scheme based on the data. This method is typically called
by [`Language.initialize`](/api/language#initialize) and lets you customize
@@ -196,7 +198,7 @@ This method was previously called `begin_training`.
>
> ```python
> textcat = nlp.add_pipe("textcat")
-> textcat.initialize(lambda: [], nlp=nlp)
+> textcat.initialize(lambda: examples, nlp=nlp)
> ```
>
> ```ini
@@ -211,7 +213,7 @@ This method was previously called `begin_training`.
| Name | Description |
| ---------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `get_examples` | Function that returns gold-standard annotations in the form of [`Example`](/api/example) objects. ~~Callable[[], Iterable[Example]]~~ |
+| `get_examples` | Function that returns gold-standard annotations in the form of [`Example`](/api/example) objects. Must contain at least one `Example`. ~~Callable[[], Iterable[Example]]~~ |
| _keyword-only_ | |
| `nlp` | The current `nlp` object. Defaults to `None`. ~~Optional[Language]~~ |
| `labels` | The label information to add to the component, as provided by the [`label_data`](#label_data) property after initialization. To generate a reusable JSON file from your data, you should run the [`init labels`](/api/cli#init-labels) command. If no labels are provided, the `get_examples` callback is used to extract the labels from the data, which may be a lot slower. ~~Optional[Iterable[str]]~~ |
diff --git a/website/docs/api/tok2vec.md b/website/docs/api/tok2vec.md
index 70c352b4d..2dcb1a013 100644
--- a/website/docs/api/tok2vec.md
+++ b/website/docs/api/tok2vec.md
@@ -127,10 +127,10 @@ and [`set_annotations`](/api/tok2vec#set_annotations) methods.
Initialize the component for training and return an
[`Optimizer`](https://thinc.ai/docs/api-optimizers). `get_examples` should be a
-function that returns an iterable of [`Example`](/api/example) objects. The data
-examples are used to **initialize the model** of the component and can either be
-the full training data or a representative sample. Initialization includes
-validating the network,
+function that returns an iterable of [`Example`](/api/example) objects. **At
+least one example should be supplied.** The data examples are used to
+**initialize the model** of the component and can either be the full training
+data or a representative sample. Initialization includes validating the network,
[inferring missing shapes](https://thinc.ai/docs/usage-models#validation) and
setting up the label scheme based on the data. This method is typically called
by [`Language.initialize`](/api/language#initialize).
@@ -139,14 +139,14 @@ by [`Language.initialize`](/api/language#initialize).
>
> ```python
> tok2vec = nlp.add_pipe("tok2vec")
-> tok2vec.initialize(lambda: [], nlp=nlp)
+> tok2vec.initialize(lambda: examples, nlp=nlp)
> ```
-| Name | Description |
-| -------------- | ------------------------------------------------------------------------------------------------------------------------------------- |
-| `get_examples` | Function that returns gold-standard annotations in the form of [`Example`](/api/example) objects. ~~Callable[[], Iterable[Example]]~~ |
-| _keyword-only_ | |
-| `nlp` | The current `nlp` object. Defaults to `None`. ~~Optional[Language]~~ |
+| Name | Description |
+| -------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `get_examples` | Function that returns gold-standard annotations in the form of [`Example`](/api/example) objects. Must contain at least one `Example`. ~~Callable[[], Iterable[Example]]~~ |
+| _keyword-only_ | |
+| `nlp` | The current `nlp` object. Defaults to `None`. ~~Optional[Language]~~ |
## Tok2Vec.predict {#predict tag="method"}
diff --git a/website/docs/api/token.md b/website/docs/api/token.md
index d43cd3ff1..73447e4d3 100644
--- a/website/docs/api/token.md
+++ b/website/docs/api/token.md
@@ -425,8 +425,8 @@ The L2 norm of the token's vector representation.
| `ent_iob_` | IOB code of named entity tag. "B" means the token begins an entity, "I" means it is inside an entity, "O" means it is outside an entity, and "" means no entity tag is set. ~~str~~ |
| `ent_kb_id` 2.2 | Knowledge base ID that refers to the named entity this token is a part of, if any. ~~int~~ |
| `ent_kb_id_` 2.2 | Knowledge base ID that refers to the named entity this token is a part of, if any. ~~str~~ |
-| `ent_id` | ID of the entity the token is an instance of, if any. Currently not used, but potentially for coreference resolution. ~~int~~ |
-| `ent_id_` | ID of the entity the token is an instance of, if any. Currently not used, but potentially for coreference resolution. ~~str~~ |
+| `ent_id` | ID of the entity the token is an instance of, if any. ~~int~~ |
+| `ent_id_` | ID of the entity the token is an instance of, if any. ~~str~~ |
| `lemma` | Base form of the token, with no inflectional suffixes. ~~int~~ |
| `lemma_` | Base form of the token, with no inflectional suffixes. ~~str~~ |
| `norm` | The token's norm, i.e. a normalized form of the token text. Can be set in the language's [tokenizer exceptions](/usage/linguistic-features#language-data). ~~int~~ |
diff --git a/website/docs/api/top-level.md b/website/docs/api/top-level.md
index 889c6437c..220b2d6e9 100644
--- a/website/docs/api/top-level.md
+++ b/website/docs/api/top-level.md
@@ -45,15 +45,16 @@ specified separately using the new `exclude` keyword argument.
> nlp = spacy.load("en_core_web_sm", exclude=["parser", "tagger"])
> ```
-| Name | Description |
-| ------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `name` | Pipeline to load, i.e. package name or path. ~~Union[str, Path]~~ |
-| _keyword-only_ | |
-| `vocab` | Optional shared vocab to pass in on initialization. If `True` (default), a new `Vocab` object will be created. ~~Union[Vocab, bool]~~ |
-| `disable` | Names of pipeline components to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [nlp.enable_pipe](/api/language#enable_pipe). ~~List[str]~~ |
-| `exclude` 3 | Names of pipeline components to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~List[str]~~ |
-| `config` 3 | Optional config overrides, either as nested dict or dict keyed by section value in dot notation, e.g. `"components.name.value"`. ~~Union[Dict[str, Any], Config]~~ |
-| **RETURNS** | A `Language` object with the loaded pipeline. ~~Language~~ |
+| Name | Description |
+| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| `name` | Pipeline to load, i.e. package name or path. ~~Union[str, Path]~~ |
+| _keyword-only_ | |
+| `vocab` | Optional shared vocab to pass in on initialization. If `True` (default), a new `Vocab` object will be created. ~~Union[Vocab, bool]~~ |
+| `disable` | Name(s) of pipeline component(s) to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [nlp.enable_pipe](/api/language#enable_pipe). ~~Union[str, Iterable[str]]~~ |
+| `enable` 3.4 | Name(s) of pipeline component(s) to [enable](/usage/processing-pipelines#disabling). All other pipes will be disabled. ~~Union[str, Iterable[str]]~~ |
+| `exclude` 3 | Name(s) of pipeline component(s) to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~Union[str, Iterable[str]]~~ |
+| `config` 3 | Optional config overrides, either as nested dict or dict keyed by section value in dot notation, e.g. `"components.name.value"`. ~~Union[Dict[str, Any], Config]~~ |
+| **RETURNS** | A `Language` object with the loaded pipeline. ~~Language~~ |
Essentially, `spacy.load()` is a convenience wrapper that reads the pipeline's
[`config.cfg`](/api/data-formats#config), uses the language and pipeline
@@ -239,7 +240,7 @@ browser. Will run a simple web server.
| Name | Description |
| --------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `docs` | Document(s) or span(s) to visualize. ~~Union[Iterable[Union[Doc, Span]], Doc, Span]~~ |
-| `style` | Visualization style, `"dep"`, `"ent"` or `"span"` 3.3. Defaults to `"dep"`. ~~str~~ |
+| `style` | Visualization style, `"dep"`, `"ent"` or `"span"` 3.3. Defaults to `"dep"`. ~~str~~ |
| `page` | Render markup as full HTML page. Defaults to `True`. ~~bool~~ |
| `minify` | Minify HTML markup. Defaults to `False`. ~~bool~~ |
| `options` | [Visualizer-specific options](#displacy_options), e.g. colors. ~~Dict[str, Any]~~ |
@@ -264,7 +265,7 @@ Render a dependency parse tree or named entity visualization.
| Name | Description |
| ----------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `docs` | Document(s) or span(s) to visualize. ~~Union[Iterable[Union[Doc, Span, dict]], Doc, Span, dict]~~ |
-| `style` | Visualization style,`"dep"`, `"ent"` or `"span"` 3.3. Defaults to `"dep"`. ~~str~~ |
+| `style` | Visualization style, `"dep"`, `"ent"` or `"span"` 3.3. Defaults to `"dep"`. ~~str~~ |
| `page` | Render markup as full HTML page. Defaults to `True`. ~~bool~~ |
| `minify` | Minify HTML markup. Defaults to `False`. ~~bool~~ |
| `options` | [Visualizer-specific options](#displacy_options), e.g. colors. ~~Dict[str, Any]~~ |
@@ -272,6 +273,73 @@ Render a dependency parse tree or named entity visualization.
| `jupyter` | Explicitly enable or disable "[Jupyter](http://jupyter.org/) mode" to return markup ready to be rendered in a notebook. Detected automatically if `None` (default). ~~Optional[bool]~~ |
| **RETURNS** | The rendered HTML markup. ~~str~~ |
+### displacy.parse_deps {#displacy.parse_deps tag="method" new="2"}
+
+Generate dependency parse in `{'words': [], 'arcs': []}` format. For use with
+the `manual=True` argument in `displacy.render`.
+
+> #### Example
+>
+> ```python
+> import spacy
+> from spacy import displacy
+> nlp = spacy.load("en_core_web_sm")
+> doc = nlp("This is a sentence.")
+> deps_parse = displacy.parse_deps(doc)
+> html = displacy.render(deps_parse, style="dep", manual=True)
+> ```
+
+| Name | Description |
+| ----------- | ------------------------------------------------------------------- |
+| `orig_doc` | Doc to parse dependencies. ~~Doc~~ |
+| `options` | Dependency parse specific visualisation options. ~~Dict[str, Any]~~ |
+| **RETURNS** | Generated dependency parse keyed by words and arcs. ~~dict~~ |
+
+### displacy.parse_ents {#displacy.parse_ents tag="method" new="2"}
+
+Generate named entities in `[{start: i, end: i, label: 'label'}]` format. For
+use with the `manual=True` argument in `displacy.render`.
+
+> #### Example
+>
+> ```python
+> import spacy
+> from spacy import displacy
+> nlp = spacy.load("en_core_web_sm")
+> doc = nlp("But Google is starting from behind.")
+> ents_parse = displacy.parse_ents(doc)
+> html = displacy.render(ents_parse, style="ent", manual=True)
+> ```
+
+| Name | Description |
+| ----------- | ------------------------------------------------------------------- |
+| `doc` | Doc to parse entities. ~~Doc~~ |
+| `options` | NER-specific visualisation options. ~~Dict[str, Any]~~ |
+| **RETURNS** | Generated entities keyed by text (original text) and ents. ~~dict~~ |
+
+### displacy.parse_spans {#displacy.parse_spans tag="method" new="2"}
+
+Generate spans in `[{start_token: i, end_token: i, label: 'label'}]` format. For
+use with the `manual=True` argument in `displacy.render`.
+
+> #### Example
+>
+> ```python
+> import spacy
+> from spacy import displacy
+> nlp = spacy.load("en_core_web_sm")
+> doc = nlp("But Google is starting from behind.")
+> doc.spans["orgs"] = [doc[1:2]]
+> spans_parse = displacy.parse_spans(doc, options={"spans_key": "orgs"})
+> html = displacy.render(spans_parse, style="span", manual=True)
+> ```
+
+| Name | Description |
+| ----------- | ------------------------------------------------------------------- |
+| `doc`       | Doc to parse spans. ~~Doc~~                                          |
+| `options`   | Span-specific visualisation options. ~~Dict[str, Any]~~             |
+| **RETURNS** | Generated spans keyed by text (original text) and spans. ~~dict~~   |
+
### Visualizer options {#displacy_options}
The `options` argument lets you specify additional settings for each visualizer.
@@ -383,7 +451,7 @@ factories.
| Registry name | Description |
| ----------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `architectures` | Registry for functions that create [model architectures](/api/architectures). Can be used to register custom model architectures and reference them in the `config.cfg`. |
-| `augmenters` | Registry for functions that create [data augmentation](#augmenters) callbacks for corpora and other training data iterators. |
+| `augmenters` | Registry for functions that create [data augmentation](#augmenters) callbacks for corpora and other training data iterators. |
| `batchers` | Registry for training and evaluation [data batchers](#batchers). |
| `callbacks` | Registry for custom callbacks to [modify the `nlp` object](/usage/training#custom-code-nlp-callbacks) before training. |
| `displacy_colors` | Registry for custom color scheme for the [`displacy` NER visualizer](/usage/visualizers). Automatically reads from [entry points](/usage/saving-loading#entry-points). |
@@ -437,7 +505,7 @@ finished. To log each training step, a
and the accuracy scores on the development set.
The built-in, default logger is the ConsoleLogger, which prints results to the
-console in tabular format. The
+console in tabular format and saves them to a `jsonl` file. The
[spacy-loggers](https://github.com/explosion/spacy-loggers) package, included as
a dependency of spaCy, enables other loggers, such as one that sends results to
a [Weights & Biases](https://www.wandb.com/) dashboard.
@@ -445,16 +513,20 @@ a [Weights & Biases](https://www.wandb.com/) dashboard.
Instead of using one of the built-in loggers, you can
[implement your own](/usage/training#custom-logging).
-#### spacy.ConsoleLogger.v1 {#ConsoleLogger tag="registered function"}
+#### spacy.ConsoleLogger.v2 {#ConsoleLogger tag="registered function"}
> #### Example config
>
> ```ini
> [training.logger]
-> @loggers = "spacy.ConsoleLogger.v1"
+> @loggers = "spacy.ConsoleLogger.v2"
+> progress_bar = true
+> console_output = true
+> output_file = "training_log.jsonl"
> ```
-Writes the results of a training step to the console in a tabular format.
+Writes the results of a training step to the console in a tabular format and
+saves them to a `jsonl` file.
@@ -468,22 +540,23 @@ $ python -m spacy train config.cfg
ℹ Pipeline: ['tok2vec', 'tagger']
ℹ Start training
ℹ Training. Initial learn rate: 0.0
+ℹ Saving results to training_log.jsonl
E # LOSS TOK2VEC LOSS TAGGER TAG_ACC SCORE
--- ------ ------------ ----------- ------- ------
- 1 0 0.00 86.20 0.22 0.00
- 1 200 3.08 18968.78 34.00 0.34
- 1 400 31.81 22539.06 33.64 0.34
- 1 600 92.13 22794.91 43.80 0.44
- 1 800 183.62 21541.39 56.05 0.56
- 1 1000 352.49 25461.82 65.15 0.65
- 1 1200 422.87 23708.82 71.84 0.72
- 1 1400 601.92 24994.79 76.57 0.77
- 1 1600 662.57 22268.02 80.20 0.80
- 1 1800 1101.50 28413.77 82.56 0.83
- 1 2000 1253.43 28736.36 85.00 0.85
- 1 2200 1411.02 28237.53 87.42 0.87
- 1 2400 1605.35 28439.95 88.70 0.89
+ 0 0 0.00 86.20 0.22 0.00
+ 0 200 3.08 18968.78 34.00 0.34
+ 0 400 31.81 22539.06 33.64 0.34
+ 0 600 92.13 22794.91 43.80 0.44
+ 0 800 183.62 21541.39 56.05 0.56
+ 0 1000 352.49 25461.82 65.15 0.65
+ 0 1200 422.87 23708.82 71.84 0.72
+ 0 1400 601.92 24994.79 76.57 0.77
+ 0 1600 662.57 22268.02 80.20 0.80
+ 0 1800 1101.50 28413.77 82.56 0.83
+ 0 2000 1253.43 28736.36 85.00 0.85
+ 0 2200 1411.02 28237.53 87.42 0.87
+ 0 2400 1605.35 28439.95 88.70 0.89
```
Note that the cumulative loss keeps increasing within one epoch, but should
@@ -491,6 +564,12 @@ start decreasing across epochs.
+| Name | Description |
+| ---------------- | --------------------------------------------------------------------- |
+| `progress_bar`   | Whether the logger should print the progress bar. ~~bool~~            |
+| `console_output` | Whether the logger should print the logs to the console. ~~bool~~     |
+| `output_file` | The file to save the training logs to. ~~Optional[Union[str, Path]]~~ |
+
## Readers {#readers}
### File readers {#file-readers source="github.com/explosion/srsly" new="3"}
@@ -970,15 +1049,16 @@ and create a `Language` object. The model data will then be loaded in via
> nlp = util.load_model("/path/to/data")
> ```
-| Name | Description |
-| ------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
-| `name` | Package name or path. ~~str~~ |
-| _keyword-only_ | |
-| `vocab` | Optional shared vocab to pass in on initialization. If `True` (default), a new `Vocab` object will be created. ~~Union[Vocab, bool]~~ |
-| `disable` | Names of pipeline components to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [`nlp.enable_pipe`](/api/language#enable_pipe). ~~List[str]~~ |
-| `exclude` 3 | Names of pipeline components to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~List[str]~~ |
-| `config` 3 | Config overrides as nested dict or flat dict keyed by section values in dot notation, e.g. `"nlp.pipeline"`. ~~Union[Dict[str, Any], Config]~~ |
-| **RETURNS** | `Language` class with the loaded pipeline. ~~Language~~ |
+| Name | Description |
+| ------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `name` | Package name or path. ~~str~~ |
+| _keyword-only_ | |
+| `vocab` | Optional shared vocab to pass in on initialization. If `True` (default), a new `Vocab` object will be created. ~~Union[Vocab, bool]~~ |
+| `disable` | Name(s) of pipeline component(s) to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [`nlp.enable_pipe`](/api/language#enable_pipe). ~~Union[str, Iterable[str]]~~ |
+| `enable` 3.4 | Name(s) of pipeline component(s) to [enable](/usage/processing-pipelines#disabling). All other pipes will be disabled, but can be enabled again using [`nlp.enable_pipe`](/api/language#enable_pipe). ~~Union[str, Iterable[str]]~~ |
+| `exclude` | Name(s) of pipeline component(s) to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~Union[str, Iterable[str]]~~ |
+| `config` 3 | Config overrides as nested dict or flat dict keyed by section values in dot notation, e.g. `"nlp.pipeline"`. ~~Union[Dict[str, Any], Config]~~ |
+| **RETURNS** | `Language` class with the loaded pipeline. ~~Language~~ |
### util.load_model_from_init_py {#util.load_model_from_init_py tag="function" new="2"}
@@ -994,15 +1074,16 @@ A helper function to use in the `load()` method of a pipeline package's
> return load_model_from_init_py(__file__, **overrides)
> ```
-| Name | Description |
-| ------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `init_file` | Path to package's `__init__.py`, i.e. `__file__`. ~~Union[str, Path]~~ |
-| _keyword-only_ | |
-| `vocab` 3 | Optional shared vocab to pass in on initialization. If `True` (default), a new `Vocab` object will be created. ~~Union[Vocab, bool]~~ |
-| `disable` | Names of pipeline components to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [nlp.enable_pipe](/api/language#enable_pipe). ~~List[str]~~ |
-| `exclude` 3 | Names of pipeline components to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~List[str]~~ |
-| `config` 3 | Config overrides as nested dict or flat dict keyed by section values in dot notation, e.g. `"nlp.pipeline"`. ~~Union[Dict[str, Any], Config]~~ |
-| **RETURNS** | `Language` class with the loaded pipeline. ~~Language~~ |
+| Name | Description |
+| ------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `init_file` | Path to package's `__init__.py`, i.e. `__file__`. ~~Union[str, Path]~~ |
+| _keyword-only_ | |
+| `vocab` 3 | Optional shared vocab to pass in on initialization. If `True` (default), a new `Vocab` object will be created. ~~Union[Vocab, bool]~~ |
+| `disable` | Name(s) of pipeline component(s) to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [`nlp.enable_pipe`](/api/language#enable_pipe). ~~Union[str, Iterable[str]]~~ |
+| `enable` 3.4 | Name(s) of pipeline component(s) to [enable](/usage/processing-pipelines#disabling). All other pipes will be disabled, but can be enabled again using [`nlp.enable_pipe`](/api/language#enable_pipe). ~~Union[str, Iterable[str]]~~ |
+| `exclude` 3 | Name(s) of pipeline component(s) to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~Union[str, Iterable[str]]~~ |
+| `config` 3 | Config overrides as nested dict or flat dict keyed by section values in dot notation, e.g. `"nlp.pipeline"`. ~~Union[Dict[str, Any], Config]~~ |
+| **RETURNS** | `Language` class with the loaded pipeline. ~~Language~~ |
### util.load_config {#util.load_config tag="function" new="3"}
diff --git a/website/docs/api/transformer.md b/website/docs/api/transformer.md
index b1673cdbe..e747ad383 100644
--- a/website/docs/api/transformer.md
+++ b/website/docs/api/transformer.md
@@ -175,10 +175,10 @@ applied to the `Doc` in order. Both [`__call__`](/api/transformer#call) and
Initialize the component for training and return an
[`Optimizer`](https://thinc.ai/docs/api-optimizers). `get_examples` should be a
-function that returns an iterable of [`Example`](/api/example) objects. The data
-examples are used to **initialize the model** of the component and can either be
-the full training data or a representative sample. Initialization includes
-validating the network,
+function that returns an iterable of [`Example`](/api/example) objects. **At
+least one example should be supplied.** The data examples are used to
+**initialize the model** of the component and can either be the full training
+data or a representative sample. Initialization includes validating the network,
[inferring missing shapes](https://thinc.ai/docs/usage-models#validation) and
setting up the label scheme based on the data. This method is typically called
by [`Language.initialize`](/api/language#initialize).
@@ -187,14 +187,14 @@ by [`Language.initialize`](/api/language#initialize).
>
> ```python
> trf = nlp.add_pipe("transformer")
-> trf.initialize(lambda: iter([]), nlp=nlp)
+> trf.initialize(lambda: examples, nlp=nlp)
> ```
-| Name | Description |
-| -------------- | ------------------------------------------------------------------------------------------------------------------------------------- |
-| `get_examples` | Function that returns gold-standard annotations in the form of [`Example`](/api/example) objects. ~~Callable[[], Iterable[Example]]~~ |
-| _keyword-only_ | |
-| `nlp` | The current `nlp` object. Defaults to `None`. ~~Optional[Language]~~ |
+| Name | Description |
+| -------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `get_examples` | Function that returns gold-standard annotations in the form of [`Example`](/api/example) objects. Must contain at least one `Example`. ~~Callable[[], Iterable[Example]]~~ |
+| _keyword-only_ | |
+| `nlp` | The current `nlp` object. Defaults to `None`. ~~Optional[Language]~~ |
## Transformer.predict {#predict tag="method"}
diff --git a/website/docs/usage/index.md b/website/docs/usage/index.md
index d2aa08d73..1f4869606 100644
--- a/website/docs/usage/index.md
+++ b/website/docs/usage/index.md
@@ -130,8 +130,8 @@ grateful to use the work of Chainer's [CuPy](https://cupy.chainer.org) module,
which provides a numpy-compatible interface for GPU arrays.
spaCy can be installed for a CUDA-compatible GPU by specifying `spacy[cuda]`,
-`spacy[cuda102]`, `spacy[cuda112]`, `spacy[cuda113]`, etc. If you know your
-CUDA version, using the more explicit specifier allows CuPy to be installed via
+`spacy[cuda102]`, `spacy[cuda112]`, `spacy[cuda113]`, etc. If you know your CUDA
+version, using the more explicit specifier allows CuPy to be installed via
wheel, saving some compilation time. The specifiers should install
[`cupy`](https://cupy.chainer.org).
@@ -195,29 +195,73 @@ How to install compilers and related build tools:
[Visual Studio Express](https://www.visualstudio.com/vs/visual-studio-express/)
that matches the version that was used to compile your Python interpreter.
+#### Using build constraints when compiling from source
+
+If you install spaCy from source or with `pip` for platforms where there are no
+binary wheels on PyPI, you may need to use build constraints if any package in
+your environment requires an older version of `numpy`.
+
+If `numpy` gets downgraded from the most recent release at any point after
+you've compiled `spacy`, you might see an error that looks like this:
+
+```none
+numpy.ndarray size changed, may indicate binary incompatibility.
+```
+
+To fix this, create a new virtual environment and install `spacy` and all of its
+dependencies using build constraints.
+[Build constraints](https://pip.pypa.io/en/stable/user_guide/#constraints-files)
+specify an older version of `numpy` that is only used while compiling `spacy`,
+and then your runtime environment can use any newer version of `numpy` and still
+be compatible. In addition, use `--no-cache-dir` to ignore any previously cached
+wheels so that all relevant packages are recompiled from scratch:
+
+```shell
+PIP_CONSTRAINT=https://raw.githubusercontent.com/explosion/spacy/master/build-constraints.txt \
+pip install spacy --no-cache-dir
+```
+
+Our build constraints currently specify the oldest supported `numpy` available
+on PyPI for `x86_64` and `aarch64`. Depending on your platform and environment,
+you may want to customize the specific versions of `numpy`. For other platforms,
+you can have a look at SciPy's
+[`oldest-supported-numpy`](https://github.com/scipy/oldest-supported-numpy/blob/main/setup.cfg)
+package to see what the oldest recommended versions of `numpy` are.
+
+(_Warning_: don't use `pip install -c constraints.txt` instead of
+`PIP_CONSTRAINT`, since the `-c` option isn't applied to the isolated build
+environments.)
+
#### Additional options for developers {#source-developers}
Some additional options may be useful for spaCy developers who are editing the
source code and recompiling frequently.
-- Install in editable mode. Changes to `.py` files will be reflected as soon as
- the files are saved, but edits to Cython files (`.pxd`, `.pyx`) will require
- the `pip install` or `python setup.py build_ext` command below to be run
- again. Before installing in editable mode, be sure you have removed any
- previous installs with `pip uninstall spacy`, which you may need to run
- multiple times to remove all traces of earlier installs.
+- Install in editable mode. Changes to `.py` files will be reflected as soon
+ as the files are saved, but edits to Cython files (`.pxd`, `.pyx`) will
+ require the `pip install` command below to be run again. Before installing in
+ editable mode, be sure you have removed any previous installs with
+ `pip uninstall spacy`, which you may need to run multiple times to remove all
+ traces of earlier installs.
```bash
$ pip install -r requirements.txt
$ pip install --no-build-isolation --editable .
```
-- Build in parallel using `N` CPUs to speed up compilation and then install in
- editable mode:
+- Build in parallel. Starting in v3.4.0, you can specify the number of
+ build jobs with the environment variable `SPACY_NUM_BUILD_JOBS`:
```bash
$ pip install -r requirements.txt
- $ python setup.py build_ext --inplace -j N
+ $ SPACY_NUM_BUILD_JOBS=4 pip install --no-build-isolation --editable .
+ ```
+
+- For editable mode and parallel builds with `python setup.py` instead of `pip`
+ (no longer recommended):
+
+ ```bash
+ $ pip install -r requirements.txt
+ $ python setup.py build_ext --inplace -j 4
$ python setup.py develop
```
diff --git a/website/docs/usage/linguistic-features.md b/website/docs/usage/linguistic-features.md
index c547ec0bc..82472c67e 100644
--- a/website/docs/usage/linguistic-features.md
+++ b/website/docs/usage/linguistic-features.md
@@ -11,8 +11,8 @@ menu:
- ['Tokenization', 'tokenization']
- ['Merging & Splitting', 'retokenization']
- ['Sentence Segmentation', 'sbd']
- - ['Vectors & Similarity', 'vectors-similarity']
- ['Mappings & Exceptions', 'mappings-exceptions']
+ - ['Vectors & Similarity', 'vectors-similarity']
- ['Language Data', 'language-data']
---
@@ -1899,7 +1899,7 @@ access to some nice Latin vectors. You can then pass the directory path to
> ```
```cli
-$ wget https://s3-us-west-1.amazonaws.com/fasttext-vectors/word-vectors-v2/cc.la.300.vec.gz
+$ wget https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.la.300.vec.gz
$ python -m spacy init vectors en cc.la.300.vec.gz /tmp/la_vectors_wiki_lg
```
diff --git a/website/docs/usage/models.md b/website/docs/usage/models.md
index 56992e7e3..3b1558bd8 100644
--- a/website/docs/usage/models.md
+++ b/website/docs/usage/models.md
@@ -268,18 +268,49 @@ used for training the current [Japanese pipelines](/models/ja).
### Korean language support {#korean}
-> #### mecab-ko tokenizer
+There are currently three built-in options for Korean tokenization, two based on
+[mecab-ko](https://bitbucket.org/eunjeon/mecab-ko/src/master/README.md) and one
+using the rule-based tokenizer.
+
+> #### Default mecab-ko tokenizer
>
> ```python
+> # uses mecab-ko-dic
> nlp = spacy.blank("ko")
+>
+> # with custom mecab args
+> mecab_args = "-d /path/to/dicdir -u /path/to/userdic"
+> config = {"nlp": {"tokenizer": {"mecab_args": mecab_args}}}
+> nlp = spacy.blank("ko", config=config)
> ```
-The default MeCab-based Korean tokenizer requires:
+The default MeCab-based Korean tokenizer requires the Python package
+[`mecab-ko`](https://pypi.org/project/mecab-ko/) and no further system
+requirements.
+
+The `natto-py` MeCab-based tokenizer (the previous default for spaCy v3.4 and
+earlier) is available as `spacy.KoreanNattoTokenizer.v1`. It requires:
- [mecab-ko](https://bitbucket.org/eunjeon/mecab-ko/src/master/README.md)
- [mecab-ko-dic](https://bitbucket.org/eunjeon/mecab-ko-dic)
- [natto-py](https://github.com/buruzaemon/natto-py)
+To use this tokenizer, edit `[nlp.tokenizer]` in your config:
+
+> #### natto-py MeCab-ko tokenizer
+>
+> ```python
+> config = {"nlp": {"tokenizer": {"@tokenizers": "spacy.KoreanNattoTokenizer.v1"}}}
+> nlp = spacy.blank("ko", config=config)
+> ```
+
+```ini
+### config.cfg
+[nlp]
+lang = "ko"
+tokenizer = {"@tokenizers" = "spacy.KoreanNattoTokenizer.v1"}
+```
+
For some Korean datasets and tasks, the
[rule-based tokenizer](/usage/linguistic-features#tokenization) is better-suited
than MeCab. To configure a Korean pipeline with the rule-based tokenizer:
@@ -365,15 +396,32 @@ pipeline package can be found.
To download a trained pipeline directly using
[pip](https://pypi.python.org/pypi/pip), point `pip install` to the URL or local
path of the wheel file or archive. Installing the wheel is usually more
-efficient. To find the direct link to a package, head over to the
-[releases](https://github.com/explosion/spacy-models/releases), right click on
-the archive link and copy it to your clipboard.
+efficient.
+
+> #### Pipeline Package URLs {#pipeline-urls}
+>
+> Pretrained pipeline distributions are hosted on
+> [GitHub Releases](https://github.com/explosion/spacy-models/releases), and you
+> can find download links there, as well as on the model page. You can also get
+> URLs directly from the command line by using `spacy info` with the `--url`
+> flag, which may be useful for automation.
+>
+> ```bash
+> spacy info en_core_web_sm --url
+> ```
+>
+> This command will print the URL for the latest version of a pipeline
+> compatible with the version of spaCy you're using. Note that in order to look
+> up the compatibility information an internet connection is required.
```bash
# With external URL
$ pip install https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.0.0/en_core_web_sm-3.0.0-py3-none-any.whl
$ pip install https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.0.0/en_core_web_sm-3.0.0.tar.gz
+# Using spacy info to get the external URL
+$ pip install $(spacy info en_core_web_sm --url)
+
# With local file
$ pip install /Users/you/en_core_web_sm-3.0.0-py3-none-any.whl
$ pip install /Users/you/en_core_web_sm-3.0.0.tar.gz
@@ -514,21 +562,16 @@ should be specifying them directly.
Because pipeline packages are valid Python packages, you can add them to your
application's `requirements.txt`. If you're running your own internal PyPi
installation, you can upload the pipeline packages there. pip's
-[requirements file format](https://pip.pypa.io/en/latest/reference/pip_install/#requirements-file-format)
-supports both package names to download via a PyPi server, as well as direct
-URLs.
+[requirements file format](https://pip.pypa.io/en/latest/reference/requirements-file-format/)
+supports both package names to download via a PyPI server and
+[direct URLs](#pipeline-urls).
```text
### requirements.txt
spacy>=3.0.0,<4.0.0
-https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.0.0/en_core_web_sm-3.0.0.tar.gz#egg=en_core_web_sm
+en_core_web_sm @ https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.4.0/en_core_web_sm-3.4.0-py3-none-any.whl
```
-Specifying `#egg=` with the package name tells pip which package to expect from
-the download URL. This way, the package won't be re-downloaded and overwritten
-if it's already installed - just like when you're downloading a package from
-PyPi.
-
All pipeline packages are versioned and specify their spaCy dependency. This
ensures cross-compatibility and lets you specify exact version requirements for
each pipeline. If you've [trained](/usage/training) your own pipeline, you can
diff --git a/website/docs/usage/processing-pipelines.md b/website/docs/usage/processing-pipelines.md
index 4f75b5193..bd28810ae 100644
--- a/website/docs/usage/processing-pipelines.md
+++ b/website/docs/usage/processing-pipelines.md
@@ -362,6 +362,18 @@ nlp = spacy.load("en_core_web_sm", disable=["tagger", "parser"])
nlp.enable_pipe("tagger")
```
+In addition to `disable`, `spacy.load()` also accepts `enable`. If `enable` is
+set, all components except for those in `enable` are disabled.
+
+```python
+# Load the complete pipeline, but disable all components except for tok2vec and tagger
+nlp = spacy.load("en_core_web_sm", enable=["tok2vec", "tagger"])
+# Has the same effect, as NER is not in the enabled set of components anyway
+nlp = spacy.load("en_core_web_sm", enable=["tok2vec", "tagger"], disable=["ner"])
+# Will raise an error, as the sets of enabled and disabled components are conflicting
+nlp = spacy.load("en_core_web_sm", enable=["ner"], disable=["ner"])
+```
+
As of v3.0, the `disable` keyword argument specifies components to load but
diff --git a/website/docs/usage/rule-based-matching.md b/website/docs/usage/rule-based-matching.md
index e4ba4b2af..bf1891df1 100644
--- a/website/docs/usage/rule-based-matching.md
+++ b/website/docs/usage/rule-based-matching.md
@@ -374,12 +374,16 @@ punctuation marks, or specify optional tokens. Note that there are no nested or
scoped quantifiers – instead, you can build those behaviors with `on_match`
callbacks.
-| OP | Description |
-| --- | ---------------------------------------------------------------- |
-| `!` | Negate the pattern, by requiring it to match exactly 0 times. |
-| `?` | Make the pattern optional, by allowing it to match 0 or 1 times. |
-| `+` | Require the pattern to match 1 or more times. |
-| `*` | Allow the pattern to match zero or more times. |
+| OP | Description |
+|---------|------------------------------------------------------------------------|
+| `!` | Negate the pattern, by requiring it to match exactly 0 times. |
+| `?` | Make the pattern optional, by allowing it to match 0 or 1 times. |
+| `+` | Require the pattern to match 1 or more times. |
+| `*` | Allow the pattern to match zero or more times. |
+| `{n}` | Require the pattern to match exactly _n_ times. |
+| `{n,m}` | Require the pattern to match at least _n_ but not more than _m_ times. |
+| `{n,}` | Require the pattern to match at least _n_ times. |
+| `{,m}` | Require the pattern to match at most _m_ times. |
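+
+As a quick, illustrative sketch of the new min/max operator (the pattern and
+example text here are hypothetical, not taken from the table above):
+
+```python
+import spacy
+from spacy.matcher import Matcher
+
+nlp = spacy.blank("en")
+matcher = Matcher(nlp.vocab)
+# Match "very" repeated one to three times, followed by "happy"
+pattern = [{"LOWER": "very", "OP": "{1,3}"}, {"LOWER": "happy"}]
+matcher.add("VERY_HAPPY", [pattern])
+doc = nlp("I am very very happy today.")
+print([doc[start:end].text for _, start, end in matcher(doc)])
+```
+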
> #### Example
>
@@ -1363,14 +1367,14 @@ patterns = [{"label": "ORG", "pattern": "Apple", "id": "apple"},
ruler.add_patterns(patterns)
doc1 = nlp("Apple is opening its first big office in San Francisco.")
-print([(ent.text, ent.label_, ent.ent_id_) for ent in doc1.ents])
+print([(ent.text, ent.label_, ent.id_) for ent in doc1.ents])
doc2 = nlp("Apple is opening its first big office in San Fran.")
-print([(ent.text, ent.label_, ent.ent_id_) for ent in doc2.ents])
+print([(ent.text, ent.label_, ent.id_) for ent in doc2.ents])
```
If the `id` attribute is included in the [`EntityRuler`](/api/entityruler)
-patterns, the `ent_id_` property of the matched entity is set to the `id` given
+patterns, the `id_` property of the matched entity is set to the `id` given
in the patterns. So in the example above it's easy to identify that "San
Francisco" and "San Fran" are both the same entity.
diff --git a/website/docs/usage/saving-loading.md b/website/docs/usage/saving-loading.md
index af140e7a7..9a4b584a3 100644
--- a/website/docs/usage/saving-loading.md
+++ b/website/docs/usage/saving-loading.md
@@ -195,7 +195,7 @@ the data to and from a JSON file.
>
> To see custom serialization methods in action, check out the new
> [`EntityRuler`](/api/entityruler) component and its
-> [source](%%GITHUB_SPACY/spacy/pipeline/entityruler.py). Patterns added to the
+> [source](%%GITHUB_SPACY/spacy/pipeline/entity_ruler.py). Patterns added to the
> component will be saved to a `.jsonl` file if the pipeline is serialized to
> disk, and to a bytestring if the pipeline is serialized to bytes. This allows
> saving out a pipeline with a rule-based entity recognizer and including all
@@ -203,11 +203,14 @@ the data to and from a JSON file.
```python
### {highlight="16-23,25-30"}
+import json
+from spacy import Language
from spacy.util import ensure_path
@Language.factory("my_component")
class CustomComponent:
- def __init__(self):
+ def __init__(self, nlp: Language, name: str = "my_component"):
+ self.name = name
self.data = []
def __call__(self, doc):
@@ -231,7 +234,7 @@ class CustomComponent:
# This will receive the directory path + /my_component
data_path = path / "data.json"
with data_path.open("r", encoding="utf8") as f:
- self.data = json.loads(f)
+ self.data = json.load(f)
return self
```
diff --git a/website/docs/usage/v3-4.md b/website/docs/usage/v3-4.md
new file mode 100644
index 000000000..7cc4570d5
--- /dev/null
+++ b/website/docs/usage/v3-4.md
@@ -0,0 +1,143 @@
+---
+title: What's New in v3.4
+teaser: New features and how to upgrade
+menu:
+ - ['New Features', 'features']
+ - ['Upgrading Notes', 'upgrading']
+---
+
+## New features {#features hidden="true"}
+
+spaCy v3.4 brings typing and speed improvements along with new vectors for
+English CNN pipelines and new trained pipelines for Croatian. This release also
+includes prebuilt Linux aarch64 wheels for all spaCy dependencies distributed by
+Explosion.
+
+### Typing improvements {#typing}
+
+spaCy v3.4 supports pydantic v1.9 and mypy 0.950+ through extensive updates to
+types in Thinc v8.1.
+
+### Speed improvements {#speed}
+
+- For the parser, use C `saxpy`/`sgemm` provided by the `Ops` implementation in
+ order to use Accelerate through `thinc-apple-ops`.
+- Improved speed of vector lookups.
+- Improved speed for `Example.get_aligned_parse` and `Example.get_aligned`.
+
+## Additional features and improvements
+
+- Min/max `{n,m}` operator for `Matcher` patterns.
+- Language updates:
+ - Improve tokenization for Cyrillic combining diacritics.
+ - Improve English tokenizer exceptions for contractions with
+ this/that/these/those.
+- Updated `spacy project clone` to try both `main` and `master` branches by
+ default.
+- Added confidence threshold for named entity linker.
+- Improved handling of Typer optional default values for `init_config_cli`.
+- Added cycle detection in parser projectivization methods.
+- Added counts for NER labels in `debug data`.
+- Support for adding NVTX ranges to `TrainablePipe` components.
+- Support env variable `SPACY_NUM_BUILD_JOBS` to specify the number of build
+ jobs to run in parallel with `pip`.
+
+## Trained pipelines {#pipelines}
+
+### New trained pipelines {#new-pipelines}
+
+v3.4 introduces new CPU/CNN pipelines for Croatian, which use the trainable
+lemmatizer and [floret vectors](https://github.com/explosion/floret). Due to the
+use of [Bloom embeddings](https://explosion.ai/blog/bloom-embeddings) and
+subwords, the pipelines have compact vectors with no out-of-vocabulary words.
+
+| Package | UPOS | Parser LAS | NER F |
+| ----------------------------------------------- | ---: | ---------: | ----: |
+| [`hr_core_news_sm`](/models/hr#hr_core_news_sm) | 96.6 | 77.5 | 76.1 |
+| [`hr_core_news_md`](/models/hr#hr_core_news_md) | 97.3 | 80.1 | 81.8 |
+| [`hr_core_news_lg`](/models/hr#hr_core_news_lg) | 97.5 | 80.4 | 83.0 |
+
+### Pipeline updates {#pipeline-updates}
+
+All CNN pipelines have been extended with whitespace augmentation.
+
+The English CNN pipelines have new word vectors:
+
+| Package | Model Version | TAG | Parser LAS | NER F |
+| ----------------------------------------------- | ------------- | ---: | ---------: | ----: |
+| [`en_core_web_md`](/models/en#en_core_web_md)   | v3.3.0        | 97.3 | 90.1       | 84.6  |
+| [`en_core_web_md`](/models/en#en_core_web_md)   | v3.4.0        | 97.2 | 90.3       | 85.5  |
+| [`en_core_web_lg`](/models/en#en_core_web_lg)   | v3.3.0        | 97.4 | 90.1       | 85.3  |
+| [`en_core_web_lg`](/models/en#en_core_web_lg)   | v3.4.0        | 97.3 | 90.2       | 85.6  |
+
+## Notes about upgrading from v3.3 {#upgrading}
+
+### Doc.has_vector
+
+`Doc.has_vector` now matches `Token.has_vector` and `Span.has_vector`: it
+returns `True` if at least one token in the doc has a vector rather than
+checking only whether the vocab contains vectors.
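+
+A minimal sketch of the new behavior (assuming a pipeline with word vectors,
+such as `en_core_web_md`, is installed):
+
+```python
+import spacy
+
+nlp = spacy.load("en_core_web_md")
+doc = nlp("hello zxqvbn")
+# True as long as at least one token ("hello") has a vector,
+# even though "zxqvbn" is out of vocabulary
+print(doc.has_vector)
+```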
+
+### Using trained pipelines with floret vectors
+
+If you're using a trained pipeline for Croatian, Finnish, Korean or Swedish with
+new texts and working with `Doc` objects, you shouldn't notice any difference
+between floret vectors and default vectors.
+
+If you use vectors for similarity comparisons, there are a few differences,
+mainly because a floret pipeline doesn't include any kind of frequency-based
+word list similar to the list of in-vocabulary vector keys with default vectors.
+
+- If your workflow iterates over the vector keys, you should use an external
+ word list instead:
+
+ ```diff
+ - lexemes = [nlp.vocab[orth] for orth in nlp.vocab.vectors]
+ + lexemes = [nlp.vocab[word] for word in external_word_list]
+ ```
+
+- `Vectors.most_similar` is not supported because there's no fixed list of
+  vectors to compare your vectors to. One option is to compare against an
+  external word list yourself, as in the sketch below.
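+
+  A minimal sketch of that approach (the pipeline name and word list are
+  illustrative assumptions, not part of these release notes):
+
+  ```python
+  import spacy
+
+  nlp = spacy.load("hr_core_news_md")  # assumes a floret pipeline is installed
+  external_word_list = ["kuća", "stan", "cesta"]  # hypothetical word list
+  query = nlp.vocab["dom"]
+  # Look up vectors for the external words and keep the most similar one
+  sims = [(w, query.similarity(nlp.vocab[w])) for w in external_word_list]
+  print(max(sims, key=lambda item: item[1]))
+  ```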
+
+### Pipeline package version compatibility {#version-compat}
+
+> #### Using legacy implementations
+>
+> In spaCy v3, you'll still be able to load and reference legacy implementations
+> via [`spacy-legacy`](https://github.com/explosion/spacy-legacy), even if the
+> components or architectures change and newer versions are available in the
+> core library.
+
+When you're loading a pipeline package trained with an earlier version of spaCy
+v3, you will see a warning telling you that the pipeline may be incompatible.
+This doesn't necessarily have to be true, but we recommend running your
+pipelines against your test suite or evaluation data to make sure there are no
+unexpected results.
+
+If you're using one of the [trained pipelines](/models) we provide, you should
+run [`spacy download`](/api/cli#download) to update to the latest version. To
+see an overview of all installed packages and their compatibility, you can run
+[`spacy validate`](/api/cli#validate).
+
+If you've trained your own custom pipeline and you've confirmed that it's still
+working as expected, you can update the spaCy version requirements in the
+[`meta.json`](/api/data-formats#meta):
+
+```diff
+- "spacy_version": ">=3.3.0,<3.4.0",
++ "spacy_version": ">=3.3.0,<3.5.0",
+```
+
+### Updating v3.3 configs
+
+To update a config from spaCy v3.3 with the new v3.4 settings, run
+[`init fill-config`](/api/cli#init-fill-config):
+
+```cli
+$ python -m spacy init fill-config config-v3.3.cfg config-v3.4.cfg
+```
+
+In many cases ([`spacy train`](/api/cli#train),
+[`spacy.load`](/api/top-level#spacy.load)), the new defaults will be filled in
+automatically, but you'll need to fill in the new settings to run
+[`debug config`](/api/cli#debug) and [`debug data`](/api/cli#debug-data).
diff --git a/website/docs/usage/visualizers.md b/website/docs/usage/visualizers.md
index d2892b863..da847d939 100644
--- a/website/docs/usage/visualizers.md
+++ b/website/docs/usage/visualizers.md
@@ -198,12 +198,12 @@ import DisplacySpanHtml from 'images/displacy-span.html'
The span visualizer lets you customize the following `options`:
-| Argument | Description |
-|-----------------|---------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `spans_key` | Which spans key to render spans from. Default is `"sc"`. ~~str~~ |
+| Argument | Description |
+| ----------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `spans_key` | Which spans key to render spans from. Default is `"sc"`. ~~str~~ |
| `templates` | Dictionary containing the keys `"span"`, `"slice"`, and `"start"`. These dictate how the overall span, a span slice, and the starting token will be rendered. ~~Optional[Dict[str, str]~~ |
-| `kb_url_template` | Optional template to construct the KB url for the entity to link to. Expects a python f-string format with single field to fill in ~~Optional[str]~~ |
-| `colors` | Color overrides. Entity types should be mapped to color names or values. ~~Dict[str, str]~~ |
+| `kb_url_template` | Optional template to construct the KB url for the entity to link to. Expects a python f-string format with single field to fill in ~~Optional[str]~~ |
+| `colors` | Color overrides. Entity types should be mapped to color names or values. ~~Dict[str, str]~~ |
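+
+As a quick sketch, these options are passed to `displacy.render` or
+`displacy.serve` (the document, spans and colors below are illustrative):
+
+```python
+import spacy
+from spacy import displacy
+from spacy.tokens import Span
+
+nlp = spacy.blank("en")
+doc = nlp("Welcome to the Bank of China.")
+doc.spans["sc"] = [Span(doc, 3, 6, "ORG"), Span(doc, 5, 6, "GPE")]
+html = displacy.render(
+    doc,
+    style="span",
+    options={"spans_key": "sc", "colors": {"ORG": "#7aecec", "GPE": "#feca74"}},
+)
+```
+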
Because spans can be stored across different keys in `doc.spans`, you need to specify
which one displaCy should use with `spans_key` (`sc` is the default).
@@ -343,9 +343,21 @@ want to visualize output from other libraries, like [NLTK](http://www.nltk.org)
or
[SyntaxNet](https://github.com/tensorflow/models/tree/master/research/syntaxnet).
If you set `manual=True` on either `render()` or `serve()`, you can pass in data
-in displaCy's format as a dictionary (instead of `Doc` objects).
+in displaCy's format as a dictionary (instead of `Doc` objects). There are helper
+functions for converting `Doc` objects to displaCy's format for use with `manual=True`:
+[`displacy.parse_deps`](/api/top-level#displacy.parse_deps),
+[`displacy.parse_ents`](/api/top-level#displacy.parse_ents),
+and [`displacy.parse_spans`](/api/top-level#displacy.parse_spans).
-> #### Example
+> #### Example with parse function
+>
+> ```python
+> doc = nlp("But Google is starting from behind.")
+> ex = displacy.parse_ents(doc)
+> html = displacy.render(ex, style="ent", manual=True)
+> ```
+
+> #### Example with raw data
>
> ```python
> ex = [{"text": "But Google is starting from behind.",
@@ -354,6 +366,7 @@ in displaCy's format as a dictionary (instead of `Doc` objects).
> html = displacy.render(ex, style="ent", manual=True)
> ```
+
```python
### DEP input
{
@@ -389,6 +402,18 @@ in displaCy's format as a dictionary (instead of `Doc` objects).
}
```
+```python
+### SPANS input
+{
+ "text": "Welcome to the Bank of China.",
+ "spans": [
+ {"start_token": 3, "end_token": 6, "label": "ORG"},
+ {"start_token": 5, "end_token": 6, "label": "GPE"},
+ ],
+ "tokens": ["Welcome", "to", "the", "Bank", "of", "China", "."],
+}
+```
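+
+For example, a dictionary like the one above can be rendered directly (a
+minimal sketch using the data shown):
+
+```python
+from spacy import displacy
+
+spans_input = {
+    "text": "Welcome to the Bank of China.",
+    "spans": [
+        {"start_token": 3, "end_token": 6, "label": "ORG"},
+        {"start_token": 5, "end_token": 6, "label": "GPE"},
+    ],
+    "tokens": ["Welcome", "to", "the", "Bank", "of", "China", "."],
+}
+html = displacy.render(spans_input, style="span", manual=True)
+```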
+
## Using displaCy in a web application {#webapp}
If you want to use the visualizers as part of a web application, for example to
diff --git a/website/meta/languages.json b/website/meta/languages.json
index 64ca7a082..0028b4a5f 100644
--- a/website/meta/languages.json
+++ b/website/meta/languages.json
@@ -162,7 +162,12 @@
{
"code": "hr",
"name": "Croatian",
- "has_examples": true
+ "has_examples": true,
+ "models": [
+ "hr_core_news_sm",
+ "hr_core_news_md",
+ "hr_core_news_lg"
+ ]
},
{
"code": "hsb",
@@ -260,6 +265,11 @@
"name": "Luxembourgish",
"has_examples": true
},
+ {
+ "code": "lg",
+ "name": "Luganda",
+ "has_examples": true
+ },
{
"code": "lij",
"name": "Ligurian",
@@ -364,8 +374,8 @@
"has_examples": true,
"dependencies": [
{
- "name": "pymorphy2",
- "url": "https://github.com/kmike/pymorphy2"
+ "name": "pymorphy3",
+ "url": "https://github.com/no-plagiarism/pymorphy3"
}
],
"models": [
@@ -462,10 +472,20 @@
"code": "uk",
"name": "Ukrainian",
"has_examples": true,
+ "models": [
+ "uk_core_news_sm",
+ "uk_core_news_md",
+ "uk_core_news_lg",
+ "uk_core_news_trf"
+ ],
"dependencies": [
{
- "name": "pymorphy2",
- "url": "https://github.com/kmike/pymorphy2"
+ "name": "pymorphy3",
+ "url": "https://github.com/no-plagiarism/pymorphy3"
+ },
+ {
+ "name": "pymorphy3-dicts-uk",
+ "url": "https://github.com/no-plagiarism/pymorphy3-dicts"
}
]
},
diff --git a/website/meta/sidebars.json b/website/meta/sidebars.json
index c23f0a255..1b743636c 100644
--- a/website/meta/sidebars.json
+++ b/website/meta/sidebars.json
@@ -12,7 +12,8 @@
{ "text": "New in v3.0", "url": "/usage/v3" },
{ "text": "New in v3.1", "url": "/usage/v3-1" },
{ "text": "New in v3.2", "url": "/usage/v3-2" },
- { "text": "New in v3.3", "url": "/usage/v3-3" }
+ { "text": "New in v3.3", "url": "/usage/v3-3" },
+ { "text": "New in v3.4", "url": "/usage/v3-4" }
]
},
{
@@ -124,6 +126,7 @@
{
"label": "Other",
"items": [
+ { "text": "Attributes", "url": "/api/attributes" },
{ "text": "Corpus", "url": "/api/corpus" },
{ "text": "KnowledgeBase", "url": "/api/kb" },
{ "text": "Lookups", "url": "/api/lookups" },
diff --git a/website/meta/site.json b/website/meta/site.json
index 97051011f..360a72178 100644
--- a/website/meta/site.json
+++ b/website/meta/site.json
@@ -28,7 +28,7 @@
},
"binderUrl": "explosion/spacy-io-binder",
"binderBranch": "spacy.io",
- "binderVersion": "3.0",
+ "binderVersion": "3.4",
"sections": [
{ "id": "usage", "title": "Usage Documentation", "theme": "blue" },
{ "id": "models", "title": "Models Documentation", "theme": "blue" },
diff --git a/website/meta/universe.json b/website/meta/universe.json
index b7f340f52..9145855c6 100644
--- a/website/meta/universe.json
+++ b/website/meta/universe.json
@@ -1,5 +1,92 @@
{
"resources": [
+ {
+ "id": "concepcy",
+ "title": "concepCy",
+ "slogan": "A multilingual knowledge graph in spaCy",
+ "description": "A spaCy wrapper for ConceptNet, a freely-available semantic network designed to help computers understand the meaning of words.",
+ "github": "JulesBelveze/concepcy",
+ "pip": "concepcy",
+ "code_example": [
+ "import spacy",
+ "import concepcy",
+ "",
+ "nlp = spacy.load('en_core_web_sm')",
+ "# Using default concepCy configuration",
+ "nlp.add_pipe('concepcy')",
+ "",
+ "doc = nlp('WHO is a lovely company')",
+ "",
+ "# Access all the 'RelatedTo' relations from the Doc",
+ "for word, relations in doc._.relatedto.items():",
+ " print(f'Word: {word}\n{relations}')",
+ "",
+ "# Access the 'RelatedTo' relations word by word",
+ "for token in doc:",
+ " print(f'Word: {token}\n{token._.relatedto}')"
+ ],
+ "category": ["pipeline"],
+ "image": "https://github.com/JulesBelveze/concepcy/blob/main/figures/concepcy.png",
+ "tags": ["semantic", "ConceptNet"],
+ "author": "Jules Belveze",
+ "author_links": {
+ "github": "JulesBelveze",
+ "website": "https://www.linkedin.com/in/jules-belveze/"
+ }
+ },
+ {
+ "id": "spacyfishing",
+ "title": "spaCy fishing",
+ "slogan": "Named entity disambiguation and linking on Wikidata in spaCy with Entity-Fishing.",
+ "description": "A spaCy wrapper of Entity-Fishing for named entity disambiguation and linking against a Wikidata knowledge base.",
+ "github": "Lucaterre/spacyfishing",
+ "pip": "spacyfishing",
+ "code_example": [
+ "import spacy",
+ "text = 'Victor Hugo and Honoré de Balzac are French writers who lived in Paris.'",
+ "nlp = spacy.load('en_core_web_sm')",
+ "nlp.add_pipe('entityfishing')",
+ "doc = nlp(text)",
+ "for span in doc.ents:",
+ " print((ent.text, ent.label_, ent._.kb_qid, ent._.url_wikidata, ent._.nerd_score))",
+ "# ('Victor Hugo', 'PERSON', 'Q535', 'https://www.wikidata.org/wiki/Q535', 0.972)",
+ "# ('Honoré de Balzac', 'PERSON', 'Q9711', 'https://www.wikidata.org/wiki/Q9711', 0.9724)",
+ "# ('French', 'NORP', 'Q121842', 'https://www.wikidata.org/wiki/Q121842', 0.3739)",
+ "# ('Paris', 'GPE', 'Q90', 'https://www.wikidata.org/wiki/Q90', 0.5652)",
+ "## Set parameter `extra_info` to `True` and check also span._.description, span._.src_description, span._.normal_term, span._.other_ids"
+ ],
+ "category": ["models", "pipeline"],
+ "image": "https://raw.githubusercontent.com/Lucaterre/spacyfishing/main/docs/spacyfishing-logo-resized.png",
+ "tags": ["NER", "NEL"],
+ "author": "Lucas Terriel",
+ "author_links": {
+ "twitter": "TerreLuca",
+ "github": "Lucaterre"
+ }
+ },
+ {
+ "id": "aim-spacy",
+ "title": "Aim-spaCy",
+ "slogan": "Aim-spaCy is an Aim-based spaCy experiment tracker.",
+ "description": "Aim-spaCy helps to easily collect, store and explore training logs for spaCy, including: hyper-parameters, metrics and displaCy visualizations",
+ "github": "aimhubio/aim-spacy",
+ "pip": "aim-spacy",
+ "code_example": [
+ "https://github.com/aimhubio/aim-spacy/tree/master/examples"
+ ],
+ "code_language": "python",
+ "url": "https://aimstack.io/spacy",
+ "thumb": "https://user-images.githubusercontent.com/13848158/172912427-ee9327ea-3cd8-47fa-8427-6c0d36cd831f.png",
+ "image": "https://user-images.githubusercontent.com/13848158/136364717-0939222c-55b6-44f0-ad32-d9ab749546e4.png",
+ "author": "AimStack",
+ "author_links": {
+ "twitter": "aimstackio",
+ "github": "aimhubio",
+ "website": "https://aimstack.io"
+ },
+ "category": ["visualizers"],
+ "tags": ["experiment-tracking", "visualization"]
+ },
{
"id": "spacy-report",
"title": "spacy-report",
@@ -32,7 +119,7 @@
"code_language": "python",
"author": "Leap Beyond",
"author_links": {
- "github": "https://github.com/LeapBeyond",
+ "github": "LeapBeyond",
"website": "https://leapbeyond.ai"
},
"code_example": [
@@ -55,8 +142,8 @@
"code_language": "python",
"author": "Peter Baumgartner",
"author_links": {
- "twitter" : "https://twitter.com/pmbaumgartner",
- "github": "https://github.com/pmbaumgartner",
+ "twitter" : "pmbaumgartner",
+ "github": "pmbaumgartner",
"website": "https://www.peterbaumgartner.com/"
},
"code_example": [
@@ -75,8 +162,8 @@
"code_language": "python",
"author": "Explosion",
"author_links": {
- "twitter" : "https://twitter.com/explosion_ai",
- "github": "https://github.com/explosion",
+ "twitter" : "explosion_ai",
+ "github": "explosion",
"website": "https://explosion.ai/"
},
"code_example": [
@@ -492,6 +579,37 @@
"website": "https://koaning.io"
}
},
+ {
+ "id": "bertopic",
+ "title": "BERTopic",
+ "slogan": "Leveraging BERT and c-TF-IDF to create easily interpretable topics.",
+ "description": "BERTopic is a topic modeling technique that leverages embedding models and c-TF-IDF to create dense clusters allowing for easily interpretable topics whilst keeping important words in the topic descriptions. BERTopic supports guided, (semi-) supervised, hierarchical, and dynamic topic modeling.",
+ "github": "maartengr/bertopic",
+ "pip": "bertopic",
+ "thumb": "https://i.imgur.com/Rx2LfBm.png",
+ "image": "https://raw.githubusercontent.com/MaartenGr/BERTopic/master/images/topic_visualization.gif",
+ "code_example": [
+ "import spacy",
+ "from bertopic import BERTopic",
+ "from sklearn.datasets import fetch_20newsgroups",
+ "",
+ "docs = fetch_20newsgroups(subset='all', remove=('headers', 'footers', 'quotes'))['data']",
+ "nlp = spacy.load('en_core_web_md', exclude=['tagger', 'parser', 'ner', 'attribute_ruler', 'lemmatizer'])",
+ "",
+ "topic_model = BERTopic(embedding_model=nlp)",
+ "topics, probs = topic_model.fit_transform(docs)",
+ "",
+ "fig = topic_model.visualize_topics()",
+ "fig.show()"
+ ],
+ "category": ["visualizers", "training"],
+ "author": "Maarten Grootendorst",
+ "author_links": {
+ "twitter": "maartengr",
+ "github": "maartengr",
+ "website": "https://maartengrootendorst.com"
+ }
+ },
{
"id": "tokenwiser",
"title": "tokenwiser",
@@ -548,8 +666,8 @@
"code_language": "python",
"author": "Keith Rozario",
"author_links": {
- "twitter" : "https://twitter.com/keithrozario",
- "github": "https://github.com/keithrozario",
+ "twitter" : "keithrozario",
+ "github": "keithrozario",
"website": "https://www.keithrozario.com"
},
"code_example": [
@@ -697,43 +815,6 @@
"category": ["standalone", "research"],
"tags": ["pytorch"]
},
- {
- "id": "NeuroNER",
- "title": "NeuroNER",
- "slogan": "Named-entity recognition using neural networks",
- "github": "Franck-Dernoncourt/NeuroNER",
- "category": ["models"],
- "pip": "pyneuroner[cpu]",
- "code_example": [
- "from neuroner import neuromodel",
- "nn = neuromodel.NeuroNER(train_model=False, use_pretrained_model=True)"
- ],
- "tags": ["standalone"]
- },
- {
- "id": "NLPre",
- "title": "NLPre",
- "slogan": "Natural Language Preprocessing Library for health data and more",
- "github": "NIHOPA/NLPre",
- "pip": "nlpre",
- "code_example": [
- "from nlpre import titlecaps, dedash, identify_parenthetical_phrases",
- "from nlpre import replace_acronyms, replace_from_dictionary",
- "ABBR = identify_parenthetical_phrases()(text)",
- "parsers = [dedash(), titlecaps(), replace_acronyms(ABBR),",
- " replace_from_dictionary(prefix='MeSH_')]",
- "for f in parsers:",
- " text = f(text)",
- "print(text)"
- ],
- "category": ["scientific", "biomedical"],
- "author": "Travis Hoppe",
- "author_links": {
- "github": "thoppe",
- "twitter": "metasemantic",
- "website": "http://thoppe.github.io/"
- }
- },
{
"id": "Chatterbot",
"title": "Chatterbot",
@@ -836,78 +917,6 @@
"github": "shigapov"
}
},
- {
- "id": "spacy_hunspell",
- "slogan": "Add spellchecking and spelling suggestions to your spaCy pipeline using Hunspell",
- "description": "This package uses the [spaCy 2.0 extensions](https://spacy.io/usage/processing-pipelines#extensions) to add [Hunspell](http://hunspell.github.io) support for spellchecking.",
- "github": "tokestermw/spacy_hunspell",
- "pip": "spacy_hunspell",
- "code_example": [
- "import spacy",
- "from spacy_hunspell import spaCyHunSpell",
- "",
- "nlp = spacy.load('en_core_web_sm')",
- "hunspell = spaCyHunSpell(nlp, 'mac')",
- "nlp.add_pipe(hunspell)",
- "doc = nlp('I can haz cheezeburger.')",
- "haz = doc[2]",
- "haz._.hunspell_spell # False",
- "haz._.hunspell_suggest # ['ha', 'haze', 'hazy', 'has', 'hat', 'had', 'hag', 'ham', 'hap', 'hay', 'haw', 'ha z']"
- ],
- "author": "Motoki Wu",
- "author_links": {
- "github": "tokestermw",
- "twitter": "plusepsilon"
- },
- "category": ["pipeline"],
- "tags": ["spellcheck"]
- },
- {
- "id": "spacy_grammar",
- "slogan": "Language Tool style grammar handling with spaCy",
- "description": "This packages leverages the [Matcher API](https://spacy.io/docs/usage/rule-based-matching) in spaCy to quickly match on spaCy tokens not dissimilar to regex. It reads a `grammar.yml` file to load up custom patterns and returns the results inside `Doc`, `Span`, and `Token`. It is extensible through adding rules to `grammar.yml` (though currently only the simple string matching is implemented).",
- "github": "tokestermw/spacy_grammar",
- "code_example": [
- "import spacy",
- "from spacy_grammar.grammar import Grammar",
- "",
- "nlp = spacy.load('en')",
- "grammar = Grammar(nlp)",
- "nlp.add_pipe(grammar)",
- "doc = nlp('I can haz cheeseburger.')",
- "doc._.has_grammar_error # True"
- ],
- "author": "Motoki Wu",
- "author_links": {
- "github": "tokestermw",
- "twitter": "plusepsilon"
- },
- "category": ["pipeline"]
- },
- {
- "id": "spacy_kenlm",
- "slogan": "KenLM extension for spaCy 2.0",
- "github": "tokestermw/spacy_kenlm",
- "pip": "spacy_kenlm",
- "code_example": [
- "import spacy",
- "from spacy_kenlm import spaCyKenLM",
- "",
- "nlp = spacy.load('en_core_web_sm')",
- "spacy_kenlm = spaCyKenLM() # default model from test.arpa",
- "nlp.add_pipe(spacy_kenlm)",
- "doc = nlp('How are you?')",
- "doc._.kenlm_score # doc score",
- "doc[:2]._.kenlm_score # span score",
- "doc[2]._.kenlm_score # token score"
- ],
- "author": "Motoki Wu",
- "author_links": {
- "github": "tokestermw",
- "twitter": "plusepsilon"
- },
- "category": ["pipeline"]
- },
{
"id": "spacy_readability",
"slogan": "Add text readability meta data to Doc objects",
@@ -976,34 +985,6 @@
},
"category": ["pipeline"]
},
- {
- "id": "spacy-lookup",
- "slogan": "A powerful entity matcher for very large dictionaries, using the FlashText module",
- "description": "spaCy v2.0 extension and pipeline component for adding Named Entities metadata to `Doc` objects. Detects Named Entities using dictionaries. The extension sets the custom `Doc`, `Token` and `Span` attributes `._.is_entity`, `._.entity_type`, `._.has_entities` and `._.entities`. Named Entities are matched using the python module `flashtext`, and looked up in the data provided by different dictionaries.",
- "github": "mpuig/spacy-lookup",
- "pip": "spacy-lookup",
- "code_example": [
- "import spacy",
- "from spacy_lookup import Entity",
- "",
- "nlp = spacy.load('en')",
- "entity = Entity(keywords_list=['python', 'product manager', 'java platform'])",
- "nlp.add_pipe(entity, last=True)",
- "",
- "doc = nlp(\"I am a product manager for a java and python.\")",
- "assert doc._.has_entities == True",
- "assert doc[0]._.is_entity == False",
- "assert doc[3]._.entity_desc == 'product manager'",
- "assert doc[3]._.is_entity == True",
- "",
- "print([(token.text, token._.canonical) for token in doc if token._.is_entity])"
- ],
- "author": "Marc Puig",
- "author_links": {
- "github": "mpuig"
- },
- "category": ["pipeline"]
- },
{
"id": "spacy-iwnlp",
"slogan": "German lemmatization with IWNLP",
@@ -1205,6 +1186,46 @@
"category": ["pipeline", "models", "training"],
"tags": ["pipeline", "models", "transformers"]
},
+ {
+ "id": "asent",
+ "title": "Asent",
+ "slogan": "Fast, flexible and transparent sentiment analysis",
+ "description": "Asent is a rule-based sentiment analysis library for Python made using spaCy. It is inspired by VADER, but uses a more modular ruleset, that allows the user to change e.g. the method for finding negations. Furthermore it includes visualisers to visualize the model predictions, making the model easily interpretable.",
+ "github": "kennethenevoldsen/asent",
+ "pip": "asent",
+ "code_example": [
+ "import spacy",
+ "import asent",
+ "",
+ "# load spacy pipeline",
+ "nlp = spacy.blank('en')",
+ "nlp.add_pipe('sentencizer')",
+ "",
+ "# add the rule-based sentiment model",
+ "nlp.add_pipe('asent_en_v1')",
+ "",
+ "# try an example",
+ "text = 'I am not very happy, but I am also not especially sad'",
+ "doc = nlp(text)",
+ "",
+ "# print polarity of document, scaled to be between -1, and 1",
+ "print(doc._.polarity)",
+ "# neg=0.0 neu=0.631 pos=0.369 compound=0.7526",
+ "",
+ "# Naturally, a simple score can be quite unsatisfying, thus Asent implements a series of visualizer to interpret the results:",
+ "asent.visualize(doc, style='prediction')",
+ " # or",
+ "asent.visualize(doc[:5], style='analysis')"
+ ],
+ "thumb": "https://github.com/KennethEnevoldsen/asent/raw/main/docs/img/logo_black_font.png?raw=true",
+ "author": "Kenneth Enevoldsen",
+ "author_links": {
+ "github": "KennethEnevoldsen",
+ "website": "https://www.kennethenevoldsen.com"
+ },
+ "category": ["pipeline", "models"],
+ "tags": ["pipeline", "models", "sentiment"]
+ },
{
"id": "textdescriptives",
"title": "TextDescriptives",
@@ -1270,21 +1291,6 @@
"github": "huggingface"
}
},
- {
- "id": "spacy-vis",
- "slogan": "A visualisation tool for spaCy using Hierplane",
- "description": "A visualiser for spaCy annotations. This visualisation uses the [Hierplane](https://allenai.github.io/hierplane/) Library to render the dependency parse from spaCy's models. It also includes visualisation of entities and POS tags within nodes.",
- "github": "DeNeutoy/spacy-vis",
- "url": "http://spacyvis.allennlp.org/spacy-parser",
- "thumb": "https://i.imgur.com/DAG9QFd.jpg",
- "image": "https://raw.githubusercontent.com/DeNeutoy/spacy-vis/master/img/example.gif",
- "author": "Mark Neumann",
- "author_links": {
- "twitter": "MarkNeumannnn",
- "github": "DeNeutoy"
- },
- "category": ["visualizers"]
- },
{
"id": "matcher-explorer",
"title": "Rule-based Matcher Explorer",
@@ -2272,7 +2278,7 @@
"author": "Daniel Whitenack & Chris Benson",
"author_links": {
"website": "https://changelog.com/practicalai",
- "twitter": "https://twitter.com/PracticalAIFM"
+ "twitter": "PracticalAIFM"
},
"category": ["podcasts"]
},
@@ -2288,29 +2294,6 @@
"youtube": "8u57WSXVpmw",
"category": ["videos"]
},
- {
- "id": "adam_qas",
- "title": "ADAM: Question Answering System",
- "slogan": "A question answering system that extracts answers from Wikipedia to questions posed in natural language.",
- "github": "5hirish/adam_qas",
- "pip": "qas",
- "code_example": [
- "git clone https://github.com/5hirish/adam_qas.git",
- "cd adam_qas",
- "pip install -r requirements.txt",
- "python -m qas.adam 'When was linux kernel version 4.0 released ?'"
- ],
- "code_language": "bash",
- "thumb": "https://shirishkadam.files.wordpress.com/2018/04/mini_alleviate.png",
- "author": "Shirish Kadam",
- "author_links": {
- "twitter": "5hirish",
- "github": "5hirish",
- "website": "https://shirishkadam.com/"
- },
- "category": ["standalone"],
- "tags": ["question-answering", "elasticsearch"]
- },
{
"id": "self-attentive-parser",
"title": "Berkeley Neural Parser",
@@ -2408,20 +2391,6 @@
"category": ["nonpython"],
"tags": ["javascript"]
},
- {
- "id": "spacy-raspberry",
- "title": "spacy-raspberry",
- "slogan": "64bit Raspberry Pi image for spaCy and neuralcoref",
- "github": "boehm-e/spacy-raspberry",
- "thumb": "https://i.imgur.com/VCJMrE6.png",
- "image": "https://raw.githubusercontent.com/boehm-e/spacy-raspberry/master/imgs/preview.png",
- "author": "Erwan Boehm",
- "author_links": {
- "github": "boehm-e"
- },
- "category": ["apis"],
- "tags": ["raspberrypi"]
- },
{
"id": "spacy-wordnet",
"title": "spacy-wordnet",
@@ -2492,35 +2461,6 @@
"category": ["standalone", "pipeline"],
"tags": ["linguistics", "computational linguistics", "conll", "conll-u"]
},
- {
- "id": "spacy-langdetect",
- "title": "spacy-langdetect",
- "slogan": "A fully customizable language detection pipeline for spaCy",
- "description": "This module allows you to add language detection capabilites to your spaCy pipeline. Also supports custom language detectors!",
- "pip": "spacy-langdetect",
- "code_example": [
- "import spacy",
- "from spacy_langdetect import LanguageDetector",
- "nlp = spacy.load('en')",
- "nlp.add_pipe(LanguageDetector(), name='language_detector', last=True)",
- "text = 'This is an english text.'",
- "doc = nlp(text)",
- "# document level language detection. Think of it like average language of the document!",
- "print(doc._.language)",
- "# sentence level language detection",
- "for sent in doc.sents:",
- " print(sent, sent._.language)"
- ],
- "code_language": "python",
- "author": "Abhijit Balaji",
- "author_links": {
- "github": "Abhijit-2592",
- "website": "https://abhijit-2592.github.io/"
- },
- "github": "Abhijit-2592/spacy-langdetect",
- "category": ["pipeline"],
- "tags": ["language-detection"]
- },
{
"id": "ludwig",
"title": "Ludwig",
@@ -2698,7 +2638,7 @@
" Add the courgette, garlic, red peppers and oregano and cook for 2–3 minutes.",
" Later, add some oranges and chickens.\"\"\"",
"",
- "# use any model that has internal spacy embeddings",
+ "# use any model that has internal spacy embeddings",
"nlp = spacy.load('en_core_web_lg')",
"nlp.add_pipe(\"concise_concepts\", ",
" config={\"data\": data}",
@@ -2744,7 +2684,7 @@
" At that location, Nissin was founded.",
" Many students survived by eating these noodles, but they don't even know him.\"\"\"",
"",
- "# use any model that has internal spacy embeddings",
+ "# use any model that has internal spacy embeddings",
"nlp = spacy.load('en_core_web_sm')",
"nlp.add_pipe(",
" \"xx_coref\", config={\"chunk_size\": 2500, \"chunk_overlap\": 2, \"device\": 0})",
@@ -2821,7 +2761,7 @@
"slogan": "Information extraction from English and German texts based on predicate logic",
"github": "explosion/holmes-extractor",
"url": "https://github.com/explosion/holmes-extractor",
- "description": "Holmes is a Python 3 library that supports a number of use cases involving information extraction from English and German texts, including chatbot, structural extraction, topic matching and supervised document classification. There is a [website demonstrating intelligent search based on topic matching](https://demo.holmes.prod.demos.explosion.services).",
+ "description": "Holmes is a Python 3 library that supports a number of use cases involving information extraction from English and German texts, including chatbot, structural extraction, topic matching and supervised document classification. There is a [website demonstrating intelligent search based on topic matching](https://holmes-demo.explosion.services).",
"pip": "holmes-extractor",
"category": ["pipeline", "standalone"],
"tags": ["chatbots", "text-processing"],
@@ -2927,7 +2867,7 @@
"doc = nlp(\"AE died in Princeton in 1955.\")",
"",
"print(doc._.clauses)",
- "# Output:",
+ "# Output:",
"# ",
"",
"propositions = doc._.clauses[0].to_propositions(as_text=True)",
@@ -3019,35 +2959,6 @@
],
"author": "Stefan Daniel Dumitrescu, Andrei-Marius Avram"
},
- {
- "id": "num_fh",
- "title": "Numeric Fused-Head",
- "slogan": "Numeric Fused-Head Identificaiton and Resolution in English",
- "description": "This package provide a wrapper for the Numeric Fused-Head in English. It provides another information layer on numbers that refer to another entity which is not obvious from the syntactic tree.",
- "github": "yanaiela/num_fh",
- "pip": "num_fh",
- "category": ["pipeline", "research"],
- "code_example": [
- "import spacy",
- "from num_fh import NFH",
- "nlp = spacy.load('en_core_web_sm')",
- "nfh = NFH(nlp)",
- "nlp.add_pipe(nfh, first=False)",
- "doc = nlp(\"I told you two, that only one of them is the one who will get 2 or 3 icecreams\")",
- "",
- "assert doc[16]._.is_nfh == True",
- "assert doc[18]._.is_nfh == False",
- "assert doc[3]._.is_deter_nfh == True",
- "assert doc[16]._.is_deter_nfh == False",
- "assert len(doc._.nfh) == 4"
- ],
- "author": "Yanai Elazar",
- "author_links": {
- "github": "yanaiela",
- "twitter": "yanaiela",
- "website": "https://yanaiela.github.io"
- }
- },
{
"id": "Healthsea",
"title": "Healthsea",
@@ -3138,6 +3049,7 @@
"from pysbd.utils import PySBDFactory",
"",
"nlp = spacy.blank('en')",
+ "# Caution: works with spaCy<=2.x.x",
"nlp.add_pipe(PySBDFactory(nlp))",
"",
"doc = nlp('My name is Jonas E. Smith. Please turn to p. 55.')",
@@ -3721,7 +3633,7 @@
"",
"#Lexico Semantic (LxSem) Features",
"TTRF = LingFeat.TTRF_() #Type Token Ratio Features",
- "VarF = LingFeat.VarF_() #Noun/Verb/Adj/Adv Variation Features",
+ "VarF = LingFeat.VarF_() #Noun/Verb/Adj/Adv Variation Features",
"PsyF = LingFeat.PsyF_() #Psycholinguistic Difficulty of Words (AoA Kuperman)",
"WoLF = LingFeat.WorF_() #Word Familiarity from Frequency Count (SubtlexUS)",
"",
@@ -4057,6 +3969,21 @@
},
"category": ["biomedical", "scientific", "research", "pipeline"],
"tags": ["clinical"]
+ },
+ {
+ "id": "sent-pattern",
+ "title": "English Interpretation Sentence Pattern",
+ "slogan": "English interpretation for accurate translation from English to Japanese",
+ "description": "This package categorizes English sentences into one of five basic sentence patterns and identifies the subject, verb, object, and other components. The five basic sentence patterns are based on C. T. Onions's Advanced English Syntax and are frequently used when teaching English in Japan.",
+ "github": "lll-lll-lll-lll/sent-pattern",
+ "pip": "sent-pattern",
+ "author": "Shunpei Nakayama",
+ "author_links": {
+ "twitter": "ExZ79575296",
+ "github": "lll-lll-lll-lll"
+ },
+ "category": ["pipeline"],
+ "tags": ["interpretation", "ja"]
}
],
diff --git a/website/src/templates/index.js b/website/src/templates/index.js
index bdbdbd431..a0ba4503e 100644
--- a/website/src/templates/index.js
+++ b/website/src/templates/index.js
@@ -120,8 +120,8 @@ const AlertSpace = ({ nightly, legacy }) => {
}
const navAlert = (
-
- 💥 Out now: spaCy v3.3
+
+ 💥 Out now: spaCy v3.4
)
diff --git a/website/src/templates/models.js b/website/src/templates/models.js
index 69cec3376..16a2360d5 100644
--- a/website/src/templates/models.js
+++ b/website/src/templates/models.js
@@ -76,6 +76,7 @@ const MODEL_META = {
benchmark_ner: 'NER accuracy',
benchmark_speed: 'Speed',
compat: 'Latest compatible package version for your spaCy installation',
+ download_link: 'Download link for the pipeline',
}
const LABEL_SCHEME_META = {
@@ -114,7 +115,11 @@ function formatVectors(data) {
if (!data) return 'n/a'
if (Object.values(data).every(n => n === 0)) return 'context vectors only'
const { keys, vectors, width } = data
- return `${abbrNum(keys)} keys, ${abbrNum(vectors)} unique vectors (${width} dimensions)`
+ if (keys >= 0) {
+ return `${abbrNum(keys)} keys, ${abbrNum(vectors)} unique vectors (${width} dimensions)`
+ } else {
+ return `${abbrNum(vectors)} floret vectors (${width} dimensions)`
+ }
}
function formatAccuracy(data, lang) {
@@ -134,6 +139,13 @@ function formatAccuracy(data, lang) {
.filter(item => item)
}
+function formatDownloadLink(lang, name, version) {
+ const fullName = `${lang}_${name}-${version}`
+ const filename = `${fullName}-py3-none-any.whl`
+ const url = `https://github.com/explosion/spacy-models/releases/download/${fullName}/${filename}`
+    return <Link to={url}>{filename}</Link>
+}
+
function formatModelMeta(data) {
return {
fullName: `${data.lang}_${data.name}-${data.version}`,
@@ -150,6 +162,7 @@ function formatModelMeta(data) {
labels: isEmptyObj(data.labels) ? null : data.labels,
vectors: formatVectors(data.vectors),
accuracy: formatAccuracy(data.performance, data.lang),
+ download_link: formatDownloadLink(data.lang, data.name, data.version),
}
}
@@ -240,6 +253,7 @@ const Model = ({
{ label: 'Components', content: components, help: MODEL_META.components },
{ label: 'Pipeline', content: pipeline, help: MODEL_META.pipeline },
{ label: 'Vectors', content: meta.vectors, help: MODEL_META.vecs },
+ { label: 'Download Link', content: meta.download_link, help: MODEL_META.download_link },
{ label: 'Sources', content: sources, help: MODEL_META.sources },
{ label: 'Author', content: author },
{ label: 'License', content: license },
diff --git a/website/src/templates/universe.js b/website/src/templates/universe.js
index 10f2520d9..48ffa3add 100644
--- a/website/src/templates/universe.js
+++ b/website/src/templates/universe.js
@@ -142,10 +142,10 @@ const UniverseContent = ({ content = [], categories, theme, pageContext, mdxComp
The Universe database is open-source and collected in a simple JSON file.
For more details on the formats and available fields, see the documentation.
Looking for inspiration your own spaCy plugin or extension? Check out the
-
- project idea
+
+ project idea
- label on the issue tracker.
+ section in Discussions.
diff --git a/website/src/widgets/quickstart-install.js b/website/src/widgets/quickstart-install.js
index 926d76ae3..61c0678dd 100644
--- a/website/src/widgets/quickstart-install.js
+++ b/website/src/widgets/quickstart-install.js
@@ -25,6 +25,7 @@ const CUDA = {
'11.4': 'cuda114',
'11.5': 'cuda115',
'11.6': 'cuda116',
+ '11.7': 'cuda117',
}
const LANG_EXTRAS = ['ja'] // only for languages with models