diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 000000000..c9f30d1d3 --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1 @@ +custom: [https://explosion.ai/merch, https://explosion.ai/tailored-solutions] diff --git a/.github/workflows/lock.yml b/.github/workflows/lock.yml index 6c3985a93..2bbdd64c7 100644 --- a/.github/workflows/lock.yml +++ b/.github/workflows/lock.yml @@ -16,7 +16,7 @@ jobs: if: github.repository_owner == 'explosion' runs-on: ubuntu-latest steps: - - uses: dessant/lock-threads@v4 + - uses: dessant/lock-threads@v5 with: process-only: 'issues' issue-inactive-days: '30' diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 619570090..2a236b6bd 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -31,16 +31,25 @@ jobs: uses: actions/setup-python@v4 with: python-version: "3.7" - architecture: x64 - name: black run: | python -m pip install black -c requirements.txt python -m black spacy --check + - name: isort + run: | + python -m pip install isort -c requirements.txt + python -m isort spacy --check - name: flake8 run: | python -m pip install flake8==5.0.4 python -m flake8 spacy --count --select=E901,E999,F821,F822,F823,W605 --show-source --statistics + - name: cython-lint + run: | + python -m pip install cython-lint -c requirements.txt + # E501: line too log, W291: trailing whitespace, E266: too many leading '#' for block comment + cython-lint spacy --ignore E501,W291,E266 + tests: name: Test needs: Validate @@ -48,10 +57,8 @@ jobs: fail-fast: true matrix: os: [ubuntu-latest, windows-latest, macos-latest] - python_version: ["3.11"] + python_version: ["3.12"] include: - - os: ubuntu-20.04 - python_version: "3.6" - os: windows-latest python_version: "3.7" - os: macos-latest @@ -60,6 +67,8 @@ jobs: python_version: "3.9" - os: windows-latest python_version: "3.10" + - os: macos-latest + python_version: "3.11" runs-on: ${{ matrix.os }} @@ -71,7 +80,6 @@ jobs: uses: actions/setup-python@v4 with: python-version: ${{ matrix.python_version }} - architecture: x64 - name: Install dependencies run: | @@ -85,7 +93,7 @@ jobs: - name: Run mypy run: | python -m mypy spacy - if: matrix.python_version != '3.6' + if: matrix.python_version != '3.7' - name: Delete source directory and .egg-info run: | @@ -107,22 +115,22 @@ jobs: - name: Test import run: python -W error -c "import spacy" -# - name: "Test download CLI" -# run: | -# python -m spacy download ca_core_news_sm -# python -m spacy download ca_core_news_md -# python -c "import spacy; nlp=spacy.load('ca_core_news_sm'); doc=nlp('test')" -# if: matrix.python_version == '3.9' -# -# - name: "Test download_url in info CLI" -# run: | -# python -W error -m spacy info ca_core_news_sm | grep -q download_url -# if: matrix.python_version == '3.9' -# -# - name: "Test no warnings on load (#11713)" -# run: | -# python -W error -c "import ca_core_news_sm; nlp = ca_core_news_sm.load(); doc=nlp('test')" -# if: matrix.python_version == '3.9' + - name: "Test download CLI" + run: | + python -m spacy download ca_core_news_sm + python -m spacy download ca_core_news_md + python -c "import spacy; nlp=spacy.load('ca_core_news_sm'); doc=nlp('test')" + if: matrix.python_version == '3.9' + + - name: "Test download_url in info CLI" + run: | + python -W error -m spacy info ca_core_news_sm | grep -q download_url + if: matrix.python_version == '3.9' + + - name: "Test no warnings on load (#11713)" + run: | + python -W error -c "import ca_core_news_sm; nlp = ca_core_news_sm.load(); 
doc=nlp('test')" + if: matrix.python_version == '3.9' - name: "Test convert CLI" run: | @@ -146,17 +154,17 @@ jobs: python -m spacy train ner.cfg --paths.train ner-token-per-line-conll2003.spacy --paths.dev ner-token-per-line-conll2003.spacy --training.max_steps 10 --gpu-id -1 if: matrix.python_version == '3.9' -# - name: "Test assemble CLI" -# run: | -# python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_sm'}; config.to_disk('ner_source_sm.cfg')" -# PYTHONWARNINGS="error,ignore::DeprecationWarning" python -m spacy assemble ner_source_sm.cfg output_dir -# if: matrix.python_version == '3.9' -# -# - name: "Test assemble CLI vectors warning" -# run: | -# python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_md'}; config.to_disk('ner_source_md.cfg')" -# python -m spacy assemble ner_source_md.cfg output_dir 2>&1 | grep -q W113 -# if: matrix.python_version == '3.9' + - name: "Test assemble CLI" + run: | + python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_sm'}; config.to_disk('ner_source_sm.cfg')" + PYTHONWARNINGS="error,ignore::DeprecationWarning" python -m spacy assemble ner_source_sm.cfg output_dir + if: matrix.python_version == '3.9' + + - name: "Test assemble CLI vectors warning" + run: | + python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_md'}; config.to_disk('ner_source_md.cfg')" + python -m spacy assemble ner_source_md.cfg output_dir 2>&1 | grep -q W113 + if: matrix.python_version == '3.9' - name: "Install test requirements" run: | diff --git a/.github/workflows/universe_validation.yml b/.github/workflows/universe_validation.yml index a1e3253a9..4d492500c 100644 --- a/.github/workflows/universe_validation.yml +++ b/.github/workflows/universe_validation.yml @@ -26,7 +26,6 @@ jobs: uses: actions/setup-python@v4 with: python-version: "3.7" - architecture: x64 - name: Validate website/meta/universe.json run: | diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f6f6dab59..ed75e1fd8 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -452,10 +452,9 @@ and plugins in spaCy v3.0, and we can't wait to see what you build with it! spaCy website. If you're sharing your project on Twitter, feel free to tag [@spacy_io](https://twitter.com/spacy_io) so we can check it out. -- Once your extension is published, you can open an issue on the - [issue tracker](https://github.com/explosion/spacy/issues) to suggest it for the - [resources directory](https://spacy.io/usage/resources#extensions) on the - website. +- Once your extension is published, you can open a + [PR](https://github.com/explosion/spaCy/pulls) to suggest it for the + [Universe](https://spacy.io/universe) page. 
📖 **For more tips and best practices, see the [checklist for developing spaCy extensions](https://spacy.io/usage/processing-pipelines#extensions).** diff --git a/LICENSE b/LICENSE index d76864579..979f5ade7 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (C) 2016-2022 ExplosionAI GmbH, 2016 spaCy GmbH, 2015 Matthew Honnibal +Copyright (C) 2016-2023 ExplosionAI GmbH, 2016 spaCy GmbH, 2015 Matthew Honnibal Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/Makefile b/Makefile index 4de628663..c8f68be7f 100644 --- a/Makefile +++ b/Makefile @@ -1,11 +1,11 @@ SHELL := /bin/bash ifndef SPACY_EXTRAS -override SPACY_EXTRAS = spacy-lookups-data==1.0.2 jieba spacy-pkuseg==0.0.28 sudachipy sudachidict_core pymorphy2 +override SPACY_EXTRAS = spacy-lookups-data==1.0.3 endif ifndef PYVER -override PYVER = 3.6 +override PYVER = 3.8 endif VENV := ./env$(PYVER) diff --git a/README.md b/README.md index 59d3ee9ee..afa96363b 100644 --- a/README.md +++ b/README.md @@ -6,23 +6,20 @@ spaCy is a library for **advanced Natural Language Processing** in Python and Cython. It's built on the very latest research, and was designed from day one to be used in real products. -spaCy comes with -[pretrained pipelines](https://spacy.io/models) and -currently supports tokenization and training for **70+ languages**. It features -state-of-the-art speed and **neural network models** for tagging, -parsing, **named entity recognition**, **text classification** and more, -multi-task learning with pretrained **transformers** like BERT, as well as a +spaCy comes with [pretrained pipelines](https://spacy.io/models) and currently +supports tokenization and training for **70+ languages**. It features +state-of-the-art speed and **neural network models** for tagging, parsing, +**named entity recognition**, **text classification** and more, multi-task +learning with pretrained **transformers** like BERT, as well as a production-ready [**training system**](https://spacy.io/usage/training) and easy model packaging, deployment and workflow management. spaCy is commercial -open-source software, released under the [MIT license](https://github.com/explosion/spaCy/blob/master/LICENSE). +open-source software, released under the +[MIT license](https://github.com/explosion/spaCy/blob/master/LICENSE). 
-💥 **We'd love to hear more about your experience with spaCy!** -[Fill out our survey here.](https://form.typeform.com/to/aMel9q9f) - -💫 **Version 3.5 out now!** +💫 **Version 3.7 out now!** [Check out the release notes here.](https://github.com/explosion/spaCy/releases) -[![Azure Pipelines](https://img.shields.io/azure-devops/build/explosion-ai/public/8/master.svg?logo=azure-pipelines&style=flat-square&label=build)](https://dev.azure.com/explosion-ai/public/_build?definitionId=8) +[![tests](https://github.com/explosion/spaCy/actions/workflows/tests.yml/badge.svg)](https://github.com/explosion/spaCy/actions/workflows/tests.yml) [![Current Release Version](https://img.shields.io/github/release/explosion/spacy.svg?style=flat-square&logo=github)](https://github.com/explosion/spaCy/releases) [![pypi Version](https://img.shields.io/pypi/v/spacy.svg?style=flat-square&logo=pypi&logoColor=white)](https://pypi.org/project/spacy/) [![conda Version](https://img.shields.io/conda/vn/conda-forge/spacy.svg?style=flat-square&logo=conda-forge&logoColor=white)](https://anaconda.org/conda-forge/spacy) @@ -35,35 +32,42 @@ open-source software, released under the [MIT license](https://github.com/explos ## 📖 Documentation -| Documentation | | -| ----------------------------- | ---------------------------------------------------------------------- | -| ⭐️ **[spaCy 101]** | New to spaCy? Here's everything you need to know! | -| 📚 **[Usage Guides]** | How to use spaCy and its features. | -| 🚀 **[New in v3.0]** | New features, backwards incompatibilities and migration guide. | -| 🪐 **[Project Templates]** | End-to-end workflows you can clone, modify and run. | -| 🎛 **[API Reference]** | The detailed reference for spaCy's API. | -| 📦 **[Models]** | Download trained pipelines for spaCy. | -| 🌌 **[Universe]** | Plugins, extensions, demos and books from the spaCy ecosystem. | -| ⚙️ **[spaCy VS Code Extension]** | Additional tooling and features for working with spaCy's config files. | -| 👩‍🏫 **[Online Course]** | Learn spaCy in this free and interactive online course. | -| 📺 **[Videos]** | Our YouTube channel with video tutorials, talks and more. | -| 🛠 **[Changelog]** | Changes and version history. | -| 💝 **[Contribute]** | How to contribute to the spaCy project and code base. | -| spaCy Tailored Pipelines | Get a custom spaCy pipeline, tailor-made for your NLP problem by spaCy's core developers. Streamlined, production-ready, predictable and maintainable. Start by completing our 5-minute questionnaire to tell us what you need and we'll be in touch! **[Learn more →](https://explosion.ai/spacy-tailored-pipelines)** | -| spaCy Tailored Pipelines | Bespoke advice for problem solving, strategy and analysis for applied NLP projects. Services include data strategy, code reviews, pipeline design and annotation coaching. Curious? Fill in our 5-minute questionnaire to tell us what you need and we'll be in touch! 
**[Learn more →](https://explosion.ai/spacy-tailored-analysis)** | +| Documentation | | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| ⭐️ **[spaCy 101]** | New to spaCy? Here's everything you need to know! | +| 📚 **[Usage Guides]** | How to use spaCy and its features. | +| 🚀 **[New in v3.0]** | New features, backwards incompatibilities and migration guide. | +| 🪐 **[Project Templates]** | End-to-end workflows you can clone, modify and run. | +| 🎛 **[API Reference]** | The detailed reference for spaCy's API. | +| ⏩ **[GPU Processing]** | Use spaCy with CUDA-compatible GPU processing. | +| 📦 **[Models]** | Download trained pipelines for spaCy. | +| 🦙 **[Large Language Models]** | Integrate LLMs into spaCy pipelines. | +| 🌌 **[Universe]** | Plugins, extensions, demos and books from the spaCy ecosystem. | +| ⚙️ **[spaCy VS Code Extension]** | Additional tooling and features for working with spaCy's config files. | +| 👩‍🏫 **[Online Course]** | Learn spaCy in this free and interactive online course. | +| 📰 **[Blog]** | Read about current spaCy and Prodigy development, releases, talks and more from Explosion. | +| 📺 **[Videos]** | Our YouTube channel with video tutorials, talks and more. | +| 🛠 **[Changelog]** | Changes and version history. | +| 💝 **[Contribute]** | How to contribute to the spaCy project and code base. | +| 👕 **[Swag]** | Support us and our work with unique, custom-designed swag! | +| Tailored Solutions | Custom NLP consulting, implementation and strategic advice by spaCy’s core development team. Streamlined, production-ready, predictable and maintainable. Send us an email or take our 5-minute questionnaire, and well'be in touch! **[Learn more →](https://explosion.ai/tailored-solutions)** | [spacy 101]: https://spacy.io/usage/spacy-101 [new in v3.0]: https://spacy.io/usage/v3 [usage guides]: https://spacy.io/usage/ [api reference]: https://spacy.io/api/ +[gpu processing]: https://spacy.io/usage#gpu [models]: https://spacy.io/models +[large language models]: https://spacy.io/usage/large-language-models [universe]: https://spacy.io/universe -[spaCy VS Code Extension]: https://github.com/explosion/spacy-vscode +[spacy vs code extension]: https://github.com/explosion/spacy-vscode [videos]: https://www.youtube.com/c/ExplosionAI [online course]: https://course.spacy.io +[blog]: https://explosion.ai [project templates]: https://github.com/explosion/projects [changelog]: https://spacy.io/usage#changelog [contribute]: https://github.com/explosion/spaCy/blob/master/CONTRIBUTING.md +[swag]: https://explosion.ai/merch ## 💬 Where to ask questions @@ -92,7 +96,9 @@ more people can benefit from it. 
- State-of-the-art speed - Production-ready **training system** - Linguistically-motivated **tokenization** -- Components for named **entity recognition**, part-of-speech-tagging, dependency parsing, sentence segmentation, **text classification**, lemmatization, morphological analysis, entity linking and more +- Components for named **entity recognition**, part-of-speech-tagging, + dependency parsing, sentence segmentation, **text classification**, + lemmatization, morphological analysis, entity linking and more - Easily extensible with **custom components** and attributes - Support for custom models in **PyTorch**, **TensorFlow** and other frameworks - Built in **visualizers** for syntax and NER @@ -109,7 +115,7 @@ For detailed installation instructions, see the - **Operating system**: macOS / OS X · Linux · Windows (Cygwin, MinGW, Visual Studio) -- **Python version**: Python 3.6+ (only 64 bit) +- **Python version**: Python 3.7+ (only 64 bit) - **Package managers**: [pip] · [conda] (via `conda-forge`) [pip]: https://pypi.org/project/spacy/ @@ -118,8 +124,8 @@ For detailed installation instructions, see the ### pip Using pip, spaCy releases are available as source packages and binary wheels. -Before you install spaCy and its dependencies, make sure that -your `pip`, `setuptools` and `wheel` are up to date. +Before you install spaCy and its dependencies, make sure that your `pip`, +`setuptools` and `wheel` are up to date. ```bash pip install -U pip setuptools wheel @@ -174,9 +180,9 @@ with the new version. ## 📦 Download model packages -Trained pipelines for spaCy can be installed as **Python packages**. This -means that they're a component of your application, just like any other module. -Models can be installed using spaCy's [`download`](https://spacy.io/api/cli#download) +Trained pipelines for spaCy can be installed as **Python packages**. This means +that they're a component of your application, just like any other module. Models +can be installed using spaCy's [`download`](https://spacy.io/api/cli#download) command, or manually by pointing pip to a path or URL. | Documentation | | @@ -242,8 +248,7 @@ do that depends on your system. | **Mac** | Install a recent version of [XCode](https://developer.apple.com/xcode/), including the so-called "Command Line Tools". macOS and OS X ship with Python and git preinstalled. | | **Windows** | Install a version of the [Visual C++ Build Tools](https://visualstudio.microsoft.com/visual-cpp-build-tools/) or [Visual Studio Express](https://visualstudio.microsoft.com/vs/express/) that matches the version that was used to compile your Python interpreter. | -For more details -and instructions, see the documentation on +For more details and instructions, see the documentation on [compiling spaCy from source](https://spacy.io/usage#source) and the [quickstart widget](https://spacy.io/usage#section-quickstart) to get the right commands for your platform and Python version. 
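To make the package-based model workflow described in the README hunks above concrete, here is a minimal sketch. The pipeline name `en_core_web_sm` is only an example and must be one that exists for your installed spaCy version.

```python
import spacy
from spacy.cli import download

# Equivalent to: python -m spacy download en_core_web_sm
download("en_core_web_sm")

# Once installed, the pipeline loads like any other Python package.
nlp = spacy.load("en_core_web_sm")
doc = nlp("spaCy pipelines install as regular Python packages.")
print([(token.text, token.pos_) for token in doc])
```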
diff --git a/build-constraints.txt b/build-constraints.txt index c1e82f1b0..b1cf596ca 100644 --- a/build-constraints.txt +++ b/build-constraints.txt @@ -1,9 +1,6 @@ -# build version constraints for use with wheelwright + multibuild -numpy==1.15.0; python_version<='3.7' and platform_machine!='aarch64' -numpy==1.19.2; python_version<='3.7' and platform_machine=='aarch64' +# build version constraints for use with wheelwright +numpy==1.15.0; python_version=='3.7' and platform_machine!='aarch64' +numpy==1.19.2; python_version=='3.7' and platform_machine=='aarch64' numpy==1.17.3; python_version=='3.8' and platform_machine!='aarch64' numpy==1.19.2; python_version=='3.8' and platform_machine=='aarch64' -numpy==1.19.3; python_version=='3.9' -numpy==1.21.3; python_version=='3.10' -numpy==1.23.2; python_version=='3.11' -numpy; python_version>='3.12' +numpy>=1.25.0; python_version>='3.9' diff --git a/extra/DEVELOPER_DOCS/Listeners.md b/extra/DEVELOPER_DOCS/Listeners.md index 3a71082e0..72c036880 100644 --- a/extra/DEVELOPER_DOCS/Listeners.md +++ b/extra/DEVELOPER_DOCS/Listeners.md @@ -1,14 +1,17 @@ # Listeners -1. [Overview](#1-overview) -2. [Initialization](#2-initialization) - - [A. Linking listeners to the embedding component](#2a-linking-listeners-to-the-embedding-component) - - [B. Shape inference](#2b-shape-inference) -3. [Internal communication](#3-internal-communication) - - [A. During prediction](#3a-during-prediction) - - [B. During training](#3b-during-training) - - [C. Frozen components](#3c-frozen-components) -4. [Replacing listener with standalone](#4-replacing-listener-with-standalone) +- [1. Overview](#1-overview) +- [2. Initialization](#2-initialization) + - [2A. Linking listeners to the embedding component](#2a-linking-listeners-to-the-embedding-component) + - [2B. Shape inference](#2b-shape-inference) +- [3. Internal communication](#3-internal-communication) + - [3A. During prediction](#3a-during-prediction) + - [3B. During training](#3b-during-training) + - [Training with multiple listeners](#training-with-multiple-listeners) + - [3C. Frozen components](#3c-frozen-components) + - [The Tok2Vec or Transformer is frozen](#the-tok2vec-or-transformer-is-frozen) + - [The upstream component is frozen](#the-upstream-component-is-frozen) +- [4. Replacing listener with standalone](#4-replacing-listener-with-standalone) ## 1. Overview @@ -62,7 +65,7 @@ of this `find_listener()` method will specifically identify sublayers of a model If it's a Transformer-based pipeline, a [`transformer` component](https://github.com/explosion/spacy-transformers/blob/master/spacy_transformers/pipeline_component.py) -has a similar implementation but its `find_listener()` function will specifically look for `TransformerListener` +has a similar implementation but its `find_listener()` function will specifically look for `TransformerListener` sublayers of downstream components. ### 2B. Shape inference @@ -154,7 +157,7 @@ as a tagger or a parser. This used to be impossible before 3.1, but has become s embedding component in the [`annotating_components`](https://spacy.io/usage/training#annotating-components) list of the config. This works like any other "annotating component" because it relies on the `Doc` attributes. 
-However, if the `Tok2Vec` or `Transformer` is frozen, and not present in `annotating_components`, and a related +However, if the `Tok2Vec` or `Transformer` is frozen, and not present in `annotating_components`, and a related listener isn't frozen, then a `W086` warning is shown and further training of the pipeline will likely end with `E954`. #### The upstream component is frozen @@ -216,5 +219,17 @@ new_model = tok2vec_model.attrs["replace_listener"](new_model) ``` The new config and model are then properly stored on the `nlp` object. -Note that this functionality (running the replacement for a transformer listener) was broken prior to +Note that this functionality (running the replacement for a transformer listener) was broken prior to `spacy-transformers` 1.0.5. + +In spaCy 3.7, `Language.replace_listeners` was updated to pass the following additional arguments to the `replace_listener` callback: +the listener to be replaced and the `tok2vec`/`transformer` pipe from which the new model was copied. To maintain backwards-compatiblity, +the method only passes these extra arguments for callbacks that support them: + +``` +def replace_listener_pre_37(copied_tok2vec_model): + ... + +def replace_listener_post_37(copied_tok2vec_model, replaced_listener, tok2vec_pipe): + ... +``` diff --git a/licenses/3rd_party_licenses.txt b/licenses/3rd_party_licenses.txt index 851e09585..9b037a496 100644 --- a/licenses/3rd_party_licenses.txt +++ b/licenses/3rd_party_licenses.txt @@ -158,3 +158,45 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +SciPy +----- + +* Files: scorer.py + +The implementation of trapezoid() is adapted from SciPy, which is distributed +under the following license: + +New BSD License + +Copyright (c) 2001-2002 Enthought, Inc. 2003-2023, SciPy Developers. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/pyproject.toml b/pyproject.toml index 9cd96ac2d..bfd7e68d1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,11 @@ requires = [ "cymem>=2.0.2,<2.1.0", "preshed>=3.0.2,<3.1.0", "murmurhash>=0.28.0,<1.1.0", - "thinc>=8.1.8,<8.2.0", - "numpy>=1.15.0", + "thinc>=8.2.2,<8.3.0", + "numpy>=1.15.0; python_version < '3.9'", + "numpy>=1.25.0; python_version >= '3.9'", ] build-backend = "setuptools.build_meta" + +[tool.isort] +profile = "black" diff --git a/requirements.txt b/requirements.txt index b979929c5..54b8f22a1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,20 +3,20 @@ spacy-legacy>=3.0.11,<3.1.0 spacy-loggers>=1.0.0,<2.0.0 cymem>=2.0.2,<2.1.0 preshed>=3.0.2,<3.1.0 -thinc>=8.1.8,<8.2.0 +thinc>=8.2.2,<8.3.0 ml_datasets>=0.2.0,<0.3.0 murmurhash>=0.28.0,<1.1.0 wasabi>=0.9.1,<1.2.0 srsly>=2.4.3,<3.0.0 catalogue>=2.0.6,<2.1.0 typer>=0.3.0,<0.10.0 -pathy>=0.10.0 -smart-open>=5.2.1,<7.0.0 +weasel>=0.1.0,<0.5.0 # Third party dependencies -numpy>=1.15.0 +numpy>=1.15.0; python_version < "3.9" +numpy>=1.19.0; python_version >= "3.9" requests>=2.13.0,<3.0.0 tqdm>=4.38.0,<5.0.0 -pydantic>=1.7.4,!=1.8,!=1.8.1,<1.11.0 +pydantic>=1.7.4,!=1.8,!=1.8.1,<3.0.0 jinja2 langcodes>=3.2.0,<4.0.0 # Official Python utilities @@ -31,10 +31,11 @@ pytest-timeout>=1.3.0,<2.0.0 mock>=2.0.0,<3.0.0 flake8>=3.8.0,<6.0.0 hypothesis>=3.27.0,<7.0.0 -mypy>=0.990,<1.1.0; platform_machine != "aarch64" and python_version >= "3.7" -types-dataclasses>=0.1.3; python_version < "3.7" +mypy>=1.5.0,<1.6.0; platform_machine != "aarch64" and python_version >= "3.8" types-mock>=0.1.1 types-setuptools>=57.0.0 types-requests types-setuptools>=57.0.0 black==22.3.0 +cython-lint>=0.15.0 +isort>=5.0,<6.0 diff --git a/setup.cfg b/setup.cfg index 45734888f..899e808cb 100644 --- a/setup.cfg +++ b/setup.cfg @@ -17,12 +17,12 @@ classifiers = Operating System :: Microsoft :: Windows Programming Language :: Cython Programming Language :: Python :: 3 - Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 Programming Language :: Python :: 3.11 + Programming Language :: Python :: 3.12 Topic :: Scientific/Engineering project_urls = Release notes = https://github.com/explosion/spaCy/releases @@ -31,15 +31,18 @@ project_urls = [options] zip_safe = false include_package_data = true -python_requires = >=3.6 +python_requires = >=3.7 +# NOTE: This section is superseded by pyproject.toml and will be removed in +# spaCy v4 setup_requires = cython>=0.25,<3.0 - numpy>=1.15.0 + numpy>=1.15.0; python_version < "3.9" + numpy>=1.19.0; python_version >= "3.9" # We also need our Cython packages here to compile against cymem>=2.0.2,<2.1.0 preshed>=3.0.2,<3.1.0 murmurhash>=0.28.0,<1.1.0 - thinc>=8.1.8,<8.2.0 + thinc>=8.2.2,<8.3.0 install_requires = # Our libraries spacy-legacy>=3.0.11,<3.1.0 @@ -47,18 +50,18 @@ install_requires = murmurhash>=0.28.0,<1.1.0 cymem>=2.0.2,<2.1.0 preshed>=3.0.2,<3.1.0 - thinc>=8.1.8,<8.2.0 + thinc>=8.2.2,<8.3.0 wasabi>=0.9.1,<1.2.0 srsly>=2.4.3,<3.0.0 catalogue>=2.0.6,<2.1.0 + weasel>=0.1.0,<0.5.0 # Third-party dependencies typer>=0.3.0,<0.10.0 - pathy>=0.10.0 - smart-open>=5.2.1,<7.0.0 tqdm>=4.38.0,<5.0.0 - numpy>=1.15.0 + numpy>=1.15.0; python_version < "3.9" + numpy>=1.19.0; python_version >= "3.9" requests>=2.13.0,<3.0.0 - pydantic>=1.7.4,!=1.8,!=1.8.1,<1.11.0 + pydantic>=1.7.4,!=1.8,!=1.8.1,<3.0.0 jinja2 # Official Python utilities setuptools @@ -74,9 +77,7 @@ 
console_scripts = lookups = spacy_lookups_data>=1.0.3,<1.1.0 transformers = - spacy_transformers>=1.1.2,<1.3.0 -ray = - spacy_ray>=0.1.0,<1.0.0 + spacy_transformers>=1.1.2,<1.4.0 cuda = cupy>=5.0.0b4,<13.0.0 cuda80 = @@ -111,6 +112,8 @@ cuda117 = cupy-cuda117>=5.0.0b4,<13.0.0 cuda11x = cupy-cuda11x>=11.0.0,<13.0.0 +cuda12x = + cupy-cuda12x>=11.5.0,<13.0.0 cuda-autodetect = cupy-wheel>=11.0.0,<13.0.0 apple = diff --git a/setup.py b/setup.py index 243554c7a..33178662d 100755 --- a/setup.py +++ b/setup.py @@ -1,10 +1,9 @@ #!/usr/bin/env python from setuptools import Extension, setup, find_packages import sys -import platform import numpy -from distutils.command.build_ext import build_ext -from distutils.sysconfig import get_python_inc +from setuptools.command.build_ext import build_ext +from sysconfig import get_path from pathlib import Path import shutil from Cython.Build import cythonize @@ -79,6 +78,7 @@ COMPILER_DIRECTIVES = { "language_level": -3, "embedsignature": True, "annotation_typing": False, + "profile": sys.version_info < (3, 12), } # Files to copy into the package that are otherwise not included COPY_FILES = { @@ -88,30 +88,6 @@ COPY_FILES = { } -def is_new_osx(): - """Check whether we're on OSX >= 10.7""" - if sys.platform != "darwin": - return False - mac_ver = platform.mac_ver()[0] - if mac_ver.startswith("10"): - minor_version = int(mac_ver.split(".")[1]) - if minor_version >= 7: - return True - else: - return False - return False - - -if is_new_osx(): - # On Mac, use libc++ because Apple deprecated use of - # libstdc - COMPILE_OPTIONS["other"].append("-stdlib=libc++") - LINK_OPTIONS["other"].append("-lc++") - # g++ (used by unix compiler on mac) links to libstdc++ as a default lib. - # See: https://stackoverflow.com/questions/1653047/avoid-linking-to-libstdc - LINK_OPTIONS["other"].append("-nodefaultlibs") - - # By subclassing build_extensions we have the actual compiler that will be used which is really known only after finalize_options # http://stackoverflow.com/questions/724664/python-distutils-how-to-get-a-compiler-that-is-going-to-be-used class build_ext_options: @@ -204,7 +180,7 @@ def setup_package(): include_dirs = [ numpy.get_include(), - get_python_inc(plat_specific=True), + get_path("include"), ] ext_modules = [] ext_modules.append( diff --git a/spacy/__init__.py b/spacy/__init__.py index c3568bc5c..1a18ad0d5 100644 --- a/spacy/__init__.py +++ b/spacy/__init__.py @@ -1,6 +1,6 @@ -from typing import Union, Iterable, Dict, Any -from pathlib import Path import sys +from pathlib import Path +from typing import Any, Dict, Iterable, Union # set library-specific custom warning handling before doing anything else from .errors import setup_default_warnings @@ -8,20 +8,17 @@ from .errors import setup_default_warnings setup_default_warnings() # noqa: E402 # These are imported as part of the API -from thinc.api import prefer_gpu, require_gpu, require_cpu # noqa: F401 -from thinc.api import Config +from thinc.api import Config, prefer_gpu, require_cpu, require_gpu # noqa: F401 from . import pipeline # noqa: F401 -from .cli.info import info # noqa: F401 -from .glossary import explain # noqa: F401 -from .about import __version__ # noqa: F401 -from .util import registry, logger # noqa: F401 - -from .errors import Errors -from .language import Language -from .vocab import Vocab from . 
import util - +from .about import __version__ # noqa: F401 +from .cli.info import info # noqa: F401 +from .errors import Errors +from .glossary import explain # noqa: F401 +from .language import Language +from .util import logger, registry # noqa: F401 +from .vocab import Vocab if sys.maxunicode == 65535: raise SystemError(Errors.E130) diff --git a/spacy/about.py b/spacy/about.py index 7c0a59b4e..f5ee66dae 100644 --- a/spacy/about.py +++ b/spacy/about.py @@ -1,7 +1,5 @@ # fmt: off __title__ = "spacy" -__version__ = "3.6.0.dev1" +__version__ = "3.7.4" __download_url__ = "https://github.com/explosion/spacy-models/releases/download" __compatibility__ = "https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json" -__projects__ = "https://github.com/explosion/projects" -__projects_branch__ = "v3" diff --git a/spacy/attrs.pxd b/spacy/attrs.pxd index 33d5372de..fbbac0ec2 100644 --- a/spacy/attrs.pxd +++ b/spacy/attrs.pxd @@ -1,6 +1,7 @@ # Reserve 64 values for flag features from . cimport symbols + cdef enum attr_id_t: NULL_ATTR IS_ALPHA @@ -95,4 +96,4 @@ cdef enum attr_id_t: ENT_ID = symbols.ENT_ID IDX - SENT_END \ No newline at end of file + SENT_END diff --git a/spacy/attrs.pyx b/spacy/attrs.pyx index dc8eed7c3..363dd094d 100644 --- a/spacy/attrs.pyx +++ b/spacy/attrs.pyx @@ -1,3 +1,4 @@ +# cython: profile=False from .errors import Errors IOB_STRINGS = ("", "I", "O", "B") @@ -117,7 +118,7 @@ def intify_attrs(stringy_attrs, strings_map=None, _do_deprecated=False): if "pos" in stringy_attrs: stringy_attrs["TAG"] = stringy_attrs.pop("pos") if "morph" in stringy_attrs: - morphs = stringy_attrs.pop("morph") + morphs = stringy_attrs.pop("morph") # no-cython-lint if "number" in stringy_attrs: stringy_attrs.pop("number") if "tenspect" in stringy_attrs: diff --git a/spacy/cli/__init__.py b/spacy/cli/__init__.py index 868526b42..3095778fe 100644 --- a/spacy/cli/__init__.py +++ b/spacy/cli/__init__.py @@ -1,35 +1,40 @@ from wasabi import msg +# Needed for testing +from . import download as download_module # noqa: F401 from ._util import app, setup_cli # noqa: F401 +from .apply import apply # noqa: F401 +from .assemble import assemble_cli # noqa: F401 # These are the actual functions, NOT the wrapped CLI commands. The CLI commands # are registered automatically and won't have to be imported here. 
from .benchmark_speed import benchmark_speed_cli # noqa: F401 -from .download import download # noqa: F401 -from .info import info # noqa: F401 -from .package import package # noqa: F401 -from .profile import profile # noqa: F401 -from .train import train_cli # noqa: F401 -from .assemble import assemble_cli # noqa: F401 -from .pretrain import pretrain # noqa: F401 -from .debug_data import debug_data # noqa: F401 -from .debug_config import debug_config # noqa: F401 -from .debug_model import debug_model # noqa: F401 -from .debug_diff import debug_diff # noqa: F401 -from .evaluate import evaluate # noqa: F401 -from .apply import apply # noqa: F401 from .convert import convert # noqa: F401 -from .init_pipeline import init_pipeline_cli # noqa: F401 -from .init_config import init_config, fill_config # noqa: F401 -from .validate import validate # noqa: F401 -from .project.clone import project_clone # noqa: F401 -from .project.assets import project_assets # noqa: F401 -from .project.run import project_run # noqa: F401 -from .project.dvc import project_update_dvc # noqa: F401 -from .project.push import project_push # noqa: F401 -from .project.pull import project_pull # noqa: F401 -from .project.document import project_document # noqa: F401 +from .debug_config import debug_config # noqa: F401 +from .debug_data import debug_data # noqa: F401 +from .debug_diff import debug_diff # noqa: F401 +from .debug_model import debug_model # noqa: F401 +from .download import download # noqa: F401 +from .evaluate import evaluate # noqa: F401 +from .find_function import find_function # noqa: F401 from .find_threshold import find_threshold # noqa: F401 +from .info import info # noqa: F401 +from .init_config import fill_config, init_config # noqa: F401 +from .init_pipeline import init_pipeline_cli # noqa: F401 +from .package import package # noqa: F401 +from .pretrain import pretrain # noqa: F401 +from .profile import profile # noqa: F401 +from .project.assets import project_assets # type: ignore[attr-defined] # noqa: F401 +from .project.clone import project_clone # type: ignore[attr-defined] # noqa: F401 +from .project.document import ( # type: ignore[attr-defined] # noqa: F401 + project_document, +) +from .project.dvc import project_update_dvc # type: ignore[attr-defined] # noqa: F401 +from .project.pull import project_pull # type: ignore[attr-defined] # noqa: F401 +from .project.push import project_push # type: ignore[attr-defined] # noqa: F401 +from .project.run import project_run # type: ignore[attr-defined] # noqa: F401 +from .train import train_cli # type: ignore[attr-defined] # noqa: F401 +from .validate import validate # type: ignore[attr-defined] # noqa: F401 @app.command("link", no_args_is_help=True, deprecated=True, hidden=True) diff --git a/spacy/cli/_util.py b/spacy/cli/_util.py index f104feff9..fa41e6a08 100644 --- a/spacy/cli/_util.py +++ b/spacy/cli/_util.py @@ -1,36 +1,50 @@ -from typing import Dict, Any, Union, List, Optional, Tuple, Iterable -from typing import TYPE_CHECKING, overload -import sys -import shutil -from pathlib import Path -from wasabi import msg, Printer -import srsly import hashlib +import os +import shutil +import sys +from configparser import InterpolationError +from contextlib import contextmanager +from pathlib import Path +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Iterable, + List, + Optional, + Tuple, + Union, + overload, +) + +import srsly import typer from click import NoSuchOption from click.parser import split_arg_string -from typer.main import get_command 
-from contextlib import contextmanager from thinc.api import Config, ConfigValidationError, require_gpu from thinc.util import gpu_is_available -from configparser import InterpolationError -import os +from typer.main import get_command +from wasabi import Printer, msg +from weasel import app as project_cli -from ..compat import Literal -from ..schemas import ProjectConfigSchema, validate -from ..util import import_file, run_command, make_tempdir, registry, logger -from ..util import is_compatible_version, SimpleFrozenDict, ENV_VARS from .. import about - -if TYPE_CHECKING: - from pathy import FluidPath # noqa: F401 - +from ..compat import Literal +from ..schemas import validate +from ..util import ( + ENV_VARS, + SimpleFrozenDict, + import_file, + is_compatible_version, + logger, + make_tempdir, + registry, + run_command, +) SDIST_SUFFIX = ".tar.gz" WHEEL_SUFFIX = "-py3-none-any.whl" PROJECT_FILE = "project.yml" -PROJECT_LOCK = "project.lock" COMMAND = "python -m spacy" NAME = "spacy" HELP = """spaCy Command-line Interface @@ -56,11 +70,10 @@ Opt = typer.Option app = typer.Typer(name=NAME, help=HELP) benchmark_cli = typer.Typer(name="benchmark", help=BENCHMARK_HELP, no_args_is_help=True) -project_cli = typer.Typer(name="project", help=PROJECT_HELP, no_args_is_help=True) debug_cli = typer.Typer(name="debug", help=DEBUG_HELP, no_args_is_help=True) init_cli = typer.Typer(name="init", help=INIT_HELP, no_args_is_help=True) -app.add_typer(project_cli) +app.add_typer(project_cli, name="project", help=PROJECT_HELP, no_args_is_help=True) app.add_typer(debug_cli) app.add_typer(benchmark_cli) app.add_typer(init_cli) @@ -135,148 +148,6 @@ def _parse_override(value: Any) -> Any: return str(value) -def load_project_config( - path: Path, interpolate: bool = True, overrides: Dict[str, Any] = SimpleFrozenDict() -) -> Dict[str, Any]: - """Load the project.yml file from a directory and validate it. Also make - sure that all directories defined in the config exist. - - path (Path): The path to the project directory. - interpolate (bool): Whether to substitute project variables. - overrides (Dict[str, Any]): Optional config overrides. - RETURNS (Dict[str, Any]): The loaded project.yml. - """ - config_path = path / PROJECT_FILE - if not config_path.exists(): - msg.fail(f"Can't find {PROJECT_FILE}", config_path, exits=1) - invalid_err = f"Invalid {PROJECT_FILE}. Double-check that the YAML is correct." - try: - config = srsly.read_yaml(config_path) - except ValueError as e: - msg.fail(invalid_err, e, exits=1) - errors = validate(ProjectConfigSchema, config) - if errors: - msg.fail(invalid_err) - print("\n".join(errors)) - sys.exit(1) - validate_project_version(config) - validate_project_commands(config) - if interpolate: - err = f"{PROJECT_FILE} validation error" - with show_validation_error(title=err, hint_fill=False): - config = substitute_project_variables(config, overrides) - # Make sure directories defined in config exist - for subdir in config.get("directories", []): - dir_path = path / subdir - if not dir_path.exists(): - dir_path.mkdir(parents=True) - return config - - -def substitute_project_variables( - config: Dict[str, Any], - overrides: Dict[str, Any] = SimpleFrozenDict(), - key: str = "vars", - env_key: str = "env", -) -> Dict[str, Any]: - """Interpolate variables in the project file using the config system. - - config (Dict[str, Any]): The project config. - overrides (Dict[str, Any]): Optional config overrides. - key (str): Key containing variables in project config. 
- env_key (str): Key containing environment variable mapping in project config. - RETURNS (Dict[str, Any]): The interpolated project config. - """ - config.setdefault(key, {}) - config.setdefault(env_key, {}) - # Substitute references to env vars with their values - for config_var, env_var in config[env_key].items(): - config[env_key][config_var] = _parse_override(os.environ.get(env_var, "")) - # Need to put variables in the top scope again so we can have a top-level - # section "project" (otherwise, a list of commands in the top scope wouldn't) - # be allowed by Thinc's config system - cfg = Config({"project": config, key: config[key], env_key: config[env_key]}) - cfg = Config().from_str(cfg.to_str(), overrides=overrides) - interpolated = cfg.interpolate() - return dict(interpolated["project"]) - - -def validate_project_version(config: Dict[str, Any]) -> None: - """If the project defines a compatible spaCy version range, chec that it's - compatible with the current version of spaCy. - - config (Dict[str, Any]): The loaded config. - """ - spacy_version = config.get("spacy_version", None) - if spacy_version and not is_compatible_version(about.__version__, spacy_version): - err = ( - f"The {PROJECT_FILE} specifies a spaCy version range ({spacy_version}) " - f"that's not compatible with the version of spaCy you're running " - f"({about.__version__}). You can edit version requirement in the " - f"{PROJECT_FILE} to load it, but the project may not run as expected." - ) - msg.fail(err, exits=1) - - -def validate_project_commands(config: Dict[str, Any]) -> None: - """Check that project commands and workflows are valid, don't contain - duplicates, don't clash and only refer to commands that exist. - - config (Dict[str, Any]): The loaded config. - """ - command_names = [cmd["name"] for cmd in config.get("commands", [])] - workflows = config.get("workflows", {}) - duplicates = set([cmd for cmd in command_names if command_names.count(cmd) > 1]) - if duplicates: - err = f"Duplicate commands defined in {PROJECT_FILE}: {', '.join(duplicates)}" - msg.fail(err, exits=1) - for workflow_name, workflow_steps in workflows.items(): - if workflow_name in command_names: - err = f"Can't use workflow name '{workflow_name}': name already exists as a command" - msg.fail(err, exits=1) - for step in workflow_steps: - if step not in command_names: - msg.fail( - f"Unknown command specified in workflow '{workflow_name}': {step}", - f"Workflows can only refer to commands defined in the 'commands' " - f"section of the {PROJECT_FILE}.", - exits=1, - ) - - -def get_hash(data, exclude: Iterable[str] = tuple()) -> str: - """Get the hash for a JSON-serializable object. - - data: The data to hash. - exclude (Iterable[str]): Top-level keys to exclude if data is a dict. - RETURNS (str): The hash. - """ - if isinstance(data, dict): - data = {k: v for k, v in data.items() if k not in exclude} - data_str = srsly.json_dumps(data, sort_keys=True).encode("utf8") - return hashlib.md5(data_str).hexdigest() - - -def get_checksum(path: Union[Path, str]) -> str: - """Get the checksum for a file or directory given its file path. If a - directory path is provided, this uses all files in that directory. - - path (Union[Path, str]): The file or directory path. - RETURNS (str): The checksum. 
- """ - path = Path(path) - if not (path.is_file() or path.is_dir()): - msg.fail(f"Can't get checksum for {path}: not a file or directory", exits=1) - if path.is_file(): - return hashlib.md5(Path(path).read_bytes()).hexdigest() - else: - # TODO: this is currently pretty slow - dir_checksum = hashlib.md5() - for sub_file in sorted(fp for fp in path.rglob("*") if fp.is_file()): - dir_checksum.update(sub_file.read_bytes()) - return dir_checksum.hexdigest() - - @contextmanager def show_validation_error( file_path: Optional[Union[str, Path]] = None, @@ -334,166 +205,10 @@ def import_code(code_path: Optional[Union[Path, str]]) -> None: msg.fail(f"Couldn't load Python code: {code_path}", e, exits=1) -def upload_file(src: Path, dest: Union[str, "FluidPath"]) -> None: - """Upload a file. - - src (Path): The source path. - url (str): The destination URL to upload to. - """ - import smart_open - - # Create parent directories for local paths - if isinstance(dest, Path): - if not dest.parent.exists(): - dest.parent.mkdir(parents=True) - - dest = str(dest) - with smart_open.open(dest, mode="wb") as output_file: - with src.open(mode="rb") as input_file: - output_file.write(input_file.read()) - - -def download_file( - src: Union[str, "FluidPath"], dest: Path, *, force: bool = False -) -> None: - """Download a file using smart_open. - - url (str): The URL of the file. - dest (Path): The destination path. - force (bool): Whether to force download even if file exists. - If False, the download will be skipped. - """ - import smart_open - - if dest.exists() and not force: - return None - src = str(src) - with smart_open.open(src, mode="rb", compression="disable") as input_file: - with dest.open(mode="wb") as output_file: - shutil.copyfileobj(input_file, output_file) - - -def ensure_pathy(path): - """Temporary helper to prevent importing Pathy globally (which can cause - slow and annoying Google Cloud warning).""" - from pathy import Pathy # noqa: F811 - - return Pathy.fluid(path) - - -def git_checkout( - repo: str, subpath: str, dest: Path, *, branch: str = "master", sparse: bool = False -): - git_version = get_git_version() - if dest.exists(): - msg.fail("Destination of checkout must not exist", exits=1) - if not dest.parent.exists(): - msg.fail("Parent of destination of checkout must exist", exits=1) - if sparse and git_version >= (2, 22): - return git_sparse_checkout(repo, subpath, dest, branch) - elif sparse: - # Only show warnings if the user explicitly wants sparse checkout but - # the Git version doesn't support it - err_old = ( - f"You're running an old version of Git (v{git_version[0]}.{git_version[1]}) " - f"that doesn't fully support sparse checkout yet." - ) - err_unk = "You're running an unknown version of Git, so sparse checkout has been disabled." - msg.warn( - f"{err_unk if git_version == (0, 0) else err_old} " - f"This means that more files than necessary may be downloaded " - f"temporarily. To only download the files needed, make sure " - f"you're using Git v2.22 or above." - ) - with make_tempdir() as tmp_dir: - cmd = f"git -C {tmp_dir} clone {repo} . -b {branch}" - run_command(cmd, capture=True) - # We need Path(name) to make sure we also support subdirectories - try: - source_path = tmp_dir / Path(subpath) - if not is_subpath_of(tmp_dir, source_path): - err = f"'{subpath}' is a path outside of the cloned repository." - msg.fail(err, repo, exits=1) - shutil.copytree(str(source_path), str(dest)) - except FileNotFoundError: - err = f"Can't clone {subpath}. 
Make sure the directory exists in the repo (branch '{branch}')" - msg.fail(err, repo, exits=1) - - -def git_sparse_checkout(repo, subpath, dest, branch): - # We're using Git, partial clone and sparse checkout to - # only clone the files we need - # This ends up being RIDICULOUS. omg. - # So, every tutorial and SO post talks about 'sparse checkout'...But they - # go and *clone* the whole repo. Worthless. And cloning part of a repo - # turns out to be completely broken. The only way to specify a "path" is.. - # a path *on the server*? The contents of which, specifies the paths. Wat. - # Obviously this is hopelessly broken and insecure, because you can query - # arbitrary paths on the server! So nobody enables this. - # What we have to do is disable *all* files. We could then just checkout - # the path, and it'd "work", but be hopelessly slow...Because it goes and - # transfers every missing object one-by-one. So the final piece is that we - # need to use some weird git internals to fetch the missings in bulk, and - # *that* we can do by path. - # We're using Git and sparse checkout to only clone the files we need - with make_tempdir() as tmp_dir: - # This is the "clone, but don't download anything" part. - cmd = ( - f"git clone {repo} {tmp_dir} --no-checkout --depth 1 " - f"-b {branch} --filter=blob:none" - ) - run_command(cmd) - # Now we need to find the missing filenames for the subpath we want. - # Looking for this 'rev-list' command in the git --help? Hah. - cmd = f"git -C {tmp_dir} rev-list --objects --all --missing=print -- {subpath}" - ret = run_command(cmd, capture=True) - git_repo = _http_to_git(repo) - # Now pass those missings into another bit of git internals - missings = " ".join([x[1:] for x in ret.stdout.split() if x.startswith("?")]) - if not missings: - err = ( - f"Could not find any relevant files for '{subpath}'. " - f"Did you specify a correct and complete path within repo '{repo}' " - f"and branch {branch}?" - ) - msg.fail(err, exits=1) - cmd = f"git -C {tmp_dir} fetch-pack {git_repo} {missings}" - run_command(cmd, capture=True) - # And finally, we can checkout our subpath - cmd = f"git -C {tmp_dir} checkout {branch} {subpath}" - run_command(cmd, capture=True) - - # Get a subdirectory of the cloned path, if appropriate - source_path = tmp_dir / Path(subpath) - if not is_subpath_of(tmp_dir, source_path): - err = f"'{subpath}' is a path outside of the cloned repository." - msg.fail(err, repo, exits=1) - - shutil.move(str(source_path), str(dest)) - - -def git_repo_branch_exists(repo: str, branch: str) -> bool: - """Uses 'git ls-remote' to check if a repository and branch exists - - repo (str): URL to get repo. - branch (str): Branch on repo to check. - RETURNS (bool): True if repo:branch exists. - """ - get_git_version() - cmd = f"git ls-remote {repo} {branch}" - # We might be tempted to use `--exit-code` with `git ls-remote`, but - # `run_command` handles the `returncode` for us, so we'll rely on - # the fact that stdout returns '' if the requested branch doesn't exist - ret = run_command(cmd, capture=True) - exists = ret.stdout != "" - return exists - - def get_git_version( error: str = "Could not run 'git'. Make sure it's installed and the executable is available.", ) -> Tuple[int, int]: """Get the version of git and raise an error if calling 'git --version' fails. - error (str): The error message to show. RETURNS (Tuple[int, int]): The version as a (major, minor) tuple. Returns (0, 0) if the version couldn't be determined. 
@@ -509,30 +224,6 @@ def get_git_version( return int(version[0]), int(version[1]) -def _http_to_git(repo: str) -> str: - if repo.startswith("http://"): - repo = repo.replace(r"http://", r"https://") - if repo.startswith(r"https://"): - repo = repo.replace("https://", "git@").replace("/", ":", 1) - if repo.endswith("/"): - repo = repo[:-1] - repo = f"{repo}.git" - return repo - - -def is_subpath_of(parent, child): - """ - Check whether `child` is a path contained within `parent`. - """ - # Based on https://stackoverflow.com/a/37095733 . - - # In Python 3.9, the `Path.is_relative_to()` method will supplant this, so - # we can stop using crusty old os.path functions. - parent_realpath = os.path.realpath(parent) - child_realpath = os.path.realpath(child) - return os.path.commonpath([parent_realpath, child_realpath]) == parent_realpath - - @overload def string_to_list(value: str, intify: Literal[False] = ...) -> List[str]: ... diff --git a/spacy/cli/apply.py b/spacy/cli/apply.py index f0df4e757..ffd810506 100644 --- a/spacy/cli/apply.py +++ b/spacy/cli/apply.py @@ -1,18 +1,15 @@ -import tqdm -import srsly - from itertools import chain from pathlib import Path -from typing import Optional, List, Iterable, cast, Union +from typing import Iterable, List, Optional, Union, cast +import srsly +import tqdm from wasabi import msg -from ._util import app, Arg, Opt, setup_gpu, import_code, walk_directory - from ..tokens import Doc, DocBin -from ..vocab import Vocab from ..util import ensure_path, load_model - +from ..vocab import Vocab +from ._util import Arg, Opt, app, import_code, setup_gpu, walk_directory path_help = """Location of the documents to predict on. Can be a single file in .spacy format or a .jsonl file. @@ -136,7 +133,9 @@ def apply( if len(text_files) > 0: streams.append(_stream_texts(text_files)) datagen = cast(DocOrStrStream, chain(*streams)) - for doc in tqdm.tqdm(nlp.pipe(datagen, batch_size=batch_size, n_process=n_process)): + for doc in tqdm.tqdm( + nlp.pipe(datagen, batch_size=batch_size, n_process=n_process), disable=None + ): docbin.add(doc) if output_file.suffix == "": output_file = output_file.with_suffix(".spacy") diff --git a/spacy/cli/assemble.py b/spacy/cli/assemble.py index 1cfa290a3..f74bbacb5 100644 --- a/spacy/cli/assemble.py +++ b/spacy/cli/assemble.py @@ -1,13 +1,20 @@ -from typing import Optional -from pathlib import Path -from wasabi import msg -import typer import logging +from pathlib import Path +from typing import Optional + +import typer +from wasabi import msg -from ._util import app, Arg, Opt, parse_config_overrides, show_validation_error -from ._util import import_code from .. 
import util from ..util import get_sourced_components, load_model_from_config +from ._util import ( + Arg, + Opt, + app, + import_code, + parse_config_overrides, + show_validation_error, +) @app.command( @@ -33,7 +40,8 @@ def assemble_cli( DOCS: https://spacy.io/api/cli#assemble """ - util.logger.setLevel(logging.DEBUG if verbose else logging.INFO) + if verbose: + util.logger.setLevel(logging.DEBUG) # Make sure all files and paths exists if they are needed if not config_path or (str(config_path) != "-" and not config_path.exists()): msg.fail("Config file not found", config_path, exits=1) diff --git a/spacy/cli/benchmark_speed.py b/spacy/cli/benchmark_speed.py index 4eb20a5fa..4dd10049c 100644 --- a/spacy/cli/benchmark_speed.py +++ b/spacy/cli/benchmark_speed.py @@ -1,18 +1,19 @@ -from typing import Iterable, List, Optional import random -from itertools import islice -import numpy -from pathlib import Path import time -from tqdm import tqdm +from itertools import islice +from pathlib import Path +from typing import Iterable, List, Optional + +import numpy import typer +from tqdm import tqdm from wasabi import msg from .. import util from ..language import Language from ..tokens import Doc from ..training import Corpus -from ._util import Arg, Opt, benchmark_cli, setup_gpu +from ._util import Arg, Opt, benchmark_cli, import_code, setup_gpu @benchmark_cli.command( @@ -29,12 +30,14 @@ def benchmark_speed_cli( use_gpu: int = Opt(-1, "--gpu-id", "-g", help="GPU ID or -1 for CPU"), n_batches: int = Opt(50, "--batches", help="Minimum number of batches to benchmark", min=30,), warmup_epochs: int = Opt(3, "--warmup", "-w", min=0, help="Number of iterations over the data for warmup"), + code_path: Optional[Path] = Opt(None, "--code", "-c", help="Path to Python file with additional code (registered functions) to be imported"), # fmt: on ): """ Benchmark a pipeline. Expects a loadable spaCy pipeline and benchmark data in the binary .spacy format. 
""" + import_code(code_path) setup_gpu(use_gpu=use_gpu, silent=False) nlp = util.load_model(model) @@ -88,7 +91,7 @@ class Quartiles: def annotate( nlp: Language, docs: List[Doc], batch_size: Optional[int] ) -> numpy.ndarray: - docs = nlp.pipe(tqdm(docs, unit="doc"), batch_size=batch_size) + docs = nlp.pipe(tqdm(docs, unit="doc", disable=None), batch_size=batch_size) wps = [] while True: with time_context() as elapsed: @@ -170,5 +173,5 @@ def print_outliers(sample: numpy.ndarray): def warmup( nlp: Language, docs: List[Doc], warmup_epochs: int, batch_size: Optional[int] ) -> numpy.ndarray: - docs = warmup_epochs * docs + docs = [doc.copy() for doc in docs * warmup_epochs] return annotate(nlp, docs, batch_size) diff --git a/spacy/cli/convert.py b/spacy/cli/convert.py index 68d454b3e..a66a68133 100644 --- a/spacy/cli/convert.py +++ b/spacy/cli/convert.py @@ -1,18 +1,22 @@ -from typing import Callable, Iterable, Mapping, Optional, Any, Union -from enum import Enum -from pathlib import Path -from wasabi import Printer -import srsly +import itertools import re import sys -import itertools +from enum import Enum +from pathlib import Path +from typing import Any, Callable, Iterable, Mapping, Optional, Union + +import srsly +from wasabi import Printer -from ._util import app, Arg, Opt, walk_directory -from ..training import docs_to_json from ..tokens import Doc, DocBin -from ..training.converters import iob_to_docs, conll_ner_to_docs, json_to_docs -from ..training.converters import conllu_to_docs - +from ..training import docs_to_json +from ..training.converters import ( + conll_ner_to_docs, + conllu_to_docs, + iob_to_docs, + json_to_docs, +) +from ._util import Arg, Opt, app, walk_directory # Converters are matched by file extension except for ner/iob, which are # matched by file extension and content. To add a converter, add a new diff --git a/spacy/cli/debug_config.py b/spacy/cli/debug_config.py index 409fac4ed..0e5382cd9 100644 --- a/spacy/cli/debug_config.py +++ b/spacy/cli/debug_config.py @@ -1,15 +1,22 @@ -from typing import Optional, Dict, Any, Union, List from pathlib import Path -from wasabi import msg, table +from typing import Any, Dict, List, Optional, Union + +import typer from thinc.api import Config from thinc.config import VARIABLE_RE -import typer +from wasabi import msg, table -from ._util import Arg, Opt, show_validation_error, parse_config_overrides -from ._util import import_code, debug_cli +from .. import util from ..schemas import ConfigSchemaInit, ConfigSchemaTraining from ..util import registry -from .. 
import util +from ._util import ( + Arg, + Opt, + debug_cli, + import_code, + parse_config_overrides, + show_validation_error, +) @debug_cli.command( diff --git a/spacy/cli/debug_data.py b/spacy/cli/debug_data.py index 2826cd084..af3c24f3b 100644 --- a/spacy/cli/debug_data.py +++ b/spacy/cli/debug_data.py @@ -1,31 +1,49 @@ -from typing import Any, Dict, Iterable, List, Optional, Sequence, Set, Tuple, Union -from typing import cast, overload -from pathlib import Path -from collections import Counter -import sys -import srsly -from wasabi import Printer, MESSAGES, msg -import typer import math -import numpy +import sys +from collections import Counter +from pathlib import Path +from typing import ( + Any, + Dict, + Iterable, + List, + Optional, + Sequence, + Set, + Tuple, + Union, + cast, + overload, +) -from ._util import app, Arg, Opt, show_validation_error, parse_config_overrides -from ._util import import_code, debug_cli, _format_number -from ..training import Example, remove_bilu_prefix -from ..training.initialize import get_sourced_components -from ..schemas import ConfigSchemaTraining -from ..pipeline import TrainablePipe +import numpy +import srsly +import typer +from wasabi import MESSAGES, Printer, msg + +from .. import util +from ..compat import Literal +from ..language import Language +from ..morphology import Morphology +from ..pipeline import Morphologizer, SpanCategorizer, TrainablePipe +from ..pipeline._edit_tree_internals.edit_trees import EditTrees from ..pipeline._parser_internals import nonproj from ..pipeline._parser_internals.nonproj import DELIMITER -from ..pipeline import Morphologizer, SpanCategorizer -from ..pipeline._edit_tree_internals.edit_trees import EditTrees -from ..morphology import Morphology -from ..language import Language +from ..schemas import ConfigSchemaTraining +from ..training import Example, remove_bilu_prefix +from ..training.initialize import get_sourced_components from ..util import registry, resolve_dot_names -from ..compat import Literal from ..vectors import Mode as VectorsMode -from .. 
import util - +from ._util import ( + Arg, + Opt, + _format_number, + app, + debug_cli, + import_code, + parse_config_overrides, + show_validation_error, +) # Minimum number of expected occurrences of NER label in data to train new label NEW_LABEL_THRESHOLD = 50 @@ -212,7 +230,7 @@ def debug_data( else: msg.info("No word vectors present in the package") - if "spancat" in factory_names: + if "spancat" in factory_names or "spancat_singlelabel" in factory_names: model_labels_spancat = _get_labels_from_spancat(nlp) has_low_data_warning = False has_no_neg_warning = False @@ -830,7 +848,7 @@ def _compile_gold( data["boundary_cross_ents"] += 1 elif label == "-": data["ner"]["-"] += 1 - if "spancat" in factory_names: + if "spancat" in factory_names or "spancat_singlelabel" in factory_names: for spans_key in list(eg.reference.spans.keys()): # Obtain the span frequency if spans_key not in data["spancat"]: @@ -1028,7 +1046,7 @@ def _get_labels_from_spancat(nlp: Language) -> Dict[str, Set[str]]: pipe_names = [ pipe_name for pipe_name in nlp.pipe_names - if nlp.get_pipe_meta(pipe_name).factory == "spancat" + if nlp.get_pipe_meta(pipe_name).factory in ("spancat", "spancat_singlelabel") ] labels: Dict[str, Set[str]] = {} for pipe_name in pipe_names: diff --git a/spacy/cli/debug_diff.py b/spacy/cli/debug_diff.py index 6697c38ae..c53b0acab 100644 --- a/spacy/cli/debug_diff.py +++ b/spacy/cli/debug_diff.py @@ -1,13 +1,13 @@ +from pathlib import Path from typing import Optional import typer -from wasabi import Printer, diff_strings, MarkdownRenderer -from pathlib import Path from thinc.api import Config +from wasabi import MarkdownRenderer, Printer, diff_strings -from ._util import debug_cli, Arg, Opt, show_validation_error, parse_config_overrides from ..util import load_config -from .init_config import init_config, Optimizations +from ._util import Arg, Opt, debug_cli, parse_config_overrides, show_validation_error +from .init_config import Optimizations, init_config @debug_cli.command( diff --git a/spacy/cli/debug_model.py b/spacy/cli/debug_model.py index 190094d81..8a0fd4889 100644 --- a/spacy/cli/debug_model.py +++ b/spacy/cli/debug_model.py @@ -1,19 +1,32 @@ -from typing import Dict, Any, Optional -from pathlib import Path import itertools +from pathlib import Path +from typing import Any, Dict, Optional + +import typer +from thinc.api import ( + Model, + data_validation, + fix_random_seed, + set_dropout_rate, + set_gpu_allocator, +) +from wasabi import msg from spacy.training import Example from spacy.util import resolve_dot_names -from wasabi import msg -from thinc.api import fix_random_seed, set_dropout_rate -from thinc.api import Model, data_validation, set_gpu_allocator -import typer -from ._util import Arg, Opt, debug_cli, show_validation_error -from ._util import parse_config_overrides, string_to_list, setup_gpu +from .. import util from ..schemas import ConfigSchemaTraining from ..util import registry -from .. 
import util +from ._util import ( + Arg, + Opt, + debug_cli, + parse_config_overrides, + setup_gpu, + show_validation_error, + string_to_list, +) @debug_cli.command( diff --git a/spacy/cli/download.py b/spacy/cli/download.py index df4bca53d..4261fb830 100644 --- a/spacy/cli/download.py +++ b/spacy/cli/download.py @@ -1,14 +1,22 @@ -from typing import Optional, Sequence -import requests import sys -from wasabi import msg -import typer +from typing import Optional, Sequence +from urllib.parse import urljoin + +import requests +import typer +from wasabi import msg -from ._util import app, Arg, Opt, WHEEL_SUFFIX, SDIST_SUFFIX from .. import about -from ..util import is_package, get_minor_version, run_command -from ..util import is_prerelease_version from ..errors import OLD_MODEL_SHORTCUTS +from ..util import ( + get_minor_version, + is_in_interactive, + is_in_jupyter, + is_package, + is_prerelease_version, + run_command, +) +from ._util import SDIST_SUFFIX, WHEEL_SUFFIX, Arg, Opt, app @app.command( @@ -56,6 +64,13 @@ def download( ) pip_args = pip_args + ("--no-deps",) if direct: + # Reject model names with '/', in order to prevent shenanigans. + if "/" in model: + msg.fail( + title="Model download rejected", + text=f"Cannot download model '{model}'. Models are expected to be file names, not URLs or fragments", + exits=True, + ) components = model.split("-") model_name = "".join(components[:-1]) version = components[-1] @@ -77,6 +92,27 @@ def download( "Download and installation successful", f"You can now load the package via spacy.load('{model_name}')", ) + if is_in_jupyter(): + reload_deps_msg = ( + "If you are in a Jupyter or Colab notebook, you may need to " + "restart Python in order to load all the package's dependencies. " + "You can do this by selecting the 'Restart kernel' or 'Restart " + "runtime' option." + ) + msg.warn( + "Restart to reload dependencies", + reload_deps_msg, + ) + elif is_in_interactive(): + reload_deps_msg = ( + "If you are in an interactive Python session, you may need to " + "exit and restart Python to load all the package's dependencies. " + "You can exit with Ctrl-D (or Ctrl-Z and Enter on Windows)." + ) + msg.warn( + "Restart to reload dependencies", + reload_deps_msg, + ) def get_model_filename(model_name: str, version: str, sdist: bool = False) -> str: @@ -125,7 +161,16 @@ def get_latest_version(model: str) -> str: def download_model( filename: str, user_pip_args: Optional[Sequence[str]] = None ) -> None: - download_url = about.__download_url__ + "/" + filename + # Construct the download URL carefully. We need to make sure we don't + # allow relative paths or other shenanigans to trick us into download + # from outside our own repo. + base_url = about.__download_url__ + # urljoin requires that the path ends with /, or the last path part will be dropped + if not base_url.endswith("/"): + base_url = about.__download_url__ + "/" + download_url = urljoin(base_url, filename) + if not download_url.startswith(about.__download_url__): + raise ValueError(f"Download from {filename} rejected. 
Was it a relative path?") pip_args = list(user_pip_args) if user_pip_args is not None else [] cmd = [sys.executable, "-m", "pip", "install"] + pip_args + [download_url] run_command(cmd) diff --git a/spacy/cli/evaluate.py b/spacy/cli/evaluate.py index 9fcdd18be..2276ca6b0 100644 --- a/spacy/cli/evaluate.py +++ b/spacy/cli/evaluate.py @@ -1,16 +1,16 @@ -from typing import Optional, List, Dict, Any, Union -from wasabi import Printer -from pathlib import Path import re +from pathlib import Path +from typing import Any, Dict, List, Optional, Union + import srsly from thinc.api import fix_random_seed +from wasabi import Printer -from ..training import Corpus -from ..tokens import Doc -from ._util import app, Arg, Opt, setup_gpu, import_code, benchmark_cli +from .. import displacy, util from ..scorer import Scorer -from .. import util -from .. import displacy +from ..tokens import Doc +from ..training import Corpus +from ._util import Arg, Opt, app, benchmark_cli, import_code, setup_gpu @benchmark_cli.command( @@ -28,6 +28,7 @@ def evaluate_cli( displacy_path: Optional[Path] = Opt(None, "--displacy-path", "-dp", help="Directory to output rendered parses as HTML", exists=True, file_okay=False), displacy_limit: int = Opt(25, "--displacy-limit", "-dl", help="Limit of parses to render as HTML"), per_component: bool = Opt(False, "--per-component", "-P", help="Return scores per component, only applicable when an output JSON file is specified."), + spans_key: str = Opt("sc", "--spans-key", "-sk", help="Spans key to use when evaluating Doc.spans"), # fmt: on ): """ @@ -53,6 +54,7 @@ def evaluate_cli( displacy_limit=displacy_limit, per_component=per_component, silent=False, + spans_key=spans_key, ) diff --git a/spacy/cli/find_function.py b/spacy/cli/find_function.py new file mode 100644 index 000000000..f99ce2adc --- /dev/null +++ b/spacy/cli/find_function.py @@ -0,0 +1,69 @@ +from typing import Optional, Tuple + +from catalogue import RegistryError +from wasabi import msg + +from ..util import registry +from ._util import Arg, Opt, app + + +@app.command("find-function") +def find_function_cli( + # fmt: off + func_name: str = Arg(..., help="Name of the registered function."), + registry_name: Optional[str] = Opt(None, "--registry", "-r", help="Name of the catalogue registry."), + # fmt: on +): + """ + Find the module, path and line number to the file the registered + function is defined in, if available. + + func_name (str): Name of the registered function. + registry_name (Optional[str]): Name of the catalogue registry. 
+ + DOCS: https://spacy.io/api/cli#find-function + """ + if not registry_name: + registry_names = registry.get_registry_names() + for name in registry_names: + if registry.has(name, func_name): + registry_name = name + break + + if not registry_name: + msg.fail( + f"Couldn't find registered function: '{func_name}'", + exits=1, + ) + + assert registry_name is not None + find_function(func_name, registry_name) + + +def find_function(func_name: str, registry_name: str) -> Tuple[str, int]: + registry_desc = None + try: + registry_desc = registry.find(registry_name, func_name) + except RegistryError as e: + msg.fail( + f"Couldn't find registered function: '{func_name}' in registry '{registry_name}'", + ) + msg.fail(f"{e}", exits=1) + assert registry_desc is not None + + registry_path = None + line_no = None + if registry_desc["file"]: + registry_path = registry_desc["file"] + line_no = registry_desc["line_no"] + + if not registry_path or not line_no: + msg.fail( + f"Couldn't find path to registered function: '{func_name}' in registry '{registry_name}'", + exits=1, + ) + assert registry_path is not None + assert line_no is not None + + msg.good(f"Found registered function '{func_name}' at {registry_path}:{line_no}") + return str(registry_path), int(line_no) diff --git a/spacy/cli/find_threshold.py b/spacy/cli/find_threshold.py index 875978eee..3e86495e7 100644 --- a/spacy/cli/find_threshold.py +++ b/spacy/cli/find_threshold.py @@ -1,17 +1,17 @@ import functools +import logging import operator from pathlib import Path -import logging -from typing import Optional, Tuple, Any, Dict, List +from typing import Any, Dict, List, Optional, Tuple import numpy import wasabi.tables -from ..pipeline import TextCategorizer, MultiLabel_TextCategorizer -from ..errors import Errors -from ..training import Corpus -from ._util import app, Arg, Opt, import_code, setup_gpu from .. import util +from ..errors import Errors +from ..pipeline import MultiLabel_TextCategorizer, TextCategorizer +from ..training import Corpus +from ._util import Arg, Opt, app, import_code, setup_gpu _DEFAULTS = { "n_trials": 11, @@ -52,8 +52,8 @@ def find_threshold_cli( DOCS: https://spacy.io/api/cli#find-threshold """ - - util.logger.setLevel(logging.DEBUG if verbose else logging.INFO) + if verbose: + util.logger.setLevel(logging.DEBUG) import_code(code_path) find_threshold( model=model, diff --git a/spacy/cli/info.py b/spacy/cli/info.py index d82bf3fbc..8bfc6b54f 100644 --- a/spacy/cli/info.py +++ b/spacy/cli/info.py @@ -1,15 +1,15 @@ -from typing import Optional, Dict, Any, Union, List -import platform import json +import platform from pathlib import Path -from wasabi import Printer, MarkdownRenderer -import srsly +from typing import Any, Dict, List, Optional, Union -from ._util import app, Arg, Opt, string_to_list -from .download import get_model_filename, get_latest_version -from .. import util -from .. import about +import srsly +from wasabi import MarkdownRenderer, Printer + +from .. 
import about, util from ..compat import importlib_metadata +from ._util import Arg, Opt, app, string_to_list +from .download import get_latest_version, get_model_filename @app.command("info") diff --git a/spacy/cli/init_config.py b/spacy/cli/init_config.py index b634caa4c..a7c03d00f 100644 --- a/spacy/cli/init_config.py +++ b/spacy/cli/init_config.py @@ -1,19 +1,26 @@ -from typing import Optional, List, Tuple +import re from enum import Enum from pathlib import Path -from wasabi import Printer, diff_strings -from thinc.api import Config +from typing import List, Optional, Tuple + import srsly -import re from jinja2 import Template +from thinc.api import Config +from wasabi import Printer, diff_strings from .. import util from ..language import DEFAULT_CONFIG_PRETRAIN_PATH from ..schemas import RecommendationSchema from ..util import SimpleFrozenList -from ._util import init_cli, Arg, Opt, show_validation_error, COMMAND -from ._util import string_to_list, import_code - +from ._util import ( + COMMAND, + Arg, + Opt, + import_code, + init_cli, + show_validation_error, + string_to_list, +) ROOT = Path(__file__).parent / "templates" TEMPLATE_PATH = ROOT / "quickstart_training.jinja" diff --git a/spacy/cli/init_pipeline.py b/spacy/cli/init_pipeline.py index d53a61b8e..21eea8edf 100644 --- a/spacy/cli/init_pipeline.py +++ b/spacy/cli/init_pipeline.py @@ -1,15 +1,23 @@ -from typing import Optional import logging from pathlib import Path -from wasabi import msg -import typer +from typing import Optional + import srsly +import typer +from wasabi import msg from .. import util -from ..training.initialize import init_nlp, convert_vectors from ..language import Language -from ._util import init_cli, Arg, Opt, parse_config_overrides, show_validation_error -from ._util import import_code, setup_gpu +from ..training.initialize import convert_vectors, init_nlp +from ._util import ( + Arg, + Opt, + import_code, + init_cli, + parse_config_overrides, + setup_gpu, + show_validation_error, +) @init_cli.command("vectors") @@ -24,13 +32,15 @@ def init_vectors_cli( name: Optional[str] = Opt(None, "--name", "-n", help="Optional name for the word vectors, e.g. en_core_web_lg.vectors"), verbose: bool = Opt(False, "--verbose", "-V", "-VV", help="Display more information for debugging purposes"), jsonl_loc: Optional[Path] = Opt(None, "--lexemes-jsonl", "-j", help="Location of JSONL-formatted attributes file", hidden=True), + attr: str = Opt("ORTH", "--attr", "-a", help="Optional token attribute to use for vectors, e.g. LOWER or NORM"), # fmt: on ): """Convert word vectors for use with spaCy. Will export an nlp object that you can use in the [initialize] block of your config to initialize a model with vectors. 
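The new `--attr` option on `init vectors` (threaded through to `convert_vectors` below as `attr=attr`) lets the vector table be keyed by a token attribute other than `ORTH`, for example `LOWER` so that differently-cased surface forms share one row. A usage sketch with placeholder paths; the lookup behaviour described in the comments is an assumption based on the option's help text, not something shown in this diff:

# Build a vectors-only pipeline keyed by LOWER (paths are placeholders):
#
#   python -m spacy init vectors en ./word2vec.txt ./vectors_model --attr LOWER
#
import spacy

nlp = spacy.load("./vectors_model")  # placeholder output dir from the command above
doc = nlp("Apple and apple")
# With LOWER-keyed vectors, both tokens should resolve to the same vector row.
print(doc[0].similarity(doc[2]))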
""" - util.logger.setLevel(logging.DEBUG if verbose else logging.INFO) + if verbose: + util.logger.setLevel(logging.DEBUG) msg.info(f"Creating blank nlp object for language '{lang}'") nlp = util.get_lang_class(lang)() if jsonl_loc is not None: @@ -42,6 +52,7 @@ def init_vectors_cli( prune=prune, name=name, mode=mode, + attr=attr, ) msg.good(f"Successfully converted {len(nlp.vocab.vectors)} vectors") nlp.to_disk(output_dir) @@ -77,7 +88,8 @@ def init_pipeline_cli( use_gpu: int = Opt(-1, "--gpu-id", "-g", help="GPU ID or -1 for CPU") # fmt: on ): - util.logger.setLevel(logging.DEBUG if verbose else logging.INFO) + if verbose: + util.logger.setLevel(logging.DEBUG) overrides = parse_config_overrides(ctx.args) import_code(code_path) setup_gpu(use_gpu) @@ -106,7 +118,8 @@ def init_labels_cli( """Generate JSON files for the labels in the data. This helps speed up the training process, since spaCy won't have to preprocess the data to extract the labels.""" - util.logger.setLevel(logging.DEBUG if verbose else logging.INFO) + if verbose: + util.logger.setLevel(logging.DEBUG) if not output_path.exists(): output_path.mkdir(parents=True) overrides = parse_config_overrides(ctx.args) diff --git a/spacy/cli/package.py b/spacy/cli/package.py index 6351f28eb..9421199f1 100644 --- a/spacy/cli/package.py +++ b/spacy/cli/package.py @@ -1,18 +1,21 @@ -from typing import Optional, Union, Any, Dict, List, Tuple, cast -import shutil -from pathlib import Path -from wasabi import Printer, MarkdownRenderer, get_raw_input -from thinc.api import Config -from collections import defaultdict -from catalogue import RegistryError -import srsly -import sys +import os import re +import shutil +import subprocess +import sys +from collections import defaultdict +from pathlib import Path +from typing import Any, Dict, List, Optional, Tuple, Union, cast -from ._util import app, Arg, Opt, string_to_list, WHEEL_SUFFIX, SDIST_SUFFIX -from ..schemas import validate, ModelMetaSchema -from .. import util -from .. import about +import srsly +from catalogue import RegistryError +from thinc.api import Config +from wasabi import MarkdownRenderer, Printer, get_raw_input + +from .. import about, util +from ..compat import importlib_metadata +from ..schemas import ModelMetaSchema, validate +from ._util import SDIST_SUFFIX, WHEEL_SUFFIX, Arg, Opt, app, string_to_list @app.command("package") @@ -35,7 +38,7 @@ def package_cli( specified output directory, and the data will be copied over. If --create-meta is set and a meta.json already exists in the output directory, the existing values will be used as the defaults in the command-line prompt. - After packaging, "python setup.py sdist" is run in the package directory, + After packaging, "python -m build --sdist" is run in the package directory, which will create a .tar.gz archive that can be installed via "pip install". If additional code files are provided (e.g. 
Python files containing custom @@ -78,9 +81,17 @@ def package( input_path = util.ensure_path(input_dir) output_path = util.ensure_path(output_dir) meta_path = util.ensure_path(meta_path) - if create_wheel and not has_wheel(): - err = "Generating a binary .whl file requires wheel to be installed" - msg.fail(err, "pip install wheel", exits=1) + if create_wheel and not has_wheel() and not has_build(): + err = ( + "Generating wheels requires 'build' or 'wheel' (deprecated) to be installed" + ) + msg.fail(err, "pip install build", exits=1) + if not has_build(): + msg.warn( + "Generating packages without the 'build' package is deprecated and " + "will not be supported in the future. To install 'build': pip " + "install build" + ) if not input_path or not input_path.exists(): msg.fail("Can't locate pipeline data", input_path, exits=1) if not output_path or not output_path.exists(): @@ -184,12 +195,37 @@ def package( msg.good(f"Successfully created package directory '{model_name_v}'", main_path) if create_sdist: with util.working_dir(main_path): - util.run_command([sys.executable, "setup.py", "sdist"], capture=False) + # run directly, since util.run_command is not designed to continue + # after a command fails + ret = subprocess.run( + [sys.executable, "-m", "build", ".", "--sdist"], + env=os.environ.copy(), + ) + if ret.returncode != 0: + msg.warn( + "Creating sdist with 'python -m build' failed. Falling " + "back to deprecated use of 'python setup.py sdist'" + ) + util.run_command([sys.executable, "setup.py", "sdist"], capture=False) zip_file = main_path / "dist" / f"{model_name_v}{SDIST_SUFFIX}" msg.good(f"Successfully created zipped Python package", zip_file) if create_wheel: with util.working_dir(main_path): - util.run_command([sys.executable, "setup.py", "bdist_wheel"], capture=False) + # run directly, since util.run_command is not designed to continue + # after a command fails + ret = subprocess.run( + [sys.executable, "-m", "build", ".", "--wheel"], + env=os.environ.copy(), + ) + if ret.returncode != 0: + msg.warn( + "Creating wheel with 'python -m build' failed. Falling " + "back to deprecated use of 'wheel' with " + "'python setup.py bdist_wheel'" + ) + util.run_command( + [sys.executable, "setup.py", "bdist_wheel"], capture=False + ) wheel_name_squashed = re.sub("_+", "_", model_name_v) wheel = main_path / "dist" / f"{wheel_name_squashed}{WHEEL_SUFFIX}" msg.good(f"Successfully created binary wheel", wheel) @@ -209,6 +245,17 @@ def has_wheel() -> bool: return False +def has_build() -> bool: + # it's very likely that there is a local directory named build/ (especially + # in an editable install), so an import check is not sufficient; instead + # check that there is a package version + try: + importlib_metadata.version("build") + return True + except importlib_metadata.PackageNotFoundError: # type: ignore[attr-defined] + return False + + def get_third_party_dependencies( config: Config, exclude: List[str] = util.SimpleFrozenList() ) -> List[str]: @@ -403,7 +450,7 @@ def _format_sources(data: Any) -> str: if author: result += " ({})".format(author) sources.append(result) - return "
".join(sources) + return "
".join(sources) def _format_accuracy(data: Dict[str, Any], exclude: List[str] = ["speed"]) -> str: diff --git a/spacy/cli/pretrain.py b/spacy/cli/pretrain.py index 45042e605..446c40510 100644 --- a/spacy/cli/pretrain.py +++ b/spacy/cli/pretrain.py @@ -1,13 +1,21 @@ -from typing import Optional -from pathlib import Path -from wasabi import msg -import typer import re +from pathlib import Path +from typing import Optional + +import typer +from wasabi import msg -from ._util import app, Arg, Opt, parse_config_overrides, show_validation_error -from ._util import import_code, setup_gpu from ..training.pretrain import pretrain from ..util import load_config +from ._util import ( + Arg, + Opt, + app, + import_code, + parse_config_overrides, + setup_gpu, + show_validation_error, +) @app.command( diff --git a/spacy/cli/profile.py b/spacy/cli/profile.py index 3c282c73d..e5b8f1193 100644 --- a/spacy/cli/profile.py +++ b/spacy/cli/profile.py @@ -1,17 +1,18 @@ -from typing import Optional, Sequence, Union, Iterator -import tqdm -from pathlib import Path -import srsly import cProfile +import itertools import pstats import sys -import itertools -from wasabi import msg, Printer -import typer +from pathlib import Path +from typing import Iterator, Optional, Sequence, Union + +import srsly +import tqdm +import typer +from wasabi import Printer, msg -from ._util import app, debug_cli, Arg, Opt, NAME from ..language import Language from ..util import load_model +from ._util import NAME, Arg, Opt, app, debug_cli @debug_cli.command("profile") @@ -70,7 +71,7 @@ def profile(model: str, inputs: Optional[Path] = None, n_texts: int = 10000) -> def parse_texts(nlp: Language, texts: Sequence[str]) -> None: - for doc in nlp.pipe(tqdm.tqdm(texts), batch_size=16): + for doc in nlp.pipe(tqdm.tqdm(texts, disable=None), batch_size=16): pass diff --git a/spacy/cli/project/assets.py b/spacy/cli/project/assets.py index 8f35b2d23..591d1959e 100644 --- a/spacy/cli/project/assets.py +++ b/spacy/cli/project/assets.py @@ -1,206 +1 @@ -from typing import Any, Dict, Optional -from pathlib import Path -from wasabi import msg -import os -import re -import shutil -import requests -import typer - -from ...util import ensure_path, working_dir -from .._util import project_cli, Arg, Opt, PROJECT_FILE, load_project_config -from .._util import get_checksum, download_file, git_checkout, get_git_version -from .._util import SimpleFrozenDict, parse_config_overrides - -# Whether assets are extra if `extra` is not set. -EXTRA_DEFAULT = False - - -@project_cli.command( - "assets", - context_settings={"allow_extra_args": True, "ignore_unknown_options": True}, -) -def project_assets_cli( - # fmt: off - ctx: typer.Context, # This is only used to read additional arguments - project_dir: Path = Arg(Path.cwd(), help="Path to cloned project. Defaults to current working directory.", exists=True, file_okay=False), - sparse_checkout: bool = Opt(False, "--sparse", "-S", help="Use sparse checkout for assets provided via Git, to only check out and clone the files needed. Requires Git v22.2+."), - extra: bool = Opt(False, "--extra", "-e", help="Download all assets, including those marked as 'extra'.") - # fmt: on -): - """Fetch project assets like datasets and pretrained weights. Assets are - defined in the "assets" section of the project.yml. If a checksum is - provided in the project.yml, the file is only downloaded if no local file - with the same checksum exists. 
- - DOCS: https://spacy.io/api/cli#project-assets - """ - overrides = parse_config_overrides(ctx.args) - project_assets( - project_dir, - overrides=overrides, - sparse_checkout=sparse_checkout, - extra=extra, - ) - - -def project_assets( - project_dir: Path, - *, - overrides: Dict[str, Any] = SimpleFrozenDict(), - sparse_checkout: bool = False, - extra: bool = False, -) -> None: - """Fetch assets for a project using DVC if possible. - - project_dir (Path): Path to project directory. - sparse_checkout (bool): Use sparse checkout for assets provided via Git, to only check out and clone the files - needed. - extra (bool): Whether to download all assets, including those marked as 'extra'. - """ - project_path = ensure_path(project_dir) - config = load_project_config(project_path, overrides=overrides) - assets = [ - asset - for asset in config.get("assets", []) - if extra or not asset.get("extra", EXTRA_DEFAULT) - ] - if not assets: - msg.warn( - f"No assets specified in {PROJECT_FILE} (if assets are marked as extra, download them with --extra)", - exits=0, - ) - msg.info(f"Fetching {len(assets)} asset(s)") - - for asset in assets: - dest = (project_dir / asset["dest"]).resolve() - checksum = asset.get("checksum") - if "git" in asset: - git_err = ( - f"Cloning spaCy project templates requires Git and the 'git' command. " - f"Make sure it's installed and that the executable is available." - ) - get_git_version(error=git_err) - if dest.exists(): - # If there's already a file, check for checksum - if checksum and checksum == get_checksum(dest): - msg.good( - f"Skipping download with matching checksum: {asset['dest']}" - ) - continue - else: - if dest.is_dir(): - shutil.rmtree(dest) - else: - dest.unlink() - if "repo" not in asset["git"] or asset["git"]["repo"] is None: - msg.fail( - "A git asset must include 'repo', the repository address.", exits=1 - ) - if "path" not in asset["git"] or asset["git"]["path"] is None: - msg.fail( - "A git asset must include 'path' - use \"\" to get the entire repository.", - exits=1, - ) - git_checkout( - asset["git"]["repo"], - asset["git"]["path"], - dest, - branch=asset["git"].get("branch"), - sparse=sparse_checkout, - ) - msg.good(f"Downloaded asset {dest}") - else: - url = asset.get("url") - if not url: - # project.yml defines asset without URL that the user has to place - check_private_asset(dest, checksum) - continue - fetch_asset(project_path, url, dest, checksum) - - -def check_private_asset(dest: Path, checksum: Optional[str] = None) -> None: - """Check and validate assets without a URL (private assets that the user - has to provide themselves) and give feedback about the checksum. - - dest (Path): Destination path of the asset. - checksum (Optional[str]): Optional checksum of the expected file. - """ - if not Path(dest).exists(): - err = f"No URL provided for asset. You need to add this file yourself: {dest}" - msg.warn(err) - else: - if not checksum: - msg.good(f"Asset already exists: {dest}") - elif checksum == get_checksum(dest): - msg.good(f"Asset exists with matching checksum: {dest}") - else: - msg.fail(f"Asset available but with incorrect checksum: {dest}") - - -def fetch_asset( - project_path: Path, url: str, dest: Path, checksum: Optional[str] = None -) -> None: - """Fetch an asset from a given URL or path. If a checksum is provided and a - local file exists, it's only re-downloaded if the checksum doesn't match. - - project_path (Path): Path to project directory. - url (str): URL or path to asset. 
- checksum (Optional[str]): Optional expected checksum of local file. - RETURNS (Optional[Path]): The path to the fetched asset or None if fetching - the asset failed. - """ - dest_path = (project_path / dest).resolve() - if dest_path.exists(): - # If there's already a file, check for checksum - if checksum: - if checksum == get_checksum(dest_path): - msg.good(f"Skipping download with matching checksum: {dest}") - return - else: - # If there's not a checksum, make sure the file is a possibly valid size - if os.path.getsize(dest_path) == 0: - msg.warn(f"Asset exists but with size of 0 bytes, deleting: {dest}") - os.remove(dest_path) - # We might as well support the user here and create parent directories in - # case the asset dir isn't listed as a dir to create in the project.yml - if not dest_path.parent.exists(): - dest_path.parent.mkdir(parents=True) - with working_dir(project_path): - url = convert_asset_url(url) - try: - download_file(url, dest_path) - msg.good(f"Downloaded asset {dest}") - except requests.exceptions.RequestException as e: - if Path(url).exists() and Path(url).is_file(): - # If it's a local file, copy to destination - shutil.copy(url, str(dest_path)) - msg.good(f"Copied local asset {dest}") - else: - msg.fail(f"Download failed: {dest}", e) - if checksum and checksum != get_checksum(dest_path): - msg.fail(f"Checksum doesn't match value defined in {PROJECT_FILE}: {dest}") - - -def convert_asset_url(url: str) -> str: - """Check and convert the asset URL if needed. - - url (str): The asset URL. - RETURNS (str): The converted URL. - """ - # If the asset URL is a regular GitHub URL it's likely a mistake - if ( - re.match(r"(http(s?)):\/\/github.com", url) - and "releases/download" not in url - and "/raw/" not in url - ): - converted = url.replace("github.com", "raw.githubusercontent.com") - converted = re.sub(r"/(tree|blob)/", "/", converted) - msg.warn( - "Downloading from a regular GitHub URL. This will only download " - "the source of the page, not the actual file. Converting the URL " - "to a raw URL.", - converted, - ) - return converted - return url +from weasel.cli.assets import * diff --git a/spacy/cli/project/clone.py b/spacy/cli/project/clone.py index 14b4ed9b5..11d2511a3 100644 --- a/spacy/cli/project/clone.py +++ b/spacy/cli/project/clone.py @@ -1,115 +1 @@ -from typing import Optional -from pathlib import Path -from wasabi import msg -import subprocess -import re - -from ... import about -from ...util import ensure_path -from .._util import project_cli, Arg, Opt, COMMAND, PROJECT_FILE -from .._util import git_checkout, get_git_version, git_repo_branch_exists - -DEFAULT_REPO = about.__projects__ -DEFAULT_PROJECTS_BRANCH = about.__projects_branch__ -DEFAULT_BRANCHES = ["main", "master"] - - -@project_cli.command("clone") -def project_clone_cli( - # fmt: off - name: str = Arg(..., help="The name of the template to clone"), - dest: Optional[Path] = Arg(None, help="Where to clone the project. Defaults to current working directory", exists=False), - repo: str = Opt(DEFAULT_REPO, "--repo", "-r", help="The repository to clone from"), - branch: Optional[str] = Opt(None, "--branch", "-b", help=f"The branch to clone from. If not provided, will attempt {', '.join(DEFAULT_BRANCHES)}"), - sparse_checkout: bool = Opt(False, "--sparse", "-S", help="Use sparse Git checkout to only check out and clone the files needed. Requires Git v22.2+.") - # fmt: on -): - """Clone a project template from a repository. 
Calls into "git" and will - only download the files from the given subdirectory. The GitHub repo - defaults to the official spaCy template repo, but can be customized - (including using a private repo). - - DOCS: https://spacy.io/api/cli#project-clone - """ - if dest is None: - dest = Path.cwd() / Path(name).parts[-1] - if repo == DEFAULT_REPO and branch is None: - branch = DEFAULT_PROJECTS_BRANCH - - if branch is None: - for default_branch in DEFAULT_BRANCHES: - if git_repo_branch_exists(repo, default_branch): - branch = default_branch - break - if branch is None: - default_branches_msg = ", ".join(f"'{b}'" for b in DEFAULT_BRANCHES) - msg.fail( - "No branch provided and attempted default " - f"branches {default_branches_msg} do not exist.", - exits=1, - ) - else: - if not git_repo_branch_exists(repo, branch): - msg.fail(f"repo: {repo} (branch: {branch}) does not exist.", exits=1) - assert isinstance(branch, str) - project_clone(name, dest, repo=repo, branch=branch, sparse_checkout=sparse_checkout) - - -def project_clone( - name: str, - dest: Path, - *, - repo: str = about.__projects__, - branch: str = about.__projects_branch__, - sparse_checkout: bool = False, -) -> None: - """Clone a project template from a repository. - - name (str): Name of subdirectory to clone. - dest (Path): Destination path of cloned project. - repo (str): URL of Git repo containing project templates. - branch (str): The branch to clone from - """ - dest = ensure_path(dest) - check_clone(name, dest, repo) - project_dir = dest.resolve() - repo_name = re.sub(r"(http(s?)):\/\/github.com/", "", repo) - try: - git_checkout(repo, name, dest, branch=branch, sparse=sparse_checkout) - except subprocess.CalledProcessError: - err = f"Could not clone '{name}' from repo '{repo_name}' (branch '{branch}')" - msg.fail(err, exits=1) - msg.good(f"Cloned '{name}' from '{repo_name}' (branch '{branch}')", project_dir) - if not (project_dir / PROJECT_FILE).exists(): - msg.warn(f"No {PROJECT_FILE} found in directory") - else: - msg.good(f"Your project is now ready!") - print(f"To fetch the assets, run:\n{COMMAND} project assets {dest}") - - -def check_clone(name: str, dest: Path, repo: str) -> None: - """Check and validate that the destination path can be used to clone. Will - check that Git is available and that the destination path is suitable. - - name (str): Name of the directory to clone from the repo. - dest (Path): Local destination of cloned directory. - repo (str): URL of the repo to clone from. - """ - git_err = ( - f"Cloning spaCy project templates requires Git and the 'git' command. " - f"To clone a project without Git, copy the files from the '{name}' " - f"directory in the {repo} to {dest} manually." - ) - get_git_version(error=git_err) - if not dest: - msg.fail(f"Not a valid directory to clone project: {dest}", exits=1) - if dest.exists(): - # Directory already exists (not allowed, clone needs to create it) - msg.fail(f"Can't clone project, directory already exists: {dest}", exits=1) - if not dest.parent.exists(): - # We're not creating parents, parent dir should exist - msg.fail( - f"Can't clone project, parent directory doesn't exist: {dest.parent}. 
" - f"Create the necessary folder(s) first before continuing.", - exits=1, - ) +from weasel.cli.clone import * diff --git a/spacy/cli/project/document.py b/spacy/cli/project/document.py index 1ba43a958..1952524a9 100644 --- a/spacy/cli/project/document.py +++ b/spacy/cli/project/document.py @@ -1,115 +1 @@ -from pathlib import Path -from wasabi import msg, MarkdownRenderer - -from ...util import working_dir -from .._util import project_cli, Arg, Opt, PROJECT_FILE, load_project_config - - -DOCS_URL = "https://spacy.io" -INTRO_PROJECT = f"""The [`{PROJECT_FILE}`]({PROJECT_FILE}) defines the data assets required by the -project, as well as the available commands and workflows. For details, see the -[spaCy projects documentation]({DOCS_URL}/usage/projects).""" -INTRO_COMMANDS = f"""The following commands are defined by the project. They -can be executed using [`spacy project run [name]`]({DOCS_URL}/api/cli#project-run). -Commands are only re-run if their inputs have changed.""" -INTRO_WORKFLOWS = f"""The following workflows are defined by the project. They -can be executed using [`spacy project run [name]`]({DOCS_URL}/api/cli#project-run) -and will run the specified commands in order. Commands are only re-run if their -inputs have changed.""" -INTRO_ASSETS = f"""The following assets are defined by the project. They can -be fetched by running [`spacy project assets`]({DOCS_URL}/api/cli#project-assets) -in the project directory.""" -# These markers are added to the Markdown and can be used to update the file in -# place if it already exists. Only the auto-generated part will be replaced. -MARKER_START = "" -MARKER_END = "" -# If this marker is used in an existing README, it's ignored and not replaced -MARKER_IGNORE = "" - - -@project_cli.command("document") -def project_document_cli( - # fmt: off - project_dir: Path = Arg(Path.cwd(), help="Path to cloned project. Defaults to current working directory.", exists=True, file_okay=False), - output_file: Path = Opt("-", "--output", "-o", help="Path to output Markdown file for output. Defaults to - for standard output"), - no_emoji: bool = Opt(False, "--no-emoji", "-NE", help="Don't use emoji") - # fmt: on -): - """ - Auto-generate a README.md for a project. If the content is saved to a file, - hidden markers are added so you can add custom content before or after the - auto-generated section and only the auto-generated docs will be replaced - when you re-run the command. 
- - DOCS: https://spacy.io/api/cli#project-document - """ - project_document(project_dir, output_file, no_emoji=no_emoji) - - -def project_document( - project_dir: Path, output_file: Path, *, no_emoji: bool = False -) -> None: - is_stdout = str(output_file) == "-" - config = load_project_config(project_dir) - md = MarkdownRenderer(no_emoji=no_emoji) - md.add(MARKER_START) - title = config.get("title") - description = config.get("description") - md.add(md.title(1, f"spaCy Project{f': {title}' if title else ''}", "🪐")) - if description: - md.add(description) - md.add(md.title(2, PROJECT_FILE, "📋")) - md.add(INTRO_PROJECT) - # Commands - cmds = config.get("commands", []) - data = [(md.code(cmd["name"]), cmd.get("help", "")) for cmd in cmds] - if data: - md.add(md.title(3, "Commands", "⏯")) - md.add(INTRO_COMMANDS) - md.add(md.table(data, ["Command", "Description"])) - # Workflows - wfs = config.get("workflows", {}).items() - data = [(md.code(n), " → ".join(md.code(w) for w in stp)) for n, stp in wfs] - if data: - md.add(md.title(3, "Workflows", "⏭")) - md.add(INTRO_WORKFLOWS) - md.add(md.table(data, ["Workflow", "Steps"])) - # Assets - assets = config.get("assets", []) - data = [] - for a in assets: - source = "Git" if a.get("git") else "URL" if a.get("url") else "Local" - dest_path = a["dest"] - dest = md.code(dest_path) - if source == "Local": - # Only link assets if they're in the repo - with working_dir(project_dir) as p: - if (p / dest_path).exists(): - dest = md.link(dest, dest_path) - data.append((dest, source, a.get("description", ""))) - if data: - md.add(md.title(3, "Assets", "🗂")) - md.add(INTRO_ASSETS) - md.add(md.table(data, ["File", "Source", "Description"])) - md.add(MARKER_END) - # Output result - if is_stdout: - print(md.text) - else: - content = md.text - if output_file.exists(): - with output_file.open("r", encoding="utf8") as f: - existing = f.read() - if MARKER_IGNORE in existing: - msg.warn("Found ignore marker in existing file: skipping", output_file) - return - if MARKER_START in existing and MARKER_END in existing: - msg.info("Found existing file: only replacing auto-generated docs") - before = existing.split(MARKER_START)[0] - after = existing.split(MARKER_END)[1] - content = f"{before}{content}{after}" - else: - msg.warn("Replacing existing file") - with output_file.open("w", encoding="utf8") as f: - f.write(content) - msg.good("Saved project documentation", output_file) +from weasel.cli.document import * diff --git a/spacy/cli/project/dvc.py b/spacy/cli/project/dvc.py index a15353855..aa1ae7dd9 100644 --- a/spacy/cli/project/dvc.py +++ b/spacy/cli/project/dvc.py @@ -1,207 +1 @@ -"""This module contains helpers and subcommands for integrating spaCy projects -with Data Version Controk (DVC). https://dvc.org""" -from typing import Dict, Any, List, Optional, Iterable -import subprocess -from pathlib import Path -from wasabi import msg - -from .._util import PROJECT_FILE, load_project_config, get_hash, project_cli -from .._util import Arg, Opt, NAME, COMMAND -from ...util import working_dir, split_command, join_command, run_command -from ...util import SimpleFrozenList - - -DVC_CONFIG = "dvc.yaml" -DVC_DIR = ".dvc" -UPDATE_COMMAND = "dvc" -DVC_CONFIG_COMMENT = f"""# This file is auto-generated by spaCy based on your {PROJECT_FILE}. 
If you've -# edited your {PROJECT_FILE}, you can regenerate this file by running: -# {COMMAND} project {UPDATE_COMMAND}""" - - -@project_cli.command(UPDATE_COMMAND) -def project_update_dvc_cli( - # fmt: off - project_dir: Path = Arg(Path.cwd(), help="Location of project directory. Defaults to current working directory.", exists=True, file_okay=False), - workflow: Optional[str] = Arg(None, help=f"Name of workflow defined in {PROJECT_FILE}. Defaults to first workflow if not set."), - verbose: bool = Opt(False, "--verbose", "-V", help="Print more info"), - quiet: bool = Opt(False, "--quiet", "-q", help="Print less info"), - force: bool = Opt(False, "--force", "-F", help="Force update DVC config"), - # fmt: on -): - """Auto-generate Data Version Control (DVC) config. A DVC - project can only define one pipeline, so you need to specify one workflow - defined in the project.yml. If no workflow is specified, the first defined - workflow is used. The DVC config will only be updated if the project.yml - changed. - - DOCS: https://spacy.io/api/cli#project-dvc - """ - project_update_dvc(project_dir, workflow, verbose=verbose, quiet=quiet, force=force) - - -def project_update_dvc( - project_dir: Path, - workflow: Optional[str] = None, - *, - verbose: bool = False, - quiet: bool = False, - force: bool = False, -) -> None: - """Update the auto-generated Data Version Control (DVC) config file. A DVC - project can only define one pipeline, so you need to specify one workflow - defined in the project.yml. Will only update the file if the checksum changed. - - project_dir (Path): The project directory. - workflow (Optional[str]): Optional name of workflow defined in project.yml. - If not set, the first workflow will be used. - verbose (bool): Print more info. - quiet (bool): Print less info. - force (bool): Force update DVC config. - """ - config = load_project_config(project_dir) - updated = update_dvc_config( - project_dir, config, workflow, verbose=verbose, quiet=quiet, force=force - ) - help_msg = "To execute the workflow with DVC, run: dvc repro" - if updated: - msg.good(f"Updated DVC config from {PROJECT_FILE}", help_msg) - else: - msg.info(f"No changes found in {PROJECT_FILE}, no update needed", help_msg) - - -def update_dvc_config( - path: Path, - config: Dict[str, Any], - workflow: Optional[str] = None, - verbose: bool = False, - quiet: bool = False, - force: bool = False, -) -> bool: - """Re-run the DVC commands in dry mode and update dvc.yaml file in the - project directory. The file is auto-generated based on the config. The - first line of the auto-generated file specifies the hash of the config - dict, so if any of the config values change, the DVC config is regenerated. - - path (Path): The path to the project directory. - config (Dict[str, Any]): The loaded project.yml. - verbose (bool): Whether to print additional info (via DVC). - quiet (bool): Don't output anything (via DVC). - force (bool): Force update, even if hashes match. - RETURNS (bool): Whether the DVC config file was updated. 
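The deleted `update_dvc_config()` above avoided rewriting dvc.yaml unnecessarily by writing the project config's hash as the first line of the generated file and comparing it on the next run. A condensed sketch of that staleness check (the hashing function is a stand-in, not spaCy's `get_hash()`):

import hashlib
import json
from pathlib import Path


def config_hash(config: dict) -> str:
    # Stand-in for spaCy's get_hash(); any stable serialization works here.
    return hashlib.md5(json.dumps(config, sort_keys=True).encode("utf8")).hexdigest()


def dvc_config_is_stale(dvc_yaml: Path, config: dict, force: bool = False) -> bool:
    # Regenerate if forced, missing, or the "# <hash>" header no longer matches.
    if force or not dvc_yaml.exists():
        return True
    lines = dvc_yaml.read_text(encoding="utf8").splitlines()[:1]
    ref_hash = lines[0].strip().replace("# ", "") if lines else ""
    return ref_hash != config_hash(config)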
- """ - ensure_dvc(path) - workflows = config.get("workflows", {}) - workflow_names = list(workflows.keys()) - check_workflows(workflow_names, workflow) - if not workflow: - workflow = workflow_names[0] - config_hash = get_hash(config) - path = path.resolve() - dvc_config_path = path / DVC_CONFIG - if dvc_config_path.exists(): - # Check if the file was generated using the current config, if not, redo - with dvc_config_path.open("r", encoding="utf8") as f: - ref_hash = f.readline().strip().replace("# ", "") - if ref_hash == config_hash and not force: - return False # Nothing has changed in project.yml, don't need to update - dvc_config_path.unlink() - dvc_commands = [] - config_commands = {cmd["name"]: cmd for cmd in config.get("commands", [])} - - # some flags that apply to every command - flags = [] - if verbose: - flags.append("--verbose") - if quiet: - flags.append("--quiet") - - for name in workflows[workflow]: - command = config_commands[name] - deps = command.get("deps", []) - outputs = command.get("outputs", []) - outputs_no_cache = command.get("outputs_no_cache", []) - if not deps and not outputs and not outputs_no_cache: - continue - # Default to the working dir as the project path since dvc.yaml is auto-generated - # and we don't want arbitrary paths in there - project_cmd = ["python", "-m", NAME, "project", "run", name] - deps_cmd = [c for cl in [["-d", p] for p in deps] for c in cl] - outputs_cmd = [c for cl in [["-o", p] for p in outputs] for c in cl] - outputs_nc_cmd = [c for cl in [["-O", p] for p in outputs_no_cache] for c in cl] - - dvc_cmd = ["run", *flags, "-n", name, "-w", str(path), "--no-exec"] - if command.get("no_skip"): - dvc_cmd.append("--always-changed") - full_cmd = [*dvc_cmd, *deps_cmd, *outputs_cmd, *outputs_nc_cmd, *project_cmd] - dvc_commands.append(join_command(full_cmd)) - - if not dvc_commands: - # If we don't check for this, then there will be an error when reading the - # config, since DVC wouldn't create it. - msg.fail( - "No usable commands for DVC found. This can happen if none of your " - "commands have dependencies or outputs.", - exits=1, - ) - - with working_dir(path): - for c in dvc_commands: - dvc_command = "dvc " + c - run_command(dvc_command) - with dvc_config_path.open("r+", encoding="utf8") as f: - content = f.read() - f.seek(0, 0) - f.write(f"# {config_hash}\n{DVC_CONFIG_COMMENT}\n{content}") - return True - - -def check_workflows(workflows: List[str], workflow: Optional[str] = None) -> None: - """Validate workflows provided in project.yml and check that a given - workflow can be used to generate a DVC config. - - workflows (List[str]): Names of the available workflows. - workflow (Optional[str]): The name of the workflow to convert. - """ - if not workflows: - msg.fail( - f"No workflows defined in {PROJECT_FILE}. To generate a DVC config, " - f"define at least one list of commands.", - exits=1, - ) - if workflow is not None and workflow not in workflows: - msg.fail( - f"Workflow '{workflow}' not defined in {PROJECT_FILE}. " - f"Available workflows: {', '.join(workflows)}", - exits=1, - ) - if not workflow: - msg.warn( - f"No workflow specified for DVC pipeline. Using the first workflow " - f"defined in {PROJECT_FILE}: '{workflows[0]}'" - ) - - -def ensure_dvc(project_dir: Path) -> None: - """Ensure that the "dvc" command is available and that the current project - directory is an initialized DVC project. 
- """ - try: - subprocess.run(["dvc", "--version"], stdout=subprocess.DEVNULL) - except Exception: - msg.fail( - "To use spaCy projects with DVC (Data Version Control), DVC needs " - "to be installed and the 'dvc' command needs to be available", - "You can install the Python package from pip (pip install dvc) or " - "conda (conda install -c conda-forge dvc). For more details, see the " - "documentation: https://dvc.org/doc/install", - exits=1, - ) - if not (project_dir / ".dvc").exists(): - msg.fail( - "Project not initialized as a DVC project", - "To initialize a DVC project, you can run 'dvc init' in the project " - "directory. For more details, see the documentation: " - "https://dvc.org/doc/command-reference/init", - exits=1, - ) +from weasel.cli.dvc import * diff --git a/spacy/cli/project/pull.py b/spacy/cli/project/pull.py index 8894baa50..5e603273d 100644 --- a/spacy/cli/project/pull.py +++ b/spacy/cli/project/pull.py @@ -1,67 +1 @@ -from pathlib import Path -from wasabi import msg -from .remote_storage import RemoteStorage -from .remote_storage import get_command_hash -from .._util import project_cli, Arg, logger -from .._util import load_project_config -from .run import update_lockfile - - -@project_cli.command("pull") -def project_pull_cli( - # fmt: off - remote: str = Arg("default", help="Name or path of remote storage"), - project_dir: Path = Arg(Path.cwd(), help="Location of project directory. Defaults to current working directory.", exists=True, file_okay=False), - # fmt: on -): - """Retrieve available precomputed outputs from a remote storage. - You can alias remotes in your project.yml by mapping them to storage paths. - A storage can be anything that the smart-open library can upload to, e.g. - AWS, Google Cloud Storage, SSH, local directories etc. - - DOCS: https://spacy.io/api/cli#project-pull - """ - for url, output_path in project_pull(project_dir, remote): - if url is not None: - msg.good(f"Pulled {output_path} from {url}") - - -def project_pull(project_dir: Path, remote: str, *, verbose: bool = False): - # TODO: We don't have tests for this :(. It would take a bit of mockery to - # set up. I guess see if it breaks first? - config = load_project_config(project_dir) - if remote in config.get("remotes", {}): - remote = config["remotes"][remote] - storage = RemoteStorage(project_dir, remote) - commands = list(config.get("commands", [])) - # We use a while loop here because we don't know how the commands - # will be ordered. A command might need dependencies from one that's later - # in the list. - while commands: - for i, cmd in enumerate(list(commands)): - logger.debug("CMD: %s.", cmd["name"]) - deps = [project_dir / dep for dep in cmd.get("deps", [])] - if all(dep.exists() for dep in deps): - cmd_hash = get_command_hash("", "", deps, cmd["script"]) - for output_path in cmd.get("outputs", []): - url = storage.pull(output_path, command_hash=cmd_hash) - logger.debug( - "URL: %s for %s with command hash %s", - url, - output_path, - cmd_hash, - ) - yield url, output_path - - out_locs = [project_dir / out for out in cmd.get("outputs", [])] - if all(loc.exists() for loc in out_locs): - update_lockfile(project_dir, cmd) - # We remove the command from the list here, and break, so that - # we iterate over the loop again. - commands.pop(i) - break - else: - logger.debug("Dependency missing. Skipping %s outputs.", cmd["name"]) - else: - # If we didn't break the for loop, break the while loop. 
- break +from weasel.cli.pull import * diff --git a/spacy/cli/project/push.py b/spacy/cli/project/push.py index a8178de21..3a8e8869d 100644 --- a/spacy/cli/project/push.py +++ b/spacy/cli/project/push.py @@ -1,69 +1 @@ -from pathlib import Path -from wasabi import msg -from .remote_storage import RemoteStorage -from .remote_storage import get_content_hash, get_command_hash -from .._util import load_project_config -from .._util import project_cli, Arg, logger - - -@project_cli.command("push") -def project_push_cli( - # fmt: off - remote: str = Arg("default", help="Name or path of remote storage"), - project_dir: Path = Arg(Path.cwd(), help="Location of project directory. Defaults to current working directory.", exists=True, file_okay=False), - # fmt: on -): - """Persist outputs to a remote storage. You can alias remotes in your - project.yml by mapping them to storage paths. A storage can be anything that - the smart-open library can upload to, e.g. AWS, Google Cloud Storage, SSH, - local directories etc. - - DOCS: https://spacy.io/api/cli#project-push - """ - for output_path, url in project_push(project_dir, remote): - if url is None: - msg.info(f"Skipping {output_path}") - else: - msg.good(f"Pushed {output_path} to {url}") - - -def project_push(project_dir: Path, remote: str): - """Persist outputs to a remote storage. You can alias remotes in your project.yml - by mapping them to storage paths. A storage can be anything that the smart-open - library can upload to, e.g. gcs, aws, ssh, local directories etc - """ - config = load_project_config(project_dir) - if remote in config.get("remotes", {}): - remote = config["remotes"][remote] - storage = RemoteStorage(project_dir, remote) - for cmd in config.get("commands", []): - logger.debug("CMD: %s", cmd["name"]) - deps = [project_dir / dep for dep in cmd.get("deps", [])] - if any(not dep.exists() for dep in deps): - logger.debug("Dependency missing. Skipping %s outputs", cmd["name"]) - continue - cmd_hash = get_command_hash( - "", "", [project_dir / dep for dep in cmd.get("deps", [])], cmd["script"] - ) - logger.debug("CMD_HASH: %s", cmd_hash) - for output_path in cmd.get("outputs", []): - output_loc = project_dir / output_path - if output_loc.exists() and _is_not_empty_dir(output_loc): - url = storage.push( - output_path, - command_hash=cmd_hash, - content_hash=get_content_hash(output_loc), - ) - logger.debug( - "URL: %s for output %s with cmd_hash %s", url, output_path, cmd_hash - ) - yield output_path, url - - -def _is_not_empty_dir(loc: Path): - if not loc.is_dir(): - return True - elif any(_is_not_empty_dir(child) for child in loc.iterdir()): - return True - else: - return False +from weasel.cli.push import * diff --git a/spacy/cli/project/remote_storage.py b/spacy/cli/project/remote_storage.py index 076541580..29409150f 100644 --- a/spacy/cli/project/remote_storage.py +++ b/spacy/cli/project/remote_storage.py @@ -1,205 +1 @@ -from typing import Optional, List, Dict, TYPE_CHECKING -import os -import site -import hashlib -import urllib.parse -import tarfile -from pathlib import Path -from wasabi import msg - -from .._util import get_hash, get_checksum, upload_file, download_file -from .._util import ensure_pathy, make_tempdir -from ...util import get_minor_version, ENV_VARS, check_bool_env_var -from ...git_info import GIT_VERSION -from ... import about -from ...errors import Errors - -if TYPE_CHECKING: - from pathy import FluidPath # noqa: F401 - - -class RemoteStorage: - """Push and pull outputs to and from a remote file storage. 
- - Remotes can be anything that `smart-open` can support: AWS, GCS, file system, - ssh, etc. - """ - - def __init__(self, project_root: Path, url: str, *, compression="gz"): - self.root = project_root - self.url = ensure_pathy(url) - self.compression = compression - - def push(self, path: Path, command_hash: str, content_hash: str) -> "FluidPath": - """Compress a file or directory within a project and upload it to a remote - storage. If an object exists at the full URL, nothing is done. - - Within the remote storage, files are addressed by their project path - (url encoded) and two user-supplied hashes, representing their creation - context and their file contents. If the URL already exists, the data is - not uploaded. Paths are archived and compressed prior to upload. - """ - loc = self.root / path - if not loc.exists(): - raise IOError(f"Cannot push {loc}: does not exist.") - url = self.make_url(path, command_hash, content_hash) - if url.exists(): - return url - tmp: Path - with make_tempdir() as tmp: - tar_loc = tmp / self.encode_name(str(path)) - mode_string = f"w:{self.compression}" if self.compression else "w" - with tarfile.open(tar_loc, mode=mode_string) as tar_file: - tar_file.add(str(loc), arcname=str(path)) - upload_file(tar_loc, url) - return url - - def pull( - self, - path: Path, - *, - command_hash: Optional[str] = None, - content_hash: Optional[str] = None, - ) -> Optional["FluidPath"]: - """Retrieve a file from the remote cache. If the file already exists, - nothing is done. - - If the command_hash and/or content_hash are specified, only matching - results are returned. If no results are available, an error is raised. - """ - dest = self.root / path - if dest.exists(): - return None - url = self.find(path, command_hash=command_hash, content_hash=content_hash) - if url is None: - return url - else: - # Make sure the destination exists - if not dest.parent.exists(): - dest.parent.mkdir(parents=True) - tmp: Path - with make_tempdir() as tmp: - tar_loc = tmp / url.parts[-1] - download_file(url, tar_loc) - mode_string = f"r:{self.compression}" if self.compression else "r" - with tarfile.open(tar_loc, mode=mode_string) as tar_file: - # This requires that the path is added correctly, relative - # to root. This is how we set things up in push() - - # Disallow paths outside the current directory for the tar - # file (CVE-2007-4559, directory traversal vulnerability) - def is_within_directory(directory, target): - abs_directory = os.path.abspath(directory) - abs_target = os.path.abspath(target) - prefix = os.path.commonprefix([abs_directory, abs_target]) - return prefix == abs_directory - - def safe_extract(tar, path): - for member in tar.getmembers(): - member_path = os.path.join(path, member.name) - if not is_within_directory(path, member_path): - raise ValueError(Errors.E852) - tar.extractall(path) - - safe_extract(tar_file, self.root) - return url - - def find( - self, - path: Path, - *, - command_hash: Optional[str] = None, - content_hash: Optional[str] = None, - ) -> Optional["FluidPath"]: - """Find the best matching version of a file within the storage, - or `None` if no match can be found. If both the creation and content hash - are specified, only exact matches will be returned. Otherwise, the most - recent matching file is preferred. 
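The deleted `RemoteStorage.pull()` above guards `tarfile.extractall` against directory traversal (CVE-2007-4559) by checking that every archive member resolves inside the destination before extracting. The same pattern, trimmed to a standalone helper (error handling simplified; spaCy raised its own error code here):

import os
import tarfile


def safe_extract(tar: tarfile.TarFile, dest: str) -> None:
    """Extract a tar archive, refusing members that escape dest
    (directory traversal, CVE-2007-4559)."""
    abs_dest = os.path.abspath(dest)
    for member in tar.getmembers():
        member_path = os.path.abspath(os.path.join(dest, member.name))
        if os.path.commonprefix([abs_dest, member_path]) != abs_dest:
            raise ValueError(f"Blocked path traversal in tar member: {member.name}")
    tar.extractall(dest)


# Usage sketch with a placeholder archive path:
# with tarfile.open("outputs.tar.gz", mode="r:gz") as tar_file:
#     safe_extract(tar_file, ".")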
- """ - name = self.encode_name(str(path)) - urls = [] - if command_hash is not None and content_hash is not None: - url = self.url / name / command_hash / content_hash - urls = [url] if url.exists() else [] - elif command_hash is not None: - if (self.url / name / command_hash).exists(): - urls = list((self.url / name / command_hash).iterdir()) - else: - if (self.url / name).exists(): - for sub_dir in (self.url / name).iterdir(): - urls.extend(sub_dir.iterdir()) - if content_hash is not None: - urls = [url for url in urls if url.parts[-1] == content_hash] - if len(urls) >= 2: - try: - urls.sort(key=lambda x: x.stat().last_modified) # type: ignore - except Exception: - msg.warn( - "Unable to sort remote files by last modified. The file(s) " - "pulled from the cache may not be the most recent." - ) - return urls[-1] if urls else None - - def make_url(self, path: Path, command_hash: str, content_hash: str) -> "FluidPath": - """Construct a URL from a subpath, a creation hash and a content hash.""" - return self.url / self.encode_name(str(path)) / command_hash / content_hash - - def encode_name(self, name: str) -> str: - """Encode a subpath into a URL-safe name.""" - return urllib.parse.quote_plus(name) - - -def get_content_hash(loc: Path) -> str: - return get_checksum(loc) - - -def get_command_hash( - site_hash: str, env_hash: str, deps: List[Path], cmd: List[str] -) -> str: - """Create a hash representing the execution of a command. This includes the - currently installed packages, whatever environment variables have been marked - as relevant, and the command. - """ - if check_bool_env_var(ENV_VARS.PROJECT_USE_GIT_VERSION): - spacy_v = GIT_VERSION - else: - spacy_v = str(get_minor_version(about.__version__) or "") - dep_checksums = [get_checksum(dep) for dep in sorted(deps)] - hashes = [spacy_v, site_hash, env_hash] + dep_checksums - hashes.extend(cmd) - creation_bytes = "".join(hashes).encode("utf8") - return hashlib.md5(creation_bytes).hexdigest() - - -def get_site_hash(): - """Hash the current Python environment's site-packages contents, including - the name and version of the libraries. The list we're hashing is what - `pip freeze` would output. - """ - site_dirs = site.getsitepackages() - if site.ENABLE_USER_SITE: - site_dirs.extend(site.getusersitepackages()) - packages = set() - for site_dir in site_dirs: - site_dir = Path(site_dir) - for subpath in site_dir.iterdir(): - if subpath.parts[-1].endswith("dist-info"): - packages.add(subpath.parts[-1].replace(".dist-info", "")) - package_bytes = "".join(sorted(packages)).encode("utf8") - return hashlib.md5sum(package_bytes).hexdigest() - - -def get_env_hash(env: Dict[str, str]) -> str: - """Construct a hash of the environment variables that will be passed into - the commands. - - Values in the env dict may be references to the current os.environ, using - the syntax $ENV_VAR to mean os.environ[ENV_VAR] - """ - env_vars = {} - for key, value in env.items(): - if value.startswith("$"): - env_vars[key] = os.environ.get(value[1:], "") - else: - env_vars[key] = value - return get_hash(env_vars) +from weasel.cli.remote_storage import * diff --git a/spacy/cli/project/run.py b/spacy/cli/project/run.py index 0f4858a99..cc6a5ac42 100644 --- a/spacy/cli/project/run.py +++ b/spacy/cli/project/run.py @@ -1,360 +1 @@ -from typing import Optional, List, Dict, Sequence, Any, Iterable, Tuple -import os.path -from pathlib import Path - -from wasabi import msg -from wasabi.util import locale_escape -import sys -import srsly -import typer - -from ... 
import about -from ...git_info import GIT_VERSION -from ...util import working_dir, run_command, split_command, is_cwd, join_command -from ...util import SimpleFrozenList, is_minor_version_match, ENV_VARS -from ...util import check_bool_env_var, SimpleFrozenDict -from .._util import PROJECT_FILE, PROJECT_LOCK, load_project_config, get_hash -from .._util import get_checksum, project_cli, Arg, Opt, COMMAND, parse_config_overrides - - -@project_cli.command( - "run", context_settings={"allow_extra_args": True, "ignore_unknown_options": True} -) -def project_run_cli( - # fmt: off - ctx: typer.Context, # This is only used to read additional arguments - subcommand: str = Arg(None, help=f"Name of command defined in the {PROJECT_FILE}"), - project_dir: Path = Arg(Path.cwd(), help="Location of project directory. Defaults to current working directory.", exists=True, file_okay=False), - force: bool = Opt(False, "--force", "-F", help="Force re-running steps, even if nothing changed"), - dry: bool = Opt(False, "--dry", "-D", help="Perform a dry run and don't execute scripts"), - show_help: bool = Opt(False, "--help", help="Show help message and available subcommands") - # fmt: on -): - """Run a named command or workflow defined in the project.yml. If a workflow - name is specified, all commands in the workflow are run, in order. If - commands define dependencies and/or outputs, they will only be re-run if - state has changed. - - DOCS: https://spacy.io/api/cli#project-run - """ - if show_help or not subcommand: - print_run_help(project_dir, subcommand) - else: - overrides = parse_config_overrides(ctx.args) - project_run(project_dir, subcommand, overrides=overrides, force=force, dry=dry) - - -def project_run( - project_dir: Path, - subcommand: str, - *, - overrides: Dict[str, Any] = SimpleFrozenDict(), - force: bool = False, - dry: bool = False, - capture: bool = False, - skip_requirements_check: bool = False, -) -> None: - """Run a named script defined in the project.yml. If the script is part - of the default pipeline (defined in the "run" section), DVC is used to - execute the command, so it can determine whether to rerun it. It then - calls into "exec" to execute it. - - project_dir (Path): Path to project directory. - subcommand (str): Name of command to run. - overrides (Dict[str, Any]): Optional config overrides. - force (bool): Force re-running, even if nothing changed. - dry (bool): Perform a dry run and don't execute commands. - capture (bool): Whether to capture the output and errors of individual commands. - If False, the stdout and stderr will not be redirected, and if there's an error, - sys.exit will be called with the return code. You should use capture=False - when you want to turn over execution to the command, and capture=True - when you want to run the command more like a function. - skip_requirements_check (bool): Whether to skip the requirements check. 
- """ - config = load_project_config(project_dir, overrides=overrides) - commands = {cmd["name"]: cmd for cmd in config.get("commands", [])} - workflows = config.get("workflows", {}) - validate_subcommand(list(commands.keys()), list(workflows.keys()), subcommand) - - req_path = project_dir / "requirements.txt" - if not skip_requirements_check: - if config.get("check_requirements", True) and os.path.exists(req_path): - with req_path.open() as requirements_file: - _check_requirements([req.strip() for req in requirements_file]) - - if subcommand in workflows: - msg.info(f"Running workflow '{subcommand}'") - for cmd in workflows[subcommand]: - project_run( - project_dir, - cmd, - overrides=overrides, - force=force, - dry=dry, - capture=capture, - skip_requirements_check=True, - ) - else: - cmd = commands[subcommand] - for dep in cmd.get("deps", []): - if not (project_dir / dep).exists(): - err = f"Missing dependency specified by command '{subcommand}': {dep}" - err_help = "Maybe you forgot to run the 'project assets' command or a previous step?" - err_exits = 1 if not dry else None - msg.fail(err, err_help, exits=err_exits) - check_spacy_commit = check_bool_env_var(ENV_VARS.PROJECT_USE_GIT_VERSION) - with working_dir(project_dir) as current_dir: - msg.divider(subcommand) - rerun = check_rerun(current_dir, cmd, check_spacy_commit=check_spacy_commit) - if not rerun and not force: - msg.info(f"Skipping '{cmd['name']}': nothing changed") - else: - run_commands(cmd["script"], dry=dry, capture=capture) - if not dry: - update_lockfile(current_dir, cmd) - - -def print_run_help(project_dir: Path, subcommand: Optional[str] = None) -> None: - """Simulate a CLI help prompt using the info available in the project.yml. - - project_dir (Path): The project directory. - subcommand (Optional[str]): The subcommand or None. If a subcommand is - provided, the subcommand help is shown. Otherwise, the top-level help - and a list of available commands is printed. - """ - config = load_project_config(project_dir) - config_commands = config.get("commands", []) - commands = {cmd["name"]: cmd for cmd in config_commands} - workflows = config.get("workflows", {}) - project_loc = "" if is_cwd(project_dir) else project_dir - if subcommand: - validate_subcommand(list(commands.keys()), list(workflows.keys()), subcommand) - print(f"Usage: {COMMAND} project run {subcommand} {project_loc}") - if subcommand in commands: - help_text = commands[subcommand].get("help") - if help_text: - print(f"\n{help_text}\n") - elif subcommand in workflows: - steps = workflows[subcommand] - print(f"\nWorkflow consisting of {len(steps)} commands:") - steps_data = [ - (f"{i + 1}. 
{step}", commands[step].get("help", "")) - for i, step in enumerate(steps) - ] - msg.table(steps_data) - help_cmd = f"{COMMAND} project run [COMMAND] {project_loc} --help" - print(f"For command details, run: {help_cmd}") - else: - print("") - title = config.get("title") - if title: - print(f"{locale_escape(title)}\n") - if config_commands: - print(f"Available commands in {PROJECT_FILE}") - print(f"Usage: {COMMAND} project run [COMMAND] {project_loc}") - msg.table([(cmd["name"], cmd.get("help", "")) for cmd in config_commands]) - if workflows: - print(f"Available workflows in {PROJECT_FILE}") - print(f"Usage: {COMMAND} project run [WORKFLOW] {project_loc}") - msg.table([(name, " -> ".join(steps)) for name, steps in workflows.items()]) - - -def run_commands( - commands: Iterable[str] = SimpleFrozenList(), - silent: bool = False, - dry: bool = False, - capture: bool = False, -) -> None: - """Run a sequence of commands in a subprocess, in order. - - commands (List[str]): The string commands. - silent (bool): Don't print the commands. - dry (bool): Perform a dry run and don't execut anything. - capture (bool): Whether to capture the output and errors of individual commands. - If False, the stdout and stderr will not be redirected, and if there's an error, - sys.exit will be called with the return code. You should use capture=False - when you want to turn over execution to the command, and capture=True - when you want to run the command more like a function. - """ - for c in commands: - command = split_command(c) - # Not sure if this is needed or a good idea. Motivation: users may often - # use commands in their config that reference "python" and we want to - # make sure that it's always executing the same Python that spaCy is - # executed with and the pip in the same env, not some other Python/pip. - # Also ensures cross-compatibility if user 1 writes "python3" (because - # that's how it's set up on their system), and user 2 without the - # shortcut tries to re-run the command. - if len(command) and command[0] in ("python", "python3"): - command[0] = sys.executable - elif len(command) and command[0] in ("pip", "pip3"): - command = [sys.executable, "-m", "pip", *command[1:]] - if not silent: - print(f"Running command: {join_command(command)}") - if not dry: - run_command(command, capture=capture) - - -def validate_subcommand( - commands: Sequence[str], workflows: Sequence[str], subcommand: str -) -> None: - """Check that a subcommand is valid and defined. Raises an error otherwise. - - commands (Sequence[str]): The available commands. - subcommand (str): The subcommand. - """ - if not commands and not workflows: - msg.fail(f"No commands or workflows defined in {PROJECT_FILE}", exits=1) - if subcommand not in commands and subcommand not in workflows: - help_msg = [] - if subcommand in ["assets", "asset"]: - help_msg.append("Did you mean to run: python -m spacy project assets?") - if commands: - help_msg.append(f"Available commands: {', '.join(commands)}") - if workflows: - help_msg.append(f"Available workflows: {', '.join(workflows)}") - msg.fail( - f"Can't find command or workflow '{subcommand}' in {PROJECT_FILE}", - ". ".join(help_msg), - exits=1, - ) - - -def check_rerun( - project_dir: Path, - command: Dict[str, Any], - *, - check_spacy_version: bool = True, - check_spacy_commit: bool = False, -) -> bool: - """Check if a command should be rerun because its settings or inputs/outputs - changed. - - project_dir (Path): The current project directory. 
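`run_commands()` above rewrites bare `python`/`pip` invocations so that project scripts always run with the interpreter spaCy itself runs under. The substitution, pulled out as a standalone sketch:

```python
# Standalone sketch of the interpreter substitution done in run_commands();
# split_command/join_command are the spacy.util helpers imported by the removed module.
import sys

from spacy.util import join_command, split_command

command = split_command("python scripts/train.py --gpu 0")
if len(command) and command[0] in ("python", "python3"):
    command[0] = sys.executable
elif len(command) and command[0] in ("pip", "pip3"):
    command = [sys.executable, "-m", "pip", *command[1:]]
print(join_command(command))  # e.g. /usr/bin/python3 scripts/train.py --gpu 0
```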
- command (Dict[str, Any]): The command, as defined in the project.yml. - strict_version (bool): - RETURNS (bool): Whether to re-run the command. - """ - # Always rerun if no-skip is set - if command.get("no_skip", False): - return True - lock_path = project_dir / PROJECT_LOCK - if not lock_path.exists(): # We don't have a lockfile, run command - return True - data = srsly.read_yaml(lock_path) - if command["name"] not in data: # We don't have info about this command - return True - entry = data[command["name"]] - # Always run commands with no outputs (otherwise they'd always be skipped) - if not entry.get("outs", []): - return True - # Always rerun if spaCy version or commit hash changed - spacy_v = entry.get("spacy_version") - commit = entry.get("spacy_git_version") - if check_spacy_version and not is_minor_version_match(spacy_v, about.__version__): - info = f"({spacy_v} in {PROJECT_LOCK}, {about.__version__} current)" - msg.info(f"Re-running '{command['name']}': spaCy minor version changed {info}") - return True - if check_spacy_commit and commit != GIT_VERSION: - info = f"({commit} in {PROJECT_LOCK}, {GIT_VERSION} current)" - msg.info(f"Re-running '{command['name']}': spaCy commit changed {info}") - return True - # If the entry in the lockfile matches the lockfile entry that would be - # generated from the current command, we don't rerun because it means that - # all inputs/outputs, hashes and scripts are the same and nothing changed - lock_entry = get_lock_entry(project_dir, command) - exclude = ["spacy_version", "spacy_git_version"] - return get_hash(lock_entry, exclude=exclude) != get_hash(entry, exclude=exclude) - - -def update_lockfile(project_dir: Path, command: Dict[str, Any]) -> None: - """Update the lockfile after running a command. Will create a lockfile if - it doesn't yet exist and will add an entry for the current command, its - script and dependencies/outputs. - - project_dir (Path): The current project directory. - command (Dict[str, Any]): The command, as defined in the project.yml. - """ - lock_path = project_dir / PROJECT_LOCK - if not lock_path.exists(): - srsly.write_yaml(lock_path, {}) - data = {} - else: - data = srsly.read_yaml(lock_path) - data[command["name"]] = get_lock_entry(project_dir, command) - srsly.write_yaml(lock_path, data) - - -def get_lock_entry(project_dir: Path, command: Dict[str, Any]) -> Dict[str, Any]: - """Get a lockfile entry for a given command. An entry includes the command, - the script (command steps) and a list of dependencies and outputs with - their paths and file hashes, if available. The format is based on the - dvc.lock files, to keep things consistent. - - project_dir (Path): The current project directory. - command (Dict[str, Any]): The command, as defined in the project.yml. - RETURNS (Dict[str, Any]): The lockfile entry. - """ - deps = get_fileinfo(project_dir, command.get("deps", [])) - outs = get_fileinfo(project_dir, command.get("outputs", [])) - outs_nc = get_fileinfo(project_dir, command.get("outputs_no_cache", [])) - return { - "cmd": f"{COMMAND} run {command['name']}", - "script": command["script"], - "deps": deps, - "outs": [*outs, *outs_nc], - "spacy_version": about.__version__, - "spacy_git_version": GIT_VERSION, - } - - -def get_fileinfo(project_dir: Path, paths: List[str]) -> List[Dict[str, Optional[str]]]: - """Generate the file information for a list of paths (dependencies, outputs). - Includes the file path and the file's checksum. - - project_dir (Path): The current project directory. 
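`check_rerun()` and `get_lock_entry()` above decide whether a command is skipped: the freshly built lock entry is hashed with the two `spacy_*` keys excluded and compared against the stored one. A hypothetical `project.lock` entry with the shape those helpers produce (all values invented):

```python
# Hypothetical entry as assembled by get_lock_entry(); every value is illustrative.
lock_entry = {
    "cmd": "python -m spacy run preprocess",
    "script": ["python scripts/preprocess.py assets/raw.jsonl corpus/train.spacy"],
    "deps": [{"path": "assets/raw.jsonl", "md5": "3f2a9c1e0b"}],   # made-up checksum
    "outs": [{"path": "corpus/train.spacy", "md5": None}],         # None if missing on disk
    "spacy_version": "3.7.0",
    "spacy_git_version": "a1b2c3d",
}
# check_rerun() skips the command when get_hash(lock_entry, exclude=["spacy_version",
# "spacy_git_version"]) matches the hash of the entry already stored in project.lock.
```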
- paths (List[str]): The file paths. - RETURNS (List[Dict[str, str]]): The lockfile entry for a file. - """ - data = [] - for path in paths: - file_path = project_dir / path - md5 = get_checksum(file_path) if file_path.exists() else None - data.append({"path": path, "md5": md5}) - return data - - -def _check_requirements(requirements: List[str]) -> Tuple[bool, bool]: - """Checks whether requirements are installed and free of version conflicts. - requirements (List[str]): List of requirements. - RETURNS (Tuple[bool, bool]): Whether (1) any packages couldn't be imported, (2) any packages with version conflicts - exist. - """ - import pkg_resources - - failed_pkgs_msgs: List[str] = [] - conflicting_pkgs_msgs: List[str] = [] - - for req in requirements: - try: - pkg_resources.require(req) - except pkg_resources.DistributionNotFound as dnf: - failed_pkgs_msgs.append(dnf.report()) - except pkg_resources.VersionConflict as vc: - conflicting_pkgs_msgs.append(vc.report()) - except Exception: - msg.warn( - f"Unable to check requirement: {req} " - "Checks are currently limited to requirement specifiers " - "(PEP 508)" - ) - - if len(failed_pkgs_msgs) or len(conflicting_pkgs_msgs): - msg.warn( - title="Missing requirements or requirement conflicts detected. Make sure your Python environment is set up " - "correctly and you installed all requirements specified in your project's requirements.txt: " - ) - for pgk_msg in failed_pkgs_msgs + conflicting_pkgs_msgs: - msg.text(pgk_msg) - - return len(failed_pkgs_msgs) > 0, len(conflicting_pkgs_msgs) > 0 +from weasel.cli.run import * diff --git a/spacy/cli/templates/quickstart_training.jinja b/spacy/cli/templates/quickstart_training.jinja index e3ca73cfb..2817147f3 100644 --- a/spacy/cli/templates/quickstart_training.jinja +++ b/spacy/cli/templates/quickstart_training.jinja @@ -130,7 +130,7 @@ grad_factor = 1.0 {% if "span_finder" in components -%} [components.span_finder] factory = "span_finder" -max_length = null +max_length = 25 min_length = null scorer = {"@scorers":"spacy.span_finder_scorer.v1"} spans_key = "sc" @@ -271,8 +271,9 @@ grad_factor = 1.0 @layers = "reduce_mean.v1" [components.textcat.model.linear_model] -@architectures = "spacy.TextCatBOW.v2" +@architectures = "spacy.TextCatBOW.v3" exclusive_classes = true +length = 262144 ngram_size = 1 no_output_layer = false @@ -308,8 +309,9 @@ grad_factor = 1.0 @layers = "reduce_mean.v1" [components.textcat_multilabel.model.linear_model] -@architectures = "spacy.TextCatBOW.v2" +@architectures = "spacy.TextCatBOW.v3" exclusive_classes = false +length = 262144 ngram_size = 1 no_output_layer = false @@ -419,7 +421,7 @@ width = ${components.tok2vec.model.encode.width} {% if "span_finder" in components %} [components.span_finder] factory = "span_finder" -max_length = null +max_length = 25 min_length = null scorer = {"@scorers":"spacy.span_finder_scorer.v1"} spans_key = "sc" @@ -542,14 +544,15 @@ nO = null width = ${components.tok2vec.model.encode.width} [components.textcat.model.linear_model] -@architectures = "spacy.TextCatBOW.v2" +@architectures = "spacy.TextCatBOW.v3" exclusive_classes = true +length = 262144 ngram_size = 1 no_output_layer = false {% else -%} [components.textcat.model] -@architectures = "spacy.TextCatBOW.v2" +@architectures = "spacy.TextCatBOW.v3" exclusive_classes = true ngram_size = 1 no_output_layer = false @@ -570,15 +573,17 @@ nO = null width = ${components.tok2vec.model.encode.width} [components.textcat_multilabel.model.linear_model] -@architectures = "spacy.TextCatBOW.v2" 
+@architectures = "spacy.TextCatBOW.v3" exclusive_classes = false +length = 262144 ngram_size = 1 no_output_layer = false {% else -%} [components.textcat_multilabel.model] -@architectures = "spacy.TextCatBOW.v2" +@architectures = "spacy.TextCatBOW.v3" exclusive_classes = false +length = 262144 ngram_size = 1 no_output_layer = false {%- endif %} diff --git a/spacy/cli/train.py b/spacy/cli/train.py index cc22cbba6..c72e13b26 100644 --- a/spacy/cli/train.py +++ b/spacy/cli/train.py @@ -1,15 +1,23 @@ -from typing import Optional, Dict, Any, Union -from pathlib import Path -from wasabi import msg -import typer import logging import sys +from pathlib import Path +from typing import Any, Dict, Optional, Union + +import typer +from wasabi import msg -from ._util import app, Arg, Opt, parse_config_overrides, show_validation_error -from ._util import import_code, setup_gpu -from ..training.loop import train as train_nlp -from ..training.initialize import init_nlp from .. import util +from ..training.initialize import init_nlp +from ..training.loop import train as train_nlp +from ._util import ( + Arg, + Opt, + app, + import_code, + parse_config_overrides, + setup_gpu, + show_validation_error, +) @app.command( @@ -39,7 +47,8 @@ def train_cli( DOCS: https://spacy.io/api/cli#train """ - util.logger.setLevel(logging.DEBUG if verbose else logging.INFO) + if verbose: + util.logger.setLevel(logging.DEBUG) overrides = parse_config_overrides(ctx.args) import_code(code_path) train(config_path, output_path, use_gpu=use_gpu, overrides=overrides) diff --git a/spacy/cli/validate.py b/spacy/cli/validate.py index a918e9a39..0426f05fd 100644 --- a/spacy/cli/validate.py +++ b/spacy/cli/validate.py @@ -1,14 +1,21 @@ -from typing import Tuple -from pathlib import Path import sys -import requests -from wasabi import msg, Printer import warnings +from pathlib import Path +from typing import Tuple + +import requests +from wasabi import Printer, msg -from ._util import app from .. import about -from ..util import get_package_version, get_installed_models, get_minor_version -from ..util import get_package_path, get_model_meta, is_compatible_version +from ..util import ( + get_installed_models, + get_minor_version, + get_model_meta, + get_package_path, + get_package_version, + is_compatible_version, +) +from ._util import app @app.command("validate") diff --git a/spacy/compat.py b/spacy/compat.py index 89132735d..522fa30dd 100644 --- a/spacy/compat.py +++ b/spacy/compat.py @@ -1,5 +1,6 @@ """Helpers for Python and platform compatibility.""" import sys + from thinc.util import copy_array try: diff --git a/spacy/default_config.cfg b/spacy/default_config.cfg index 694fb732f..b005eef40 100644 --- a/spacy/default_config.cfg +++ b/spacy/default_config.cfg @@ -26,6 +26,9 @@ batch_size = 1000 [nlp.tokenizer] @tokenizers = "spacy.Tokenizer.v1" +[nlp.vectors] +@vectors = "spacy.Vectors.v1" + # The pipeline components and their models [components] diff --git a/spacy/displacy/__init__.py b/spacy/displacy/__init__.py index f42dad0c9..bde2d04fe 100644 --- a/spacy/displacy/__init__.py +++ b/spacy/displacy/__init__.py @@ -4,15 +4,13 @@ spaCy's built in visualization suite for dependencies and named entities. 
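The quickstart template changes above pin `spacy.TextCatBOW.v3` and spell out its new `length` setting (the new E1056 error added below rejects values under 1). A minimal sketch of selecting the same architecture from Python instead of a generated config:

```python
# Minimal sketch mirroring the template values; nothing here is required by the diff itself.
import spacy

nlp = spacy.blank("en")
textcat = nlp.add_pipe(
    "textcat",
    config={
        "model": {
            "@architectures": "spacy.TextCatBOW.v3",
            "exclusive_classes": True,
            "ngram_size": 1,
            "no_output_layer": False,
            "length": 262144,  # size of the internal weights vector
        }
    },
)
textcat.add_label("POSITIVE")
textcat.add_label("NEGATIVE")
```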
DOCS: https://spacy.io/api/top-level#displacy USAGE: https://spacy.io/usage/visualizers """ -from typing import Union, Iterable, Optional, Dict, Any, Callable import warnings +from typing import Any, Callable, Dict, Iterable, Optional, Union -from .render import DependencyRenderer, EntityRenderer, SpanRenderer -from ..tokens import Doc, Span from ..errors import Errors, Warnings -from ..util import is_in_jupyter -from ..util import find_available_port - +from ..tokens import Doc, Span +from ..util import find_available_port, is_in_jupyter +from .render import DependencyRenderer, EntityRenderer, SpanRenderer _html = {} RENDER_WRAPPER = None @@ -68,7 +66,7 @@ def render( if jupyter or (jupyter is None and is_in_jupyter()): # return HTML rendered by IPython display() # See #4840 for details on span wrapper to disable mathjax - from IPython.core.display import display, HTML + from IPython.core.display import HTML, display return display(HTML('{}'.format(html))) return html diff --git a/spacy/displacy/render.py b/spacy/displacy/render.py index f74222dc2..40b9986e8 100644 --- a/spacy/displacy/render.py +++ b/spacy/displacy/render.py @@ -1,15 +1,28 @@ -from typing import Any, Dict, List, Optional, Tuple, Union import uuid -import itertools +from typing import Any, Dict, List, Optional, Tuple, Union from ..errors import Errors from ..util import escape_html, minify_html, registry -from .templates import TPL_DEP_ARCS, TPL_DEP_SVG, TPL_DEP_WORDS -from .templates import TPL_DEP_WORDS_LEMMA, TPL_ENT, TPL_ENT_RTL, TPL_ENTS -from .templates import TPL_FIGURE, TPL_KB_LINK, TPL_PAGE, TPL_SPAN -from .templates import TPL_SPAN_RTL, TPL_SPAN_SLICE, TPL_SPAN_SLICE_RTL -from .templates import TPL_SPAN_START, TPL_SPAN_START_RTL, TPL_SPANS -from .templates import TPL_TITLE +from .templates import ( + TPL_DEP_ARCS, + TPL_DEP_SVG, + TPL_DEP_WORDS, + TPL_DEP_WORDS_LEMMA, + TPL_ENT, + TPL_ENT_RTL, + TPL_ENTS, + TPL_FIGURE, + TPL_KB_LINK, + TPL_PAGE, + TPL_SPAN, + TPL_SPAN_RTL, + TPL_SPAN_SLICE, + TPL_SPAN_SLICE_RTL, + TPL_SPAN_START, + TPL_SPAN_START_RTL, + TPL_SPANS, + TPL_TITLE, +) DEFAULT_LANG = "en" DEFAULT_DIR = "ltr" @@ -129,7 +142,25 @@ class SpanRenderer: spans (list): Individual entity spans and their start, end, label, kb_id and kb_url. title (str / None): Document title set in Doc.user_data['title']. """ - per_token_info = [] + per_token_info = self._assemble_per_token_info(tokens, spans) + markup = self._render_markup(per_token_info) + markup = TPL_SPANS.format(content=markup, dir=self.direction) + if title: + markup = TPL_TITLE.format(title=title) + markup + return markup + + @staticmethod + def _assemble_per_token_info( + tokens: List[str], spans: List[Dict[str, Any]] + ) -> List[Dict[str, List[Dict[str, Any]]]]: + """Assembles token info used to generate markup in render_spans(). + tokens (List[str]): Tokens in text. + spans (List[Dict[str, Any]]): Spans in text. + RETURNS (List[Dict[str, List[Dict, str, Any]]]): Per token info needed to render HTML markup for given tokens + and spans. 
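The `SpanRenderer` refactor in this hunk computes a `render_slot` per span so overlapping spans stack instead of colliding, and the extracted `_assemble_per_token_info()` makes that logic testable. A short usage sketch that exercises exactly this path:

```python
# Overlapping spans in doc.spans["sc"] are stacked by SpanRenderer via render_slot.
import spacy
from spacy import displacy
from spacy.tokens import Span

nlp = spacy.blank("en")
doc = nlp("Welcome to the Bank of China .")
doc.spans["sc"] = [
    Span(doc, 3, 6, "ORG"),  # "Bank of China"
    Span(doc, 5, 6, "GPE"),  # "China", overlapping the ORG span
]
html = displacy.render(doc, style="span", jupyter=False)
print(html[:200])
```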
+ """ + per_token_info: List[Dict[str, List[Dict[str, Any]]]] = [] + # we must sort so that we can correctly describe when spans need to "stack" # which is determined by their start token, then span length (longer spans on top), # then break any remaining ties with the span label @@ -141,21 +172,22 @@ class SpanRenderer: s["label"], ), ) + for s in spans: # this is the vertical 'slot' that the span will be rendered in # vertical_position = span_label_offset + (offset_step * (slot - 1)) s["render_slot"] = 0 + for idx, token in enumerate(tokens): # Identify if a token belongs to a Span (and which) and if it's a # start token of said Span. We'll use this for the final HTML render token_markup: Dict[str, Any] = {} token_markup["text"] = token - concurrent_spans = 0 + intersecting_spans: List[Dict[str, Any]] = [] entities = [] for span in spans: ent = {} if span["start_token"] <= idx < span["end_token"]: - concurrent_spans += 1 span_start = idx == span["start_token"] ent["label"] = span["label"] ent["is_start"] = span_start @@ -163,7 +195,12 @@ class SpanRenderer: # When the span starts, we need to know how many other # spans are on the 'span stack' and will be rendered. # This value becomes the vertical render slot for this entire span - span["render_slot"] = concurrent_spans + span["render_slot"] = ( + intersecting_spans[-1]["render_slot"] + if len(intersecting_spans) + else 0 + ) + 1 + intersecting_spans.append(span) ent["render_slot"] = span["render_slot"] kb_id = span.get("kb_id", "") kb_url = span.get("kb_url", "#") @@ -180,11 +217,8 @@ class SpanRenderer: span["render_slot"] = 0 token_markup["entities"] = entities per_token_info.append(token_markup) - markup = self._render_markup(per_token_info) - markup = TPL_SPANS.format(content=markup, dir=self.direction) - if title: - markup = TPL_TITLE.format(title=title) + markup - return markup + + return per_token_info def _render_markup(self, per_token_info: List[Dict[str, Any]]) -> str: """Render the markup from per-token information""" @@ -204,7 +238,7 @@ class SpanRenderer: + (self.offset_step * (len(entities) - 1)) ) markup += self.span_template.format( - text=token["text"], + text=escape_html(token["text"]), span_slices=slices, span_starts=starts, total_height=total_height, @@ -300,6 +334,8 @@ class DependencyRenderer: self.lang = settings.get("lang", DEFAULT_LANG) render_id = f"{id_prefix}-{i}" svg = self.render_svg(render_id, p["words"], p["arcs"]) + if p.get("title"): + svg = TPL_TITLE.format(title=p.get("title")) + svg rendered.append(svg) if page: content = "".join([TPL_FIGURE.format(content=svg) for svg in rendered]) @@ -552,7 +588,7 @@ class EntityRenderer: for i, fragment in enumerate(fragments): markup += escape_html(fragment) if len(fragments) > 1 and i != len(fragments) - 1: - markup += "
" + markup += "
" if self.ents is None or label.upper() in self.ents: color = self.colors.get(label.upper(), self.default_color) ent_settings = { @@ -570,7 +606,7 @@ class EntityRenderer: for i, fragment in enumerate(fragments): markup += escape_html(fragment) if len(fragments) > 1 and i != len(fragments) - 1: - markup += "
" + markup += "
" markup = TPL_ENTS.format(content=markup, dir=self.direction) if title: markup = TPL_TITLE.format(title=title) + markup diff --git a/spacy/errors.py b/spacy/errors.py index 928c3be90..cf9a7b708 100644 --- a/spacy/errors.py +++ b/spacy/errors.py @@ -1,4 +1,5 @@ import warnings + from .compat import Literal @@ -215,6 +216,11 @@ class Warnings(metaclass=ErrorsWithCodes): W123 = ("Argument `enable` with value {enable} does not contain all values specified in the config option " "`enabled` ({enabled}). Be aware that this might affect other components in your pipeline.") W124 = ("{host}:{port} is already in use, using the nearest available port {serve_port} as an alternative.") + W125 = ("The StaticVectors key_attr is no longer used. To set a custom " + "key attribute for vectors, configure it through Vectors(attr=) or " + "'spacy init vectors --attr'") + W126 = ("These keys are unsupported: {unsupported}") + W127 = ("Not all `Language.pipe` worker processes completed successfully") class Errors(metaclass=ErrorsWithCodes): @@ -222,7 +228,6 @@ class Errors(metaclass=ErrorsWithCodes): E002 = ("Can't find factory for '{name}' for language {lang} ({lang_code}). " "This usually happens when spaCy calls `nlp.{method}` with a custom " "component name that's not registered on the current language class. " - "If you're using a Transformer, make sure to install 'spacy-transformers'. " "If you're using a custom component, make sure you've added the " "decorator `@Language.component` (for function components) or " "`@Language.factory` (for class components).\n\nAvailable " @@ -549,12 +554,12 @@ class Errors(metaclass=ErrorsWithCodes): "during training, make sure to include it in 'annotating components'") # New errors added in v3.x + E849 = ("The vocab only supports {method} for vectors of type " + "spacy.vectors.Vectors, not {vectors_type}.") E850 = ("The PretrainVectors objective currently only supports default or " "floret vectors, not {mode} vectors.") E851 = ("The 'textcat' component labels should only have values of 0 or 1, " "but found value of '{val}'.") - E852 = ("The tar file pulled from the remote attempted an unsafe path " - "traversal.") E853 = ("Unsupported component factory name '{name}'. The character '.' is " "not permitted in factory names.") E854 = ("Unable to set doc.ents. Check that the 'ents_filter' does not " @@ -738,8 +743,8 @@ class Errors(metaclass=ErrorsWithCodes): "model from a shortcut, which is obsolete as of spaCy v3.0. To " "load the model, use its full name instead:\n\n" "nlp = spacy.load(\"{full}\")\n\nFor more details on the available " - "models, see the models directory: https://spacy.io/models. If you " - "want to create a blank model, use spacy.blank: " + "models, see the models directory: https://spacy.io/models and if " + "you want to create a blank model, use spacy.blank: " "nlp = spacy.blank(\"{name}\")") E942 = ("Executing `after_{name}` callback failed. Expected the function to " "return an initialized nlp object but got: {value}. 
Maybe " @@ -977,6 +982,12 @@ class Errors(metaclass=ErrorsWithCodes): " 'min_length': {min_length}, 'max_length': {max_length}") E1054 = ("The text, including whitespace, must match between reference and " "predicted docs when training {component}.") + E1055 = ("The 'replace_listener' callback expects {num_params} parameters, " + "but only callbacks with one or three parameters are supported") + E1056 = ("The `TextCatBOW` architecture expects a length of at least 1, was {length}.") + E1057 = ("The `TextCatReduce` architecture must be used with at least one " + "reduction. Please enable one of `use_reduce_first`, " + "`use_reduce_last`, `use_reduce_max` or `use_reduce_mean`.") # Deprecated model shortcuts, only used in errors and warnings diff --git a/spacy/glossary.py b/spacy/glossary.py index d2240fbba..1f628698b 100644 --- a/spacy/glossary.py +++ b/spacy/glossary.py @@ -1,4 +1,5 @@ import warnings + from .errors import Warnings diff --git a/spacy/kb/__init__.py b/spacy/kb/__init__.py index 1d70a9b34..93a65ab61 100644 --- a/spacy/kb/__init__.py +++ b/spacy/kb/__init__.py @@ -1,3 +1,11 @@ +from .candidate import Candidate, get_candidates, get_candidates_batch from .kb import KnowledgeBase from .kb_in_memory import InMemoryLookupKB -from .candidate import Candidate, get_candidates, get_candidates_batch + +__all__ = [ + "Candidate", + "KnowledgeBase", + "InMemoryLookupKB", + "get_candidates", + "get_candidates_batch", +] diff --git a/spacy/kb/candidate.pxd b/spacy/kb/candidate.pxd index 942ce9dd0..80fcbc459 100644 --- a/spacy/kb/candidate.pxd +++ b/spacy/kb/candidate.pxd @@ -1,8 +1,11 @@ -from .kb cimport KnowledgeBase from libcpp.vector cimport vector -from ..typedefs cimport hash_t -# Object used by the Entity Linker that summarizes one entity-alias candidate combination. +from ..typedefs cimport hash_t +from .kb cimport KnowledgeBase + + +# Object used by the Entity Linker that summarizes one entity-alias candidate +# combination. cdef class Candidate: cdef readonly KnowledgeBase kb cdef hash_t entity_hash diff --git a/spacy/kb/candidate.pyx b/spacy/kb/candidate.pyx index c89efeb03..4369676e2 100644 --- a/spacy/kb/candidate.pyx +++ b/spacy/kb/candidate.pyx @@ -1,19 +1,31 @@ -# cython: infer_types=True, profile=True +# cython: infer_types=True from typing import Iterable + from .kb cimport KnowledgeBase + from ..tokens import Span + cdef class Candidate: - """A `Candidate` object refers to a textual mention (`alias`) that may or may not be resolved - to a specific `entity` from a Knowledge Base. This will be used as input for the entity linking - algorithm which will disambiguate the various candidates to the correct one. + """A `Candidate` object refers to a textual mention (`alias`) that may or + may not be resolved to a specific `entity` from a Knowledge Base. This + will be used as input for the entity linking algorithm which will + disambiguate the various candidates to the correct one. Each candidate (alias, entity) pair is assigned a certain prior probability. 
DOCS: https://spacy.io/api/kb/#candidate-init """ - def __init__(self, KnowledgeBase kb, entity_hash, entity_freq, entity_vector, alias_hash, prior_prob): + def __init__( + self, + KnowledgeBase kb, + entity_hash, + entity_freq, + entity_vector, + alias_hash, + prior_prob + ): self.kb = kb self.entity_hash = entity_hash self.entity_freq = entity_freq @@ -56,7 +68,8 @@ cdef class Candidate: def get_candidates(kb: KnowledgeBase, mention: Span) -> Iterable[Candidate]: """ - Return candidate entities for a given mention and fetching appropriate entries from the index. + Return candidate entities for a given mention and fetching appropriate + entries from the index. kb (KnowledgeBase): Knowledge base to query. mention (Span): Entity mention for which to identify candidates. RETURNS (Iterable[Candidate]): Identified candidates. @@ -64,9 +77,12 @@ def get_candidates(kb: KnowledgeBase, mention: Span) -> Iterable[Candidate]: return kb.get_candidates(mention) -def get_candidates_batch(kb: KnowledgeBase, mentions: Iterable[Span]) -> Iterable[Iterable[Candidate]]: +def get_candidates_batch( + kb: KnowledgeBase, mentions: Iterable[Span] +) -> Iterable[Iterable[Candidate]]: """ - Return candidate entities for the given mentions and fetching appropriate entries from the index. + Return candidate entities for the given mentions and fetching appropriate entries + from the index. kb (KnowledgeBase): Knowledge base to query. mention (Iterable[Span]): Entity mentions for which to identify candidates. RETURNS (Iterable[Iterable[Candidate]]): Identified candidates. diff --git a/spacy/kb/kb.pxd b/spacy/kb/kb.pxd index 1adeef8ae..263469546 100644 --- a/spacy/kb/kb.pxd +++ b/spacy/kb/kb.pxd @@ -2,8 +2,10 @@ from cymem.cymem cimport Pool from libc.stdint cimport int64_t + from ..vocab cimport Vocab + cdef class KnowledgeBase: cdef Pool mem cdef readonly Vocab vocab diff --git a/spacy/kb/kb.pyx b/spacy/kb/kb.pyx index ce4bc0138..c7db34e16 100644 --- a/spacy/kb/kb.pyx +++ b/spacy/kb/kb.pyx @@ -1,18 +1,20 @@ -# cython: infer_types=True, profile=True +# cython: infer_types=True from pathlib import Path from typing import Iterable, Tuple, Union + from cymem.cymem cimport Pool -from .candidate import Candidate +from ..errors import Errors from ..tokens import Span from ..util import SimpleFrozenList -from ..errors import Errors +from .candidate import Candidate cdef class KnowledgeBase: - """A `KnowledgeBase` instance stores unique identifiers for entities and their textual aliases, - to support entity linking of named entities to real-world concepts. + """A `KnowledgeBase` instance stores unique identifiers for entities and + their textual aliases, to support entity linking of named entities to + real-world concepts. This is an abstract class and requires its operations to be implemented. DOCS: https://spacy.io/api/kb @@ -30,10 +32,13 @@ cdef class KnowledgeBase: self.entity_vector_length = entity_vector_length self.mem = Pool() - def get_candidates_batch(self, mentions: Iterable[Span]) -> Iterable[Iterable[Candidate]]: + def get_candidates_batch( + self, mentions: Iterable[Span] + ) -> Iterable[Iterable[Candidate]]: """ - Return candidate entities for specified texts. Each candidate defines the entity, the original alias, - and the prior probability of that alias resolving to that entity. + Return candidate entities for specified texts. Each candidate defines + the entity, the original alias, and the prior probability of that + alias resolving to that entity. 
If no candidate is found for a given text, an empty list is returned. mentions (Iterable[Span]): Mentions for which to get candidates. RETURNS (Iterable[Iterable[Candidate]]): Identified candidates. @@ -42,14 +47,17 @@ cdef class KnowledgeBase: def get_candidates(self, mention: Span) -> Iterable[Candidate]: """ - Return candidate entities for specified text. Each candidate defines the entity, the original alias, + Return candidate entities for specified text. Each candidate defines + the entity, the original alias, and the prior probability of that alias resolving to that entity. If the no candidate is found for a given text, an empty list is returned. mention (Span): Mention for which to get candidates. RETURNS (Iterable[Candidate]): Identified candidates. """ raise NotImplementedError( - Errors.E1045.format(parent="KnowledgeBase", method="get_candidates", name=self.__name__) + Errors.E1045.format( + parent="KnowledgeBase", method="get_candidates", name=self.__name__ + ) ) def get_vectors(self, entities: Iterable[str]) -> Iterable[Iterable[float]]: @@ -67,7 +75,9 @@ cdef class KnowledgeBase: RETURNS (Iterable[float]): Vector for specified entity. """ raise NotImplementedError( - Errors.E1045.format(parent="KnowledgeBase", method="get_vector", name=self.__name__) + Errors.E1045.format( + parent="KnowledgeBase", method="get_vector", name=self.__name__ + ) ) def to_bytes(self, **kwargs) -> bytes: @@ -75,7 +85,9 @@ cdef class KnowledgeBase: RETURNS (bytes): Current state as binary string. """ raise NotImplementedError( - Errors.E1045.format(parent="KnowledgeBase", method="to_bytes", name=self.__name__) + Errors.E1045.format( + parent="KnowledgeBase", method="to_bytes", name=self.__name__ + ) ) def from_bytes(self, bytes_data: bytes, *, exclude: Tuple[str] = tuple()): @@ -84,25 +96,35 @@ cdef class KnowledgeBase: exclude (Tuple[str]): Properties to exclude when restoring KB. """ raise NotImplementedError( - Errors.E1045.format(parent="KnowledgeBase", method="from_bytes", name=self.__name__) + Errors.E1045.format( + parent="KnowledgeBase", method="from_bytes", name=self.__name__ + ) ) - def to_disk(self, path: Union[str, Path], exclude: Iterable[str] = SimpleFrozenList()) -> None: + def to_disk( + self, path: Union[str, Path], exclude: Iterable[str] = SimpleFrozenList() + ) -> None: """ Write KnowledgeBase content to disk. path (Union[str, Path]): Target file path. exclude (Iterable[str]): List of components to exclude. """ raise NotImplementedError( - Errors.E1045.format(parent="KnowledgeBase", method="to_disk", name=self.__name__) + Errors.E1045.format( + parent="KnowledgeBase", method="to_disk", name=self.__name__ + ) ) - def from_disk(self, path: Union[str, Path], exclude: Iterable[str] = SimpleFrozenList()) -> None: + def from_disk( + self, path: Union[str, Path], exclude: Iterable[str] = SimpleFrozenList() + ) -> None: """ Load KnowledgeBase content from disk. path (Union[str, Path]): Target file path. exclude (Iterable[str]): List of components to exclude. 
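`KnowledgeBase` is an abstract base class: as the reformatted code in this hunk shows, every operation raises `NotImplementedError` until a subclass overrides it. A purely illustrative toy subclass (the dict-backed lookup is an assumption for the example, not spaCy code):

```python
# Illustrative only: a toy KnowledgeBase subclass backed by a plain dict.
from typing import Dict, Iterable, List

from spacy.kb import Candidate, KnowledgeBase
from spacy.tokens import Span
from spacy.vocab import Vocab


class ToyKB(KnowledgeBase):
    def __init__(self, vocab: Vocab, entity_vector_length: int):
        super().__init__(vocab, entity_vector_length)
        # Surface form -> pre-built Candidate objects; filled elsewhere.
        self._candidates: Dict[str, List[Candidate]] = {}

    def get_candidates(self, mention: Span) -> Iterable[Candidate]:
        # Return whatever was registered for this surface form, or nothing.
        return self._candidates.get(mention.text, [])
```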
""" raise NotImplementedError( - Errors.E1045.format(parent="KnowledgeBase", method="from_disk", name=self.__name__) + Errors.E1045.format( + parent="KnowledgeBase", method="from_disk", name=self.__name__ + ) ) diff --git a/spacy/kb/kb_in_memory.pxd b/spacy/kb/kb_in_memory.pxd index 825a6bde9..e0e33301a 100644 --- a/spacy/kb/kb_in_memory.pxd +++ b/spacy/kb/kb_in_memory.pxd @@ -1,11 +1,11 @@ """Knowledge-base for entity or concept linking.""" -from preshed.maps cimport PreshMap -from libcpp.vector cimport vector from libc.stdint cimport int32_t, int64_t from libc.stdio cimport FILE +from libcpp.vector cimport vector +from preshed.maps cimport PreshMap +from ..structs cimport AliasC, KBEntryC from ..typedefs cimport hash_t -from ..structs cimport KBEntryC, AliasC from .kb cimport KnowledgeBase ctypedef vector[KBEntryC] entry_vec @@ -55,23 +55,28 @@ cdef class InMemoryLookupKB(KnowledgeBase): # optional data, we can let users configure a DB as the backend for this. cdef object _features_table - cdef inline int64_t c_add_vector(self, vector[float] entity_vector) nogil: """Add an entity vector to the vectors table.""" cdef int64_t new_index = self._vectors_table.size() self._vectors_table.push_back(entity_vector) return new_index - - cdef inline int64_t c_add_entity(self, hash_t entity_hash, float freq, - int32_t vector_index, int feats_row) nogil: + cdef inline int64_t c_add_entity( + self, + hash_t entity_hash, + float freq, + int32_t vector_index, + int feats_row + ) nogil: """Add an entry to the vector of entries. - After calling this method, make sure to update also the _entry_index using the return value""" + After calling this method, make sure to update also the _entry_index + using the return value""" # This is what we'll map the entity hash key to. It's where the entry will sit # in the vector of entries, so we can get it later. cdef int64_t new_index = self._entries.size() - # Avoid struct initializer to enable nogil, cf https://github.com/cython/cython/issues/1642 + # Avoid struct initializer to enable nogil, cf. + # https://github.com/cython/cython/issues/1642 cdef KBEntryC entry entry.entity_hash = entity_hash entry.vector_index = vector_index @@ -81,11 +86,17 @@ cdef class InMemoryLookupKB(KnowledgeBase): self._entries.push_back(entry) return new_index - cdef inline int64_t c_add_aliases(self, hash_t alias_hash, vector[int64_t] entry_indices, vector[float] probs) nogil: - """Connect a mention to a list of potential entities with their prior probabilities . - After calling this method, make sure to update also the _alias_index using the return value""" - # This is what we'll map the alias hash key to. It's where the alias will be defined - # in the vector of aliases. + cdef inline int64_t c_add_aliases( + self, + hash_t alias_hash, + vector[int64_t] entry_indices, + vector[float] probs + ) nogil: + """Connect a mention to a list of potential entities with their prior + probabilities. After calling this method, make sure to update also the + _alias_index using the return value""" + # This is what we'll map the alias hash key to. It's where the alias will be + # defined in the vector of aliases. 
cdef int64_t new_index = self._aliases_table.size() # Avoid struct initializer to enable nogil @@ -98,8 +109,9 @@ cdef class InMemoryLookupKB(KnowledgeBase): cdef inline void _create_empty_vectors(self, hash_t dummy_hash) nogil: """ - Initializing the vectors and making sure the first element of each vector is a dummy, - because the PreshMap maps pointing to indices in these vectors can not contain 0 as value + Initializing the vectors and making sure the first element of each vector is a + dummy, because the PreshMap maps pointing to indices in these vectors can not + contain 0 as value. cf. https://github.com/explosion/preshed/issues/17 """ cdef int32_t dummy_value = 0 @@ -130,12 +142,18 @@ cdef class InMemoryLookupKB(KnowledgeBase): cdef class Writer: cdef FILE* _fp - cdef int write_header(self, int64_t nr_entries, int64_t entity_vector_length) except -1 + cdef int write_header( + self, int64_t nr_entries, int64_t entity_vector_length + ) except -1 cdef int write_vector_element(self, float element) except -1 - cdef int write_entry(self, hash_t entry_hash, float entry_freq, int32_t vector_index) except -1 + cdef int write_entry( + self, hash_t entry_hash, float entry_freq, int32_t vector_index + ) except -1 cdef int write_alias_length(self, int64_t alias_length) except -1 - cdef int write_alias_header(self, hash_t alias_hash, int64_t candidate_length) except -1 + cdef int write_alias_header( + self, hash_t alias_hash, int64_t candidate_length + ) except -1 cdef int write_alias(self, int64_t entry_index, float prob) except -1 cdef int _write(self, void* value, size_t size) except -1 @@ -143,12 +161,18 @@ cdef class Writer: cdef class Reader: cdef FILE* _fp - cdef int read_header(self, int64_t* nr_entries, int64_t* entity_vector_length) except -1 + cdef int read_header( + self, int64_t* nr_entries, int64_t* entity_vector_length + ) except -1 cdef int read_vector_element(self, float* element) except -1 - cdef int read_entry(self, hash_t* entity_hash, float* freq, int32_t* vector_index) except -1 + cdef int read_entry( + self, hash_t* entity_hash, float* freq, int32_t* vector_index + ) except -1 cdef int read_alias_length(self, int64_t* alias_length) except -1 - cdef int read_alias_header(self, hash_t* alias_hash, int64_t* candidate_length) except -1 + cdef int read_alias_header( + self, hash_t* alias_hash, int64_t* candidate_length + ) except -1 cdef int read_alias(self, int64_t* entry_index, float* prob) except -1 cdef int _read(self, void* value, size_t size) except -1 diff --git a/spacy/kb/kb_in_memory.pyx b/spacy/kb/kb_in_memory.pyx index 2a74d047b..2b21f246a 100644 --- a/spacy/kb/kb_in_memory.pyx +++ b/spacy/kb/kb_in_memory.pyx @@ -1,29 +1,35 @@ -# cython: infer_types=True, profile=True -from typing import Iterable, Callable, Dict, Any, Union +# cython: infer_types=True +from typing import Any, Callable, Dict, Iterable import srsly -from preshed.maps cimport PreshMap -from cpython.exc cimport PyErr_SetFromErrno -from libc.stdio cimport fopen, fclose, fread, fwrite, feof, fseek -from libc.stdint cimport int32_t, int64_t -from libcpp.vector cimport vector -from pathlib import Path +from cpython.exc cimport PyErr_SetFromErrno +from libc.stdint cimport int32_t, int64_t +from libc.stdio cimport fclose, feof, fopen, fread, fseek, fwrite +from libcpp.vector cimport vector +from preshed.maps cimport PreshMap + import warnings +from pathlib import Path from ..tokens import Span + from ..typedefs cimport hash_t -from ..errors import Errors, Warnings + from .. 
import util +from ..errors import Errors, Warnings from ..util import SimpleFrozenList, ensure_path + from ..vocab cimport Vocab from .kb cimport KnowledgeBase + from .candidate import Candidate as Candidate cdef class InMemoryLookupKB(KnowledgeBase): - """An `InMemoryLookupKB` instance stores unique identifiers for entities and their textual aliases, - to support entity linking of named entities to real-world concepts. + """An `InMemoryLookupKB` instance stores unique identifiers for entities + and their textual aliases, to support entity linking of named entities to + real-world concepts. DOCS: https://spacy.io/api/inmemorylookupkb """ @@ -66,7 +72,8 @@ cdef class InMemoryLookupKB(KnowledgeBase): def add_entity(self, str entity, float freq, vector[float] entity_vector): """ - Add an entity to the KB, optionally specifying its log probability based on corpus frequency + Add an entity to the KB, optionally specifying its log probability + based on corpus frequency. Return the hash of the entity ID/name at the end. """ cdef hash_t entity_hash = self.vocab.strings.add(entity) @@ -78,14 +85,20 @@ cdef class InMemoryLookupKB(KnowledgeBase): # Raise an error if the provided entity vector is not of the correct length if len(entity_vector) != self.entity_vector_length: - raise ValueError(Errors.E141.format(found=len(entity_vector), required=self.entity_vector_length)) + raise ValueError( + Errors.E141.format( + found=len(entity_vector), required=self.entity_vector_length + ) + ) vector_index = self.c_add_vector(entity_vector=entity_vector) - new_index = self.c_add_entity(entity_hash=entity_hash, - freq=freq, - vector_index=vector_index, - feats_row=-1) # Features table currently not implemented + new_index = self.c_add_entity( + entity_hash=entity_hash, + freq=freq, + vector_index=vector_index, + feats_row=-1 + ) # Features table currently not implemented self._entry_index[entity_hash] = new_index return entity_hash @@ -110,7 +123,12 @@ cdef class InMemoryLookupKB(KnowledgeBase): else: entity_vector = vector_list[i] if len(entity_vector) != self.entity_vector_length: - raise ValueError(Errors.E141.format(found=len(entity_vector), required=self.entity_vector_length)) + raise ValueError( + Errors.E141.format( + found=len(entity_vector), + required=self.entity_vector_length + ) + ) entry.entity_hash = entity_hash entry.freq = freq_list[i] @@ -144,11 +162,15 @@ cdef class InMemoryLookupKB(KnowledgeBase): previous_alias_nr = self.get_size_aliases() # Throw an error if the length of entities and probabilities are not the same if not len(entities) == len(probabilities): - raise ValueError(Errors.E132.format(alias=alias, - entities_length=len(entities), - probabilities_length=len(probabilities))) + raise ValueError( + Errors.E132.format( + alias=alias, + entities_length=len(entities), + probabilities_length=len(probabilities)) + ) - # Throw an error if the probabilities sum up to more than 1 (allow for some rounding errors) + # Throw an error if the probabilities sum up to more than 1 (allow for + # some rounding errors) prob_sum = sum(probabilities) if prob_sum > 1.00001: raise ValueError(Errors.E133.format(alias=alias, sum=prob_sum)) @@ -165,40 +187,47 @@ cdef class InMemoryLookupKB(KnowledgeBase): for entity, prob in zip(entities, probabilities): entity_hash = self.vocab.strings[entity] - if not entity_hash in self._entry_index: + if entity_hash not in self._entry_index: raise ValueError(Errors.E134.format(entity=entity)) entry_index = self._entry_index.get(entity_hash) 
entry_indices.push_back(int(entry_index)) probs.push_back(float(prob)) - new_index = self.c_add_aliases(alias_hash=alias_hash, entry_indices=entry_indices, probs=probs) + new_index = self.c_add_aliases( + alias_hash=alias_hash, entry_indices=entry_indices, probs=probs + ) self._alias_index[alias_hash] = new_index if previous_alias_nr + 1 != self.get_size_aliases(): raise RuntimeError(Errors.E891.format(alias=alias)) return alias_hash - def append_alias(self, str alias, str entity, float prior_prob, ignore_warnings=False): + def append_alias( + self, str alias, str entity, float prior_prob, ignore_warnings=False + ): """ - For an alias already existing in the KB, extend its potential entities with one more. + For an alias already existing in the KB, extend its potential entities + with one more. Throw a warning if either the alias or the entity is unknown, or when the combination is already previously recorded. Throw an error if this entity+prior prob would exceed the sum of 1. - For efficiency, it's best to use the method `add_alias` as much as possible instead of this one. + For efficiency, it's best to use the method `add_alias` as much as + possible instead of this one. """ # Check if the alias exists in the KB cdef hash_t alias_hash = self.vocab.strings[alias] - if not alias_hash in self._alias_index: + if alias_hash not in self._alias_index: raise ValueError(Errors.E176.format(alias=alias)) # Check if the entity exists in the KB cdef hash_t entity_hash = self.vocab.strings[entity] - if not entity_hash in self._entry_index: + if entity_hash not in self._entry_index: raise ValueError(Errors.E134.format(entity=entity)) entry_index = self._entry_index.get(entity_hash) - # Throw an error if the prior probabilities (including the new one) sum up to more than 1 + # Throw an error if the prior probabilities (including the new one) + # sum up to more than 1 alias_index = self._alias_index.get(alias_hash) alias_entry = self._aliases_table[alias_index] current_sum = sum([p for p in alias_entry.probs]) @@ -231,12 +260,13 @@ cdef class InMemoryLookupKB(KnowledgeBase): def get_alias_candidates(self, str alias) -> Iterable[Candidate]: """ - Return candidate entities for an alias. Each candidate defines the entity, the original alias, - and the prior probability of that alias resolving to that entity. + Return candidate entities for an alias. Each candidate defines the + entity, the original alias, and the prior probability of that alias + resolving to that entity. If the alias is not known in the KB, and empty list is returned. 
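The `InMemoryLookupKB` methods being reformatted here (`add_entity`, `add_alias`, `get_alias_candidates`, `get_prior_prob`) are easiest to follow next to a tiny end-to-end sketch; the identifiers and numbers below are illustrative:

```python
# Small usage sketch for InMemoryLookupKB; entity ID, frequency and prior are invented.
import spacy
from spacy.kb import InMemoryLookupKB

nlp = spacy.blank("en")
kb = InMemoryLookupKB(vocab=nlp.vocab, entity_vector_length=3)

# One entity with a 3-dimensional vector, plus an alias with a prior probability.
kb.add_entity(entity="Q2146908", freq=12, entity_vector=[1.0, 2.0, 3.0])
kb.add_alias(alias="Russ Cochran", entities=["Q2146908"], probabilities=[0.8])

for candidate in kb.get_alias_candidates("Russ Cochran"):
    print(candidate.entity_, candidate.prior_prob)         # Q2146908 0.8

print(kb.get_prior_prob("Q2146908", "Russ Cochran"))       # 0.8
```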
""" cdef hash_t alias_hash = self.vocab.strings[alias] - if not alias_hash in self._alias_index: + if alias_hash not in self._alias_index: return [] alias_index = self._alias_index.get(alias_hash) alias_entry = self._aliases_table[alias_index] @@ -244,10 +274,14 @@ cdef class InMemoryLookupKB(KnowledgeBase): return [Candidate(kb=self, entity_hash=self._entries[entry_index].entity_hash, entity_freq=self._entries[entry_index].freq, - entity_vector=self._vectors_table[self._entries[entry_index].vector_index], + entity_vector=self._vectors_table[ + self._entries[entry_index].vector_index + ], alias_hash=alias_hash, prior_prob=prior_prob) - for (entry_index, prior_prob) in zip(alias_entry.entry_indices, alias_entry.probs) + for (entry_index, prior_prob) in zip( + alias_entry.entry_indices, alias_entry.probs + ) if entry_index != 0] def get_vector(self, str entity): @@ -261,8 +295,9 @@ cdef class InMemoryLookupKB(KnowledgeBase): return self._vectors_table[self._entries[entry_index].vector_index] def get_prior_prob(self, str entity, str alias): - """ Return the prior probability of a given alias being linked to a given entity, - or return 0.0 when this combination is not known in the knowledge base""" + """ Return the prior probability of a given alias being linked to a + given entity, or return 0.0 when this combination is not known in the + knowledge base.""" cdef hash_t alias_hash = self.vocab.strings[alias] cdef hash_t entity_hash = self.vocab.strings[entity] @@ -273,7 +308,9 @@ cdef class InMemoryLookupKB(KnowledgeBase): entry_index = self._entry_index[entity_hash] alias_entry = self._aliases_table[alias_index] - for (entry_index, prior_prob) in zip(alias_entry.entry_indices, alias_entry.probs): + for (entry_index, prior_prob) in zip( + alias_entry.entry_indices, alias_entry.probs + ): if self._entries[entry_index].entity_hash == entity_hash: return prior_prob @@ -283,13 +320,19 @@ cdef class InMemoryLookupKB(KnowledgeBase): """Serialize the current state to a binary string. """ def serialize_header(): - header = (self.get_size_entities(), self.get_size_aliases(), self.entity_vector_length) + header = ( + self.get_size_entities(), + self.get_size_aliases(), + self.entity_vector_length + ) return srsly.json_dumps(header) def serialize_entries(): i = 1 tuples = [] - for entry_hash, entry_index in sorted(self._entry_index.items(), key=lambda x: x[1]): + for entry_hash, entry_index in sorted( + self._entry_index.items(), key=lambda x: x[1] + ): entry = self._entries[entry_index] assert entry.entity_hash == entry_hash assert entry_index == i @@ -302,7 +345,9 @@ cdef class InMemoryLookupKB(KnowledgeBase): headers = [] indices_lists = [] probs_lists = [] - for alias_hash, alias_index in sorted(self._alias_index.items(), key=lambda x: x[1]): + for alias_hash, alias_index in sorted( + self._alias_index.items(), key=lambda x: x[1] + ): alias = self._aliases_table[alias_index] assert alias_index == i candidate_length = len(alias.entry_indices) @@ -360,7 +405,7 @@ cdef class InMemoryLookupKB(KnowledgeBase): indices = srsly.json_loads(all_data[1]) probs = srsly.json_loads(all_data[2]) for header, indices, probs in zip(headers, indices, probs): - alias_hash, candidate_length = header + alias_hash, _candidate_length = header alias.entry_indices = indices alias.probs = probs self._aliases_table[i] = alias @@ -409,10 +454,14 @@ cdef class InMemoryLookupKB(KnowledgeBase): writer.write_vector_element(element) i = i+1 - # dumping the entry records in the order in which they are in the _entries vector. 
- # index 0 is a dummy object not stored in the _entry_index and can be ignored. + # dumping the entry records in the order in which they are in the + # _entries vector. + # index 0 is a dummy object not stored in the _entry_index and can + # be ignored. i = 1 - for entry_hash, entry_index in sorted(self._entry_index.items(), key=lambda x: x[1]): + for entry_hash, entry_index in sorted( + self._entry_index.items(), key=lambda x: x[1] + ): entry = self._entries[entry_index] assert entry.entity_hash == entry_hash assert entry_index == i @@ -424,7 +473,9 @@ cdef class InMemoryLookupKB(KnowledgeBase): # dumping the aliases in the order in which they are in the _alias_index vector. # index 0 is a dummy object not stored in the _aliases_table and can be ignored. i = 1 - for alias_hash, alias_index in sorted(self._alias_index.items(), key=lambda x: x[1]): + for alias_hash, alias_index in sorted( + self._alias_index.items(), key=lambda x: x[1] + ): alias = self._aliases_table[alias_index] assert alias_index == i @@ -530,7 +581,8 @@ cdef class Writer: def __init__(self, path): assert isinstance(path, Path) content = bytes(path) - cdef bytes bytes_loc = content.encode('utf8') if type(content) == str else content + cdef bytes bytes_loc = content.encode('utf8') \ + if type(content) == str else content self._fp = fopen(bytes_loc, 'wb') if not self._fp: raise IOError(Errors.E146.format(path=path)) @@ -540,14 +592,18 @@ cdef class Writer: cdef size_t status = fclose(self._fp) assert status == 0 - cdef int write_header(self, int64_t nr_entries, int64_t entity_vector_length) except -1: + cdef int write_header( + self, int64_t nr_entries, int64_t entity_vector_length + ) except -1: self._write(&nr_entries, sizeof(nr_entries)) self._write(&entity_vector_length, sizeof(entity_vector_length)) cdef int write_vector_element(self, float element) except -1: self._write(&element, sizeof(element)) - cdef int write_entry(self, hash_t entry_hash, float entry_freq, int32_t vector_index) except -1: + cdef int write_entry( + self, hash_t entry_hash, float entry_freq, int32_t vector_index + ) except -1: self._write(&entry_hash, sizeof(entry_hash)) self._write(&entry_freq, sizeof(entry_freq)) self._write(&vector_index, sizeof(vector_index)) @@ -556,7 +612,9 @@ cdef class Writer: cdef int write_alias_length(self, int64_t alias_length) except -1: self._write(&alias_length, sizeof(alias_length)) - cdef int write_alias_header(self, hash_t alias_hash, int64_t candidate_length) except -1: + cdef int write_alias_header( + self, hash_t alias_hash, int64_t candidate_length + ) except -1: self._write(&alias_hash, sizeof(alias_hash)) self._write(&candidate_length, sizeof(candidate_length)) @@ -572,16 +630,19 @@ cdef class Writer: cdef class Reader: def __init__(self, path): content = bytes(path) - cdef bytes bytes_loc = content.encode('utf8') if type(content) == str else content + cdef bytes bytes_loc = content.encode('utf8') \ + if type(content) == str else content self._fp = fopen(bytes_loc, 'rb') if not self._fp: PyErr_SetFromErrno(IOError) - status = fseek(self._fp, 0, 0) # this can be 0 if there is no header + fseek(self._fp, 0, 0) # this can be 0 if there is no header def __dealloc__(self): fclose(self._fp) - cdef int read_header(self, int64_t* nr_entries, int64_t* entity_vector_length) except -1: + cdef int read_header( + self, int64_t* nr_entries, int64_t* entity_vector_length + ) except -1: status = self._read(nr_entries, sizeof(int64_t)) if status < 1: if feof(self._fp): @@ -601,7 +662,9 @@ cdef class Reader: return 0 # end 
of file raise IOError(Errors.E145.format(param="vector element")) - cdef int read_entry(self, hash_t* entity_hash, float* freq, int32_t* vector_index) except -1: + cdef int read_entry( + self, hash_t* entity_hash, float* freq, int32_t* vector_index + ) except -1: status = self._read(entity_hash, sizeof(hash_t)) if status < 1: if feof(self._fp): @@ -632,7 +695,9 @@ cdef class Reader: return 0 # end of file raise IOError(Errors.E145.format(param="alias length")) - cdef int read_alias_header(self, hash_t* alias_hash, int64_t* candidate_length) except -1: + cdef int read_alias_header( + self, hash_t* alias_hash, int64_t* candidate_length + ) except -1: status = self._read(alias_hash, sizeof(hash_t)) if status < 1: if feof(self._fp): diff --git a/spacy/lang/af/__init__.py b/spacy/lang/af/__init__.py index 553fcbf4c..8bd73c7ad 100644 --- a/spacy/lang/af/__init__.py +++ b/spacy/lang/af/__init__.py @@ -1,5 +1,5 @@ +from ...language import BaseDefaults, Language from .stop_words import STOP_WORDS -from ...language import Language, BaseDefaults class AfrikaansDefaults(BaseDefaults): diff --git a/spacy/lang/am/__init__.py b/spacy/lang/am/__init__.py index ddae556d6..284823eaa 100644 --- a/spacy/lang/am/__init__.py +++ b/spacy/lang/am/__init__.py @@ -1,12 +1,11 @@ -from .stop_words import STOP_WORDS +from ...attrs import LANG +from ...language import BaseDefaults, Language +from ...util import update_exc +from ..tokenizer_exceptions import BASE_EXCEPTIONS from .lex_attrs import LEX_ATTRS from .punctuation import TOKENIZER_SUFFIXES - +from .stop_words import STOP_WORDS from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS -from ..tokenizer_exceptions import BASE_EXCEPTIONS -from ...language import Language, BaseDefaults -from ...attrs import LANG -from ...util import update_exc class AmharicDefaults(BaseDefaults): diff --git a/spacy/lang/am/punctuation.py b/spacy/lang/am/punctuation.py index 555a179fa..87447b054 100644 --- a/spacy/lang/am/punctuation.py +++ b/spacy/lang/am/punctuation.py @@ -1,5 +1,11 @@ -from ..char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES, CURRENCY -from ..char_classes import UNITS, ALPHA_UPPER +from ..char_classes import ( + ALPHA_UPPER, + CURRENCY, + LIST_ELLIPSES, + LIST_PUNCT, + LIST_QUOTES, + UNITS, +) _list_punct = LIST_PUNCT + "፡ ። ፣ ፤ ፥ ፦ ፧ ፠ ፨".strip().split() diff --git a/spacy/lang/am/tokenizer_exceptions.py b/spacy/lang/am/tokenizer_exceptions.py index 9472fe918..1ccf996ca 100644 --- a/spacy/lang/am/tokenizer_exceptions.py +++ b/spacy/lang/am/tokenizer_exceptions.py @@ -1,5 +1,4 @@ -from ...symbols import ORTH, NORM - +from ...symbols import NORM, ORTH _exc = {} diff --git a/spacy/lang/ar/__init__.py b/spacy/lang/ar/__init__.py index 18c1f90ed..d50b0722c 100644 --- a/spacy/lang/ar/__init__.py +++ b/spacy/lang/ar/__init__.py @@ -1,8 +1,8 @@ -from .stop_words import STOP_WORDS +from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS from .punctuation import TOKENIZER_SUFFIXES +from .stop_words import STOP_WORDS from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS -from ...language import Language, BaseDefaults class ArabicDefaults(BaseDefaults): diff --git a/spacy/lang/ar/punctuation.py b/spacy/lang/ar/punctuation.py index f30204c02..cf03fc68e 100644 --- a/spacy/lang/ar/punctuation.py +++ b/spacy/lang/ar/punctuation.py @@ -1,5 +1,11 @@ -from ..char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES, CURRENCY -from ..char_classes import UNITS, ALPHA_UPPER +from ..char_classes import ( + ALPHA_UPPER, + CURRENCY, + LIST_ELLIPSES, + 
LIST_PUNCT, + LIST_QUOTES, + UNITS, +) _suffixes = ( LIST_PUNCT diff --git a/spacy/lang/ar/tokenizer_exceptions.py b/spacy/lang/ar/tokenizer_exceptions.py index 7c385bef8..eb16876f5 100644 --- a/spacy/lang/ar/tokenizer_exceptions.py +++ b/spacy/lang/ar/tokenizer_exceptions.py @@ -1,7 +1,6 @@ -from ..tokenizer_exceptions import BASE_EXCEPTIONS -from ...symbols import ORTH, NORM +from ...symbols import NORM, ORTH from ...util import update_exc - +from ..tokenizer_exceptions import BASE_EXCEPTIONS _exc = {} diff --git a/spacy/lang/az/__init__.py b/spacy/lang/az/__init__.py index 476898364..32949aa3e 100644 --- a/spacy/lang/az/__init__.py +++ b/spacy/lang/az/__init__.py @@ -1,6 +1,6 @@ -from .stop_words import STOP_WORDS +from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS -from ...language import Language, BaseDefaults +from .stop_words import STOP_WORDS class AzerbaijaniDefaults(BaseDefaults): diff --git a/spacy/lang/az/lex_attrs.py b/spacy/lang/az/lex_attrs.py index 73a5e2762..96fb7f020 100644 --- a/spacy/lang/az/lex_attrs.py +++ b/spacy/lang/az/lex_attrs.py @@ -1,6 +1,5 @@ from ...attrs import LIKE_NUM - # Eleven, twelve etc. are written separate: on bir, on iki _num_words = [ diff --git a/spacy/lang/bg/__init__.py b/spacy/lang/bg/__init__.py index c9176b946..acca63ba1 100644 --- a/spacy/lang/bg/__init__.py +++ b/spacy/lang/bg/__init__.py @@ -1,12 +1,14 @@ +from ...attrs import LANG +from ...language import BaseDefaults, Language +from ...util import update_exc +from ..punctuation import ( + COMBINING_DIACRITICS_TOKENIZER_INFIXES, + COMBINING_DIACRITICS_TOKENIZER_SUFFIXES, +) +from ..tokenizer_exceptions import BASE_EXCEPTIONS +from .lex_attrs import LEX_ATTRS from .stop_words import STOP_WORDS from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS -from .lex_attrs import LEX_ATTRS -from ..tokenizer_exceptions import BASE_EXCEPTIONS -from ..punctuation import COMBINING_DIACRITICS_TOKENIZER_INFIXES -from ..punctuation import COMBINING_DIACRITICS_TOKENIZER_SUFFIXES -from ...language import Language, BaseDefaults -from ...attrs import LANG -from ...util import update_exc class BulgarianDefaults(BaseDefaults): diff --git a/spacy/lang/bg/lex_attrs.py b/spacy/lang/bg/lex_attrs.py index bba3c74cd..0b7942aec 100644 --- a/spacy/lang/bg/lex_attrs.py +++ b/spacy/lang/bg/lex_attrs.py @@ -1,6 +1,5 @@ from ...attrs import LIKE_NUM - _num_words = [ "нула", "едно", diff --git a/spacy/lang/bg/tokenizer_exceptions.py b/spacy/lang/bg/tokenizer_exceptions.py index 0f484b778..89d466daf 100644 --- a/spacy/lang/bg/tokenizer_exceptions.py +++ b/spacy/lang/bg/tokenizer_exceptions.py @@ -4,8 +4,7 @@ References: (countries, occupations, fields of studies and more). 
""" -from ...symbols import ORTH, NORM - +from ...symbols import NORM, ORTH _exc = {} diff --git a/spacy/lang/bn/__init__.py b/spacy/lang/bn/__init__.py index 6d0331e00..6a5d37f5b 100644 --- a/spacy/lang/bn/__init__.py +++ b/spacy/lang/bn/__init__.py @@ -1,10 +1,12 @@ -from typing import Optional, Callable +from typing import Callable, Optional + from thinc.api import Model -from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS -from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES, TOKENIZER_INFIXES -from .stop_words import STOP_WORDS -from ...language import Language, BaseDefaults + +from ...language import BaseDefaults, Language from ...pipeline import Lemmatizer +from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES +from .stop_words import STOP_WORDS +from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS class BengaliDefaults(BaseDefaults): diff --git a/spacy/lang/bn/punctuation.py b/spacy/lang/bn/punctuation.py index becfe8d2a..ddb91cef1 100644 --- a/spacy/lang/bn/punctuation.py +++ b/spacy/lang/bn/punctuation.py @@ -1,6 +1,14 @@ -from ..char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES, LIST_ICONS -from ..char_classes import ALPHA_LOWER, ALPHA, HYPHENS, CONCAT_QUOTES, UNITS - +from ..char_classes import ( + ALPHA, + ALPHA_LOWER, + CONCAT_QUOTES, + HYPHENS, + LIST_ELLIPSES, + LIST_ICONS, + LIST_PUNCT, + LIST_QUOTES, + UNITS, +) _currency = r"\$¢£€¥฿৳" _quotes = CONCAT_QUOTES.replace("'", "") diff --git a/spacy/lang/bn/tokenizer_exceptions.py b/spacy/lang/bn/tokenizer_exceptions.py index e666522b8..016bf0fc5 100644 --- a/spacy/lang/bn/tokenizer_exceptions.py +++ b/spacy/lang/bn/tokenizer_exceptions.py @@ -1,7 +1,6 @@ -from ..tokenizer_exceptions import BASE_EXCEPTIONS -from ...symbols import ORTH, NORM +from ...symbols import NORM, ORTH from ...util import update_exc - +from ..tokenizer_exceptions import BASE_EXCEPTIONS _exc = {} diff --git a/spacy/lang/ca/__init__.py b/spacy/lang/ca/__init__.py index a3def660d..8b2f3e85a 100755 --- a/spacy/lang/ca/__init__.py +++ b/spacy/lang/ca/__init__.py @@ -1,14 +1,14 @@ -from typing import Optional, Callable +from typing import Callable, Optional from thinc.api import Model -from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS -from .punctuation import TOKENIZER_INFIXES, TOKENIZER_SUFFIXES, TOKENIZER_PREFIXES -from .stop_words import STOP_WORDS -from .lex_attrs import LEX_ATTRS -from .syntax_iterators import SYNTAX_ITERATORS -from ...language import Language, BaseDefaults +from ...language import BaseDefaults, Language from .lemmatizer import CatalanLemmatizer +from .lex_attrs import LEX_ATTRS +from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES +from .stop_words import STOP_WORDS +from .syntax_iterators import SYNTAX_ITERATORS +from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS class CatalanDefaults(BaseDefaults): diff --git a/spacy/lang/ca/lex_attrs.py b/spacy/lang/ca/lex_attrs.py index be8b7a6ea..3e99da0e0 100644 --- a/spacy/lang/ca/lex_attrs.py +++ b/spacy/lang/ca/lex_attrs.py @@ -1,6 +1,5 @@ from ...attrs import LIKE_NUM - _num_words = [ "zero", "un", diff --git a/spacy/lang/ca/punctuation.py b/spacy/lang/ca/punctuation.py index 8e2f09828..6914f67a7 100755 --- a/spacy/lang/ca/punctuation.py +++ b/spacy/lang/ca/punctuation.py @@ -1,9 +1,18 @@ -from ..char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES, LIST_ICONS -from ..char_classes import LIST_CURRENCY -from ..char_classes import CURRENCY -from ..char_classes import CONCAT_QUOTES, ALPHA_LOWER, 
ALPHA_UPPER, ALPHA, PUNCT -from ..char_classes import merge_chars, _units - +from ..char_classes import ( + ALPHA, + ALPHA_LOWER, + ALPHA_UPPER, + CONCAT_QUOTES, + CURRENCY, + LIST_CURRENCY, + LIST_ELLIPSES, + LIST_ICONS, + LIST_PUNCT, + LIST_QUOTES, + PUNCT, + _units, + merge_chars, +) ELISION = " ' ’ ".strip().replace(" ", "").replace("\n", "") diff --git a/spacy/lang/ca/syntax_iterators.py b/spacy/lang/ca/syntax_iterators.py index 917e07c93..16a4c6a81 100644 --- a/spacy/lang/ca/syntax_iterators.py +++ b/spacy/lang/ca/syntax_iterators.py @@ -1,7 +1,8 @@ -from typing import Union, Iterator, Tuple -from ...tokens import Doc, Span -from ...symbols import NOUN, PROPN +from typing import Iterator, Tuple, Union + from ...errors import Errors +from ...symbols import NOUN, PROPN +from ...tokens import Doc, Span def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]: diff --git a/spacy/lang/ca/tokenizer_exceptions.py b/spacy/lang/ca/tokenizer_exceptions.py index b261b3498..67165780e 100755 --- a/spacy/lang/ca/tokenizer_exceptions.py +++ b/spacy/lang/ca/tokenizer_exceptions.py @@ -1,7 +1,6 @@ -from ..tokenizer_exceptions import BASE_EXCEPTIONS -from ...symbols import ORTH, NORM +from ...symbols import NORM, ORTH from ...util import update_exc - +from ..tokenizer_exceptions import BASE_EXCEPTIONS _exc = {} diff --git a/spacy/lang/cs/__init__.py b/spacy/lang/cs/__init__.py index 3e70e4078..9ea60afdf 100644 --- a/spacy/lang/cs/__init__.py +++ b/spacy/lang/cs/__init__.py @@ -1,6 +1,6 @@ -from .stop_words import STOP_WORDS +from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS -from ...language import Language, BaseDefaults +from .stop_words import STOP_WORDS class CzechDefaults(BaseDefaults): diff --git a/spacy/lang/da/__init__.py b/spacy/lang/da/__init__.py index e148a7b4f..372f372dd 100644 --- a/spacy/lang/da/__init__.py +++ b/spacy/lang/da/__init__.py @@ -1,9 +1,9 @@ -from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS +from ...language import BaseDefaults, Language +from .lex_attrs import LEX_ATTRS from .punctuation import TOKENIZER_INFIXES, TOKENIZER_SUFFIXES from .stop_words import STOP_WORDS -from .lex_attrs import LEX_ATTRS from .syntax_iterators import SYNTAX_ITERATORS -from ...language import Language, BaseDefaults +from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS class DanishDefaults(BaseDefaults): diff --git a/spacy/lang/da/lex_attrs.py b/spacy/lang/da/lex_attrs.py index 403af686c..8e0420912 100644 --- a/spacy/lang/da/lex_attrs.py +++ b/spacy/lang/da/lex_attrs.py @@ -1,6 +1,5 @@ from ...attrs import LIKE_NUM - # Source http://fjern-uv.dk/tal.php _num_words = """nul en et to tre fire fem seks syv otte ni ti diff --git a/spacy/lang/da/punctuation.py b/spacy/lang/da/punctuation.py index e050ab7aa..f70fe3d64 100644 --- a/spacy/lang/da/punctuation.py +++ b/spacy/lang/da/punctuation.py @@ -1,8 +1,13 @@ -from ..char_classes import LIST_ELLIPSES, LIST_ICONS -from ..char_classes import CONCAT_QUOTES, ALPHA, ALPHA_LOWER, ALPHA_UPPER +from ..char_classes import ( + ALPHA, + ALPHA_LOWER, + ALPHA_UPPER, + CONCAT_QUOTES, + LIST_ELLIPSES, + LIST_ICONS, +) from ..punctuation import TOKENIZER_SUFFIXES - _quotes = CONCAT_QUOTES.replace("'", "") _infixes = ( diff --git a/spacy/lang/da/syntax_iterators.py b/spacy/lang/da/syntax_iterators.py index a0b70f004..60224f0b1 100644 --- a/spacy/lang/da/syntax_iterators.py +++ b/spacy/lang/da/syntax_iterators.py @@ -1,7 +1,8 @@ -from typing import Union, Iterator, Tuple -from ...tokens import Doc, Span -from 
...symbols import NOUN, PROPN, PRON, VERB, AUX +from typing import Iterator, Tuple, Union + from ...errors import Errors +from ...symbols import AUX, NOUN, PRON, PROPN, VERB +from ...tokens import Doc, Span def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]: diff --git a/spacy/lang/da/tokenizer_exceptions.py b/spacy/lang/da/tokenizer_exceptions.py index ce25c546b..649d12022 100644 --- a/spacy/lang/da/tokenizer_exceptions.py +++ b/spacy/lang/da/tokenizer_exceptions.py @@ -2,10 +2,9 @@ Tokenizer Exceptions. Source: https://forkortelse.dk/ and various others. """ -from ..tokenizer_exceptions import BASE_EXCEPTIONS -from ...symbols import ORTH, NORM +from ...symbols import NORM, ORTH from ...util import update_exc - +from ..tokenizer_exceptions import BASE_EXCEPTIONS _exc = {} diff --git a/spacy/lang/de/__init__.py b/spacy/lang/de/__init__.py index 65863c098..4f45b2357 100644 --- a/spacy/lang/de/__init__.py +++ b/spacy/lang/de/__init__.py @@ -1,8 +1,8 @@ -from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS -from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES, TOKENIZER_INFIXES +from ...language import BaseDefaults, Language +from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES from .stop_words import STOP_WORDS from .syntax_iterators import SYNTAX_ITERATORS -from ...language import Language, BaseDefaults +from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS class GermanDefaults(BaseDefaults): diff --git a/spacy/lang/de/punctuation.py b/spacy/lang/de/punctuation.py index 69d402237..862207649 100644 --- a/spacy/lang/de/punctuation.py +++ b/spacy/lang/de/punctuation.py @@ -1,9 +1,18 @@ -from ..char_classes import LIST_ELLIPSES, LIST_ICONS, LIST_PUNCT, LIST_QUOTES -from ..char_classes import CURRENCY, UNITS, PUNCT -from ..char_classes import CONCAT_QUOTES, ALPHA, ALPHA_LOWER, ALPHA_UPPER +from ..char_classes import ( + ALPHA, + ALPHA_LOWER, + ALPHA_UPPER, + CONCAT_QUOTES, + CURRENCY, + LIST_ELLIPSES, + LIST_ICONS, + LIST_PUNCT, + LIST_QUOTES, + PUNCT, + UNITS, +) from ..punctuation import TOKENIZER_PREFIXES as BASE_TOKENIZER_PREFIXES - _prefixes = ["``"] + BASE_TOKENIZER_PREFIXES _suffixes = ( diff --git a/spacy/lang/de/syntax_iterators.py b/spacy/lang/de/syntax_iterators.py index e80504998..544fe299c 100644 --- a/spacy/lang/de/syntax_iterators.py +++ b/spacy/lang/de/syntax_iterators.py @@ -1,7 +1,7 @@ -from typing import Union, Iterator, Tuple +from typing import Iterator, Tuple, Union -from ...symbols import NOUN, PROPN, PRON from ...errors import Errors +from ...symbols import NOUN, PRON, PROPN from ...tokens import Doc, Span diff --git a/spacy/lang/de/tokenizer_exceptions.py b/spacy/lang/de/tokenizer_exceptions.py index 21d99cffe..3f1aeeccd 100644 --- a/spacy/lang/de/tokenizer_exceptions.py +++ b/spacy/lang/de/tokenizer_exceptions.py @@ -1,7 +1,6 @@ -from ..tokenizer_exceptions import BASE_EXCEPTIONS -from ...symbols import ORTH, NORM +from ...symbols import NORM, ORTH from ...util import update_exc - +from ..tokenizer_exceptions import BASE_EXCEPTIONS _exc = { "auf'm": [{ORTH: "auf"}, {ORTH: "'m", NORM: "dem"}], diff --git a/spacy/lang/dsb/__init__.py b/spacy/lang/dsb/__init__.py index c66092a0c..096eced19 100644 --- a/spacy/lang/dsb/__init__.py +++ b/spacy/lang/dsb/__init__.py @@ -1,6 +1,6 @@ +from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS from .stop_words import STOP_WORDS -from ...language import Language, BaseDefaults class LowerSorbianDefaults(BaseDefaults): diff --git 
a/spacy/lang/el/__init__.py b/spacy/lang/el/__init__.py index 53dd9be8e..00e52bd97 100644 --- a/spacy/lang/el/__init__.py +++ b/spacy/lang/el/__init__.py @@ -1,13 +1,14 @@ -from typing import Optional, Callable +from typing import Callable, Optional + from thinc.api import Model -from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS -from .stop_words import STOP_WORDS -from .lex_attrs import LEX_ATTRS -from .syntax_iterators import SYNTAX_ITERATORS -from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES, TOKENIZER_INFIXES +from ...language import BaseDefaults, Language from .lemmatizer import GreekLemmatizer -from ...language import Language, BaseDefaults +from .lex_attrs import LEX_ATTRS +from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES +from .stop_words import STOP_WORDS +from .syntax_iterators import SYNTAX_ITERATORS +from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS class GreekDefaults(BaseDefaults): diff --git a/spacy/lang/el/get_pos_from_wiktionary.py b/spacy/lang/el/get_pos_from_wiktionary.py index 369973cc0..10b54d112 100644 --- a/spacy/lang/el/get_pos_from_wiktionary.py +++ b/spacy/lang/el/get_pos_from_wiktionary.py @@ -1,5 +1,6 @@ def get_pos_from_wiktionary(): import re + from gensim.corpora.wikicorpus import extract_pages regex = re.compile(r"==={{(\w+)\|el}}===") diff --git a/spacy/lang/el/punctuation.py b/spacy/lang/el/punctuation.py index 2d5690407..b8b717bac 100644 --- a/spacy/lang/el/punctuation.py +++ b/spacy/lang/el/punctuation.py @@ -1,6 +1,16 @@ -from ..char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES, LIST_CURRENCY -from ..char_classes import LIST_ICONS, ALPHA_LOWER, ALPHA_UPPER, ALPHA, HYPHENS -from ..char_classes import CONCAT_QUOTES, CURRENCY +from ..char_classes import ( + ALPHA, + ALPHA_LOWER, + ALPHA_UPPER, + CONCAT_QUOTES, + CURRENCY, + HYPHENS, + LIST_CURRENCY, + LIST_ELLIPSES, + LIST_ICONS, + LIST_PUNCT, + LIST_QUOTES, +) _units = ( "km km² km³ m m² m³ dm dm² dm³ cm cm² cm³ mm mm² mm³ ha µm nm yd in ft " diff --git a/spacy/lang/el/syntax_iterators.py b/spacy/lang/el/syntax_iterators.py index 18fa46695..31c7dccf7 100644 --- a/spacy/lang/el/syntax_iterators.py +++ b/spacy/lang/el/syntax_iterators.py @@ -1,7 +1,7 @@ -from typing import Union, Iterator, Tuple +from typing import Iterator, Tuple, Union -from ...symbols import NOUN, PROPN, PRON from ...errors import Errors +from ...symbols import NOUN, PRON, PROPN from ...tokens import Doc, Span diff --git a/spacy/lang/el/tokenizer_exceptions.py b/spacy/lang/el/tokenizer_exceptions.py index 0a36d5d2b..41317ba97 100644 --- a/spacy/lang/el/tokenizer_exceptions.py +++ b/spacy/lang/el/tokenizer_exceptions.py @@ -1,6 +1,6 @@ -from ..tokenizer_exceptions import BASE_EXCEPTIONS -from ...symbols import ORTH, NORM +from ...symbols import NORM, ORTH from ...util import update_exc +from ..tokenizer_exceptions import BASE_EXCEPTIONS _exc = {} diff --git a/spacy/lang/en/__init__.py b/spacy/lang/en/__init__.py index 876186979..c4bcfb938 100644 --- a/spacy/lang/en/__init__.py +++ b/spacy/lang/en/__init__.py @@ -1,13 +1,14 @@ -from typing import Optional, Callable +from typing import Callable, Optional + from thinc.api import Model -from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS -from .stop_words import STOP_WORDS -from .lex_attrs import LEX_ATTRS -from .syntax_iterators import SYNTAX_ITERATORS -from .punctuation import TOKENIZER_INFIXES +from ...language import BaseDefaults, Language from .lemmatizer import EnglishLemmatizer -from ...language import Language, 
BaseDefaults +from .lex_attrs import LEX_ATTRS +from .punctuation import TOKENIZER_INFIXES +from .stop_words import STOP_WORDS +from .syntax_iterators import SYNTAX_ITERATORS +from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS class EnglishDefaults(BaseDefaults): diff --git a/spacy/lang/en/lex_attrs.py b/spacy/lang/en/lex_attrs.py index ab9353919..7f9dce948 100644 --- a/spacy/lang/en/lex_attrs.py +++ b/spacy/lang/en/lex_attrs.py @@ -6,7 +6,8 @@ _num_words = [ "nine", "ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen", "seventeen", "eighteen", "nineteen", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety", "hundred", "thousand", - "million", "billion", "trillion", "quadrillion", "gajillion", "bazillion" + "million", "billion", "trillion", "quadrillion", "quintillion", "sextillion", + "septillion", "octillion", "nonillion", "decillion", "gajillion", "bazillion" ] _ordinal_words = [ "first", "second", "third", "fourth", "fifth", "sixth", "seventh", "eighth", @@ -14,7 +15,8 @@ _ordinal_words = [ "fifteenth", "sixteenth", "seventeenth", "eighteenth", "nineteenth", "twentieth", "thirtieth", "fortieth", "fiftieth", "sixtieth", "seventieth", "eightieth", "ninetieth", "hundredth", "thousandth", "millionth", "billionth", - "trillionth", "quadrillionth", "gajillionth", "bazillionth" + "trillionth", "quadrillionth", "quintillionth", "sextillionth", "septillionth", + "octillionth", "nonillionth", "decillionth", "gajillionth", "bazillionth" ] # fmt: on diff --git a/spacy/lang/en/punctuation.py b/spacy/lang/en/punctuation.py index 5d3eb792e..775c6b001 100644 --- a/spacy/lang/en/punctuation.py +++ b/spacy/lang/en/punctuation.py @@ -1,5 +1,12 @@ -from ..char_classes import LIST_ELLIPSES, LIST_ICONS, HYPHENS -from ..char_classes import CONCAT_QUOTES, ALPHA_LOWER, ALPHA_UPPER, ALPHA +from ..char_classes import ( + ALPHA, + ALPHA_LOWER, + ALPHA_UPPER, + CONCAT_QUOTES, + HYPHENS, + LIST_ELLIPSES, + LIST_ICONS, +) _infixes = ( LIST_ELLIPSES diff --git a/spacy/lang/en/syntax_iterators.py b/spacy/lang/en/syntax_iterators.py index 7904e5621..140ae0a5c 100644 --- a/spacy/lang/en/syntax_iterators.py +++ b/spacy/lang/en/syntax_iterators.py @@ -1,7 +1,7 @@ -from typing import Union, Iterator, Tuple +from typing import Iterator, Tuple, Union -from ...symbols import NOUN, PROPN, PRON from ...errors import Errors +from ...symbols import NOUN, PRON, PROPN from ...tokens import Doc, Span diff --git a/spacy/lang/en/tokenizer_exceptions.py b/spacy/lang/en/tokenizer_exceptions.py index 7886e28cb..dd3650c18 100644 --- a/spacy/lang/en/tokenizer_exceptions.py +++ b/spacy/lang/en/tokenizer_exceptions.py @@ -1,8 +1,8 @@ from typing import Dict, List -from ..tokenizer_exceptions import BASE_EXCEPTIONS -from ...symbols import ORTH, NORM -from ...util import update_exc +from ...symbols import NORM, ORTH +from ...util import update_exc +from ..tokenizer_exceptions import BASE_EXCEPTIONS _exc: Dict[str, List[Dict]] = {} _exclude = [ diff --git a/spacy/lang/es/__init__.py b/spacy/lang/es/__init__.py index e75955202..bcaed8672 100644 --- a/spacy/lang/es/__init__.py +++ b/spacy/lang/es/__init__.py @@ -1,12 +1,14 @@ -from typing import Optional, Callable +from typing import Callable, Optional + from thinc.api import Model -from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS -from .stop_words import STOP_WORDS -from .lex_attrs import LEX_ATTRS + +from ...language import BaseDefaults, Language from .lemmatizer import SpanishLemmatizer -from .syntax_iterators import SYNTAX_ITERATORS +from 
.lex_attrs import LEX_ATTRS from .punctuation import TOKENIZER_INFIXES, TOKENIZER_SUFFIXES -from ...language import Language, BaseDefaults +from .stop_words import STOP_WORDS +from .syntax_iterators import SYNTAX_ITERATORS +from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS class SpanishDefaults(BaseDefaults): diff --git a/spacy/lang/es/lemmatizer.py b/spacy/lang/es/lemmatizer.py index ca5fc08c8..ee5d38e84 100644 --- a/spacy/lang/es/lemmatizer.py +++ b/spacy/lang/es/lemmatizer.py @@ -1,5 +1,5 @@ -from typing import List, Optional, Tuple import re +from typing import List, Optional, Tuple from ...pipeline import Lemmatizer from ...tokens import Token @@ -163,7 +163,7 @@ class SpanishLemmatizer(Lemmatizer): for old, new in self.lookups.get_table("lemma_rules").get("det", []): if word == old: return [new] - # If none of the specfic rules apply, search in the common rules for + # If none of the specific rules apply, search in the common rules for # determiners and pronouns that follow a unique pattern for # lemmatization. If the word is in the list, return the corresponding # lemma. @@ -291,7 +291,7 @@ class SpanishLemmatizer(Lemmatizer): for old, new in self.lookups.get_table("lemma_rules").get("pron", []): if word == old: return [new] - # If none of the specfic rules apply, search in the common rules for + # If none of the specific rules apply, search in the common rules for # determiners and pronouns that follow a unique pattern for # lemmatization. If the word is in the list, return the corresponding # lemma. diff --git a/spacy/lang/es/lex_attrs.py b/spacy/lang/es/lex_attrs.py index 9d1fa93b8..4c477eaee 100644 --- a/spacy/lang/es/lex_attrs.py +++ b/spacy/lang/es/lex_attrs.py @@ -1,6 +1,5 @@ from ...attrs import LIKE_NUM - _num_words = [ "cero", "uno", diff --git a/spacy/lang/es/punctuation.py b/spacy/lang/es/punctuation.py index e9552371e..3d20518cd 100644 --- a/spacy/lang/es/punctuation.py +++ b/spacy/lang/es/punctuation.py @@ -1,8 +1,17 @@ -from ..char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES -from ..char_classes import LIST_ICONS, CURRENCY, LIST_UNITS, PUNCT -from ..char_classes import CONCAT_QUOTES, ALPHA_LOWER, ALPHA_UPPER, ALPHA -from ..char_classes import merge_chars - +from ..char_classes import ( + ALPHA, + ALPHA_LOWER, + ALPHA_UPPER, + CONCAT_QUOTES, + CURRENCY, + LIST_ELLIPSES, + LIST_ICONS, + LIST_PUNCT, + LIST_QUOTES, + LIST_UNITS, + PUNCT, + merge_chars, +) _list_units = [u for u in LIST_UNITS if u != "%"] _units = merge_chars(" ".join(_list_units)) diff --git a/spacy/lang/es/syntax_iterators.py b/spacy/lang/es/syntax_iterators.py index f2ca2a678..96df444a3 100644 --- a/spacy/lang/es/syntax_iterators.py +++ b/spacy/lang/es/syntax_iterators.py @@ -1,7 +1,7 @@ -from typing import Union, Iterator, Tuple +from typing import Iterator, Tuple, Union -from ...symbols import NOUN, PROPN, PRON from ...errors import Errors +from ...symbols import NOUN, PRON, PROPN from ...tokens import Doc, Span diff --git a/spacy/lang/es/tokenizer_exceptions.py b/spacy/lang/es/tokenizer_exceptions.py index 74cdc143d..2ea0ed8b7 100644 --- a/spacy/lang/es/tokenizer_exceptions.py +++ b/spacy/lang/es/tokenizer_exceptions.py @@ -1,7 +1,6 @@ -from ..tokenizer_exceptions import BASE_EXCEPTIONS -from ...symbols import ORTH, NORM +from ...symbols import NORM, ORTH from ...util import update_exc - +from ..tokenizer_exceptions import BASE_EXCEPTIONS _exc = { "pal": [{ORTH: "pa"}, {ORTH: "l", NORM: "el"}], diff --git a/spacy/lang/et/__init__.py b/spacy/lang/et/__init__.py index 
274bc1309..9ec7e6006 100644 --- a/spacy/lang/et/__init__.py +++ b/spacy/lang/et/__init__.py @@ -1,5 +1,5 @@ +from ...language import BaseDefaults, Language from .stop_words import STOP_WORDS -from ...language import Language, BaseDefaults class EstonianDefaults(BaseDefaults): diff --git a/spacy/lang/eu/__init__.py b/spacy/lang/eu/__init__.py index 3346468bd..81f9c4a18 100644 --- a/spacy/lang/eu/__init__.py +++ b/spacy/lang/eu/__init__.py @@ -1,7 +1,7 @@ -from .stop_words import STOP_WORDS +from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS from .punctuation import TOKENIZER_SUFFIXES -from ...language import Language, BaseDefaults +from .stop_words import STOP_WORDS class BasqueDefaults(BaseDefaults): diff --git a/spacy/lang/eu/punctuation.py b/spacy/lang/eu/punctuation.py index 5d35d0a25..382bfc75c 100644 --- a/spacy/lang/eu/punctuation.py +++ b/spacy/lang/eu/punctuation.py @@ -1,4 +1,3 @@ from ..punctuation import TOKENIZER_SUFFIXES - _suffixes = TOKENIZER_SUFFIXES diff --git a/spacy/lang/fa/__init__.py b/spacy/lang/fa/__init__.py index 914e4c27d..e5baa8b4a 100644 --- a/spacy/lang/fa/__init__.py +++ b/spacy/lang/fa/__init__.py @@ -1,12 +1,14 @@ -from typing import Optional, Callable +from typing import Callable, Optional + from thinc.api import Model -from .stop_words import STOP_WORDS -from .lex_attrs import LEX_ATTRS -from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS -from .punctuation import TOKENIZER_SUFFIXES -from .syntax_iterators import SYNTAX_ITERATORS -from ...language import Language, BaseDefaults + +from ...language import BaseDefaults, Language from ...pipeline import Lemmatizer +from .lex_attrs import LEX_ATTRS +from .punctuation import TOKENIZER_SUFFIXES +from .stop_words import STOP_WORDS +from .syntax_iterators import SYNTAX_ITERATORS +from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS class PersianDefaults(BaseDefaults): diff --git a/spacy/lang/fa/lex_attrs.py b/spacy/lang/fa/lex_attrs.py index 99b8e2787..065e81bd6 100644 --- a/spacy/lang/fa/lex_attrs.py +++ b/spacy/lang/fa/lex_attrs.py @@ -1,6 +1,5 @@ from ...attrs import LIKE_NUM - MIM = "م" ZWNJ_O_MIM = "‌ام" YE_NUN = "ین" diff --git a/spacy/lang/fa/punctuation.py b/spacy/lang/fa/punctuation.py index 4b258c13d..c1ee570ce 100644 --- a/spacy/lang/fa/punctuation.py +++ b/spacy/lang/fa/punctuation.py @@ -1,5 +1,11 @@ -from ..char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES, CURRENCY -from ..char_classes import UNITS, ALPHA_UPPER +from ..char_classes import ( + ALPHA_UPPER, + CURRENCY, + LIST_ELLIPSES, + LIST_PUNCT, + LIST_QUOTES, + UNITS, +) _suffixes = ( LIST_PUNCT diff --git a/spacy/lang/fa/syntax_iterators.py b/spacy/lang/fa/syntax_iterators.py index 8207884b0..3052369a7 100644 --- a/spacy/lang/fa/syntax_iterators.py +++ b/spacy/lang/fa/syntax_iterators.py @@ -1,7 +1,8 @@ -from typing import Union, Iterator, Tuple -from ...tokens import Doc, Span -from ...symbols import NOUN, PROPN, PRON +from typing import Iterator, Tuple, Union + from ...errors import Errors +from ...symbols import NOUN, PRON, PROPN +from ...tokens import Doc, Span def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]: diff --git a/spacy/lang/fa/tokenizer_exceptions.py b/spacy/lang/fa/tokenizer_exceptions.py index 30df798ab..3b31b7f67 100644 --- a/spacy/lang/fa/tokenizer_exceptions.py +++ b/spacy/lang/fa/tokenizer_exceptions.py @@ -1,5 +1,4 @@ -from ...symbols import ORTH, NORM - +from ...symbols import NORM, ORTH TOKENIZER_EXCEPTIONS = { ".ق ": [{ORTH: ".ق "}], diff --git 
a/spacy/lang/fi/__init__.py b/spacy/lang/fi/__init__.py index c3a0cf451..3e371b9b5 100644 --- a/spacy/lang/fi/__init__.py +++ b/spacy/lang/fi/__init__.py @@ -1,9 +1,9 @@ -from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS -from .stop_words import STOP_WORDS +from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS from .punctuation import TOKENIZER_INFIXES, TOKENIZER_SUFFIXES +from .stop_words import STOP_WORDS from .syntax_iterators import SYNTAX_ITERATORS -from ...language import Language, BaseDefaults +from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS class FinnishDefaults(BaseDefaults): diff --git a/spacy/lang/fi/lex_attrs.py b/spacy/lang/fi/lex_attrs.py index 4d500cead..9eec41b3d 100644 --- a/spacy/lang/fi/lex_attrs.py +++ b/spacy/lang/fi/lex_attrs.py @@ -1,6 +1,5 @@ from ...attrs import LIKE_NUM - _num_words = [ "nolla", "yksi", diff --git a/spacy/lang/fi/punctuation.py b/spacy/lang/fi/punctuation.py index 6e14dde38..29ddc3111 100644 --- a/spacy/lang/fi/punctuation.py +++ b/spacy/lang/fi/punctuation.py @@ -1,8 +1,14 @@ -from ..char_classes import LIST_ELLIPSES, LIST_ICONS, LIST_HYPHENS -from ..char_classes import CONCAT_QUOTES, ALPHA, ALPHA_LOWER, ALPHA_UPPER +from ..char_classes import ( + ALPHA, + ALPHA_LOWER, + ALPHA_UPPER, + CONCAT_QUOTES, + LIST_ELLIPSES, + LIST_HYPHENS, + LIST_ICONS, +) from ..punctuation import TOKENIZER_SUFFIXES - _quotes = CONCAT_QUOTES.replace("'", "") DASHES = "|".join(x for x in LIST_HYPHENS if x != "-") diff --git a/spacy/lang/fi/syntax_iterators.py b/spacy/lang/fi/syntax_iterators.py index 6b481e51f..6e2216713 100644 --- a/spacy/lang/fi/syntax_iterators.py +++ b/spacy/lang/fi/syntax_iterators.py @@ -1,7 +1,8 @@ from typing import Iterator, Tuple, Union -from ...tokens import Doc, Span -from ...symbols import NOUN, PROPN, PRON + from ...errors import Errors +from ...symbols import NOUN, PRON, PROPN +from ...tokens import Doc, Span def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]: diff --git a/spacy/lang/fi/tokenizer_exceptions.py b/spacy/lang/fi/tokenizer_exceptions.py index 465333b0a..881d5b91d 100644 --- a/spacy/lang/fi/tokenizer_exceptions.py +++ b/spacy/lang/fi/tokenizer_exceptions.py @@ -1,7 +1,6 @@ -from ..tokenizer_exceptions import BASE_EXCEPTIONS -from ...symbols import ORTH, NORM +from ...symbols import NORM, ORTH from ...util import update_exc - +from ..tokenizer_exceptions import BASE_EXCEPTIONS _exc = {} diff --git a/spacy/lang/fo/__init__.py b/spacy/lang/fo/__init__.py new file mode 100644 index 000000000..db18f1a5d --- /dev/null +++ b/spacy/lang/fo/__init__.py @@ -0,0 +1,18 @@ +from ...language import BaseDefaults, Language +from ..punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES +from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS + + +class FaroeseDefaults(BaseDefaults): + tokenizer_exceptions = TOKENIZER_EXCEPTIONS + infixes = TOKENIZER_INFIXES + suffixes = TOKENIZER_SUFFIXES + prefixes = TOKENIZER_PREFIXES + + +class Faroese(Language): + lang = "fo" + Defaults = FaroeseDefaults + + +__all__ = ["Faroese"] diff --git a/spacy/lang/fo/tokenizer_exceptions.py b/spacy/lang/fo/tokenizer_exceptions.py new file mode 100644 index 000000000..856b72200 --- /dev/null +++ b/spacy/lang/fo/tokenizer_exceptions.py @@ -0,0 +1,90 @@ +from ...symbols import ORTH +from ...util import update_exc +from ..tokenizer_exceptions import BASE_EXCEPTIONS + +_exc = {} + +for orth in [ + "apr.", + "aug.", + "avgr.", + "árg.", + "ávís.", + "beinl.", + "blkv.", + "blaðkv.", + 
"blm.", + "blaðm.", + "bls.", + "blstj.", + "blaðstj.", + "des.", + "eint.", + "febr.", + "fyrrv.", + "góðk.", + "h.m.", + "innt.", + "jan.", + "kl.", + "m.a.", + "mðr.", + "mió.", + "nr.", + "nto.", + "nov.", + "nút.", + "o.a.", + "o.a.m.", + "o.a.tíl.", + "o.fl.", + "ff.", + "o.m.a.", + "o.o.", + "o.s.fr.", + "o.tíl.", + "o.ø.", + "okt.", + "omf.", + "pst.", + "ritstj.", + "sbr.", + "sms.", + "smst.", + "smb.", + "sb.", + "sbrt.", + "sp.", + "sept.", + "spf.", + "spsk.", + "t.e.", + "t.s.", + "t.s.s.", + "tlf.", + "tel.", + "tsk.", + "t.o.v.", + "t.d.", + "uml.", + "ums.", + "uppl.", + "upprfr.", + "uppr.", + "útg.", + "útl.", + "útr.", + "vanl.", + "v.", + "v.h.", + "v.ø.o.", + "viðm.", + "viðv.", + "vm.", + "v.m.", +]: + _exc[orth] = [{ORTH: orth}] + capitalized = orth.capitalize() + _exc[capitalized] = [{ORTH: capitalized}] + +TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc) diff --git a/spacy/lang/fr/__init__.py b/spacy/lang/fr/__init__.py index 27d2a915e..a8bc7f53e 100644 --- a/spacy/lang/fr/__init__.py +++ b/spacy/lang/fr/__init__.py @@ -1,15 +1,14 @@ -from typing import Optional, Callable +from typing import Callable, Optional from thinc.api import Model -from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS, TOKEN_MATCH -from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_INFIXES -from .punctuation import TOKENIZER_SUFFIXES -from .stop_words import STOP_WORDS -from .lex_attrs import LEX_ATTRS -from .syntax_iterators import SYNTAX_ITERATORS +from ...language import BaseDefaults, Language from .lemmatizer import FrenchLemmatizer -from ...language import Language, BaseDefaults +from .lex_attrs import LEX_ATTRS +from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES +from .stop_words import STOP_WORDS +from .syntax_iterators import SYNTAX_ITERATORS +from .tokenizer_exceptions import TOKEN_MATCH, TOKENIZER_EXCEPTIONS class FrenchDefaults(BaseDefaults): diff --git a/spacy/lang/fr/lex_attrs.py b/spacy/lang/fr/lex_attrs.py index 811312ad7..9cf508a07 100644 --- a/spacy/lang/fr/lex_attrs.py +++ b/spacy/lang/fr/lex_attrs.py @@ -1,6 +1,5 @@ from ...attrs import LIKE_NUM - _num_words = set( """ zero un une deux trois quatre cinq six sept huit neuf dix diff --git a/spacy/lang/fr/punctuation.py b/spacy/lang/fr/punctuation.py index 873d01d87..a3b178a2f 100644 --- a/spacy/lang/fr/punctuation.py +++ b/spacy/lang/fr/punctuation.py @@ -1,8 +1,16 @@ -from ..punctuation import TOKENIZER_PREFIXES, TOKENIZER_INFIXES -from ..char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES, CURRENCY -from ..char_classes import CONCAT_QUOTES, UNITS, ALPHA, ALPHA_LOWER, ALPHA_UPPER -from ..char_classes import merge_chars - +from ..char_classes import ( + ALPHA, + ALPHA_LOWER, + ALPHA_UPPER, + CONCAT_QUOTES, + CURRENCY, + LIST_ELLIPSES, + LIST_PUNCT, + LIST_QUOTES, + UNITS, + merge_chars, +) +from ..punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES ELISION = "' ’".replace(" ", "") HYPHENS = r"- – — ‐ ‑".replace(" ", "") diff --git a/spacy/lang/fr/syntax_iterators.py b/spacy/lang/fr/syntax_iterators.py index 5849c40b3..a6bf3d3ca 100644 --- a/spacy/lang/fr/syntax_iterators.py +++ b/spacy/lang/fr/syntax_iterators.py @@ -1,7 +1,7 @@ -from typing import Union, Iterator, Tuple +from typing import Iterator, Tuple, Union -from ...symbols import NOUN, PROPN, PRON from ...errors import Errors +from ...symbols import NOUN, PRON, PROPN from ...tokens import Doc, Span diff --git a/spacy/lang/fr/tokenizer_exceptions.py b/spacy/lang/fr/tokenizer_exceptions.py index 
2e88b58cf..fa2062ef9 100644 --- a/spacy/lang/fr/tokenizer_exceptions.py +++ b/spacy/lang/fr/tokenizer_exceptions.py @@ -1,11 +1,10 @@ import re -from ..tokenizer_exceptions import BASE_EXCEPTIONS -from .punctuation import ELISION, HYPHENS -from ..char_classes import ALPHA_LOWER, ALPHA from ...symbols import ORTH from ...util import update_exc - +from ..char_classes import ALPHA, ALPHA_LOWER +from ..tokenizer_exceptions import BASE_EXCEPTIONS +from .punctuation import ELISION, HYPHENS # not using the large _tokenizer_exceptions_list by default as it slows down the tokenizer # from ._tokenizer_exceptions_list import FR_BASE_EXCEPTIONS diff --git a/spacy/lang/ga/__init__.py b/spacy/lang/ga/__init__.py index 3be53bc7a..6f9a27a14 100644 --- a/spacy/lang/ga/__init__.py +++ b/spacy/lang/ga/__init__.py @@ -2,10 +2,10 @@ from typing import Optional from thinc.api import Model -from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS -from .stop_words import STOP_WORDS -from ...language import Language, BaseDefaults +from ...language import BaseDefaults, Language from .lemmatizer import IrishLemmatizer +from .stop_words import STOP_WORDS +from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS class IrishDefaults(BaseDefaults): diff --git a/spacy/lang/ga/lemmatizer.py b/spacy/lang/ga/lemmatizer.py index 47aec8fd4..c9fbfbc19 100644 --- a/spacy/lang/ga/lemmatizer.py +++ b/spacy/lang/ga/lemmatizer.py @@ -1,4 +1,4 @@ -from typing import List, Dict, Tuple +from typing import Dict, List, Tuple from ...pipeline import Lemmatizer from ...tokens import Token diff --git a/spacy/lang/ga/tokenizer_exceptions.py b/spacy/lang/ga/tokenizer_exceptions.py index 63af65fe9..eb4b413fb 100644 --- a/spacy/lang/ga/tokenizer_exceptions.py +++ b/spacy/lang/ga/tokenizer_exceptions.py @@ -1,7 +1,6 @@ -from ..tokenizer_exceptions import BASE_EXCEPTIONS -from ...symbols import ORTH, NORM +from ...symbols import NORM, ORTH from ...util import update_exc - +from ..tokenizer_exceptions import BASE_EXCEPTIONS _exc = { "'acha'n": [{ORTH: "'ach", NORM: "gach"}, {ORTH: "a'n", NORM: "aon"}], diff --git a/spacy/lang/grc/__init__.py b/spacy/lang/grc/__init__.py index 019b3802e..ed742f4c5 100644 --- a/spacy/lang/grc/__init__.py +++ b/spacy/lang/grc/__init__.py @@ -1,8 +1,8 @@ -from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS -from .stop_words import STOP_WORDS +from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS -from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES, TOKENIZER_INFIXES -from ...language import Language, BaseDefaults +from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES +from .stop_words import STOP_WORDS +from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS class AncientGreekDefaults(BaseDefaults): diff --git a/spacy/lang/grc/lex_attrs.py b/spacy/lang/grc/lex_attrs.py index 0ab15e6fd..33cfca05b 100644 --- a/spacy/lang/grc/lex_attrs.py +++ b/spacy/lang/grc/lex_attrs.py @@ -1,6 +1,5 @@ from ...attrs import LIKE_NUM - _num_words = [ # CARDINALS "εἷς", diff --git a/spacy/lang/grc/punctuation.py b/spacy/lang/grc/punctuation.py index 8f3589e9a..59037617d 100644 --- a/spacy/lang/grc/punctuation.py +++ b/spacy/lang/grc/punctuation.py @@ -1,11 +1,21 @@ -from ..char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES, LIST_CURRENCY -from ..char_classes import LIST_ICONS, ALPHA_LOWER, ALPHA_UPPER, ALPHA, HYPHENS -from ..char_classes import CONCAT_QUOTES +from ..char_classes import ( + ALPHA, + ALPHA_LOWER, + ALPHA_UPPER, + CONCAT_QUOTES, + HYPHENS, + 
LIST_CURRENCY, + LIST_ELLIPSES, + LIST_ICONS, + LIST_PUNCT, + LIST_QUOTES, +) _prefixes = ( [ "†", "⸏", + "〈", ] + LIST_PUNCT + LIST_ELLIPSES @@ -22,6 +32,7 @@ _suffixes = ( + [ "†", "⸎", + "〉", r"(?<=[\u1F00-\u1FFF\u0370-\u03FF])[\-\.⸏]", ] ) diff --git a/spacy/lang/grc/tokenizer_exceptions.py b/spacy/lang/grc/tokenizer_exceptions.py index bcee70f32..86527ff61 100644 --- a/spacy/lang/grc/tokenizer_exceptions.py +++ b/spacy/lang/grc/tokenizer_exceptions.py @@ -1,6 +1,6 @@ -from ..tokenizer_exceptions import BASE_EXCEPTIONS -from ...symbols import ORTH, NORM +from ...symbols import NORM, ORTH from ...util import update_exc +from ..tokenizer_exceptions import BASE_EXCEPTIONS _exc = {} diff --git a/spacy/lang/gu/__init__.py b/spacy/lang/gu/__init__.py index e6fbc9d18..2f22034c1 100644 --- a/spacy/lang/gu/__init__.py +++ b/spacy/lang/gu/__init__.py @@ -1,5 +1,5 @@ +from ...language import BaseDefaults, Language from .stop_words import STOP_WORDS -from ...language import Language, BaseDefaults class GujaratiDefaults(BaseDefaults): diff --git a/spacy/lang/he/__init__.py b/spacy/lang/he/__init__.py index dd2ee478d..07084acf1 100644 --- a/spacy/lang/he/__init__.py +++ b/spacy/lang/he/__init__.py @@ -1,6 +1,6 @@ -from .stop_words import STOP_WORDS +from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS -from ...language import Language, BaseDefaults +from .stop_words import STOP_WORDS class HebrewDefaults(BaseDefaults): diff --git a/spacy/lang/hi/__init__.py b/spacy/lang/hi/__init__.py index 4c8ae446d..980dc31c1 100644 --- a/spacy/lang/hi/__init__.py +++ b/spacy/lang/hi/__init__.py @@ -1,6 +1,6 @@ -from .stop_words import STOP_WORDS +from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS -from ...language import Language, BaseDefaults +from .stop_words import STOP_WORDS class HindiDefaults(BaseDefaults): diff --git a/spacy/lang/hi/lex_attrs.py b/spacy/lang/hi/lex_attrs.py index ee845e8b1..4ecd1db66 100644 --- a/spacy/lang/hi/lex_attrs.py +++ b/spacy/lang/hi/lex_attrs.py @@ -1,6 +1,5 @@ +from ...attrs import LIKE_NUM, NORM from ..norm_exceptions import BASE_NORMS -from ...attrs import NORM, LIKE_NUM - # fmt: off _stem_suffixes = [ diff --git a/spacy/lang/hr/__init__.py b/spacy/lang/hr/__init__.py index 30870b522..fd7622a3d 100644 --- a/spacy/lang/hr/__init__.py +++ b/spacy/lang/hr/__init__.py @@ -1,5 +1,5 @@ +from ...language import BaseDefaults, Language from .stop_words import STOP_WORDS -from ...language import Language, BaseDefaults class CroatianDefaults(BaseDefaults): diff --git a/spacy/lang/hsb/__init__.py b/spacy/lang/hsb/__init__.py index 034d82319..e8b2ffc9f 100644 --- a/spacy/lang/hsb/__init__.py +++ b/spacy/lang/hsb/__init__.py @@ -1,7 +1,7 @@ +from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS from .stop_words import STOP_WORDS from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS -from ...language import Language, BaseDefaults class UpperSorbianDefaults(BaseDefaults): diff --git a/spacy/lang/hsb/tokenizer_exceptions.py b/spacy/lang/hsb/tokenizer_exceptions.py index 4b9a4f98a..cd3bac913 100644 --- a/spacy/lang/hsb/tokenizer_exceptions.py +++ b/spacy/lang/hsb/tokenizer_exceptions.py @@ -1,6 +1,6 @@ -from ..tokenizer_exceptions import BASE_EXCEPTIONS -from ...symbols import ORTH, NORM +from ...symbols import NORM, ORTH from ...util import update_exc +from ..tokenizer_exceptions import BASE_EXCEPTIONS _exc = dict() for exc_data in [ diff --git a/spacy/lang/hu/__init__.py b/spacy/lang/hu/__init__.py index 
9426bacea..799e6d230 100644 --- a/spacy/lang/hu/__init__.py +++ b/spacy/lang/hu/__init__.py @@ -1,7 +1,7 @@ -from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS, TOKEN_MATCH -from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES, TOKENIZER_INFIXES +from ...language import BaseDefaults, Language +from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES from .stop_words import STOP_WORDS -from ...language import Language, BaseDefaults +from .tokenizer_exceptions import TOKEN_MATCH, TOKENIZER_EXCEPTIONS class HungarianDefaults(BaseDefaults): diff --git a/spacy/lang/hu/punctuation.py b/spacy/lang/hu/punctuation.py index f827cd677..dbf93c622 100644 --- a/spacy/lang/hu/punctuation.py +++ b/spacy/lang/hu/punctuation.py @@ -1,6 +1,14 @@ -from ..char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES, CONCAT_QUOTES -from ..char_classes import CONCAT_ICONS, UNITS, ALPHA, ALPHA_LOWER, ALPHA_UPPER - +from ..char_classes import ( + ALPHA, + ALPHA_LOWER, + ALPHA_UPPER, + CONCAT_ICONS, + CONCAT_QUOTES, + LIST_ELLIPSES, + LIST_PUNCT, + LIST_QUOTES, + UNITS, +) # removing ° from the special icons to keep e.g. 99° as one token _concat_icons = CONCAT_ICONS.replace("\u00B0", "") diff --git a/spacy/lang/hu/tokenizer_exceptions.py b/spacy/lang/hu/tokenizer_exceptions.py index ffaa74f50..3f79b02d2 100644 --- a/spacy/lang/hu/tokenizer_exceptions.py +++ b/spacy/lang/hu/tokenizer_exceptions.py @@ -1,10 +1,9 @@ import re -from ..tokenizer_exceptions import BASE_EXCEPTIONS -from ..punctuation import ALPHA_LOWER, CURRENCY from ...symbols import ORTH from ...util import update_exc - +from ..punctuation import ALPHA_LOWER, CURRENCY +from ..tokenizer_exceptions import BASE_EXCEPTIONS _exc = {} diff --git a/spacy/lang/hy/__init__.py b/spacy/lang/hy/__init__.py index 481eaae0a..e00d4fd11 100644 --- a/spacy/lang/hy/__init__.py +++ b/spacy/lang/hy/__init__.py @@ -1,6 +1,6 @@ -from .stop_words import STOP_WORDS +from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS -from ...language import Language, BaseDefaults +from .stop_words import STOP_WORDS class ArmenianDefaults(BaseDefaults): diff --git a/spacy/lang/hy/lex_attrs.py b/spacy/lang/hy/lex_attrs.py index 9c9c0380c..4c96b8ab5 100644 --- a/spacy/lang/hy/lex_attrs.py +++ b/spacy/lang/hy/lex_attrs.py @@ -1,6 +1,5 @@ from ...attrs import LIKE_NUM - _num_words = [ "զրո", "մեկ", diff --git a/spacy/lang/id/__init__.py b/spacy/lang/id/__init__.py index 0d72cfa9d..93eb3214a 100644 --- a/spacy/lang/id/__init__.py +++ b/spacy/lang/id/__init__.py @@ -1,9 +1,9 @@ -from .stop_words import STOP_WORDS -from .punctuation import TOKENIZER_SUFFIXES, TOKENIZER_PREFIXES, TOKENIZER_INFIXES -from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS +from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS +from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES +from .stop_words import STOP_WORDS from .syntax_iterators import SYNTAX_ITERATORS -from ...language import Language, BaseDefaults +from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS class IndonesianDefaults(BaseDefaults): diff --git a/spacy/lang/id/lex_attrs.py b/spacy/lang/id/lex_attrs.py index 3167f4659..5952c4d06 100644 --- a/spacy/lang/id/lex_attrs.py +++ b/spacy/lang/id/lex_attrs.py @@ -1,8 +1,7 @@ import unicodedata -from .punctuation import LIST_CURRENCY from ...attrs import IS_CURRENCY, LIKE_NUM - +from .punctuation import LIST_CURRENCY _num_words = [ "nol", diff --git a/spacy/lang/id/punctuation.py 
b/spacy/lang/id/punctuation.py index f6c2387d8..8303b8eaa 100644 --- a/spacy/lang/id/punctuation.py +++ b/spacy/lang/id/punctuation.py @@ -1,6 +1,5 @@ -from ..punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES, TOKENIZER_INFIXES -from ..char_classes import ALPHA, merge_chars, split_chars, _currency, _units - +from ..char_classes import ALPHA, _currency, _units, merge_chars, split_chars +from ..punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES _units = ( _units + "s bit Gbps Mbps mbps Kbps kbps ƒ ppi px " diff --git a/spacy/lang/id/syntax_iterators.py b/spacy/lang/id/syntax_iterators.py index fa984d411..027798687 100644 --- a/spacy/lang/id/syntax_iterators.py +++ b/spacy/lang/id/syntax_iterators.py @@ -1,7 +1,7 @@ -from typing import Union, Iterator, Tuple +from typing import Iterator, Tuple, Union -from ...symbols import NOUN, PROPN, PRON from ...errors import Errors +from ...symbols import NOUN, PRON, PROPN from ...tokens import Doc, Span diff --git a/spacy/lang/id/tokenizer_exceptions.py b/spacy/lang/id/tokenizer_exceptions.py index ff77ede9f..8dea4e97f 100644 --- a/spacy/lang/id/tokenizer_exceptions.py +++ b/spacy/lang/id/tokenizer_exceptions.py @@ -1,8 +1,7 @@ +from ...symbols import NORM, ORTH +from ...util import update_exc from ..tokenizer_exceptions import BASE_EXCEPTIONS from ._tokenizer_exceptions_list import ID_BASE_EXCEPTIONS -from ...symbols import ORTH, NORM -from ...util import update_exc - # Daftar singkatan dan Akronim dari: # https://id.wiktionary.org/wiki/Wiktionary:Daftar_singkatan_dan_akronim_bahasa_Indonesia#A diff --git a/spacy/lang/is/__init__.py b/spacy/lang/is/__init__.py index 318363beb..af1260045 100644 --- a/spacy/lang/is/__init__.py +++ b/spacy/lang/is/__init__.py @@ -1,5 +1,5 @@ +from ...language import BaseDefaults, Language from .stop_words import STOP_WORDS -from ...language import Language, BaseDefaults class IcelandicDefaults(BaseDefaults): diff --git a/spacy/lang/it/__init__.py b/spacy/lang/it/__init__.py index ecf322bd7..14458d811 100644 --- a/spacy/lang/it/__init__.py +++ b/spacy/lang/it/__init__.py @@ -1,12 +1,13 @@ -from typing import Optional, Callable +from typing import Callable, Optional + from thinc.api import Model -from .stop_words import STOP_WORDS -from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS -from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_INFIXES -from ...language import Language, BaseDefaults +from ...language import BaseDefaults, Language from .lemmatizer import ItalianLemmatizer +from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES +from .stop_words import STOP_WORDS from .syntax_iterators import SYNTAX_ITERATORS +from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS class ItalianDefaults(BaseDefaults): diff --git a/spacy/lang/it/lemmatizer.py b/spacy/lang/it/lemmatizer.py index e44e64e3a..bf869166d 100644 --- a/spacy/lang/it/lemmatizer.py +++ b/spacy/lang/it/lemmatizer.py @@ -1,4 +1,4 @@ -from typing import List, Dict, Tuple +from typing import Dict, List, Tuple from ...pipeline import Lemmatizer from ...tokens import Token diff --git a/spacy/lang/it/punctuation.py b/spacy/lang/it/punctuation.py index f01ab4f0d..51318b22d 100644 --- a/spacy/lang/it/punctuation.py +++ b/spacy/lang/it/punctuation.py @@ -1,8 +1,13 @@ +from ..char_classes import ( + ALPHA, + ALPHA_LOWER, + ALPHA_UPPER, + CONCAT_QUOTES, + HYPHENS, + LIST_ELLIPSES, + LIST_ICONS, +) from ..punctuation import TOKENIZER_PREFIXES as BASE_TOKENIZER_PREFIXES -from ..char_classes import LIST_ELLIPSES, LIST_ICONS -from 
..char_classes import ALPHA, HYPHENS, CONCAT_QUOTES -from ..char_classes import ALPHA_LOWER, ALPHA_UPPER - ELISION = "'’" diff --git a/spacy/lang/it/syntax_iterators.py b/spacy/lang/it/syntax_iterators.py index f63df3fad..924627648 100644 --- a/spacy/lang/it/syntax_iterators.py +++ b/spacy/lang/it/syntax_iterators.py @@ -1,7 +1,7 @@ -from typing import Union, Iterator, Tuple +from typing import Iterator, Tuple, Union -from ...symbols import NOUN, PROPN, PRON from ...errors import Errors +from ...symbols import NOUN, PRON, PROPN from ...tokens import Doc, Span diff --git a/spacy/lang/it/tokenizer_exceptions.py b/spacy/lang/it/tokenizer_exceptions.py index 42883863b..2e7a5a1a3 100644 --- a/spacy/lang/it/tokenizer_exceptions.py +++ b/spacy/lang/it/tokenizer_exceptions.py @@ -1,7 +1,6 @@ -from ..tokenizer_exceptions import BASE_EXCEPTIONS from ...symbols import ORTH from ...util import update_exc - +from ..tokenizer_exceptions import BASE_EXCEPTIONS _exc = { "all'art.": [{ORTH: "all'"}, {ORTH: "art."}], diff --git a/spacy/lang/ja/__init__.py b/spacy/lang/ja/__init__.py index bf86305fb..0d5f97ac8 100644 --- a/spacy/lang/ja/__init__.py +++ b/spacy/lang/ja/__init__.py @@ -1,27 +1,27 @@ -from typing import Optional, Union, Dict, Any, Callable -from pathlib import Path -import srsly -from collections import namedtuple -from thinc.api import Model import re +from collections import namedtuple +from pathlib import Path +from typing import Any, Callable, Dict, Optional, Union -from .stop_words import STOP_WORDS -from .syntax_iterators import SYNTAX_ITERATORS -from .tag_map import TAG_MAP -from .tag_orth_map import TAG_ORTH_MAP -from .tag_bigram_map import TAG_BIGRAM_MAP +import srsly +from thinc.api import Model + +from ... import util from ...errors import Errors -from ...language import Language, BaseDefaults +from ...language import BaseDefaults, Language from ...pipeline import Morphologizer from ...pipeline.morphologizer import DEFAULT_MORPH_MODEL from ...scorer import Scorer from ...symbols import POS from ...tokens import Doc, MorphAnalysis from ...training import validate_examples -from ...util import DummyTokenizer, registry, load_config_from_str +from ...util import DummyTokenizer, load_config_from_str, registry from ...vocab import Vocab -from ... 
import util - +from .stop_words import STOP_WORDS +from .syntax_iterators import SYNTAX_ITERATORS +from .tag_bigram_map import TAG_BIGRAM_MAP +from .tag_map import TAG_MAP +from .tag_orth_map import TAG_ORTH_MAP DEFAULT_CONFIG = """ [nlp] diff --git a/spacy/lang/ja/syntax_iterators.py b/spacy/lang/ja/syntax_iterators.py index 588a9ba03..34670083e 100644 --- a/spacy/lang/ja/syntax_iterators.py +++ b/spacy/lang/ja/syntax_iterators.py @@ -1,9 +1,8 @@ -from typing import Union, Iterator, Tuple, Set +from typing import Iterator, Set, Tuple, Union -from ...symbols import NOUN, PROPN, PRON, VERB +from ...symbols import NOUN, PRON, PROPN, VERB from ...tokens import Doc, Span - # TODO: this can probably be pruned a bit # fmt: off labels = ["nsubj", "nmod", "ddoclike", "nsubjpass", "pcomp", "pdoclike", "doclike", "obl", "dative", "appos", "attr", "ROOT"] diff --git a/spacy/lang/ja/tag_map.py b/spacy/lang/ja/tag_map.py index c6de3831a..5c14f41bf 100644 --- a/spacy/lang/ja/tag_map.py +++ b/spacy/lang/ja/tag_map.py @@ -1,6 +1,23 @@ -from ...symbols import POS, PUNCT, INTJ, ADJ, AUX, ADP, PART, SCONJ, NOUN -from ...symbols import SYM, PRON, VERB, ADV, PROPN, NUM, DET, SPACE, CCONJ - +from ...symbols import ( + ADJ, + ADP, + ADV, + AUX, + CCONJ, + DET, + INTJ, + NOUN, + NUM, + PART, + POS, + PRON, + PROPN, + PUNCT, + SCONJ, + SPACE, + SYM, + VERB, +) TAG_MAP = { # Explanation of Unidic tags: diff --git a/spacy/lang/kn/__init__.py b/spacy/lang/kn/__init__.py index ccd46a394..44d53f6b7 100644 --- a/spacy/lang/kn/__init__.py +++ b/spacy/lang/kn/__init__.py @@ -1,5 +1,5 @@ +from ...language import BaseDefaults, Language from .stop_words import STOP_WORDS -from ...language import Language, BaseDefaults class KannadaDefaults(BaseDefaults): diff --git a/spacy/lang/ko/__init__.py b/spacy/lang/ko/__init__.py index 0e02e4a2d..e2c860f7d 100644 --- a/spacy/lang/ko/__init__.py +++ b/spacy/lang/ko/__init__.py @@ -1,17 +1,16 @@ -from typing import Iterator, Any, Dict +from typing import Any, Dict, Iterator +from ...language import BaseDefaults, Language +from ...scorer import Scorer +from ...symbols import POS, X +from ...tokens import Doc +from ...training import validate_examples +from ...util import DummyTokenizer, load_config_from_str, registry +from ...vocab import Vocab +from .lex_attrs import LEX_ATTRS from .punctuation import TOKENIZER_INFIXES from .stop_words import STOP_WORDS from .tag_map import TAG_MAP -from .lex_attrs import LEX_ATTRS -from ...language import Language, BaseDefaults -from ...tokens import Doc -from ...scorer import Scorer -from ...symbols import POS, X -from ...training import validate_examples -from ...util import DummyTokenizer, registry, load_config_from_str -from ...vocab import Vocab - DEFAULT_CONFIG = """ [nlp] diff --git a/spacy/lang/ko/lex_attrs.py b/spacy/lang/ko/lex_attrs.py index ac5bc7e48..2c49aa389 100644 --- a/spacy/lang/ko/lex_attrs.py +++ b/spacy/lang/ko/lex_attrs.py @@ -1,6 +1,5 @@ from ...attrs import LIKE_NUM - _num_words = [ "영", "공", diff --git a/spacy/lang/ko/punctuation.py b/spacy/lang/ko/punctuation.py index f5f1c51da..c3c32ea1f 100644 --- a/spacy/lang/ko/punctuation.py +++ b/spacy/lang/ko/punctuation.py @@ -1,7 +1,6 @@ from ..char_classes import LIST_QUOTES from ..punctuation import TOKENIZER_INFIXES as BASE_TOKENIZER_INFIXES - _infixes = ( ["·", "ㆍ", r"\(", r"\)"] + [r"(?<=[0-9])~(?=[0-9-])"] diff --git a/spacy/lang/ko/tag_map.py b/spacy/lang/ko/tag_map.py index 26a8c56b9..85598c3ef 100644 --- a/spacy/lang/ko/tag_map.py +++ b/spacy/lang/ko/tag_map.py @@ -1,5 +1,21 
@@ -from ...symbols import POS, PUNCT, INTJ, X, SYM, ADJ, AUX, ADP, CONJ, NOUN, PRON -from ...symbols import VERB, ADV, PROPN, NUM, DET +from ...symbols import ( + ADJ, + ADP, + ADV, + AUX, + CONJ, + DET, + INTJ, + NOUN, + NUM, + POS, + PRON, + PROPN, + PUNCT, + SYM, + VERB, + X, +) # 은전한닢(mecab-ko-dic)의 품사 태그를 universal pos tag로 대응시킴 # https://docs.google.com/spreadsheets/d/1-9blXKjtjeKZqsf4NzHeYJCrr49-nXeRF6D80udfcwY/edit#gid=589544265 diff --git a/spacy/lang/ky/__init__.py b/spacy/lang/ky/__init__.py index ccca384bd..fafc0f020 100644 --- a/spacy/lang/ky/__init__.py +++ b/spacy/lang/ky/__init__.py @@ -1,8 +1,8 @@ +from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS from .punctuation import TOKENIZER_INFIXES from .stop_words import STOP_WORDS from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS -from ...language import Language, BaseDefaults class KyrgyzDefaults(BaseDefaults): diff --git a/spacy/lang/ky/punctuation.py b/spacy/lang/ky/punctuation.py index fa9819f80..6d89da2f7 100644 --- a/spacy/lang/ky/punctuation.py +++ b/spacy/lang/ky/punctuation.py @@ -1,5 +1,12 @@ -from ..char_classes import ALPHA, ALPHA_LOWER, ALPHA_UPPER, CONCAT_QUOTES, HYPHENS -from ..char_classes import LIST_ELLIPSES, LIST_ICONS +from ..char_classes import ( + ALPHA, + ALPHA_LOWER, + ALPHA_UPPER, + CONCAT_QUOTES, + HYPHENS, + LIST_ELLIPSES, + LIST_ICONS, +) _hyphens_no_dash = HYPHENS.replace("-", "").strip("|").replace("||", "") _infixes = ( diff --git a/spacy/lang/ky/tokenizer_exceptions.py b/spacy/lang/ky/tokenizer_exceptions.py index 8ec727ac1..c93e3dac3 100644 --- a/spacy/lang/ky/tokenizer_exceptions.py +++ b/spacy/lang/ky/tokenizer_exceptions.py @@ -1,6 +1,6 @@ -from ..tokenizer_exceptions import BASE_EXCEPTIONS -from ...symbols import ORTH, NORM +from ...symbols import NORM, ORTH from ...util import update_exc +from ..tokenizer_exceptions import BASE_EXCEPTIONS _exc = {} diff --git a/spacy/lang/la/__init__.py b/spacy/lang/la/__init__.py index 37164c3f3..d77ae267e 100644 --- a/spacy/lang/la/__init__.py +++ b/spacy/lang/la/__init__.py @@ -1,8 +1,8 @@ -from ...language import Language, BaseDefaults -from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS -from .stop_words import STOP_WORDS +from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS +from .stop_words import STOP_WORDS from .syntax_iterators import SYNTAX_ITERATORS +from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS class LatinDefaults(BaseDefaults): diff --git a/spacy/lang/la/lex_attrs.py b/spacy/lang/la/lex_attrs.py index 9db1218a4..fcb35defc 100644 --- a/spacy/lang/la/lex_attrs.py +++ b/spacy/lang/la/lex_attrs.py @@ -1,6 +1,7 @@ -from ...attrs import LIKE_NUM import re +from ...attrs import LIKE_NUM + # cf. 
Goyvaerts/Levithan 2009; case-insensitive, allow 4 roman_numerals_compile = re.compile( r"(?i)^(?=[MDCLXVI])M*(C[MD]|D?C{0,4})(X[CL]|L?X{0,4})(I[XV]|V?I{0,4})$" diff --git a/spacy/lang/la/syntax_iterators.py b/spacy/lang/la/syntax_iterators.py index 7093bacf9..39b4fb39d 100644 --- a/spacy/lang/la/syntax_iterators.py +++ b/spacy/lang/la/syntax_iterators.py @@ -1,7 +1,8 @@ -from typing import Union, Iterator, Tuple -from ...tokens import Doc, Span -from ...symbols import NOUN, PROPN, PRON, VERB, AUX +from typing import Iterator, Tuple, Union + from ...errors import Errors +from ...symbols import AUX, NOUN, PRON, PROPN, VERB +from ...tokens import Doc, Span # NB: Modified from da on suggestion from https://github.com/explosion/spaCy/issues/7457#issuecomment-800349751 [PJB] diff --git a/spacy/lang/la/tokenizer_exceptions.py b/spacy/lang/la/tokenizer_exceptions.py index 6d14b92c5..c0b98116f 100644 --- a/spacy/lang/la/tokenizer_exceptions.py +++ b/spacy/lang/la/tokenizer_exceptions.py @@ -1,7 +1,6 @@ -from ..tokenizer_exceptions import BASE_EXCEPTIONS from ...symbols import ORTH from ...util import update_exc - +from ..tokenizer_exceptions import BASE_EXCEPTIONS ## TODO: Look into systematically handling u/v _exc = { diff --git a/spacy/lang/lb/__init__.py b/spacy/lang/lb/__init__.py index 7827e7762..2386b4356 100644 --- a/spacy/lang/lb/__init__.py +++ b/spacy/lang/lb/__init__.py @@ -1,8 +1,8 @@ -from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS -from .punctuation import TOKENIZER_INFIXES +from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS +from .punctuation import TOKENIZER_INFIXES from .stop_words import STOP_WORDS -from ...language import Language, BaseDefaults +from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS class LuxembourgishDefaults(BaseDefaults): diff --git a/spacy/lang/lb/lex_attrs.py b/spacy/lang/lb/lex_attrs.py index d2d50d9dc..119231374 100644 --- a/spacy/lang/lb/lex_attrs.py +++ b/spacy/lang/lb/lex_attrs.py @@ -1,6 +1,5 @@ from ...attrs import LIKE_NUM - _num_words = set( """ null eent zwee dräi véier fënnef sechs ziwen aacht néng zéng eelef zwielef dräizéng diff --git a/spacy/lang/lb/punctuation.py b/spacy/lang/lb/punctuation.py index e382c56c5..8bdbf9713 100644 --- a/spacy/lang/lb/punctuation.py +++ b/spacy/lang/lb/punctuation.py @@ -1,4 +1,4 @@ -from ..char_classes import LIST_ELLIPSES, LIST_ICONS, ALPHA, ALPHA_LOWER, ALPHA_UPPER +from ..char_classes import ALPHA, ALPHA_LOWER, ALPHA_UPPER, LIST_ELLIPSES, LIST_ICONS ELISION = " ' ’ ".strip().replace(" ", "") diff --git a/spacy/lang/lb/tokenizer_exceptions.py b/spacy/lang/lb/tokenizer_exceptions.py index d00dc9610..844826e27 100644 --- a/spacy/lang/lb/tokenizer_exceptions.py +++ b/spacy/lang/lb/tokenizer_exceptions.py @@ -1,7 +1,6 @@ -from ..tokenizer_exceptions import BASE_EXCEPTIONS -from ...symbols import ORTH, NORM +from ...symbols import NORM, ORTH from ...util import update_exc - +from ..tokenizer_exceptions import BASE_EXCEPTIONS # TODO # treat other apostrophes within words as part of the word: [op d'mannst], [fir d'éischt] (= exceptions) diff --git a/spacy/lang/lex_attrs.py b/spacy/lang/lex_attrs.py index 6ed981a06..3ac20420d 100644 --- a/spacy/lang/lex_attrs.py +++ b/spacy/lang/lex_attrs.py @@ -1,11 +1,10 @@ -from typing import Set -import unicodedata import re +import unicodedata +from typing import Set from .. 
import attrs from .tokenizer_exceptions import URL_MATCH - _like_email = re.compile(r"([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)").match _tlds = set( "com|org|edu|gov|net|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|" diff --git a/spacy/lang/lg/__init__.py b/spacy/lang/lg/__init__.py index 6f7153fce..a87685375 100644 --- a/spacy/lang/lg/__init__.py +++ b/spacy/lang/lg/__init__.py @@ -1,7 +1,7 @@ -from .stop_words import STOP_WORDS +from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS from .punctuation import TOKENIZER_INFIXES -from ...language import Language, BaseDefaults +from .stop_words import STOP_WORDS class LugandaDefaults(BaseDefaults): diff --git a/spacy/lang/lg/punctuation.py b/spacy/lang/lg/punctuation.py index 5d3eb792e..775c6b001 100644 --- a/spacy/lang/lg/punctuation.py +++ b/spacy/lang/lg/punctuation.py @@ -1,5 +1,12 @@ -from ..char_classes import LIST_ELLIPSES, LIST_ICONS, HYPHENS -from ..char_classes import CONCAT_QUOTES, ALPHA_LOWER, ALPHA_UPPER, ALPHA +from ..char_classes import ( + ALPHA, + ALPHA_LOWER, + ALPHA_UPPER, + CONCAT_QUOTES, + HYPHENS, + LIST_ELLIPSES, + LIST_ICONS, +) _infixes = ( LIST_ELLIPSES diff --git a/spacy/lang/lij/__init__.py b/spacy/lang/lij/__init__.py index b7e11f77e..3b8e972c6 100644 --- a/spacy/lang/lij/__init__.py +++ b/spacy/lang/lij/__init__.py @@ -1,7 +1,7 @@ +from ...language import BaseDefaults, Language +from .punctuation import TOKENIZER_INFIXES from .stop_words import STOP_WORDS from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS -from .punctuation import TOKENIZER_INFIXES -from ...language import Language, BaseDefaults class LigurianDefaults(BaseDefaults): diff --git a/spacy/lang/lij/punctuation.py b/spacy/lang/lij/punctuation.py index d50b75589..c5c150d0a 100644 --- a/spacy/lang/lij/punctuation.py +++ b/spacy/lang/lij/punctuation.py @@ -1,6 +1,5 @@ -from ..punctuation import TOKENIZER_INFIXES from ..char_classes import ALPHA - +from ..punctuation import TOKENIZER_INFIXES ELISION = " ' ’ ".strip().replace(" ", "").replace("\n", "") diff --git a/spacy/lang/lij/tokenizer_exceptions.py b/spacy/lang/lij/tokenizer_exceptions.py index 52eae2c89..cf5a1af66 100644 --- a/spacy/lang/lij/tokenizer_exceptions.py +++ b/spacy/lang/lij/tokenizer_exceptions.py @@ -1,7 +1,6 @@ -from ..tokenizer_exceptions import BASE_EXCEPTIONS from ...symbols import ORTH from ...util import update_exc - +from ..tokenizer_exceptions import BASE_EXCEPTIONS _exc = {} diff --git a/spacy/lang/lt/__init__.py b/spacy/lang/lt/__init__.py index 3ae000e5f..f3ea257b1 100644 --- a/spacy/lang/lt/__init__.py +++ b/spacy/lang/lt/__init__.py @@ -1,8 +1,8 @@ -from .punctuation import TOKENIZER_INFIXES, TOKENIZER_SUFFIXES -from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS -from .stop_words import STOP_WORDS +from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS -from ...language import Language, BaseDefaults +from .punctuation import TOKENIZER_INFIXES, TOKENIZER_SUFFIXES +from .stop_words import STOP_WORDS +from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS class LithuanianDefaults(BaseDefaults): diff --git a/spacy/lang/lt/punctuation.py b/spacy/lang/lt/punctuation.py index 22aee0941..deef24854 100644 --- a/spacy/lang/lt/punctuation.py +++ b/spacy/lang/lt/punctuation.py @@ -1,9 +1,14 @@ -from ..char_classes import LIST_ICONS, LIST_ELLIPSES -from ..char_classes import CONCAT_QUOTES, ALPHA_LOWER, ALPHA_UPPER, ALPHA -from ..char_classes import HYPHENS +from ..char_classes import ( + ALPHA, + ALPHA_LOWER, + 
ALPHA_UPPER, + CONCAT_QUOTES, + HYPHENS, + LIST_ELLIPSES, + LIST_ICONS, +) from ..punctuation import TOKENIZER_SUFFIXES - _infixes = ( LIST_ELLIPSES + LIST_ICONS diff --git a/spacy/lang/lt/tokenizer_exceptions.py b/spacy/lang/lt/tokenizer_exceptions.py index 118fb2190..d39b86dfc 100644 --- a/spacy/lang/lt/tokenizer_exceptions.py +++ b/spacy/lang/lt/tokenizer_exceptions.py @@ -1,7 +1,6 @@ -from ..tokenizer_exceptions import BASE_EXCEPTIONS from ...symbols import ORTH from ...util import update_exc - +from ..tokenizer_exceptions import BASE_EXCEPTIONS _exc = {} diff --git a/spacy/lang/lv/__init__.py b/spacy/lang/lv/__init__.py index a05e5b939..fdfca5e97 100644 --- a/spacy/lang/lv/__init__.py +++ b/spacy/lang/lv/__init__.py @@ -1,5 +1,5 @@ +from ...language import BaseDefaults, Language from .stop_words import STOP_WORDS -from ...language import Language, BaseDefaults class LatvianDefaults(BaseDefaults): diff --git a/spacy/lang/mk/__init__.py b/spacy/lang/mk/__init__.py index fa07cfef9..413f0038d 100644 --- a/spacy/lang/mk/__init__.py +++ b/spacy/lang/mk/__init__.py @@ -1,15 +1,16 @@ -from typing import Optional, Callable +from typing import Callable, Optional + from thinc.api import Model + +from ...attrs import LANG +from ...language import BaseDefaults, Language +from ...lookups import Lookups +from ...util import update_exc +from ..tokenizer_exceptions import BASE_EXCEPTIONS from .lemmatizer import MacedonianLemmatizer +from .lex_attrs import LEX_ATTRS from .stop_words import STOP_WORDS from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS -from .lex_attrs import LEX_ATTRS -from ..tokenizer_exceptions import BASE_EXCEPTIONS - -from ...language import Language, BaseDefaults -from ...attrs import LANG -from ...util import update_exc -from ...lookups import Lookups class MacedonianDefaults(BaseDefaults): diff --git a/spacy/lang/mk/lemmatizer.py b/spacy/lang/mk/lemmatizer.py index a792095e7..f5a5eca85 100644 --- a/spacy/lang/mk/lemmatizer.py +++ b/spacy/lang/mk/lemmatizer.py @@ -1,5 +1,5 @@ -from typing import List from collections import OrderedDict +from typing import List from ...pipeline import Lemmatizer from ...tokens import Token diff --git a/spacy/lang/mk/tokenizer_exceptions.py b/spacy/lang/mk/tokenizer_exceptions.py index 3b589b2a9..40f2c1d80 100644 --- a/spacy/lang/mk/tokenizer_exceptions.py +++ b/spacy/lang/mk/tokenizer_exceptions.py @@ -1,5 +1,4 @@ -from ...symbols import ORTH, NORM - +from ...symbols import NORM, ORTH _exc = {} diff --git a/spacy/lang/ml/__init__.py b/spacy/lang/ml/__init__.py index 9f90605f0..0b17b8a7a 100644 --- a/spacy/lang/ml/__init__.py +++ b/spacy/lang/ml/__init__.py @@ -1,6 +1,6 @@ -from .stop_words import STOP_WORDS +from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS -from ...language import Language, BaseDefaults +from .stop_words import STOP_WORDS class MalayalamDefaults(BaseDefaults): diff --git a/spacy/lang/ml/lex_attrs.py b/spacy/lang/ml/lex_attrs.py index 9ac19b6a7..33a144f6b 100644 --- a/spacy/lang/ml/lex_attrs.py +++ b/spacy/lang/ml/lex_attrs.py @@ -1,6 +1,5 @@ from ...attrs import LIKE_NUM - # reference 2: https://www.omniglot.com/language/numbers/malayalam.htm _num_words = [ diff --git a/spacy/lang/mr/__init__.py b/spacy/lang/mr/__init__.py index 3e172fa60..f980efbd0 100644 --- a/spacy/lang/mr/__init__.py +++ b/spacy/lang/mr/__init__.py @@ -1,5 +1,5 @@ +from ...language import BaseDefaults, Language from .stop_words import STOP_WORDS -from ...language import Language, BaseDefaults class 
MarathiDefaults(BaseDefaults): diff --git a/spacy/lang/ms/__init__.py b/spacy/lang/ms/__init__.py index 31a58a7e6..f53ebfcf2 100644 --- a/spacy/lang/ms/__init__.py +++ b/spacy/lang/ms/__init__.py @@ -1,9 +1,9 @@ -from .stop_words import STOP_WORDS -from .punctuation import TOKENIZER_SUFFIXES, TOKENIZER_PREFIXES, TOKENIZER_INFIXES -from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS +from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS +from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES +from .stop_words import STOP_WORDS from .syntax_iterators import SYNTAX_ITERATORS -from ...language import Language, BaseDefaults +from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS class MalayDefaults(BaseDefaults): diff --git a/spacy/lang/ms/lex_attrs.py b/spacy/lang/ms/lex_attrs.py index 42759fa4f..2088c9955 100644 --- a/spacy/lang/ms/lex_attrs.py +++ b/spacy/lang/ms/lex_attrs.py @@ -1,8 +1,7 @@ import unicodedata -from .punctuation import LIST_CURRENCY from ...attrs import IS_CURRENCY, LIKE_NUM - +from .punctuation import LIST_CURRENCY _num_words = [ "kosong", diff --git a/spacy/lang/ms/punctuation.py b/spacy/lang/ms/punctuation.py index 9fff72576..a8d6c2e8e 100644 --- a/spacy/lang/ms/punctuation.py +++ b/spacy/lang/ms/punctuation.py @@ -1,6 +1,5 @@ -from ..punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES, TOKENIZER_INFIXES -from ..char_classes import ALPHA, merge_chars, split_chars, _currency, _units - +from ..char_classes import ALPHA, _currency, _units, merge_chars, split_chars +from ..punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES _units = ( _units + "s bit Gbps Mbps mbps Kbps kbps ƒ ppi px " diff --git a/spacy/lang/ms/syntax_iterators.py b/spacy/lang/ms/syntax_iterators.py index fa984d411..027798687 100644 --- a/spacy/lang/ms/syntax_iterators.py +++ b/spacy/lang/ms/syntax_iterators.py @@ -1,7 +1,7 @@ -from typing import Union, Iterator, Tuple +from typing import Iterator, Tuple, Union -from ...symbols import NOUN, PROPN, PRON from ...errors import Errors +from ...symbols import NOUN, PRON, PROPN from ...tokens import Doc, Span diff --git a/spacy/lang/ms/tokenizer_exceptions.py b/spacy/lang/ms/tokenizer_exceptions.py index 6b6cf3b15..e8b53fed8 100644 --- a/spacy/lang/ms/tokenizer_exceptions.py +++ b/spacy/lang/ms/tokenizer_exceptions.py @@ -1,8 +1,7 @@ +from ...symbols import NORM, ORTH +from ...util import update_exc from ..tokenizer_exceptions import BASE_EXCEPTIONS from ._tokenizer_exceptions_list import MS_BASE_EXCEPTIONS -from ...symbols import ORTH, NORM -from ...util import update_exc - # Daftar singkatan dan Akronim dari: # https://ms.wiktionary.org/wiki/Wiktionary:Senarai_akronim_dan_singkatan diff --git a/spacy/lang/nb/__init__.py b/spacy/lang/nb/__init__.py index e079236fd..ef4665ccc 100644 --- a/spacy/lang/nb/__init__.py +++ b/spacy/lang/nb/__init__.py @@ -1,12 +1,13 @@ -from typing import Optional, Callable +from typing import Callable, Optional + from thinc.api import Model -from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS -from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_INFIXES -from .punctuation import TOKENIZER_SUFFIXES + +from ...language import BaseDefaults, Language +from ...pipeline import Lemmatizer +from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES from .stop_words import STOP_WORDS from .syntax_iterators import SYNTAX_ITERATORS -from ...language import Language, BaseDefaults -from ...pipeline import Lemmatizer +from 
.tokenizer_exceptions import TOKENIZER_EXCEPTIONS class NorwegianDefaults(BaseDefaults): diff --git a/spacy/lang/nb/punctuation.py b/spacy/lang/nb/punctuation.py index 8f2933670..a1fdb872a 100644 --- a/spacy/lang/nb/punctuation.py +++ b/spacy/lang/nb/punctuation.py @@ -1,7 +1,17 @@ -from ..char_classes import LIST_ELLIPSES, LIST_ICONS, LIST_PUNCT, LIST_QUOTES -from ..char_classes import CONCAT_QUOTES, ALPHA, ALPHA_LOWER, ALPHA_UPPER -from ..char_classes import CURRENCY, PUNCT, UNITS, LIST_CURRENCY - +from ..char_classes import ( + ALPHA, + ALPHA_LOWER, + ALPHA_UPPER, + CONCAT_QUOTES, + CURRENCY, + LIST_CURRENCY, + LIST_ELLIPSES, + LIST_ICONS, + LIST_PUNCT, + LIST_QUOTES, + PUNCT, + UNITS, +) # Punctuation adapted from Danish _quotes = CONCAT_QUOTES.replace("'", "") diff --git a/spacy/lang/nb/syntax_iterators.py b/spacy/lang/nb/syntax_iterators.py index d86662693..89a8f5edf 100644 --- a/spacy/lang/nb/syntax_iterators.py +++ b/spacy/lang/nb/syntax_iterators.py @@ -1,7 +1,7 @@ -from typing import Union, Iterator, Tuple +from typing import Iterator, Tuple, Union -from ...symbols import NOUN, PROPN, PRON from ...errors import Errors +from ...symbols import NOUN, PRON, PROPN from ...tokens import Doc, Span diff --git a/spacy/lang/nb/tokenizer_exceptions.py b/spacy/lang/nb/tokenizer_exceptions.py index 0be436ae4..9b99a1d65 100644 --- a/spacy/lang/nb/tokenizer_exceptions.py +++ b/spacy/lang/nb/tokenizer_exceptions.py @@ -1,7 +1,6 @@ -from ..tokenizer_exceptions import BASE_EXCEPTIONS -from ...symbols import ORTH, NORM +from ...symbols import NORM, ORTH from ...util import update_exc - +from ..tokenizer_exceptions import BASE_EXCEPTIONS _exc = {} diff --git a/spacy/lang/ne/__init__.py b/spacy/lang/ne/__init__.py index 0028d1b0b..5c9e6870e 100644 --- a/spacy/lang/ne/__init__.py +++ b/spacy/lang/ne/__init__.py @@ -1,6 +1,6 @@ -from .stop_words import STOP_WORDS +from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS -from ...language import Language, BaseDefaults +from .stop_words import STOP_WORDS class NepaliDefaults(BaseDefaults): diff --git a/spacy/lang/ne/lex_attrs.py b/spacy/lang/ne/lex_attrs.py index 7cb01c515..91d5b0eb5 100644 --- a/spacy/lang/ne/lex_attrs.py +++ b/spacy/lang/ne/lex_attrs.py @@ -1,6 +1,5 @@ +from ...attrs import LIKE_NUM, NORM from ..norm_exceptions import BASE_NORMS -from ...attrs import NORM, LIKE_NUM - # fmt: off _stem_suffixes = [ diff --git a/spacy/lang/nl/__init__.py b/spacy/lang/nl/__init__.py index ad2205a0b..213041a85 100644 --- a/spacy/lang/nl/__init__.py +++ b/spacy/lang/nl/__init__.py @@ -1,15 +1,14 @@ -from typing import Optional, Callable +from typing import Callable, Optional from thinc.api import Model +from ...language import BaseDefaults, Language from .lemmatizer import DutchLemmatizer from .lex_attrs import LEX_ATTRS -from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_INFIXES -from .punctuation import TOKENIZER_SUFFIXES +from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES from .stop_words import STOP_WORDS from .syntax_iterators import SYNTAX_ITERATORS from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS -from ...language import Language, BaseDefaults class DutchDefaults(BaseDefaults): diff --git a/spacy/lang/nl/lex_attrs.py b/spacy/lang/nl/lex_attrs.py index f1acaefeb..488224c2f 100644 --- a/spacy/lang/nl/lex_attrs.py +++ b/spacy/lang/nl/lex_attrs.py @@ -1,6 +1,5 @@ from ...attrs import LIKE_NUM - _num_words = set( """ nul een één twee drie vier vijf zes zeven acht negen tien elf twaalf dertien 
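Note on the lex_attrs modules touched above: the hunks only reorder imports, but each of these files follows the same shape, a module-level _num_words table plus a like_num() predicate exported through a LEX_ATTRS dict keyed by LIKE_NUM. The sketch below is illustrative only and assumes that pattern; the tiny word list and simplified helper are hypothetical stand-ins, not code from this diff, and the real implementations also strip punctuation and handle digit groups and fractions.

    from spacy.attrs import LIKE_NUM

    # Illustrative subset only; the real _num_words tables are much larger.
    _num_words = {"nul", "een", "twee", "drie", "tien"}

    def like_num(text: str) -> bool:
        # Plain digit strings ("10") count, as do the known number words above.
        if text.isdigit():
            return True
        return text.lower() in _num_words

    # Language modules expose the getter under the LIKE_NUM attribute ID.
    LEX_ATTRS = {LIKE_NUM: like_num}
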
diff --git a/spacy/lang/nl/punctuation.py b/spacy/lang/nl/punctuation.py index d9dd2a6e3..c9a4c9eeb 100644 --- a/spacy/lang/nl/punctuation.py +++ b/spacy/lang/nl/punctuation.py @@ -1,10 +1,19 @@ -from ..char_classes import LIST_ELLIPSES, LIST_ICONS, LIST_UNITS, merge_chars -from ..char_classes import LIST_PUNCT, LIST_QUOTES, CURRENCY, PUNCT -from ..char_classes import CONCAT_QUOTES, ALPHA, ALPHA_LOWER, ALPHA_UPPER - +from ..char_classes import ( + ALPHA, + ALPHA_LOWER, + ALPHA_UPPER, + CONCAT_QUOTES, + CURRENCY, + LIST_ELLIPSES, + LIST_ICONS, + LIST_PUNCT, + LIST_QUOTES, + LIST_UNITS, + PUNCT, + merge_chars, +) from ..punctuation import TOKENIZER_PREFIXES as BASE_TOKENIZER_PREFIXES - _prefixes = [",,"] + BASE_TOKENIZER_PREFIXES diff --git a/spacy/lang/nl/syntax_iterators.py b/spacy/lang/nl/syntax_iterators.py index be9beabe6..d7388a333 100644 --- a/spacy/lang/nl/syntax_iterators.py +++ b/spacy/lang/nl/syntax_iterators.py @@ -1,7 +1,7 @@ -from typing import Union, Iterator, Tuple +from typing import Iterator, Tuple, Union -from ...symbols import NOUN, PRON from ...errors import Errors +from ...symbols import NOUN, PRON from ...tokens import Doc, Span diff --git a/spacy/lang/nl/tokenizer_exceptions.py b/spacy/lang/nl/tokenizer_exceptions.py index 489d10d71..85ad49f14 100644 --- a/spacy/lang/nl/tokenizer_exceptions.py +++ b/spacy/lang/nl/tokenizer_exceptions.py @@ -1,7 +1,6 @@ -from ..tokenizer_exceptions import BASE_EXCEPTIONS from ...symbols import ORTH from ...util import update_exc - +from ..tokenizer_exceptions import BASE_EXCEPTIONS # Extensive list of both common and uncommon dutch abbreviations copied from # github.com/diasks2/pragmatic_segmenter, a Ruby library for rule-based diff --git a/spacy/lang/nn/__init__.py b/spacy/lang/nn/__init__.py new file mode 100644 index 000000000..ebbf07090 --- /dev/null +++ b/spacy/lang/nn/__init__.py @@ -0,0 +1,20 @@ +from ...language import BaseDefaults, Language +from ..nb import SYNTAX_ITERATORS +from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES +from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS + + +class NorwegianNynorskDefaults(BaseDefaults): + tokenizer_exceptions = TOKENIZER_EXCEPTIONS + prefixes = TOKENIZER_PREFIXES + infixes = TOKENIZER_INFIXES + suffixes = TOKENIZER_SUFFIXES + syntax_iterators = SYNTAX_ITERATORS + + +class NorwegianNynorsk(Language): + lang = "nn" + Defaults = NorwegianNynorskDefaults + + +__all__ = ["NorwegianNynorsk"] diff --git a/spacy/lang/nn/examples.py b/spacy/lang/nn/examples.py new file mode 100644 index 000000000..95ec0aadd --- /dev/null +++ b/spacy/lang/nn/examples.py @@ -0,0 +1,15 @@ +""" +Example sentences to test spaCy and its language models. 
+ +>>> from spacy.lang.nn.examples import sentences +>>> docs = nlp.pipe(sentences) +""" + + +# sentences taken from Omsetjingsminne frå Nynorsk pressekontor 2022 (https://www.nb.no/sprakbanken/en/resource-catalogue/oai-nb-no-sbr-80/) +sentences = [ + "Konseptet går ut på at alle tre omgangar tel, alle hopparar må stille i kvalifiseringa og poengsummen skal telje.", + "Det er ein meir enn i same periode i fjor.", + "Det har lava ned enorme snømengder i store delar av Europa den siste tida.", + "Akhtar Chaudhry er ikkje innstilt på Oslo-lista til SV, men utfordrar Heikki Holmås om førsteplassen.", +] diff --git a/spacy/lang/nn/punctuation.py b/spacy/lang/nn/punctuation.py new file mode 100644 index 000000000..7b50b58d3 --- /dev/null +++ b/spacy/lang/nn/punctuation.py @@ -0,0 +1,74 @@ +from ..char_classes import ( + ALPHA, + ALPHA_LOWER, + ALPHA_UPPER, + CONCAT_QUOTES, + CURRENCY, + LIST_CURRENCY, + LIST_ELLIPSES, + LIST_ICONS, + LIST_PUNCT, + LIST_QUOTES, + PUNCT, + UNITS, +) +from ..punctuation import TOKENIZER_SUFFIXES + +_quotes = CONCAT_QUOTES.replace("'", "") +_list_punct = [x for x in LIST_PUNCT if x != "#"] +_list_icons = [x for x in LIST_ICONS if x != "°"] +_list_icons = [x.replace("\\u00B0", "") for x in _list_icons] +_list_quotes = [x for x in LIST_QUOTES if x != "\\'"] + + +_prefixes = ( + ["§", "%", "=", "—", "–", r"\+(?![0-9])"] + + _list_punct + + LIST_ELLIPSES + + LIST_QUOTES + + LIST_CURRENCY + + LIST_ICONS +) + + +_infixes = ( + LIST_ELLIPSES + + _list_icons + + [ + r"(?<=[{al}])\.(?=[{au}])".format(al=ALPHA_LOWER, au=ALPHA_UPPER), + r"(?<=[{a}])[,!?](?=[{a}])".format(a=ALPHA), + r"(?<=[{a}])[:<>=/](?=[{a}])".format(a=ALPHA), + r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA), + r"(?<=[{a}])([{q}\)\]\(\[])(?=[{a}])".format(a=ALPHA, q=_quotes), + r"(?<=[{a}])--(?=[{a}])".format(a=ALPHA), + ] +) + +_suffixes = ( + LIST_PUNCT + + LIST_ELLIPSES + + _list_quotes + + _list_icons + + ["—", "–"] + + [ + r"(?<=[0-9])\+", + r"(?<=°[FfCcKk])\.", + r"(?<=[0-9])(?:{c})".format(c=CURRENCY), + r"(?<=[0-9])(?:{u})".format(u=UNITS), + r"(?<=[{al}{e}{p}(?:{q})])\.".format( + al=ALPHA_LOWER, e=r"%²\-\+", q=_quotes, p=PUNCT + ), + r"(?<=[{au}][{au}])\.".format(au=ALPHA_UPPER), + ] + + [r"(?<=[^sSxXzZ])'"] +) +_suffixes += [ + suffix + for suffix in TOKENIZER_SUFFIXES + if suffix not in ["'s", "'S", "’s", "’S", r"\'"] +] + + +TOKENIZER_PREFIXES = _prefixes +TOKENIZER_INFIXES = _infixes +TOKENIZER_SUFFIXES = _suffixes diff --git a/spacy/lang/nn/tokenizer_exceptions.py b/spacy/lang/nn/tokenizer_exceptions.py new file mode 100644 index 000000000..4bfcb26d8 --- /dev/null +++ b/spacy/lang/nn/tokenizer_exceptions.py @@ -0,0 +1,228 @@ +from ...symbols import NORM, ORTH +from ...util import update_exc +from ..tokenizer_exceptions import BASE_EXCEPTIONS + +_exc = {} + + +for exc_data in [ + {ORTH: "jan.", NORM: "januar"}, + {ORTH: "feb.", NORM: "februar"}, + {ORTH: "mar.", NORM: "mars"}, + {ORTH: "apr.", NORM: "april"}, + {ORTH: "jun.", NORM: "juni"}, + # note: "jul." 
is in the simple list below without a NORM exception + {ORTH: "aug.", NORM: "august"}, + {ORTH: "sep.", NORM: "september"}, + {ORTH: "okt.", NORM: "oktober"}, + {ORTH: "nov.", NORM: "november"}, + {ORTH: "des.", NORM: "desember"}, +]: + _exc[exc_data[ORTH]] = [exc_data] + + +for orth in [ + "Ap.", + "Aq.", + "Ca.", + "Chr.", + "Co.", + "Dr.", + "F.eks.", + "Fr.p.", + "Frp.", + "Grl.", + "Kr.", + "Kr.F.", + "Kr.F.s", + "Mr.", + "Mrs.", + "Pb.", + "Pr.", + "Sp.", + "St.", + "a.m.", + "ad.", + "adm.dir.", + "adr.", + "b.c.", + "bl.a.", + "bla.", + "bm.", + "bnr.", + "bto.", + "c.c.", + "ca.", + "cand.mag.", + "co.", + "d.d.", + "d.m.", + "d.y.", + "dept.", + "dr.", + "dr.med.", + "dr.philos.", + "dr.psychol.", + "dss.", + "dvs.", + "e.Kr.", + "e.l.", + "eg.", + "eig.", + "ekskl.", + "el.", + "et.", + "etc.", + "etg.", + "ev.", + "evt.", + "f.", + "f.Kr.", + "f.eks.", + "f.o.m.", + "fhv.", + "fk.", + "foreg.", + "fork.", + "fv.", + "fvt.", + "g.", + "gl.", + "gno.", + "gnr.", + "grl.", + "gt.", + "h.r.adv.", + "hhv.", + "hoh.", + "hr.", + "ifb.", + "ifm.", + "iht.", + "inkl.", + "istf.", + "jf.", + "jr.", + "jul.", + "juris.", + "kfr.", + "kgl.", + "kgl.res.", + "kl.", + "komm.", + "kr.", + "kst.", + "lat.", + "lø.", + "m.a.", + "m.a.o.", + "m.fl.", + "m.m.", + "m.v.", + "ma.", + "mag.art.", + "md.", + "mfl.", + "mht.", + "mill.", + "min.", + "mnd.", + "moh.", + "mrd.", + "muh.", + "mv.", + "mva.", + "n.å.", + "ndf.", + "nr.", + "nto.", + "nyno.", + "o.a.", + "o.l.", + "obl.", + "off.", + "ofl.", + "on.", + "op.", + "org.", + "osv.", + "ovf.", + "p.", + "p.a.", + "p.g.a.", + "p.m.", + "p.t.", + "pga.", + "ph.d.", + "pkt.", + "pr.", + "pst.", + "pt.", + "red.anm.", + "ref.", + "res.", + "res.kap.", + "resp.", + "rv.", + "s.", + "s.d.", + "s.k.", + "s.u.", + "s.å.", + "sen.", + "sep.", + "siviling.", + "sms.", + "snr.", + "spm.", + "sr.", + "sst.", + "st.", + "st.meld.", + "st.prp.", + "stip.", + "stk.", + "stud.", + "sv.", + "såk.", + "sø.", + "t.d.", + "t.h.", + "t.o.m.", + "t.v.", + "temp.", + "ti.", + "tils.", + "tilsv.", + "tl;dr", + "tlf.", + "to.", + "ult.", + "utg.", + "v.", + "vedk.", + "vedr.", + "vg.", + "vgs.", + "vha.", + "vit.ass.", + "vn.", + "vol.", + "vs.", + "vsa.", + "§§", + "©NTB", + "årg.", + "årh.", +]: + _exc[orth] = [{ORTH: orth}] + +# Dates +for h in range(1, 31 + 1): + for period in ["."]: + _exc[f"{h}{period}"] = [{ORTH: f"{h}."}] + +_custom_base_exc = {"i.": [{ORTH: "i", NORM: "i"}, {ORTH: "."}]} +_exc.update(_custom_base_exc) + +TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc) diff --git a/spacy/lang/pl/__init__.py b/spacy/lang/pl/__init__.py index 02c96799b..50a3a8e4c 100644 --- a/spacy/lang/pl/__init__.py +++ b/spacy/lang/pl/__init__.py @@ -1,15 +1,13 @@ -from typing import Optional, Callable +from typing import Callable, Optional from thinc.api import Model -from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_INFIXES -from .punctuation import TOKENIZER_SUFFIXES -from .stop_words import STOP_WORDS -from .lex_attrs import LEX_ATTRS -from .lemmatizer import PolishLemmatizer +from ...language import BaseDefaults, Language from ..tokenizer_exceptions import BASE_EXCEPTIONS -from ...language import Language, BaseDefaults - +from .lemmatizer import PolishLemmatizer +from .lex_attrs import LEX_ATTRS +from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES +from .stop_words import STOP_WORDS TOKENIZER_EXCEPTIONS = { exc: val for exc, val in BASE_EXCEPTIONS.items() if not exc.endswith(".") diff --git a/spacy/lang/pl/lemmatizer.py 
b/spacy/lang/pl/lemmatizer.py index 059d0609a..d1d2a9c54 100644 --- a/spacy/lang/pl/lemmatizer.py +++ b/spacy/lang/pl/lemmatizer.py @@ -1,4 +1,4 @@ -from typing import List, Dict, Tuple +from typing import Dict, List, Tuple from ...pipeline import Lemmatizer from ...tokens import Token diff --git a/spacy/lang/pl/lex_attrs.py b/spacy/lang/pl/lex_attrs.py index ce56e28a8..398f52a3c 100644 --- a/spacy/lang/pl/lex_attrs.py +++ b/spacy/lang/pl/lex_attrs.py @@ -1,6 +1,5 @@ from ...attrs import LIKE_NUM - _num_words = [ "zero", "jeden", diff --git a/spacy/lang/pl/punctuation.py b/spacy/lang/pl/punctuation.py index 31e56b9ae..84ff239ed 100644 --- a/spacy/lang/pl/punctuation.py +++ b/spacy/lang/pl/punctuation.py @@ -1,6 +1,17 @@ -from ..char_classes import LIST_ELLIPSES, LIST_PUNCT, LIST_HYPHENS -from ..char_classes import LIST_ICONS, LIST_QUOTES, CURRENCY, UNITS, PUNCT -from ..char_classes import CONCAT_QUOTES, ALPHA, ALPHA_LOWER, ALPHA_UPPER +from ..char_classes import ( + ALPHA, + ALPHA_LOWER, + ALPHA_UPPER, + CONCAT_QUOTES, + CURRENCY, + LIST_ELLIPSES, + LIST_HYPHENS, + LIST_ICONS, + LIST_PUNCT, + LIST_QUOTES, + PUNCT, + UNITS, +) from ..punctuation import TOKENIZER_PREFIXES as BASE_TOKENIZER_PREFIXES _quotes = CONCAT_QUOTES.replace("'", "") diff --git a/spacy/lang/pt/__init__.py b/spacy/lang/pt/__init__.py index 454002491..be4041f8e 100644 --- a/spacy/lang/pt/__init__.py +++ b/spacy/lang/pt/__init__.py @@ -1,9 +1,9 @@ -from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS -from .stop_words import STOP_WORDS +from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS -from .syntax_iterators import SYNTAX_ITERATORS from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES -from ...language import Language, BaseDefaults +from .stop_words import STOP_WORDS +from .syntax_iterators import SYNTAX_ITERATORS +from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS class PortugueseDefaults(BaseDefaults): diff --git a/spacy/lang/pt/lex_attrs.py b/spacy/lang/pt/lex_attrs.py index 3c6979ab4..de6a67f14 100644 --- a/spacy/lang/pt/lex_attrs.py +++ b/spacy/lang/pt/lex_attrs.py @@ -1,6 +1,5 @@ from ...attrs import LIKE_NUM - _num_words = [ "zero", "um", diff --git a/spacy/lang/pt/punctuation.py b/spacy/lang/pt/punctuation.py index 08e31f9d0..b2d63cb3d 100644 --- a/spacy/lang/pt/punctuation.py +++ b/spacy/lang/pt/punctuation.py @@ -1,6 +1,6 @@ +from ..punctuation import TOKENIZER_INFIXES as BASE_TOKENIZER_INFIXES from ..punctuation import TOKENIZER_PREFIXES as BASE_TOKENIZER_PREFIXES from ..punctuation import TOKENIZER_SUFFIXES as BASE_TOKENIZER_SUFFIXES -from ..punctuation import TOKENIZER_INFIXES as BASE_TOKENIZER_INFIXES _prefixes = [r"\w{1,3}\$"] + BASE_TOKENIZER_PREFIXES diff --git a/spacy/lang/pt/syntax_iterators.py b/spacy/lang/pt/syntax_iterators.py index 62661f5e4..11017aace 100644 --- a/spacy/lang/pt/syntax_iterators.py +++ b/spacy/lang/pt/syntax_iterators.py @@ -1,7 +1,7 @@ -from typing import Union, Iterator, Tuple +from typing import Iterator, Tuple, Union -from ...symbols import NOUN, PROPN, PRON from ...errors import Errors +from ...symbols import NOUN, PRON, PROPN from ...tokens import Doc, Span diff --git a/spacy/lang/pt/tokenizer_exceptions.py b/spacy/lang/pt/tokenizer_exceptions.py index 187fc65ea..e369eda80 100644 --- a/spacy/lang/pt/tokenizer_exceptions.py +++ b/spacy/lang/pt/tokenizer_exceptions.py @@ -1,7 +1,6 @@ -from ..tokenizer_exceptions import BASE_EXCEPTIONS from ...symbols import ORTH from ...util import update_exc - +from ..tokenizer_exceptions import 
BASE_EXCEPTIONS _exc = {} diff --git a/spacy/lang/punctuation.py b/spacy/lang/punctuation.py index a1cfe6224..e4a6392c8 100644 --- a/spacy/lang/punctuation.py +++ b/spacy/lang/punctuation.py @@ -1,7 +1,19 @@ -from .char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES, LIST_CURRENCY -from .char_classes import LIST_ICONS, HYPHENS, CURRENCY, UNITS, COMBINING_DIACRITICS -from .char_classes import CONCAT_QUOTES, ALPHA_LOWER, ALPHA_UPPER, ALPHA, PUNCT - +from .char_classes import ( + ALPHA, + ALPHA_LOWER, + ALPHA_UPPER, + COMBINING_DIACRITICS, + CONCAT_QUOTES, + CURRENCY, + HYPHENS, + LIST_CURRENCY, + LIST_ELLIPSES, + LIST_ICONS, + LIST_PUNCT, + LIST_QUOTES, + PUNCT, + UNITS, +) TOKENIZER_PREFIXES = ( ["§", "%", "=", "—", "–", r"\+(?![0-9])"] diff --git a/spacy/lang/ro/__init__.py b/spacy/lang/ro/__init__.py index 50027ffd2..441fefbb6 100644 --- a/spacy/lang/ro/__init__.py +++ b/spacy/lang/ro/__init__.py @@ -1,9 +1,8 @@ -from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS -from .stop_words import STOP_WORDS -from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_INFIXES -from .punctuation import TOKENIZER_SUFFIXES +from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS -from ...language import Language, BaseDefaults +from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES +from .stop_words import STOP_WORDS +from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS # Lemma data note: # Original pairs downloaded from http://www.lexiconista.com/datasets/lemmatization/ diff --git a/spacy/lang/ro/lex_attrs.py b/spacy/lang/ro/lex_attrs.py index 0f86f53cd..736aa911a 100644 --- a/spacy/lang/ro/lex_attrs.py +++ b/spacy/lang/ro/lex_attrs.py @@ -1,6 +1,5 @@ from ...attrs import LIKE_NUM - _num_words = set( """ zero unu doi două trei patru cinci șase șapte opt nouă zece diff --git a/spacy/lang/ro/punctuation.py b/spacy/lang/ro/punctuation.py index 529e1c977..7259f9ae7 100644 --- a/spacy/lang/ro/punctuation.py +++ b/spacy/lang/ro/punctuation.py @@ -1,9 +1,18 @@ import itertools -from ..char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES, LIST_CURRENCY -from ..char_classes import LIST_ICONS, CURRENCY -from ..char_classes import CONCAT_QUOTES, ALPHA_LOWER, ALPHA_UPPER, ALPHA, PUNCT - +from ..char_classes import ( + ALPHA, + ALPHA_LOWER, + ALPHA_UPPER, + CONCAT_QUOTES, + CURRENCY, + LIST_CURRENCY, + LIST_ELLIPSES, + LIST_ICONS, + LIST_PUNCT, + LIST_QUOTES, + PUNCT, +) _list_icons = [x for x in LIST_ICONS if x != "°"] _list_icons = [x.replace("\\u00B0", "") for x in _list_icons] diff --git a/spacy/lang/ro/tokenizer_exceptions.py b/spacy/lang/ro/tokenizer_exceptions.py index b8af0b1d6..a397b2754 100644 --- a/spacy/lang/ro/tokenizer_exceptions.py +++ b/spacy/lang/ro/tokenizer_exceptions.py @@ -1,9 +1,8 @@ -from ..tokenizer_exceptions import BASE_EXCEPTIONS from ...symbols import ORTH from ...util import update_exc +from ..tokenizer_exceptions import BASE_EXCEPTIONS from .punctuation import _make_ro_variants - _exc = {} diff --git a/spacy/lang/ru/__init__.py b/spacy/lang/ru/__init__.py index 7d17628c4..880965b70 100644 --- a/spacy/lang/ru/__init__.py +++ b/spacy/lang/ru/__init__.py @@ -1,13 +1,16 @@ -from typing import Optional, Callable +from typing import Callable, Optional + from thinc.api import Model +from ...language import BaseDefaults, Language +from ..punctuation import ( + COMBINING_DIACRITICS_TOKENIZER_INFIXES, + COMBINING_DIACRITICS_TOKENIZER_SUFFIXES, +) +from .lemmatizer import RussianLemmatizer +from .lex_attrs import LEX_ATTRS from 
.stop_words import STOP_WORDS from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS -from .lex_attrs import LEX_ATTRS -from .lemmatizer import RussianLemmatizer -from ..punctuation import COMBINING_DIACRITICS_TOKENIZER_INFIXES -from ..punctuation import COMBINING_DIACRITICS_TOKENIZER_SUFFIXES -from ...language import Language, BaseDefaults class RussianDefaults(BaseDefaults): diff --git a/spacy/lang/ru/lemmatizer.py b/spacy/lang/ru/lemmatizer.py index f4a35de38..1e41220f3 100644 --- a/spacy/lang/ru/lemmatizer.py +++ b/spacy/lang/ru/lemmatizer.py @@ -1,4 +1,4 @@ -from typing import Optional, List, Dict, Tuple, Callable +from typing import Callable, Dict, List, Optional, Tuple from thinc.api import Model @@ -8,7 +8,6 @@ from ...symbols import POS from ...tokens import Token from ...vocab import Vocab - PUNCT_RULES = {"«": '"', "»": '"'} diff --git a/spacy/lang/ru/lex_attrs.py b/spacy/lang/ru/lex_attrs.py index 2afe47623..e0b35bdc0 100644 --- a/spacy/lang/ru/lex_attrs.py +++ b/spacy/lang/ru/lex_attrs.py @@ -1,6 +1,5 @@ from ...attrs import LIKE_NUM - _num_words = list( set( """ diff --git a/spacy/lang/ru/tokenizer_exceptions.py b/spacy/lang/ru/tokenizer_exceptions.py index e1889f785..0a8c476b1 100644 --- a/spacy/lang/ru/tokenizer_exceptions.py +++ b/spacy/lang/ru/tokenizer_exceptions.py @@ -1,6 +1,6 @@ -from ..tokenizer_exceptions import BASE_EXCEPTIONS -from ...symbols import ORTH, NORM +from ...symbols import NORM, ORTH from ...util import update_exc +from ..tokenizer_exceptions import BASE_EXCEPTIONS _exc = {} diff --git a/spacy/lang/sa/__init__.py b/spacy/lang/sa/__init__.py index 61398af6c..c7c0e98e6 100644 --- a/spacy/lang/sa/__init__.py +++ b/spacy/lang/sa/__init__.py @@ -1,6 +1,6 @@ -from .stop_words import STOP_WORDS +from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS -from ...language import Language, BaseDefaults +from .stop_words import STOP_WORDS class SanskritDefaults(BaseDefaults): diff --git a/spacy/lang/si/__init__.py b/spacy/lang/si/__init__.py index 971cee3c6..08d0937b1 100644 --- a/spacy/lang/si/__init__.py +++ b/spacy/lang/si/__init__.py @@ -1,6 +1,6 @@ -from .stop_words import STOP_WORDS +from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS -from ...language import Language, BaseDefaults +from .stop_words import STOP_WORDS class SinhalaDefaults(BaseDefaults): diff --git a/spacy/lang/sk/__init__.py b/spacy/lang/sk/__init__.py index da6e3048e..2ed7448d2 100644 --- a/spacy/lang/sk/__init__.py +++ b/spacy/lang/sk/__init__.py @@ -1,6 +1,6 @@ -from .stop_words import STOP_WORDS +from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS -from ...language import Language, BaseDefaults +from .stop_words import STOP_WORDS class SlovakDefaults(BaseDefaults): diff --git a/spacy/lang/sl/__init__.py b/spacy/lang/sl/__init__.py index 0070e9fa1..cd3d70fc9 100644 --- a/spacy/lang/sl/__init__.py +++ b/spacy/lang/sl/__init__.py @@ -1,8 +1,8 @@ +from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS -from .punctuation import TOKENIZER_INFIXES, TOKENIZER_SUFFIXES, TOKENIZER_PREFIXES +from .punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES from .stop_words import STOP_WORDS from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS -from ...language import Language, BaseDefaults class SlovenianDefaults(BaseDefaults): diff --git a/spacy/lang/sl/lex_attrs.py b/spacy/lang/sl/lex_attrs.py index 958152e37..3c1493050 100644 --- a/spacy/lang/sl/lex_attrs.py +++ 
b/spacy/lang/sl/lex_attrs.py @@ -1,7 +1,6 @@ -from ...attrs import LIKE_NUM -from ...attrs import IS_CURRENCY import unicodedata +from ...attrs import IS_CURRENCY, LIKE_NUM _num_words = set( """ diff --git a/spacy/lang/sl/punctuation.py b/spacy/lang/sl/punctuation.py index b6ca1830e..dadb54d31 100644 --- a/spacy/lang/sl/punctuation.py +++ b/spacy/lang/sl/punctuation.py @@ -1,20 +1,21 @@ from ..char_classes import ( + ALPHA, + ALPHA_LOWER, + ALPHA_UPPER, + CONCAT_QUOTES, + CURRENCY, + HYPHENS, + LIST_CURRENCY, LIST_ELLIPSES, LIST_ICONS, - HYPHENS, LIST_PUNCT, LIST_QUOTES, - CURRENCY, - UNITS, PUNCT, - LIST_CURRENCY, - CONCAT_QUOTES, + UNITS, + merge_chars, ) -from ..char_classes import CONCAT_QUOTES, ALPHA_LOWER, ALPHA_UPPER, ALPHA -from ..char_classes import merge_chars from ..punctuation import TOKENIZER_PREFIXES as BASE_TOKENIZER_PREFIXES - INCLUDE_SPECIAL = ["\\+", "\\/", "\\•", "\\¯", "\\=", "\\×"] + HYPHENS.split("|") _prefixes = INCLUDE_SPECIAL + BASE_TOKENIZER_PREFIXES diff --git a/spacy/lang/sl/tokenizer_exceptions.py b/spacy/lang/sl/tokenizer_exceptions.py index 3d4109228..ec4ea9e41 100644 --- a/spacy/lang/sl/tokenizer_exceptions.py +++ b/spacy/lang/sl/tokenizer_exceptions.py @@ -1,7 +1,8 @@ from typing import Dict, List -from ..tokenizer_exceptions import BASE_EXCEPTIONS -from ...symbols import ORTH, NORM + +from ...symbols import NORM, ORTH from ...util import update_exc +from ..tokenizer_exceptions import BASE_EXCEPTIONS _exc: Dict[str, List[Dict]] = {} diff --git a/spacy/lang/sq/__init__.py b/spacy/lang/sq/__init__.py index 5e32a0cbe..1c8a5acf8 100644 --- a/spacy/lang/sq/__init__.py +++ b/spacy/lang/sq/__init__.py @@ -1,5 +1,5 @@ +from ...language import BaseDefaults, Language from .stop_words import STOP_WORDS -from ...language import Language, BaseDefaults class AlbanianDefaults(BaseDefaults): diff --git a/spacy/lang/sr/__init__.py b/spacy/lang/sr/__init__.py index b99ce96ec..5f121d79e 100644 --- a/spacy/lang/sr/__init__.py +++ b/spacy/lang/sr/__init__.py @@ -1,8 +1,8 @@ -from .stop_words import STOP_WORDS -from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS +from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS from .punctuation import TOKENIZER_INFIXES, TOKENIZER_SUFFIXES -from ...language import Language, BaseDefaults +from .stop_words import STOP_WORDS +from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS class SerbianDefaults(BaseDefaults): diff --git a/spacy/lang/sr/lex_attrs.py b/spacy/lang/sr/lex_attrs.py index dc48909bc..696b9fd74 100644 --- a/spacy/lang/sr/lex_attrs.py +++ b/spacy/lang/sr/lex_attrs.py @@ -1,6 +1,5 @@ from ...attrs import LIKE_NUM - _num_words = [ "нула", "један", diff --git a/spacy/lang/sr/punctuation.py b/spacy/lang/sr/punctuation.py index 793a20ec2..cafb0f68f 100644 --- a/spacy/lang/sr/punctuation.py +++ b/spacy/lang/sr/punctuation.py @@ -1,7 +1,16 @@ -from ..char_classes import LIST_ELLIPSES, LIST_ICONS, LIST_PUNCT, LIST_QUOTES -from ..char_classes import CURRENCY, UNITS, PUNCT -from ..char_classes import CONCAT_QUOTES, ALPHA, ALPHA_LOWER, ALPHA_UPPER - +from ..char_classes import ( + ALPHA, + ALPHA_LOWER, + ALPHA_UPPER, + CONCAT_QUOTES, + CURRENCY, + LIST_ELLIPSES, + LIST_ICONS, + LIST_PUNCT, + LIST_QUOTES, + PUNCT, + UNITS, +) _infixes = ( LIST_ELLIPSES diff --git a/spacy/lang/sr/tokenizer_exceptions.py b/spacy/lang/sr/tokenizer_exceptions.py index dcaa3e239..b7db0aadc 100755 --- a/spacy/lang/sr/tokenizer_exceptions.py +++ b/spacy/lang/sr/tokenizer_exceptions.py @@ -1,7 +1,6 @@ -from ..tokenizer_exceptions import 
BASE_EXCEPTIONS -from ...symbols import ORTH, NORM +from ...symbols import NORM, ORTH from ...util import update_exc - +from ..tokenizer_exceptions import BASE_EXCEPTIONS _exc = {} diff --git a/spacy/lang/sv/__init__.py b/spacy/lang/sv/__init__.py index 28e5085a8..bb4ee1702 100644 --- a/spacy/lang/sv/__init__.py +++ b/spacy/lang/sv/__init__.py @@ -1,12 +1,14 @@ -from typing import Optional, Callable +from typing import Callable, Optional + from thinc.api import Model -from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS -from .stop_words import STOP_WORDS -from .lex_attrs import LEX_ATTRS -from .syntax_iterators import SYNTAX_ITERATORS -from ...language import Language, BaseDefaults + +from ...language import BaseDefaults, Language from ...pipeline import Lemmatizer +from .lex_attrs import LEX_ATTRS from .punctuation import TOKENIZER_INFIXES, TOKENIZER_SUFFIXES +from .stop_words import STOP_WORDS +from .syntax_iterators import SYNTAX_ITERATORS +from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS class SwedishDefaults(BaseDefaults): diff --git a/spacy/lang/sv/lex_attrs.py b/spacy/lang/sv/lex_attrs.py index f8ada9e2e..8eeafede8 100644 --- a/spacy/lang/sv/lex_attrs.py +++ b/spacy/lang/sv/lex_attrs.py @@ -1,6 +1,5 @@ from ...attrs import LIKE_NUM - _num_words = [ "noll", "en", diff --git a/spacy/lang/sv/punctuation.py b/spacy/lang/sv/punctuation.py index 67f1bcdc4..64f1da989 100644 --- a/spacy/lang/sv/punctuation.py +++ b/spacy/lang/sv/punctuation.py @@ -1,8 +1,13 @@ -from ..char_classes import LIST_ELLIPSES, LIST_ICONS -from ..char_classes import CONCAT_QUOTES, ALPHA, ALPHA_LOWER, ALPHA_UPPER +from ..char_classes import ( + ALPHA, + ALPHA_LOWER, + ALPHA_UPPER, + CONCAT_QUOTES, + LIST_ELLIPSES, + LIST_ICONS, +) from ..punctuation import TOKENIZER_SUFFIXES - _quotes = CONCAT_QUOTES.replace("'", "") _infixes = ( diff --git a/spacy/lang/sv/syntax_iterators.py b/spacy/lang/sv/syntax_iterators.py index 06ad016ac..09153a8ec 100644 --- a/spacy/lang/sv/syntax_iterators.py +++ b/spacy/lang/sv/syntax_iterators.py @@ -1,7 +1,7 @@ -from typing import Union, Iterator, Tuple +from typing import Iterator, Tuple, Union -from ...symbols import NOUN, PROPN, PRON from ...errors import Errors +from ...symbols import NOUN, PRON, PROPN from ...tokens import Doc, Span diff --git a/spacy/lang/sv/tokenizer_exceptions.py b/spacy/lang/sv/tokenizer_exceptions.py index ce7db895a..8fd3afbe3 100644 --- a/spacy/lang/sv/tokenizer_exceptions.py +++ b/spacy/lang/sv/tokenizer_exceptions.py @@ -1,6 +1,6 @@ -from ..tokenizer_exceptions import BASE_EXCEPTIONS from ...symbols import NORM, ORTH from ...util import update_exc +from ..tokenizer_exceptions import BASE_EXCEPTIONS _exc = {} diff --git a/spacy/lang/ta/__init__.py b/spacy/lang/ta/__init__.py index 4929a4b97..7fd29371a 100644 --- a/spacy/lang/ta/__init__.py +++ b/spacy/lang/ta/__init__.py @@ -1,6 +1,6 @@ -from .stop_words import STOP_WORDS +from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS -from ...language import Language, BaseDefaults +from .stop_words import STOP_WORDS class TamilDefaults(BaseDefaults): diff --git a/spacy/lang/ta/lex_attrs.py b/spacy/lang/ta/lex_attrs.py index f830f4ac9..d66125552 100644 --- a/spacy/lang/ta/lex_attrs.py +++ b/spacy/lang/ta/lex_attrs.py @@ -1,6 +1,5 @@ from ...attrs import LIKE_NUM - _numeral_suffixes = {"பத்து": "பது", "ற்று": "று", "ரத்து": "ரம்", "சத்து": "சம்"} _num_words = [ "பூச்சியம்", diff --git a/spacy/lang/te/__init__.py b/spacy/lang/te/__init__.py index 77cc2fe9b..611e9746a 100644 --- 
a/spacy/lang/te/__init__.py +++ b/spacy/lang/te/__init__.py @@ -1,6 +1,6 @@ -from .stop_words import STOP_WORDS +from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS -from ...language import Language, BaseDefaults +from .stop_words import STOP_WORDS class TeluguDefaults(BaseDefaults): diff --git a/spacy/lang/th/__init__.py b/spacy/lang/th/__init__.py index 12b1527e0..bd29d32a4 100644 --- a/spacy/lang/th/__init__.py +++ b/spacy/lang/th/__init__.py @@ -1,10 +1,9 @@ -from .stop_words import STOP_WORDS -from .lex_attrs import LEX_ATTRS -from ...language import Language, BaseDefaults +from ...language import BaseDefaults, Language from ...tokens import Doc -from ...util import DummyTokenizer, registry, load_config_from_str +from ...util import DummyTokenizer, load_config_from_str, registry from ...vocab import Vocab - +from .lex_attrs import LEX_ATTRS +from .stop_words import STOP_WORDS DEFAULT_CONFIG = """ [nlp] diff --git a/spacy/lang/th/lex_attrs.py b/spacy/lang/th/lex_attrs.py index bc4e5293e..80f6ccbe8 100644 --- a/spacy/lang/th/lex_attrs.py +++ b/spacy/lang/th/lex_attrs.py @@ -1,6 +1,5 @@ from ...attrs import LIKE_NUM - _num_words = [ "ศูนย์", "หนึ่ง", diff --git a/spacy/lang/th/tokenizer_exceptions.py b/spacy/lang/th/tokenizer_exceptions.py index 92116d474..954766d28 100644 --- a/spacy/lang/th/tokenizer_exceptions.py +++ b/spacy/lang/th/tokenizer_exceptions.py @@ -1,6 +1,5 @@ from ...symbols import ORTH - _exc = { # หน่วยงานรัฐ / government agency "กกต.": [{ORTH: "กกต."}], diff --git a/spacy/lang/ti/__init__.py b/spacy/lang/ti/__init__.py index c74c081b5..510999f67 100644 --- a/spacy/lang/ti/__init__.py +++ b/spacy/lang/ti/__init__.py @@ -1,12 +1,11 @@ -from .stop_words import STOP_WORDS +from ...attrs import LANG +from ...language import BaseDefaults, Language +from ...util import update_exc +from ..tokenizer_exceptions import BASE_EXCEPTIONS from .lex_attrs import LEX_ATTRS from .punctuation import TOKENIZER_SUFFIXES - +from .stop_words import STOP_WORDS from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS -from ..tokenizer_exceptions import BASE_EXCEPTIONS -from ...language import Language, BaseDefaults -from ...attrs import LANG -from ...util import update_exc class TigrinyaDefaults(BaseDefaults): diff --git a/spacy/lang/ti/punctuation.py b/spacy/lang/ti/punctuation.py index aa884c2ba..f29f30e26 100644 --- a/spacy/lang/ti/punctuation.py +++ b/spacy/lang/ti/punctuation.py @@ -1,5 +1,11 @@ -from ..char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES, CURRENCY -from ..char_classes import UNITS, ALPHA_UPPER +from ..char_classes import ( + ALPHA_UPPER, + CURRENCY, + LIST_ELLIPSES, + LIST_PUNCT, + LIST_QUOTES, + UNITS, +) _list_punct = LIST_PUNCT + "፡ ። ፣ ፤ ፥ ፦ ፧ ፠ ፨".strip().split() diff --git a/spacy/lang/ti/tokenizer_exceptions.py b/spacy/lang/ti/tokenizer_exceptions.py index 3d79cd84b..711e4b406 100644 --- a/spacy/lang/ti/tokenizer_exceptions.py +++ b/spacy/lang/ti/tokenizer_exceptions.py @@ -1,5 +1,4 @@ -from ...symbols import ORTH, NORM - +from ...symbols import NORM, ORTH _exc = {} diff --git a/spacy/lang/tl/__init__.py b/spacy/lang/tl/__init__.py index 30838890a..6849810ef 100644 --- a/spacy/lang/tl/__init__.py +++ b/spacy/lang/tl/__init__.py @@ -1,7 +1,7 @@ -from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS -from .stop_words import STOP_WORDS +from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS -from ...language import Language, BaseDefaults +from .stop_words import STOP_WORDS +from .tokenizer_exceptions import 
TOKENIZER_EXCEPTIONS class TagalogDefaults(BaseDefaults): diff --git a/spacy/lang/tl/lex_attrs.py b/spacy/lang/tl/lex_attrs.py index 60bdc923b..8866453a0 100644 --- a/spacy/lang/tl/lex_attrs.py +++ b/spacy/lang/tl/lex_attrs.py @@ -1,6 +1,5 @@ from ...attrs import LIKE_NUM - _num_words = [ "sero", "isa", diff --git a/spacy/lang/tl/tokenizer_exceptions.py b/spacy/lang/tl/tokenizer_exceptions.py index 51ad12d9f..b10c90437 100644 --- a/spacy/lang/tl/tokenizer_exceptions.py +++ b/spacy/lang/tl/tokenizer_exceptions.py @@ -1,7 +1,6 @@ -from ..tokenizer_exceptions import BASE_EXCEPTIONS -from ...symbols import ORTH, NORM +from ...symbols import NORM, ORTH from ...util import update_exc - +from ..tokenizer_exceptions import BASE_EXCEPTIONS _exc = { "tayo'y": [{ORTH: "tayo"}, {ORTH: "'y", NORM: "ay"}], diff --git a/spacy/lang/tn/__init__.py b/spacy/lang/tn/__init__.py index 28e887eea..4cb8a1635 100644 --- a/spacy/lang/tn/__init__.py +++ b/spacy/lang/tn/__init__.py @@ -1,7 +1,7 @@ -from .stop_words import STOP_WORDS +from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS from .punctuation import TOKENIZER_INFIXES -from ...language import Language, BaseDefaults +from .stop_words import STOP_WORDS class SetswanaDefaults(BaseDefaults): diff --git a/spacy/lang/tn/punctuation.py b/spacy/lang/tn/punctuation.py index a52755564..54d76fbaf 100644 --- a/spacy/lang/tn/punctuation.py +++ b/spacy/lang/tn/punctuation.py @@ -1,5 +1,12 @@ -from ..char_classes import LIST_ELLIPSES, LIST_ICONS, HYPHENS -from ..char_classes import CONCAT_QUOTES, ALPHA_LOWER, ALPHA_UPPER, ALPHA +from ..char_classes import ( + ALPHA, + ALPHA_LOWER, + ALPHA_UPPER, + CONCAT_QUOTES, + HYPHENS, + LIST_ELLIPSES, + LIST_ICONS, +) _infixes = ( LIST_ELLIPSES diff --git a/spacy/lang/tokenizer_exceptions.py b/spacy/lang/tokenizer_exceptions.py index d76fe4262..dbf9aab49 100644 --- a/spacy/lang/tokenizer_exceptions.py +++ b/spacy/lang/tokenizer_exceptions.py @@ -1,8 +1,7 @@ import re +from ..symbols import NORM, ORTH from .char_classes import ALPHA_LOWER -from ..symbols import ORTH, NORM - # URL validation regex courtesy of: https://mathiasbynens.be/demo/url-regex # and https://gist.github.com/dperini/729294 (Diego Perini, MIT License) diff --git a/spacy/lang/tr/__init__.py b/spacy/lang/tr/__init__.py index 02b5c7bf4..9aa752168 100644 --- a/spacy/lang/tr/__init__.py +++ b/spacy/lang/tr/__init__.py @@ -1,8 +1,8 @@ -from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS, TOKEN_MATCH +from ...language import BaseDefaults, Language +from .lex_attrs import LEX_ATTRS from .stop_words import STOP_WORDS from .syntax_iterators import SYNTAX_ITERATORS -from .lex_attrs import LEX_ATTRS -from ...language import Language, BaseDefaults +from .tokenizer_exceptions import TOKEN_MATCH, TOKENIZER_EXCEPTIONS class TurkishDefaults(BaseDefaults): diff --git a/spacy/lang/tr/examples.py b/spacy/lang/tr/examples.py index dfb324a4e..c912c950d 100644 --- a/spacy/lang/tr/examples.py +++ b/spacy/lang/tr/examples.py @@ -15,4 +15,7 @@ sentences = [ "Türkiye'nin başkenti neresi?", "Bakanlar Kurulu 180 günlük eylem planını açıkladı.", "Merkez Bankası, beklentiler doğrultusunda faizlerde değişikliğe gitmedi.", + "Cemal Sureya kimdir?", + "Bunlari Biliyor muydunuz?", + "Altinoluk Turkiye haritasinin neresinde yer alir?", ] diff --git a/spacy/lang/tr/lex_attrs.py b/spacy/lang/tr/lex_attrs.py index 6d9f4f388..2189932b6 100644 --- a/spacy/lang/tr/lex_attrs.py +++ b/spacy/lang/tr/lex_attrs.py @@ -1,6 +1,5 @@ from ...attrs import LIKE_NUM - # Thirteen, fifteen 
etc. are written separate: on üç _num_words = [ diff --git a/spacy/lang/tr/syntax_iterators.py b/spacy/lang/tr/syntax_iterators.py index 769af1223..ed588424a 100644 --- a/spacy/lang/tr/syntax_iterators.py +++ b/spacy/lang/tr/syntax_iterators.py @@ -1,7 +1,8 @@ -from typing import Union, Iterator, Tuple -from ...tokens import Doc, Span -from ...symbols import NOUN, PROPN, PRON +from typing import Iterator, Tuple, Union + from ...errors import Errors +from ...symbols import NOUN, PRON, PROPN +from ...tokens import Doc, Span def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]: diff --git a/spacy/lang/tr/tokenizer_exceptions.py b/spacy/lang/tr/tokenizer_exceptions.py index 22fa9f09e..d095a3d0e 100644 --- a/spacy/lang/tr/tokenizer_exceptions.py +++ b/spacy/lang/tr/tokenizer_exceptions.py @@ -1,8 +1,7 @@ import re -from ..punctuation import ALPHA_LOWER, ALPHA -from ...symbols import ORTH, NORM - +from ...symbols import NORM, ORTH +from ..punctuation import ALPHA, ALPHA_LOWER _exc = {} diff --git a/spacy/lang/tt/__init__.py b/spacy/lang/tt/__init__.py index d5e1e87ef..ce04d09c2 100644 --- a/spacy/lang/tt/__init__.py +++ b/spacy/lang/tt/__init__.py @@ -1,8 +1,8 @@ +from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS from .punctuation import TOKENIZER_INFIXES from .stop_words import STOP_WORDS from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS -from ...language import Language, BaseDefaults class TatarDefaults(BaseDefaults): diff --git a/spacy/lang/tt/punctuation.py b/spacy/lang/tt/punctuation.py index f644a8ccb..5c233df7c 100644 --- a/spacy/lang/tt/punctuation.py +++ b/spacy/lang/tt/punctuation.py @@ -1,5 +1,12 @@ -from ..char_classes import ALPHA, ALPHA_LOWER, ALPHA_UPPER, CONCAT_QUOTES, HYPHENS -from ..char_classes import LIST_ELLIPSES, LIST_ICONS +from ..char_classes import ( + ALPHA, + ALPHA_LOWER, + ALPHA_UPPER, + CONCAT_QUOTES, + HYPHENS, + LIST_ELLIPSES, + LIST_ICONS, +) _hyphens_no_dash = HYPHENS.replace("-", "").strip("|").replace("||", "") _infixes = ( diff --git a/spacy/lang/tt/tokenizer_exceptions.py b/spacy/lang/tt/tokenizer_exceptions.py index 3b8cc86b5..280b9f866 100644 --- a/spacy/lang/tt/tokenizer_exceptions.py +++ b/spacy/lang/tt/tokenizer_exceptions.py @@ -1,7 +1,6 @@ -from ..tokenizer_exceptions import BASE_EXCEPTIONS -from ...symbols import ORTH, NORM +from ...symbols import NORM, ORTH from ...util import update_exc - +from ..tokenizer_exceptions import BASE_EXCEPTIONS _exc = {} diff --git a/spacy/lang/uk/__init__.py b/spacy/lang/uk/__init__.py index bfea9ff69..5dd75a2a4 100644 --- a/spacy/lang/uk/__init__.py +++ b/spacy/lang/uk/__init__.py @@ -1,14 +1,16 @@ -from typing import Optional, Callable +from typing import Callable, Optional from thinc.api import Model -from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS -from .stop_words import STOP_WORDS -from .lex_attrs import LEX_ATTRS +from ...language import BaseDefaults, Language +from ..punctuation import ( + COMBINING_DIACRITICS_TOKENIZER_INFIXES, + COMBINING_DIACRITICS_TOKENIZER_SUFFIXES, +) from .lemmatizer import UkrainianLemmatizer -from ..punctuation import COMBINING_DIACRITICS_TOKENIZER_INFIXES -from ..punctuation import COMBINING_DIACRITICS_TOKENIZER_SUFFIXES -from ...language import Language, BaseDefaults +from .lex_attrs import LEX_ATTRS +from .stop_words import STOP_WORDS +from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS class UkrainianDefaults(BaseDefaults): diff --git a/spacy/lang/uk/lemmatizer.py b/spacy/lang/uk/lemmatizer.py index 
37015cc2a..9ec582b76 100644 --- a/spacy/lang/uk/lemmatizer.py +++ b/spacy/lang/uk/lemmatizer.py @@ -1,10 +1,10 @@ -from typing import Optional, Callable +from typing import Callable, Optional from thinc.api import Model -from ..ru.lemmatizer import RussianLemmatizer from ...pipeline.lemmatizer import lemmatizer_score from ...vocab import Vocab +from ..ru.lemmatizer import RussianLemmatizer class UkrainianLemmatizer(RussianLemmatizer): diff --git a/spacy/lang/uk/tokenizer_exceptions.py b/spacy/lang/uk/tokenizer_exceptions.py index 7e168a27c..07dd941af 100644 --- a/spacy/lang/uk/tokenizer_exceptions.py +++ b/spacy/lang/uk/tokenizer_exceptions.py @@ -1,7 +1,6 @@ -from ..tokenizer_exceptions import BASE_EXCEPTIONS -from ...symbols import ORTH, NORM +from ...symbols import NORM, ORTH from ...util import update_exc - +from ..tokenizer_exceptions import BASE_EXCEPTIONS _exc = {} diff --git a/spacy/lang/ur/__init__.py b/spacy/lang/ur/__init__.py index 266c5a73d..4f20ac92f 100644 --- a/spacy/lang/ur/__init__.py +++ b/spacy/lang/ur/__init__.py @@ -1,7 +1,7 @@ -from .stop_words import STOP_WORDS +from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS from .punctuation import TOKENIZER_SUFFIXES -from ...language import Language, BaseDefaults +from .stop_words import STOP_WORDS class UrduDefaults(BaseDefaults): diff --git a/spacy/lang/ur/punctuation.py b/spacy/lang/ur/punctuation.py index 5d35d0a25..382bfc75c 100644 --- a/spacy/lang/ur/punctuation.py +++ b/spacy/lang/ur/punctuation.py @@ -1,4 +1,3 @@ from ..punctuation import TOKENIZER_SUFFIXES - _suffixes = TOKENIZER_SUFFIXES diff --git a/spacy/lang/vi/__init__.py b/spacy/lang/vi/__init__.py index 822dc348c..a621b8bfe 100644 --- a/spacy/lang/vi/__init__.py +++ b/spacy/lang/vi/__init__.py @@ -1,17 +1,17 @@ -from typing import Any, Dict, Union -from pathlib import Path import re -import srsly import string +from pathlib import Path +from typing import Any, Dict, Union + +import srsly -from .stop_words import STOP_WORDS -from .lex_attrs import LEX_ATTRS -from ...language import Language, BaseDefaults -from ...tokens import Doc -from ...util import DummyTokenizer, registry, load_config_from_str -from ...vocab import Vocab from ... 
import util - +from ...language import BaseDefaults, Language +from ...tokens import Doc +from ...util import DummyTokenizer, load_config_from_str, registry +from ...vocab import Vocab +from .lex_attrs import LEX_ATTRS +from .stop_words import STOP_WORDS DEFAULT_CONFIG = """ [nlp] diff --git a/spacy/lang/vi/lex_attrs.py b/spacy/lang/vi/lex_attrs.py index 0cbda4ffb..82997a133 100644 --- a/spacy/lang/vi/lex_attrs.py +++ b/spacy/lang/vi/lex_attrs.py @@ -1,6 +1,5 @@ from ...attrs import LIKE_NUM - _num_words = [ "không", # Zero "một", # One diff --git a/spacy/lang/yo/__init__.py b/spacy/lang/yo/__init__.py index 6c38ec8af..93c4ca493 100644 --- a/spacy/lang/yo/__init__.py +++ b/spacy/lang/yo/__init__.py @@ -1,6 +1,6 @@ -from .stop_words import STOP_WORDS +from ...language import BaseDefaults, Language from .lex_attrs import LEX_ATTRS -from ...language import Language, BaseDefaults +from .stop_words import STOP_WORDS class YorubaDefaults(BaseDefaults): diff --git a/spacy/lang/yo/lex_attrs.py b/spacy/lang/yo/lex_attrs.py index ead68ced2..5f33e06a5 100644 --- a/spacy/lang/yo/lex_attrs.py +++ b/spacy/lang/yo/lex_attrs.py @@ -2,7 +2,6 @@ import unicodedata from ...attrs import LIKE_NUM - _num_words = [ "ení", "oókàn", diff --git a/spacy/lang/zh/__init__.py b/spacy/lang/zh/__init__.py index fdf6776e2..f7bb09277 100644 --- a/spacy/lang/zh/__init__.py +++ b/spacy/lang/zh/__init__.py @@ -1,21 +1,21 @@ -from typing import Optional, List, Dict, Any, Callable, Iterable -from enum import Enum import tempfile -import srsly import warnings +from enum import Enum from pathlib import Path +from typing import Any, Callable, Dict, Iterable, List, Optional -from ...errors import Warnings, Errors -from ...language import Language, BaseDefaults +import srsly + +from ... import util +from ...errors import Errors, Warnings +from ...language import BaseDefaults, Language from ...scorer import Scorer from ...tokens import Doc -from ...training import validate_examples, Example -from ...util import DummyTokenizer, registry, load_config_from_str +from ...training import Example, validate_examples +from ...util import DummyTokenizer, load_config_from_str, registry from ...vocab import Vocab from .lex_attrs import LEX_ATTRS from .stop_words import STOP_WORDS -from ... 
import util - # fmt: off _PKUSEG_INSTALL_MSG = "install spacy-pkuseg with `pip install \"spacy-pkuseg>=0.0.27,<0.1.0\"` or `conda install -c conda-forge \"spacy-pkuseg>=0.0.27,<0.1.0\"`" diff --git a/spacy/lang/zh/lex_attrs.py b/spacy/lang/zh/lex_attrs.py index 08c8e3160..36fa7310a 100644 --- a/spacy/lang/zh/lex_attrs.py +++ b/spacy/lang/zh/lex_attrs.py @@ -2,7 +2,6 @@ import re from ...attrs import LIKE_NUM - _single_num_words = [ "〇", "一", diff --git a/spacy/language.py b/spacy/language.py index 0e9ff6893..18d20c939 100644 --- a/spacy/language.py +++ b/spacy/language.py @@ -1,47 +1,72 @@ -from typing import Iterator, Optional, Any, Dict, Callable, Iterable -from typing import Union, Tuple, List, Set, Pattern, Sequence -from typing import NoReturn, TypeVar, cast, overload - -from dataclasses import dataclass -import random -import itertools import functools +import inspect +import itertools +import multiprocessing as mp +import random +import traceback +import warnings from contextlib import contextmanager from copy import deepcopy -from pathlib import Path -import warnings - -from thinc.api import get_current_ops, Config, CupyOps, Optimizer -import srsly -import multiprocessing as mp +from dataclasses import dataclass from itertools import chain, cycle +from pathlib import Path from timeit import default_timer as timer -import traceback +from typing import ( + Any, + Callable, + Dict, + Iterable, + Iterator, + List, + NoReturn, + Optional, + Pattern, + Sequence, + Set, + Tuple, + TypeVar, + Union, + cast, + overload, +) -from . import ty -from .tokens.underscore import Underscore -from .vocab import Vocab, create_vocab -from .pipe_analysis import validate_attrs, analyze_pipes, print_pipe_analysis -from .training import Example, validate_examples -from .training.initialize import init_vocab, init_tok2vec -from .scorer import Scorer -from .util import registry, SimpleFrozenList, _pipe, raise_error, _DEFAULT_EMPTY_PIPES -from .util import SimpleFrozenDict, combine_score_weights, CONFIG_SECTION_ORDER -from .util import warn_if_jupyter_cupy -from .lang.tokenizer_exceptions import URL_MATCH, BASE_EXCEPTIONS -from .lang.punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES -from .lang.punctuation import TOKENIZER_INFIXES -from .tokens import Doc -from .tokenizer import Tokenizer -from .errors import Errors, Warnings -from .schemas import ConfigSchema, ConfigSchemaNlp, ConfigSchemaInit -from .schemas import ConfigSchemaPretrain, validate_init_settings -from .git_info import GIT_VERSION -from . import util -from . import about -from .lookups import load_lookups +import srsly +from thinc.api import Config, CupyOps, Optimizer, get_current_ops + +from . 
import about, ty, util from .compat import Literal - +from .errors import Errors, Warnings +from .git_info import GIT_VERSION +from .lang.punctuation import TOKENIZER_INFIXES, TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES +from .lang.tokenizer_exceptions import BASE_EXCEPTIONS, URL_MATCH +from .lookups import load_lookups +from .pipe_analysis import analyze_pipes, print_pipe_analysis, validate_attrs +from .schemas import ( + ConfigSchema, + ConfigSchemaInit, + ConfigSchemaNlp, + ConfigSchemaPretrain, + validate_init_settings, +) +from .scorer import Scorer +from .tokenizer import Tokenizer +from .tokens import Doc +from .tokens.underscore import Underscore +from .training import Example, validate_examples +from .training.initialize import init_tok2vec, init_vocab +from .util import ( + _DEFAULT_EMPTY_PIPES, + CONFIG_SECTION_ORDER, + SimpleFrozenDict, + SimpleFrozenList, + _pipe, + combine_score_weights, + raise_error, + registry, + warn_if_jupyter_cupy, +) +from .vectors import BaseVectors +from .vocab import Vocab, create_vocab PipeCallable = Callable[[Doc], Doc] @@ -134,6 +159,7 @@ class Language: max_length: int = 10**6, meta: Dict[str, Any] = {}, create_tokenizer: Optional[Callable[["Language"], Callable[[str], Doc]]] = None, + create_vectors: Optional[Callable[["Vocab"], BaseVectors]] = None, batch_size: int = 1000, **kwargs, ) -> None: @@ -174,6 +200,10 @@ class Language: if vocab is True: vectors_name = meta.get("vectors", {}).get("name") vocab = create_vocab(self.lang, self.Defaults, vectors_name=vectors_name) + if not create_vectors: + vectors_cfg = {"vectors": self._config["nlp"]["vectors"]} + create_vectors = registry.resolve(vectors_cfg)["vectors"] + vocab.vectors = create_vectors(vocab) else: if (self.lang and vocab.lang) and (self.lang != vocab.lang): raise ValueError(Errors.E150.format(nlp=self.lang, vocab=vocab.lang)) @@ -716,6 +746,11 @@ class Language: ) ) pipe = source.get_pipe(source_name) + # There is no actual solution here. Either the component has the right + # name for the source pipeline or the component has the right name for + # the current pipeline. This prioritizes the current pipeline. + if hasattr(pipe, "name"): + pipe.name = name # Make sure the source config is interpolated so we don't end up with # orphaned variables in our final config source_config = source.config.interpolate() @@ -793,6 +828,7 @@ class Language: pipe_index = self._get_pipe_index(before, after, first, last) self._pipe_meta[name] = self.get_factory_meta(factory_name) self._components.insert(pipe_index, (name, pipe_component)) + self._link_components() return pipe_component def _get_pipe_index( @@ -928,6 +964,7 @@ class Language: if old_name in self._config["initialize"]["components"]: init_cfg = self._config["initialize"]["components"].pop(old_name) self._config["initialize"]["components"][new_name] = init_cfg + self._link_components() def remove_pipe(self, name: str) -> Tuple[str, PipeCallable]: """Remove a component from the pipeline. @@ -951,6 +988,7 @@ class Language: # Make sure the name is also removed from the set of disabled components if name in self.disabled: self._disabled.remove(name) + self._link_components() return removed def disable_pipe(self, name: str) -> None: @@ -1645,6 +1683,12 @@ class Language: for proc in procs: proc.start() + # Close writing-end of channels. This is needed to avoid that reading + # from the channel blocks indefinitely when the worker closes the + # channel. + for tx in bytedocs_send_ch: + tx.close() + # Cycle channels not to break the order of docs. 
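
Note on the new vectors hook: the `create_vectors` parameter added to `Language.__init__` above lets the vectors table be built by a registered factory (resolved from the `@vectors` key, with `spacy.Vectors.v1` as the default entry filled in by `from_config` below) instead of always using the built-in table. A minimal sketch of such a factory, assuming the `vectors` registry table implied by the `registry.resolve` call above and the `Vectors`/`BaseVectors` classes imported in this hunk; `my_vectors.v1` is a made-up name used only for illustration:

    from typing import Callable

    from spacy.util import registry
    from spacy.vectors import Vectors
    from spacy.vocab import Vocab


    # "my_vectors.v1" is a hypothetical name; the default entry is "spacy.Vectors.v1".
    @registry.vectors("my_vectors.v1")
    def create_my_vectors() -> Callable[[Vocab], Vectors]:
        def vectors_factory(vocab: Vocab) -> Vectors:
            # Any BaseVectors-compatible implementation could be returned here.
            return Vectors(strings=vocab.strings)

        return vectors_factory

A config can then point `[nlp.vectors]` at `@vectors = "my_vectors.v1"`, and `Language.__init__` calls the resolved factory with the freshly created `Vocab`, as in the `create_vectors(vocab)` call above.
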
# The received object is a batch of byte-encoded docs, so flatten them with chain.from_iterable. byte_tuples = chain.from_iterable( @@ -1667,8 +1711,27 @@ class Language: # tell `sender` that one batch was consumed. sender.step() finally: + # If we are stopping in an orderly fashion, the workers' queues + # are empty. Put the sentinel in their queues to signal that work + # is done, so that they can exit gracefully. + for q in texts_q: + q.put(_WORK_DONE_SENTINEL) + q.close() + + # Otherwise, we are stopping because the error handler raised an + # exception. The sentinel will be last to go out of the queue. + # To avoid doing unnecessary work or hanging on platforms that + # block on sending (Windows), we'll close our end of the channel. + # This signals to the worker that it can exit the next time it + # attempts to send data down the channel. + for r in bytedocs_recv_ch: + r.close() + for proc in procs: - proc.terminate() + proc.join() + + if not all(proc.exitcode == 0 for proc in procs): + warnings.warn(Warnings.W127) def _link_components(self) -> None: """Register 'listeners' within pipeline components, to allow them to @@ -1679,8 +1742,16 @@ class Language: # The problem is we need to do it during deserialization...And the # components don't receive the pipeline then. So this does have to be # here :( + # First, fix up all the internal component names in case they have + # gotten out of sync due to sourcing components from different + # pipelines, since find_listeners uses proc2.name for the listener + # map. + for name, proc in self.pipeline: + if hasattr(proc, "name"): + proc.name = name for i, (name1, proc1) in enumerate(self.pipeline): if isinstance(proc1, ty.ListenedToComponent): + proc1.listener_map = {} for name2, proc2 in self.pipeline[i + 1 :]: proc1.find_listeners(proc2) @@ -1725,6 +1796,10 @@ class Language: ).merge(config) if "nlp" not in config: raise ValueError(Errors.E985.format(config=config)) + # fill in [nlp.vectors] if not present (as a narrower alternative to + # auto-filling [nlp] from the default config) + if "vectors" not in config["nlp"]: + config["nlp"]["vectors"] = {"@vectors": "spacy.Vectors.v1"} config_lang = config["nlp"].get("lang") if config_lang is not None and config_lang != cls.lang: raise ValueError( @@ -1756,6 +1831,7 @@ class Language: filled["nlp"], validate=validate, schema=ConfigSchemaNlp ) create_tokenizer = resolved_nlp["tokenizer"] + create_vectors = resolved_nlp["vectors"] before_creation = resolved_nlp["before_creation"] after_creation = resolved_nlp["after_creation"] after_pipeline_creation = resolved_nlp["after_pipeline_creation"] @@ -1776,7 +1852,12 @@ class Language: # inside stuff like the spacy train function. If we loaded them here, # then we would load them twice at runtime: once when we make from config, # and then again when we load from disk. - nlp = lang_cls(vocab=vocab, create_tokenizer=create_tokenizer, meta=meta) + nlp = lang_cls( + vocab=vocab, + create_tokenizer=create_tokenizer, + create_vectors=create_vectors, + meta=meta, + ) if after_creation is not None: nlp = after_creation(nlp) if not isinstance(nlp, cls): @@ -1786,7 +1867,6 @@ class Language: # Later we replace the component config with the raw config again. 
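
The shutdown changes above replace `proc.terminate()` with an orderly protocol: the parent closes its copy of each worker's writing connection, puts an end-of-work sentinel in each input queue, and joins the workers, warning if any exit code is non-zero. A self-contained sketch of that pattern using only the standard library (names here are illustrative, not spaCy's internals):

    import multiprocessing as mp

    _WORK_DONE = "__work_done__"  # stand-in for the module-level sentinel object


    def worker(queue, sender):
        """Consume work items until the end-of-work sentinel arrives."""
        while True:
            item = queue.get()
            if item == _WORK_DONE:
                # Orderly shutdown: close our end of the pipe and exit with code 0.
                sender.close()
                return
            try:
                sender.send(item * 2)  # placeholder for the real work
            except BrokenPipeError:
                # The parent closed its reading end early (error-handling path).
                sender.close()
                return


    if __name__ == "__main__":
        queue = mp.Queue()
        receiver, sender = mp.Pipe(duplex=False)
        proc = mp.Process(target=worker, args=(queue, sender))
        proc.start()
        sender.close()                  # keep only the worker's writing end open
        queue.put(21)
        print(receiver.recv())          # -> 42
        queue.put(_WORK_DONE)           # signal that work is done
        queue.close()
        proc.join()                     # join instead of terminate
        assert proc.exitcode == 0

Closing the parent's copy of the writing end is what makes a later read fail fast instead of blocking indefinitely once the worker has closed its own copy, which is the situation the comment above describes.
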
interpolated = filled.interpolate() if not filled.is_interpolated else filled pipeline = interpolated.get("components", {}) - sourced = util.get_sourced_components(interpolated) # If components are loaded from a source (existing models), we cache # them here so they're only loaded once source_nlps = {} @@ -1814,6 +1894,7 @@ class Language: raw_config=raw_config, ) else: + assert "source" in pipe_cfg # We need the sourced components to reference the same # vocab without modifying the current vocab state **AND** # we still want to load the source model vectors to perform @@ -1833,6 +1914,10 @@ class Language: source_name = pipe_cfg.get("component", pipe_name) listeners_replaced = False if "replace_listeners" in pipe_cfg: + # Make sure that the listened-to component has the + # state of the source pipeline listener map so that the + # replace_listeners method below works as intended. + source_nlps[model]._link_components() for name, proc in source_nlps[model].pipeline: if source_name in getattr(proc, "listening_components", []): source_nlps[model].replace_listeners( @@ -1844,6 +1929,8 @@ class Language: nlp.add_pipe( source_name, source=source_nlps[model], name=pipe_name ) + # At this point after nlp.add_pipe, the listener map + # corresponds to the new pipeline. if model not in source_nlp_vectors_hashes: source_nlp_vectors_hashes[model] = hash( source_nlps[model].vocab.vectors.to_bytes( @@ -1898,27 +1985,6 @@ class Language: raise ValueError( Errors.E942.format(name="pipeline_creation", value=type(nlp)) ) - # Detect components with listeners that are not frozen consistently - for name, proc in nlp.pipeline: - if isinstance(proc, ty.ListenedToComponent): - # Remove listeners not in the pipeline - listener_names = proc.listening_components - unused_listener_names = [ - ll for ll in listener_names if ll not in nlp.pipe_names - ] - for listener_name in unused_listener_names: - for listener in proc.listener_map.get(listener_name, []): - proc.remove_listener(listener, listener_name) - - for listener_name in proc.listening_components: - # e.g. tok2vec/transformer - # If it's a component sourced from another pipeline, we check if - # the tok2vec listeners should be replaced with standalone tok2vec - # models (e.g. so component can be frozen without its performance - # degrading when other components/tok2vec are updated) - paths = sourced.get(listener_name, {}).get("replace_listeners", []) - if paths: - nlp.replace_listeners(name, listener_name, paths) return nlp def replace_listeners( @@ -1933,7 +1999,7 @@ class Language: useful when training a pipeline with components sourced from an existing pipeline: if multiple components (e.g. tagger, parser, NER) listen to the same tok2vec component, but some of them are frozen and not updated, - their performance may degrade significally as the tok2vec component is + their performance may degrade significantly as the tok2vec component is updated with new data. To prevent this, listeners can be replaced with a standalone tok2vec layer that is owned by the component and doesn't change if the component isn't updated. 
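
The docstring above spells out the motivation for `replace_listeners`: a sourced, frozen component should not keep sharing a tok2vec layer that continues to train. A short usage sketch of the public API, assuming a trained pipeline such as `en_core_web_sm` is installed and that its tagger listens to the shared `tok2vec` component:

    import spacy

    nlp = spacy.load("en_core_web_sm")
    # The tagger listens to the shared "tok2vec" component; give it its own copy
    # of the layer referenced at "model.tok2vec" in its config.
    nlp.replace_listeners("tok2vec", "tagger", ["model.tok2vec"])

    doc = nlp("The tagger keeps working while tok2vec is updated elsewhere.")
    print([(token.text, token.tag_) for token in doc])

After the call, the tagger owns a standalone copy of the tok2vec layer, so further updates to the shared `tok2vec` no longer change its inputs.
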
@@ -2008,8 +2074,20 @@ class Language: # Go over the listener layers and replace them for listener in pipe_listeners: new_model = tok2vec_model.copy() - if "replace_listener" in tok2vec_model.attrs: - new_model = tok2vec_model.attrs["replace_listener"](new_model) + replace_listener_func = tok2vec_model.attrs.get("replace_listener") + if replace_listener_func is not None: + # Pass the extra args to the callback without breaking compatibility with + # old library versions that only expect a single parameter. + num_params = len( + inspect.signature(replace_listener_func).parameters + ) + if num_params == 1: + new_model = replace_listener_func(new_model) + elif num_params == 3: + new_model = replace_listener_func(new_model, listener, tok2vec) + else: + raise ValueError(Errors.E1055.format(num_params=num_params)) + util.replace_model_node(pipe.model, listener, new_model) # type: ignore[attr-defined] tok2vec.remove_listener(listener, pipe_name) @@ -2270,6 +2348,13 @@ def _apply_pipes( while True: try: texts_with_ctx = receiver.get() + + # Stop working if we encounter the end-of-work sentinel. + if isinstance(texts_with_ctx, _WorkDoneSentinel): + sender.close() + receiver.close() + return + docs = ( ensure_doc(doc_like, context) for doc_like, context in texts_with_ctx ) @@ -2278,11 +2363,23 @@ def _apply_pipes( # Connection does not accept unpickable objects, so send list. byte_docs = [(doc.to_bytes(), doc._context, None) for doc in docs] padding = [(None, None, None)] * (len(texts_with_ctx) - len(byte_docs)) - sender.send(byte_docs + padding) # type: ignore[operator] + data: Sequence[Tuple[Optional[bytes], Optional[Any], Optional[bytes]]] = ( + byte_docs + padding # type: ignore[operator] + ) except Exception: error_msg = [(None, None, srsly.msgpack_dumps(traceback.format_exc()))] padding = [(None, None, None)] * (len(texts_with_ctx) - 1) - sender.send(error_msg + padding) + data = error_msg + padding + + try: + sender.send(data) + except BrokenPipeError: + # Parent has closed the pipe prematurely. This happens when a + # worker encounters an error and the error handler is set to + # stop processing. 
+ sender.close() + receiver.close() + return class _Sender: @@ -2312,3 +2409,10 @@ class _Sender: if self.count >= self.chunk_size: self.count = 0 self.send() + + +class _WorkDoneSentinel: + pass + + +_WORK_DONE_SENTINEL = _WorkDoneSentinel() diff --git a/spacy/lexeme.pxd b/spacy/lexeme.pxd index 8dea0d6a2..ff2e4f92e 100644 --- a/spacy/lexeme.pxd +++ b/spacy/lexeme.pxd @@ -1,11 +1,20 @@ from numpy cimport ndarray -from .typedefs cimport attr_t, hash_t, flags_t, len_t, tag_t -from .attrs cimport attr_id_t -from .attrs cimport ID, ORTH, LOWER, NORM, SHAPE, PREFIX, SUFFIX, LENGTH, LANG - -from .structs cimport LexemeC +from .attrs cimport ( + ID, + LANG, + LENGTH, + LOWER, + NORM, + ORTH, + PREFIX, + SHAPE, + SUFFIX, + attr_id_t, +) from .strings cimport StringStore +from .structs cimport LexemeC +from .typedefs cimport attr_t, flags_t, hash_t, len_t, tag_t from .vocab cimport Vocab diff --git a/spacy/lexeme.pyi b/spacy/lexeme.pyi index 9b7a6156a..9980b9fce 100644 --- a/spacy/lexeme.pyi +++ b/spacy/lexeme.pyi @@ -1,8 +1,7 @@ -from typing import ( - Union, - Any, -) +from typing import Any, Union + from thinc.types import Floats1d + from .tokens import Doc, Span, Token from .vocab import Vocab diff --git a/spacy/lexeme.pyx b/spacy/lexeme.pyx index e70feaf9a..7a0c19bf3 100644 --- a/spacy/lexeme.pyx +++ b/spacy/lexeme.pyx @@ -1,25 +1,41 @@ # cython: embedsignature=True +# cython: profile=False # Compiler crashes on memory view coercion without this. Should report bug. -from cython.view cimport array as cvarray -from libc.string cimport memset cimport numpy as np +from libc.string cimport memset + np.import_array() +import warnings + import numpy from thinc.api import get_array_module -import warnings +from .attrs cimport ( + IS_ALPHA, + IS_ASCII, + IS_BRACKET, + IS_CURRENCY, + IS_DIGIT, + IS_LEFT_PUNCT, + IS_LOWER, + IS_PUNCT, + IS_QUOTE, + IS_RIGHT_PUNCT, + IS_SPACE, + IS_STOP, + IS_TITLE, + IS_UPPER, + LIKE_EMAIL, + LIKE_NUM, + LIKE_URL, +) from .typedefs cimport attr_t, flags_t -from .attrs cimport IS_ALPHA, IS_ASCII, IS_DIGIT, IS_LOWER, IS_PUNCT, IS_SPACE -from .attrs cimport IS_TITLE, IS_UPPER, LIKE_URL, LIKE_NUM, LIKE_EMAIL, IS_STOP -from .attrs cimport IS_BRACKET, IS_QUOTE, IS_LEFT_PUNCT, IS_RIGHT_PUNCT -from .attrs cimport IS_CURRENCY from .attrs import intify_attrs from .errors import Errors, Warnings - -OOV_RANK = 0xffffffffffffffff # UINT64_MAX +OOV_RANK = 0xffffffffffffffff # UINT64_MAX memset(&EMPTY_LEXEME, 0, sizeof(LexemeC)) EMPTY_LEXEME.id = OOV_RANK @@ -89,7 +105,7 @@ cdef class Lexeme: if isinstance(value, float): continue elif isinstance(value, (int, long)): - Lexeme.set_struct_attr(self.c, attr, value) + Lexeme.set_struct_attr(self.c, attr, value) else: Lexeme.set_struct_attr(self.c, attr, self.vocab.strings.add(value)) @@ -121,10 +137,12 @@ cdef class Lexeme: if hasattr(other, "orth"): if self.c.orth == other.orth: return 1.0 - elif hasattr(other, "__len__") and len(other) == 1 \ - and hasattr(other[0], "orth"): - if self.c.orth == other[0].orth: - return 1.0 + elif ( + hasattr(other, "__len__") and len(other) == 1 + and hasattr(other[0], "orth") + and self.c.orth == other[0].orth + ): + return 1.0 if self.vector_norm == 0 or other.vector_norm == 0: warnings.warn(Warnings.W008.format(obj="Lexeme")) return 0.0 @@ -133,7 +151,7 @@ cdef class Lexeme: result = xp.dot(vector, other.vector) / (self.vector_norm * other.vector_norm) # ensure we get a scalar back (numpy does this automatically but cupy doesn't) return result.item() - + @property def has_vector(self): """RETURNS 
(bool): Whether a word vector is associated with the object. @@ -146,45 +164,48 @@ cdef class Lexeme: vector = self.vector return numpy.sqrt((vector**2).sum()) - property vector: + @property + def vector(self): """A real-valued meaning representation. RETURNS (numpy.ndarray[ndim=1, dtype='float32']): A 1D numpy array representing the lexeme's semantics. """ - def __get__(self): - cdef int length = self.vocab.vectors_length - if length == 0: - raise ValueError(Errors.E010) - return self.vocab.get_vector(self.c.orth) + cdef int length = self.vocab.vectors_length + if length == 0: + raise ValueError(Errors.E010) + return self.vocab.get_vector(self.c.orth) - def __set__(self, vector): - if len(vector) != self.vocab.vectors_length: - raise ValueError(Errors.E073.format(new_length=len(vector), - length=self.vocab.vectors_length)) - self.vocab.set_vector(self.c.orth, vector) + @vector.setter + def vector(self, vector): + if len(vector) != self.vocab.vectors_length: + raise ValueError(Errors.E073.format(new_length=len(vector), + length=self.vocab.vectors_length)) + self.vocab.set_vector(self.c.orth, vector) - property rank: + @property + def rank(self): """RETURNS (str): Sequential ID of the lexeme's lexical type, used to index into tables, e.g. for word vectors.""" - def __get__(self): - return self.c.id + return self.c.id - def __set__(self, value): - self.c.id = value + @rank.setter + def rank(self, value): + self.c.id = value - property sentiment: + @property + def sentiment(self): """RETURNS (float): A scalar value indicating the positivity or negativity of the lexeme.""" - def __get__(self): - sentiment_table = self.vocab.lookups.get_table("lexeme_sentiment", {}) - return sentiment_table.get(self.c.orth, 0.0) + sentiment_table = self.vocab.lookups.get_table("lexeme_sentiment", {}) + return sentiment_table.get(self.c.orth, 0.0) - def __set__(self, float x): - if "lexeme_sentiment" not in self.vocab.lookups: - self.vocab.lookups.add_table("lexeme_sentiment") - sentiment_table = self.vocab.lookups.get_table("lexeme_sentiment") - sentiment_table[self.c.orth] = x + @sentiment.setter + def sentiment(self, float x): + if "lexeme_sentiment" not in self.vocab.lookups: + self.vocab.lookups.add_table("lexeme_sentiment") + sentiment_table = self.vocab.lookups.get_table("lexeme_sentiment") + sentiment_table[self.c.orth] = x @property def orth_(self): @@ -198,306 +219,338 @@ cdef class Lexeme: """RETURNS (str): The original verbatim text of the lexeme.""" return self.orth_ - property lower: + @property + def lower(self): """RETURNS (uint64): Lowercase form of the lexeme.""" - def __get__(self): - return self.c.lower + return self.c.lower - def __set__(self, attr_t x): - self.c.lower = x + @lower.setter + def lower(self, attr_t x): + self.c.lower = x - property norm: + @property + def norm(self): """RETURNS (uint64): The lexeme's norm, i.e. a normalised form of the lexeme text. 
""" - def __get__(self): - return self.c.norm + return self.c.norm - def __set__(self, attr_t x): - if "lexeme_norm" not in self.vocab.lookups: - self.vocab.lookups.add_table("lexeme_norm") - norm_table = self.vocab.lookups.get_table("lexeme_norm") - norm_table[self.c.orth] = self.vocab.strings[x] - self.c.norm = x + @norm.setter + def norm(self, attr_t x): + if "lexeme_norm" not in self.vocab.lookups: + self.vocab.lookups.add_table("lexeme_norm") + norm_table = self.vocab.lookups.get_table("lexeme_norm") + norm_table[self.c.orth] = self.vocab.strings[x] + self.c.norm = x - property shape: + @property + def shape(self): """RETURNS (uint64): Transform of the word's string, to show orthographic features. """ - def __get__(self): - return self.c.shape + return self.c.shape - def __set__(self, attr_t x): - self.c.shape = x + @shape.setter + def shape(self, attr_t x): + self.c.shape = x - property prefix: + @property + def prefix(self): """RETURNS (uint64): Length-N substring from the start of the word. Defaults to `N=1`. """ - def __get__(self): - return self.c.prefix + return self.c.prefix - def __set__(self, attr_t x): - self.c.prefix = x + @prefix.setter + def prefix(self, attr_t x): + self.c.prefix = x - property suffix: + @property + def suffix(self): """RETURNS (uint64): Length-N substring from the end of the word. Defaults to `N=3`. """ - def __get__(self): - return self.c.suffix + return self.c.suffix - def __set__(self, attr_t x): - self.c.suffix = x + @suffix.setter + def suffix(self, attr_t x): + self.c.suffix = x - property cluster: + @property + def cluster(self): """RETURNS (int): Brown cluster ID.""" - def __get__(self): - cluster_table = self.vocab.lookups.get_table("lexeme_cluster", {}) - return cluster_table.get(self.c.orth, 0) + cluster_table = self.vocab.lookups.get_table("lexeme_cluster", {}) + return cluster_table.get(self.c.orth, 0) - def __set__(self, int x): - cluster_table = self.vocab.lookups.get_table("lexeme_cluster", {}) - cluster_table[self.c.orth] = x + @cluster.setter + def cluster(self, int x): + cluster_table = self.vocab.lookups.get_table("lexeme_cluster", {}) + cluster_table[self.c.orth] = x - property lang: + @property + def lang(self): """RETURNS (uint64): Language of the parent vocabulary.""" - def __get__(self): - return self.c.lang + return self.c.lang - def __set__(self, attr_t x): - self.c.lang = x + @lang.setter + def lang(self, attr_t x): + self.c.lang = x - property prob: + @property + def prob(self): """RETURNS (float): Smoothed log probability estimate of the lexeme's type.""" - def __get__(self): - prob_table = self.vocab.lookups.get_table("lexeme_prob", {}) - settings_table = self.vocab.lookups.get_table("lexeme_settings", {}) - default_oov_prob = settings_table.get("oov_prob", -20.0) - return prob_table.get(self.c.orth, default_oov_prob) + prob_table = self.vocab.lookups.get_table("lexeme_prob", {}) + settings_table = self.vocab.lookups.get_table("lexeme_settings", {}) + default_oov_prob = settings_table.get("oov_prob", -20.0) + return prob_table.get(self.c.orth, default_oov_prob) - def __set__(self, float x): - prob_table = self.vocab.lookups.get_table("lexeme_prob", {}) - prob_table[self.c.orth] = x + @prob.setter + def prob(self, float x): + prob_table = self.vocab.lookups.get_table("lexeme_prob", {}) + prob_table[self.c.orth] = x - property lower_: + @property + def lower_(self): """RETURNS (str): Lowercase form of the word.""" - def __get__(self): - return self.vocab.strings[self.c.lower] + return self.vocab.strings[self.c.lower] - def 
__set__(self, str x): - self.c.lower = self.vocab.strings.add(x) + @lower_.setter + def lower_(self, str x): + self.c.lower = self.vocab.strings.add(x) - property norm_: + @property + def norm_(self): """RETURNS (str): The lexeme's norm, i.e. a normalised form of the lexeme text. """ - def __get__(self): - return self.vocab.strings[self.c.norm] + return self.vocab.strings[self.c.norm] - def __set__(self, str x): - self.norm = self.vocab.strings.add(x) + @norm_.setter + def norm_(self, str x): + self.norm = self.vocab.strings.add(x) - property shape_: + @property + def shape_(self): """RETURNS (str): Transform of the word's string, to show orthographic features. """ - def __get__(self): - return self.vocab.strings[self.c.shape] + return self.vocab.strings[self.c.shape] - def __set__(self, str x): - self.c.shape = self.vocab.strings.add(x) + @shape_.setter + def shape_(self, str x): + self.c.shape = self.vocab.strings.add(x) - property prefix_: + @property + def prefix_(self): """RETURNS (str): Length-N substring from the start of the word. Defaults to `N=1`. """ - def __get__(self): - return self.vocab.strings[self.c.prefix] + return self.vocab.strings[self.c.prefix] - def __set__(self, str x): - self.c.prefix = self.vocab.strings.add(x) + @prefix_.setter + def prefix_(self, str x): + self.c.prefix = self.vocab.strings.add(x) - property suffix_: + @property + def suffix_(self): """RETURNS (str): Length-N substring from the end of the word. Defaults to `N=3`. """ - def __get__(self): - return self.vocab.strings[self.c.suffix] + return self.vocab.strings[self.c.suffix] - def __set__(self, str x): - self.c.suffix = self.vocab.strings.add(x) + @suffix_.setter + def suffix_(self, str x): + self.c.suffix = self.vocab.strings.add(x) - property lang_: + @property + def lang_(self): """RETURNS (str): Language of the parent vocabulary.""" - def __get__(self): - return self.vocab.strings[self.c.lang] + return self.vocab.strings[self.c.lang] - def __set__(self, str x): - self.c.lang = self.vocab.strings.add(x) + @lang_.setter + def lang_(self, str x): + self.c.lang = self.vocab.strings.add(x) - property flags: + @property + def flags(self): """RETURNS (uint64): Container of the lexeme's binary flags.""" - def __get__(self): - return self.c.flags + return self.c.flags - def __set__(self, flags_t x): - self.c.flags = x + @flags.setter + def flags(self, flags_t x): + self.c.flags = x @property def is_oov(self): """RETURNS (bool): Whether the lexeme is out-of-vocabulary.""" return self.orth not in self.vocab.vectors - property is_stop: + @property + def is_stop(self): """RETURNS (bool): Whether the lexeme is a stop word.""" - def __get__(self): - return Lexeme.c_check_flag(self.c, IS_STOP) + return Lexeme.c_check_flag(self.c, IS_STOP) - def __set__(self, bint x): - Lexeme.c_set_flag(self.c, IS_STOP, x) + @is_stop.setter + def is_stop(self, bint x): + Lexeme.c_set_flag(self.c, IS_STOP, x) - property is_alpha: + @property + def is_alpha(self): """RETURNS (bool): Whether the lexeme consists of alphabetic characters. Equivalent to `lexeme.text.isalpha()`. """ - def __get__(self): - return Lexeme.c_check_flag(self.c, IS_ALPHA) + return Lexeme.c_check_flag(self.c, IS_ALPHA) - def __set__(self, bint x): - Lexeme.c_set_flag(self.c, IS_ALPHA, x) + @is_alpha.setter + def is_alpha(self, bint x): + Lexeme.c_set_flag(self.c, IS_ALPHA, x) - property is_ascii: + @property + def is_ascii(self): """RETURNS (bool): Whether the lexeme consists of ASCII characters. Equivalent to `[any(ord(c) >= 128 for c in lexeme.text)]`. 
""" - def __get__(self): - return Lexeme.c_check_flag(self.c, IS_ASCII) + return Lexeme.c_check_flag(self.c, IS_ASCII) - def __set__(self, bint x): - Lexeme.c_set_flag(self.c, IS_ASCII, x) + @is_ascii.setter + def is_ascii(self, bint x): + Lexeme.c_set_flag(self.c, IS_ASCII, x) - property is_digit: + @property + def is_digit(self): """RETURNS (bool): Whether the lexeme consists of digits. Equivalent to `lexeme.text.isdigit()`. """ - def __get__(self): - return Lexeme.c_check_flag(self.c, IS_DIGIT) + return Lexeme.c_check_flag(self.c, IS_DIGIT) - def __set__(self, bint x): - Lexeme.c_set_flag(self.c, IS_DIGIT, x) + @is_digit.setter + def is_digit(self, bint x): + Lexeme.c_set_flag(self.c, IS_DIGIT, x) - property is_lower: + @property + def is_lower(self): """RETURNS (bool): Whether the lexeme is in lowercase. Equivalent to `lexeme.text.islower()`. """ - def __get__(self): - return Lexeme.c_check_flag(self.c, IS_LOWER) + return Lexeme.c_check_flag(self.c, IS_LOWER) - def __set__(self, bint x): - Lexeme.c_set_flag(self.c, IS_LOWER, x) + @is_lower.setter + def is_lower(self, bint x): + Lexeme.c_set_flag(self.c, IS_LOWER, x) - property is_upper: + @property + def is_upper(self): """RETURNS (bool): Whether the lexeme is in uppercase. Equivalent to `lexeme.text.isupper()`. """ - def __get__(self): - return Lexeme.c_check_flag(self.c, IS_UPPER) + return Lexeme.c_check_flag(self.c, IS_UPPER) - def __set__(self, bint x): - Lexeme.c_set_flag(self.c, IS_UPPER, x) + @is_upper.setter + def is_upper(self, bint x): + Lexeme.c_set_flag(self.c, IS_UPPER, x) - property is_title: + @property + def is_title(self): """RETURNS (bool): Whether the lexeme is in titlecase. Equivalent to `lexeme.text.istitle()`. """ - def __get__(self): - return Lexeme.c_check_flag(self.c, IS_TITLE) + return Lexeme.c_check_flag(self.c, IS_TITLE) - def __set__(self, bint x): - Lexeme.c_set_flag(self.c, IS_TITLE, x) + @is_title.setter + def is_title(self, bint x): + Lexeme.c_set_flag(self.c, IS_TITLE, x) - property is_punct: + @property + def is_punct(self): """RETURNS (bool): Whether the lexeme is punctuation.""" - def __get__(self): - return Lexeme.c_check_flag(self.c, IS_PUNCT) + return Lexeme.c_check_flag(self.c, IS_PUNCT) - def __set__(self, bint x): - Lexeme.c_set_flag(self.c, IS_PUNCT, x) + @is_punct.setter + def is_punct(self, bint x): + Lexeme.c_set_flag(self.c, IS_PUNCT, x) - property is_space: + @property + def is_space(self): """RETURNS (bool): Whether the lexeme consist of whitespace characters. Equivalent to `lexeme.text.isspace()`. 
""" - def __get__(self): - return Lexeme.c_check_flag(self.c, IS_SPACE) + return Lexeme.c_check_flag(self.c, IS_SPACE) - def __set__(self, bint x): - Lexeme.c_set_flag(self.c, IS_SPACE, x) + @is_space.setter + def is_space(self, bint x): + Lexeme.c_set_flag(self.c, IS_SPACE, x) - property is_bracket: + @property + def is_bracket(self): """RETURNS (bool): Whether the lexeme is a bracket.""" - def __get__(self): - return Lexeme.c_check_flag(self.c, IS_BRACKET) + return Lexeme.c_check_flag(self.c, IS_BRACKET) - def __set__(self, bint x): - Lexeme.c_set_flag(self.c, IS_BRACKET, x) + @is_bracket.setter + def is_bracket(self, bint x): + Lexeme.c_set_flag(self.c, IS_BRACKET, x) - property is_quote: + @property + def is_quote(self): """RETURNS (bool): Whether the lexeme is a quotation mark.""" - def __get__(self): - return Lexeme.c_check_flag(self.c, IS_QUOTE) + return Lexeme.c_check_flag(self.c, IS_QUOTE) - def __set__(self, bint x): - Lexeme.c_set_flag(self.c, IS_QUOTE, x) + @is_quote.setter + def is_quote(self, bint x): + Lexeme.c_set_flag(self.c, IS_QUOTE, x) - property is_left_punct: + @property + def is_left_punct(self): """RETURNS (bool): Whether the lexeme is left punctuation, e.g. (.""" - def __get__(self): - return Lexeme.c_check_flag(self.c, IS_LEFT_PUNCT) + return Lexeme.c_check_flag(self.c, IS_LEFT_PUNCT) - def __set__(self, bint x): - Lexeme.c_set_flag(self.c, IS_LEFT_PUNCT, x) + @is_left_punct.setter + def is_left_punct(self, bint x): + Lexeme.c_set_flag(self.c, IS_LEFT_PUNCT, x) - property is_right_punct: + @property + def is_right_punct(self): """RETURNS (bool): Whether the lexeme is right punctuation, e.g. ).""" - def __get__(self): - return Lexeme.c_check_flag(self.c, IS_RIGHT_PUNCT) + return Lexeme.c_check_flag(self.c, IS_RIGHT_PUNCT) - def __set__(self, bint x): - Lexeme.c_set_flag(self.c, IS_RIGHT_PUNCT, x) + @is_right_punct.setter + def is_right_punct(self, bint x): + Lexeme.c_set_flag(self.c, IS_RIGHT_PUNCT, x) - property is_currency: + @property + def is_currency(self): """RETURNS (bool): Whether the lexeme is a currency symbol, e.g. $, €.""" - def __get__(self): - return Lexeme.c_check_flag(self.c, IS_CURRENCY) + return Lexeme.c_check_flag(self.c, IS_CURRENCY) - def __set__(self, bint x): - Lexeme.c_set_flag(self.c, IS_CURRENCY, x) + @is_currency.setter + def is_currency(self, bint x): + Lexeme.c_set_flag(self.c, IS_CURRENCY, x) - property like_url: + @property + def like_url(self): """RETURNS (bool): Whether the lexeme resembles a URL.""" - def __get__(self): - return Lexeme.c_check_flag(self.c, LIKE_URL) + return Lexeme.c_check_flag(self.c, LIKE_URL) - def __set__(self, bint x): - Lexeme.c_set_flag(self.c, LIKE_URL, x) + @like_url.setter + def like_url(self, bint x): + Lexeme.c_set_flag(self.c, LIKE_URL, x) - property like_num: + @property + def like_num(self): """RETURNS (bool): Whether the lexeme represents a number, e.g. "10.9", "10", "ten", etc. 
""" - def __get__(self): - return Lexeme.c_check_flag(self.c, LIKE_NUM) + return Lexeme.c_check_flag(self.c, LIKE_NUM) - def __set__(self, bint x): - Lexeme.c_set_flag(self.c, LIKE_NUM, x) + @like_num.setter + def like_num(self, bint x): + Lexeme.c_set_flag(self.c, LIKE_NUM, x) - property like_email: + @property + def like_email(self): """RETURNS (bool): Whether the lexeme resembles an email address.""" - def __get__(self): - return Lexeme.c_check_flag(self.c, LIKE_EMAIL) + return Lexeme.c_check_flag(self.c, LIKE_EMAIL) - def __set__(self, bint x): - Lexeme.c_set_flag(self.c, LIKE_EMAIL, x) + @like_email.setter + def like_email(self, bint x): + Lexeme.c_set_flag(self.c, LIKE_EMAIL, x) diff --git a/spacy/lookups.py b/spacy/lookups.py index d7cc44fb3..1a2c44bfa 100644 --- a/spacy/lookups.py +++ b/spacy/lookups.py @@ -1,13 +1,13 @@ -from typing import Any, List, Union, Optional, Dict +from collections import OrderedDict from pathlib import Path +from typing import Any, Dict, List, Optional, Union + import srsly from preshed.bloom import BloomFilter -from collections import OrderedDict from .errors import Errors -from .util import SimpleFrozenDict, ensure_path, registry, load_language_data from .strings import get_string_id - +from .util import SimpleFrozenDict, ensure_path, load_language_data, registry UNSET = object() diff --git a/spacy/matcher/__init__.py b/spacy/matcher/__init__.py index a4f164847..b6d6d70ab 100644 --- a/spacy/matcher/__init__.py +++ b/spacy/matcher/__init__.py @@ -1,6 +1,6 @@ -from .matcher import Matcher -from .phrasematcher import PhraseMatcher from .dependencymatcher import DependencyMatcher from .levenshtein import levenshtein +from .matcher import Matcher +from .phrasematcher import PhraseMatcher -__all__ = ["Matcher", "PhraseMatcher", "DependencyMatcher", "levenshtein"] +__all__ = ["DependencyMatcher", "Matcher", "PhraseMatcher", "levenshtein"] diff --git a/spacy/matcher/dependencymatcher.pyi b/spacy/matcher/dependencymatcher.pyi index c19d3a71c..b9fbabda7 100644 --- a/spacy/matcher/dependencymatcher.pyi +++ b/spacy/matcher/dependencymatcher.pyi @@ -1,8 +1,9 @@ from typing import Any, Callable, Dict, List, Optional, Tuple, Union -from .matcher import Matcher -from ..vocab import Vocab + from ..tokens.doc import Doc from ..tokens.span import Span +from ..vocab import Vocab +from .matcher import Matcher class DependencyMatcher: """Match dependency parse tree based on pattern rules.""" diff --git a/spacy/matcher/dependencymatcher.pyx b/spacy/matcher/dependencymatcher.pyx index 48fb3eb2a..ab5f5d5d1 100644 --- a/spacy/matcher/dependencymatcher.pyx +++ b/spacy/matcher/dependencymatcher.pyx @@ -1,18 +1,16 @@ -# cython: infer_types=True, profile=True -from typing import List +# cython: infer_types=True +import warnings from collections import defaultdict from itertools import product +from typing import List -import warnings - -from .matcher cimport Matcher -from ..vocab cimport Vocab from ..tokens.doc cimport Doc +from ..vocab cimport Vocab +from .matcher cimport Matcher from ..errors import Errors, Warnings from ..tokens import Span - DELIMITER = "||" INDEX_HEAD = 1 INDEX_RELOP = 0 @@ -110,7 +108,7 @@ cdef class DependencyMatcher: key (str): The match ID. RETURNS (bool): Whether the matcher contains rules for this match ID. 
""" - return self.has_key(key) + return self.has_key(key) # no-cython-lint: W601 def _validate_input(self, pattern, key): idx = 0 @@ -131,6 +129,7 @@ cdef class DependencyMatcher: else: required_keys = {"RIGHT_ID", "RIGHT_ATTRS", "REL_OP", "LEFT_ID"} relation_keys = set(relation.keys()) + # Identify required keys that have not been specified missing = required_keys - relation_keys if missing: missing_txt = ", ".join(list(missing)) @@ -138,6 +137,13 @@ cdef class DependencyMatcher: required=required_keys, missing=missing_txt )) + # Identify additional, unsupported keys + unsupported = relation_keys - required_keys + if unsupported: + unsupported_txt = ", ".join(list(unsupported)) + warnings.warn(Warnings.W126.format( + unsupported=unsupported_txt + )) if ( relation["RIGHT_ID"] in visited_nodes or relation["LEFT_ID"] not in visited_nodes @@ -266,7 +272,7 @@ cdef class DependencyMatcher: def remove(self, key): key = self._normalize_key(key) - if not key in self._patterns: + if key not in self._patterns: raise ValueError(Errors.E175.format(key=key)) self._patterns.pop(key) self._raw_patterns.pop(key) @@ -384,7 +390,7 @@ cdef class DependencyMatcher: return [] return [doc[node].head] - def _gov(self,doc,node): + def _gov(self, doc, node): return list(doc[node].children) def _dep_chain(self, doc, node): @@ -445,7 +451,7 @@ cdef class DependencyMatcher: def _right_child(self, doc, node): return [child for child in doc[node].rights] - + def _left_child(self, doc, node): return [child for child in doc[node].lefts] @@ -463,7 +469,7 @@ cdef class DependencyMatcher: if doc[node].head.i > node: return [doc[node].head] return [] - + def _left_parent(self, doc, node): if doc[node].head.i < node: return [doc[node].head] diff --git a/spacy/matcher/levenshtein.pyx b/spacy/matcher/levenshtein.pyx index e823ce99d..e394f2cf4 100644 --- a/spacy/matcher/levenshtein.pyx +++ b/spacy/matcher/levenshtein.pyx @@ -1,4 +1,4 @@ -# cython: profile=True, binding=True, infer_types=True +# cython: binding=True, infer_types=True from cpython.object cimport PyObject from libc.stdint cimport int64_t diff --git a/spacy/matcher/matcher.pxd b/spacy/matcher/matcher.pxd index 51854d562..2c82cea1d 100644 --- a/spacy/matcher/matcher.pxd +++ b/spacy/matcher/matcher.pxd @@ -1,11 +1,11 @@ +from cymem.cymem cimport Pool from libc.stdint cimport int32_t from libcpp.vector cimport vector -from cymem.cymem cimport Pool -from ..vocab cimport Vocab -from ..typedefs cimport attr_t, hash_t -from ..structs cimport TokenC from ..lexeme cimport attr_id_t +from ..structs cimport TokenC +from ..typedefs cimport attr_t, hash_t +from ..vocab cimport Vocab cdef enum action_t: diff --git a/spacy/matcher/matcher.pyi b/spacy/matcher/matcher.pyi index 48922865b..c33b534cb 100644 --- a/spacy/matcher/matcher.pyi +++ b/spacy/matcher/matcher.pyi @@ -1,8 +1,19 @@ -from typing import Any, List, Dict, Tuple, Optional, Callable, Union -from typing import Iterator, Iterable, overload +from typing import ( + Any, + Callable, + Dict, + Iterable, + Iterator, + List, + Optional, + Tuple, + Union, + overload, +) + from ..compat import Literal -from ..vocab import Vocab from ..tokens import Doc, Span +from ..vocab import Vocab class Matcher: def __init__( diff --git a/spacy/matcher/matcher.pyx b/spacy/matcher/matcher.pyx index b886bd2ec..9a9ed4212 100644 --- a/spacy/matcher/matcher.pyx +++ b/spacy/matcher/matcher.pyx @@ -1,32 +1,30 @@ -# cython: binding=True, infer_types=True, profile=True -from typing import List, Iterable +# cython: binding=True, infer_types=True +from 
typing import Iterable, List -from libcpp.vector cimport vector -from libc.stdint cimport int32_t, int8_t -from libc.string cimport memset, memcmp from cymem.cymem cimport Pool +from libc.stdint cimport int8_t, int32_t +from libc.string cimport memcmp, memset +from libcpp.vector cimport vector from murmurhash.mrmr cimport hash64 import re -import srsly import warnings -from ..typedefs cimport attr_t +import srsly + +from ..attrs cimport DEP, ENT_IOB, ID, LEMMA, MORPH, NULL_ATTR, POS, TAG from ..structs cimport TokenC -from ..vocab cimport Vocab from ..tokens.doc cimport Doc, get_token_attr_for_matcher +from ..tokens.morphanalysis cimport MorphAnalysis from ..tokens.span cimport Span from ..tokens.token cimport Token -from ..tokens.morphanalysis cimport MorphAnalysis -from ..attrs cimport ID, attr_id_t, NULL_ATTR, ORTH, POS, TAG, DEP, LEMMA, MORPH, ENT_IOB +from ..typedefs cimport attr_t -from .levenshtein import levenshtein_compare -from ..schemas import validate_token_pattern -from ..errors import Errors, MatchPatternError, Warnings -from ..strings import get_string_id from ..attrs import IDS -from ..util import registry - +from ..errors import Errors, MatchPatternError, Warnings +from ..schemas import validate_token_pattern +from ..strings import get_string_id +from .levenshtein import levenshtein_compare DEF PADDING = 5 @@ -76,9 +74,9 @@ cdef class Matcher: key (str): The match ID. RETURNS (bool): Whether the matcher contains rules for this match ID. """ - return self.has_key(key) + return self.has_key(key) # no-cython-lint: W601 - def add(self, key, patterns, *, on_match=None, greedy: str=None): + def add(self, key, patterns, *, on_match=None, greedy: str = None): """Add a match-rule to the matcher. A match-rule consists of: an ID key, an on_match callback, and one or more patterns. @@ -132,8 +130,13 @@ cdef class Matcher: key = self._normalize_key(key) for pattern in patterns: try: - specs = _preprocess_pattern(pattern, self.vocab, - self._extensions, self._extra_predicates, self._fuzzy_compare) + specs = _preprocess_pattern( + pattern, + self.vocab, + self._extensions, + self._extra_predicates, + self._fuzzy_compare + ) self.patterns.push_back(init_pattern(self.mem, key, specs)) for spec in specs: for attr, _ in spec[1]: @@ -157,7 +160,7 @@ cdef class Matcher: key (str): The ID of the match rule. 
""" norm_key = self._normalize_key(key) - if not norm_key in self._patterns: + if norm_key not in self._patterns: raise ValueError(Errors.E175.format(key=key)) self._patterns.pop(norm_key) self._callbacks.pop(norm_key) @@ -257,8 +260,15 @@ cdef class Matcher: if self.patterns.empty(): matches = [] else: - matches = find_matches(&self.patterns[0], self.patterns.size(), doclike, length, - extensions=self._extensions, predicates=self._extra_predicates, with_alignments=with_alignments) + matches = find_matches( + &self.patterns[0], + self.patterns.size(), + doclike, + length, + extensions=self._extensions, + predicates=self._extra_predicates, + with_alignments=with_alignments + ) final_matches = [] pairs_by_id = {} # For each key, either add all matches, or only the filtered, @@ -278,9 +288,9 @@ cdef class Matcher: memset(matched, 0, length * sizeof(matched[0])) span_filter = self._filter.get(key) if span_filter == "FIRST": - sorted_pairs = sorted(pairs, key=lambda x: (x[0], -x[1]), reverse=False) # sort by start + sorted_pairs = sorted(pairs, key=lambda x: (x[0], -x[1]), reverse=False) # sort by start elif span_filter == "LONGEST": - sorted_pairs = sorted(pairs, key=lambda x: (x[1]-x[0], -x[0]), reverse=True) # reverse sort by length + sorted_pairs = sorted(pairs, key=lambda x: (x[1]-x[0], -x[0]), reverse=True) # reverse sort by length else: raise ValueError(Errors.E947.format(expected=["FIRST", "LONGEST"], arg=span_filter)) for match in sorted_pairs: @@ -355,7 +365,6 @@ cdef find_matches(TokenPatternC** patterns, int n, object doclike, int length, e cdef vector[MatchC] matches cdef vector[vector[MatchAlignmentC]] align_states cdef vector[vector[MatchAlignmentC]] align_matches - cdef PatternStateC state cdef int i, j, nr_extra_attr cdef Pool mem = Pool() output = [] @@ -377,14 +386,22 @@ cdef find_matches(TokenPatternC** patterns, int n, object doclike, int length, e value = token.vocab.strings[value] extra_attr_values[i * nr_extra_attr + index] = value # Main loop - cdef int nr_predicate = len(predicates) for i in range(length): for j in range(n): states.push_back(PatternStateC(patterns[j], i, 0)) if with_alignments != 0: align_states.resize(states.size()) - transition_states(states, matches, align_states, align_matches, predicate_cache, - doclike[i], extra_attr_values, predicates, with_alignments) + transition_states( + states, + matches, + align_states, + align_matches, + predicate_cache, + doclike[i], + extra_attr_values, + predicates, + with_alignments + ) extra_attr_values += nr_extra_attr predicate_cache += len(predicates) # Handle matches that end in 0-width patterns @@ -410,18 +427,28 @@ cdef find_matches(TokenPatternC** patterns, int n, object doclike, int length, e return output -cdef void transition_states(vector[PatternStateC]& states, vector[MatchC]& matches, - vector[vector[MatchAlignmentC]]& align_states, vector[vector[MatchAlignmentC]]& align_matches, - int8_t* cached_py_predicates, - Token token, const attr_t* extra_attrs, py_predicates, bint with_alignments) except *: +cdef void transition_states( + vector[PatternStateC]& states, + vector[MatchC]& matches, + vector[vector[MatchAlignmentC]]& align_states, + vector[vector[MatchAlignmentC]]& align_matches, + int8_t* cached_py_predicates, + Token token, + const attr_t* extra_attrs, + py_predicates, + bint with_alignments +) except *: cdef int q = 0 cdef vector[PatternStateC] new_states cdef vector[vector[MatchAlignmentC]] align_new_states - cdef int nr_predicate = len(py_predicates) for i in range(states.size()): if 
states[i].pattern.nr_py >= 1: - update_predicate_cache(cached_py_predicates, - states[i].pattern, token, py_predicates) + update_predicate_cache( + cached_py_predicates, + states[i].pattern, + token, + py_predicates + ) action = get_action(states[i], token.c, extra_attrs, cached_py_predicates) if action == REJECT: @@ -457,8 +484,12 @@ cdef void transition_states(vector[PatternStateC]& states, vector[MatchC]& match align_new_states.push_back(align_states[q]) states[q].pattern += 1 if states[q].pattern.nr_py != 0: - update_predicate_cache(cached_py_predicates, - states[q].pattern, token, py_predicates) + update_predicate_cache( + cached_py_predicates, + states[q].pattern, + token, + py_predicates + ) action = get_action(states[q], token.c, extra_attrs, cached_py_predicates) # Update alignment before the transition of current state @@ -474,8 +505,12 @@ cdef void transition_states(vector[PatternStateC]& states, vector[MatchC]& match ent_id = get_ent_id(state.pattern) if action == MATCH: matches.push_back( - MatchC(pattern_id=ent_id, start=state.start, - length=state.length+1)) + MatchC( + pattern_id=ent_id, + start=state.start, + length=state.length+1 + ) + ) # `align_matches` always corresponds to `matches` 1:1 if with_alignments != 0: align_matches.push_back(align_states[q]) @@ -483,23 +518,35 @@ cdef void transition_states(vector[PatternStateC]& states, vector[MatchC]& match # push match without last token if length > 0 if state.length > 0: matches.push_back( - MatchC(pattern_id=ent_id, start=state.start, - length=state.length)) + MatchC( + pattern_id=ent_id, + start=state.start, + length=state.length + ) + ) # MATCH_DOUBLE emits matches twice, # add one more to align_matches in order to keep 1:1 relationship if with_alignments != 0: align_matches.push_back(align_states[q]) # push match with last token matches.push_back( - MatchC(pattern_id=ent_id, start=state.start, - length=state.length+1)) + MatchC( + pattern_id=ent_id, + start=state.start, + length=state.length + 1 + ) + ) # `align_matches` always corresponds to `matches` 1:1 if with_alignments != 0: align_matches.push_back(align_states[q]) elif action == MATCH_REJECT: matches.push_back( - MatchC(pattern_id=ent_id, start=state.start, - length=state.length)) + MatchC( + pattern_id=ent_id, + start=state.start, + length=state.length + ) + ) # `align_matches` always corresponds to `matches` 1:1 if with_alignments != 0: align_matches.push_back(align_states[q]) @@ -522,8 +569,12 @@ cdef void transition_states(vector[PatternStateC]& states, vector[MatchC]& match align_states.push_back(align_new_states[i]) -cdef int update_predicate_cache(int8_t* cache, - const TokenPatternC* pattern, Token token, predicates) except -1: +cdef int update_predicate_cache( + int8_t* cache, + const TokenPatternC* pattern, + Token token, + predicates +) except -1: # If the state references any extra predicates, check whether they match. # These are cached, so that we don't call these potentially expensive # Python functions more than we need to. @@ -569,10 +620,12 @@ cdef void finish_states(vector[MatchC]& matches, vector[PatternStateC]& states, else: state.pattern += 1 - -cdef action_t get_action(PatternStateC state, - const TokenC* token, const attr_t* extra_attrs, - const int8_t* predicate_matches) nogil: +cdef action_t get_action( + PatternStateC state, + const TokenC * token, + const attr_t * extra_attrs, + const int8_t * predicate_matches +) nogil: """We need to consider: a) Does the token match the specification? [Yes, No] b) What's the quantifier? [1, 0+, ?] 
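
The docstring above frames each transition as a combination of "does the token match?" and "what is the quantifier?". At the API level those quantifiers are the `OP` values in token patterns; a small, self-contained usage example (the pattern and text are made up):

    import spacy
    from spacy.matcher import Matcher

    nlp = spacy.blank("en")
    matcher = Matcher(nlp.vocab)
    # Exactly one "hello", an optional punctuation token ("?" = zero or one),
    # then exactly one "world".
    matcher.add(
        "HELLO_WORLD",
        [[{"LOWER": "hello"}, {"IS_PUNCT": True, "OP": "?"}, {"LOWER": "world"}]],
    )

    doc = nlp("Hello, world! Hello world!")
    for match_id, start, end in matcher(doc):
        print(nlp.vocab.strings[match_id], doc[start:end].text)

The `"OP": "?"` token is handled by the ZERO_ONE branch of `get_action` in the next hunk, which is why that branch has to emit matches both with and without the optional token.
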
@@ -638,53 +691,56 @@ cdef action_t get_action(PatternStateC state, is_match = not is_match quantifier = ONE if quantifier == ONE: - if is_match and is_final: - # Yes, final: 1000 - return MATCH - elif is_match and not is_final: - # Yes, non-final: 0100 - return ADVANCE - elif not is_match and is_final: - # No, final: 0000 - return REJECT - else: - return REJECT + if is_match and is_final: + # Yes, final: 1000 + return MATCH + elif is_match and not is_final: + # Yes, non-final: 0100 + return ADVANCE + elif not is_match and is_final: + # No, final: 0000 + return REJECT + else: + return REJECT elif quantifier == ZERO_PLUS: - if is_match and is_final: - # Yes, final: 1001 - return MATCH_EXTEND - elif is_match and not is_final: - # Yes, non-final: 0011 - return RETRY_EXTEND - elif not is_match and is_final: - # No, final 2000 (note: Don't include last token!) - return MATCH_REJECT - else: - # No, non-final 0010 - return RETRY + if is_match and is_final: + # Yes, final: 1001 + return MATCH_EXTEND + elif is_match and not is_final: + # Yes, non-final: 0011 + return RETRY_EXTEND + elif not is_match and is_final: + # No, final 2000 (note: Don't include last token!) + return MATCH_REJECT + else: + # No, non-final 0010 + return RETRY elif quantifier == ZERO_ONE: - if is_match and is_final: - # Yes, final: 3000 - # To cater for a pattern ending in "?", we need to add - # a match both with and without the last token - return MATCH_DOUBLE - elif is_match and not is_final: - # Yes, non-final: 0110 - # We need both branches here, consider a pair like: - # pattern: .?b string: b - # If we 'ADVANCE' on the .?, we miss the match. - return RETRY_ADVANCE - elif not is_match and is_final: - # No, final 2000 (note: Don't include last token!) - return MATCH_REJECT - else: - # No, non-final 0010 - return RETRY + if is_match and is_final: + # Yes, final: 3000 + # To cater for a pattern ending in "?", we need to add + # a match both with and without the last token + return MATCH_DOUBLE + elif is_match and not is_final: + # Yes, non-final: 0110 + # We need both branches here, consider a pair like: + # pattern: .?b string: b + # If we 'ADVANCE' on the .?, we miss the match. + return RETRY_ADVANCE + elif not is_match and is_final: + # No, final 2000 (note: Don't include last token!) 
+ return MATCH_REJECT + else: + # No, non-final 0010 + return RETRY -cdef int8_t get_is_match(PatternStateC state, - const TokenC* token, const attr_t* extra_attrs, - const int8_t* predicate_matches) nogil: +cdef int8_t get_is_match( + PatternStateC state, + const TokenC* token, + const attr_t* extra_attrs, + const int8_t* predicate_matches +) nogil: for i in range(state.pattern.nr_py): if predicate_matches[state.pattern.py_predicates[i]] == -1: return 0 @@ -849,7 +905,7 @@ class _FuzzyPredicate: self.is_extension = is_extension if self.predicate not in self.operators: raise ValueError(Errors.E126.format(good=self.operators, bad=self.predicate)) - fuzz = self.predicate[len("FUZZY"):] # number after prefix + fuzz = self.predicate[len("FUZZY"):] # number after prefix self.fuzzy = int(fuzz) if fuzz else -1 self.fuzzy_compare = fuzzy_compare self.key = _predicate_cache_key(self.attr, self.predicate, value, fuzzy=self.fuzzy) @@ -1071,7 +1127,7 @@ def _get_extra_predicates_dict(attr, value_dict, vocab, predicate_types, elif cls == _FuzzyPredicate: if isinstance(value, dict): # add predicates inside fuzzy operator - fuzz = type_[len("FUZZY"):] # number after prefix + fuzz = type_[len("FUZZY"):] # number after prefix fuzzy_val = int(fuzz) if fuzz else -1 output.extend(_get_extra_predicates_dict(attr, value, vocab, predicate_types, extra_predicates, seen_predicates, @@ -1090,8 +1146,9 @@ def _get_extra_predicates_dict(attr, value_dict, vocab, predicate_types, return output -def _get_extension_extra_predicates(spec, extra_predicates, predicate_types, - seen_predicates): +def _get_extension_extra_predicates( + spec, extra_predicates, predicate_types, seen_predicates +): output = [] for attr, value in spec.items(): if isinstance(value, dict): @@ -1120,7 +1177,7 @@ def _get_operators(spec): return (ONE,) elif spec["OP"] in lookup: return lookup[spec["OP"]] - #Min_max {n,m} + # Min_max {n,m} elif spec["OP"].startswith("{") and spec["OP"].endswith("}"): # {n} --> {n,n} exactly n ONE,(n) # {n,m}--> {n,m} min of n, max of m ONE,(n),ZERO_ONE,(m) @@ -1131,8 +1188,8 @@ def _get_operators(spec): min_max = min_max if "," in min_max else f"{min_max},{min_max}" n, m = min_max.split(",") - #1. Either n or m is a blank string and the other is numeric -->isdigit - #2. Both are numeric and n <= m + # 1. Either n or m is a blank string and the other is numeric -->isdigit + # 2. 
Both are numeric and n <= m if (not n.isdecimal() and not m.isdecimal()) or (n.isdecimal() and m.isdecimal() and int(n) > int(m)): keys = ", ".join(lookup.keys()) + ", {n}, {n,m}, {n,}, {,m} where n and m are integers and n <= m " raise ValueError(Errors.E011.format(op=spec["OP"], opts=keys)) diff --git a/spacy/matcher/phrasematcher.pxd b/spacy/matcher/phrasematcher.pxd index 1bdc19012..bffc1ac97 100644 --- a/spacy/matcher/phrasematcher.pxd +++ b/spacy/matcher/phrasematcher.pxd @@ -1,6 +1,6 @@ -from libcpp.vector cimport vector from cymem.cymem cimport Pool -from preshed.maps cimport key_t, MapStruct +from libcpp.vector cimport vector +from preshed.maps cimport MapStruct, key_t from ..attrs cimport attr_id_t from ..structs cimport SpanC diff --git a/spacy/matcher/phrasematcher.pyi b/spacy/matcher/phrasematcher.pyi index 68e3386e4..27f6ba373 100644 --- a/spacy/matcher/phrasematcher.pyi +++ b/spacy/matcher/phrasematcher.pyi @@ -1,12 +1,13 @@ -from typing import List, Tuple, Union, Optional, Callable, Any, Dict, overload +from typing import Any, Callable, Dict, List, Optional, Tuple, Union, overload + from ..compat import Literal -from .matcher import Matcher -from ..vocab import Vocab from ..tokens import Doc, Span +from ..vocab import Vocab +from .matcher import Matcher class PhraseMatcher: def __init__( - self, vocab: Vocab, attr: Optional[Union[int, str]], validate: bool = ... + self, vocab: Vocab, attr: Optional[Union[int, str]] = ..., validate: bool = ... ) -> None: ... def __reduce__(self) -> Any: ... def __len__(self) -> int: ... diff --git a/spacy/matcher/phrasematcher.pyx b/spacy/matcher/phrasematcher.pyx index 382029872..4efcdb05c 100644 --- a/spacy/matcher/phrasematcher.pyx +++ b/spacy/matcher/phrasematcher.pyx @@ -1,18 +1,18 @@ -# cython: infer_types=True, profile=True -from libc.stdint cimport uintptr_t -from preshed.maps cimport map_init, map_set, map_get, map_clear, map_iter +# cython: infer_types=True +from preshed.maps cimport map_clear, map_get, map_init, map_iter, map_set import warnings -from ..attrs cimport ORTH, POS, TAG, DEP, LEMMA, MORPH +from ..attrs cimport DEP, LEMMA, MORPH, POS, TAG + from ..attrs import IDS -from ..structs cimport TokenC -from ..tokens.token cimport Token + from ..tokens.span cimport Span +from ..tokens.token cimport Token from ..typedefs cimport attr_t -from ..schemas import TokenPattern from ..errors import Errors, Warnings +from ..schemas import TokenPattern cdef class PhraseMatcher: diff --git a/spacy/ml/_character_embed.py b/spacy/ml/_character_embed.py index e46735102..89c836144 100644 --- a/spacy/ml/_character_embed.py +++ b/spacy/ml/_character_embed.py @@ -1,4 +1,5 @@ from typing import List + from thinc.api import Model from thinc.types import Floats2d diff --git a/spacy/ml/callbacks.py b/spacy/ml/callbacks.py index 3b60ec2ab..e2378a7ba 100644 --- a/spacy/ml/callbacks.py +++ b/spacy/ml/callbacks.py @@ -1,8 +1,8 @@ -from typing import Type, Callable, Dict, TYPE_CHECKING, List, Optional, Set import functools import inspect import types import warnings +from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Set, Type from thinc.layers import with_nvtx_range from thinc.model import Model, wrap_model_recursive diff --git a/spacy/ml/extract_ngrams.py b/spacy/ml/extract_ngrams.py index c9c82f369..ce7c585cc 100644 --- a/spacy/ml/extract_ngrams.py +++ b/spacy/ml/extract_ngrams.py @@ -1,7 +1,7 @@ from thinc.api import Model -from ..util import registry from ..attrs import LOWER +from ..util import registry 
@registry.layers("spacy.extract_ngrams.v1") diff --git a/spacy/ml/extract_spans.py b/spacy/ml/extract_spans.py index af6be78db..ac0f5fa1b 100644 --- a/spacy/ml/extract_spans.py +++ b/spacy/ml/extract_spans.py @@ -1,6 +1,7 @@ -from typing import List, Tuple, Callable +from typing import Callable, List, Tuple + from thinc.api import Model, to_numpy -from thinc.types import Ragged, Ints1d +from thinc.types import Ints1d, Ragged from ..util import registry diff --git a/spacy/ml/featureextractor.py b/spacy/ml/featureextractor.py index ed2918f02..06f1ff51a 100644 --- a/spacy/ml/featureextractor.py +++ b/spacy/ml/featureextractor.py @@ -1,6 +1,7 @@ -from typing import List, Union, Callable, Tuple -from thinc.types import Ints2d +from typing import Callable, List, Tuple, Union + from thinc.api import Model, registry +from thinc.types import Ints2d from ..tokens import Doc diff --git a/spacy/ml/models/entity_linker.py b/spacy/ml/models/entity_linker.py index 7332ca199..b7100c00a 100644 --- a/spacy/ml/models/entity_linker.py +++ b/spacy/ml/models/entity_linker.py @@ -1,16 +1,31 @@ from pathlib import Path -from typing import Optional, Callable, Iterable, List, Tuple -from thinc.types import Floats2d -from thinc.api import chain, list2ragged, reduce_mean, residual -from thinc.api import Model, Maxout, Linear, tuplify, Ragged +from typing import Callable, Iterable, List, Optional, Tuple + +from thinc.api import ( + Linear, + Maxout, + Model, + Ragged, + chain, + list2ragged, + reduce_mean, + residual, + tuplify, +) +from thinc.types import Floats2d -from ...util import registry -from ...kb import KnowledgeBase, InMemoryLookupKB -from ...kb import Candidate, get_candidates, get_candidates_batch -from ...vocab import Vocab -from ...tokens import Span, Doc -from ..extract_spans import extract_spans from ...errors import Errors +from ...kb import ( + Candidate, + InMemoryLookupKB, + KnowledgeBase, + get_candidates, + get_candidates_batch, +) +from ...tokens import Doc, Span +from ...util import registry +from ...vocab import Vocab +from ..extract_spans import extract_spans @registry.architectures("spacy.EntityLinker.v2") diff --git a/spacy/ml/models/multi_task.py b/spacy/ml/models/multi_task.py index 7eb13b608..b7faf1cd7 100644 --- a/spacy/ml/models/multi_task.py +++ b/spacy/ml/models/multi_task.py @@ -1,22 +1,33 @@ -from typing import Any, Optional, Iterable, Tuple, List, Callable, TYPE_CHECKING, cast -from thinc.types import Floats2d, Ints1d -from thinc.api import chain, Maxout, LayerNorm, Softmax, Linear, zero_init, Model -from thinc.api import MultiSoftmax, list2array -from thinc.api import to_categorical, CosineDistance, L2Distance -from thinc.loss import Loss - -from ...util import registry, OOV_RANK -from ...errors import Errors -from ...attrs import ID, ORTH -from ...vectors import Mode as VectorsMode +from functools import partial +from typing import TYPE_CHECKING, Any, Callable, Iterable, List, Optional, Tuple, cast import numpy -from functools import partial +from thinc.api import ( + CosineDistance, + L2Distance, + LayerNorm, + Linear, + Maxout, + Model, + MultiSoftmax, + Softmax, + chain, + list2array, + to_categorical, + zero_init, +) +from thinc.loss import Loss +from thinc.types import Floats2d, Ints1d + +from ...attrs import ID, ORTH +from ...errors import Errors +from ...util import OOV_RANK, registry +from ...vectors import Mode as VectorsMode if TYPE_CHECKING: # This lets us add type hints for mypy etc. 
without causing circular imports - from ...vocab import Vocab # noqa: F401 from ...tokens.doc import Doc # noqa: F401 + from ...vocab import Vocab # noqa: F401 @registry.architectures("spacy.PretrainVectors.v1") diff --git a/spacy/ml/models/parser.py b/spacy/ml/models/parser.py index a70d84dea..f6c0e565d 100644 --- a/spacy/ml/models/parser.py +++ b/spacy/ml/models/parser.py @@ -1,13 +1,14 @@ -from typing import Optional, List, cast -from thinc.api import Model, chain, list2array, Linear, zero_init, use_ops +from typing import List, Optional, cast + +from thinc.api import Linear, Model, chain, list2array, use_ops, zero_init from thinc.types import Floats2d -from ...errors import Errors from ...compat import Literal +from ...errors import Errors +from ...tokens import Doc from ...util import registry from .._precomputable_affine import PrecomputableAffine from ..tb_framework import TransitionModel -from ...tokens import Doc @registry.architectures("spacy.TransitionBasedParser.v2") diff --git a/spacy/ml/models/span_finder.py b/spacy/ml/models/span_finder.py index a805e2086..d327fc761 100644 --- a/spacy/ml/models/span_finder.py +++ b/spacy/ml/models/span_finder.py @@ -4,7 +4,6 @@ from thinc.api import Model, chain, with_array from thinc.types import Floats1d, Floats2d from ...tokens import Doc - from ...util import registry InT = List[Doc] diff --git a/spacy/ml/models/spancat.py b/spacy/ml/models/spancat.py index 893db2e6d..140ec553a 100644 --- a/spacy/ml/models/spancat.py +++ b/spacy/ml/models/spancat.py @@ -1,11 +1,24 @@ from typing import List, Tuple, cast -from thinc.api import Model, with_getitem, chain, list2ragged, Logistic -from thinc.api import Maxout, Linear, concatenate, glorot_uniform_init -from thinc.api import reduce_mean, reduce_max, reduce_first, reduce_last -from thinc.types import Ragged, Floats2d -from ...util import registry +from thinc.api import ( + Linear, + Logistic, + Maxout, + Model, + chain, + concatenate, + glorot_uniform_init, + list2ragged, + reduce_first, + reduce_last, + reduce_max, + reduce_mean, + with_getitem, +) +from thinc.types import Floats2d, Ragged + from ...tokens import Doc +from ...util import registry from ..extract_spans import extract_spans diff --git a/spacy/ml/models/tagger.py b/spacy/ml/models/tagger.py index 9f8ef7b2b..8f1554fab 100644 --- a/spacy/ml/models/tagger.py +++ b/spacy/ml/models/tagger.py @@ -1,9 +1,10 @@ -from typing import Optional, List -from thinc.api import zero_init, with_array, Softmax_v2, chain, Model +from typing import List, Optional + +from thinc.api import Model, Softmax_v2, chain, with_array, zero_init from thinc.types import Floats2d -from ...util import registry from ...tokens import Doc +from ...util import registry @registry.architectures("spacy.Tagger.v2") diff --git a/spacy/ml/models/textcat.py b/spacy/ml/models/textcat.py index 9c7e607fe..601c94a7f 100644 --- a/spacy/ml/models/textcat.py +++ b/spacy/ml/models/textcat.py @@ -1,22 +1,46 @@ -from typing import Optional, List, cast from functools import partial +from typing import List, Optional, Tuple, cast -from thinc.types import Floats2d -from thinc.api import Model, reduce_mean, Linear, list2ragged, Logistic -from thinc.api import chain, concatenate, clone, Dropout, ParametricAttention -from thinc.api import SparseLinear, Softmax, softmax_activation, Maxout, reduce_sum -from thinc.api import with_cpu, Relu, residual, LayerNorm, resizable +from thinc.api import ( + Dropout, + Gelu, + LayerNorm, + Linear, + Logistic, + Maxout, + Model, + ParametricAttention, + 
ParametricAttention_v2, + Relu, + Softmax, + SparseLinear, + SparseLinear_v2, + chain, + clone, + concatenate, + list2ragged, + reduce_first, + reduce_last, + reduce_max, + reduce_mean, + reduce_sum, + residual, + resizable, + softmax_activation, + with_cpu, +) from thinc.layers.chain import init as init_chain -from thinc.layers.resizable import resize_model, resize_linear_weighted +from thinc.layers.resizable import resize_linear_weighted, resize_model +from thinc.types import ArrayXd, Floats2d from ...attrs import ORTH +from ...errors import Errors +from ...tokens import Doc from ...util import registry from ..extract_ngrams import extract_ngrams from ..staticvectors import StaticVectors -from ...tokens import Doc from .tok2vec import get_tok2vec_width - NEG_VALUE = -5000 @@ -30,39 +54,15 @@ def build_simple_cnn_text_classifier( outputs sum to 1. If exclusive_classes=False, a logistic non-linearity is applied instead, so that outputs are in the range [0, 1]. """ - fill_defaults = {"b": 0, "W": 0} - with Model.define_operators({">>": chain}): - cnn = tok2vec >> list2ragged() >> reduce_mean() - nI = tok2vec.maybe_get_dim("nO") - if exclusive_classes: - output_layer = Softmax(nO=nO, nI=nI) - fill_defaults["b"] = NEG_VALUE - resizable_layer: Model = resizable( - output_layer, - resize_layer=partial( - resize_linear_weighted, fill_defaults=fill_defaults - ), - ) - model = cnn >> resizable_layer - else: - output_layer = Linear(nO=nO, nI=nI) - resizable_layer = resizable( - output_layer, - resize_layer=partial( - resize_linear_weighted, fill_defaults=fill_defaults - ), - ) - model = cnn >> resizable_layer >> Logistic() - model.set_ref("output_layer", output_layer) - model.attrs["resize_output"] = partial( - resize_and_set_ref, - resizable_layer=resizable_layer, - ) - model.set_ref("tok2vec", tok2vec) - if nO is not None: - model.set_dim("nO", cast(int, nO)) - model.attrs["multi_label"] = not exclusive_classes - return model + return build_reduce_text_classifier( + tok2vec=tok2vec, + exclusive_classes=exclusive_classes, + use_reduce_first=False, + use_reduce_last=False, + use_reduce_max=False, + use_reduce_mean=True, + nO=nO, + ) def resize_and_set_ref(model, new_nO, resizable_layer): @@ -78,10 +78,48 @@ def build_bow_text_classifier( ngram_size: int, no_output_layer: bool, nO: Optional[int] = None, +) -> Model[List[Doc], Floats2d]: + return _build_bow_text_classifier( + exclusive_classes=exclusive_classes, + ngram_size=ngram_size, + no_output_layer=no_output_layer, + nO=nO, + sparse_linear=SparseLinear(nO=nO), + ) + + +@registry.architectures("spacy.TextCatBOW.v3") +def build_bow_text_classifier_v3( + exclusive_classes: bool, + ngram_size: int, + no_output_layer: bool, + length: int = 262144, + nO: Optional[int] = None, +) -> Model[List[Doc], Floats2d]: + if length < 1: + raise ValueError(Errors.E1056.format(length=length)) + + # Find k such that 2**(k-1) < length <= 2**k. 
+ length = 2 ** (length - 1).bit_length() + + return _build_bow_text_classifier( + exclusive_classes=exclusive_classes, + ngram_size=ngram_size, + no_output_layer=no_output_layer, + nO=nO, + sparse_linear=SparseLinear_v2(nO=nO, length=length), + ) + + +def _build_bow_text_classifier( + exclusive_classes: bool, + ngram_size: int, + no_output_layer: bool, + sparse_linear: Model[Tuple[ArrayXd, ArrayXd, ArrayXd], ArrayXd], + nO: Optional[int] = None, ) -> Model[List[Doc], Floats2d]: fill_defaults = {"b": 0, "W": 0} with Model.define_operators({">>": chain}): - sparse_linear = SparseLinear(nO=nO) output_layer = None if not no_output_layer: fill_defaults["b"] = NEG_VALUE @@ -110,6 +148,9 @@ def build_text_classifier_v2( linear_model: Model[List[Doc], Floats2d], nO: Optional[int] = None, ) -> Model[List[Doc], Floats2d]: + # TODO: build the model with _build_parametric_attention_with_residual_nonlinear + # in spaCy v4. We don't do this in spaCy v3 to preserve model + # compatibility. exclusive_classes = not linear_model.attrs["multi_label"] with Model.define_operators({">>": chain, "|": concatenate}): width = tok2vec.maybe_get_dim("nO") @@ -144,6 +185,11 @@ def build_text_classifier_v2( def init_ensemble_textcat(model, X, Y) -> Model: + # When tok2vec is lazily initialized, we need to initialize it before + # the rest of the chain to ensure that we can get its width. + tok2vec = model.get_ref("tok2vec") + tok2vec.initialize(X) + tok2vec_width = get_tok2vec_width(model) model.get_ref("attention_layer").set_dim("nO", tok2vec_width) model.get_ref("maxout_layer").set_dim("nO", tok2vec_width) @@ -173,3 +219,153 @@ def build_text_classifier_lowdata( model = model >> Dropout(dropout) model = model >> Logistic() return model + + +@registry.architectures("spacy.TextCatParametricAttention.v1") +def build_textcat_parametric_attention_v1( + tok2vec: Model[List[Doc], List[Floats2d]], + exclusive_classes: bool, + nO: Optional[int] = None, +) -> Model[List[Doc], Floats2d]: + width = tok2vec.maybe_get_dim("nO") + parametric_attention = _build_parametric_attention_with_residual_nonlinear( + tok2vec=tok2vec, + nonlinear_layer=Maxout(nI=width, nO=width), + key_transform=Gelu(nI=width, nO=width), + ) + with Model.define_operators({">>": chain}): + if exclusive_classes: + output_layer = Softmax(nO=nO) + else: + output_layer = Linear(nO=nO) >> Logistic() + model = parametric_attention >> output_layer + if model.has_dim("nO") is not False and nO is not None: + model.set_dim("nO", cast(int, nO)) + model.set_ref("output_layer", output_layer) + model.attrs["multi_label"] = not exclusive_classes + + return model + + +def _build_parametric_attention_with_residual_nonlinear( + *, + tok2vec: Model[List[Doc], List[Floats2d]], + nonlinear_layer: Model[Floats2d, Floats2d], + key_transform: Optional[Model[Floats2d, Floats2d]] = None, +) -> Model[List[Doc], Floats2d]: + with Model.define_operators({">>": chain, "|": concatenate}): + width = tok2vec.maybe_get_dim("nO") + attention_layer = ParametricAttention_v2(nO=width, key_transform=key_transform) + norm_layer = LayerNorm(nI=width) + parametric_attention = ( + tok2vec + >> list2ragged() + >> attention_layer + >> reduce_sum() + >> residual(nonlinear_layer >> norm_layer >> Dropout(0.0)) + ) + + parametric_attention.init = _init_parametric_attention_with_residual_nonlinear + + parametric_attention.set_ref("tok2vec", tok2vec) + parametric_attention.set_ref("attention_layer", attention_layer) + parametric_attention.set_ref("key_transform", key_transform) + 
parametric_attention.set_ref("nonlinear_layer", nonlinear_layer) + parametric_attention.set_ref("norm_layer", norm_layer) + + return parametric_attention + + +def _init_parametric_attention_with_residual_nonlinear(model, X, Y) -> Model: + # When tok2vec is lazily initialized, we need to initialize it before + # the rest of the chain to ensure that we can get its width. + tok2vec = model.get_ref("tok2vec") + tok2vec.initialize(X) + + tok2vec_width = get_tok2vec_width(model) + model.get_ref("attention_layer").set_dim("nO", tok2vec_width) + model.get_ref("key_transform").set_dim("nI", tok2vec_width) + model.get_ref("key_transform").set_dim("nO", tok2vec_width) + model.get_ref("nonlinear_layer").set_dim("nI", tok2vec_width) + model.get_ref("nonlinear_layer").set_dim("nO", tok2vec_width) + model.get_ref("norm_layer").set_dim("nI", tok2vec_width) + model.get_ref("norm_layer").set_dim("nO", tok2vec_width) + init_chain(model, X, Y) + return model + + +@registry.architectures("spacy.TextCatReduce.v1") +def build_reduce_text_classifier( + tok2vec: Model, + exclusive_classes: bool, + use_reduce_first: bool, + use_reduce_last: bool, + use_reduce_max: bool, + use_reduce_mean: bool, + nO: Optional[int] = None, +) -> Model[List[Doc], Floats2d]: + """Build a model that classifies pooled `Doc` representations. + + Pooling is performed using reductions. Reductions are concatenated when + multiple reductions are used. + + tok2vec (Model): the tok2vec layer to pool over. + exclusive_classes (bool): Whether or not classes are mutually exclusive. + use_reduce_first (bool): Pool by using the hidden representation of the + first token of a `Doc`. + use_reduce_last (bool): Pool by using the hidden representation of the + last token of a `Doc`. + use_reduce_max (bool): Pool by taking the maximum values of the hidden + representations of a `Doc`. + use_reduce_mean (bool): Pool by taking the mean of all hidden + representations of a `Doc`. + nO (Optional[int]): Number of classes. 
+ """ + + fill_defaults = {"b": 0, "W": 0} + reductions = [] + if use_reduce_first: + reductions.append(reduce_first()) + if use_reduce_last: + reductions.append(reduce_last()) + if use_reduce_max: + reductions.append(reduce_max()) + if use_reduce_mean: + reductions.append(reduce_mean()) + + if not len(reductions): + raise ValueError(Errors.E1057) + + with Model.define_operators({">>": chain}): + cnn = tok2vec >> list2ragged() >> concatenate(*reductions) + nO_tok2vec = tok2vec.maybe_get_dim("nO") + nI = nO_tok2vec * len(reductions) if nO_tok2vec is not None else None + if exclusive_classes: + output_layer = Softmax(nO=nO, nI=nI) + fill_defaults["b"] = NEG_VALUE + resizable_layer: Model = resizable( + output_layer, + resize_layer=partial( + resize_linear_weighted, fill_defaults=fill_defaults + ), + ) + model = cnn >> resizable_layer + else: + output_layer = Linear(nO=nO, nI=nI) + resizable_layer = resizable( + output_layer, + resize_layer=partial( + resize_linear_weighted, fill_defaults=fill_defaults + ), + ) + model = cnn >> resizable_layer >> Logistic() + model.set_ref("output_layer", output_layer) + model.attrs["resize_output"] = partial( + resize_and_set_ref, + resizable_layer=resizable_layer, + ) + model.set_ref("tok2vec", tok2vec) + if nO is not None: + model.set_dim("nO", cast(int, nO)) + model.attrs["multi_label"] = not exclusive_classes + return model diff --git a/spacy/ml/models/tok2vec.py b/spacy/ml/models/tok2vec.py index 30c7360ff..0edc89991 100644 --- a/spacy/ml/models/tok2vec.py +++ b/spacy/ml/models/tok2vec.py @@ -1,17 +1,32 @@ -from typing import Optional, List, Union, cast -from thinc.types import Floats2d, Ints2d, Ragged, Ints1d -from thinc.api import chain, clone, concatenate, with_array, with_padded -from thinc.api import Model, noop, list2ragged, ragged2list, HashEmbed -from thinc.api import expand_window, residual, Maxout, Mish, PyTorchLSTM +from typing import List, Optional, Union, cast -from ...tokens import Doc -from ...util import registry +from thinc.api import ( + HashEmbed, + Maxout, + Mish, + Model, + PyTorchLSTM, + chain, + clone, + concatenate, + expand_window, + list2ragged, + noop, + ragged2list, + residual, + with_array, + with_padded, +) +from thinc.types import Floats2d, Ints1d, Ints2d, Ragged + +from ...attrs import intify_attr from ...errors import Errors from ...ml import _character_embed -from ..staticvectors import StaticVectors -from ..featureextractor import FeatureExtractor from ...pipeline.tok2vec import Tok2VecListener -from ...attrs import intify_attr +from ...tokens import Doc +from ...util import registry +from ..featureextractor import FeatureExtractor +from ..staticvectors import StaticVectors @registry.architectures("spacy.Tok2VecListener.v1") @@ -52,8 +67,8 @@ def build_hash_embed_cnn_tok2vec( are between 2 and 8. window_size (int): The number of tokens on either side to concatenate during the convolutions. The receptive field of the CNN will be - depth * (window_size * 2 + 1), so a 4-layer network with window_size of - 2 will be sensitive to 20 words at a time. Recommended value is 1. + depth * window_size * 2 + 1, so a 4-layer network with window_size of + 2 will be sensitive to 17 words at a time. Recommended value is 1. embed_size (int): The number of rows in the hash embedding tables. This can be surprisingly small, due to the use of the hash embeddings. Recommended values are between 2000 and 10000. 
diff --git a/spacy/ml/parser_model.pxd b/spacy/ml/parser_model.pxd index 8def6cea5..4d2d7b3fe 100644 --- a/spacy/ml/parser_model.pxd +++ b/spacy/ml/parser_model.pxd @@ -1,7 +1,8 @@ -from libc.string cimport memset, memcpy +from libc.string cimport memcpy, memset from thinc.backends.cblas cimport CBlas -from ..typedefs cimport weight_t, hash_t + from ..pipeline._parser_internals._state cimport StateC +from ..typedefs cimport hash_t, weight_t cdef struct SizesC: @@ -39,11 +40,16 @@ cdef ActivationsC alloc_activations(SizesC n) nogil cdef void free_activations(const ActivationsC* A) nogil -cdef void predict_states(CBlas cblas, ActivationsC* A, StateC** states, - const WeightsC* W, SizesC n) nogil - +cdef void predict_states( + CBlas cblas, ActivationsC* A, StateC** states, const WeightsC* W, SizesC n +) nogil + cdef int arg_max_if_valid(const weight_t* scores, const int* is_valid, int n) nogil -cdef void cpu_log_loss(float* d_scores, - const float* costs, const int* is_valid, const float* scores, int O) nogil - +cdef void cpu_log_loss( + float* d_scores, + const float* costs, + const int* is_valid, + const float* scores, + int O +) nogil diff --git a/spacy/ml/parser_model.pyx b/spacy/ml/parser_model.pyx index 961bf4d70..f004c562e 100644 --- a/spacy/ml/parser_model.pyx +++ b/spacy/ml/parser_model.pyx @@ -1,19 +1,21 @@ # cython: infer_types=True, cdivision=True, boundscheck=False +# cython: profile=False cimport numpy as np from libc.math cimport exp -from libc.string cimport memset, memcpy from libc.stdlib cimport calloc, free, realloc -from thinc.backends.linalg cimport Vec, VecVec +from libc.string cimport memcpy, memset from thinc.backends.cblas cimport saxpy, sgemm +from thinc.backends.linalg cimport Vec, VecVec import numpy import numpy.random -from thinc.api import Model, CupyOps, NumpyOps, get_ops +from thinc.api import CupyOps, Model, NumpyOps from .. 
import util from ..errors import Errors -from ..typedefs cimport weight_t, class_t, hash_t + from ..pipeline._parser_internals.stateclass cimport StateClass +from ..typedefs cimport weight_t cdef WeightsC get_c_weights(model) except *: @@ -77,33 +79,48 @@ cdef void resize_activations(ActivationsC* A, SizesC n) nogil: A.is_valid = calloc(n.states * n.classes, sizeof(A.is_valid[0])) A._max_size = n.states else: - A.token_ids = realloc(A.token_ids, - n.states * n.feats * sizeof(A.token_ids[0])) - A.scores = realloc(A.scores, - n.states * n.classes * sizeof(A.scores[0])) - A.unmaxed = realloc(A.unmaxed, - n.states * n.hiddens * n.pieces * sizeof(A.unmaxed[0])) - A.hiddens = realloc(A.hiddens, - n.states * n.hiddens * sizeof(A.hiddens[0])) - A.is_valid = realloc(A.is_valid, - n.states * n.classes * sizeof(A.is_valid[0])) + A.token_ids = realloc( + A.token_ids, n.states * n.feats * sizeof(A.token_ids[0]) + ) + A.scores = realloc( + A.scores, n.states * n.classes * sizeof(A.scores[0]) + ) + A.unmaxed = realloc( + A.unmaxed, n.states * n.hiddens * n.pieces * sizeof(A.unmaxed[0]) + ) + A.hiddens = realloc( + A.hiddens, n.states * n.hiddens * sizeof(A.hiddens[0]) + ) + A.is_valid = realloc( + A.is_valid, n.states * n.classes * sizeof(A.is_valid[0]) + ) A._max_size = n.states A._curr_size = n.states -cdef void predict_states(CBlas cblas, ActivationsC* A, StateC** states, - const WeightsC* W, SizesC n) nogil: - cdef double one = 1.0 +cdef void predict_states( + CBlas cblas, ActivationsC* A, StateC** states, const WeightsC* W, SizesC n +) nogil: resize_activations(A, n) for i in range(n.states): states[i].set_context_tokens(&A.token_ids[i*n.feats], n.feats) memset(A.unmaxed, 0, n.states * n.hiddens * n.pieces * sizeof(float)) memset(A.hiddens, 0, n.states * n.hiddens * sizeof(float)) - sum_state_features(cblas, A.unmaxed, - W.feat_weights, A.token_ids, n.states, n.feats, n.hiddens * n.pieces) + sum_state_features( + cblas, + A.unmaxed, + W.feat_weights, + A.token_ids, + n.states, + n.feats, + n.hiddens * n.pieces + ) for i in range(n.states): - VecVec.add_i(&A.unmaxed[i*n.hiddens*n.pieces], - W.feat_bias, 1., n.hiddens * n.pieces) + VecVec.add_i( + &A.unmaxed[i*n.hiddens*n.pieces], + W.feat_bias, 1., + n.hiddens * n.pieces + ) for j in range(n.hiddens): index = i * n.hiddens * n.pieces + j * n.pieces which = Vec.arg_max(&A.unmaxed[index], n.pieces) @@ -113,14 +130,15 @@ cdef void predict_states(CBlas cblas, ActivationsC* A, StateC** states, memcpy(A.scores, A.hiddens, n.states * n.classes * sizeof(float)) else: # Compute hidden-to-output - sgemm(cblas)(False, True, n.states, n.classes, n.hiddens, + sgemm(cblas)( + False, True, n.states, n.classes, n.hiddens, 1.0, A.hiddens, n.hiddens, W.hidden_weights, n.hiddens, - 0.0, A.scores, n.classes) + 0.0, A.scores, n.classes + ) # Add bias for i in range(n.states): - VecVec.add_i(&A.scores[i*n.classes], - W.hidden_bias, 1., n.classes) + VecVec.add_i(&A.scores[i*n.classes], W.hidden_bias, 1., n.classes) # Set unseen classes to minimum value i = 0 min_ = A.scores[0] @@ -133,9 +151,16 @@ cdef void predict_states(CBlas cblas, ActivationsC* A, StateC** states, A.scores[i*n.classes+j] = min_ -cdef void sum_state_features(CBlas cblas, float* output, - const float* cached, const int* token_ids, int B, int F, int O) nogil: - cdef int idx, b, f, i +cdef void sum_state_features( + CBlas cblas, + float* output, + const float* cached, + const int* token_ids, + int B, + int F, + int O +) nogil: + cdef int idx, b, f cdef const float* feature padding = cached cached += F * O 
@@ -152,9 +177,13 @@ cdef void sum_state_features(CBlas cblas, float* output, token_ids += F -cdef void cpu_log_loss(float* d_scores, - const float* costs, const int* is_valid, const float* scores, - int O) nogil: +cdef void cpu_log_loss( + float* d_scores, + const float* costs, + const int* is_valid, + const float* scores, + int O +) nogil: """Do multi-label log loss""" cdef double max_, gmax, Z, gZ best = arg_max_if_gold(scores, costs, is_valid, O) @@ -178,8 +207,9 @@ cdef void cpu_log_loss(float* d_scores, d_scores[i] = exp(scores[i]-max_) / Z -cdef int arg_max_if_gold(const weight_t* scores, const weight_t* costs, - const int* is_valid, int n) nogil: +cdef int arg_max_if_gold( + const weight_t* scores, const weight_t* costs, const int* is_valid, int n +) nogil: # Find minimum cost cdef float cost = 1 for i in range(n): @@ -203,10 +233,17 @@ cdef int arg_max_if_valid(const weight_t* scores, const int* is_valid, int n) no return best - class ParserStepModel(Model): - def __init__(self, docs, layers, *, has_upper, unseen_classes=None, train=True, - dropout=0.1): + def __init__( + self, + docs, + layers, + *, + has_upper, + unseen_classes=None, + train=True, + dropout=0.1 + ): Model.__init__(self, name="parser_step_model", forward=step_forward) self.attrs["has_upper"] = has_upper self.attrs["dropout_rate"] = dropout @@ -267,8 +304,10 @@ class ParserStepModel(Model): return ids def backprop_step(self, token_ids, d_vector, get_d_tokvecs): - if isinstance(self.state2vec.ops, CupyOps) \ - and not isinstance(token_ids, self.state2vec.ops.xp.ndarray): + if ( + isinstance(self.state2vec.ops, CupyOps) + and not isinstance(token_ids, self.state2vec.ops.xp.ndarray) + ): # Move token_ids and d_vector to GPU, asynchronously self.backprops.append(( util.get_async(self.cuda_stream, token_ids), @@ -278,7 +317,6 @@ class ParserStepModel(Model): else: self.backprops.append((token_ids, d_vector, get_d_tokvecs)) - def finish_steps(self, golds): # Add a padding vector to the d_tokvecs gradient, so that missing # values don't affect the real gradient. 
@@ -291,14 +329,15 @@ class ParserStepModel(Model): ids = ids.flatten() d_state_features = d_state_features.reshape( (ids.size, d_state_features.shape[2])) - self.ops.scatter_add(d_tokvecs, ids, - d_state_features) + self.ops.scatter_add(d_tokvecs, ids, d_state_features) # Padded -- see update() self.bp_tokvecs(d_tokvecs[:-1]) return d_tokvecs + NUMPY_OPS = NumpyOps() + def step_forward(model: ParserStepModel, states, is_train): token_ids = model.get_token_ids(states) vector, get_d_tokvecs = model.state2vec(token_ids, is_train) @@ -311,7 +350,7 @@ def step_forward(model: ParserStepModel, states, is_train): scores, get_d_vector = model.vec2scores(vector, is_train) else: scores = NumpyOps().asarray(vector) - get_d_vector = lambda d_scores: d_scores + get_d_vector = lambda d_scores: d_scores # no-cython-lint: E731 # If the class is unseen, make sure its score is minimum scores[:, model._class_mask == 0] = numpy.nanmin(scores) @@ -447,9 +486,11 @@ cdef class precompute_hiddens: feat_weights = self.get_feat_weights() cdef int[:, ::1] ids = token_ids - sum_state_features(cblas, state_vector.data, - feat_weights, &ids[0,0], - token_ids.shape[0], self.nF, self.nO*self.nP) + sum_state_features( + cblas, state_vector.data, + feat_weights, &ids[0, 0], + token_ids.shape[0], self.nF, self.nO*self.nP + ) state_vector += self.bias state_vector, bp_nonlinearity = self._nonlinearity(state_vector) @@ -474,7 +515,7 @@ cdef class precompute_hiddens: def backprop_maxout(d_best): return self.ops.backprop_maxout(d_best, mask, self.nP) - + return state_vector, backprop_maxout def _relu_nonlinearity(self, state_vector): @@ -488,5 +529,5 @@ cdef class precompute_hiddens: def backprop_relu(d_best): d_best *= mask return d_best.reshape((d_best.shape + (1,))) - + return state_vector, backprop_relu diff --git a/spacy/ml/staticvectors.py b/spacy/ml/staticvectors.py index 04cfe912d..1a1b0a0ff 100644 --- a/spacy/ml/staticvectors.py +++ b/spacy/ml/staticvectors.py @@ -1,12 +1,15 @@ -from typing import List, Tuple, Callable, Optional, Sequence, cast -from thinc.initializers import glorot_uniform_init -from thinc.util import partial -from thinc.types import Ragged, Floats2d, Floats1d, Ints1d -from thinc.api import Model, Ops, registry +import warnings +from typing import Callable, List, Optional, Sequence, Tuple, cast +from thinc.api import Model, Ops, registry +from thinc.initializers import glorot_uniform_init +from thinc.types import Floats1d, Floats2d, Ints1d, Ragged +from thinc.util import partial + +from ..attrs import ORTH +from ..errors import Errors, Warnings from ..tokens import Doc -from ..errors import Errors -from ..vectors import Mode +from ..vectors import Mode, Vectors from ..vocab import Vocab @@ -23,6 +26,8 @@ def StaticVectors( linear projection to control the dimensionality. If a dropout rate is specified, the dropout is applied per dimension over the whole batch. 
""" + if key_attr != "ORTH": + warnings.warn(Warnings.W125, DeprecationWarning) return Model( "static_vectors", forward, @@ -39,15 +44,18 @@ def forward( token_count = sum(len(doc) for doc in docs) if not token_count: return _handle_empty(model.ops, model.get_dim("nO")) - key_attr: int = model.attrs["key_attr"] - keys = model.ops.flatten([cast(Ints1d, doc.to_array(key_attr)) for doc in docs]) vocab: Vocab = docs[0].vocab + key_attr: int = getattr(vocab.vectors, "attr", ORTH) + keys = model.ops.flatten([cast(Ints1d, doc.to_array(key_attr)) for doc in docs]) W = cast(Floats2d, model.ops.as_contig(model.get_param("W"))) - if vocab.vectors.mode == Mode.default: + if isinstance(vocab.vectors, Vectors) and vocab.vectors.mode == Mode.default: V = model.ops.asarray(vocab.vectors.data) rows = vocab.vectors.find(keys=keys) V = model.ops.as_contig(V[rows]) - elif vocab.vectors.mode == Mode.floret: + elif isinstance(vocab.vectors, Vectors) and vocab.vectors.mode == Mode.floret: + V = vocab.vectors.get_batch(keys) + V = model.ops.as_contig(V) + elif hasattr(vocab.vectors, "get_batch"): V = vocab.vectors.get_batch(keys) V = model.ops.as_contig(V) else: @@ -56,7 +64,7 @@ def forward( vectors_data = model.ops.gemm(V, W, trans2=True) except ValueError: raise RuntimeError(Errors.E896) - if vocab.vectors.mode == Mode.default: + if isinstance(vocab.vectors, Vectors) and vocab.vectors.mode == Mode.default: # Convert negative indices to 0-vectors # TODO: more options for UNK tokens vectors_data[rows < 0] = 0 diff --git a/spacy/ml/tb_framework.py b/spacy/ml/tb_framework.py index ab4a969e2..e351ad4e5 100644 --- a/spacy/ml/tb_framework.py +++ b/spacy/ml/tb_framework.py @@ -1,6 +1,7 @@ from thinc.api import Model, noop -from .parser_model import ParserStepModel + from ..util import registry +from .parser_model import ParserStepModel @registry.layers("spacy.TransitionModel.v1") diff --git a/spacy/morphology.pxd b/spacy/morphology.pxd index 8d449d065..ee43aa4ec 100644 --- a/spacy/morphology.pxd +++ b/spacy/morphology.pxd @@ -1,17 +1,17 @@ -from cymem.cymem cimport Pool -from preshed.maps cimport PreshMap cimport numpy as np +from cymem.cymem cimport Pool from libc.stdint cimport uint64_t +from preshed.maps cimport PreshMap -from .structs cimport MorphAnalysisC from .strings cimport StringStore +from .structs cimport MorphAnalysisC from .typedefs cimport attr_t, hash_t cdef class Morphology: cdef readonly Pool mem cdef readonly StringStore strings - cdef PreshMap tags # Keyed by hash, value is pointer to tag + cdef PreshMap tags # Keyed by hash, value is pointer to tag cdef MorphAnalysisC create_morph_tag(self, field_feature_pairs) except * cdef int insert(self, MorphAnalysisC tag) except -1 @@ -20,4 +20,8 @@ cdef class Morphology: cdef int check_feature(const MorphAnalysisC* morph, attr_t feature) nogil cdef list list_features(const MorphAnalysisC* morph) cdef np.ndarray get_by_field(const MorphAnalysisC* morph, attr_t field) -cdef int get_n_by_field(attr_t* results, const MorphAnalysisC* morph, attr_t field) nogil +cdef int get_n_by_field( + attr_t* results, + const MorphAnalysisC* morph, + attr_t field, +) nogil diff --git a/spacy/morphology.pyx b/spacy/morphology.pyx index c3ffc46a1..cef45b04d 100644 --- a/spacy/morphology.pyx +++ b/spacy/morphology.pyx @@ -1,12 +1,14 @@ # cython: infer_types -import numpy +# cython: profile=False import warnings +import numpy + from .attrs cimport POS -from .parts_of_speech import IDS as POS_IDS -from .errors import Warnings from . 
import symbols +from .errors import Warnings +from .parts_of_speech import IDS as POS_IDS cdef class Morphology: @@ -82,10 +84,11 @@ cdef class Morphology: features = self.normalize_attrs(features) string_features = {self.strings.as_string(field): self.strings.as_string(values) for field, values in features.items()} # normalized UFEATS string with sorted fields and values - norm_feats_string = self.FEATURE_SEP.join(sorted([ - self.FIELD_SEP.join([field, values]) - for field, values in string_features.items() - ])) + norm_feats_string = self.FEATURE_SEP.join( + sorted( + [self.FIELD_SEP.join([field, values]) for field, values in string_features.items()] + ) + ) return norm_feats_string or self.EMPTY_MORPH def normalize_attrs(self, attrs): @@ -191,6 +194,7 @@ cdef int get_n_by_field(attr_t* results, const MorphAnalysisC* morph, attr_t fie n_results += 1 return n_results + def unpickle_morphology(strings, tags): cdef Morphology morphology = Morphology(strings) for tag in tags: diff --git a/spacy/parts_of_speech.pxd b/spacy/parts_of_speech.pxd index 0bf5b4789..b5423d113 100644 --- a/spacy/parts_of_speech.pxd +++ b/spacy/parts_of_speech.pxd @@ -1,5 +1,6 @@ from . cimport symbols + cpdef enum univ_pos_t: NO_TAG = 0 ADJ = symbols.ADJ @@ -7,7 +8,7 @@ cpdef enum univ_pos_t: ADV AUX CONJ - CCONJ # U20 + CCONJ # U20 DET INTJ NOUN diff --git a/spacy/parts_of_speech.pyx b/spacy/parts_of_speech.pyx index e71fb917f..98e3570ec 100644 --- a/spacy/parts_of_speech.pyx +++ b/spacy/parts_of_speech.pyx @@ -1,4 +1,4 @@ - +# cython: profile=False IDS = { "": NO_TAG, "ADJ": ADJ, diff --git a/spacy/pipe_analysis.py b/spacy/pipe_analysis.py index 245747061..d26884487 100644 --- a/spacy/pipe_analysis.py +++ b/spacy/pipe_analysis.py @@ -1,8 +1,9 @@ -from typing import List, Set, Dict, Iterable, ItemsView, Union, TYPE_CHECKING +from typing import TYPE_CHECKING, Dict, ItemsView, Iterable, List, Set, Union + from wasabi import msg -from .tokens import Doc, Token, Span from .errors import Errors +from .tokens import Doc, Span, Token from .util import dot_to_dict if TYPE_CHECKING: diff --git a/spacy/pipeline/__init__.py b/spacy/pipeline/__init__.py index 40e3fd638..2c4a5a8a8 100644 --- a/spacy/pipeline/__init__.py +++ b/spacy/pipeline/__init__.py @@ -22,6 +22,7 @@ from .trainable_pipe import TrainablePipe __all__ = [ "AttributeRuler", "DependencyParser", + "EditTreeLemmatizer", "EntityLinker", "EntityRecognizer", "EntityRuler", diff --git a/spacy/pipeline/_edit_tree_internals/edit_trees.pxd b/spacy/pipeline/_edit_tree_internals/edit_trees.pxd index dc4289f37..41acd2b07 100644 --- a/spacy/pipeline/_edit_tree_internals/edit_trees.pxd +++ b/spacy/pipeline/_edit_tree_internals/edit_trees.pxd @@ -2,8 +2,9 @@ from libc.stdint cimport uint32_t, uint64_t from libcpp.unordered_map cimport unordered_map from libcpp.vector cimport vector -from ...typedefs cimport attr_t, hash_t, len_t from ...strings cimport StringStore +from ...typedefs cimport attr_t, hash_t, len_t + cdef extern from "" namespace "std" nogil: void swap[T](T& a, T& b) except + # Only available in Cython 3. 
@@ -45,11 +46,18 @@ cdef struct EditTreeC: bint is_match_node NodeC inner -cdef inline EditTreeC edittree_new_match(len_t prefix_len, len_t suffix_len, - uint32_t prefix_tree, uint32_t suffix_tree): - cdef MatchNodeC match_node = MatchNodeC(prefix_len=prefix_len, - suffix_len=suffix_len, prefix_tree=prefix_tree, - suffix_tree=suffix_tree) +cdef inline EditTreeC edittree_new_match( + len_t prefix_len, + len_t suffix_len, + uint32_t prefix_tree, + uint32_t suffix_tree +): + cdef MatchNodeC match_node = MatchNodeC( + prefix_len=prefix_len, + suffix_len=suffix_len, + prefix_tree=prefix_tree, + suffix_tree=suffix_tree + ) cdef NodeC inner = NodeC(match_node=match_node) return EditTreeC(is_match_node=True, inner=inner) diff --git a/spacy/pipeline/_edit_tree_internals/edit_trees.pyx b/spacy/pipeline/_edit_tree_internals/edit_trees.pyx index 9d18c0334..7abd9f2a6 100644 --- a/spacy/pipeline/_edit_tree_internals/edit_trees.pyx +++ b/spacy/pipeline/_edit_tree_internals/edit_trees.pyx @@ -1,13 +1,11 @@ # cython: infer_types=True, binding=True +# cython: profile=False from cython.operator cimport dereference as deref -from libc.stdint cimport uint32_t -from libc.stdint cimport UINT32_MAX +from libc.stdint cimport UINT32_MAX, uint32_t from libc.string cimport memset from libcpp.pair cimport pair from libcpp.vector cimport vector -from pathlib import Path - from ...typedefs cimport hash_t from ... import util @@ -15,7 +13,6 @@ from ...errors import Errors from ...strings import StringStore from .schemas import validate_edit_tree - NULL_TREE_ID = UINT32_MAX cdef LCS find_lcs(str source, str target): @@ -27,17 +24,16 @@ cdef LCS find_lcs(str source, str target): target (str): The second string. RETURNS (LCS): The spans of the longest common subsequences. """ - cdef Py_ssize_t source_len = len(source) cdef Py_ssize_t target_len = len(target) - cdef size_t longest_align = 0; + cdef size_t longest_align = 0 cdef int source_idx, target_idx cdef LCS lcs cdef Py_UCS4 source_cp, target_cp memset(&lcs, 0, sizeof(lcs)) - cdef vector[size_t] prev_aligns = vector[size_t](target_len); - cdef vector[size_t] cur_aligns = vector[size_t](target_len); + cdef vector[size_t] prev_aligns = vector[size_t](target_len) + cdef vector[size_t] cur_aligns = vector[size_t](target_len) for (source_idx, source_cp) in enumerate(source): for (target_idx, target_cp) in enumerate(target): @@ -91,7 +87,7 @@ cdef class EditTrees: cdef LCS lcs = find_lcs(form, lemma) cdef EditTreeC tree - cdef uint32_t tree_id, prefix_tree, suffix_tree + cdef uint32_t prefix_tree, suffix_tree if lcs_is_empty(lcs): tree = edittree_new_subst(self.strings.add(form), self.strings.add(lemma)) else: @@ -110,7 +106,7 @@ cdef class EditTrees: return self._tree_id(tree) cdef uint32_t _tree_id(self, EditTreeC tree): - # If this tree has been constructed before, return its identifier. + # If this tree has been constructed before, return its identifier. 
cdef hash_t hash = edittree_hash(tree) cdef unordered_map[hash_t, uint32_t].iterator iter = self.map.find(hash) if iter != self.map.end(): @@ -291,6 +287,7 @@ def _tree2dict(tree): tree = tree["inner"]["subst_node"] return(dict(tree)) + def _dict2tree(tree): errors = validate_edit_tree(tree) if errors: diff --git a/spacy/pipeline/_edit_tree_internals/schemas.py b/spacy/pipeline/_edit_tree_internals/schemas.py index c01d0632e..89f2861ce 100644 --- a/spacy/pipeline/_edit_tree_internals/schemas.py +++ b/spacy/pipeline/_edit_tree_internals/schemas.py @@ -1,7 +1,12 @@ -from typing import Any, Dict, List, Union from collections import defaultdict -from pydantic import BaseModel, Field, ValidationError -from pydantic.types import StrictBool, StrictInt, StrictStr +from typing import Any, Dict, List, Union + +try: + from pydantic.v1 import BaseModel, Field, ValidationError + from pydantic.v1.types import StrictBool, StrictInt, StrictStr +except ImportError: + from pydantic import BaseModel, Field, ValidationError # type: ignore + from pydantic.types import StrictBool, StrictInt, StrictStr # type: ignore class MatchNodeSchema(BaseModel): diff --git a/spacy/pipeline/_parser_internals/_beam_utils.pxd b/spacy/pipeline/_parser_internals/_beam_utils.pxd index de3573fbc..596306b23 100644 --- a/spacy/pipeline/_parser_internals/_beam_utils.pxd +++ b/spacy/pipeline/_parser_internals/_beam_utils.pxd @@ -1,5 +1,6 @@ from ...typedefs cimport class_t, hash_t + # These are passed as callbacks to thinc.search.Beam cdef int transition_state(void* _dest, void* _src, class_t clas, void* _moves) except -1 diff --git a/spacy/pipeline/_parser_internals/_beam_utils.pyx b/spacy/pipeline/_parser_internals/_beam_utils.pyx index fa7df2056..ac04be5a7 100644 --- a/spacy/pipeline/_parser_internals/_beam_utils.pyx +++ b/spacy/pipeline/_parser_internals/_beam_utils.pyx @@ -1,15 +1,17 @@ # cython: infer_types=True -# cython: profile=True -cimport numpy as np import numpy -from cpython.ref cimport PyObject, Py_XDECREF + from thinc.extra.search cimport Beam + from thinc.extra.search import MaxViolation + from thinc.extra.search cimport MaxViolation -from ...typedefs cimport hash_t, class_t -from .transition_system cimport TransitionSystem, Transition +from ...typedefs cimport class_t +from .transition_system cimport Transition, TransitionSystem + from ...errors import Errors + from .stateclass cimport StateC, StateClass @@ -140,7 +142,6 @@ def update_beam(TransitionSystem moves, states, golds, model, int width, beam_de cdef MaxViolation violn pbeam = BeamBatch(moves, states, golds, width=width, density=beam_density) gbeam = BeamBatch(moves, states, golds, width=width, density=0.0) - cdef StateClass state beam_maps = [] backprops = [] violns = [MaxViolation() for _ in range(len(states))] diff --git a/spacy/pipeline/_parser_internals/_state.pxd b/spacy/pipeline/_parser_internals/_state.pxd index a1262bb61..c063cf97c 100644 --- a/spacy/pipeline/_parser_internals/_state.pxd +++ b/spacy/pipeline/_parser_internals/_state.pxd @@ -1,19 +1,20 @@ -from cython.operator cimport dereference as deref, preincrement as incr -from libc.string cimport memcpy, memset -from libc.stdlib cimport calloc, free -from libc.stdint cimport uint32_t, uint64_t cimport libcpp +from cpython.exc cimport PyErr_CheckSignals, PyErr_SetFromErrno +from cython.operator cimport dereference as deref +from cython.operator cimport preincrement as incr +from libc.stdint cimport uint32_t, uint64_t +from libc.stdlib cimport calloc, free +from libc.string cimport memcpy, memset 
+from libcpp.set cimport set from libcpp.unordered_map cimport unordered_map from libcpp.vector cimport vector -from libcpp.set cimport set -from cpython.exc cimport PyErr_CheckSignals, PyErr_SetFromErrno from murmurhash.mrmr cimport hash64 -from ...vocab cimport EMPTY_LEXEME -from ...structs cimport TokenC, SpanC -from ...lexeme cimport Lexeme from ...attrs cimport IS_SPACE +from ...lexeme cimport Lexeme +from ...structs cimport SpanC, TokenC from ...typedefs cimport attr_t +from ...vocab cimport EMPTY_LEXEME cdef inline bint is_space_token(const TokenC* token) nogil: @@ -276,7 +277,6 @@ cdef cppclass StateC: return n - int n_L(int head) nogil const: return n_arcs(this._left_arcs, head) diff --git a/spacy/pipeline/_parser_internals/_state.pyx b/spacy/pipeline/_parser_internals/_state.pyx index e69de29bb..61bf62038 100644 --- a/spacy/pipeline/_parser_internals/_state.pyx +++ b/spacy/pipeline/_parser_internals/_state.pyx @@ -0,0 +1 @@ +# cython: profile=False diff --git a/spacy/pipeline/_parser_internals/arc_eager.pxd b/spacy/pipeline/_parser_internals/arc_eager.pxd index b618bc587..2c17e7b26 100644 --- a/spacy/pipeline/_parser_internals/arc_eager.pxd +++ b/spacy/pipeline/_parser_internals/arc_eager.pxd @@ -1,5 +1,5 @@ +from ...typedefs cimport attr_t, weight_t from ._state cimport StateC -from ...typedefs cimport weight_t, attr_t from .transition_system cimport Transition, TransitionSystem diff --git a/spacy/pipeline/_parser_internals/arc_eager.pyx b/spacy/pipeline/_parser_internals/arc_eager.pyx index 257b5ef8a..e13754944 100644 --- a/spacy/pipeline/_parser_internals/arc_eager.pyx +++ b/spacy/pipeline/_parser_internals/arc_eager.pyx @@ -1,22 +1,27 @@ -# cython: profile=True, cdivision=True, infer_types=True -from cymem.cymem cimport Pool, Address +# cython: cdivision=True, infer_types=True +from cymem.cymem cimport Address, Pool from libc.stdint cimport int32_t from libcpp.vector cimport vector -from collections import defaultdict, Counter +from collections import Counter, defaultdict -from ...typedefs cimport hash_t, attr_t from ...strings cimport hash_string from ...structs cimport TokenC from ...tokens.doc cimport Doc, set_children_from_heads from ...tokens.token cimport MISSING_DEP +from ...typedefs cimport attr_t + from ...training import split_bilu_label + from ...training.example cimport Example +from ._state cimport ArcC, StateC from .stateclass cimport StateClass -from ._state cimport StateC, ArcC + from ...errors import Errors + from thinc.extra.search cimport Beam + cdef weight_t MIN_SCORE = -90000 cdef attr_t SUBTOK_LABEL = hash_string('subtok') @@ -63,8 +68,9 @@ cdef struct GoldParseStateC: weight_t pop_cost -cdef GoldParseStateC create_gold_state(Pool mem, const StateC* state, - heads, labels, sent_starts) except *: +cdef GoldParseStateC create_gold_state( + Pool mem, const StateC* state, heads, labels, sent_starts +) except *: cdef GoldParseStateC gs gs.length = len(heads) gs.stride = 1 @@ -77,7 +83,7 @@ cdef GoldParseStateC create_gold_state(Pool mem, const StateC* state, gs.n_kids_in_stack = mem.alloc(gs.length, sizeof(gs.n_kids_in_stack[0])) for i, is_sent_start in enumerate(sent_starts): - if is_sent_start == True: + if is_sent_start is True: gs.state_bits[i] = set_state_flag( gs.state_bits[i], IS_SENT_START, @@ -205,6 +211,7 @@ cdef class ArcEagerGold: def update(self, StateClass stcls): update_gold_state(&self.c, stcls.c) + def _get_aligned_sent_starts(example): """Get list of SENT_START attributes aligned to the predicted tokenization. 
If the reference has not sentence starts, return a list of None values. @@ -519,7 +526,6 @@ cdef class Break: """ @staticmethod cdef bint is_valid(const StateC* st, attr_t label) nogil: - cdef int i if st.buffer_length() < 2: return False elif st.B(1) != st.B(0) + 1: @@ -551,8 +557,8 @@ cdef class Break: cost -= 1 if gold.heads[si] == b0: cost -= 1 - if not is_sent_start(gold, state.B(1)) \ - and not is_sent_start_unknown(gold, state.B(1)): + if not is_sent_start(gold, state.B(1)) and\ + not is_sent_start_unknown(gold, state.B(1)): cost += 1 return cost @@ -798,7 +804,6 @@ cdef class ArcEager(TransitionSystem): raise TypeError(Errors.E909.format(name="ArcEagerGold")) cdef ArcEagerGold gold_ = gold gold_state = gold_.c - n_gold = 0 if self.c[i].is_valid(stcls.c, self.c[i].label): cost = self.c[i].get_cost(stcls.c, &gold_state, self.c[i].label) else: @@ -870,7 +875,7 @@ cdef class ArcEager(TransitionSystem): print("Gold") for token in example.y: print(token.i, token.text, token.dep_, token.head.text) - aligned_heads, aligned_labels = example.get_aligned_parse() + aligned_heads, _aligned_labels = example.get_aligned_parse() print("Aligned heads") for i, head in enumerate(aligned_heads): print(example.x[i], example.x[head] if head is not None else "__") diff --git a/spacy/pipeline/_parser_internals/ner.pyx b/spacy/pipeline/_parser_internals/ner.pyx index fab872f00..e4312bd2f 100644 --- a/spacy/pipeline/_parser_internals/ner.pyx +++ b/spacy/pipeline/_parser_internals/ner.pyx @@ -1,22 +1,26 @@ -import os -import random -from libc.stdint cimport int32_t +# cython: profile=False from cymem.cymem cimport Pool +from libc.stdint cimport int32_t from collections import Counter + from thinc.extra.search cimport Beam from ...tokens.doc cimport Doc + from ...tokens.span import Span -from ...tokens.span cimport Span -from ...typedefs cimport weight_t, attr_t -from ...lexeme cimport Lexeme + from ...attrs cimport IS_SPACE -from ...structs cimport TokenC, SpanC +from ...lexeme cimport Lexeme +from ...structs cimport SpanC +from ...tokens.span cimport Span +from ...typedefs cimport attr_t, weight_t + from ...training import split_bilu_label + from ...training.example cimport Example -from .stateclass cimport StateClass from ._state cimport StateC +from .stateclass cimport StateClass from .transition_system cimport Transition, do_func_t from ...errors import Errors @@ -135,11 +139,10 @@ cdef class BiluoPushDown(TransitionSystem): OUT: Counter() } actions[OUT][''] = 1 # Represents a token predicted to be outside of any entity - actions[UNIT][''] = 1 # Represents a token prohibited to be in an entity + actions[UNIT][''] = 1 # Represents a token prohibited to be in an entity for entity_type in kwargs.get('entity_types', []): for action in (BEGIN, IN, LAST, UNIT): actions[action][entity_type] = 1 - moves = ('M', 'B', 'I', 'L', 'U') for example in kwargs.get('examples', []): for token in example.y: ent_type = token.ent_type_ @@ -158,7 +161,7 @@ cdef class BiluoPushDown(TransitionSystem): if token.ent_type: labels.add(token.ent_type_) return labels - + def move_name(self, int move, attr_t label): if move == OUT: return 'O' @@ -319,7 +322,6 @@ cdef class BiluoPushDown(TransitionSystem): raise TypeError(Errors.E909.format(name="BiluoGold")) cdef BiluoGold gold_ = gold gold_state = gold_.c - n_gold = 0 if self.c[i].is_valid(stcls.c, self.c[i].label): cost = self.c[i].get_cost(stcls.c, &gold_state, self.c[i].label) else: @@ -480,10 +482,8 @@ cdef class In: @staticmethod cdef weight_t cost(const StateC* s, const void* 
_gold, attr_t label) nogil: gold = _gold - move = IN cdef int next_act = gold.ner[s.B(1)].move if s.B(1) >= 0 else OUT cdef int g_act = gold.ner[s.B(0)].move - cdef attr_t g_tag = gold.ner[s.B(0)].label cdef bint is_sunk = _entity_is_sunk(s, gold.ner) if g_act == MISSING: @@ -543,12 +543,10 @@ cdef class Last: @staticmethod cdef weight_t cost(const StateC* s, const void* _gold, attr_t label) nogil: gold = _gold - move = LAST b0 = s.B(0) ent_start = s.E(0) cdef int g_act = gold.ner[b0].move - cdef attr_t g_tag = gold.ner[b0].label cdef int cost = 0 @@ -644,7 +642,6 @@ cdef class Unit: cost += 1 break return cost - cdef class Out: @@ -669,7 +666,6 @@ cdef class Out: cdef weight_t cost(const StateC* s, const void* _gold, attr_t label) nogil: gold = _gold cdef int g_act = gold.ner[s.B(0)].move - cdef attr_t g_tag = gold.ner[s.B(0)].label cdef weight_t cost = 0 if g_act == MISSING: pass diff --git a/spacy/pipeline/_parser_internals/nonproj.pxd b/spacy/pipeline/_parser_internals/nonproj.pxd index aabdf7ebe..1a349d56a 100644 --- a/spacy/pipeline/_parser_internals/nonproj.pxd +++ b/spacy/pipeline/_parser_internals/nonproj.pxd @@ -1,4 +1,5 @@ from libcpp.string cimport string + cdef extern from "nonproj.hh": cdef void raise_domain_error(const string& msg) nogil except + diff --git a/spacy/pipeline/_parser_internals/nonproj.pyx b/spacy/pipeline/_parser_internals/nonproj.pyx index d1b6e7066..7de19851e 100644 --- a/spacy/pipeline/_parser_internals/nonproj.pyx +++ b/spacy/pipeline/_parser_internals/nonproj.pyx @@ -1,22 +1,23 @@ -# cython: profile=True, infer_types=True +# cython: infer_types=True """Implements the projectivize/deprojectivize mechanism in Nivre & Nilsson 2005 for doing pseudo-projective parsing implementation uses the HEAD decoration scheme. """ from copy import copy -from cython.operator cimport preincrement as incr, dereference as deref + +from cython.operator cimport dereference as deref +from cython.operator cimport preincrement as incr from libc.limits cimport INT_MAX from libc.stdlib cimport abs from libcpp cimport bool from libcpp.string cimport string, to_string -from libcpp.vector cimport vector from libcpp.unordered_set cimport unordered_set +from libcpp.vector cimport vector from ...tokens.doc cimport Doc, set_children_from_heads from ...errors import Errors - DELIMITER = '||' @@ -124,14 +125,17 @@ def decompose(label): def is_decorated(label): return DELIMITER in label + def count_decorated_labels(gold_data): freqs = {} for example in gold_data: proj_heads, deco_deps = projectivize(example.get_aligned("HEAD"), example.get_aligned("DEP")) # set the label to ROOT for each root dependent - deco_deps = ['ROOT' if head == i else deco_deps[i] - for i, head in enumerate(proj_heads)] + deco_deps = [ + 'ROOT' if head == i else deco_deps[i] + for i, head in enumerate(proj_heads) + ] # count label frequencies for label in deco_deps: if is_decorated(label): @@ -159,9 +163,9 @@ def projectivize(heads, labels): cdef vector[int] _heads_to_c(heads): - cdef vector[int] c_heads; + cdef vector[int] c_heads for head in heads: - if head == None: + if head is None: c_heads.push_back(-1) else: assert head < len(heads) @@ -198,6 +202,7 @@ def _decorate(heads, proj_heads, labels): deco_labels.append(labels[tokenid]) return deco_labels + def get_smallest_nonproj_arc_slow(heads): cdef vector[int] c_heads = _heads_to_c(heads) return _get_smallest_nonproj_arc(c_heads) diff --git a/spacy/pipeline/_parser_internals/stateclass.pxd b/spacy/pipeline/_parser_internals/stateclass.pxd index 54ff344b9..b8ecc1bbf 
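The `BiluoPushDown` moves above (BEGIN, IN, LAST, UNIT, OUT) correspond to spaCy's BILUO tagging scheme. A quick, self-contained look at those tags via the public helper, with a blank pipeline and invented example text:

```python
import spacy
from spacy.training import offsets_to_biluo_tags

nlp = spacy.blank("en")
doc = nlp.make_doc("Ada Lovelace was born in London.")
# Character-offset entities: a two-token PERSON and a one-token GPE.
tags = offsets_to_biluo_tags(doc, [(0, 12, "PERSON"), (25, 31, "GPE")])
print(tags)  # B-/L- for the multi-token name, U- for the single-token entity, O elsewhere
```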
100644 --- a/spacy/pipeline/_parser_internals/stateclass.pxd +++ b/spacy/pipeline/_parser_internals/stateclass.pxd @@ -1,9 +1,8 @@ from cymem.cymem cimport Pool -from ...structs cimport TokenC, SpanC -from ...typedefs cimport attr_t +from ...structs cimport SpanC, TokenC from ...tokens.doc cimport Doc - +from ...typedefs cimport attr_t from ._state cimport StateC diff --git a/spacy/pipeline/_parser_internals/stateclass.pyx b/spacy/pipeline/_parser_internals/stateclass.pyx index 4eaddd997..24b9f1adc 100644 --- a/spacy/pipeline/_parser_internals/stateclass.pyx +++ b/spacy/pipeline/_parser_internals/stateclass.pyx @@ -1,9 +1,9 @@ # cython: infer_types=True -import numpy +# cython: profile=False from libcpp.vector cimport vector -from ._state cimport ArcC from ...tokens.doc cimport Doc +from ._state cimport ArcC cdef class StateClass: @@ -29,7 +29,7 @@ cdef class StateClass: return [self.B(i) for i in range(self.c.buffer_length())] @property - def token_vector_lenth(self): + def token_vector_length(self): return self.doc.tensor.shape[1] @property @@ -37,11 +37,11 @@ cdef class StateClass: cdef vector[ArcC] arcs self.c.get_arcs(&arcs) return list(arcs) - #py_arcs = [] - #for arc in arcs: - # if arc.head != -1 and arc.child != -1: - # py_arcs.append((arc.head, arc.child, arc.label)) - #return arcs + # py_arcs = [] + # for arc in arcs: + # if arc.head != -1 and arc.child != -1: + # py_arcs.append((arc.head, arc.child, arc.label)) + # return arcs def add_arc(self, int head, int child, int label): self.c.add_arc(head, child, label) @@ -51,10 +51,10 @@ cdef class StateClass: def H(self, int child): return self.c.H(child) - + def L(self, int head, int idx): return self.c.L(head, idx) - + def R(self, int head, int idx): return self.c.R(head, idx) @@ -97,7 +97,7 @@ cdef class StateClass: def H(self, int i): return self.c.H(i) - + def E(self, int i): return self.c.E(i) @@ -115,7 +115,7 @@ cdef class StateClass: def H_(self, int i): return self.doc[self.c.H(i)] - + def E_(self, int i): return self.doc[self.c.E(i)] @@ -124,7 +124,7 @@ cdef class StateClass: def R_(self, int i, int idx): return self.doc[self.c.R(i, idx)] - + def empty(self): return self.c.empty() @@ -133,7 +133,7 @@ cdef class StateClass: def at_break(self): return False - #return self.c.at_break() + # return self.c.at_break() def has_head(self, int i): return self.c.has_head(i) diff --git a/spacy/pipeline/_parser_internals/transition_system.pxd b/spacy/pipeline/_parser_internals/transition_system.pxd index 52ebd2b8e..04cd10d88 100644 --- a/spacy/pipeline/_parser_internals/transition_system.pxd +++ b/spacy/pipeline/_parser_internals/transition_system.pxd @@ -1,11 +1,11 @@ from cymem.cymem cimport Pool -from ...typedefs cimport attr_t, weight_t -from ...structs cimport TokenC from ...strings cimport StringStore +from ...structs cimport TokenC from ...training.example cimport Example -from .stateclass cimport StateClass +from ...typedefs cimport attr_t, weight_t from ._state cimport StateC +from .stateclass cimport StateClass cdef struct Transition: @@ -20,11 +20,15 @@ cdef struct Transition: int (*do)(StateC* state, attr_t label) nogil -ctypedef weight_t (*get_cost_func_t)(const StateC* state, const void* gold, - attr_tlabel) nogil -ctypedef weight_t (*move_cost_func_t)(const StateC* state, const void* gold) nogil -ctypedef weight_t (*label_cost_func_t)(const StateC* state, const void* - gold, attr_t label) nogil +ctypedef weight_t (*get_cost_func_t)( + const StateC* state, const void* gold, attr_tlabel +) nogil +ctypedef weight_t 
(*move_cost_func_t)( + const StateC* state, const void* gold +) nogil +ctypedef weight_t (*label_cost_func_t)( + const StateC* state, const void* gold, attr_t label +) nogil ctypedef int (*do_func_t)(StateC* state, attr_t label) nogil diff --git a/spacy/pipeline/_parser_internals/transition_system.pyx b/spacy/pipeline/_parser_internals/transition_system.pyx index 18eb745a9..e035053b3 100644 --- a/spacy/pipeline/_parser_internals/transition_system.pyx +++ b/spacy/pipeline/_parser_internals/transition_system.pyx @@ -1,18 +1,19 @@ # cython: infer_types=True +# cython: profile=False from __future__ import print_function + from cymem.cymem cimport Pool from collections import Counter + import srsly -from . cimport _beam_utils -from ...typedefs cimport weight_t, attr_t -from ...tokens.doc cimport Doc from ...structs cimport TokenC +from ...typedefs cimport attr_t, weight_t from .stateclass cimport StateClass -from ...errors import Errors from ... import util +from ...errors import Errors cdef weight_t MIN_SCORE = -90000 @@ -229,7 +230,6 @@ cdef class TransitionSystem: return self def to_bytes(self, exclude=tuple()): - transitions = [] serializers = { 'moves': lambda: srsly.json_dumps(self.labels), 'strings': lambda: self.strings.to_bytes(), diff --git a/spacy/pipeline/attributeruler.py b/spacy/pipeline/attributeruler.py index 0d9494865..8ac74d92b 100644 --- a/spacy/pipeline/attributeruler.py +++ b/spacy/pipeline/attributeruler.py @@ -1,21 +1,20 @@ -from typing import List, Dict, Union, Iterable, Any, Optional, Callable -from typing import Tuple -import srsly from pathlib import Path +from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union -from .pipe import Pipe +import srsly + +from .. import util from ..errors import Errors -from ..training import Example from ..language import Language from ..matcher import Matcher from ..scorer import Scorer from ..symbols import IDS from ..tokens import Doc, Span from ..tokens._retokenize import normalize_token_attrs, set_token_attrs -from ..vocab import Vocab +from ..training import Example from ..util import SimpleFrozenList, registry -from .. 
import util - +from ..vocab import Vocab +from .pipe import Pipe MatcherPatternType = List[Dict[Union[int, str], Any]] AttributeRulerPatternType = Dict[str, Union[MatcherPatternType, Dict, int]] diff --git a/spacy/pipeline/dep_parser.pyx b/spacy/pipeline/dep_parser.pyx index e5f686158..18a220bd6 100644 --- a/spacy/pipeline/dep_parser.pyx +++ b/spacy/pipeline/dep_parser.pyx @@ -1,20 +1,21 @@ -# cython: infer_types=True, profile=True, binding=True +# cython: infer_types=True, binding=True from collections import defaultdict -from typing import Optional, Iterable, Callable -from thinc.api import Model, Config +from typing import Callable, Optional + +from thinc.api import Config, Model from ._parser_internals.transition_system import TransitionSystem -from .transition_parser cimport Parser -from ._parser_internals.arc_eager cimport ArcEager -from .functions import merge_subtokens +from ._parser_internals.arc_eager cimport ArcEager +from .transition_parser cimport Parser + from ..language import Language -from ._parser_internals import nonproj -from ._parser_internals.nonproj import DELIMITER from ..scorer import Scorer from ..training import remove_bilu_prefix from ..util import registry - +from ._parser_internals import nonproj +from ._parser_internals.nonproj import DELIMITER +from .functions import merge_subtokens default_model_config = """ [model] @@ -123,6 +124,7 @@ def make_parser( scorer=scorer, ) + @Language.factory( "beam_parser", assigns=["token.dep", "token.head", "token.is_sent_start", "doc.sents"], diff --git a/spacy/pipeline/edit_tree_lemmatizer.py b/spacy/pipeline/edit_tree_lemmatizer.py index 332badd8c..4a6174bc3 100644 --- a/spacy/pipeline/edit_tree_lemmatizer.py +++ b/spacy/pipeline/edit_tree_lemmatizer.py @@ -1,24 +1,22 @@ -from typing import cast, Any, Callable, Dict, Iterable, List, Optional -from typing import Tuple from collections import Counter from itertools import islice -import numpy as np +from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, cast +import numpy as np import srsly -from thinc.api import Config, Model, SequenceCategoricalCrossentropy, NumpyOps +from thinc.api import Config, Model, NumpyOps, SequenceCategoricalCrossentropy from thinc.types import Floats2d, Ints2d -from ._edit_tree_internals.edit_trees import EditTrees -from ._edit_tree_internals.schemas import validate_edit_tree -from .lemmatizer import lemmatizer_score -from .trainable_pipe import TrainablePipe +from .. import util from ..errors import Errors from ..language import Language from ..tokens import Doc from ..training import Example, validate_examples, validate_get_examples from ..vocab import Vocab -from .. import util - +from ._edit_tree_internals.edit_trees import EditTrees +from ._edit_tree_internals.schemas import validate_edit_tree +from .lemmatizer import lemmatizer_score +from .trainable_pipe import TrainablePipe # The cutoff value of *top_k* above which an alternative method is used to process guesses. 
TOP_K_GUARDRAIL = 20 diff --git a/spacy/pipeline/entity_linker.py b/spacy/pipeline/entity_linker.py index 76ccc3247..40a9c8a79 100644 --- a/spacy/pipeline/entity_linker.py +++ b/spacy/pipeline/entity_linker.py @@ -1,25 +1,24 @@ -from typing import Optional, Iterable, Callable, Dict, Union, List, Any -from thinc.types import Floats2d -from pathlib import Path -from itertools import islice -import srsly import random -from thinc.api import CosineDistance, Model, Optimizer, Config -from thinc.api import set_dropout_rate +from itertools import islice +from pathlib import Path +from typing import Any, Callable, Dict, Iterable, List, Optional, Union + +import srsly +from thinc.api import Config, CosineDistance, Model, Optimizer, set_dropout_rate +from thinc.types import Floats2d -from ..kb import KnowledgeBase, Candidate -from ..ml import empty_kb -from ..tokens import Doc, Span -from .pipe import deserialize_config -from .legacy.entity_linker import EntityLinker_v1 -from .trainable_pipe import TrainablePipe -from ..language import Language -from ..vocab import Vocab -from ..training import Example, validate_examples, validate_get_examples -from ..errors import Errors -from ..util import SimpleFrozenList, registry from .. import util +from ..errors import Errors +from ..kb import Candidate, KnowledgeBase +from ..language import Language from ..scorer import Scorer +from ..tokens import Doc, Span +from ..training import Example, validate_examples, validate_get_examples +from ..util import SimpleFrozenList, registry +from ..vocab import Vocab +from .legacy.entity_linker import EntityLinker_v1 +from .pipe import deserialize_config +from .trainable_pipe import TrainablePipe # See #9050 BACKWARD_OVERWRITE = True @@ -105,7 +104,7 @@ def make_entity_linker( ): Function that produces a list of candidates, given a certain knowledge base and several textual mentions. generate_empty_kb (Callable[[Vocab, int], KnowledgeBase]): Callable returning empty KnowledgeBase. scorer (Optional[Callable]): The scoring method. - use_gold_ents (bool): Whether to copy entities from gold docs or not. If false, another + use_gold_ents (bool): Whether to copy entities from gold docs during training or not. If false, another component must provide entity annotations. candidates_batch_size (int): Size of batches for entity candidate generation. threshold (Optional[float]): Confidence threshold for entity predictions. If confidence is below the threshold, @@ -235,7 +234,6 @@ class EntityLinker(TrainablePipe): self.cfg: Dict[str, Any] = {"overwrite": overwrite} self.distance = CosineDistance(normalize=False) self.kb = generate_empty_kb(self.vocab, entity_vector_length) - self.scorer = scorer self.use_gold_ents = use_gold_ents self.candidates_batch_size = candidates_batch_size self.threshold = threshold @@ -243,6 +241,37 @@ class EntityLinker(TrainablePipe): if candidates_batch_size < 1: raise ValueError(Errors.E1044) + def _score_with_ents_set(examples: Iterable[Example], **kwargs): + # Because of how spaCy works, we can't just score immediately, because Language.evaluate + # calls pipe() on the predicted docs, which won't have entities if there is no NER in the pipeline. 
+ if not scorer: + return scorer + if not self.use_gold_ents: + return scorer(examples, **kwargs) + else: + examples = self._ensure_ents(examples) + docs = self.pipe( + (eg.predicted for eg in examples), + ) + for eg, doc in zip(examples, docs): + eg.predicted = doc + return scorer(examples, **kwargs) + + self.scorer = _score_with_ents_set + + def _ensure_ents(self, examples: Iterable[Example]) -> Iterable[Example]: + """If use_gold_ents is true, set the gold entities to (a copy of) eg.predicted.""" + if not self.use_gold_ents: + return examples + + new_examples = [] + for eg in examples: + ents, _ = eg.get_aligned_ents_and_ner() + new_eg = eg.copy() + new_eg.predicted.ents = ents + new_examples.append(new_eg) + return new_examples + def set_kb(self, kb_loader: Callable[[Vocab], KnowledgeBase]): """Define the KB of this pipe by providing a function that will create it using this object's vocab.""" @@ -284,11 +313,9 @@ class EntityLinker(TrainablePipe): nO = self.kb.entity_vector_length doc_sample = [] vector_sample = [] - for eg in islice(get_examples(), 10): + examples = self._ensure_ents(islice(get_examples(), 10)) + for eg in examples: doc = eg.x - if self.use_gold_ents: - ents, _ = eg.get_aligned_ents_and_ner() - doc.ents = ents doc_sample.append(doc) vector_sample.append(self.model.ops.alloc1f(nO)) assert len(doc_sample) > 0, Errors.E923.format(name=self.name) @@ -354,31 +381,17 @@ class EntityLinker(TrainablePipe): losses.setdefault(self.name, 0.0) if not examples: return losses + examples = self._ensure_ents(examples) validate_examples(examples, "EntityLinker.update") - set_dropout_rate(self.model, drop) - docs = [eg.predicted for eg in examples] - # save to restore later - old_ents = [doc.ents for doc in docs] - - for doc, ex in zip(docs, examples): - if self.use_gold_ents: - ents, _ = ex.get_aligned_ents_and_ner() - doc.ents = ents - else: - # only keep matching ents - doc.ents = ex.get_matching_ents() - # make sure we have something to learn from, if not, short-circuit if not self.batch_has_learnable_example(examples): return losses + set_dropout_rate(self.model, drop) + docs = [eg.predicted for eg in examples] sentence_encodings, bp_context = self.model.begin_update(docs) - # now restore the ents - for doc, old in zip(docs, old_ents): - doc.ents = old - loss, d_scores = self.get_loss( sentence_encodings=sentence_encodings, examples=examples ) @@ -386,11 +399,13 @@ class EntityLinker(TrainablePipe): if sgd is not None: self.finish_update(sgd) losses[self.name] += loss + return losses def get_loss(self, examples: Iterable[Example], sentence_encodings: Floats2d): validate_examples(examples, "EntityLinker.get_loss") entity_encodings = [] + # We assume that get_loss is called with gold ents set in the examples if need be eidx = 0 # indices in gold entities to keep keep_ents = [] # indices in sentence_encodings to keep diff --git a/spacy/pipeline/entityruler.py b/spacy/pipeline/entityruler.py index 6a3755533..3683cfc02 100644 --- a/spacy/pipeline/entityruler.py +++ b/spacy/pipeline/entityruler.py @@ -1,19 +1,19 @@ -from typing import Optional, Union, List, Dict, Tuple, Iterable, Any, Callable, Sequence import warnings from collections import defaultdict from pathlib import Path +from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union + import srsly -from .pipe import Pipe -from ..training import Example -from ..language import Language from ..errors import Errors, Warnings -from ..util import ensure_path, to_disk, from_disk, SimpleFrozenList, registry 
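The `use_gold_ents` handling above (`_score_with_ents_set` and `_ensure_ents`) hinges on copying the aligned gold entities onto the predicted docs, since a pipeline without NER leaves `eg.predicted.ents` empty. A minimal sketch of that idea using the same `Example` API, with invented text and labels:

```python
import spacy
from spacy.training import Example

nlp = spacy.blank("en")
doc = nlp.make_doc("Rosalind Franklin worked in London.")
eg = Example.from_dict(doc, {"entities": [(0, 17, "PERSON"), (28, 34, "GPE")]})

print(len(eg.predicted.ents))  # 0: no NER component has run on the predicted doc

# Copy the gold entities, aligned to the predicted tokenization, onto the predicted doc.
ents, _ = eg.get_aligned_ents_and_ner()
eg.predicted.ents = ents
print([(ent.text, ent.label_) for ent in eg.predicted.ents])
```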
-from ..tokens import Doc, Span +from ..language import Language from ..matcher import Matcher, PhraseMatcher from ..matcher.levenshtein import levenshtein_compare from ..scorer import get_ner_prf - +from ..tokens import Doc, Span +from ..training import Example +from ..util import SimpleFrozenList, ensure_path, from_disk, registry, to_disk +from .pipe import Pipe DEFAULT_ENT_ID_SEP = "||" PatternType = Dict[str, Union[str, List[Dict[str, Any]]]] diff --git a/spacy/pipeline/functions.py b/spacy/pipeline/functions.py index c005395bf..2bf0437d5 100644 --- a/spacy/pipeline/functions.py +++ b/spacy/pipeline/functions.py @@ -1,12 +1,13 @@ -from typing import Dict, Any -import srsly import warnings +from typing import Any, Dict +import srsly + +from .. import util from ..errors import Warnings from ..language import Language from ..matcher import Matcher from ..tokens import Doc -from .. import util @Language.component( diff --git a/spacy/pipeline/legacy/entity_linker.py b/spacy/pipeline/legacy/entity_linker.py index c14dfa1db..1e46db019 100644 --- a/spacy/pipeline/legacy/entity_linker.py +++ b/spacy/pipeline/legacy/entity_linker.py @@ -1,28 +1,28 @@ # This file is present to provide a prior version of the EntityLinker component # for backwards compatability. For details see #9669. -from typing import Optional, Iterable, Callable, Dict, Union, List, Any -from thinc.types import Floats2d -from pathlib import Path -from itertools import islice -import srsly import random -from thinc.api import CosineDistance, Model, Optimizer -from thinc.api import set_dropout_rate import warnings +from itertools import islice +from pathlib import Path +from typing import Any, Callable, Dict, Iterable, List, Optional, Union -from ...kb import KnowledgeBase, Candidate +import srsly +from thinc.api import CosineDistance, Model, Optimizer, set_dropout_rate +from thinc.types import Floats2d + +from ... import util +from ...errors import Errors, Warnings +from ...kb import Candidate, KnowledgeBase +from ...language import Language from ...ml import empty_kb +from ...scorer import Scorer from ...tokens import Doc, Span +from ...training import Example, validate_examples, validate_get_examples +from ...util import SimpleFrozenList +from ...vocab import Vocab from ..pipe import deserialize_config from ..trainable_pipe import TrainablePipe -from ...language import Language -from ...vocab import Vocab -from ...training import Example, validate_examples, validate_get_examples -from ...errors import Errors, Warnings -from ...util import SimpleFrozenList -from ... import util -from ...scorer import Scorer # See #9050 BACKWARD_OVERWRITE = True diff --git a/spacy/pipeline/lemmatizer.py b/spacy/pipeline/lemmatizer.py index 9c2fc2f09..09e501595 100644 --- a/spacy/pipeline/lemmatizer.py +++ b/spacy/pipeline/lemmatizer.py @@ -1,19 +1,19 @@ -from typing import Optional, List, Dict, Any, Callable, Iterable, Union, Tuple -from thinc.api import Model -from pathlib import Path - import warnings +from pathlib import Path +from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union -from .pipe import Pipe +from thinc.api import Model + +from .. 
import util from ..errors import Errors, Warnings from ..language import Language -from ..training import Example from ..lookups import Lookups, load_lookups from ..scorer import Scorer from ..tokens import Doc, Token +from ..training import Example +from ..util import SimpleFrozenList, logger, registry from ..vocab import Vocab -from ..util import logger, SimpleFrozenList, registry -from .. import util +from .pipe import Pipe @Language.factory( diff --git a/spacy/pipeline/morphologizer.pyx b/spacy/pipeline/morphologizer.pyx index be8f82212..d415ae43c 100644 --- a/spacy/pipeline/morphologizer.pyx +++ b/spacy/pipeline/morphologizer.pyx @@ -1,23 +1,21 @@ -# cython: infer_types=True, profile=True, binding=True -from typing import Optional, Union, Dict, Callable -import srsly -from thinc.api import SequenceCategoricalCrossentropy, Model, Config +# cython: infer_types=True, binding=True from itertools import islice +from typing import Callable, Dict, Optional, Union +from thinc.api import Config, Model, SequenceCategoricalCrossentropy + +from ..morphology cimport Morphology from ..tokens.doc cimport Doc from ..vocab cimport Vocab -from ..morphology cimport Morphology -from ..parts_of_speech import IDS as POS_IDS -from ..symbols import POS -from ..language import Language -from ..errors import Errors -from .pipe import deserialize_config -from .tagger import Tagger from .. import util +from ..errors import Errors +from ..language import Language +from ..parts_of_speech import IDS as POS_IDS from ..scorer import Scorer from ..training import validate_examples, validate_get_examples from ..util import registry +from .tagger import Tagger # See #9050 BACKWARD_OVERWRITE = True @@ -75,8 +73,11 @@ def morphologizer_score(examples, **kwargs): results = {} results.update(Scorer.score_token_attr(examples, "pos", **kwargs)) results.update(Scorer.score_token_attr(examples, "morph", getter=morph_key_getter, **kwargs)) - results.update(Scorer.score_token_attr_per_feat(examples, - "morph", getter=morph_key_getter, **kwargs)) + results.update( + Scorer.score_token_attr_per_feat( + examples, "morph", getter=morph_key_getter, **kwargs + ) + ) return results @@ -232,7 +233,6 @@ class Morphologizer(Tagger): if isinstance(docs, Doc): docs = [docs] cdef Doc doc - cdef Vocab vocab = self.vocab cdef bint overwrite = self.cfg["overwrite"] cdef bint extend = self.cfg["extend"] labels = self.labels diff --git a/spacy/pipeline/multitask.pyx b/spacy/pipeline/multitask.pyx index 8c44061e2..f33a90fde 100644 --- a/spacy/pipeline/multitask.pyx +++ b/spacy/pipeline/multitask.pyx @@ -1,19 +1,15 @@ -# cython: infer_types=True, profile=True, binding=True +# cython: infer_types=True, binding=True from typing import Optional + import numpy -from thinc.api import CosineDistance, to_categorical, Model, Config -from thinc.api import set_dropout_rate +from thinc.api import Config, CosineDistance, Model, set_dropout_rate, to_categorical -from ..tokens.doc cimport Doc - -from .trainable_pipe import TrainablePipe -from .tagger import Tagger -from ..training import validate_examples -from ..language import Language -from ._parser_internals import nonproj -from ..attrs import POS, ID +from ..attrs import ID from ..errors import Errors - +from ..language import Language +from ..training import validate_examples +from .tagger import Tagger +from .trainable_pipe import TrainablePipe default_model_config = """ [model] @@ -104,10 +100,9 @@ class MultitaskObjective(Tagger): cdef int idx = 0 correct = numpy.zeros((scores.shape[0],), dtype="i") 
guesses = scores.argmax(axis=1) - docs = [eg.predicted for eg in examples] for i, eg in enumerate(examples): # Handles alignment for tokenization differences - doc_annots = eg.get_aligned() # TODO + _doc_annots = eg.get_aligned() # TODO for j in range(len(eg.predicted)): tok_annots = {key: values[j] for key, values in tok_annots.items()} label = self.make_label(j, tok_annots) @@ -207,7 +202,6 @@ class ClozeMultitask(TrainablePipe): losses[self.name] = 0. set_dropout_rate(self.model, drop) validate_examples(examples, "ClozeMultitask.rehearse") - docs = [eg.predicted for eg in examples] predictions, bp_predictions = self.model.begin_update() loss, d_predictions = self.get_loss(examples, self.vocab.vectors.data, predictions) bp_predictions(d_predictions) diff --git a/spacy/pipeline/ner.pyx b/spacy/pipeline/ner.pyx index 25f48c9f8..bb009dc7a 100644 --- a/spacy/pipeline/ner.pyx +++ b/spacy/pipeline/ner.pyx @@ -1,16 +1,18 @@ -# cython: infer_types=True, profile=True, binding=True +# cython: infer_types=True, binding=True from collections import defaultdict -from typing import Optional, Iterable, Callable -from thinc.api import Model, Config +from typing import Callable, Optional + +from thinc.api import Config, Model from ._parser_internals.transition_system import TransitionSystem -from .transition_parser cimport Parser -from ._parser_internals.ner cimport BiluoPushDown -from ..language import Language -from ..scorer import get_ner_prf, PRFScore -from ..util import registry -from ..training import remove_bilu_prefix +from ._parser_internals.ner cimport BiluoPushDown +from .transition_parser cimport Parser + +from ..language import Language +from ..scorer import get_ner_prf +from ..training import remove_bilu_prefix +from ..util import registry default_model_config = """ [model] @@ -98,6 +100,7 @@ def make_ner( scorer=scorer, ) + @Language.factory( "beam_ner", assigns=["doc.ents", "token.ent_iob", "token.ent_type"], diff --git a/spacy/pipeline/pipe.pyi b/spacy/pipeline/pipe.pyi index 9dd6a9d50..9a1c11cef 100644 --- a/spacy/pipeline/pipe.pyi +++ b/spacy/pipeline/pipe.pyi @@ -1,11 +1,20 @@ from pathlib import Path -from typing import Any, Callable, Dict, Iterable, Iterator, List -from typing import NoReturn, Optional, Tuple, Union +from typing import ( + Any, + Callable, + Dict, + Iterable, + Iterator, + List, + NoReturn, + Optional, + Tuple, + Union, +) -from ..tokens.doc import Doc - -from ..training import Example from ..language import Language +from ..tokens.doc import Doc +from ..training import Example class Pipe: def __call__(self, doc: Doc) -> Doc: ... diff --git a/spacy/pipeline/pipe.pyx b/spacy/pipeline/pipe.pyx index 8407acc45..72ea7e45a 100644 --- a/spacy/pipeline/pipe.pyx +++ b/spacy/pipeline/pipe.pyx @@ -1,15 +1,17 @@ -# cython: infer_types=True, profile=True, binding=True -from typing import Optional, Tuple, Iterable, Iterator, Callable, Union, Dict -import srsly +# cython: infer_types=True, binding=True import warnings +from typing import Callable, Dict, Iterable, Iterator, Tuple, Union + +import srsly from ..tokens.doc cimport Doc -from ..training import Example from ..errors import Errors, Warnings from ..language import Language +from ..training import Example from ..util import raise_error + cdef class Pipe: """This class is a base class and not instantiated directly. It provides an interface for pipeline components to implement. 
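For context on the `Pipe` interface whose `pipe()` and `initialize()` signatures are touched in the next hunks, here is a minimal stateless component built with the public decorator API instead of subclassing; the component name and extension attribute are invented for the example:

```python
import spacy
from spacy.language import Language
from spacy.tokens import Doc

Doc.set_extension("long_tokens", default=[])

@Language.component("mark_long_tokens")
def mark_long_tokens(doc: Doc) -> Doc:
    # A component only has to accept and return a Doc; spaCy's machinery provides
    # batching (pipe) and error handling around the call.
    doc._.long_tokens = [t.text for t in doc if len(t.text) > 6]
    return doc

nlp = spacy.blank("en")
nlp.add_pipe("mark_long_tokens")
doc = nlp("Pipeline components receive and return Doc objects.")
print(doc._.long_tokens)
```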
@@ -38,7 +40,7 @@ cdef class Pipe: """ raise NotImplementedError(Errors.E931.format(parent="Pipe", method="__call__", name=self.name)) - def pipe(self, stream: Iterable[Doc], *, batch_size: int=128) -> Iterator[Doc]: + def pipe(self, stream: Iterable[Doc], *, batch_size: int = 128) -> Iterator[Doc]: """Apply the pipe to a stream of documents. This usually happens under the hood when the nlp object is called on a text and all components are applied to the Doc. @@ -57,7 +59,7 @@ cdef class Pipe: except Exception as e: error_handler(self.name, self, [doc], e) - def initialize(self, get_examples: Callable[[], Iterable[Example]], *, nlp: Language=None): + def initialize(self, get_examples: Callable[[], Iterable[Example]], *, nlp: Language = None): """Initialize the pipe. For non-trainable components, this method is optional. For trainable components, which should inherit from the subclass TrainablePipe, the provided data examples diff --git a/spacy/pipeline/sentencizer.pyx b/spacy/pipeline/sentencizer.pyx index 77f4e8adb..08ba9d989 100644 --- a/spacy/pipeline/sentencizer.pyx +++ b/spacy/pipeline/sentencizer.pyx @@ -1,18 +1,19 @@ -# cython: infer_types=True, profile=True, binding=True -from typing import Optional, List, Callable +# cython: infer_types=True, binding=True +from typing import Callable, List, Optional + import srsly from ..tokens.doc cimport Doc +from .. import util +from ..language import Language from .pipe import Pipe from .senter import senter_score -from ..language import Language -from ..scorer import Scorer -from .. import util # see #9050 BACKWARD_OVERWRITE = False + @Language.factory( "sentencizer", assigns=["token.is_sent_start", "doc.sents"], @@ -35,17 +36,19 @@ class Sentencizer(Pipe): DOCS: https://spacy.io/api/sentencizer """ - default_punct_chars = ['!', '.', '?', '։', '؟', '۔', '܀', '܁', '܂', '߹', - '।', '॥', '၊', '။', '።', '፧', '፨', '᙮', '᜵', '᜶', '᠃', '᠉', '᥄', - '᥅', '᪨', '᪩', '᪪', '᪫', '᭚', '᭛', '᭞', '᭟', '᰻', '᰼', '᱾', '᱿', - '‼', '‽', '⁇', '⁈', '⁉', '⸮', '⸼', '꓿', '꘎', '꘏', '꛳', '꛷', '꡶', - '꡷', '꣎', '꣏', '꤯', '꧈', '꧉', '꩝', '꩞', '꩟', '꫰', '꫱', '꯫', '﹒', - '﹖', '﹗', '!', '.', '?', '𐩖', '𐩗', '𑁇', '𑁈', '𑂾', '𑂿', '𑃀', - '𑃁', '𑅁', '𑅂', '𑅃', '𑇅', '𑇆', '𑇍', '𑇞', '𑇟', '𑈸', '𑈹', '𑈻', '𑈼', - '𑊩', '𑑋', '𑑌', '𑗂', '𑗃', '𑗉', '𑗊', '𑗋', '𑗌', '𑗍', '𑗎', '𑗏', '𑗐', - '𑗑', '𑗒', '𑗓', '𑗔', '𑗕', '𑗖', '𑗗', '𑙁', '𑙂', '𑜼', '𑜽', '𑜾', '𑩂', - '𑩃', '𑪛', '𑪜', '𑱁', '𑱂', '𖩮', '𖩯', '𖫵', '𖬷', '𖬸', '𖭄', '𛲟', '𝪈', - '。', '。'] + default_punct_chars = [ + '!', '.', '?', '։', '؟', '۔', '܀', '܁', '܂', '߹', + '।', '॥', '၊', '။', '።', '፧', '፨', '᙮', '᜵', '᜶', '᠃', '᠉', '᥄', + '᥅', '᪨', '᪩', '᪪', '᪫', '᭚', '᭛', '᭞', '᭟', '᰻', '᰼', '᱾', '᱿', + '‼', '‽', '⁇', '⁈', '⁉', '⸮', '⸼', '꓿', '꘎', '꘏', '꛳', '꛷', '꡶', + '꡷', '꣎', '꣏', '꤯', '꧈', '꧉', '꩝', '꩞', '꩟', '꫰', '꫱', '꯫', '﹒', + '﹖', '﹗', '!', '.', '?', '𐩖', '𐩗', '𑁇', '𑁈', '𑂾', '𑂿', '𑃀', + '𑃁', '𑅁', '𑅂', '𑅃', '𑇅', '𑇆', '𑇍', '𑇞', '𑇟', '𑈸', '𑈹', '𑈻', '𑈼', + '𑊩', '𑑋', '𑑌', '𑗂', '𑗃', '𑗉', '𑗊', '𑗋', '𑗌', '𑗍', '𑗎', '𑗏', '𑗐', + '𑗑', '𑗒', '𑗓', '𑗔', '𑗕', '𑗖', '𑗗', '𑙁', '𑙂', '𑜼', '𑜽', '𑜾', '𑩂', + '𑩃', '𑪛', '𑪜', '𑱁', '𑱂', '𖩮', '𖩯', '𖫵', '𖬷', '𖬸', '𖭄', '𛲟', '𝪈', + '。', '。' + ] def __init__( self, @@ -127,7 +130,6 @@ class Sentencizer(Pipe): if isinstance(docs, Doc): docs = [docs] cdef Doc doc - cdef int idx = 0 for i, doc in enumerate(docs): doc_tag_ids = batch_tag_ids[i] for j, tag_id in enumerate(doc_tag_ids): @@ -168,7 +170,6 @@ class Sentencizer(Pipe): path = path.with_suffix(".json") srsly.write_json(path, {"punct_chars": list(self.punct_chars), "overwrite": 
self.overwrite}) - def from_disk(self, path, *, exclude=tuple()): """Load the sentencizer from disk. diff --git a/spacy/pipeline/senter.pyx b/spacy/pipeline/senter.pyx index 6808fe70e..df093baa9 100644 --- a/spacy/pipeline/senter.pyx +++ b/spacy/pipeline/senter.pyx @@ -1,19 +1,18 @@ -# cython: infer_types=True, profile=True, binding=True -from typing import Optional, Callable +# cython: infer_types=True, binding=True from itertools import islice +from typing import Callable, Optional -import srsly -from thinc.api import Model, SequenceCategoricalCrossentropy, Config +from thinc.api import Config, Model, SequenceCategoricalCrossentropy from ..tokens.doc cimport Doc -from .tagger import Tagger -from ..language import Language +from .. import util from ..errors import Errors +from ..language import Language from ..scorer import Scorer from ..training import validate_examples, validate_get_examples from ..util import registry -from .. import util +from .tagger import Tagger # See #9050 BACKWARD_OVERWRITE = False diff --git a/spacy/pipeline/span_finder.py b/spacy/pipeline/span_finder.py index da3c38430..a12d52911 100644 --- a/spacy/pipeline/span_finder.py +++ b/spacy/pipeline/span_finder.py @@ -3,15 +3,14 @@ from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple from thinc.api import Config, Model, Optimizer, set_dropout_rate from thinc.types import Floats2d +from ..errors import Errors from ..language import Language -from .trainable_pipe import TrainablePipe from ..scorer import Scorer from ..tokens import Doc, Span from ..training import Example -from ..errors import Errors - from ..util import registry from .spancat import DEFAULT_SPANS_KEY +from .trainable_pipe import TrainablePipe span_finder_default_config = """ [model] @@ -49,14 +48,14 @@ DEFAULT_SPAN_FINDER_MODEL = Config().from_str(span_finder_default_config)["model "threshold": 0.5, "model": DEFAULT_SPAN_FINDER_MODEL, "spans_key": DEFAULT_SPANS_KEY, - "max_length": None, + "max_length": 25, "min_length": None, "scorer": {"@scorers": "spacy.span_finder_scorer.v1"}, }, default_score_weights={ - f"span_finder_{DEFAULT_SPANS_KEY}_f": 1.0, - f"span_finder_{DEFAULT_SPANS_KEY}_p": 0.0, - f"span_finder_{DEFAULT_SPANS_KEY}_r": 0.0, + f"spans_{DEFAULT_SPANS_KEY}_f": 1.0, + f"spans_{DEFAULT_SPANS_KEY}_p": 0.0, + f"spans_{DEFAULT_SPANS_KEY}_r": 0.0, }, ) def make_span_finder( @@ -105,7 +104,7 @@ def make_span_finder_scorer(): def span_finder_score(examples: Iterable[Example], **kwargs) -> Dict[str, Any]: kwargs = dict(kwargs) - attr_prefix = "span_finder_" + attr_prefix = "spans_" key = kwargs["spans_key"] kwargs.setdefault("attr", f"{attr_prefix}{key}") kwargs.setdefault( diff --git a/spacy/pipeline/span_ruler.py b/spacy/pipeline/span_ruler.py index b0669c0ef..2a5e2179a 100644 --- a/spacy/pipeline/span_ruler.py +++ b/spacy/pipeline/span_ruler.py @@ -1,20 +1,32 @@ -from typing import Optional, Union, List, Dict, Tuple, Iterable, Any, Callable -from typing import Sequence, Set, cast import warnings from functools import partial from pathlib import Path +from typing import ( + Any, + Callable, + Dict, + Iterable, + List, + Optional, + Sequence, + Set, + Tuple, + Union, + cast, +) + import srsly -from .pipe import Pipe -from ..training import Example -from ..language import Language +from .. 
import util from ..errors import Errors, Warnings -from ..util import ensure_path, SimpleFrozenList, registry -from ..tokens import Doc, Span -from ..scorer import Scorer +from ..language import Language from ..matcher import Matcher, PhraseMatcher from ..matcher.levenshtein import levenshtein_compare -from .. import util +from ..scorer import Scorer +from ..tokens import Doc, Span +from ..training import Example +from ..util import SimpleFrozenList, ensure_path, registry +from .pipe import Pipe PatternType = Dict[str, Union[str, List[Dict[str, Any]]]] DEFAULT_SPANS_KEY = "ruler" diff --git a/spacy/pipeline/tagger.pyx b/spacy/pipeline/tagger.pyx index 4d5d78035..34e85d49c 100644 --- a/spacy/pipeline/tagger.pyx +++ b/spacy/pipeline/tagger.pyx @@ -1,26 +1,19 @@ -# cython: infer_types=True, profile=True, binding=True -from typing import Callable, Optional -import numpy -import srsly -from thinc.api import Model, set_dropout_rate, SequenceCategoricalCrossentropy, Config -from thinc.types import Floats2d -import warnings +# cython: infer_types=True, binding=True from itertools import islice +from typing import Callable, Optional + +import numpy +from thinc.api import Config, Model, SequenceCategoricalCrossentropy, set_dropout_rate from ..tokens.doc cimport Doc -from ..morphology cimport Morphology -from ..vocab cimport Vocab -from .trainable_pipe import TrainablePipe -from .pipe import deserialize_config +from .. import util +from ..errors import Errors from ..language import Language -from ..attrs import POS, ID -from ..parts_of_speech import X -from ..errors import Errors, Warnings from ..scorer import Scorer from ..training import validate_examples, validate_get_examples from ..util import registry -from .. import util +from .trainable_pipe import TrainablePipe # See #9050 BACKWARD_OVERWRITE = False @@ -168,7 +161,6 @@ class Tagger(TrainablePipe): if isinstance(docs, Doc): docs = [docs] cdef Doc doc - cdef Vocab vocab = self.vocab cdef bint overwrite = self.cfg["overwrite"] labels = self.labels for i, doc in enumerate(docs): diff --git a/spacy/pipeline/textcat.py b/spacy/pipeline/textcat.py index 650a01949..ae227017a 100644 --- a/spacy/pipeline/textcat.py +++ b/spacy/pipeline/textcat.py @@ -1,18 +1,18 @@ -from typing import Iterable, Tuple, Optional, Dict, List, Callable, Any -from thinc.api import get_array_module, Model, Optimizer, set_dropout_rate, Config -from thinc.types import Floats2d -import numpy from itertools import islice +from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple + +import numpy +from thinc.api import Config, Model, Optimizer, get_array_module, set_dropout_rate +from thinc.types import Floats2d -from .trainable_pipe import TrainablePipe -from ..language import Language -from ..training import Example, validate_examples, validate_get_examples from ..errors import Errors +from ..language import Language from ..scorer import Scorer from ..tokens import Doc +from ..training import Example, validate_examples, validate_get_examples from ..util import registry from ..vocab import Vocab - +from .trainable_pipe import TrainablePipe single_label_default_config = """ [model] @@ -36,8 +36,9 @@ maxout_pieces = 3 depth = 2 [model.linear_model] -@architectures = "spacy.TextCatBOW.v2" +@architectures = "spacy.TextCatBOW.v3" exclusive_classes = true +length = 262144 ngram_size = 1 no_output_layer = false """ @@ -45,16 +46,21 @@ DEFAULT_SINGLE_TEXTCAT_MODEL = Config().from_str(single_label_default_config)["m single_label_bow_config = """ [model] -@architectures = 
"spacy.TextCatBOW.v2" +@architectures = "spacy.TextCatBOW.v3" exclusive_classes = true +length = 262144 ngram_size = 1 no_output_layer = false """ single_label_cnn_config = """ [model] -@architectures = "spacy.TextCatCNN.v2" +@architectures = "spacy.TextCatReduce.v1" exclusive_classes = true +use_reduce_first = false +use_reduce_last = false +use_reduce_max = false +use_reduce_mean = true [model.tok2vec] @architectures = "spacy.HashEmbedCNN.v2" diff --git a/spacy/pipeline/textcat_multilabel.py b/spacy/pipeline/textcat_multilabel.py index 41c0e2f63..2f8d5e604 100644 --- a/spacy/pipeline/textcat_multilabel.py +++ b/spacy/pipeline/textcat_multilabel.py @@ -1,19 +1,18 @@ -from typing import Iterable, Optional, Dict, List, Callable, Any -from thinc.types import Floats2d -from thinc.api import Model, Config - from itertools import islice +from typing import Any, Callable, Dict, Iterable, List, Optional + +from thinc.api import Config, Model +from thinc.types import Floats2d -from ..language import Language -from ..training import Example, validate_get_examples from ..errors import Errors +from ..language import Language from ..scorer import Scorer from ..tokens import Doc +from ..training import Example, validate_get_examples from ..util import registry from ..vocab import Vocab from .textcat import TextCategorizer - multi_label_default_config = """ [model] @architectures = "spacy.TextCatEnsemble.v2" @@ -36,8 +35,9 @@ maxout_pieces = 3 depth = 2 [model.linear_model] -@architectures = "spacy.TextCatBOW.v2" +@architectures = "spacy.TextCatBOW.v3" exclusive_classes = false +length = 262144 ngram_size = 1 no_output_layer = false """ @@ -45,7 +45,7 @@ DEFAULT_MULTI_TEXTCAT_MODEL = Config().from_str(multi_label_default_config)["mod multi_label_bow_config = """ [model] -@architectures = "spacy.TextCatBOW.v2" +@architectures = "spacy.TextCatBOW.v3" exclusive_classes = false ngram_size = 1 no_output_layer = false @@ -53,8 +53,12 @@ no_output_layer = false multi_label_cnn_config = """ [model] -@architectures = "spacy.TextCatCNN.v2" +@architectures = "spacy.TextCatReduce.v1" exclusive_classes = false +use_reduce_first = false +use_reduce_last = false +use_reduce_max = false +use_reduce_mean = true [model.tok2vec] @architectures = "spacy.HashEmbedCNN.v2" diff --git a/spacy/pipeline/tok2vec.py b/spacy/pipeline/tok2vec.py index c742aaeaa..677f5eec1 100644 --- a/spacy/pipeline/tok2vec.py +++ b/spacy/pipeline/tok2vec.py @@ -1,13 +1,14 @@ -from typing import Sequence, Iterable, Optional, Dict, Callable, List, Any -from thinc.api import Model, set_dropout_rate, Optimizer, Config from itertools import islice +from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence + +from thinc.api import Config, Model, Optimizer, set_dropout_rate -from .trainable_pipe import TrainablePipe -from ..training import Example, validate_examples, validate_get_examples -from ..tokens import Doc -from ..vocab import Vocab -from ..language import Language from ..errors import Errors +from ..language import Language +from ..tokens import Doc +from ..training import Example, validate_examples, validate_get_examples +from ..vocab import Vocab +from .trainable_pipe import TrainablePipe default_model_config = """ [model] diff --git a/spacy/pipeline/trainable_pipe.pxd b/spacy/pipeline/trainable_pipe.pxd index 65daa8b22..b1d2550a1 100644 --- a/spacy/pipeline/trainable_pipe.pxd +++ b/spacy/pipeline/trainable_pipe.pxd @@ -1,5 +1,6 @@ -from .pipe cimport Pipe from ..vocab cimport Vocab +from .pipe cimport Pipe + cdef class 
TrainablePipe(Pipe): cdef public Vocab vocab diff --git a/spacy/pipeline/trainable_pipe.pyx b/spacy/pipeline/trainable_pipe.pyx index 3f0507d4b..8f219b327 100644 --- a/spacy/pipeline/trainable_pipe.pyx +++ b/spacy/pipeline/trainable_pipe.pyx @@ -1,17 +1,17 @@ -# cython: infer_types=True, profile=True, binding=True -from typing import Iterable, Iterator, Optional, Dict, Tuple, Callable +# cython: infer_types=True, binding=True +from typing import Callable, Dict, Iterable, Iterator, Optional, Tuple + import srsly -from thinc.api import set_dropout_rate, Model, Optimizer +from thinc.api import Model, Optimizer, set_dropout_rate from ..tokens.doc cimport Doc -from ..training import validate_examples -from ..errors import Errors -from .pipe import Pipe, deserialize_config from .. import util -from ..vocab import Vocab +from ..errors import Errors from ..language import Language -from ..training import Example +from ..training import Example, validate_examples +from ..vocab import Vocab +from .pipe import Pipe, deserialize_config cdef class TrainablePipe(Pipe): @@ -55,7 +55,7 @@ cdef class TrainablePipe(Pipe): except Exception as e: error_handler(self.name, self, [doc], e) - def pipe(self, stream: Iterable[Doc], *, batch_size: int=128) -> Iterator[Doc]: + def pipe(self, stream: Iterable[Doc], *, batch_size: int = 128) -> Iterator[Doc]: """Apply the pipe to a stream of documents. This usually happens under the hood when the nlp object is called on a text and all components are applied to the Doc. @@ -102,9 +102,9 @@ cdef class TrainablePipe(Pipe): def update(self, examples: Iterable["Example"], *, - drop: float=0.0, - sgd: Optimizer=None, - losses: Optional[Dict[str, float]]=None) -> Dict[str, float]: + drop: float = 0.0, + sgd: Optimizer = None, + losses: Optional[Dict[str, float]] = None) -> Dict[str, float]: """Learn from a batch of documents and gold-standard information, updating the pipe's model. Delegates to predict and get_loss. @@ -138,8 +138,8 @@ cdef class TrainablePipe(Pipe): def rehearse(self, examples: Iterable[Example], *, - sgd: Optimizer=None, - losses: Dict[str, float]=None, + sgd: Optimizer = None, + losses: Dict[str, float] = None, **config) -> Dict[str, float]: """Perform a "rehearsal" update from a batch of data. Rehearsal updates teach the current model to make predictions similar to an initial model, @@ -177,7 +177,7 @@ cdef class TrainablePipe(Pipe): """ return util.create_default_optimizer() - def initialize(self, get_examples: Callable[[], Iterable[Example]], *, nlp: Language=None): + def initialize(self, get_examples: Callable[[], Iterable[Example]], *, nlp: Language = None): """Initialize the pipe for training, using data examples if available. 
This method needs to be implemented by each TrainablePipe component, ensuring the internal model (if available) is initialized properly diff --git a/spacy/pipeline/transition_parser.pxd b/spacy/pipeline/transition_parser.pxd index 1521fde60..7ddb91e01 100644 --- a/spacy/pipeline/transition_parser.pxd +++ b/spacy/pipeline/transition_parser.pxd @@ -1,11 +1,11 @@ from cymem.cymem cimport Pool from thinc.backends.cblas cimport CBlas +from ..ml.parser_model cimport ActivationsC, SizesC, WeightsC from ..vocab cimport Vocab -from .trainable_pipe cimport TrainablePipe -from ._parser_internals.transition_system cimport Transition, TransitionSystem from ._parser_internals._state cimport StateC -from ..ml.parser_model cimport WeightsC, ActivationsC, SizesC +from ._parser_internals.transition_system cimport Transition, TransitionSystem +from .trainable_pipe cimport TrainablePipe cdef class Parser(TrainablePipe): @@ -13,8 +13,18 @@ cdef class Parser(TrainablePipe): cdef readonly TransitionSystem moves cdef public object _multitasks - cdef void _parseC(self, CBlas cblas, StateC** states, - WeightsC weights, SizesC sizes) nogil + cdef void _parseC( + self, + CBlas cblas, + StateC** states, + WeightsC weights, + SizesC sizes + ) nogil - cdef void c_transition_batch(self, StateC** states, const float* scores, - int nr_class, int batch_size) nogil + cdef void c_transition_batch( + self, + StateC** states, + const float* scores, + int nr_class, + int batch_size + ) nogil diff --git a/spacy/pipeline/transition_parser.pyx b/spacy/pipeline/transition_parser.pyx index 1327db2ce..9a278fc13 100644 --- a/spacy/pipeline/transition_parser.pyx +++ b/spacy/pipeline/transition_parser.pyx @@ -1,34 +1,46 @@ # cython: infer_types=True, cdivision=True, boundscheck=False, binding=True +# cython: profile=False from __future__ import print_function -from cymem.cymem cimport Pool + cimport numpy as np +from cymem.cymem cimport Pool + from itertools import islice -from libcpp.vector cimport vector -from libc.string cimport memset, memcpy + from libc.stdlib cimport calloc, free +from libc.string cimport memset +from libcpp.vector cimport vector + import random -import srsly -from thinc.api import get_ops, set_dropout_rate, CupyOps, NumpyOps -from thinc.extra.search cimport Beam -import numpy.random import numpy -import warnings +import numpy.random +import srsly +from thinc.api import CupyOps, NumpyOps, set_dropout_rate -from ._parser_internals.stateclass cimport StateClass -from ..ml.parser_model cimport alloc_activations, free_activations -from ..ml.parser_model cimport predict_states, arg_max_if_valid -from ..ml.parser_model cimport WeightsC, ActivationsC, SizesC, cpu_log_loss -from ..ml.parser_model cimport get_c_weights, get_c_sizes +from ..ml.parser_model cimport ( + ActivationsC, + SizesC, + WeightsC, + alloc_activations, + arg_max_if_valid, + cpu_log_loss, + free_activations, + get_c_sizes, + get_c_weights, + predict_states, +) from ..tokens.doc cimport Doc +from ._parser_internals.stateclass cimport StateClass + from .trainable_pipe import TrainablePipe + from ._parser_internals cimport _beam_utils -from ._parser_internals import _beam_utils -from ..training import validate_examples, validate_get_examples -from ..errors import Errors, Warnings from .. 
import util - +from ..errors import Errors +from ..training import validate_examples, validate_get_examples +from ._parser_internals import _beam_utils NUMPY_OPS = NumpyOps() @@ -242,7 +254,6 @@ cdef class Parser(TrainablePipe): except Exception as e: error_handler(self.name, self, batch_in_order, e) - def predict(self, docs): if isinstance(docs, Doc): docs = [docs] @@ -284,8 +295,6 @@ cdef class Parser(TrainablePipe): return batch def beam_parse(self, docs, int beam_width, float drop=0., beam_density=0.): - cdef Beam beam - cdef Doc doc self._ensure_labels_are_added(docs) batch = _beam_utils.BeamBatch( self.moves, @@ -305,16 +314,18 @@ cdef class Parser(TrainablePipe): del model return list(batch) - cdef void _parseC(self, CBlas cblas, StateC** states, - WeightsC weights, SizesC sizes) nogil: - cdef int i, j + cdef void _parseC( + self, CBlas cblas, StateC** states, WeightsC weights, SizesC sizes + ) nogil: + cdef int i cdef vector[StateC*] unfinished cdef ActivationsC activations = alloc_activations(sizes) while sizes.states >= 1: predict_states(cblas, &activations, states, &weights, sizes) # Validate actions, argmax, take action. - self.c_transition_batch(states, - activations.scores, sizes.classes, sizes.states) + self.c_transition_batch( + states, activations.scores, sizes.classes, sizes.states + ) for i in range(sizes.states): if not states[i].is_final(): unfinished.push_back(states[i]) @@ -326,7 +337,6 @@ cdef class Parser(TrainablePipe): def set_annotations(self, docs, states_or_beams): cdef StateClass state - cdef Beam beam cdef Doc doc states = _beam_utils.collect_states(states_or_beams, docs) for i, (state, doc) in enumerate(zip(states, docs)): @@ -343,8 +353,13 @@ cdef class Parser(TrainablePipe): self.c_transition_batch(&c_states[0], c_scores, scores.shape[1], scores.shape[0]) return [state for state in states if not state.c.is_final()] - cdef void c_transition_batch(self, StateC** states, const float* scores, - int nr_class, int batch_size) nogil: + cdef void c_transition_batch( + self, + StateC** states, + const float* scores, + int nr_class, + int batch_size + ) nogil: # n_moves should not be zero at this point, but make sure to avoid zero-length mem alloc with gil: assert self.moves.n_moves > 0, Errors.E924.format(name=self.name) @@ -364,7 +379,6 @@ cdef class Parser(TrainablePipe): free(is_valid) def update(self, examples, *, drop=0., sgd=None, losses=None): - cdef StateClass state if losses is None: losses = {} losses.setdefault(self.name, 0.) 
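The parser `update()` entered just above follows the usual trainable-component contract: a batch of `Example`s plus optional `drop`, `sgd` and a `losses` dict that accumulates per-component loss. A compact sketch of driving that contract from user code, using a tiny invented textcat dataset (any trainable component is updated the same way):

```python
import spacy
from spacy.training import Example

nlp = spacy.blank("en")
textcat = nlp.add_pipe("textcat")
textcat.add_label("POSITIVE")
textcat.add_label("NEGATIVE")

train_data = [
    ("I loved this", {"cats": {"POSITIVE": 1.0, "NEGATIVE": 0.0}}),
    ("I hated this", {"cats": {"POSITIVE": 0.0, "NEGATIVE": 1.0}}),
]
examples = [Example.from_dict(nlp.make_doc(text), ann) for text, ann in train_data]

optimizer = nlp.initialize(lambda: examples)
losses = {}
for _ in range(5):
    nlp.update(examples, drop=0.2, sgd=optimizer, losses=losses)
print(losses)  # accumulated loss keyed by component name, e.g. {"textcat": ...}
```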
@@ -403,8 +417,7 @@ cdef class Parser(TrainablePipe): if not states: return losses model, backprop_tok2vec = self.model.begin_update([eg.x for eg in examples]) - - all_states = list(states) + states_golds = list(zip(states, golds)) n_moves = 0 while states_golds: @@ -484,8 +497,16 @@ cdef class Parser(TrainablePipe): del tutor return losses - def update_beam(self, examples, *, beam_width, - drop=0., sgd=None, losses=None, beam_density=0.0): + def update_beam( + self, + examples, + *, + beam_width, + drop=0., + sgd=None, + losses=None, + beam_density=0.0 + ): states, golds, _ = self.moves.init_gold_batch(examples) if not states: return losses @@ -515,8 +536,9 @@ cdef class Parser(TrainablePipe): is_valid = mem.alloc(self.moves.n_moves, sizeof(int)) costs = mem.alloc(self.moves.n_moves, sizeof(float)) - cdef np.ndarray d_scores = numpy.zeros((len(states), self.moves.n_moves), - dtype='f', order='C') + cdef np.ndarray d_scores = numpy.zeros( + (len(states), self.moves.n_moves), dtype='f', order='C' + ) c_d_scores = d_scores.data unseen_classes = self.model.attrs["unseen_classes"] for i, (state, gold) in enumerate(zip(states, golds)): @@ -526,8 +548,9 @@ cdef class Parser(TrainablePipe): for j in range(self.moves.n_moves): if costs[j] <= 0.0 and j in unseen_classes: unseen_classes.remove(j) - cpu_log_loss(c_d_scores, - costs, is_valid, &scores[i, 0], d_scores.shape[1]) + cpu_log_loss( + c_d_scores, costs, is_valid, &scores[i, 0], d_scores.shape[1] + ) c_d_scores += d_scores.shape[1] # Note that we don't normalize this. See comment in update() for why. if losses is not None: diff --git a/spacy/schemas.py b/spacy/schemas.py index 140592dcd..fa987b90f 100644 --- a/spacy/schemas.py +++ b/spacy/schemas.py @@ -1,17 +1,54 @@ -from typing import Dict, List, Union, Optional, Any, Callable, Type, Tuple -from typing import Iterable, TypeVar, TYPE_CHECKING -from .compat import Literal -from enum import Enum -from pydantic import BaseModel, Field, ValidationError, validator, create_model -from pydantic import StrictStr, StrictInt, StrictFloat, StrictBool, ConstrainedStr -from pydantic.main import ModelMetaclass -from thinc.api import Optimizer, ConfigValidationError, Model -from thinc.config import Promise -from collections import defaultdict import inspect import re +from collections import defaultdict +from enum import Enum +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Iterable, + List, + Optional, + Tuple, + Type, + TypeVar, + Union, +) + +try: + from pydantic.v1 import ( + BaseModel, + ConstrainedStr, + Field, + StrictBool, + StrictFloat, + StrictInt, + StrictStr, + ValidationError, + create_model, + validator, + ) + from pydantic.v1.main import ModelMetaclass +except ImportError: + from pydantic import ( # type: ignore + BaseModel, + ConstrainedStr, + Field, + StrictBool, + StrictFloat, + StrictInt, + StrictStr, + ValidationError, + create_model, + validator, + ) + from pydantic.main import ModelMetaclass # type: ignore +from thinc.api import ConfigValidationError, Model, Optimizer +from thinc.config import Promise from .attrs import NAMES +from .compat import Literal from .lookups import Lookups from .util import is_cython_func @@ -375,6 +412,7 @@ class ConfigSchemaNlp(BaseModel): after_creation: Optional[Callable[["Language"], "Language"]] = Field(..., title="Optional callback to modify nlp object after creation and before the pipeline is constructed") after_pipeline_creation: Optional[Callable[["Language"], "Language"]] = Field(..., title="Optional callback to modify nlp 
object after the pipeline is constructed") batch_size: Optional[int] = Field(..., title="Default batch size") + vectors: Callable = Field(..., title="Vectors implementation") # fmt: on class Config: @@ -443,66 +481,6 @@ CONFIG_SCHEMAS = { "initialize": ConfigSchemaInit, } - -# Project config Schema - - -class ProjectConfigAssetGitItem(BaseModel): - # fmt: off - repo: StrictStr = Field(..., title="URL of Git repo to download from") - path: StrictStr = Field(..., title="File path or sub-directory to download (used for sparse checkout)") - branch: StrictStr = Field("master", title="Branch to clone from") - # fmt: on - - -class ProjectConfigAssetURL(BaseModel): - # fmt: off - dest: StrictStr = Field(..., title="Destination of downloaded asset") - url: Optional[StrictStr] = Field(None, title="URL of asset") - checksum: Optional[str] = Field(None, title="MD5 hash of file", regex=r"([a-fA-F\d]{32})") - description: StrictStr = Field("", title="Description of asset") - # fmt: on - - -class ProjectConfigAssetGit(BaseModel): - # fmt: off - git: ProjectConfigAssetGitItem = Field(..., title="Git repo information") - checksum: Optional[str] = Field(None, title="MD5 hash of file", regex=r"([a-fA-F\d]{32})") - description: Optional[StrictStr] = Field(None, title="Description of asset") - # fmt: on - - -class ProjectConfigCommand(BaseModel): - # fmt: off - name: StrictStr = Field(..., title="Name of command") - help: Optional[StrictStr] = Field(None, title="Command description") - script: List[StrictStr] = Field([], title="List of CLI commands to run, in order") - deps: List[StrictStr] = Field([], title="File dependencies required by this command") - outputs: List[StrictStr] = Field([], title="Outputs produced by this command") - outputs_no_cache: List[StrictStr] = Field([], title="Outputs not tracked by DVC (DVC only)") - no_skip: bool = Field(False, title="Never skip this command, even if nothing changed") - # fmt: on - - class Config: - title = "A single named command specified in a project config" - extra = "forbid" - - -class ProjectConfigSchema(BaseModel): - # fmt: off - vars: Dict[StrictStr, Any] = Field({}, title="Optional variables to substitute in commands") - env: Dict[StrictStr, Any] = Field({}, title="Optional variable names to substitute in commands, mapped to environment variable names") - assets: List[Union[ProjectConfigAssetURL, ProjectConfigAssetGit]] = Field([], title="Data assets") - workflows: Dict[StrictStr, List[StrictStr]] = Field({}, title="Named workflows, mapped to list of project commands to run in order") - commands: List[ProjectConfigCommand] = Field([], title="Project command shortucts") - title: Optional[str] = Field(None, title="Project title") - spacy_version: Optional[StrictStr] = Field(None, title="spaCy version range that the project is compatible with") - # fmt: on - - class Config: - title = "Schema for project configuration file" - - # Recommendations for init config workflows diff --git a/spacy/scorer.py b/spacy/scorer.py index 86cd00a50..9ab116deb 100644 --- a/spacy/scorer.py +++ b/spacy/scorer.py @@ -1,13 +1,23 @@ -from typing import Optional, Iterable, Dict, Set, List, Any, Callable, Tuple -from typing import TYPE_CHECKING -import numpy as np from collections import defaultdict +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Iterable, + List, + Optional, + Set, + Tuple, +) + +import numpy as np -from .training import Example -from .tokens import Token, Doc, Span from .errors import Errors -from .util import get_lang_class, SimpleFrozenList 
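The `try`/`except ImportError` block added to `spacy/schemas.py` above is the standard way to support Pydantic v1 and v2 side by side, because v2 re-exports the legacy API under `pydantic.v1`. A stripped-down version of the same pattern, with a toy model loosely modeled on the removed project-config schemas:

```python
try:
    # Pydantic v2 installed: the legacy API lives in the pydantic.v1 namespace.
    from pydantic.v1 import BaseModel, Field
except ImportError:
    # Pydantic v1 installed: the same names are available at the top level.
    from pydantic import BaseModel, Field  # type: ignore

class AssetURL(BaseModel):
    dest: str = Field(..., title="Destination of downloaded asset")
    url: str = Field("", title="URL of asset")

print(AssetURL(dest="assets/train.json", url="https://example.com/train.json"))
```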
from .morphology import Morphology +from .tokens import Doc, Span, Token +from .training import Example +from .util import SimpleFrozenList, get_lang_class if TYPE_CHECKING: # This lets us add type hints for mypy etc. without causing circular imports @@ -792,6 +802,140 @@ def get_ner_prf(examples: Iterable[Example], **kwargs) -> Dict[str, Any]: } +# The following implementation of trapezoid() is adapted from SciPy, +# which is distributed under the New BSD License. +# Copyright (c) 2001-2002 Enthought, Inc. 2003-2023, SciPy Developers. +# See licenses/3rd_party_licenses.txt +def trapezoid(y, x=None, dx=1.0, axis=-1): + r""" + Integrate along the given axis using the composite trapezoidal rule. + + If `x` is provided, the integration happens in sequence along its + elements - they are not sorted. + + Integrate `y` (`x`) along each 1d slice on the given axis, compute + :math:`\int y(x) dx`. + When `x` is specified, this integrates along the parametric curve, + computing :math:`\int_t y(t) dt = + \int_t y(t) \left.\frac{dx}{dt}\right|_{x=x(t)} dt`. + + Parameters + ---------- + y : array_like + Input array to integrate. + x : array_like, optional + The sample points corresponding to the `y` values. If `x` is None, + the sample points are assumed to be evenly spaced `dx` apart. The + default is None. + dx : scalar, optional + The spacing between sample points when `x` is None. The default is 1. + axis : int, optional + The axis along which to integrate. + + Returns + ------- + trapezoid : float or ndarray + Definite integral of `y` = n-dimensional array as approximated along + a single axis by the trapezoidal rule. If `y` is a 1-dimensional array, + then the result is a float. If `n` is greater than 1, then the result + is an `n`-1 dimensional array. + + See Also + -------- + cumulative_trapezoid, simpson, romb + + Notes + ----- + Image [2]_ illustrates trapezoidal rule -- y-axis locations of points + will be taken from `y` array, by default x-axis distances between + points will be 1.0, alternatively they can be provided with `x` array + or with `dx` scalar. Return value will be equal to combined area under + the red lines. + + References + ---------- + .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Trapezoidal_rule + + .. [2] Illustration image: + https://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png + + Examples + -------- + Use the trapezoidal rule on evenly spaced points: + + >>> import numpy as np + >>> from scipy import integrate + >>> integrate.trapezoid([1, 2, 3]) + 4.0 + + The spacing between sample points can be selected by either the + ``x`` or ``dx`` arguments: + + >>> integrate.trapezoid([1, 2, 3], x=[4, 6, 8]) + 8.0 + >>> integrate.trapezoid([1, 2, 3], dx=2) + 8.0 + + Using a decreasing ``x`` corresponds to integrating in reverse: + + >>> integrate.trapezoid([1, 2, 3], x=[8, 6, 4]) + -8.0 + + More generally ``x`` is used to integrate along a parametric curve. 
We can + estimate the integral :math:`\int_0^1 x^2 = 1/3` using: + + >>> x = np.linspace(0, 1, num=50) + >>> y = x**2 + >>> integrate.trapezoid(y, x) + 0.33340274885464394 + + Or estimate the area of a circle, noting we repeat the sample which closes + the curve: + + >>> theta = np.linspace(0, 2 * np.pi, num=1000, endpoint=True) + >>> integrate.trapezoid(np.cos(theta), x=np.sin(theta)) + 3.141571941375841 + + ``trapezoid`` can be applied along a specified axis to do multiple + computations in one call: + + >>> a = np.arange(6).reshape(2, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5]]) + >>> integrate.trapezoid(a, axis=0) + array([1.5, 2.5, 3.5]) + >>> integrate.trapezoid(a, axis=1) + array([2., 8.]) + """ + y = np.asanyarray(y) + if x is None: + d = dx + else: + x = np.asanyarray(x) + if x.ndim == 1: + d = np.diff(x) + # reshape to correct shape + shape = [1] * y.ndim + shape[axis] = d.shape[0] + d = d.reshape(shape) + else: + d = np.diff(x, axis=axis) + nd = y.ndim + slice1 = [slice(None)] * nd + slice2 = [slice(None)] * nd + slice1[axis] = slice(1, None) + slice2[axis] = slice(None, -1) + try: + ret = (d * (y[tuple(slice1)] + y[tuple(slice2)]) / 2.0).sum(axis) + except ValueError: + # Operations didn't work, cast to ndarray + d = np.asarray(d) + y = np.asarray(y) + ret = np.add.reduce(d * (y[tuple(slice1)] + y[tuple(slice2)]) / 2.0, axis) + return ret + + # The following implementation of roc_auc_score() is adapted from # scikit-learn, which is distributed under the New BSD License. # Copyright (c) 2007–2019 The scikit-learn developers. @@ -1014,9 +1158,9 @@ def _auc(x, y): else: raise ValueError(Errors.E164.format(x=x)) - area = direction * np.trapz(y, x) + area = direction * trapezoid(y, x) if isinstance(area, np.memmap): - # Reductions such as .sum used internally in np.trapz do not return a + # Reductions such as .sum used internally in trapezoid do not return a # scalar by default for numpy.memmap instances contrary to # regular numpy.ndarray instances. area = area.dtype.type(area) diff --git a/spacy/strings.pxd b/spacy/strings.pxd index 5f03a9a28..d22f48ba1 100644 --- a/spacy/strings.pxd +++ b/spacy/strings.pxd @@ -1,9 +1,9 @@ -from libc.stdint cimport int64_t -from libcpp.vector cimport vector -from libcpp.set cimport set from cymem.cymem cimport Pool -from preshed.maps cimport PreshMap +from libc.stdint cimport int64_t +from libcpp.set cimport set +from libcpp.vector cimport vector from murmurhash.mrmr cimport hash64 +from preshed.maps cimport PreshMap from .typedefs cimport attr_t, hash_t diff --git a/spacy/strings.pyi b/spacy/strings.pyi index b29389b9a..f8fe8381c 100644 --- a/spacy/strings.pyi +++ b/spacy/strings.pyi @@ -1,5 +1,5 @@ -from typing import Optional, Iterable, Iterator, Union, Any, overload from pathlib import Path +from typing import Any, Iterable, Iterator, Optional, Union, overload def get_string_id(key: Union[str, int]) -> int: ... diff --git a/spacy/strings.pyx b/spacy/strings.pyx index c5f218342..376a13175 100644 --- a/spacy/strings.pyx +++ b/spacy/strings.pyx @@ -1,27 +1,29 @@ # cython: infer_types=True +# cython: profile=False cimport cython -from libc.string cimport memcpy -from libcpp.set cimport set from libc.stdint cimport uint32_t -from murmurhash.mrmr cimport hash64, hash32 +from libc.string cimport memcpy +from murmurhash.mrmr cimport hash32, hash64 import srsly from .typedefs cimport hash_t +from . 
import util +from .errors import Errors from .symbols import IDS as SYMBOLS_BY_STR from .symbols import NAMES as SYMBOLS_BY_INT -from .errors import Errors -from . import util + # Not particularly elegant, but this is faster than `isinstance(key, numbers.Integral)` cdef inline bint _try_coerce_to_hash(object key, hash_t* out_hash): try: out_hash[0] = key return True - except: + except: # no-cython-lint return False + def get_string_id(key): """Get a string ID, handling the reserved symbols correctly. If the key is already an ID, return it. @@ -86,7 +88,6 @@ cdef Utf8Str* _allocate(Pool mem, const unsigned char* chars, uint32_t length) e cdef int n_length_bytes cdef int i cdef Utf8Str* string = mem.alloc(1, sizeof(Utf8Str)) - cdef uint32_t ulength = length if length < sizeof(string.s): string.s[0] = length memcpy(&string.s[1], chars, length) diff --git a/spacy/structs.pxd b/spacy/structs.pxd index 86d5b67ed..8cfcc2964 100644 --- a/spacy/structs.pxd +++ b/spacy/structs.pxd @@ -1,11 +1,10 @@ -from libc.stdint cimport uint8_t, uint32_t, int32_t, uint64_t -from libcpp.vector cimport vector -from libcpp.unordered_set cimport unordered_set +from libc.stdint cimport int32_t, int64_t, uint8_t, uint32_t, uint64_t from libcpp.unordered_map cimport unordered_map -from libc.stdint cimport int32_t, int64_t +from libcpp.unordered_set cimport unordered_set +from libcpp.vector cimport vector -from .typedefs cimport flags_t, attr_t, hash_t from .parts_of_speech cimport univ_pos_t +from .typedefs cimport attr_t, flags_t, hash_t cdef struct LexemeC: @@ -53,7 +52,7 @@ cdef struct TokenC: int sent_start int ent_iob - attr_t ent_type # TODO: Is there a better way to do this? Multiple sources of truth.. + attr_t ent_type # TODO: Is there a better way to do this? Multiple sources of truth.. attr_t ent_kb_id hash_t ent_id diff --git a/spacy/symbols.pxd b/spacy/symbols.pxd index bc15d9b80..73be19145 100644 --- a/spacy/symbols.pxd +++ b/spacy/symbols.pxd @@ -92,7 +92,7 @@ cdef enum symbol_t: ADV AUX CONJ - CCONJ # U20 + CCONJ # U20 DET INTJ NOUN @@ -418,7 +418,7 @@ cdef enum symbol_t: ccomp complm conj - cop # U20 + cop # U20 csubj csubjpass dep @@ -441,8 +441,8 @@ cdef enum symbol_t: num number oprd - obj # U20 - obl # U20 + obj # U20 + obl # U20 parataxis partmod pcomp diff --git a/spacy/symbols.pyx b/spacy/symbols.pyx index b0345c710..f7713577b 100644 --- a/spacy/symbols.pyx +++ b/spacy/symbols.pyx @@ -1,4 +1,5 @@ # cython: optimize.unpack_method_calls=False +# cython: profile=False IDS = { "": NIL, "IS_ALPHA": IS_ALPHA, @@ -96,7 +97,7 @@ IDS = { "ADV": ADV, "AUX": AUX, "CONJ": CONJ, - "CCONJ": CCONJ, # U20 + "CCONJ": CCONJ, # U20 "DET": DET, "INTJ": INTJ, "NOUN": NOUN, @@ -421,7 +422,7 @@ IDS = { "ccomp": ccomp, "complm": complm, "conj": conj, - "cop": cop, # U20 + "cop": cop, # U20 "csubj": csubj, "csubjpass": csubjpass, "dep": dep, @@ -444,8 +445,8 @@ IDS = { "num": num, "number": number, "oprd": oprd, - "obj": obj, # U20 - "obl": obl, # U20 + "obj": obj, # U20 + "obl": obl, # U20 "parataxis": parataxis, "partmod": partmod, "pcomp": pcomp, diff --git a/spacy/tests/conftest.py b/spacy/tests/conftest.py index 00b8f5f1c..7db986ab9 100644 --- a/spacy/tests/conftest.py +++ b/spacy/tests/conftest.py @@ -1,7 +1,8 @@ import pytest -from spacy.util import get_lang_class from hypothesis import settings +from spacy.util import get_lang_class + # Functionally disable deadline settings for tests # to prevent spurious test failures in CI builds. 
settings.register_profile("no_deadlines", deadline=2 * 60 * 1000) # in ms @@ -161,6 +162,11 @@ def fi_tokenizer(): return get_lang_class("fi")().tokenizer +@pytest.fixture(scope="session") +def fo_tokenizer(): + return get_lang_class("fo")().tokenizer + + @pytest.fixture(scope="session") def fr_tokenizer(): return get_lang_class("fr")().tokenizer @@ -316,6 +322,11 @@ def nl_tokenizer(): return get_lang_class("nl")().tokenizer +@pytest.fixture(scope="session") +def nn_tokenizer(): + return get_lang_class("nn")().tokenizer + + @pytest.fixture(scope="session") def pl_tokenizer(): return get_lang_class("pl")().tokenizer diff --git a/spacy/tests/doc/test_add_entities.py b/spacy/tests/doc/test_add_entities.py index 231b7c2a8..259b21fb3 100644 --- a/spacy/tests/doc/test_add_entities.py +++ b/spacy/tests/doc/test_add_entities.py @@ -1,10 +1,11 @@ -from spacy.pipeline.ner import DEFAULT_NER_MODEL -from spacy.training import Example -from spacy.pipeline import EntityRecognizer -from spacy.tokens import Span, Doc -from spacy import registry import pytest +from spacy import registry +from spacy.pipeline import EntityRecognizer +from spacy.pipeline.ner import DEFAULT_NER_MODEL +from spacy.tokens import Doc, Span +from spacy.training import Example + def _ner_example(ner): doc = Doc( diff --git a/spacy/tests/doc/test_array.py b/spacy/tests/doc/test_array.py index 1f2d7d999..757655f55 100644 --- a/spacy/tests/doc/test_array.py +++ b/spacy/tests/doc/test_array.py @@ -1,8 +1,8 @@ import numpy import pytest +from spacy.attrs import DEP, MORPH, ORTH, POS, SHAPE from spacy.tokens import Doc -from spacy.attrs import ORTH, SHAPE, POS, DEP, MORPH @pytest.mark.issue(2203) diff --git a/spacy/tests/doc/test_creation.py b/spacy/tests/doc/test_creation.py index 302a9b6ea..4bc1de3e0 100644 --- a/spacy/tests/doc/test_creation.py +++ b/spacy/tests/doc/test_creation.py @@ -1,7 +1,8 @@ import pytest -from spacy.vocab import Vocab -from spacy.tokens import Doc + from spacy import util +from spacy.tokens import Doc +from spacy.vocab import Vocab @pytest.fixture diff --git a/spacy/tests/doc/test_doc_api.py b/spacy/tests/doc/test_doc_api.py index 38003dea9..73544c51a 100644 --- a/spacy/tests/doc/test_doc_api.py +++ b/spacy/tests/doc/test_doc_api.py @@ -1,13 +1,22 @@ +import warnings import weakref import numpy -from numpy.testing import assert_array_equal import pytest -import warnings +from numpy.testing import assert_array_equal from thinc.api import NumpyOps, get_current_ops -from spacy.attrs import DEP, ENT_IOB, ENT_TYPE, HEAD, IS_ALPHA, MORPH, POS -from spacy.attrs import SENT_START, TAG +from spacy.attrs import ( + DEP, + ENT_IOB, + ENT_TYPE, + HEAD, + IS_ALPHA, + MORPH, + POS, + SENT_START, + TAG, +) from spacy.lang.en import English from spacy.lang.xx import MultiLanguage from spacy.language import Language diff --git a/spacy/tests/doc/test_graph.py b/spacy/tests/doc/test_graph.py index e464b0058..d14a5b057 100644 --- a/spacy/tests/doc/test_graph.py +++ b/spacy/tests/doc/test_graph.py @@ -1,6 +1,6 @@ -from spacy.vocab import Vocab from spacy.tokens.doc import Doc from spacy.tokens.graph import Graph +from spacy.vocab import Vocab def test_graph_init(): diff --git a/spacy/tests/doc/test_json_doc_conversion.py b/spacy/tests/doc/test_json_doc_conversion.py index 11a1817e6..a76472d07 100644 --- a/spacy/tests/doc/test_json_doc_conversion.py +++ b/spacy/tests/doc/test_json_doc_conversion.py @@ -1,8 +1,10 @@ import pytest +import srsly + import spacy from spacy import schemas from spacy.tokens import Doc, Span, Token -import 
srsly + from .test_underscore import clean_underscore # noqa: F401 diff --git a/spacy/tests/doc/test_pickle_doc.py b/spacy/tests/doc/test_pickle_doc.py index 28cb66714..2e28162d4 100644 --- a/spacy/tests/doc/test_pickle_doc.py +++ b/spacy/tests/doc/test_pickle_doc.py @@ -1,5 +1,5 @@ -from spacy.language import Language from spacy.compat import pickle +from spacy.language import Language def test_pickle_single_doc(): diff --git a/spacy/tests/doc/test_retokenize_merge.py b/spacy/tests/doc/test_retokenize_merge.py index 20c302da1..45d54346e 100644 --- a/spacy/tests/doc/test_retokenize_merge.py +++ b/spacy/tests/doc/test_retokenize_merge.py @@ -1,7 +1,8 @@ import pytest + from spacy.attrs import LEMMA -from spacy.vocab import Vocab from spacy.tokens import Doc, Token +from spacy.vocab import Vocab def test_doc_retokenize_merge(en_tokenizer): diff --git a/spacy/tests/doc/test_retokenize_split.py b/spacy/tests/doc/test_retokenize_split.py index ec4deb033..61ef599be 100644 --- a/spacy/tests/doc/test_retokenize_split.py +++ b/spacy/tests/doc/test_retokenize_split.py @@ -1,8 +1,8 @@ import numpy import pytest -from spacy.vocab import Vocab from spacy.tokens import Doc, Token +from spacy.vocab import Vocab @pytest.mark.issue(3540) diff --git a/spacy/tests/doc/test_span.py b/spacy/tests/doc/test_span.py index a5c512dc0..98a74bc21 100644 --- a/spacy/tests/doc/test_span.py +++ b/spacy/tests/doc/test_span.py @@ -1,13 +1,13 @@ -import pytest import numpy +import pytest from numpy.testing import assert_array_equal +from thinc.api import get_current_ops -from spacy.attrs import ORTH, LENGTH +from spacy.attrs import LENGTH, ORTH from spacy.lang.en import English from spacy.tokens import Doc, Span, Token -from spacy.vocab import Vocab from spacy.util import filter_spans -from thinc.api import get_current_ops +from spacy.vocab import Vocab from ..util import add_vecs_to_vocab from .test_underscore import clean_underscore # noqa: F401 @@ -731,3 +731,12 @@ def test_for_no_ent_sents(): sents = list(doc.ents[0].sents) assert len(sents) == 1 assert str(sents[0]) == str(doc.ents[0].sent) == "ENTITY" + + +def test_span_api_richcmp_other(en_tokenizer): + doc1 = en_tokenizer("a b") + doc2 = en_tokenizer("b c") + assert not doc1[1:2] == doc1[1] + assert not doc1[1:2] == doc2[0] + assert not doc1[1:2] == doc2[0:1] + assert not doc1[0:1] == doc2 diff --git a/spacy/tests/doc/test_span_group.py b/spacy/tests/doc/test_span_group.py index cea2c42ee..ef78172bf 100644 --- a/spacy/tests/doc/test_span_group.py +++ b/spacy/tests/doc/test_span_group.py @@ -1,9 +1,10 @@ +from random import Random from typing import List import pytest -from random import Random + from spacy.matcher import Matcher -from spacy.tokens import Span, SpanGroup, Doc +from spacy.tokens import Doc, Span, SpanGroup from spacy.util import filter_spans diff --git a/spacy/tests/doc/test_token_api.py b/spacy/tests/doc/test_token_api.py index e715c5e85..c10221e65 100644 --- a/spacy/tests/doc/test_token_api.py +++ b/spacy/tests/doc/test_token_api.py @@ -1,10 +1,11 @@ -import pytest import numpy -from spacy.attrs import IS_ALPHA, IS_DIGIT, IS_LOWER, IS_PUNCT, IS_TITLE, IS_STOP +import pytest + +from spacy.attrs import IS_ALPHA, IS_DIGIT, IS_LOWER, IS_PUNCT, IS_STOP, IS_TITLE from spacy.symbols import VERB -from spacy.vocab import Vocab from spacy.tokens import Doc from spacy.training import Example +from spacy.vocab import Vocab @pytest.fixture @@ -293,3 +294,12 @@ def test_missing_head_dep(en_vocab): assert aligned_heads[0] == ref_heads[0] assert aligned_deps[5] == 
ref_deps[5] assert aligned_heads[5] == ref_heads[5] + + +def test_token_api_richcmp_other(en_tokenizer): + doc1 = en_tokenizer("a b") + doc2 = en_tokenizer("b c") + assert not doc1[1] == doc1[0:1] + assert not doc1[1] == doc2[1:2] + assert not doc1[1] == doc2[0] + assert not doc1[0] == doc2 diff --git a/spacy/tests/doc/test_underscore.py b/spacy/tests/doc/test_underscore.py index b934221af..b79d2f01f 100644 --- a/spacy/tests/doc/test_underscore.py +++ b/spacy/tests/doc/test_underscore.py @@ -1,5 +1,6 @@ import pytest from mock import Mock + from spacy.tokens import Doc, Span, Token from spacy.tokens.underscore import Underscore diff --git a/spacy/tests/lang/bn/test_tokenizer.py b/spacy/tests/lang/bn/test_tokenizer.py index 5b18c5269..e9a4d5e54 100644 --- a/spacy/tests/lang/bn/test_tokenizer.py +++ b/spacy/tests/lang/bn/test_tokenizer.py @@ -1,6 +1,5 @@ import pytest - # fmt: off TESTCASES = [ # Punctuation tests diff --git a/spacy/tests/lang/da/test_noun_chunks.py b/spacy/tests/lang/da/test_noun_chunks.py index 30df92c0b..b4d389e4b 100644 --- a/spacy/tests/lang/da/test_noun_chunks.py +++ b/spacy/tests/lang/da/test_noun_chunks.py @@ -1,4 +1,5 @@ import pytest + from spacy.tokens import Doc diff --git a/spacy/tests/lang/da/test_text.py b/spacy/tests/lang/da/test_text.py index 3c6cca5ac..e1f3b96e2 100644 --- a/spacy/tests/lang/da/test_text.py +++ b/spacy/tests/lang/da/test_text.py @@ -1,4 +1,5 @@ import pytest + from spacy.lang.da.lex_attrs import like_num diff --git a/spacy/tests/lang/en/test_customized_tokenizer.py b/spacy/tests/lang/en/test_customized_tokenizer.py index f5302cb31..8251306a6 100644 --- a/spacy/tests/lang/en/test_customized_tokenizer.py +++ b/spacy/tests/lang/en/test_customized_tokenizer.py @@ -1,9 +1,10 @@ -import pytest import re + +import pytest + from spacy.lang.en import English from spacy.tokenizer import Tokenizer -from spacy.util import compile_prefix_regex, compile_suffix_regex -from spacy.util import compile_infix_regex +from spacy.util import compile_infix_regex, compile_prefix_regex, compile_suffix_regex @pytest.fixture diff --git a/spacy/tests/lang/en/test_noun_chunks.py b/spacy/tests/lang/en/test_noun_chunks.py index 0c54ffbb4..bda203b2c 100644 --- a/spacy/tests/lang/en/test_noun_chunks.py +++ b/spacy/tests/lang/en/test_noun_chunks.py @@ -1,6 +1,7 @@ -from spacy.tokens import Doc import pytest +from spacy.tokens import Doc + @pytest.fixture def doc(en_vocab): diff --git a/spacy/tests/lang/en/test_punct.py b/spacy/tests/lang/en/test_punct.py index 1d10478a1..79d03d2db 100644 --- a/spacy/tests/lang/en/test_punct.py +++ b/spacy/tests/lang/en/test_punct.py @@ -1,7 +1,7 @@ import pytest -from spacy.util import compile_prefix_regex -from spacy.lang.punctuation import TOKENIZER_PREFIXES +from spacy.lang.punctuation import TOKENIZER_PREFIXES +from spacy.util import compile_prefix_regex PUNCT_OPEN = ["(", "[", "{", "*"] PUNCT_CLOSE = [")", "]", "}", "*"] diff --git a/spacy/tests/lang/en/test_sbd.py b/spacy/tests/lang/en/test_sbd.py index d30c72750..c07c23193 100644 --- a/spacy/tests/lang/en/test_sbd.py +++ b/spacy/tests/lang/en/test_sbd.py @@ -1,4 +1,5 @@ import pytest + from spacy.tokens import Doc from ...util import apply_transition_sequence diff --git a/spacy/tests/lang/en/test_text.py b/spacy/tests/lang/en/test_text.py index 358f4c0f9..53cf0cc5b 100644 --- a/spacy/tests/lang/en/test_text.py +++ b/spacy/tests/lang/en/test_text.py @@ -1,4 +1,5 @@ import pytest + from spacy.lang.en.lex_attrs import like_num diff --git a/spacy/tests/lang/es/test_noun_chunks.py 
b/spacy/tests/lang/es/test_noun_chunks.py index 6118a0458..8e5fe8354 100644 --- a/spacy/tests/lang/es/test_noun_chunks.py +++ b/spacy/tests/lang/es/test_noun_chunks.py @@ -1,6 +1,7 @@ -from spacy.tokens import Doc import pytest +from spacy.tokens import Doc + # fmt: off @pytest.mark.parametrize( diff --git a/spacy/tests/lang/es/test_text.py b/spacy/tests/lang/es/test_text.py index d95f6d26b..1d1f7fa6b 100644 --- a/spacy/tests/lang/es/test_text.py +++ b/spacy/tests/lang/es/test_text.py @@ -1,6 +1,7 @@ import pytest -from spacy.lang.es.lex_attrs import like_num + from spacy.lang.es import Spanish +from spacy.lang.es.lex_attrs import like_num @pytest.mark.issue(3803) diff --git a/spacy/tests/lang/fi/test_noun_chunks.py b/spacy/tests/lang/fi/test_noun_chunks.py index cab84b311..37e1b00a0 100644 --- a/spacy/tests/lang/fi/test_noun_chunks.py +++ b/spacy/tests/lang/fi/test_noun_chunks.py @@ -1,6 +1,6 @@ import pytest -from spacy.tokens import Doc +from spacy.tokens import Doc FI_NP_TEST_EXAMPLES = [ ( diff --git a/spacy/tests/lang/fi/test_tokenizer.py b/spacy/tests/lang/fi/test_tokenizer.py index dc40e18a3..2d9f081a7 100644 --- a/spacy/tests/lang/fi/test_tokenizer.py +++ b/spacy/tests/lang/fi/test_tokenizer.py @@ -1,6 +1,5 @@ import pytest - ABBREVIATION_TESTS = [ ( "Hyvää uutta vuotta t. siht. Niemelä!", diff --git a/spacy/tests/lang/fo/__init__.py b/spacy/tests/lang/fo/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/spacy/tests/lang/fo/test_tokenizer.py b/spacy/tests/lang/fo/test_tokenizer.py new file mode 100644 index 000000000..e61a62be5 --- /dev/null +++ b/spacy/tests/lang/fo/test_tokenizer.py @@ -0,0 +1,26 @@ +import pytest + +# examples taken from Basic LAnguage Resource Kit 1.0 for Faroese (https://maltokni.fo/en/resources) licensed with CC BY 4.0 (https://creativecommons.org/licenses/by/4.0/) +# fmt: off +FO_TOKEN_EXCEPTION_TESTS = [ + ( + "Eftir løgtingslóg um samsýning og eftirløn landsstýrismanna v.m., skulu løgmaður og landsstýrismenn vanliga siga frá sær størv í almennari tænastu ella privatum virkjum, samtøkum ella stovnum. 
", + [ + "Eftir", "løgtingslóg", "um", "samsýning", "og", "eftirløn", "landsstýrismanna", "v.m.", ",", "skulu", "løgmaður", "og", "landsstýrismenn", "vanliga", "siga", "frá", "sær", "størv", "í", "almennari", "tænastu", "ella", "privatum", "virkjum", ",", "samtøkum", "ella", "stovnum", ".", + ], + ), + ( + "Sambandsflokkurin gongur aftur við 2,7 prosentum í mun til valið í 1994, tá flokkurin fekk undirtøku frá 23,4 prosent av veljarunum.", + [ + "Sambandsflokkurin", "gongur", "aftur", "við", "2,7", "prosentum", "í", "mun", "til", "valið", "í", "1994", ",", "tá", "flokkurin", "fekk", "undirtøku", "frá", "23,4", "prosent", "av", "veljarunum", ".", + ], + ), +] +# fmt: on + + +@pytest.mark.parametrize("text,expected_tokens", FO_TOKEN_EXCEPTION_TESTS) +def test_fo_tokenizer_handles_exception_cases(fo_tokenizer, text, expected_tokens): + tokens = fo_tokenizer(text) + token_list = [token.text for token in tokens if not token.is_space] + assert expected_tokens == token_list diff --git a/spacy/tests/lang/fr/test_noun_chunks.py b/spacy/tests/lang/fr/test_noun_chunks.py index 25b95f566..436e07b29 100644 --- a/spacy/tests/lang/fr/test_noun_chunks.py +++ b/spacy/tests/lang/fr/test_noun_chunks.py @@ -1,6 +1,7 @@ -from spacy.tokens import Doc import pytest +from spacy.tokens import Doc + # fmt: off @pytest.mark.parametrize( diff --git a/spacy/tests/lang/fr/test_prefix_suffix_infix.py b/spacy/tests/lang/fr/test_prefix_suffix_infix.py index 272531b63..b81ccbc0e 100644 --- a/spacy/tests/lang/fr/test_prefix_suffix_infix.py +++ b/spacy/tests/lang/fr/test_prefix_suffix_infix.py @@ -1,7 +1,8 @@ import pytest -from spacy.language import Language, BaseDefaults -from spacy.lang.punctuation import TOKENIZER_INFIXES + from spacy.lang.char_classes import ALPHA +from spacy.lang.punctuation import TOKENIZER_INFIXES +from spacy.language import BaseDefaults, Language @pytest.mark.issue(768) diff --git a/spacy/tests/lang/fr/test_text.py b/spacy/tests/lang/fr/test_text.py index 01231f593..2c58a1c4a 100644 --- a/spacy/tests/lang/fr/test_text.py +++ b/spacy/tests/lang/fr/test_text.py @@ -1,4 +1,5 @@ import pytest + from spacy.lang.fr.lex_attrs import like_num diff --git a/spacy/tests/lang/ga/test_tokenizer.py b/spacy/tests/lang/ga/test_tokenizer.py index 78127ef7c..0c16b27d2 100644 --- a/spacy/tests/lang/ga/test_tokenizer.py +++ b/spacy/tests/lang/ga/test_tokenizer.py @@ -1,6 +1,5 @@ import pytest - # fmt: off GA_TOKEN_EXCEPTION_TESTS = [ ("Niall Ó Domhnaill, Rialtas na hÉireann 1977 (lch. 
600).", ["Niall", "Ó", "Domhnaill", ",", "Rialtas", "na", "hÉireann", "1977", "(", "lch.", "600", ")", "."]), diff --git a/spacy/tests/lang/grc/test_tokenizer.py b/spacy/tests/lang/grc/test_tokenizer.py index 3df5b546b..9f29b9024 100644 --- a/spacy/tests/lang/grc/test_tokenizer.py +++ b/spacy/tests/lang/grc/test_tokenizer.py @@ -1,6 +1,5 @@ import pytest - # fmt: off GRC_TOKEN_EXCEPTION_TESTS = [ ("τὸ 〈τῆς〉 φιλοσοφίας ἔργον ἔνιοί φασιν ἀπὸ ⟦βαρβάρων⟧ ἄρξαι.", ["τὸ", "〈", "τῆς", "〉", "φιλοσοφίας", "ἔργον", "ἔνιοί", "φασιν", "ἀπὸ", "⟦", "βαρβάρων", "⟧", "ἄρξαι", "."]), diff --git a/spacy/tests/lang/he/test_tokenizer.py b/spacy/tests/lang/he/test_tokenizer.py index 3716f7e3b..15d059328 100644 --- a/spacy/tests/lang/he/test_tokenizer.py +++ b/spacy/tests/lang/he/test_tokenizer.py @@ -1,4 +1,5 @@ import pytest + from spacy.lang.he.lex_attrs import like_num diff --git a/spacy/tests/lang/hi/test_lex_attrs.py b/spacy/tests/lang/hi/test_lex_attrs.py index 80a7cc1c4..2d8d4a53e 100644 --- a/spacy/tests/lang/hi/test_lex_attrs.py +++ b/spacy/tests/lang/hi/test_lex_attrs.py @@ -1,5 +1,6 @@ import pytest -from spacy.lang.hi.lex_attrs import norm, like_num + +from spacy.lang.hi.lex_attrs import like_num, norm def test_hi_tokenizer_handles_long_text(hi_tokenizer): diff --git a/spacy/tests/lang/hi/test_text.py b/spacy/tests/lang/hi/test_text.py index 791cc3822..837dc3099 100644 --- a/spacy/tests/lang/hi/test_text.py +++ b/spacy/tests/lang/hi/test_text.py @@ -1,4 +1,5 @@ import pytest + from spacy.lang.hi import Hindi diff --git a/spacy/tests/lang/hu/test_tokenizer.py b/spacy/tests/lang/hu/test_tokenizer.py index 0488474ae..fa689c8f3 100644 --- a/spacy/tests/lang/hu/test_tokenizer.py +++ b/spacy/tests/lang/hu/test_tokenizer.py @@ -1,6 +1,5 @@ import pytest - DEFAULT_TESTS = [ ("N. kormányzósági\nszékhely.", ["N.", "kormányzósági", "székhely", "."]), pytest.param( diff --git a/spacy/tests/lang/hy/test_text.py b/spacy/tests/lang/hy/test_text.py index ac0f1e128..7a69c2a81 100644 --- a/spacy/tests/lang/hy/test_text.py +++ b/spacy/tests/lang/hy/test_text.py @@ -1,4 +1,5 @@ import pytest + from spacy.lang.hy.lex_attrs import like_num diff --git a/spacy/tests/lang/hy/test_tokenizer.py b/spacy/tests/lang/hy/test_tokenizer.py index e9efb224a..9423cb4d0 100644 --- a/spacy/tests/lang/hy/test_tokenizer.py +++ b/spacy/tests/lang/hy/test_tokenizer.py @@ -1,6 +1,5 @@ import pytest - # TODO add test cases with valid punctuation signs. 
hy_tokenize_text_test = [ diff --git a/spacy/tests/lang/id/test_text.py b/spacy/tests/lang/id/test_text.py index ed6487b68..7397a8c17 100644 --- a/spacy/tests/lang/id/test_text.py +++ b/spacy/tests/lang/id/test_text.py @@ -1,4 +1,5 @@ import pytest + from spacy.lang.id.lex_attrs import like_num diff --git a/spacy/tests/lang/it/test_noun_chunks.py b/spacy/tests/lang/it/test_noun_chunks.py index 0a8c10e79..7f6659ee7 100644 --- a/spacy/tests/lang/it/test_noun_chunks.py +++ b/spacy/tests/lang/it/test_noun_chunks.py @@ -1,6 +1,7 @@ -from spacy.tokens import Doc import pytest +from spacy.tokens import Doc + # fmt: off @pytest.mark.parametrize( diff --git a/spacy/tests/lang/ja/test_morphologizer_factory.py b/spacy/tests/lang/ja/test_morphologizer_factory.py index a4e038d01..d504576d0 100644 --- a/spacy/tests/lang/ja/test_morphologizer_factory.py +++ b/spacy/tests/lang/ja/test_morphologizer_factory.py @@ -1,4 +1,5 @@ import pytest + from spacy.lang.ja import Japanese diff --git a/spacy/tests/lang/ja/test_serialize.py b/spacy/tests/lang/ja/test_serialize.py index 011eb470f..f48b2570e 100644 --- a/spacy/tests/lang/ja/test_serialize.py +++ b/spacy/tests/lang/ja/test_serialize.py @@ -1,6 +1,7 @@ import pickle from spacy.lang.ja import Japanese + from ...util import make_tempdir diff --git a/spacy/tests/lang/ja/test_tokenizer.py b/spacy/tests/lang/ja/test_tokenizer.py index ef7bed06d..a26347444 100644 --- a/spacy/tests/lang/ja/test_tokenizer.py +++ b/spacy/tests/lang/ja/test_tokenizer.py @@ -1,7 +1,8 @@ import pytest +from spacy.lang.ja import DetailedToken, Japanese + from ...tokenizer.test_naughty_strings import NAUGHTY_STRINGS -from spacy.lang.ja import Japanese, DetailedToken # fmt: off TOKENIZER_TESTS = [ diff --git a/spacy/tests/lang/ko/test_serialize.py b/spacy/tests/lang/ko/test_serialize.py index 75288fcc5..bba7bce6e 100644 --- a/spacy/tests/lang/ko/test_serialize.py +++ b/spacy/tests/lang/ko/test_serialize.py @@ -1,6 +1,7 @@ import pickle from spacy.lang.ko import Korean + from ...util import make_tempdir diff --git a/spacy/tests/lang/ky/test_tokenizer.py b/spacy/tests/lang/ky/test_tokenizer.py index 5cf6eb1a6..b089dd9b9 100644 --- a/spacy/tests/lang/ky/test_tokenizer.py +++ b/spacy/tests/lang/ky/test_tokenizer.py @@ -1,6 +1,5 @@ import pytest - INFIX_HYPHEN_TESTS = [ ("Бала-чака жакшыбы?", "Бала-чака жакшыбы ?".split()), ("Кыз-келиндер кийими.", "Кыз-келиндер кийими .".split()), diff --git a/spacy/tests/lang/la/test_noun_chunks.py b/spacy/tests/lang/la/test_noun_chunks.py index ba8f5658b..70a3392cd 100644 --- a/spacy/tests/lang/la/test_noun_chunks.py +++ b/spacy/tests/lang/la/test_noun_chunks.py @@ -1,4 +1,5 @@ import pytest + from spacy.tokens import Doc diff --git a/spacy/tests/lang/la/test_text.py b/spacy/tests/lang/la/test_text.py index 48e7359a4..74606c4e8 100644 --- a/spacy/tests/lang/la/test_text.py +++ b/spacy/tests/lang/la/test_text.py @@ -1,4 +1,5 @@ import pytest + from spacy.lang.la.lex_attrs import like_num diff --git a/spacy/tests/lang/mk/test_text.py b/spacy/tests/lang/mk/test_text.py index b8881082c..b3a7ff9ee 100644 --- a/spacy/tests/lang/mk/test_text.py +++ b/spacy/tests/lang/mk/test_text.py @@ -1,4 +1,5 @@ import pytest + from spacy.lang.mk.lex_attrs import like_num diff --git a/spacy/tests/lang/ms/test_text.py b/spacy/tests/lang/ms/test_text.py index d6cd169ce..4b0ac3b2b 100644 --- a/spacy/tests/lang/ms/test_text.py +++ b/spacy/tests/lang/ms/test_text.py @@ -1,4 +1,5 @@ import pytest + from spacy.lang.ms.lex_attrs import like_num diff --git 
a/spacy/tests/lang/nb/test_tokenizer.py b/spacy/tests/lang/nb/test_tokenizer.py index 2da6e8d40..4f5fd89a3 100644 --- a/spacy/tests/lang/nb/test_tokenizer.py +++ b/spacy/tests/lang/nb/test_tokenizer.py @@ -1,6 +1,5 @@ import pytest - NB_TOKEN_EXCEPTION_TESTS = [ ( "Smørsausen brukes bl.a. til fisk", diff --git a/spacy/tests/lang/nl/test_noun_chunks.py b/spacy/tests/lang/nl/test_noun_chunks.py index 8962e3b75..6004ac230 100644 --- a/spacy/tests/lang/nl/test_noun_chunks.py +++ b/spacy/tests/lang/nl/test_noun_chunks.py @@ -1,4 +1,5 @@ import pytest + from spacy.tokens import Doc from spacy.util import filter_spans diff --git a/spacy/tests/lang/nl/test_text.py b/spacy/tests/lang/nl/test_text.py index 8bc72cc6d..d6413e0d7 100644 --- a/spacy/tests/lang/nl/test_text.py +++ b/spacy/tests/lang/nl/test_text.py @@ -1,4 +1,5 @@ import pytest + from spacy.lang.nl.lex_attrs import like_num diff --git a/spacy/tests/lang/nn/__init__.py b/spacy/tests/lang/nn/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/spacy/tests/lang/nn/test_tokenizer.py b/spacy/tests/lang/nn/test_tokenizer.py new file mode 100644 index 000000000..74a6937bd --- /dev/null +++ b/spacy/tests/lang/nn/test_tokenizer.py @@ -0,0 +1,38 @@ +import pytest + +# examples taken from Omsetjingsminne frå Nynorsk pressekontor 2022 (https://www.nb.no/sprakbanken/en/resource-catalogue/oai-nb-no-sbr-80/) +# fmt: off +NN_TOKEN_EXCEPTION_TESTS = [ + ( + "Målet til direktoratet er at alle skal bli tilbydd jobb i politiet så raskt som mogleg i 2014.", + [ + "Målet", "til", "direktoratet", "er", "at", "alle", "skal", "bli", "tilbydd", "jobb", "i", "politiet", "så", "raskt", "som", "mogleg", "i", "2014", ".", + ], + ), + ( + "Han ønskjer ikkje at staten skal vere med på å finansiere slik undervisning, men dette er rektor på skulen ueinig i.", + [ + "Han", "ønskjer", "ikkje", "at", "staten", "skal", "vere", "med", "på", "å", "finansiere", "slik", "undervisning", ",", "men", "dette", "er", "rektor", "på", "skulen", "ueinig", "i", ".", + ], + ), + ( + "Ifølgje China Daily vart det 8.848 meter høge fjellet flytta 3 centimeter sørvestover under jordskjelvet, som vart målt til 7,8.", + [ + "Ifølgje", "China", "Daily", "vart", "det", "8.848", "meter", "høge", "fjellet", "flytta", "3", "centimeter", "sørvestover", "under", "jordskjelvet", ",", "som", "vart", "målt", "til", "7,8", ".", + ], + ), + ( + "Brukssesongen er frå nov. 
til mai, med ein topp i mars.", + [ + "Brukssesongen", "er", "frå", "nov.", "til", "mai", ",", "med", "ein", "topp", "i", "mars", ".", + ], + ), +] +# fmt: on + + +@pytest.mark.parametrize("text,expected_tokens", NN_TOKEN_EXCEPTION_TESTS) +def test_nn_tokenizer_handles_exception_cases(nn_tokenizer, text, expected_tokens): + tokens = nn_tokenizer(text) + token_list = [token.text for token in tokens if not token.is_space] + assert expected_tokens == token_list diff --git a/spacy/tests/lang/pt/test_noun_chunks.py b/spacy/tests/lang/pt/test_noun_chunks.py index 9a42ce268..eee96d593 100644 --- a/spacy/tests/lang/pt/test_noun_chunks.py +++ b/spacy/tests/lang/pt/test_noun_chunks.py @@ -1,6 +1,7 @@ -from spacy.tokens import Doc import pytest +from spacy.tokens import Doc + # fmt: off @pytest.mark.parametrize( diff --git a/spacy/tests/lang/pt/test_text.py b/spacy/tests/lang/pt/test_text.py index 3a9162b80..cb8723901 100644 --- a/spacy/tests/lang/pt/test_text.py +++ b/spacy/tests/lang/pt/test_text.py @@ -1,4 +1,5 @@ import pytest + from spacy.lang.pt.lex_attrs import like_num diff --git a/spacy/tests/lang/ro/test_tokenizer.py b/spacy/tests/lang/ro/test_tokenizer.py index 64c072470..d2affd607 100644 --- a/spacy/tests/lang/ro/test_tokenizer.py +++ b/spacy/tests/lang/ro/test_tokenizer.py @@ -1,6 +1,5 @@ import pytest - TEST_CASES = [ ( "Adresa este str. Principală nr. 5.", diff --git a/spacy/tests/lang/ru/test_lemmatizer.py b/spacy/tests/lang/ru/test_lemmatizer.py index 9a5a9ad68..66aa7e3a6 100644 --- a/spacy/tests/lang/ru/test_lemmatizer.py +++ b/spacy/tests/lang/ru/test_lemmatizer.py @@ -1,6 +1,6 @@ import pytest -from spacy.tokens import Doc +from spacy.tokens import Doc pytestmark = pytest.mark.filterwarnings("ignore::DeprecationWarning") diff --git a/spacy/tests/lang/ru/test_text.py b/spacy/tests/lang/ru/test_text.py index b0eaf66bb..0bbed2122 100644 --- a/spacy/tests/lang/ru/test_text.py +++ b/spacy/tests/lang/ru/test_text.py @@ -1,4 +1,5 @@ import pytest + from spacy.lang.ru.lex_attrs import like_num diff --git a/spacy/tests/lang/ru/test_tokenizer.py b/spacy/tests/lang/ru/test_tokenizer.py index 083b55a09..c941e21fc 100644 --- a/spacy/tests/lang/ru/test_tokenizer.py +++ b/spacy/tests/lang/ru/test_tokenizer.py @@ -1,6 +1,6 @@ from string import punctuation -import pytest +import pytest PUNCT_OPEN = ["(", "[", "{", "*"] PUNCT_CLOSE = [")", "]", "}", "*"] diff --git a/spacy/tests/lang/sr/test_tokenizer.py b/spacy/tests/lang/sr/test_tokenizer.py index fdcf790d8..7ecd9596b 100644 --- a/spacy/tests/lang/sr/test_tokenizer.py +++ b/spacy/tests/lang/sr/test_tokenizer.py @@ -1,6 +1,5 @@ import pytest - PUNCT_OPEN = ["(", "[", "{", "*"] PUNCT_CLOSE = [")", "]", "}", "*"] PUNCT_PAIRED = [("(", ")"), ("[", "]"), ("{", "}"), ("*", "*")] diff --git a/spacy/tests/lang/sv/test_lex_attrs.py b/spacy/tests/lang/sv/test_lex_attrs.py index 656c4706b..a47b17b27 100644 --- a/spacy/tests/lang/sv/test_lex_attrs.py +++ b/spacy/tests/lang/sv/test_lex_attrs.py @@ -1,4 +1,5 @@ import pytest + from spacy.lang.sv.lex_attrs import like_num diff --git a/spacy/tests/lang/sv/test_noun_chunks.py b/spacy/tests/lang/sv/test_noun_chunks.py index d2410156c..599148384 100644 --- a/spacy/tests/lang/sv/test_noun_chunks.py +++ b/spacy/tests/lang/sv/test_noun_chunks.py @@ -1,4 +1,5 @@ import pytest + from spacy.tokens import Doc diff --git a/spacy/tests/lang/sv/test_tokenizer.py b/spacy/tests/lang/sv/test_tokenizer.py index 8871f4414..f19c6b66f 100644 --- a/spacy/tests/lang/sv/test_tokenizer.py +++ b/spacy/tests/lang/sv/test_tokenizer.py @@ 
-1,6 +1,5 @@ import pytest - SV_TOKEN_EXCEPTION_TESTS = [ ( "Smörsåsen används bl.a. till fisk", diff --git a/spacy/tests/lang/ta/test_text.py b/spacy/tests/lang/ta/test_text.py index 228a14c18..2d15e96fc 100644 --- a/spacy/tests/lang/ta/test_text.py +++ b/spacy/tests/lang/ta/test_text.py @@ -1,4 +1,5 @@ import pytest + from spacy.lang.ta import Tamil # Wikipedia excerpt: https://en.wikipedia.org/wiki/Chennai (Tamil Language) diff --git a/spacy/tests/lang/ta/test_tokenizer.py b/spacy/tests/lang/ta/test_tokenizer.py index 6ba8a2400..e668b5aca 100644 --- a/spacy/tests/lang/ta/test_tokenizer.py +++ b/spacy/tests/lang/ta/test_tokenizer.py @@ -1,6 +1,7 @@ import pytest -from spacy.symbols import ORTH + from spacy.lang.ta import Tamil +from spacy.symbols import ORTH TA_BASIC_TOKENIZATION_TESTS = [ ( diff --git a/spacy/tests/lang/test_attrs.py b/spacy/tests/lang/test_attrs.py index 1c27c1744..fd96e8f9b 100644 --- a/spacy/tests/lang/test_attrs.py +++ b/spacy/tests/lang/test_attrs.py @@ -1,10 +1,15 @@ import pytest -from spacy.attrs import intify_attrs, ENT_IOB -from spacy.attrs import IS_ALPHA, LEMMA, NORM, ORTH, intify_attrs +from spacy.attrs import ENT_IOB, IS_ALPHA, LEMMA, NORM, ORTH, intify_attrs from spacy.lang.en.stop_words import STOP_WORDS -from spacy.lang.lex_attrs import is_ascii, is_currency, is_punct, is_stop -from spacy.lang.lex_attrs import like_url, word_shape +from spacy.lang.lex_attrs import ( + is_ascii, + is_currency, + is_punct, + is_stop, + like_url, + word_shape, +) @pytest.mark.parametrize("word", ["the"]) diff --git a/spacy/tests/lang/test_initialize.py b/spacy/tests/lang/test_initialize.py index 36f4a75e0..8a158647a 100644 --- a/spacy/tests/lang/test_initialize.py +++ b/spacy/tests/lang/test_initialize.py @@ -1,6 +1,6 @@ import pytest -from spacy.util import get_lang_class +from spacy.util import get_lang_class # fmt: off # Only include languages with no external dependencies diff --git a/spacy/tests/lang/test_lemmatizers.py b/spacy/tests/lang/test_lemmatizers.py index e419f0a14..ddb3336ff 100644 --- a/spacy/tests/lang/test_lemmatizers.py +++ b/spacy/tests/lang/test_lemmatizers.py @@ -1,9 +1,9 @@ import pytest + from spacy import registry from spacy.lookups import Lookups from spacy.util import get_lang_class - # fmt: off # Only include languages with no external dependencies # excluded: ru, uk diff --git a/spacy/tests/lang/th/test_serialize.py b/spacy/tests/lang/th/test_serialize.py index a3de4bf54..57d0f1726 100644 --- a/spacy/tests/lang/th/test_serialize.py +++ b/spacy/tests/lang/th/test_serialize.py @@ -1,6 +1,7 @@ import pickle from spacy.lang.th import Thai + from ...util import make_tempdir diff --git a/spacy/tests/lang/tl/test_punct.py b/spacy/tests/lang/tl/test_punct.py index d6bcf297d..e2c93bf88 100644 --- a/spacy/tests/lang/tl/test_punct.py +++ b/spacy/tests/lang/tl/test_punct.py @@ -1,7 +1,7 @@ import pytest -from spacy.util import compile_prefix_regex -from spacy.lang.punctuation import TOKENIZER_PREFIXES +from spacy.lang.punctuation import TOKENIZER_PREFIXES +from spacy.util import compile_prefix_regex PUNCT_OPEN = ["(", "[", "{", "*"] PUNCT_CLOSE = [")", "]", "}", "*"] diff --git a/spacy/tests/lang/tl/test_text.py b/spacy/tests/lang/tl/test_text.py index 17429617c..26635ca90 100644 --- a/spacy/tests/lang/tl/test_text.py +++ b/spacy/tests/lang/tl/test_text.py @@ -1,4 +1,5 @@ import pytest + from spacy.lang.tl.lex_attrs import like_num # https://github.com/explosion/spaCy/blob/master/spacy/tests/lang/en/test_text.py diff --git 
a/spacy/tests/lang/tr/test_text.py b/spacy/tests/lang/tr/test_text.py index 323b11bd1..b4d84daae 100644 --- a/spacy/tests/lang/tr/test_text.py +++ b/spacy/tests/lang/tr/test_text.py @@ -1,4 +1,5 @@ import pytest + from spacy.lang.tr.lex_attrs import like_num diff --git a/spacy/tests/lang/tr/test_tokenizer.py b/spacy/tests/lang/tr/test_tokenizer.py index 9f988eae9..b07c98535 100644 --- a/spacy/tests/lang/tr/test_tokenizer.py +++ b/spacy/tests/lang/tr/test_tokenizer.py @@ -1,6 +1,5 @@ import pytest - ABBREV_TESTS = [ ("Dr. Murat Bey ile görüştüm.", ["Dr.", "Murat", "Bey", "ile", "görüştüm", "."]), ("Dr.la görüştüm.", ["Dr.la", "görüştüm", "."]), diff --git a/spacy/tests/lang/tt/test_tokenizer.py b/spacy/tests/lang/tt/test_tokenizer.py index 246d2824d..0bb241f27 100644 --- a/spacy/tests/lang/tt/test_tokenizer.py +++ b/spacy/tests/lang/tt/test_tokenizer.py @@ -1,6 +1,5 @@ import pytest - INFIX_HYPHEN_TESTS = [ ("Явым-төшем күләме.", "Явым-төшем күләме .".split()), ("Хатын-кыз киеме.", "Хатын-кыз киеме .".split()), diff --git a/spacy/tests/lang/uk/test_lemmatizer.py b/spacy/tests/lang/uk/test_lemmatizer.py index a65bb25e5..060114cdf 100644 --- a/spacy/tests/lang/uk/test_lemmatizer.py +++ b/spacy/tests/lang/uk/test_lemmatizer.py @@ -1,6 +1,6 @@ import pytest -from spacy.tokens import Doc +from spacy.tokens import Doc pytestmark = pytest.mark.filterwarnings("ignore::DeprecationWarning") diff --git a/spacy/tests/lang/uk/test_tokenizer.py b/spacy/tests/lang/uk/test_tokenizer.py index 6596f490a..7960a30a2 100644 --- a/spacy/tests/lang/uk/test_tokenizer.py +++ b/spacy/tests/lang/uk/test_tokenizer.py @@ -1,6 +1,5 @@ import pytest - PUNCT_OPEN = ["(", "[", "{", "*"] PUNCT_CLOSE = [")", "]", "}", "*"] PUNCT_PAIRED = [("(", ")"), ("[", "]"), ("{", "}"), ("*", "*")] diff --git a/spacy/tests/lang/vi/test_serialize.py b/spacy/tests/lang/vi/test_serialize.py index 55dab799c..20bfd20d5 100644 --- a/spacy/tests/lang/vi/test_serialize.py +++ b/spacy/tests/lang/vi/test_serialize.py @@ -1,6 +1,7 @@ import pickle from spacy.lang.vi import Vietnamese + from ...util import make_tempdir diff --git a/spacy/tests/lang/vi/test_tokenizer.py b/spacy/tests/lang/vi/test_tokenizer.py index 3d0642d1e..ca6dee985 100644 --- a/spacy/tests/lang/vi/test_tokenizer.py +++ b/spacy/tests/lang/vi/test_tokenizer.py @@ -1,8 +1,8 @@ import pytest -from ...tokenizer.test_naughty_strings import NAUGHTY_STRINGS from spacy.lang.vi import Vietnamese +from ...tokenizer.test_naughty_strings import NAUGHTY_STRINGS # fmt: off TOKENIZER_TESTS = [ diff --git a/spacy/tests/lang/yo/test_text.py b/spacy/tests/lang/yo/test_text.py index 48b689f3d..a1bbc38da 100644 --- a/spacy/tests/lang/yo/test_text.py +++ b/spacy/tests/lang/yo/test_text.py @@ -1,4 +1,5 @@ import pytest + from spacy.lang.yo.lex_attrs import like_num diff --git a/spacy/tests/lang/zh/test_serialize.py b/spacy/tests/lang/zh/test_serialize.py index 03cdbbe24..4b014d713 100644 --- a/spacy/tests/lang/zh/test_serialize.py +++ b/spacy/tests/lang/zh/test_serialize.py @@ -1,5 +1,7 @@ import pytest + from spacy.lang.zh import Chinese + from ...util import make_tempdir diff --git a/spacy/tests/lang/zh/test_tokenizer.py b/spacy/tests/lang/zh/test_tokenizer.py index 741eb0ace..cdba5e397 100644 --- a/spacy/tests/lang/zh/test_tokenizer.py +++ b/spacy/tests/lang/zh/test_tokenizer.py @@ -1,7 +1,7 @@ import pytest -from spacy.lang.zh import Chinese, _get_pkuseg_trie_data from thinc.api import ConfigValidationError +from spacy.lang.zh import Chinese, _get_pkuseg_trie_data # fmt: off TEXTS = 
("作为语言而言,为世界使用人数最多的语言,目前世界有五分之一人口做为母语。",) diff --git a/spacy/tests/matcher/test_dependency_matcher.py b/spacy/tests/matcher/test_dependency_matcher.py index 200384320..be33f90cf 100644 --- a/spacy/tests/matcher/test_dependency_matcher.py +++ b/spacy/tests/matcher/test_dependency_matcher.py @@ -1,8 +1,10 @@ -import pytest +import copy import pickle import re -import copy + +import pytest from mock import Mock + from spacy.matcher import DependencyMatcher from spacy.tokens import Doc, Token @@ -214,6 +216,11 @@ def test_dependency_matcher_pattern_validation(en_vocab): pattern2 = copy.deepcopy(pattern) pattern2[1]["RIGHT_ID"] = "fox" matcher.add("FOUNDED", [pattern2]) + # invalid key + with pytest.warns(UserWarning): + pattern2 = copy.deepcopy(pattern) + pattern2[1]["FOO"] = "BAR" + matcher.add("FOUNDED", [pattern2]) def test_dependency_matcher_callback(en_vocab, doc): diff --git a/spacy/tests/matcher/test_levenshtein.py b/spacy/tests/matcher/test_levenshtein.py index 5afb7e1fc..fd85579ae 100644 --- a/spacy/tests/matcher/test_levenshtein.py +++ b/spacy/tests/matcher/test_levenshtein.py @@ -1,4 +1,5 @@ import pytest + from spacy.matcher import levenshtein from spacy.matcher.levenshtein import levenshtein_compare diff --git a/spacy/tests/matcher/test_matcher_api.py b/spacy/tests/matcher/test_matcher_api.py index 09ab6c7dc..c824ca392 100644 --- a/spacy/tests/matcher/test_matcher_api.py +++ b/spacy/tests/matcher/test_matcher_api.py @@ -1,7 +1,8 @@ import pytest from mock import Mock + from spacy.matcher import Matcher -from spacy.tokens import Doc, Token, Span +from spacy.tokens import Doc, Span, Token from ..doc.test_underscore import clean_underscore # noqa: F401 diff --git a/spacy/tests/matcher/test_pattern_validation.py b/spacy/tests/matcher/test_pattern_validation.py index e7eced02c..45f9f4ee7 100644 --- a/spacy/tests/matcher/test_pattern_validation.py +++ b/spacy/tests/matcher/test_pattern_validation.py @@ -1,6 +1,7 @@ import pytest -from spacy.matcher import Matcher + from spacy.errors import MatchPatternError +from spacy.matcher import Matcher from spacy.schemas import validate_token_pattern # (pattern, num errors with validation, num errors identified with minimal @@ -51,7 +52,8 @@ TEST_PATTERNS = [ @pytest.mark.parametrize( - "pattern", [[{"XX": "y"}, {"LENGTH": "2"}, {"TEXT": {"IN": 5}}]] + "pattern", + [[{"XX": "y"}], [{"LENGTH": "2"}], [{"TEXT": {"IN": 5}}], [{"text": {"in": 6}}]], ) def test_matcher_pattern_validation(en_vocab, pattern): matcher = Matcher(en_vocab, validate=True) diff --git a/spacy/tests/matcher/test_phrase_matcher.py b/spacy/tests/matcher/test_phrase_matcher.py index 8a8d9eb84..7335bbdf1 100644 --- a/spacy/tests/matcher/test_phrase_matcher.py +++ b/spacy/tests/matcher/test_phrase_matcher.py @@ -1,14 +1,14 @@ -import pytest import warnings + +import pytest import srsly from mock import Mock from spacy.lang.en import English -from spacy.matcher import PhraseMatcher, Matcher +from spacy.matcher import Matcher, PhraseMatcher from spacy.tokens import Doc, Span from spacy.vocab import Vocab - from ..util import make_tempdir diff --git a/spacy/tests/morphology/test_morph_features.py b/spacy/tests/morphology/test_morph_features.py index 0693da690..ae20f9ba8 100644 --- a/spacy/tests/morphology/test_morph_features.py +++ b/spacy/tests/morphology/test_morph_features.py @@ -1,4 +1,5 @@ import pytest + from spacy.morphology import Morphology from spacy.strings import StringStore, get_string_id diff --git a/spacy/tests/morphology/test_morph_pickle.py 
b/spacy/tests/morphology/test_morph_pickle.py index d9b0e3476..5c1a8a31e 100644 --- a/spacy/tests/morphology/test_morph_pickle.py +++ b/spacy/tests/morphology/test_morph_pickle.py @@ -1,5 +1,7 @@ -import pytest import pickle + +import pytest + from spacy.morphology import Morphology from spacy.strings import StringStore diff --git a/spacy/tests/package/test_requirements.py b/spacy/tests/package/test_requirements.py index b403f274f..ff07c5b45 100644 --- a/spacy/tests/package/test_requirements.py +++ b/spacy/tests/package/test_requirements.py @@ -4,15 +4,17 @@ from pathlib import Path def test_build_dependencies(): # Check that library requirements are pinned exactly the same across different setup files. - # TODO: correct checks for numpy rather than ignoring libs_ignore_requirements = [ + "numpy", "pytest", "pytest-timeout", "mock", "flake8", "hypothesis", "pre-commit", + "cython-lint", "black", + "isort", "mypy", "types-dataclasses", "types-mock", @@ -21,6 +23,7 @@ def test_build_dependencies(): ] # ignore language-specific packages that shouldn't be installed by all libs_ignore_setup = [ + "numpy", "fugashi", "natto-py", "pythainlp", diff --git a/spacy/tests/parser/test_add_label.py b/spacy/tests/parser/test_add_label.py index f89e993e9..89626597d 100644 --- a/spacy/tests/parser/test_add_label.py +++ b/spacy/tests/parser/test_add_label.py @@ -1,14 +1,15 @@ import pytest from thinc.api import Adam, fix_random_seed + from spacy import registry -from spacy.language import Language from spacy.attrs import NORM -from spacy.vocab import Vocab -from spacy.training import Example -from spacy.tokens import Doc +from spacy.language import Language from spacy.pipeline import DependencyParser, EntityRecognizer -from spacy.pipeline.ner import DEFAULT_NER_MODEL from spacy.pipeline.dep_parser import DEFAULT_PARSER_MODEL +from spacy.pipeline.ner import DEFAULT_NER_MODEL +from spacy.tokens import Doc +from spacy.training import Example +from spacy.vocab import Vocab @pytest.fixture diff --git a/spacy/tests/parser/test_arc_eager_oracle.py b/spacy/tests/parser/test_arc_eager_oracle.py index bb226f9c5..fafd23268 100644 --- a/spacy/tests/parser/test_arc_eager_oracle.py +++ b/spacy/tests/parser/test_arc_eager_oracle.py @@ -1,12 +1,13 @@ import pytest -from spacy.vocab import Vocab + from spacy import registry -from spacy.training import Example from spacy.pipeline import DependencyParser -from spacy.tokens import Doc -from spacy.pipeline._parser_internals.nonproj import projectivize from spacy.pipeline._parser_internals.arc_eager import ArcEager +from spacy.pipeline._parser_internals.nonproj import projectivize from spacy.pipeline.dep_parser import DEFAULT_PARSER_MODEL +from spacy.tokens import Doc +from spacy.training import Example +from spacy.vocab import Vocab def get_sequence_costs(M, words, heads, deps, transitions): diff --git a/spacy/tests/parser/test_ner.py b/spacy/tests/parser/test_ner.py index 7198859b3..1509c31bb 100644 --- a/spacy/tests/parser/test_ner.py +++ b/spacy/tests/parser/test_ner.py @@ -1,21 +1,21 @@ +import logging import random import pytest from numpy.testing import assert_equal +from spacy import registry, util from spacy.attrs import ENT_IOB -from spacy import util, registry from spacy.lang.en import English from spacy.lang.it import Italian from spacy.language import Language from spacy.lookups import Lookups from spacy.pipeline import EntityRecognizer -from spacy.pipeline.ner import DEFAULT_NER_MODEL from spacy.pipeline._parser_internals.ner import BiluoPushDown -from 
spacy.training import Example, iob_to_biluo, split_bilu_label +from spacy.pipeline.ner import DEFAULT_NER_MODEL from spacy.tokens import Doc, Span +from spacy.training import Example, iob_to_biluo, split_bilu_label from spacy.vocab import Vocab -import logging from ..util import make_tempdir diff --git a/spacy/tests/parser/test_neural_parser.py b/spacy/tests/parser/test_neural_parser.py index 1bb5d4aa5..5bef5758f 100644 --- a/spacy/tests/parser/test_neural_parser.py +++ b/spacy/tests/parser/test_neural_parser.py @@ -1,14 +1,14 @@ import pytest +from thinc.api import Model from spacy import registry -from spacy.training import Example -from spacy.vocab import Vocab from spacy.pipeline._parser_internals.arc_eager import ArcEager +from spacy.pipeline.dep_parser import DEFAULT_PARSER_MODEL +from spacy.pipeline.tok2vec import DEFAULT_TOK2VEC_MODEL from spacy.pipeline.transition_parser import Parser from spacy.tokens.doc import Doc -from thinc.api import Model -from spacy.pipeline.tok2vec import DEFAULT_TOK2VEC_MODEL -from spacy.pipeline.dep_parser import DEFAULT_PARSER_MODEL +from spacy.training import Example +from spacy.vocab import Vocab @pytest.fixture diff --git a/spacy/tests/parser/test_nn_beam.py b/spacy/tests/parser/test_nn_beam.py index 4ba020ef0..f852e5cda 100644 --- a/spacy/tests/parser/test_nn_beam.py +++ b/spacy/tests/parser/test_nn_beam.py @@ -1,16 +1,17 @@ -import pytest import hypothesis import hypothesis.strategies import numpy -from spacy.vocab import Vocab -from spacy.language import Language -from spacy.pipeline._parser_internals.arc_eager import ArcEager -from spacy.tokens import Doc -from spacy.pipeline._parser_internals._beam_utils import BeamBatch -from spacy.pipeline._parser_internals.stateclass import StateClass -from spacy.training import Example +import pytest from thinc.tests.strategies import ndarrays_of_shape +from spacy.language import Language +from spacy.pipeline._parser_internals._beam_utils import BeamBatch +from spacy.pipeline._parser_internals.arc_eager import ArcEager +from spacy.pipeline._parser_internals.stateclass import StateClass +from spacy.tokens import Doc +from spacy.training import Example +from spacy.vocab import Vocab + @pytest.fixture(scope="module") def vocab(): diff --git a/spacy/tests/parser/test_nonproj.py b/spacy/tests/parser/test_nonproj.py index 051d0ef0c..f4e09fc91 100644 --- a/spacy/tests/parser/test_nonproj.py +++ b/spacy/tests/parser/test_nonproj.py @@ -1,7 +1,12 @@ import pytest -from spacy.pipeline._parser_internals.nonproj import ancestors, contains_cycle -from spacy.pipeline._parser_internals.nonproj import is_nonproj_tree, is_nonproj_arc + from spacy.pipeline._parser_internals import nonproj +from spacy.pipeline._parser_internals.nonproj import ( + ancestors, + contains_cycle, + is_nonproj_arc, + is_nonproj_tree, +) from spacy.tokens import Doc diff --git a/spacy/tests/parser/test_parse.py b/spacy/tests/parser/test_parse.py index 4b05c6721..3565c62af 100644 --- a/spacy/tests/parser/test_parse.py +++ b/spacy/tests/parser/test_parse.py @@ -5,12 +5,12 @@ from thinc.api import Adam from spacy import registry, util from spacy.attrs import DEP, NORM from spacy.lang.en import English -from spacy.tokens import Doc -from spacy.training import Example -from spacy.vocab import Vocab from spacy.pipeline import DependencyParser from spacy.pipeline.dep_parser import DEFAULT_PARSER_MODEL from spacy.pipeline.tok2vec import DEFAULT_TOK2VEC_MODEL +from spacy.tokens import Doc +from spacy.training import Example +from spacy.vocab import Vocab 
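
The reshuffled imports in test_nonproj.py above pull the projectivity helpers (ancestors, contains_cycle, is_nonproj_arc, is_nonproj_tree) from spacy.pipeline._parser_internals.nonproj. A rough usage sketch, assuming these internal helpers take a flat list of head indices with the root pointing at itself, as the fixtures in that test module do; the patch itself only reorders the imports:

```python
from spacy.pipeline._parser_internals.nonproj import contains_cycle, is_nonproj_tree

# heads[i] is the index of token i's head; the root token points at itself.
heads = [1, 2, 2, 4, 5, 2, 2]

print(contains_cycle(heads))   # expected: None, i.e. no cycle in the tree
print(is_nonproj_tree(heads))  # expected: False, every arc is projective
```
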
from ..util import apply_transition_sequence, make_tempdir diff --git a/spacy/tests/parser/test_parse_navigate.py b/spacy/tests/parser/test_parse_navigate.py index 50da60594..d2f684fdc 100644 --- a/spacy/tests/parser/test_parse_navigate.py +++ b/spacy/tests/parser/test_parse_navigate.py @@ -1,4 +1,5 @@ import pytest + from spacy.tokens import Doc diff --git a/spacy/tests/parser/test_preset_sbd.py b/spacy/tests/parser/test_preset_sbd.py index d71388900..dcbb9679d 100644 --- a/spacy/tests/parser/test_preset_sbd.py +++ b/spacy/tests/parser/test_preset_sbd.py @@ -1,12 +1,13 @@ import pytest from thinc.api import Adam -from spacy.attrs import NORM -from spacy.vocab import Vocab + from spacy import registry -from spacy.training import Example +from spacy.attrs import NORM +from spacy.pipeline import DependencyParser from spacy.pipeline.dep_parser import DEFAULT_PARSER_MODEL from spacy.tokens import Doc -from spacy.pipeline import DependencyParser +from spacy.training import Example +from spacy.vocab import Vocab @pytest.fixture diff --git a/spacy/tests/parser/test_space_attachment.py b/spacy/tests/parser/test_space_attachment.py index 2b80272d6..30e66b37a 100644 --- a/spacy/tests/parser/test_space_attachment.py +++ b/spacy/tests/parser/test_space_attachment.py @@ -1,4 +1,5 @@ import pytest + from spacy.tokens import Doc from ..util import apply_transition_sequence diff --git a/spacy/tests/parser/test_state.py b/spacy/tests/parser/test_state.py index ca1755c48..0febc3d09 100644 --- a/spacy/tests/parser/test_state.py +++ b/spacy/tests/parser/test_state.py @@ -1,8 +1,8 @@ import pytest +from spacy.pipeline._parser_internals.stateclass import StateClass from spacy.tokens.doc import Doc from spacy.vocab import Vocab -from spacy.pipeline._parser_internals.stateclass import StateClass @pytest.fixture diff --git a/spacy/tests/pipeline/test_analysis.py b/spacy/tests/pipeline/test_analysis.py index df3d7dff5..503b501ce 100644 --- a/spacy/tests/pipeline/test_analysis.py +++ b/spacy/tests/pipeline/test_analysis.py @@ -1,7 +1,8 @@ +import pytest +from mock import Mock + from spacy.language import Language from spacy.pipe_analysis import get_attr_info, validate_attrs -from mock import Mock -import pytest def test_component_decorator_assigns(): diff --git a/spacy/tests/pipeline/test_annotates_on_update.py b/spacy/tests/pipeline/test_annotates_on_update.py index 869b8b874..d4feebd30 100644 --- a/spacy/tests/pipeline/test_annotates_on_update.py +++ b/spacy/tests/pipeline/test_annotates_on_update.py @@ -1,12 +1,13 @@ from typing import Callable, Iterable, Iterator -import pytest +import pytest from thinc.api import Config + +from spacy.lang.en import English from spacy.language import Language from spacy.training import Example from spacy.training.loop import train -from spacy.lang.en import English -from spacy.util import registry, load_model_from_config +from spacy.util import load_model_from_config, registry @pytest.fixture diff --git a/spacy/tests/pipeline/test_attributeruler.py b/spacy/tests/pipeline/test_attributeruler.py index dab3ebf57..06587b4be 100644 --- a/spacy/tests/pipeline/test_attributeruler.py +++ b/spacy/tests/pipeline/test_attributeruler.py @@ -1,10 +1,11 @@ -import pytest import numpy -from spacy.training import Example +import pytest + +from spacy import registry, util from spacy.lang.en import English from spacy.pipeline import AttributeRuler -from spacy import util, registry from spacy.tokens import Doc +from spacy.training import Example from ..util import make_tempdir diff --git 
a/spacy/tests/pipeline/test_edit_tree_lemmatizer.py b/spacy/tests/pipeline/test_edit_tree_lemmatizer.py index 128d75680..5a8f0aee2 100644 --- a/spacy/tests/pipeline/test_edit_tree_lemmatizer.py +++ b/spacy/tests/pipeline/test_edit_tree_lemmatizer.py @@ -1,16 +1,17 @@ import pickle + +import hypothesis.strategies as st import pytest from hypothesis import given -import hypothesis.strategies as st + from spacy import util from spacy.lang.en import English from spacy.language import Language from spacy.pipeline._edit_tree_internals.edit_trees import EditTrees -from spacy.training import Example from spacy.strings import StringStore +from spacy.training import Example from spacy.util import make_tempdir - TRAIN_DATA = [ ("She likes green eggs", {"lemmas": ["she", "like", "green", "egg"]}), ("Eat blue ham", {"lemmas": ["eat", "blue", "ham"]}), diff --git a/spacy/tests/pipeline/test_entity_linker.py b/spacy/tests/pipeline/test_entity_linker.py index fc960cb01..5e50a4d28 100644 --- a/spacy/tests/pipeline/test_entity_linker.py +++ b/spacy/tests/pipeline/test_entity_linker.py @@ -1,12 +1,12 @@ -from typing import Callable, Iterable, Dict, Any, Tuple +from typing import Any, Callable, Dict, Iterable, Tuple import pytest from numpy.testing import assert_equal -from spacy import registry, util, Language +from spacy import Language, registry, util from spacy.attrs import ENT_KB_ID from spacy.compat import pickle -from spacy.kb import Candidate, InMemoryLookupKB, get_candidates, KnowledgeBase +from spacy.kb import Candidate, InMemoryLookupKB, KnowledgeBase, get_candidates from spacy.lang.en import English from spacy.ml import load_kb from spacy.ml.models.entity_linker import build_span_maker @@ -15,7 +15,7 @@ from spacy.pipeline.legacy import EntityLinker_v1 from spacy.pipeline.tok2vec import DEFAULT_TOK2VEC_MODEL from spacy.scorer import Scorer from spacy.tests.util import make_tempdir -from spacy.tokens import Span, Doc +from spacy.tokens import Doc, Span from spacy.training import Example from spacy.util import ensure_path from spacy.vocab import Vocab @@ -717,7 +717,7 @@ GOLD_entities = ["Q2146908", "Q7381115", "Q7381115", "Q2146908"] # fmt: on -def test_overfitting_IO(): +def test_overfitting_IO_gold_entities(): # Simple test to try and quickly overfit the NEL component - ensuring the ML models work correctly nlp = English() vector_length = 3 @@ -744,7 +744,9 @@ def test_overfitting_IO(): return mykb # Create the Entity Linker component and add it to the pipeline - entity_linker = nlp.add_pipe("entity_linker", last=True) + entity_linker = nlp.add_pipe( + "entity_linker", last=True, config={"use_gold_ents": True} + ) assert isinstance(entity_linker, EntityLinker) entity_linker.set_kb(create_kb) assert "Q2146908" in entity_linker.vocab.strings @@ -807,6 +809,107 @@ def test_overfitting_IO(): assert_equal(batch_deps_1, batch_deps_2) assert_equal(batch_deps_1, no_batch_deps) + eval = nlp.evaluate(train_examples) + assert "nel_macro_p" in eval + assert "nel_macro_r" in eval + assert "nel_macro_f" in eval + assert "nel_micro_p" in eval + assert "nel_micro_r" in eval + assert "nel_micro_f" in eval + assert "nel_f_per_type" in eval + assert "PERSON" in eval["nel_f_per_type"] + + assert eval["nel_macro_f"] > 0 + assert eval["nel_micro_f"] > 0 + + +def test_overfitting_IO_with_ner(): + # Simple test to try and overfit the NER and NEL component in combination - ensuring the ML models work correctly + nlp = English() + vector_length = 3 + assert "Q2146908" not in nlp.vocab.strings + + # Convert the texts to 
docs to make sure we have doc.ents set for the training examples + train_examples = [] + for text, annotation in TRAIN_DATA: + doc = nlp(text) + train_examples.append(Example.from_dict(doc, annotation)) + + def create_kb(vocab): + # create artificial KB - assign same prior weight to the two russ cochran's + # Q2146908 (Russ Cochran): American golfer + # Q7381115 (Russ Cochran): publisher + mykb = InMemoryLookupKB(vocab, entity_vector_length=vector_length) + mykb.add_entity(entity="Q2146908", freq=12, entity_vector=[6, -4, 3]) + mykb.add_entity(entity="Q7381115", freq=12, entity_vector=[9, 1, -7]) + mykb.add_alias( + alias="Russ Cochran", + entities=["Q2146908", "Q7381115"], + probabilities=[0.5, 0.5], + ) + return mykb + + # Create the NER and EL components and add them to the pipeline + ner = nlp.add_pipe("ner", first=True) + entity_linker = nlp.add_pipe( + "entity_linker", last=True, config={"use_gold_ents": False} + ) + entity_linker.set_kb(create_kb) + + train_examples = [] + for text, annotations in TRAIN_DATA: + train_examples.append(Example.from_dict(nlp.make_doc(text), annotations)) + for ent in annotations.get("entities"): + ner.add_label(ent[2]) + optimizer = nlp.initialize() + + # train the NER and NEL pipes + for i in range(50): + losses = {} + nlp.update(train_examples, sgd=optimizer, losses=losses) + assert losses["ner"] < 0.001 + assert losses["entity_linker"] < 0.001 + + # adding additional components that are required for the entity_linker + nlp.add_pipe("sentencizer", first=True) + + # test the trained model + test_text = "Russ Cochran captured his first major title with his son as caddie." + doc = nlp(test_text) + ents = doc.ents + assert len(ents) == 1 + assert ents[0].text == "Russ Cochran" + assert ents[0].label_ == "PERSON" + assert ents[0].kb_id_ != "NIL" + + # TODO: below assert is still flaky - EL doesn't properly overfit quite yet + # assert ents[0].kb_id_ == "Q2146908" + + # Also test the results are still the same after IO + with make_tempdir() as tmp_dir: + nlp.to_disk(tmp_dir) + nlp2 = util.load_model_from_path(tmp_dir) + assert nlp2.pipe_names == nlp.pipe_names + doc2 = nlp2(test_text) + ents2 = doc2.ents + assert len(ents2) == 1 + assert ents2[0].text == "Russ Cochran" + assert ents2[0].label_ == "PERSON" + assert ents2[0].kb_id_ != "NIL" + + eval = nlp.evaluate(train_examples) + assert "nel_macro_f" in eval + assert "nel_micro_f" in eval + assert "ents_f" in eval + assert "nel_f_per_type" in eval + assert "ents_per_type" in eval + assert "PERSON" in eval["nel_f_per_type"] + assert "PERSON" in eval["ents_per_type"] + + assert eval["nel_macro_f"] > 0 + assert eval["nel_micro_f"] > 0 + assert eval["ents_f"] > 0 + def test_kb_serialization(): # Test that the KB can be used in a pipeline with a different vocab diff --git a/spacy/tests/pipeline/test_entity_ruler.py b/spacy/tests/pipeline/test_entity_ruler.py index 417f930cb..d0ab00391 100644 --- a/spacy/tests/pipeline/test_entity_ruler.py +++ b/spacy/tests/pipeline/test_entity_ruler.py @@ -1,16 +1,14 @@ import pytest +from thinc.api import NumpyOps, get_current_ops from spacy import registry -from spacy.tokens import Doc, Span -from spacy.language import Language -from spacy.lang.en import English -from spacy.pipeline import EntityRuler, EntityRecognizer, merge_entities -from spacy.pipeline import SpanRuler -from spacy.pipeline.ner import DEFAULT_NER_MODEL from spacy.errors import MatchPatternError +from spacy.lang.en import English +from spacy.language import Language +from spacy.pipeline import 
EntityRecognizer, EntityRuler, SpanRuler, merge_entities +from spacy.pipeline.ner import DEFAULT_NER_MODEL from spacy.tests.util import make_tempdir - -from thinc.api import NumpyOps, get_current_ops +from spacy.tokens import Doc, Span ENTITY_RULERS = ["entity_ruler", "future_entity_ruler"] diff --git a/spacy/tests/pipeline/test_functions.py b/spacy/tests/pipeline/test_functions.py index e4adfe2fe..f4db4ee98 100644 --- a/spacy/tests/pipeline/test_functions.py +++ b/spacy/tests/pipeline/test_functions.py @@ -1,7 +1,8 @@ import pytest -from spacy.pipeline.functions import merge_subtokens + from spacy.language import Language -from spacy.tokens import Span, Doc +from spacy.pipeline.functions import merge_subtokens +from spacy.tokens import Doc, Span from ..doc.test_underscore import clean_underscore # noqa: F401 diff --git a/spacy/tests/pipeline/test_initialize.py b/spacy/tests/pipeline/test_initialize.py index c9b514770..9854b391e 100644 --- a/spacy/tests/pipeline/test_initialize.py +++ b/spacy/tests/pipeline/test_initialize.py @@ -1,9 +1,15 @@ import pytest -from spacy.language import Language -from spacy.lang.en import English -from spacy.training import Example + +try: + from pydantic.v1 import StrictBool +except ImportError: + from pydantic import StrictBool # type: ignore + from thinc.api import ConfigValidationError -from pydantic import StrictBool + +from spacy.lang.en import English +from spacy.language import Language +from spacy.training import Example def test_initialize_arguments(): diff --git a/spacy/tests/pipeline/test_lemmatizer.py b/spacy/tests/pipeline/test_lemmatizer.py index 0d2d3d6e5..ccc2e0b15 100644 --- a/spacy/tests/pipeline/test_lemmatizer.py +++ b/spacy/tests/pipeline/test_lemmatizer.py @@ -1,6 +1,8 @@ -import pytest import pickle -from spacy import util, registry + +import pytest + +from spacy import registry, util from spacy.lang.en import English from spacy.lookups import Lookups diff --git a/spacy/tests/pipeline/test_models.py b/spacy/tests/pipeline/test_models.py index e3fd28d0f..fef0017a8 100644 --- a/spacy/tests/pipeline/test_models.py +++ b/spacy/tests/pipeline/test_models.py @@ -3,7 +3,6 @@ from typing import List import numpy import pytest from numpy.testing import assert_almost_equal -from spacy.vocab import Vocab from thinc.api import Model, data_validation, get_current_ops from thinc.types import Array2d, Ragged @@ -11,7 +10,7 @@ from spacy.lang.en import English from spacy.ml import FeatureExtractor, StaticVectors from spacy.ml._character_embed import CharacterEmbed from spacy.tokens import Doc - +from spacy.vocab import Vocab OPS = get_current_ops() diff --git a/spacy/tests/pipeline/test_morphologizer.py b/spacy/tests/pipeline/test_morphologizer.py index 74c571ccf..0d895f236 100644 --- a/spacy/tests/pipeline/test_morphologizer.py +++ b/spacy/tests/pipeline/test_morphologizer.py @@ -1,16 +1,15 @@ import pytest -from numpy.testing import assert_equal, assert_almost_equal - +from numpy.testing import assert_almost_equal, assert_equal from thinc.api import get_current_ops from spacy import util -from spacy.training import Example +from spacy.attrs import MORPH from spacy.lang.en import English from spacy.language import Language -from spacy.tests.util import make_tempdir from spacy.morphology import Morphology -from spacy.attrs import MORPH +from spacy.tests.util import make_tempdir from spacy.tokens import Doc +from spacy.training import Example def test_label_types(): diff --git a/spacy/tests/pipeline/test_pipe_factories.py 
b/spacy/tests/pipeline/test_pipe_factories.py index 232b0512e..c45dccb06 100644 --- a/spacy/tests/pipeline/test_pipe_factories.py +++ b/spacy/tests/pipeline/test_pipe_factories.py @@ -1,14 +1,19 @@ import pytest +try: + from pydantic.v1 import StrictInt, StrictStr +except ImportError: + from pydantic import StrictInt, StrictStr # type: ignore + +from thinc.api import ConfigValidationError, Linear, Model + import spacy -from spacy.language import Language -from spacy.lang.en import English from spacy.lang.de import German +from spacy.lang.en import English +from spacy.language import Language from spacy.pipeline.tok2vec import DEFAULT_TOK2VEC_MODEL from spacy.tokens import Doc -from spacy.util import registry, SimpleFrozenDict, combine_score_weights -from thinc.api import Model, Linear, ConfigValidationError -from pydantic import StrictInt, StrictStr +from spacy.util import SimpleFrozenDict, combine_score_weights, registry from ..util import make_tempdir @@ -198,7 +203,7 @@ def test_pipe_class_component_model(): "@architectures": "spacy.TextCatEnsemble.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL, "linear_model": { - "@architectures": "spacy.TextCatBOW.v2", + "@architectures": "spacy.TextCatBOW.v3", "exclusive_classes": False, "ngram_size": 1, "no_output_layer": False, diff --git a/spacy/tests/pipeline/test_sentencizer.py b/spacy/tests/pipeline/test_sentencizer.py index 5dd0fef43..9b1ddd530 100644 --- a/spacy/tests/pipeline/test_sentencizer.py +++ b/spacy/tests/pipeline/test_sentencizer.py @@ -1,8 +1,9 @@ import pytest + import spacy +from spacy.lang.en import English from spacy.pipeline import Sentencizer from spacy.tokens import Doc -from spacy.lang.en import English def test_sentencizer(en_vocab): diff --git a/spacy/tests/pipeline/test_senter.py b/spacy/tests/pipeline/test_senter.py index 047f59bef..6c7655812 100644 --- a/spacy/tests/pipeline/test_senter.py +++ b/spacy/tests/pipeline/test_senter.py @@ -1,12 +1,12 @@ import pytest from numpy.testing import assert_equal -from spacy.attrs import SENT_START from spacy import util -from spacy.training import Example +from spacy.attrs import SENT_START from spacy.lang.en import English from spacy.language import Language from spacy.tests.util import make_tempdir +from spacy.training import Example def test_label_types(): diff --git a/spacy/tests/pipeline/test_span_finder.py b/spacy/tests/pipeline/test_span_finder.py index 91b08cabf..47a8a34a8 100644 --- a/spacy/tests/pipeline/test_span_finder.py +++ b/spacy/tests/pipeline/test_span_finder.py @@ -1,15 +1,13 @@ import pytest from thinc.api import Config -from spacy.language import Language +from spacy import util from spacy.lang.en import English +from spacy.language import Language from spacy.pipeline.span_finder import span_finder_default_config from spacy.tokens import Doc from spacy.training import Example -from spacy import util -from spacy.util import registry -from spacy.util import fix_random_seed, make_tempdir - +from spacy.util import fix_random_seed, make_tempdir, registry SPANS_KEY = "pytest" TRAIN_DATA = [ @@ -232,10 +230,10 @@ def test_overfitting_IO(): # Test scoring scores = nlp.evaluate(train_examples) - assert f"span_finder_{SPANS_KEY}_f" in scores + assert f"spans_{SPANS_KEY}_f" in scores # It's not perfect 1.0 F1 because it's designed to overgenerate for now. 
- assert scores[f"span_finder_{SPANS_KEY}_p"] == 0.75 - assert scores[f"span_finder_{SPANS_KEY}_r"] == 1.0 + assert scores[f"spans_{SPANS_KEY}_p"] == 0.75 + assert scores[f"spans_{SPANS_KEY}_r"] == 1.0 # also test that the spancat works for just a single entity in a sentence doc = nlp("London") diff --git a/spacy/tests/pipeline/test_span_ruler.py b/spacy/tests/pipeline/test_span_ruler.py index 794815359..0a8616f44 100644 --- a/spacy/tests/pipeline/test_span_ruler.py +++ b/spacy/tests/pipeline/test_span_ruler.py @@ -1,13 +1,12 @@ import pytest +from thinc.api import NumpyOps, get_current_ops import spacy from spacy import registry from spacy.errors import MatchPatternError +from spacy.tests.util import make_tempdir from spacy.tokens import Span from spacy.training import Example -from spacy.tests.util import make_tempdir - -from thinc.api import NumpyOps, get_current_ops @pytest.fixture diff --git a/spacy/tests/pipeline/test_spancat.py b/spacy/tests/pipeline/test_spancat.py index b7024cf36..9405a78e0 100644 --- a/spacy/tests/pipeline/test_spancat.py +++ b/spacy/tests/pipeline/test_spancat.py @@ -1,7 +1,7 @@ -import pytest import numpy -from numpy.testing import assert_array_equal, assert_almost_equal -from thinc.api import get_current_ops, NumpyOps, Ragged +import pytest +from numpy.testing import assert_almost_equal, assert_array_equal +from thinc.api import NumpyOps, Ragged, get_current_ops from spacy import util from spacy.lang.en import English @@ -9,7 +9,7 @@ from spacy.language import Language from spacy.tokens import SpanGroup from spacy.tokens._dict_proxies import SpanGroups from spacy.training import Example -from spacy.util import fix_random_seed, registry, make_tempdir +from spacy.util import fix_random_seed, make_tempdir, registry OPS = get_current_ops() diff --git a/spacy/tests/pipeline/test_tagger.py b/spacy/tests/pipeline/test_tagger.py index 746f32ee3..4b5f1ee99 100644 --- a/spacy/tests/pipeline/test_tagger.py +++ b/spacy/tests/pipeline/test_tagger.py @@ -1,12 +1,12 @@ import pytest -from numpy.testing import assert_equal, assert_almost_equal -from spacy.attrs import TAG +from numpy.testing import assert_almost_equal, assert_equal +from thinc.api import compounding, get_current_ops from spacy import util -from spacy.training import Example +from spacy.attrs import TAG from spacy.lang.en import English from spacy.language import Language -from thinc.api import compounding, get_current_ops +from spacy.training import Example from ..util import make_tempdir diff --git a/spacy/tests/pipeline/test_textcat.py b/spacy/tests/pipeline/test_textcat.py index d042f3445..8a0c1a976 100644 --- a/spacy/tests/pipeline/test_textcat.py +++ b/spacy/tests/pipeline/test_textcat.py @@ -12,18 +12,24 @@ from spacy.cli.evaluate import print_prf_per_type, print_textcats_auc_per_cat from spacy.lang.en import English from spacy.language import Language from spacy.pipeline import TextCategorizer -from spacy.pipeline.textcat import single_label_bow_config -from spacy.pipeline.textcat import single_label_cnn_config -from spacy.pipeline.textcat import single_label_default_config -from spacy.pipeline.textcat_multilabel import multi_label_bow_config -from spacy.pipeline.textcat_multilabel import multi_label_cnn_config -from spacy.pipeline.textcat_multilabel import multi_label_default_config +from spacy.pipeline.textcat import ( + single_label_bow_config, + single_label_cnn_config, + single_label_default_config, +) +from spacy.pipeline.textcat_multilabel import ( + multi_label_bow_config, + 
multi_label_cnn_config, + multi_label_default_config, +) from spacy.pipeline.tok2vec import DEFAULT_TOK2VEC_MODEL from spacy.scorer import Scorer from spacy.tokens import Doc, DocBin from spacy.training import Example from spacy.training.initialize import init_nlp +# Ensure that the architecture gets added to the registry. +from ..tok2vec import build_lazy_init_tok2vec as _ from ..util import make_tempdir TRAIN_DATA_SINGLE_LABEL = [ @@ -36,6 +42,13 @@ TRAIN_DATA_MULTI_LABEL = [ ("I'm confused but happy", {"cats": {"ANGRY": 0.0, "CONFUSED": 1.0, "HAPPY": 1.0}}), ] +lazy_init_model_config = """ +[model] +@architectures = "test.LazyInitTok2Vec.v1" +width = 96 +""" +LAZY_INIT_TOK2VEC_MODEL = Config().from_str(lazy_init_model_config)["model"] + def make_get_examples_single_label(nlp): train_examples = [] @@ -410,7 +423,7 @@ def test_implicit_label(name, get_examples): @pytest.mark.parametrize( "name,textcat_config", [ - # BOW + # BOW V1 ("textcat", {"@architectures": "spacy.TextCatBOW.v1", "exclusive_classes": True, "no_output_layer": False, "ngram_size": 3}), ("textcat", {"@architectures": "spacy.TextCatBOW.v1", "exclusive_classes": True, "no_output_layer": True, "ngram_size": 3}), ("textcat_multilabel", {"@architectures": "spacy.TextCatBOW.v1", "exclusive_classes": False, "no_output_layer": False, "ngram_size": 3}), @@ -447,14 +460,14 @@ def test_no_resize(name, textcat_config): @pytest.mark.parametrize( "name,textcat_config", [ - # BOW - ("textcat", {"@architectures": "spacy.TextCatBOW.v2", "exclusive_classes": True, "no_output_layer": False, "ngram_size": 3}), - ("textcat", {"@architectures": "spacy.TextCatBOW.v2", "exclusive_classes": True, "no_output_layer": True, "ngram_size": 3}), - ("textcat_multilabel", {"@architectures": "spacy.TextCatBOW.v2", "exclusive_classes": False, "no_output_layer": False, "ngram_size": 3}), - ("textcat_multilabel", {"@architectures": "spacy.TextCatBOW.v2", "exclusive_classes": False, "no_output_layer": True, "ngram_size": 3}), + # BOW V3 + ("textcat", {"@architectures": "spacy.TextCatBOW.v3", "exclusive_classes": True, "no_output_layer": False, "ngram_size": 3}), + ("textcat", {"@architectures": "spacy.TextCatBOW.v3", "exclusive_classes": True, "no_output_layer": True, "ngram_size": 3}), + ("textcat_multilabel", {"@architectures": "spacy.TextCatBOW.v3", "exclusive_classes": False, "no_output_layer": False, "ngram_size": 3}), + ("textcat_multilabel", {"@architectures": "spacy.TextCatBOW.v3", "exclusive_classes": False, "no_output_layer": True, "ngram_size": 3}), # CNN - ("textcat", {"@architectures": "spacy.TextCatCNN.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL, "exclusive_classes": True}), - ("textcat_multilabel", {"@architectures": "spacy.TextCatCNN.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL, "exclusive_classes": False}), + ("textcat", {"@architectures": "spacy.TextCatReduce.v1", "tok2vec": DEFAULT_TOK2VEC_MODEL, "exclusive_classes": True, "use_reduce_first": True, "use_reduce_last": True, "use_reduce_max": True, "use_reduce_mean": True}), + ("textcat_multilabel", {"@architectures": "spacy.TextCatReduce.v1", "tok2vec": DEFAULT_TOK2VEC_MODEL, "exclusive_classes": False, "use_reduce_first": True, "use_reduce_last": True, "use_reduce_max": True, "use_reduce_mean": True}), ], ) # fmt: on @@ -476,14 +489,14 @@ def test_resize(name, textcat_config): @pytest.mark.parametrize( "name,textcat_config", [ - # BOW - ("textcat", {"@architectures": "spacy.TextCatBOW.v2", "exclusive_classes": True, "no_output_layer": False, "ngram_size": 3}), - ("textcat", {"@architectures": 
"spacy.TextCatBOW.v2", "exclusive_classes": True, "no_output_layer": True, "ngram_size": 3}), - ("textcat_multilabel", {"@architectures": "spacy.TextCatBOW.v2", "exclusive_classes": False, "no_output_layer": False, "ngram_size": 3}), - ("textcat_multilabel", {"@architectures": "spacy.TextCatBOW.v2", "exclusive_classes": False, "no_output_layer": True, "ngram_size": 3}), - # CNN - ("textcat", {"@architectures": "spacy.TextCatCNN.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL, "exclusive_classes": True}), - ("textcat_multilabel", {"@architectures": "spacy.TextCatCNN.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL, "exclusive_classes": False}), + # BOW v3 + ("textcat", {"@architectures": "spacy.TextCatBOW.v3", "exclusive_classes": True, "no_output_layer": False, "ngram_size": 3}), + ("textcat", {"@architectures": "spacy.TextCatBOW.v3", "exclusive_classes": True, "no_output_layer": True, "ngram_size": 3}), + ("textcat_multilabel", {"@architectures": "spacy.TextCatBOW.v3", "exclusive_classes": False, "no_output_layer": False, "ngram_size": 3}), + ("textcat_multilabel", {"@architectures": "spacy.TextCatBOW.v3", "exclusive_classes": False, "no_output_layer": True, "ngram_size": 3}), + # REDUCE + ("textcat", {"@architectures": "spacy.TextCatReduce.v1", "tok2vec": DEFAULT_TOK2VEC_MODEL, "exclusive_classes": True, "use_reduce_first": True, "use_reduce_last": True, "use_reduce_max": True, "use_reduce_mean": True}), + ("textcat_multilabel", {"@architectures": "spacy.TextCatReduce.v1", "tok2vec": DEFAULT_TOK2VEC_MODEL, "exclusive_classes": False, "use_reduce_first": True, "use_reduce_last": True, "use_reduce_max": True, "use_reduce_mean": True}), ], ) # fmt: on @@ -542,6 +555,34 @@ def test_error_with_multi_labels(): nlp.initialize(get_examples=lambda: train_examples) +# fmt: off +@pytest.mark.parametrize( + "name,textcat_config", + [ + # ENSEMBLE V2 + ("textcat_multilabel", {"@architectures": "spacy.TextCatEnsemble.v2", "tok2vec": LAZY_INIT_TOK2VEC_MODEL, "linear_model": {"@architectures": "spacy.TextCatBOW.v3", "exclusive_classes": False, "ngram_size": 1, "no_output_layer": False}}), + ("textcat", {"@architectures": "spacy.TextCatEnsemble.v2", "tok2vec": LAZY_INIT_TOK2VEC_MODEL, "linear_model": {"@architectures": "spacy.TextCatBOW.v3", "exclusive_classes": True, "ngram_size": 5, "no_output_layer": False}}), + # PARAMETRIC ATTENTION V1 + ("textcat", {"@architectures": "spacy.TextCatParametricAttention.v1", "tok2vec": LAZY_INIT_TOK2VEC_MODEL, "exclusive_classes": True}), + ("textcat_multilabel", {"@architectures": "spacy.TextCatParametricAttention.v1", "tok2vec": LAZY_INIT_TOK2VEC_MODEL, "exclusive_classes": False}), + # REDUCE + ("textcat", {"@architectures": "spacy.TextCatReduce.v1", "tok2vec": LAZY_INIT_TOK2VEC_MODEL, "exclusive_classes": True, "use_reduce_first": True, "use_reduce_last": True, "use_reduce_max": True, "use_reduce_mean": True}), + ("textcat_multilabel", {"@architectures": "spacy.TextCatReduce.v1", "tok2vec": LAZY_INIT_TOK2VEC_MODEL, "exclusive_classes": False, "use_reduce_first": True, "use_reduce_last": True, "use_reduce_max": True, "use_reduce_mean": True}), + ], +) +# fmt: on +def test_tok2vec_lazy_init(name, textcat_config): + # Check that we can properly initialize and use a textcat model using + # a lazily-initialized tok2vec. 
+ nlp = English() + pipe_config = {"model": textcat_config} + textcat = nlp.add_pipe(name, config=pipe_config) + textcat.add_label("POSITIVE") + textcat.add_label("NEGATIVE") + nlp.initialize() + nlp.pipe(["This is a test."]) + + @pytest.mark.parametrize( "name,get_examples, train_data", [ @@ -689,12 +730,23 @@ def test_overfitting_IO_multi(): ("textcat", TRAIN_DATA_SINGLE_LABEL, {"@architectures": "spacy.TextCatBOW.v2", "exclusive_classes": True, "ngram_size": 4, "no_output_layer": False}), ("textcat_multilabel", TRAIN_DATA_MULTI_LABEL, {"@architectures": "spacy.TextCatBOW.v2", "exclusive_classes": False, "ngram_size": 3, "no_output_layer": True}), ("textcat", TRAIN_DATA_SINGLE_LABEL, {"@architectures": "spacy.TextCatBOW.v2", "exclusive_classes": True, "ngram_size": 2, "no_output_layer": True}), + # BOW V3 + ("textcat_multilabel", TRAIN_DATA_MULTI_LABEL, {"@architectures": "spacy.TextCatBOW.v3", "exclusive_classes": False, "ngram_size": 1, "no_output_layer": False}), + ("textcat", TRAIN_DATA_SINGLE_LABEL, {"@architectures": "spacy.TextCatBOW.v3", "exclusive_classes": True, "ngram_size": 4, "no_output_layer": False}), + ("textcat_multilabel", TRAIN_DATA_MULTI_LABEL, {"@architectures": "spacy.TextCatBOW.v3", "exclusive_classes": False, "ngram_size": 3, "no_output_layer": True}), + ("textcat", TRAIN_DATA_SINGLE_LABEL, {"@architectures": "spacy.TextCatBOW.v3", "exclusive_classes": True, "ngram_size": 2, "no_output_layer": True}), # ENSEMBLE V2 - ("textcat_multilabel", TRAIN_DATA_MULTI_LABEL, {"@architectures": "spacy.TextCatEnsemble.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL, "linear_model": {"@architectures": "spacy.TextCatBOW.v2", "exclusive_classes": False, "ngram_size": 1, "no_output_layer": False}}), - ("textcat", TRAIN_DATA_SINGLE_LABEL, {"@architectures": "spacy.TextCatEnsemble.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL, "linear_model": {"@architectures": "spacy.TextCatBOW.v2", "exclusive_classes": True, "ngram_size": 5, "no_output_layer": False}}), - # CNN V2 + ("textcat_multilabel", TRAIN_DATA_MULTI_LABEL, {"@architectures": "spacy.TextCatEnsemble.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL, "linear_model": {"@architectures": "spacy.TextCatBOW.v3", "exclusive_classes": False, "ngram_size": 1, "no_output_layer": False}}), + ("textcat", TRAIN_DATA_SINGLE_LABEL, {"@architectures": "spacy.TextCatEnsemble.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL, "linear_model": {"@architectures": "spacy.TextCatBOW.v3", "exclusive_classes": True, "ngram_size": 5, "no_output_layer": False}}), + # CNN V2 (legacy) ("textcat", TRAIN_DATA_SINGLE_LABEL, {"@architectures": "spacy.TextCatCNN.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL, "exclusive_classes": True}), ("textcat_multilabel", TRAIN_DATA_MULTI_LABEL, {"@architectures": "spacy.TextCatCNN.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL, "exclusive_classes": False}), + # PARAMETRIC ATTENTION V1 + ("textcat", TRAIN_DATA_SINGLE_LABEL, {"@architectures": "spacy.TextCatParametricAttention.v1", "tok2vec": DEFAULT_TOK2VEC_MODEL, "exclusive_classes": True}), + ("textcat_multilabel", TRAIN_DATA_MULTI_LABEL, {"@architectures": "spacy.TextCatParametricAttention.v1", "tok2vec": DEFAULT_TOK2VEC_MODEL, "exclusive_classes": False}), + # REDUCE V1 + ("textcat", TRAIN_DATA_SINGLE_LABEL, {"@architectures": "spacy.TextCatReduce.v1", "tok2vec": DEFAULT_TOK2VEC_MODEL, "exclusive_classes": True, "use_reduce_first": True, "use_reduce_last": True, "use_reduce_max": True, "use_reduce_mean": True}), + ("textcat_multilabel", TRAIN_DATA_MULTI_LABEL, {"@architectures": "spacy.TextCatReduce.v1", "tok2vec": 
DEFAULT_TOK2VEC_MODEL, "exclusive_classes": False, "use_reduce_first": True, "use_reduce_last": True, "use_reduce_max": True, "use_reduce_mean": True}), ], ) # fmt: on diff --git a/spacy/tests/pipeline/test_tok2vec.py b/spacy/tests/pipeline/test_tok2vec.py index e423d9a19..998f0472c 100644 --- a/spacy/tests/pipeline/test_tok2vec.py +++ b/spacy/tests/pipeline/test_tok2vec.py @@ -1,17 +1,21 @@ import pytest -from spacy.ml.models.tok2vec import build_Tok2Vec_model -from spacy.ml.models.tok2vec import MultiHashEmbed, MaxoutWindowEncoder -from spacy.pipeline.tok2vec import Tok2Vec, Tok2VecListener -from spacy.vocab import Vocab -from spacy.tokens import Doc -from spacy.training import Example +from numpy.testing import assert_array_equal +from thinc.api import Config, get_current_ops + from spacy import util from spacy.lang.en import English +from spacy.ml.models.tok2vec import ( + MaxoutWindowEncoder, + MultiHashEmbed, + build_Tok2Vec_model, +) +from spacy.pipeline.tok2vec import Tok2Vec, Tok2VecListener +from spacy.tokens import Doc +from spacy.training import Example from spacy.util import registry -from thinc.api import Config, get_current_ops -from numpy.testing import assert_array_equal +from spacy.vocab import Vocab -from ..util import get_batch, make_tempdir, add_vecs_to_vocab +from ..util import add_vecs_to_vocab, get_batch, make_tempdir def test_empty_doc(): @@ -188,8 +192,7 @@ def test_tok2vec_listener(with_vectors): for tag in t[1]["tags"]: tagger.add_label(tag) - # Check that the Tok2Vec component finds it listeners - assert tok2vec.listeners == [] + # Check that the Tok2Vec component finds its listeners optimizer = nlp.initialize(lambda: train_examples) assert tok2vec.listeners == [tagger_tok2vec] @@ -217,7 +220,6 @@ def test_tok2vec_listener_callback(): assert nlp.pipe_names == ["tok2vec", "tagger"] tagger = nlp.get_pipe("tagger") tok2vec = nlp.get_pipe("tok2vec") - nlp._link_components() docs = [nlp.make_doc("A random sentence")] tok2vec.model.initialize(X=docs) gold_array = [[1.0 for tag in ["V", "Z"]] for word in docs] @@ -426,29 +428,46 @@ def test_replace_listeners_from_config(): nlp.to_disk(dir_path) base_model = str(dir_path) new_config = { - "nlp": {"lang": "en", "pipeline": ["tok2vec", "tagger", "ner"]}, + "nlp": { + "lang": "en", + "pipeline": ["tok2vec", "tagger2", "ner3", "tagger4"], + }, "components": { "tok2vec": {"source": base_model}, - "tagger": { + "tagger2": { "source": base_model, + "component": "tagger", "replace_listeners": ["model.tok2vec"], }, - "ner": {"source": base_model}, + "ner3": { + "source": base_model, + "component": "ner", + }, + "tagger4": { + "source": base_model, + "component": "tagger", + }, }, } new_nlp = util.load_model_from_config(new_config, auto_fill=True) new_nlp.initialize(lambda: examples) tok2vec = new_nlp.get_pipe("tok2vec") - tagger = new_nlp.get_pipe("tagger") - ner = new_nlp.get_pipe("ner") - assert tok2vec.listening_components == ["ner"] + tagger = new_nlp.get_pipe("tagger2") + ner = new_nlp.get_pipe("ner3") + assert "ner" not in new_nlp.pipe_names + assert "tagger" not in new_nlp.pipe_names + assert tok2vec.listening_components == ["ner3", "tagger4"] assert any(isinstance(node, Tok2VecListener) for node in ner.model.walk()) assert not any(isinstance(node, Tok2VecListener) for node in tagger.model.walk()) t2v_cfg = new_nlp.config["components"]["tok2vec"]["model"] assert t2v_cfg["@architectures"] == "spacy.Tok2Vec.v2" - assert new_nlp.config["components"]["tagger"]["model"]["tok2vec"] == t2v_cfg + assert 
new_nlp.config["components"]["tagger2"]["model"]["tok2vec"] == t2v_cfg assert ( - new_nlp.config["components"]["ner"]["model"]["tok2vec"]["@architectures"] + new_nlp.config["components"]["ner3"]["model"]["tok2vec"]["@architectures"] + == "spacy.Tok2VecListener.v1" + ) + assert ( + new_nlp.config["components"]["tagger4"]["model"]["tok2vec"]["@architectures"] == "spacy.Tok2VecListener.v1" ) @@ -540,3 +559,57 @@ def test_tok2vec_listeners_textcat(): assert cats1["imperative"] < 0.9 assert [t.tag_ for t in docs[0]] == ["V", "J", "N"] assert [t.tag_ for t in docs[1]] == ["N", "V", "J", "N"] + + +def test_tok2vec_listener_source_link_name(): + """The component's internal name and the tok2vec listener map correspond + to the most recently modified pipeline. + """ + orig_config = Config().from_str(cfg_string_multi) + nlp1 = util.load_model_from_config(orig_config, auto_fill=True, validate=True) + assert nlp1.get_pipe("tok2vec").listening_components == ["tagger", "ner"] + + nlp2 = English() + nlp2.add_pipe("tok2vec", source=nlp1) + nlp2.add_pipe("tagger", name="tagger2", source=nlp1) + + # there is no way to have the component have the right name for both + # pipelines, right now the most recently modified pipeline is prioritized + assert nlp1.get_pipe("tagger").name == nlp2.get_pipe("tagger2").name == "tagger2" + + # there is no way to have the tok2vec have the right listener map for both + # pipelines, right now the most recently modified pipeline is prioritized + assert nlp2.get_pipe("tok2vec").listening_components == ["tagger2"] + nlp2.add_pipe("ner", name="ner3", source=nlp1) + assert nlp2.get_pipe("tok2vec").listening_components == ["tagger2", "ner3"] + nlp2.remove_pipe("ner3") + assert nlp2.get_pipe("tok2vec").listening_components == ["tagger2"] + nlp2.remove_pipe("tagger2") + assert nlp2.get_pipe("tok2vec").listening_components == [] + + # at this point the tok2vec component corresponds to nlp2 + assert nlp1.get_pipe("tok2vec").listening_components == [] + + # modifying the nlp1 pipeline syncs the tok2vec listener map back to nlp1 + nlp1.add_pipe("sentencizer") + assert nlp1.get_pipe("tok2vec").listening_components == ["tagger", "ner"] + + # modifying nlp2 syncs it back to nlp2 + nlp2.add_pipe("sentencizer") + assert nlp1.get_pipe("tok2vec").listening_components == [] + + +def test_tok2vec_listener_source_replace_listeners(): + orig_config = Config().from_str(cfg_string_multi) + nlp1 = util.load_model_from_config(orig_config, auto_fill=True, validate=True) + assert nlp1.get_pipe("tok2vec").listening_components == ["tagger", "ner"] + nlp1.replace_listeners("tok2vec", "tagger", ["model.tok2vec"]) + assert nlp1.get_pipe("tok2vec").listening_components == ["ner"] + + nlp2 = English() + nlp2.add_pipe("tok2vec", source=nlp1) + assert nlp2.get_pipe("tok2vec").listening_components == [] + nlp2.add_pipe("tagger", source=nlp1) + assert nlp2.get_pipe("tok2vec").listening_components == [] + nlp2.add_pipe("ner", name="ner2", source=nlp1) + assert nlp2.get_pipe("tok2vec").listening_components == ["ner2"] diff --git a/spacy/tests/serialize/test_resource_warning.py b/spacy/tests/serialize/test_resource_warning.py index befd05635..ab6e6e9ee 100644 --- a/spacy/tests/serialize/test_resource_warning.py +++ b/spacy/tests/serialize/test_resource_warning.py @@ -1,12 +1,14 @@ import warnings from unittest import TestCase + import pytest import srsly from numpy import zeros + from spacy.kb.kb_in_memory import InMemoryLookupKB, Writer -from spacy.vectors import Vectors from spacy.language import Language from 
spacy.pipeline import TrainablePipe +from spacy.vectors import Vectors from spacy.vocab import Vocab from ..util import make_tempdir diff --git a/spacy/tests/serialize/test_serialize_config.py b/spacy/tests/serialize/test_serialize_config.py index 85e6f8b2c..b36d3ad74 100644 --- a/spacy/tests/serialize/test_serialize_config.py +++ b/spacy/tests/serialize/test_serialize_config.py @@ -5,13 +5,21 @@ from thinc.api import Config, ConfigValidationError import spacy from spacy.lang.de import German from spacy.lang.en import English -from spacy.language import DEFAULT_CONFIG, DEFAULT_CONFIG_PRETRAIN_PATH -from spacy.language import Language -from spacy.ml.models import MaxoutWindowEncoder, MultiHashEmbed -from spacy.ml.models import build_tb_parser_model, build_Tok2Vec_model +from spacy.language import DEFAULT_CONFIG, DEFAULT_CONFIG_PRETRAIN_PATH, Language +from spacy.ml.models import ( + MaxoutWindowEncoder, + MultiHashEmbed, + build_tb_parser_model, + build_Tok2Vec_model, +) from spacy.schemas import ConfigSchema, ConfigSchemaPretrain -from spacy.util import load_config, load_config_from_str -from spacy.util import load_model_from_config, registry +from spacy.training import Example +from spacy.util import ( + load_config, + load_config_from_str, + load_model_from_config, + registry, +) from ..util import make_tempdir @@ -415,6 +423,55 @@ def test_config_overrides(): assert nlp.pipe_names == ["tok2vec", "tagger"] +@pytest.mark.filterwarnings("ignore:\\[W036") +def test_config_overrides_registered_functions(): + nlp = spacy.blank("en") + nlp.add_pipe("attribute_ruler") + with make_tempdir() as d: + nlp.to_disk(d) + nlp_re1 = spacy.load( + d, + config={ + "components": { + "attribute_ruler": { + "scorer": {"@scorers": "spacy.tagger_scorer.v1"} + } + } + }, + ) + assert ( + nlp_re1.config["components"]["attribute_ruler"]["scorer"]["@scorers"] + == "spacy.tagger_scorer.v1" + ) + + @registry.misc("test_some_other_key") + def misc_some_other_key(): + return "some_other_key" + + nlp_re2 = spacy.load( + d, + config={ + "components": { + "attribute_ruler": { + "scorer": { + "@scorers": "spacy.overlapping_labeled_spans_scorer.v1", + "spans_key": {"@misc": "test_some_other_key"}, + } + } + } + }, + ) + assert nlp_re2.config["components"]["attribute_ruler"]["scorer"][ + "spans_key" + ] == {"@misc": "test_some_other_key"} + # run dummy evaluation (will return None scores) in order to test that + # the spans_key value in the nested override is working as intended in + # the config + example = Example.from_dict(nlp_re2.make_doc("a b c"), {}) + scores = nlp_re2.evaluate([example]) + assert "spans_some_other_key_f" in scores + + def test_config_interpolation(): config = Config().from_str(nlp_config_string, interpolate=False) assert config["corpora"]["train"]["path"] == "${paths.train}" diff --git a/spacy/tests/serialize/test_serialize_extension_attrs.py b/spacy/tests/serialize/test_serialize_extension_attrs.py index 9cfa1a552..2fb56c848 100644 --- a/spacy/tests/serialize/test_serialize_extension_attrs.py +++ b/spacy/tests/serialize/test_serialize_extension_attrs.py @@ -1,4 +1,5 @@ import pytest + from spacy.tokens import Doc, Token from spacy.vocab import Vocab @@ -14,7 +15,12 @@ def doc_w_attrs(en_tokenizer): Token.set_extension("_test_token", default="t0") doc[1]._._test_token = "t1" - return doc + yield doc + + Doc.remove_extension("_test_attr") + Doc.remove_extension("_test_prop") + Doc.remove_extension("_test_method") + Token.remove_extension("_test_token") def 
test_serialize_ext_attrs_from_bytes(doc_w_attrs): diff --git a/spacy/tests/serialize/test_serialize_kb.py b/spacy/tests/serialize/test_serialize_kb.py index f9d2e226b..99eb8cd86 100644 --- a/spacy/tests/serialize/test_serialize_kb.py +++ b/spacy/tests/serialize/test_serialize_kb.py @@ -1,16 +1,16 @@ from pathlib import Path -from typing import Callable, Iterable, Any, Dict +from typing import Any, Callable, Dict, Iterable import srsly - -from spacy import util, Errors -from spacy.util import ensure_path, registry, load_model_from_config, SimpleFrozenList -from spacy.kb.kb_in_memory import InMemoryLookupKB -from spacy.vocab import Vocab +from numpy import zeros from thinc.api import Config +from spacy import Errors, util +from spacy.kb.kb_in_memory import InMemoryLookupKB +from spacy.util import SimpleFrozenList, ensure_path, load_model_from_config, registry +from spacy.vocab import Vocab + from ..util import make_tempdir -from numpy import zeros def test_serialize_kb_disk(en_vocab): diff --git a/spacy/tests/serialize/test_serialize_language.py b/spacy/tests/serialize/test_serialize_language.py index c03287548..9c36015a9 100644 --- a/spacy/tests/serialize/test_serialize_language.py +++ b/spacy/tests/serialize/test_serialize_language.py @@ -1,11 +1,11 @@ -import re import pickle +import re import pytest -from spacy.language import Language -from spacy.lang.it import Italian from spacy.lang.en import English +from spacy.lang.it import Italian +from spacy.language import Language from spacy.tokenizer import Tokenizer from spacy.training import Example from spacy.util import load_config_from_str diff --git a/spacy/tests/serialize/test_serialize_pipeline.py b/spacy/tests/serialize/test_serialize_pipeline.py index 9fcf18e2d..6bbe743a1 100644 --- a/spacy/tests/serialize/test_serialize_pipeline.py +++ b/spacy/tests/serialize/test_serialize_pipeline.py @@ -8,15 +8,21 @@ import spacy from spacy import Vocab, load, registry from spacy.lang.en import English from spacy.language import Language -from spacy.pipeline import DependencyParser, EntityRecognizer, EntityRuler -from spacy.pipeline import SentenceRecognizer, Tagger, TextCategorizer -from spacy.pipeline import TrainablePipe +from spacy.pipeline import ( + DependencyParser, + EntityRecognizer, + EntityRuler, + SentenceRecognizer, + Tagger, + TextCategorizer, + TrainablePipe, +) from spacy.pipeline.dep_parser import DEFAULT_PARSER_MODEL from spacy.pipeline.senter import DEFAULT_SENTER_MODEL from spacy.pipeline.tagger import DEFAULT_TAGGER_MODEL from spacy.pipeline.textcat import DEFAULT_SINGLE_TEXTCAT_MODEL -from spacy.util import ensure_path, load_model from spacy.tokens import Span +from spacy.util import ensure_path, load_model from ..util import make_tempdir diff --git a/spacy/tests/serialize/test_serialize_tokenizer.py b/spacy/tests/serialize/test_serialize_tokenizer.py index 9b74d7721..e998a78b4 100644 --- a/spacy/tests/serialize/test_serialize_tokenizer.py +++ b/spacy/tests/serialize/test_serialize_tokenizer.py @@ -7,8 +7,13 @@ from spacy.attrs import ENT_IOB, ENT_TYPE from spacy.lang.en import English from spacy.tokenizer import Tokenizer from spacy.tokens import Doc -from spacy.util import compile_infix_regex, compile_prefix_regex -from spacy.util import compile_suffix_regex, get_lang_class, load_model +from spacy.util import ( + compile_infix_regex, + compile_prefix_regex, + compile_suffix_regex, + get_lang_class, + load_model, +) from ..util import assert_packed_msg_equal, make_tempdir diff --git a/spacy/tests/test_architectures.py 
b/spacy/tests/test_architectures.py index 26eabd4e5..3b5804a69 100644 --- a/spacy/tests/test_architectures.py +++ b/spacy/tests/test_architectures.py @@ -1,7 +1,8 @@ import pytest -from spacy import registry -from thinc.api import Linear from catalogue import RegistryError +from thinc.api import Linear + +from spacy import registry def test_get_architecture(): diff --git a/spacy/tests/test_cli.py b/spacy/tests/test_cli.py index 351e6bf11..7b729d78f 100644 --- a/spacy/tests/test_cli.py +++ b/spacy/tests/test_cli.py @@ -1,53 +1,46 @@ -import os import math +import os from collections import Counter -from typing import Tuple, List, Dict, Any -import time from pathlib import Path +from typing import Any, Dict, List, Tuple -import spacy -import numpy import pytest import srsly from click import NoSuchOption from packaging.specifiers import SpecifierSet -from thinc.api import Config, ConfigValidationError -from spacy.tokens import DocBin +from thinc.api import Config +import spacy from spacy import about -from spacy.cli import info -from spacy.cli._util import is_subpath_of, load_project_config, walk_directory -from spacy.cli._util import parse_config_overrides, string_to_list -from spacy.cli._util import substitute_project_variables -from spacy.cli._util import validate_project_commands -from spacy.cli._util import upload_file, download_file -from spacy.cli.debug_data import _compile_gold, _get_labels_from_model -from spacy.cli.debug_data import _get_labels_from_spancat -from spacy.cli.debug_data import _get_distribution, _get_kl_divergence -from spacy.cli.debug_data import _get_span_characteristics -from spacy.cli.debug_data import _print_span_characteristics -from spacy.cli.debug_data import _get_spans_length_freq_dist +from spacy.cli import download_module, info +from spacy.cli._util import parse_config_overrides, string_to_list, walk_directory +from spacy.cli.apply import apply +from spacy.cli.debug_data import ( + _compile_gold, + _get_distribution, + _get_kl_divergence, + _get_labels_from_model, + _get_labels_from_spancat, + _get_span_characteristics, + _get_spans_length_freq_dist, + _print_span_characteristics, +) from spacy.cli.download import get_compatibility, get_version from spacy.cli.evaluate import render_parses -from spacy.cli.init_config import RECOMMENDATIONS, init_config, fill_config -from spacy.cli.init_pipeline import _init_labels -from spacy.cli.package import get_third_party_dependencies -from spacy.cli.package import _is_permitted_package_name -from spacy.cli.project.remote_storage import RemoteStorage -from spacy.cli.project.run import _check_requirements -from spacy.cli.validate import get_model_pkgs -from spacy.cli.apply import apply from spacy.cli.find_threshold import find_threshold +from spacy.cli.init_config import RECOMMENDATIONS, fill_config, init_config +from spacy.cli.init_pipeline import _init_labels +from spacy.cli.package import _is_permitted_package_name, get_third_party_dependencies +from spacy.cli.validate import get_model_pkgs from spacy.lang.en import English from spacy.lang.nl import Dutch from spacy.language import Language -from spacy.schemas import ProjectConfigSchema, RecommendationSchema, validate +from spacy.schemas import RecommendationSchema from spacy.tokens import Doc, DocBin from spacy.tokens.span import Span from spacy.training import Example, docs_to_json, offsets_to_biluo_tags -from spacy.training.converters import conll_ner_to_docs, conllu_to_docs -from spacy.training.converters import iob_to_docs -from spacy.util import ENV_VARS, 
get_minor_version, load_model_from_config, load_config +from spacy.training.converters import conll_ner_to_docs, conllu_to_docs, iob_to_docs +from spacy.util import ENV_VARS, get_minor_version, load_config, load_model_from_config from .util import make_tempdir @@ -127,25 +120,6 @@ def test_issue7055(): assert "model" in filled_cfg["components"]["ner"] -@pytest.mark.issue(11235) -def test_issue11235(): - """ - Test that the cli handles interpolation in the directory names correctly when loading project config. - """ - lang_var = "en" - variables = {"lang": lang_var} - commands = [{"name": "x", "script": ["hello ${vars.lang}"]}] - directories = ["cfg", "${vars.lang}_model"] - project = {"commands": commands, "vars": variables, "directories": directories} - with make_tempdir() as d: - srsly.write_yaml(d / "project.yml", project) - cfg = load_project_config(d) - # Check that the directories are interpolated and created correctly - assert os.path.exists(d / "cfg") - assert os.path.exists(d / f"{lang_var}_model") - assert cfg["commands"][0]["script"][0] == f"hello {lang_var}" - - @pytest.mark.issue(12566) @pytest.mark.parametrize( "factory,output_file", @@ -436,136 +410,6 @@ def test_cli_converters_conll_ner_to_docs(): assert ent.text in ["New York City", "London"] -def test_project_config_validation_full(): - config = { - "vars": {"some_var": 20}, - "directories": ["assets", "configs", "corpus", "scripts", "training"], - "assets": [ - { - "dest": "x", - "extra": True, - "url": "https://example.com", - "checksum": "63373dd656daa1fd3043ce166a59474c", - }, - { - "dest": "y", - "git": { - "repo": "https://github.com/example/repo", - "branch": "develop", - "path": "y", - }, - }, - { - "dest": "z", - "extra": False, - "url": "https://example.com", - "checksum": "63373dd656daa1fd3043ce166a59474c", - }, - ], - "commands": [ - { - "name": "train", - "help": "Train a model", - "script": ["python -m spacy train config.cfg -o training"], - "deps": ["config.cfg", "corpus/training.spcy"], - "outputs": ["training/model-best"], - }, - {"name": "test", "script": ["pytest", "custom.py"], "no_skip": True}, - ], - "workflows": {"all": ["train", "test"], "train": ["train"]}, - } - errors = validate(ProjectConfigSchema, config) - assert not errors - - -@pytest.mark.parametrize( - "config", - [ - {"commands": [{"name": "a"}, {"name": "a"}]}, - {"commands": [{"name": "a"}], "workflows": {"a": []}}, - {"commands": [{"name": "a"}], "workflows": {"b": ["c"]}}, - ], -) -def test_project_config_validation1(config): - with pytest.raises(SystemExit): - validate_project_commands(config) - - -@pytest.mark.parametrize( - "config,n_errors", - [ - ({"commands": {"a": []}}, 1), - ({"commands": [{"help": "..."}]}, 1), - ({"commands": [{"name": "a", "extra": "b"}]}, 1), - ({"commands": [{"extra": "b"}]}, 2), - ({"commands": [{"name": "a", "deps": [123]}]}, 1), - ], -) -def test_project_config_validation2(config, n_errors): - errors = validate(ProjectConfigSchema, config) - assert len(errors) == n_errors - - -@pytest.mark.parametrize( - "int_value", - [10, pytest.param("10", marks=pytest.mark.xfail)], -) -def test_project_config_interpolation(int_value): - variables = {"a": int_value, "b": {"c": "foo", "d": True}} - commands = [ - {"name": "x", "script": ["hello ${vars.a} ${vars.b.c}"]}, - {"name": "y", "script": ["${vars.b.c} ${vars.b.d}"]}, - ] - project = {"commands": commands, "vars": variables} - with make_tempdir() as d: - srsly.write_yaml(d / "project.yml", project) - cfg = load_project_config(d) - assert type(cfg) == dict - 
assert type(cfg["commands"]) == list - assert cfg["commands"][0]["script"][0] == "hello 10 foo" - assert cfg["commands"][1]["script"][0] == "foo true" - commands = [{"name": "x", "script": ["hello ${vars.a} ${vars.b.e}"]}] - project = {"commands": commands, "vars": variables} - with pytest.raises(ConfigValidationError): - substitute_project_variables(project) - - -@pytest.mark.parametrize( - "greeting", - [342, "everyone", "tout le monde", pytest.param("42", marks=pytest.mark.xfail)], -) -def test_project_config_interpolation_override(greeting): - variables = {"a": "world"} - commands = [ - {"name": "x", "script": ["hello ${vars.a}"]}, - ] - overrides = {"vars.a": greeting} - project = {"commands": commands, "vars": variables} - with make_tempdir() as d: - srsly.write_yaml(d / "project.yml", project) - cfg = load_project_config(d, overrides=overrides) - assert type(cfg) == dict - assert type(cfg["commands"]) == list - assert cfg["commands"][0]["script"][0] == f"hello {greeting}" - - -def test_project_config_interpolation_env(): - variables = {"a": 10} - env_var = "SPACY_TEST_FOO" - env_vars = {"foo": env_var} - commands = [{"name": "x", "script": ["hello ${vars.a} ${env.foo}"]}] - project = {"commands": commands, "vars": variables, "env": env_vars} - with make_tempdir() as d: - srsly.write_yaml(d / "project.yml", project) - cfg = load_project_config(d) - assert cfg["commands"][0]["script"][0] == "hello 10 " - os.environ[env_var] = "123" - with make_tempdir() as d: - srsly.write_yaml(d / "project.yml", project) - cfg = load_project_config(d) - assert cfg["commands"][0]["script"][0] == "hello 10 123" - - @pytest.mark.parametrize( "args,expected", [ @@ -775,21 +619,6 @@ def test_get_third_party_dependencies(): get_third_party_dependencies(nlp.config) -@pytest.mark.parametrize( - "parent,child,expected", - [ - ("/tmp", "/tmp", True), - ("/tmp", "/", False), - ("/tmp", "/tmp/subdir", True), - ("/tmp", "/tmpdir", False), - ("/tmp", "/tmp/subdir/..", True), - ("/tmp", "/tmp/..", False), - ], -) -def test_is_subpath_of(parent, child, expected): - assert is_subpath_of(parent, child) == expected - - @pytest.mark.slow @pytest.mark.parametrize( "factory_name,pipe_name", @@ -851,7 +680,8 @@ def test_debug_data_compile_gold(): assert data["boundary_cross_ents"] == 1 -def test_debug_data_compile_gold_for_spans(): +@pytest.mark.parametrize("component_name", ["spancat", "spancat_singlelabel"]) +def test_debug_data_compile_gold_for_spans(component_name): nlp = English() spans_key = "sc" @@ -861,7 +691,7 @@ def test_debug_data_compile_gold_for_spans(): ref.spans[spans_key] = [Span(ref, 3, 6, "ORG"), Span(ref, 5, 6, "GPE")] eg = Example(pred, ref) - data = _compile_gold([eg], ["spancat"], nlp, True) + data = _compile_gold([eg], [component_name], nlp, True) assert data["spancat"][spans_key] == Counter({"ORG": 1, "GPE": 1}) assert data["spans_length"][spans_key] == {"ORG": [3], "GPE": [1]} @@ -1034,60 +864,6 @@ def test_applycli_user_data(): assert result[0]._.ext == val -def test_local_remote_storage(): - with make_tempdir() as d: - filename = "a.txt" - - content_hashes = ("aaaa", "cccc", "bbbb") - for i, content_hash in enumerate(content_hashes): - # make sure that each subsequent file has a later timestamp - if i > 0: - time.sleep(1) - content = f"{content_hash} content" - loc_file = d / "root" / filename - if not loc_file.parent.exists(): - loc_file.parent.mkdir(parents=True) - with loc_file.open(mode="w") as file_: - file_.write(content) - - # push first version to remote storage - remote = RemoteStorage(d 
/ "root", str(d / "remote")) - remote.push(filename, "aaaa", content_hash) - - # retrieve with full hashes - loc_file.unlink() - remote.pull(filename, command_hash="aaaa", content_hash=content_hash) - with loc_file.open(mode="r") as file_: - assert file_.read() == content - - # retrieve with command hash - loc_file.unlink() - remote.pull(filename, command_hash="aaaa") - with loc_file.open(mode="r") as file_: - assert file_.read() == content - - # retrieve with content hash - loc_file.unlink() - remote.pull(filename, content_hash=content_hash) - with loc_file.open(mode="r") as file_: - assert file_.read() == content - - # retrieve with no hashes - loc_file.unlink() - remote.pull(filename) - with loc_file.open(mode="r") as file_: - assert file_.read() == content - - -def test_local_remote_storage_pull_missing(): - # pulling from a non-existent remote pulls nothing gracefully - with make_tempdir() as d: - filename = "a.txt" - remote = RemoteStorage(d / "root", str(d / "remote")) - assert remote.pull(filename, command_hash="aaaa") is None - assert remote.pull(filename) is None - - def test_cli_find_threshold(capsys): def make_examples(nlp: Language) -> List[Example]: docs: List[Example] = [] @@ -1198,63 +974,6 @@ def test_cli_find_threshold(capsys): ) -@pytest.mark.filterwarnings("ignore::DeprecationWarning") -@pytest.mark.parametrize( - "reqs,output", - [ - [ - """ - spacy - - # comment - - thinc""", - (False, False), - ], - [ - """# comment - --some-flag - spacy""", - (False, False), - ], - [ - """# comment - --some-flag - spacy; python_version >= '3.6'""", - (False, False), - ], - [ - """# comment - spacyunknowndoesnotexist12345""", - (True, False), - ], - ], -) -def test_project_check_requirements(reqs, output): - import pkg_resources - - # excessive guard against unlikely package name - try: - pkg_resources.require("spacyunknowndoesnotexist12345") - except pkg_resources.DistributionNotFound: - assert output == _check_requirements([req.strip() for req in reqs.split("\n")]) - - -def test_upload_download_local_file(): - with make_tempdir() as d1, make_tempdir() as d2: - filename = "f.txt" - content = "content" - local_file = d1 / filename - remote_file = d2 / filename - with local_file.open(mode="w") as file_: - file_.write(content) - upload_file(local_file, remote_file) - local_file.unlink() - download_file(remote_file, local_file) - with local_file.open(mode="r") as file_: - assert file_.read() == content - - def test_walk_directory(): with make_tempdir() as d: files = [ @@ -1342,3 +1061,20 @@ def test_debug_data_trainable_lemmatizer_not_annotated(): data = _compile_gold(train_examples, ["trainable_lemmatizer"], nlp, True) assert data["no_lemma_annotations"] == 2 + + +def test_project_api_imports(): + from spacy.cli import project_run + from spacy.cli.project.run import project_run # noqa: F401, F811 + + +def test_download_rejects_relative_urls(monkeypatch): + """Test that we can't tell spacy download to get an arbitrary model by using a + relative path in the filename""" + + monkeypatch.setattr(download_module, "run_command", lambda cmd: None) + + # Check that normal download works + download_module.download("en_core_web_sm-3.7.1", direct=True) + with pytest.raises(SystemExit): + download_module.download("../en_core_web_sm-3.7.1", direct=True) diff --git a/spacy/tests/test_cli_app.py b/spacy/tests/test_cli_app.py index 5ff4dfa26..1789d60ea 100644 --- a/spacy/tests/test_cli_app.py +++ b/spacy/tests/test_cli_app.py @@ -1,11 +1,14 @@ import os +import sys from pathlib import Path + import 
pytest import srsly from typer.testing import CliRunner -from spacy.tokens import DocBin, Doc from spacy.cli._util import app, get_git_version +from spacy.tokens import Doc, DocBin, Span + from .util import make_tempdir, normalize_whitespace @@ -231,3 +234,196 @@ def test_project_push_pull(project_dir): result = CliRunner().invoke(app, ["project", "pull", remote, str(project_dir)]) assert result.exit_code == 0 assert test_file.is_file() + + +def test_find_function_valid(): + # example of architecture in main code base + function = "spacy.TextCatBOW.v3" + result = CliRunner().invoke(app, ["find-function", function, "-r", "architectures"]) + assert f"Found registered function '{function}'" in result.stdout + assert "textcat.py" in result.stdout + + result = CliRunner().invoke(app, ["find-function", function]) + assert f"Found registered function '{function}'" in result.stdout + assert "textcat.py" in result.stdout + + # example of architecture in spacy-legacy + function = "spacy.TextCatBOW.v1" + result = CliRunner().invoke(app, ["find-function", function]) + assert f"Found registered function '{function}'" in result.stdout + assert "spacy_legacy" in result.stdout + assert "textcat.py" in result.stdout + + +def test_find_function_invalid(): + # invalid registry + function = "spacy.TextCatBOW.v3" + registry = "foobar" + result = CliRunner().invoke( + app, ["find-function", function, "--registry", registry] + ) + assert f"Unknown function registry: '{registry}'" in result.stdout + + # invalid function + function = "spacy.TextCatBOW.v666" + result = CliRunner().invoke(app, ["find-function", function]) + assert f"Couldn't find registered function: '{function}'" in result.stdout + + +example_words_1 = ["I", "like", "cats"] +example_words_2 = ["I", "like", "dogs"] +example_lemmas_1 = ["I", "like", "cat"] +example_lemmas_2 = ["I", "like", "dog"] +example_tags = ["PRP", "VBP", "NNS"] +example_morphs = [ + "Case=Nom|Number=Sing|Person=1|PronType=Prs", + "Tense=Pres|VerbForm=Fin", + "Number=Plur", +] +example_deps = ["nsubj", "ROOT", "dobj"] +example_pos = ["PRON", "VERB", "NOUN"] +example_ents = ["O", "O", "I-ANIMAL"] +example_spans = [(2, 3, "ANIMAL")] + +TRAIN_EXAMPLE_1 = dict( + words=example_words_1, + lemmas=example_lemmas_1, + tags=example_tags, + morphs=example_morphs, + deps=example_deps, + heads=[1, 1, 1], + pos=example_pos, + ents=example_ents, + spans=example_spans, + cats={"CAT": 1.0, "DOG": 0.0}, +) +TRAIN_EXAMPLE_2 = dict( + words=example_words_2, + lemmas=example_lemmas_2, + tags=example_tags, + morphs=example_morphs, + deps=example_deps, + heads=[1, 1, 1], + pos=example_pos, + ents=example_ents, + spans=example_spans, + cats={"CAT": 0.0, "DOG": 1.0}, +) + + +@pytest.mark.slow +@pytest.mark.parametrize( + "component,examples", + [ + ("tagger", [TRAIN_EXAMPLE_1, TRAIN_EXAMPLE_2]), + ("morphologizer", [TRAIN_EXAMPLE_1, TRAIN_EXAMPLE_2]), + ("trainable_lemmatizer", [TRAIN_EXAMPLE_1, TRAIN_EXAMPLE_2]), + ("parser", [TRAIN_EXAMPLE_1] * 30), + ("ner", [TRAIN_EXAMPLE_1, TRAIN_EXAMPLE_2]), + ("spancat", [TRAIN_EXAMPLE_1, TRAIN_EXAMPLE_2]), + ("textcat", [TRAIN_EXAMPLE_1, TRAIN_EXAMPLE_2]), + ], +) +def test_init_config_trainable(component, examples, en_vocab): + if component == "textcat": + train_docs = [] + for example in examples: + doc = Doc(en_vocab, words=example["words"]) + doc.cats = example["cats"] + train_docs.append(doc) + elif component == "spancat": + train_docs = [] + for example in examples: + doc = Doc(en_vocab, words=example["words"]) + doc.spans["sc"] = [ + Span(doc, start, 
end, label) for start, end, label in example["spans"] + ] + train_docs.append(doc) + else: + train_docs = [] + for example in examples: + # cats, spans are not valid kwargs for instantiating a Doc + example = {k: v for k, v in example.items() if k not in ("cats", "spans")} + doc = Doc(en_vocab, **example) + train_docs.append(doc) + + with make_tempdir() as d_in: + train_bin = DocBin(docs=train_docs) + train_bin.to_disk(d_in / "train.spacy") + dev_bin = DocBin(docs=train_docs) + dev_bin.to_disk(d_in / "dev.spacy") + init_config_result = CliRunner().invoke( + app, + [ + "init", + "config", + f"{d_in}/config.cfg", + "--lang", + "en", + "--pipeline", + component, + ], + ) + assert init_config_result.exit_code == 0 + train_result = CliRunner().invoke( + app, + [ + "train", + f"{d_in}/config.cfg", + "--paths.train", + f"{d_in}/train.spacy", + "--paths.dev", + f"{d_in}/dev.spacy", + "--output", + f"{d_in}/model", + ], + ) + assert train_result.exit_code == 0 + assert Path(d_in / "model" / "model-last").exists() + + +@pytest.mark.slow +@pytest.mark.parametrize( + "component,examples", + [("tagger,parser,morphologizer", [TRAIN_EXAMPLE_1, TRAIN_EXAMPLE_2] * 15)], +) +def test_init_config_trainable_multiple(component, examples, en_vocab): + train_docs = [] + for example in examples: + example = {k: v for k, v in example.items() if k not in ("cats", "spans")} + doc = Doc(en_vocab, **example) + train_docs.append(doc) + + with make_tempdir() as d_in: + train_bin = DocBin(docs=train_docs) + train_bin.to_disk(d_in / "train.spacy") + dev_bin = DocBin(docs=train_docs) + dev_bin.to_disk(d_in / "dev.spacy") + init_config_result = CliRunner().invoke( + app, + [ + "init", + "config", + f"{d_in}/config.cfg", + "--lang", + "en", + "--pipeline", + component, + ], + ) + assert init_config_result.exit_code == 0 + train_result = CliRunner().invoke( + app, + [ + "train", + f"{d_in}/config.cfg", + "--paths.train", + f"{d_in}/train.spacy", + "--paths.dev", + f"{d_in}/dev.spacy", + "--output", + f"{d_in}/model", + ], + ) + assert train_result.exit_code == 0 + assert Path(d_in / "model" / "model-last").exists() diff --git a/spacy/tests/test_displacy.py b/spacy/tests/test_displacy.py index 837a92e02..b83c7db07 100644 --- a/spacy/tests/test_displacy.py +++ b/spacy/tests/test_displacy.py @@ -2,10 +2,10 @@ import numpy import pytest from spacy import displacy -from spacy.displacy.render import DependencyRenderer, EntityRenderer +from spacy.displacy.render import DependencyRenderer, EntityRenderer, SpanRenderer from spacy.lang.en import English from spacy.lang.fa import Persian -from spacy.tokens import Span, Doc +from spacy.tokens import Doc, Span @pytest.mark.issue(2361) @@ -113,7 +113,7 @@ def test_issue5838(): doc = nlp(sample_text) doc.ents = [Span(doc, 7, 8, label="test")] html = displacy.render(doc, style="ent") - found = html.count("
") + found = html.count("
") assert found == 4 @@ -350,6 +350,78 @@ def test_displacy_render_wrapper(en_vocab): displacy.set_render_wrapper(lambda html: html) +def test_displacy_render_manual_dep(): + """Test displacy.render with manual data for dep style""" + parsed_dep = { + "words": [ + {"text": "This", "tag": "DT"}, + {"text": "is", "tag": "VBZ"}, + {"text": "a", "tag": "DT"}, + {"text": "sentence", "tag": "NN"}, + ], + "arcs": [ + {"start": 0, "end": 1, "label": "nsubj", "dir": "left"}, + {"start": 2, "end": 3, "label": "det", "dir": "left"}, + {"start": 1, "end": 3, "label": "attr", "dir": "right"}, + ], + "title": "Title", + } + html = displacy.render([parsed_dep], style="dep", manual=True) + for word in parsed_dep["words"]: + assert word["text"] in html + assert word["tag"] in html + + +def test_displacy_render_manual_ent(): + """Test displacy.render with manual data for ent style""" + parsed_ents = [ + { + "text": "But Google is starting from behind.", + "ents": [{"start": 4, "end": 10, "label": "ORG"}], + }, + { + "text": "But Google is starting from behind.", + "ents": [{"start": -100, "end": 100, "label": "COMPANY"}], + "title": "Title", + }, + ] + + html = displacy.render(parsed_ents, style="ent", manual=True) + for parsed_ent in parsed_ents: + assert parsed_ent["ents"][0]["label"] in html + if "title" in parsed_ent: + assert parsed_ent["title"] in html + + +def test_displacy_render_manual_span(): + """Test displacy.render with manual data for span style""" + parsed_spans = [ + { + "text": "Welcome to the Bank of China.", + "spans": [ + {"start_token": 3, "end_token": 6, "label": "ORG"}, + {"start_token": 5, "end_token": 6, "label": "GPE"}, + ], + "tokens": ["Welcome", "to", "the", "Bank", "of", "China", "."], + }, + { + "text": "Welcome to the Bank of China.", + "spans": [ + {"start_token": 3, "end_token": 6, "label": "ORG"}, + {"start_token": 5, "end_token": 6, "label": "GPE"}, + ], + "tokens": ["Welcome", "to", "the", "Bank", "of", "China", "."], + "title": "Title", + }, + ] + + html = displacy.render(parsed_spans, style="span", manual=True) + for parsed_span in parsed_spans: + assert parsed_span["spans"][0]["label"] in html + if "title" in parsed_span: + assert parsed_span["title"] in html + + def test_displacy_options_case(): ents = ["foo", "BAR"] colors = {"FOO": "red", "bar": "green"} @@ -377,3 +449,42 @@ def test_displacy_manual_sorted_entities(): html = displacy.render(doc, style="ent", manual=True) assert html.find("FIRST") < html.find("SECOND") + + +@pytest.mark.issue(12816) +def test_issue12816(en_vocab) -> None: + """Test that displaCy's span visualizer escapes annotated HTML tags correctly.""" + # Create a doc containing an annotated word and an unannotated HTML tag + doc = Doc(en_vocab, words=["test", ""]) + doc.spans["sc"] = [Span(doc, 0, 1, label="test")] + + # Verify that the HTML tag is escaped when unannotated + html = displacy.render(doc, style="span") + assert "<TEST>" in html + + # Annotate the HTML tag + doc.spans["sc"].append(Span(doc, 1, 2, label="test")) + + # Verify that the HTML tag is still escaped + html = displacy.render(doc, style="span") + assert "<TEST>" in html + + +@pytest.mark.issue(13056) +def test_displacy_span_stacking(): + """Test whether span stacking works properly for multiple overlapping spans.""" + spans = [ + {"start_token": 2, "end_token": 5, "label": "SkillNC"}, + {"start_token": 0, "end_token": 2, "label": "Skill"}, + {"start_token": 1, "end_token": 3, "label": "Skill"}, + ] + tokens = ["Welcome", "to", "the", "Bank", "of", "China", "."] + 
per_token_info = SpanRenderer._assemble_per_token_info(spans=spans, tokens=tokens) + + assert len(per_token_info) == len(tokens) + assert all([len(per_token_info[i]["entities"]) == 1 for i in (0, 3, 4)]) + assert all([len(per_token_info[i]["entities"]) == 2 for i in (1, 2)]) + assert per_token_info[1]["entities"][0]["render_slot"] == 1 + assert per_token_info[1]["entities"][1]["render_slot"] == 2 + assert per_token_info[2]["entities"][0]["render_slot"] == 2 + assert per_token_info[2]["entities"][1]["render_slot"] == 3 diff --git a/spacy/tests/test_language.py b/spacy/tests/test_language.py index ce3fbb576..ee707f793 100644 --- a/spacy/tests/test_language.py +++ b/spacy/tests/test_language.py @@ -1,21 +1,23 @@ import itertools import logging +import warnings from unittest import mock + import pytest +from thinc.api import CupyOps, NumpyOps, get_current_ops + +import spacy +from spacy.lang.de import German +from spacy.lang.en import English from spacy.language import Language from spacy.scorer import Scorer from spacy.tokens import Doc, Span -from spacy.vocab import Vocab from spacy.training import Example -from spacy.lang.en import English -from spacy.lang.de import German -from spacy.util import registry, ignore_error, raise_error, find_matching_language -import spacy -from thinc.api import CupyOps, NumpyOps, get_current_ops +from spacy.util import find_matching_language, ignore_error, raise_error, registry +from spacy.vocab import Vocab from .util import add_vecs_to_vocab, assert_docs_equal - try: import torch @@ -737,9 +739,13 @@ def test_pass_doc_to_pipeline(nlp, n_process): assert doc.text == texts[0] assert len(doc.cats) > 0 if isinstance(get_current_ops(), NumpyOps) or n_process < 2: - docs = nlp.pipe(docs, n_process=n_process) - assert [doc.text for doc in docs] == texts - assert all(len(doc.cats) for doc in docs) + # Catch warnings to ensure that all worker processes exited + # succesfully. 
+ with warnings.catch_warnings(): + warnings.simplefilter("error") + docs = nlp.pipe(docs, n_process=n_process) + assert [doc.text for doc in docs] == texts + assert all(len(doc.cats) for doc in docs) def test_invalid_arg_to_pipeline(nlp): diff --git a/spacy/tests/test_misc.py b/spacy/tests/test_misc.py index 618f17334..d2a41ff0f 100644 --- a/spacy/tests/test_misc.py +++ b/spacy/tests/test_misc.py @@ -1,24 +1,44 @@ -import pytest -import os import ctypes +import os from pathlib import Path -from spacy.about import __version__ as spacy_version -from spacy import util -from spacy import prefer_gpu, require_gpu, require_cpu -from spacy.ml._precomputable_affine import PrecomputableAffine -from spacy.ml._precomputable_affine import _backprop_precomputable_affine_padding -from spacy.util import dot_to_object, SimpleFrozenList, import_file -from spacy.util import to_ternary_int, find_available_port -from thinc.api import Config, Optimizer, ConfigValidationError -from thinc.api import get_current_ops, set_current_ops, NumpyOps, CupyOps, MPSOps + +import pytest + +try: + from pydantic.v1 import ValidationError +except ImportError: + from pydantic import ValidationError # type: ignore + +from thinc.api import ( + Config, + ConfigValidationError, + CupyOps, + MPSOps, + NumpyOps, + Optimizer, + get_current_ops, + set_current_ops, +) from thinc.compat import has_cupy_gpu, has_torch_mps_gpu -from spacy.training.batchers import minibatch_by_words + +from spacy import prefer_gpu, require_cpu, require_gpu, util +from spacy.about import __version__ as spacy_version from spacy.lang.en import English from spacy.lang.nl import Dutch from spacy.language import DEFAULT_CONFIG_PATH +from spacy.ml._precomputable_affine import ( + PrecomputableAffine, + _backprop_precomputable_affine_padding, +) from spacy.schemas import ConfigSchemaTraining, TokenPattern, TokenPatternSchema -from pydantic import ValidationError - +from spacy.training.batchers import minibatch_by_words +from spacy.util import ( + SimpleFrozenList, + dot_to_object, + find_available_port, + import_file, + to_ternary_int, +) from .util import get_random_doc, make_tempdir @@ -237,6 +257,10 @@ def test_minor_version(a1, a2, b1, b2, is_match): {"training.batch_size": 128, "training.optimizer.learn_rate": 0.01}, {"training": {"batch_size": 128, "optimizer": {"learn_rate": 0.01}}}, ), + ( + {"attribute_ruler.scorer.@scorers": "spacy.tagger_scorer.v1"}, + {"attribute_ruler": {"scorer": {"@scorers": "spacy.tagger_scorer.v1"}}}, + ), ], ) def test_dot_to_dict(dot_notation, expected): @@ -245,6 +269,29 @@ def test_dot_to_dict(dot_notation, expected): assert util.dict_to_dot(result) == dot_notation +@pytest.mark.parametrize( + "dot_notation,expected", + [ + ( + {"token.pos": True, "token._.xyz": True}, + {"token": {"pos": True, "_": {"xyz": True}}}, + ), + ( + {"training.batch_size": 128, "training.optimizer.learn_rate": 0.01}, + {"training": {"batch_size": 128, "optimizer": {"learn_rate": 0.01}}}, + ), + ( + {"attribute_ruler.scorer": {"@scorers": "spacy.tagger_scorer.v1"}}, + {"attribute_ruler": {"scorer": {"@scorers": "spacy.tagger_scorer.v1"}}}, + ), + ], +) +def test_dot_to_dict_overrides(dot_notation, expected): + result = util.dot_to_dict(dot_notation) + assert result == expected + assert util.dict_to_dot(result, for_overrides=True) == dot_notation + + def test_set_dot_to_object(): config = {"foo": {"bar": 1, "baz": {"x": "y"}}, "test": {"a": {"b": "c"}}} with pytest.raises(KeyError): @@ -329,8 +376,9 @@ def test_util_dot_section(): factory = "textcat" 
[components.textcat.model] - @architectures = "spacy.TextCatBOW.v2" + @architectures = "spacy.TextCatBOW.v3" exclusive_classes = true + length = 262144 ngram_size = 1 no_output_layer = false """ @@ -438,10 +486,10 @@ def test_to_ternary_int(): def test_find_available_port(): host = "0.0.0.0" - port = 5000 - assert find_available_port(port, host) == port, "Port 5000 isn't free" + port = 5001 + assert find_available_port(port, host) == port, "Port 5001 isn't free" - from wsgiref.simple_server import make_server, demo_app + from wsgiref.simple_server import demo_app, make_server with make_server(host, port, demo_app) as httpd: with pytest.warns(UserWarning, match="already in use"): diff --git a/spacy/tests/test_models.py b/spacy/tests/test_models.py index d91ed1201..5228b4544 100644 --- a/spacy/tests/test_models.py +++ b/spacy/tests/test_models.py @@ -1,16 +1,32 @@ from typing import List -import pytest -from thinc.api import fix_random_seed, Adam, set_dropout_rate -from thinc.api import Ragged, reduce_mean, Logistic, chain, Relu -from numpy.testing import assert_array_equal, assert_array_almost_equal + import numpy -from spacy.ml.models import build_Tok2Vec_model, MultiHashEmbed, MaxoutWindowEncoder -from spacy.ml.models import build_bow_text_classifier, build_simple_cnn_text_classifier -from spacy.ml.models import build_spancat_model -from spacy.ml.staticvectors import StaticVectors -from spacy.ml.extract_spans import extract_spans, _get_span_indices +import pytest +from numpy.testing import assert_array_almost_equal, assert_array_equal +from thinc.api import ( + Adam, + Logistic, + Ragged, + Relu, + chain, + fix_random_seed, + reduce_mean, + set_dropout_rate, +) + from spacy.lang.en import English from spacy.lang.en.examples import sentences as EN_SENTENCES +from spacy.ml.extract_spans import _get_span_indices, extract_spans +from spacy.ml.models import ( + MaxoutWindowEncoder, + MultiHashEmbed, + build_bow_text_classifier, + build_simple_cnn_text_classifier, + build_spancat_model, + build_Tok2Vec_model, +) +from spacy.ml.staticvectors import StaticVectors +from spacy.util import registry def get_textcat_bow_kwargs(): @@ -269,3 +285,17 @@ def test_spancat_model_forward_backward(nO=5): Y, backprop = model((docs, spans), is_train=True) assert Y.shape == (spans.dataXd.shape[0], nO) backprop(Y) + + +def test_textcat_reduce_invalid_args(): + textcat_reduce = registry.architectures.get("spacy.TextCatReduce.v1") + tok2vec = make_test_tok2vec() + with pytest.raises(ValueError, match=r"must be used with at least one reduction"): + textcat_reduce( + tok2vec=tok2vec, + exclusive_classes=False, + use_reduce_first=False, + use_reduce_last=False, + use_reduce_max=False, + use_reduce_mean=False, + ) diff --git a/spacy/tests/test_pickles.py b/spacy/tests/test_pickles.py index 0c56ae0d2..e3acd27a3 100644 --- a/spacy/tests/test_pickles.py +++ b/spacy/tests/test_pickles.py @@ -1,11 +1,12 @@ -import pytest import numpy +import pytest import srsly + +from spacy.attrs import NORM from spacy.lang.en import English from spacy.strings import StringStore from spacy.tokens import Doc from spacy.vocab import Vocab -from spacy.attrs import NORM @pytest.mark.parametrize("text1,text2", [("hello", "bye")]) diff --git a/spacy/tests/test_scorer.py b/spacy/tests/test_scorer.py index f95c44149..95daf046c 100644 --- a/spacy/tests/test_scorer.py +++ b/spacy/tests/test_scorer.py @@ -1,13 +1,12 @@ -from numpy.testing import assert_almost_equal, assert_array_almost_equal import pytest +from numpy.testing import 
assert_almost_equal, assert_array_almost_equal from pytest import approx + +from spacy.lang.en import English +from spacy.scorer import PRFScore, ROCAUCScore, Scorer, _roc_auc_score, _roc_curve +from spacy.tokens import Doc, Span from spacy.training import Example from spacy.training.iob_utils import offsets_to_biluo_tags -from spacy.scorer import Scorer, ROCAUCScore, PRFScore -from spacy.scorer import _roc_auc_score, _roc_curve -from spacy.lang.en import English -from spacy.tokens import Doc, Span - test_las_apple = [ [ diff --git a/spacy/tests/tok2vec.py b/spacy/tests/tok2vec.py new file mode 100644 index 000000000..7e7b689eb --- /dev/null +++ b/spacy/tests/tok2vec.py @@ -0,0 +1,36 @@ +from typing import List + +from thinc.api import Model +from thinc.types import Floats2d + +from spacy.tokens import Doc +from spacy.util import registry + + +@registry.architectures("test.LazyInitTok2Vec.v1") +def build_lazy_init_tok2vec(*, width: int) -> Model[List[Doc], List[Floats2d]]: + """tok2vec model of which the output size is only known after + initialization. This implementation does not output meaningful + embeddings, it is strictly for testing.""" + return Model( + "lazy_init_tok2vec", + lazy_init_tok2vec_forward, + init=lazy_init_tok2vec_init, + dims={"nO": None}, + attrs={"width": width}, + ) + + +def lazy_init_tok2vec_init(model: Model, X=None, Y=None): + width = model.attrs["width"] + model.set_dim("nO", width) + + +def lazy_init_tok2vec_forward(model: Model, X: List[Doc], is_train: bool): + width = model.get_dim("nO") + Y = [model.ops.alloc2f(len(doc), width) for doc in X] + + def backprop(dY): + return [] + + return Y, backprop diff --git a/spacy/tests/tokenizer/test_exceptions.py b/spacy/tests/tokenizer/test_exceptions.py index 85716377a..1f8f52c79 100644 --- a/spacy/tests/tokenizer/test_exceptions.py +++ b/spacy/tests/tokenizer/test_exceptions.py @@ -1,4 +1,5 @@ import sys + import pytest diff --git a/spacy/tests/tokenizer/test_explain.py b/spacy/tests/tokenizer/test_explain.py index 5b4eeca16..78932f653 100644 --- a/spacy/tests/tokenizer/test_explain.py +++ b/spacy/tests/tokenizer/test_explain.py @@ -85,6 +85,18 @@ def test_tokenizer_explain_special_matcher(en_vocab): assert tokens == explain_tokens +def test_tokenizer_explain_special_matcher_whitespace(en_vocab): + rules = {":]": [{"ORTH": ":]"}]} + tokenizer = Tokenizer( + en_vocab, + rules=rules, + ) + text = ": ]" + tokens = [t.text for t in tokenizer(text)] + explain_tokens = [t[1] for t in tokenizer.explain(text)] + assert tokens == explain_tokens + + @hypothesis.strategies.composite def sentence_strategy(draw: hypothesis.strategies.DrawFn, max_n_words: int = 4) -> str: """ @@ -123,6 +135,9 @@ def test_tokenizer_explain_fuzzy(lang: str, sentence: str) -> None: """ tokenizer: Tokenizer = spacy.blank(lang).tokenizer - tokens = [t.text for t in tokenizer(sentence) if not t.is_space] + # Tokenizer.explain is not intended to handle whitespace or control + # characters in the same way as Tokenizer + sentence = re.sub(r"\s+", " ", sentence).strip() + tokens = [t.text for t in tokenizer(sentence)] debug_tokens = [t[1] for t in tokenizer.explain(sentence)] assert tokens == debug_tokens, f"{tokens}, {debug_tokens}, {sentence}" diff --git a/spacy/tests/tokenizer/test_tokenizer.py b/spacy/tests/tokenizer/test_tokenizer.py index 6af58b344..1ea5f78c9 100644 --- a/spacy/tests/tokenizer/test_tokenizer.py +++ b/spacy/tests/tokenizer/test_tokenizer.py @@ -3,15 +3,19 @@ import re import numpy import pytest -from spacy.lang.en import English from 
spacy.lang.de import German +from spacy.lang.en import English +from spacy.symbols import ORTH from spacy.tokenizer import Tokenizer from spacy.tokens import Doc from spacy.training import Example -from spacy.util import compile_prefix_regex, compile_suffix_regex, ensure_path -from spacy.util import compile_infix_regex +from spacy.util import ( + compile_infix_regex, + compile_prefix_regex, + compile_suffix_regex, + ensure_path, +) from spacy.vocab import Vocab -from spacy.symbols import ORTH @pytest.mark.issue(743) diff --git a/spacy/tests/tokenizer/test_urls.py b/spacy/tests/tokenizer/test_urls.py index 57e970f87..ff8812be1 100644 --- a/spacy/tests/tokenizer/test_urls.py +++ b/spacy/tests/tokenizer/test_urls.py @@ -2,7 +2,6 @@ import pytest from spacy.lang.tokenizer_exceptions import BASE_EXCEPTIONS - URLS_BASIC = [ "http://www.nytimes.com/2016/04/20/us/politics/new-york-primary-preview.html?hp&action=click&pgtype=Homepage&clickSource=story-heading&module=a-lede-package-region®ion=top-news&WT.nav=top-news&_r=0", "www.red-stars.com", diff --git a/spacy/tests/training/test_augmenters.py b/spacy/tests/training/test_augmenters.py index 35860a199..49a83010b 100644 --- a/spacy/tests/training/test_augmenters.py +++ b/spacy/tests/training/test_augmenters.py @@ -1,13 +1,17 @@ -import pytest -from spacy.pipeline._parser_internals.nonproj import contains_cycle -from spacy.training import Corpus, Example -from spacy.training.augment import create_orth_variants_augmenter -from spacy.training.augment import create_lower_casing_augmenter -from spacy.training.augment import make_whitespace_variant -from spacy.lang.en import English -from spacy.tokens import DocBin, Doc, Span -from contextlib import contextmanager import random +from contextlib import contextmanager + +import pytest + +from spacy.lang.en import English +from spacy.pipeline._parser_internals.nonproj import contains_cycle +from spacy.tokens import Doc, DocBin, Span +from spacy.training import Corpus, Example +from spacy.training.augment import ( + create_lower_casing_augmenter, + create_orth_variants_augmenter, + make_whitespace_variant, +) from ..util import make_tempdir diff --git a/spacy/tests/training/test_corpus.py b/spacy/tests/training/test_corpus.py index b4f9cc13a..e7cae9893 100644 --- a/spacy/tests/training/test_corpus.py +++ b/spacy/tests/training/test_corpus.py @@ -1,8 +1,9 @@ -from typing import IO, Generator, Iterable, List, TextIO, Tuple +import tempfile from contextlib import contextmanager from pathlib import Path +from typing import IO, Generator, Iterable, List, TextIO, Tuple + import pytest -import tempfile from spacy.lang.en import English from spacy.training import Example, PlainTextCorpus diff --git a/spacy/tests/training/test_logger.py b/spacy/tests/training/test_logger.py index 0dfd0cbf4..48750026b 100644 --- a/spacy/tests/training/test_logger.py +++ b/spacy/tests/training/test_logger.py @@ -1,6 +1,6 @@ import pytest -import spacy +import spacy from spacy.training import loggers diff --git a/spacy/tests/training/test_new_example.py b/spacy/tests/training/test_new_example.py index 6b15603b3..88f819984 100644 --- a/spacy/tests/training/test_new_example.py +++ b/spacy/tests/training/test_new_example.py @@ -1,8 +1,9 @@ import pytest -from spacy.training.example import Example + from spacy.tokens import Doc -from spacy.vocab import Vocab +from spacy.training.example import Example from spacy.util import to_ternary_int +from spacy.vocab import Vocab def test_Example_init_requires_doc_objects(): diff --git 
a/spacy/tests/training/test_pretraining.py b/spacy/tests/training/test_pretraining.py index 6cfdeed20..5e5f94622 100644 --- a/spacy/tests/training/test_pretraining.py +++ b/spacy/tests/training/test_pretraining.py @@ -1,4 +1,5 @@ from pathlib import Path + import numpy as np import pytest import srsly @@ -6,14 +7,15 @@ from thinc.api import Config, get_current_ops from spacy import util from spacy.lang.en import English +from spacy.language import DEFAULT_CONFIG_PATH, DEFAULT_CONFIG_PRETRAIN_PATH +from spacy.ml.models.multi_task import create_pretrain_vectors +from spacy.tokens import Doc, DocBin from spacy.training.initialize import init_nlp from spacy.training.loop import train from spacy.training.pretrain import pretrain -from spacy.tokens import Doc, DocBin -from spacy.language import DEFAULT_CONFIG_PRETRAIN_PATH, DEFAULT_CONFIG_PATH -from spacy.ml.models.multi_task import create_pretrain_vectors from spacy.vectors import Vectors from spacy.vocab import Vocab + from ..util import make_tempdir pretrain_string_listener = """ diff --git a/spacy/tests/training/test_readers.py b/spacy/tests/training/test_readers.py index 8c5c81625..22cf75272 100644 --- a/spacy/tests/training/test_readers.py +++ b/spacy/tests/training/test_readers.py @@ -1,10 +1,12 @@ -from typing import Dict, Iterable, Callable +from typing import Callable, Dict, Iterable + import pytest from thinc.api import Config, fix_random_seed + from spacy import Language -from spacy.util import load_model_from_config, registry, resolve_dot_names from spacy.schemas import ConfigSchemaTraining from spacy.training import Example +from spacy.util import load_model_from_config, registry, resolve_dot_names def test_readers(): diff --git a/spacy/tests/training/test_rehearse.py b/spacy/tests/training/test_rehearse.py index 5ac7fc217..7efe57a36 100644 --- a/spacy/tests/training/test_rehearse.py +++ b/spacy/tests/training/test_rehearse.py @@ -1,9 +1,9 @@ -import pytest -import spacy - from typing import List -from spacy.training import Example +import pytest + +import spacy +from spacy.training import Example TRAIN_DATA = [ ( diff --git a/spacy/tests/training/test_training.py b/spacy/tests/training/test_training.py index 7933ea31f..a492a8be3 100644 --- a/spacy/tests/training/test_training.py +++ b/spacy/tests/training/test_training.py @@ -2,20 +2,32 @@ import random import numpy import pytest -import spacy import srsly +from thinc.api import Adam, compounding + +import spacy from spacy.lang.en import English from spacy.tokens import Doc, DocBin -from spacy.training import Alignment, Corpus, Example, biluo_tags_to_offsets -from spacy.training import biluo_tags_to_spans, docs_to_json, iob_to_biluo -from spacy.training import offsets_to_biluo_tags -from spacy.training.alignment_array import AlignmentArray +from spacy.training import ( + Alignment, + Corpus, + Example, + biluo_tags_to_offsets, + biluo_tags_to_spans, + docs_to_json, + iob_to_biluo, + offsets_to_biluo_tags, +) from spacy.training.align import get_alignments +from spacy.training.alignment_array import AlignmentArray from spacy.training.converters import json_to_docs from spacy.training.loop import train_while_improving -from spacy.util import get_words_and_spaces, load_model_from_path, minibatch -from spacy.util import load_config_from_str -from thinc.api import compounding, Adam +from spacy.util import ( + get_words_and_spaces, + load_config_from_str, + load_model_from_path, + minibatch, +) from ..util import make_tempdir diff --git a/spacy/tests/util.py b/spacy/tests/util.py index 
c2647558d..a5548898c 100644 --- a/spacy/tests/util.py +++ b/spacy/tests/util.py @@ -1,14 +1,16 @@ -import numpy -import tempfile import contextlib import re +import tempfile + +import numpy import srsly -from spacy.tokens import Doc -from spacy.vocab import Vocab -from spacy.util import make_tempdir # noqa: F401 -from spacy.training import split_bilu_label from thinc.api import get_current_ops +from spacy.tokens import Doc +from spacy.training import split_bilu_label +from spacy.util import make_tempdir # noqa: F401 +from spacy.vocab import Vocab + @contextlib.contextmanager def make_tempfile(mode="r"): diff --git a/spacy/tests/vocab_vectors/test_lexeme.py b/spacy/tests/vocab_vectors/test_lexeme.py index d91f41db3..156e3391a 100644 --- a/spacy/tests/vocab_vectors/test_lexeme.py +++ b/spacy/tests/vocab_vectors/test_lexeme.py @@ -1,5 +1,6 @@ import numpy import pytest + from spacy.attrs import IS_ALPHA, IS_DIGIT from spacy.lookups import Lookups from spacy.tokens import Doc diff --git a/spacy/tests/vocab_vectors/test_lookups.py b/spacy/tests/vocab_vectors/test_lookups.py index 94e31a072..addd3fe4f 100644 --- a/spacy/tests/vocab_vectors/test_lookups.py +++ b/spacy/tests/vocab_vectors/test_lookups.py @@ -1,4 +1,5 @@ import pytest + from spacy.lookups import Lookups, Table from spacy.strings import get_string_id from spacy.vocab import Vocab diff --git a/spacy/tests/vocab_vectors/test_similarity.py b/spacy/tests/vocab_vectors/test_similarity.py index 1efcdd81e..5a28f5414 100644 --- a/spacy/tests/vocab_vectors/test_similarity.py +++ b/spacy/tests/vocab_vectors/test_similarity.py @@ -1,9 +1,10 @@ -import pytest import numpy +import pytest + from spacy.tokens import Doc from spacy.vocab import Vocab -from ..util import get_cosine, add_vecs_to_vocab +from ..util import add_vecs_to_vocab, get_cosine @pytest.fixture diff --git a/spacy/tests/vocab_vectors/test_stringstore.py b/spacy/tests/vocab_vectors/test_stringstore.py index a0f8016af..61039fffd 100644 --- a/spacy/tests/vocab_vectors/test_stringstore.py +++ b/spacy/tests/vocab_vectors/test_stringstore.py @@ -1,4 +1,5 @@ import pytest + from spacy.strings import StringStore diff --git a/spacy/tests/vocab_vectors/test_vectors.py b/spacy/tests/vocab_vectors/test_vectors.py index 70835816d..717291314 100644 --- a/spacy/tests/vocab_vectors/test_vectors.py +++ b/spacy/tests/vocab_vectors/test_vectors.py @@ -402,6 +402,7 @@ def test_vectors_serialize(): row_r = v_r.add("D", vector=OPS.asarray([10, 20, 30, 40], dtype="f")) assert row == row_r assert_equal(OPS.to_numpy(v.data), OPS.to_numpy(v_r.data)) + assert v.attr == v_r.attr def test_vector_is_oov(): @@ -646,3 +647,32 @@ def test_equality(): vectors1.resize((5, 9)) vectors2.resize((5, 9)) assert vectors1 == vectors2 + + +def test_vectors_attr(): + data = numpy.asarray([[0, 0, 0], [1, 2, 3], [9, 8, 7]], dtype="f") + # default ORTH + nlp = English() + nlp.vocab.vectors = Vectors(data=data, keys=["A", "B", "C"]) + assert nlp.vocab.strings["A"] in nlp.vocab.vectors.key2row + assert nlp.vocab.strings["a"] not in nlp.vocab.vectors.key2row + assert nlp.vocab["A"].has_vector is True + assert nlp.vocab["a"].has_vector is False + assert nlp("A")[0].has_vector is True + assert nlp("a")[0].has_vector is False + + # custom LOWER + nlp = English() + nlp.vocab.vectors = Vectors(data=data, keys=["a", "b", "c"], attr="LOWER") + assert nlp.vocab.strings["A"] not in nlp.vocab.vectors.key2row + assert nlp.vocab.strings["a"] in nlp.vocab.vectors.key2row + assert nlp.vocab["A"].has_vector is True + assert 
nlp.vocab["a"].has_vector is True + assert nlp("A")[0].has_vector is True + assert nlp("a")[0].has_vector is True + # add a new vectors entry + assert nlp.vocab["D"].has_vector is False + assert nlp.vocab["d"].has_vector is False + nlp.vocab.set_vector("D", numpy.asarray([4, 5, 6])) + assert nlp.vocab["D"].has_vector is True + assert nlp.vocab["d"].has_vector is True diff --git a/spacy/tests/vocab_vectors/test_vocab_api.py b/spacy/tests/vocab_vectors/test_vocab_api.py index b9c386eb8..e373b9d0b 100644 --- a/spacy/tests/vocab_vectors/test_vocab_api.py +++ b/spacy/tests/vocab_vectors/test_vocab_api.py @@ -1,6 +1,7 @@ import os import pytest + from spacy.attrs import IS_ALPHA, LEMMA, ORTH from spacy.lang.en import English from spacy.parts_of_speech import NOUN, VERB diff --git a/spacy/tokenizer.pxd b/spacy/tokenizer.pxd index e6a072053..a902ebad9 100644 --- a/spacy/tokenizer.pxd +++ b/spacy/tokenizer.pxd @@ -1,13 +1,13 @@ +from cymem.cymem cimport Pool from libcpp.vector cimport vector from preshed.maps cimport PreshMap -from cymem.cymem cimport Pool -from .typedefs cimport hash_t -from .structs cimport LexemeC, SpanC, TokenC -from .strings cimport StringStore -from .tokens.doc cimport Doc -from .vocab cimport Vocab, LexemesOrTokens, _Cached from .matcher.phrasematcher cimport PhraseMatcher +from .strings cimport StringStore +from .structs cimport LexemeC, SpanC, TokenC +from .tokens.doc cimport Doc +from .typedefs cimport hash_t +from .vocab cimport LexemesOrTokens, Vocab, _Cached cdef class Tokenizer: @@ -31,24 +31,58 @@ cdef class Tokenizer: cdef Doc _tokenize_affixes(self, str string, bint with_special_cases) cdef int _apply_special_cases(self, Doc doc) except -1 - cdef void _filter_special_spans(self, vector[SpanC] &original, - vector[SpanC] &filtered, int doc_len) nogil - cdef object _prepare_special_spans(self, Doc doc, - vector[SpanC] &filtered) - cdef int _retokenize_special_spans(self, Doc doc, TokenC* tokens, - object span_data) - cdef int _try_specials_and_cache(self, hash_t key, Doc tokens, - int* has_special, - bint with_special_cases) except -1 - cdef int _tokenize(self, Doc tokens, str span, hash_t key, - int* has_special, bint with_special_cases) except -1 - cdef str _split_affixes(self, Pool mem, str string, - vector[LexemeC*] *prefixes, - vector[LexemeC*] *suffixes, int* has_special, - bint with_special_cases) - cdef int _attach_tokens(self, Doc tokens, str string, - vector[LexemeC*] *prefixes, - vector[LexemeC*] *suffixes, int* has_special, - bint with_special_cases) except -1 - cdef int _save_cached(self, const TokenC* tokens, hash_t key, - int* has_special, int n) except -1 + cdef void _filter_special_spans( + self, + vector[SpanC] &original, + vector[SpanC] &filtered, + int doc_len, + ) nogil + cdef object _prepare_special_spans( + self, + Doc doc, + vector[SpanC] &filtered, + ) + cdef int _retokenize_special_spans( + self, + Doc doc, + TokenC* tokens, + object span_data, + ) + cdef int _try_specials_and_cache( + self, + hash_t key, + Doc tokens, + int* has_special, + bint with_special_cases, + ) except -1 + cdef int _tokenize( + self, + Doc tokens, + str span, + hash_t key, + int* has_special, + bint with_special_cases, + ) except -1 + cdef str _split_affixes( + self, + Pool mem, + str string, + vector[LexemeC*] *prefixes, + vector[LexemeC*] *suffixes, int* has_special, + bint with_special_cases, + ) + cdef int _attach_tokens( + self, + Doc tokens, + str string, + vector[LexemeC*] *prefixes, + vector[LexemeC*] *suffixes, int* has_special, + bint with_special_cases, + ) 
except -1 + cdef int _save_cached( + self, + const TokenC* tokens, + hash_t key, + int* has_special, + int n, + ) except -1 diff --git a/spacy/tokenizer.pyx b/spacy/tokenizer.pyx index a4a68ae8e..96545828f 100644 --- a/spacy/tokenizer.pyx +++ b/spacy/tokenizer.pyx @@ -1,29 +1,25 @@ -# cython: embedsignature=True, profile=True, binding=True +# cython: embedsignature=True, binding=True +cimport cython +from cymem.cymem cimport Pool from cython.operator cimport dereference as deref from cython.operator cimport preincrement as preinc from libc.string cimport memcpy, memset from libcpp.set cimport set as stdset -from cymem.cymem cimport Pool from preshed.maps cimport PreshMap -cimport cython import re -import warnings - -from .tokens.doc cimport Doc -from .strings cimport hash_string from .lexeme cimport EMPTY_LEXEME +from .strings cimport hash_string +from .tokens.doc cimport Doc -from .attrs import intify_attrs -from .symbols import ORTH, NORM -from .errors import Errors, Warnings from . import util -from .util import registry, get_words_and_spaces from .attrs import intify_attrs -from .symbols import ORTH +from .errors import Errors from .scorer import Scorer -from .training import validate_examples +from .symbols import NORM, ORTH from .tokens import Span +from .training import validate_examples +from .util import get_words_and_spaces cdef class Tokenizer: @@ -74,65 +70,72 @@ cdef class Tokenizer: self._special_matcher = PhraseMatcher(self.vocab) self._load_special_cases(rules) - property token_match: - def __get__(self): - return self._token_match + @property + def token_match(self): + return self._token_match - def __set__(self, token_match): - self._token_match = token_match - self._reload_special_cases() + @token_match.setter + def token_match(self, token_match): + self._token_match = token_match + self._reload_special_cases() - property url_match: - def __get__(self): - return self._url_match + @property + def url_match(self): + return self._url_match - def __set__(self, url_match): - self._url_match = url_match - self._reload_special_cases() + @url_match.setter + def url_match(self, url_match): + self._url_match = url_match + self._reload_special_cases() - property prefix_search: - def __get__(self): - return self._prefix_search + @property + def prefix_search(self): + return self._prefix_search - def __set__(self, prefix_search): - self._prefix_search = prefix_search - self._reload_special_cases() + @prefix_search.setter + def prefix_search(self, prefix_search): + self._prefix_search = prefix_search + self._reload_special_cases() - property suffix_search: - def __get__(self): - return self._suffix_search + @property + def suffix_search(self): + return self._suffix_search - def __set__(self, suffix_search): - self._suffix_search = suffix_search - self._reload_special_cases() + @suffix_search.setter + def suffix_search(self, suffix_search): + self._suffix_search = suffix_search + self._reload_special_cases() - property infix_finditer: - def __get__(self): - return self._infix_finditer + @property + def infix_finditer(self): + return self._infix_finditer - def __set__(self, infix_finditer): - self._infix_finditer = infix_finditer - self._reload_special_cases() + @infix_finditer.setter + def infix_finditer(self, infix_finditer): + self._infix_finditer = infix_finditer + self._reload_special_cases() - property rules: - def __get__(self): - return self._rules + @property + def rules(self): + return self._rules - def __set__(self, rules): - self._rules = {} - self._flush_cache() - 
self._flush_specials() - self._cache = PreshMap() - self._specials = PreshMap() - self._load_special_cases(rules) + @rules.setter + def rules(self, rules): + self._rules = {} + self._flush_cache() + self._flush_specials() + self._cache = PreshMap() + self._specials = PreshMap() + self._load_special_cases(rules) - property faster_heuristics: - def __get__(self): - return bool(self._faster_heuristics) + @property + def faster_heuristics(self): + return bool(self._faster_heuristics) - def __set__(self, faster_heuristics): - self._faster_heuristics = bool(faster_heuristics) - self._reload_special_cases() + @faster_heuristics.setter + def faster_heuristics(self, faster_heuristics): + self._faster_heuristics = bool(faster_heuristics) + self._reload_special_cases() def __reduce__(self): args = (self.vocab, @@ -326,7 +329,7 @@ cdef class Tokenizer: cdef int span_start cdef int span_end while i < doc.length: - if not i in span_data: + if i not in span_data: tokens[i + offset] = doc.c[i] i += 1 else: @@ -397,12 +400,15 @@ cdef class Tokenizer: self._save_cached(&tokens.c[orig_size], orig_key, has_special, tokens.length - orig_size) - cdef str _split_affixes(self, Pool mem, str string, - vector[const LexemeC*] *prefixes, - vector[const LexemeC*] *suffixes, - int* has_special, - bint with_special_cases): - cdef size_t i + cdef str _split_affixes( + self, + Pool mem, + str string, + vector[const LexemeC*] *prefixes, + vector[const LexemeC*] *suffixes, + int* has_special, + bint with_special_cases + ): cdef str prefix cdef str suffix cdef str minus_pre @@ -447,10 +453,6 @@ cdef class Tokenizer: vector[const LexemeC*] *suffixes, int* has_special, bint with_special_cases) except -1: - cdef bint specials_hit = 0 - cdef bint cache_hit = 0 - cdef int split, end - cdef const LexemeC* const* lexemes cdef const LexemeC* lexeme cdef str span cdef int i @@ -460,9 +462,11 @@ cdef class Tokenizer: if string: if self._try_specials_and_cache(hash_string(string), tokens, has_special, with_special_cases): pass - elif (self.token_match and self.token_match(string)) or \ - (self.url_match and \ - self.url_match(string)): + elif ( + (self.token_match and self.token_match(string)) or + (self.url_match and self.url_match(string)) + ): + # We're always saying 'no' to spaces here -- the caller will # fix up the outermost one, with reference to the original. 
# See Issue #859 @@ -733,9 +737,16 @@ cdef class Tokenizer: if i in spans_by_start: span = spans_by_start[i] exc = [d[ORTH] for d in special_cases[span.label_]] - for j, orth in enumerate(exc): - final_tokens.append((f"SPECIAL-{j + 1}", self.vocab.strings[orth])) - i += len(span) + # The phrase matcher can overmatch for tokens separated by + # spaces in the text but not in the underlying rule, so skip + # cases where the texts aren't identical + if span.text != "".join([self.vocab.strings[orth] for orth in exc]): + final_tokens.append(tokens[i]) + i += 1 + else: + for j, orth in enumerate(exc): + final_tokens.append((f"SPECIAL-{j + 1}", self.vocab.strings[orth])) + i += len(span) else: final_tokens.append(tokens[i]) i += 1 @@ -823,7 +834,7 @@ cdef class Tokenizer: self.infix_finditer = None self.token_match = None self.url_match = None - msg = util.from_bytes(bytes_data, deserializers, exclude) + util.from_bytes(bytes_data, deserializers, exclude) if "prefix_search" in data and isinstance(data["prefix_search"], str): self.prefix_search = re.compile(data["prefix_search"]).search if "suffix_search" in data and isinstance(data["suffix_search"], str): diff --git a/spacy/tokens/__init__.py b/spacy/tokens/__init__.py index 64090925d..3393ca6ec 100644 --- a/spacy/tokens/__init__.py +++ b/spacy/tokens/__init__.py @@ -1,8 +1,8 @@ +from ._serialize import DocBin from .doc import Doc -from .token import Token +from .morphanalysis import MorphAnalysis from .span import Span from .span_group import SpanGroup -from ._serialize import DocBin -from .morphanalysis import MorphAnalysis +from .token import Token -__all__ = ["Doc", "Token", "Span", "SpanGroup", "DocBin", "MorphAnalysis"] +__all__ = ["Doc", "DocBin", "MorphAnalysis", "Span", "SpanGroup", "Token"] diff --git a/spacy/tokens/_dict_proxies.py b/spacy/tokens/_dict_proxies.py index 6edcce13d..b2b496307 100644 --- a/spacy/tokens/_dict_proxies.py +++ b/spacy/tokens/_dict_proxies.py @@ -1,12 +1,12 @@ -from typing import Dict, Iterable, List, Tuple, Union, Optional, TYPE_CHECKING import warnings import weakref from collections import UserDict +from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple, Union + import srsly -from .span_group import SpanGroup from ..errors import Errors, Warnings - +from .span_group import SpanGroup if TYPE_CHECKING: # This lets us add type hints for mypy etc. without causing circular imports diff --git a/spacy/tokens/_retokenize.pyi b/spacy/tokens/_retokenize.pyi index 8834d38c0..097fbd1a9 100644 --- a/spacy/tokens/_retokenize.pyi +++ b/spacy/tokens/_retokenize.pyi @@ -1,8 +1,9 @@ -from typing import Dict, Any, Union, List, Tuple +from typing import Any, Dict, List, Tuple, Union + +from .. import Vocab from .doc import Doc from .span import Span from .token import Token -from .. import Vocab class Retokenizer: def __init__(self, doc: Doc) -> None: ... 
diff --git a/spacy/tokens/_retokenize.pyx b/spacy/tokens/_retokenize.pyx index 43e6d4aa7..b0e4ff85c 100644 --- a/spacy/tokens/_retokenize.pyx +++ b/spacy/tokens/_retokenize.pyx @@ -1,24 +1,23 @@ -# cython: infer_types=True, bounds_check=False, profile=True -from libc.string cimport memcpy, memset -from libc.stdlib cimport malloc, free +# cython: infer_types=True, bounds_check=False from cymem.cymem cimport Pool +from libc.string cimport memset -from thinc.api import get_array_module import numpy +from thinc.api import get_array_module -from .doc cimport Doc, set_children_from_heads, token_by_start, token_by_end +from ..attrs cimport MORPH, NORM +from ..lexeme cimport EMPTY_LEXEME, Lexeme +from ..structs cimport LexemeC, TokenC +from ..vocab cimport Vocab +from .doc cimport Doc, set_children_from_heads, token_by_start from .span cimport Span from .token cimport Token -from ..lexeme cimport Lexeme, EMPTY_LEXEME -from ..structs cimport LexemeC, TokenC -from ..attrs cimport MORPH, NORM -from ..vocab cimport Vocab -from .underscore import is_writable_attr from ..attrs import intify_attrs -from ..util import SimpleFrozenDict from ..errors import Errors from ..strings import get_string_id +from ..util import SimpleFrozenDict +from .underscore import is_writable_attr cdef class Retokenizer: @@ -147,7 +146,7 @@ def _merge(Doc doc, merges): syntactic root of the span. RETURNS (Token): The first newly merged token. """ - cdef int i, merge_index, start, end, token_index, current_span_index, current_offset, offset, span_index + cdef int i, merge_index, start, token_index, current_span_index, current_offset, offset, span_index cdef Span span cdef const LexemeC* lex cdef TokenC* token @@ -165,7 +164,6 @@ def _merge(Doc doc, merges): merges.sort(key=_get_start) for merge_index, (span, attributes) in enumerate(merges): start = span.start - end = span.end spans.append(span) # House the new merged token where it starts token = &doc.c[start] @@ -203,8 +201,9 @@ def _merge(Doc doc, merges): # for the merged region. To do this, we create a boolean array indicating # whether the row is to be deleted, then use numpy.delete if doc.tensor is not None and doc.tensor.size != 0: - doc.tensor = _resize_tensor(doc.tensor, - [(m[0].start, m[0].end) for m in merges]) + doc.tensor = _resize_tensor( + doc.tensor, [(m[0].start, m[0].end) for m in merges] + ) # Memorize span roots and sets dependencies of the newly merged # tokens to the dependencies of their roots. 
span_roots = [] @@ -267,11 +266,11 @@ def _merge(Doc doc, merges): span_index += 1 if span_index < len(spans) and i == spans[span_index].start: # First token in a span - doc.c[i - offset] = doc.c[i] # move token to its place + doc.c[i - offset] = doc.c[i] # move token to its place offset += (spans[span_index].end - spans[span_index].start) - 1 in_span = True if not in_span: - doc.c[i - offset] = doc.c[i] # move token to its place + doc.c[i - offset] = doc.c[i] # move token to its place for i in range(doc.length - offset, doc.length): memset(&doc.c[i], 0, sizeof(TokenC)) @@ -345,7 +344,11 @@ def _split(Doc doc, int token_index, orths, heads, attrs): if to_process_tensor: xp = get_array_module(doc.tensor) if xp is numpy: - doc.tensor = xp.append(doc.tensor, xp.zeros((nb_subtokens,doc.tensor.shape[1]), dtype="float32"), axis=0) + doc.tensor = xp.append( + doc.tensor, + xp.zeros((nb_subtokens, doc.tensor.shape[1]), dtype="float32"), + axis=0 + ) else: shape = (doc.tensor.shape[0] + nb_subtokens, doc.tensor.shape[1]) resized_array = xp.zeros(shape, dtype="float32") @@ -367,7 +370,8 @@ def _split(Doc doc, int token_index, orths, heads, attrs): token.norm = 0 # reset norm if to_process_tensor: # setting the tensors of the split tokens to array of zeros - doc.tensor[token_index + i:token_index + i + 1] = xp.zeros((1,doc.tensor.shape[1]), dtype="float32") + doc.tensor[token_index + i:token_index + i + 1] = \ + xp.zeros((1, doc.tensor.shape[1]), dtype="float32") # Update the character offset of the subtokens if i != 0: token.idx = orig_token.idx + idx_offset @@ -455,7 +459,6 @@ def normalize_token_attrs(Vocab vocab, attrs): def set_token_attrs(Token py_token, attrs): cdef TokenC* token = py_token.c cdef const LexemeC* lex = token.lex - cdef Doc doc = py_token.doc # Assign attributes for attr_name, attr_value in attrs.items(): if attr_name == "_": # Set extension attributes diff --git a/spacy/tokens/_serialize.py b/spacy/tokens/_serialize.py index 73c857d1f..873d85835 100644 --- a/spacy/tokens/_serialize.py +++ b/spacy/tokens/_serialize.py @@ -1,22 +1,20 @@ -from typing import List, Dict, Set, Iterable, Iterator, Union, Optional -from pathlib import Path -import numpy -from numpy import ndarray import zlib +from pathlib import Path +from typing import Dict, Iterable, Iterator, List, Optional, Set, Union + +import numpy import srsly +from numpy import ndarray from thinc.api import NumpyOps -from .doc import Doc -from ..vocab import Vocab +from ..attrs import IDS, ORTH, SPACY, intify_attr from ..compat import copy_reg -from ..attrs import SPACY, ORTH, intify_attr, IDS from ..errors import Errors -from ..util import ensure_path, SimpleFrozenList +from ..util import SimpleFrozenList, ensure_path +from ..vocab import Vocab from ._dict_proxies import SpanGroups - -# fmt: off -ALL_ATTRS = ("ORTH", "NORM", "TAG", "HEAD", "DEP", "ENT_IOB", "ENT_TYPE", "ENT_KB_ID", "ENT_ID", "LEMMA", "MORPH", "POS", "SENT_START") -# fmt: on +from .doc import DOCBIN_ALL_ATTRS as ALL_ATTRS +from .doc import Doc class DocBin: diff --git a/spacy/tokens/doc.pxd b/spacy/tokens/doc.pxd index 57d087958..d9719609c 100644 --- a/spacy/tokens/doc.pxd +++ b/spacy/tokens/doc.pxd @@ -1,10 +1,10 @@ -from cymem.cymem cimport Pool cimport numpy as np +from cymem.cymem cimport Pool -from ..vocab cimport Vocab -from ..structs cimport TokenC, LexemeC, SpanC -from ..typedefs cimport attr_t from ..attrs cimport attr_id_t +from ..structs cimport LexemeC, SpanC, TokenC +from ..typedefs cimport attr_t +from ..vocab cimport Vocab cdef attr_t 
get_token_attr(const TokenC* token, attr_id_t feat_name) nogil @@ -31,7 +31,7 @@ cdef int token_by_start(const TokenC* tokens, int length, int start_char) except cdef int token_by_end(const TokenC* tokens, int length, int end_char) except -2 -cdef int [:,:] _get_lca_matrix(Doc, int start, int end) +cdef int [:, :] _get_lca_matrix(Doc, int start, int end) cdef class Doc: @@ -61,7 +61,6 @@ cdef class Doc: cdef int length cdef int max_length - cdef public object noun_chunks_iterator cdef object __weakref__ diff --git a/spacy/tokens/doc.pyi b/spacy/tokens/doc.pyi index 9d45960ab..f0b68862c 100644 --- a/spacy/tokens/doc.pyi +++ b/spacy/tokens/doc.pyi @@ -1,16 +1,32 @@ -from typing import Callable, Protocol, Iterable, Iterator, Optional -from typing import Union, Tuple, List, Dict, Any, overload +from pathlib import Path +from typing import ( + Any, + Callable, + Dict, + Iterable, + Iterator, + List, + Optional, + Protocol, + Sequence, + Tuple, + Union, + overload, +) + +import numpy as np from cymem.cymem import Pool from thinc.types import Floats1d, Floats2d, Ints2d -from .span import Span -from .token import Token -from ._dict_proxies import SpanGroups -from ._retokenize import Retokenizer + from ..lexeme import Lexeme from ..vocab import Vocab +from ._dict_proxies import SpanGroups +from ._retokenize import Retokenizer +from .span import Span +from .token import Token from .underscore import Underscore -from pathlib import Path -import numpy as np + +DOCBIN_ALL_ATTRS: Tuple[str, ...] class DocMethod(Protocol): def __call__(self: Doc, *args: Any, **kwargs: Any) -> Any: ... # type: ignore[misc] @@ -26,7 +42,7 @@ class Doc: user_hooks: Dict[str, Callable[..., Any]] user_token_hooks: Dict[str, Callable[..., Any]] user_span_hooks: Dict[str, Callable[..., Any]] - tensor: np.ndarray[Any, np.dtype[np.float_]] + tensor: np.ndarray[Any, np.dtype[np.float64]] user_data: Dict[str, Any] has_unknown_spaces: bool _context: Any @@ -109,7 +125,7 @@ class Doc: vector: Optional[Floats1d] = ..., alignment_mode: str = ..., span_id: Union[int, str] = ..., - ) -> Span: ... + ) -> Optional[Span]: ... def similarity(self, other: Union[Doc, Span, Token, Lexeme]) -> float: ... @property def has_vector(self) -> bool: ... @@ -119,7 +135,12 @@ class Doc: def text(self) -> str: ... @property def text_with_ws(self) -> str: ... - ents: Tuple[Span] + # Ideally the getter would output Tuple[Span] + # see https://github.com/python/mypy/issues/3004 + @property + def ents(self) -> Sequence[Span]: ... + @ents.setter + def ents(self, value: Sequence[Span]) -> None: ... def set_ents( self, entities: List[Span], @@ -145,7 +166,7 @@ class Doc: ) -> Doc: ... def to_array( self, py_attr_ids: Union[int, str, List[Union[int, str]]] - ) -> np.ndarray[Any, np.dtype[np.float_]]: ... + ) -> np.ndarray[Any, np.dtype[np.float64]]: ... @staticmethod def from_docs( docs: List[Doc], @@ -158,15 +179,13 @@ class Doc: self, path: Union[str, Path], *, exclude: Iterable[str] = ... ) -> None: ... def from_disk( - self, path: Union[str, Path], *, exclude: Union[List[str], Tuple[str]] = ... + self, path: Union[str, Path], *, exclude: Iterable[str] = ... ) -> Doc: ... - def to_bytes(self, *, exclude: Union[List[str], Tuple[str]] = ...) -> bytes: ... - def from_bytes( - self, bytes_data: bytes, *, exclude: Union[List[str], Tuple[str]] = ... - ) -> Doc: ... - def to_dict(self, *, exclude: Union[List[str], Tuple[str]] = ...) -> bytes: ... + def to_bytes(self, *, exclude: Iterable[str] = ...) -> bytes: ... 
+ def from_bytes(self, bytes_data: bytes, *, exclude: Iterable[str] = ...) -> Doc: ... + def to_dict(self, *, exclude: Iterable[str] = ...) -> Dict[str, Any]: ... def from_dict( - self, msg: bytes, *, exclude: Union[List[str], Tuple[str]] = ... + self, msg: Dict[str, Any], *, exclude: Iterable[str] = ... ) -> Doc: ... def extend_tensor(self, tensor: Floats2d) -> None: ... def retokenize(self) -> Retokenizer: ... diff --git a/spacy/tokens/doc.pyx b/spacy/tokens/doc.pyx index 6c196ad78..4d6249569 100644 --- a/spacy/tokens/doc.pyx +++ b/spacy/tokens/doc.pyx @@ -1,47 +1,69 @@ -# cython: infer_types=True, bounds_check=False, profile=True +# cython: infer_types=True, bounds_check=False from typing import Set cimport cython cimport numpy as np -from libc.string cimport memcpy from libc.math cimport sqrt from libc.stdint cimport int32_t, uint64_t +from libc.string cimport memcpy import copy +import itertools +import warnings from collections import Counter, defaultdict from enum import Enum -import itertools + import numpy import srsly from thinc.api import get_array_module, get_current_ops from thinc.util import copy_array -import warnings from .span cimport Span from .token cimport MISSING_DEP -from ._dict_proxies import SpanGroups -from .token cimport Token -from ..lexeme cimport Lexeme, EMPTY_LEXEME -from ..typedefs cimport attr_t, flags_t -from ..attrs cimport attr_id_t -from ..attrs cimport LENGTH, POS, LEMMA, TAG, MORPH, DEP, HEAD, SPACY, ENT_IOB -from ..attrs cimport ENT_TYPE, ENT_ID, ENT_KB_ID, SENT_START, IDX, NORM -from ..attrs import intify_attr, IDS -from ..compat import copy_reg, pickle +from ._dict_proxies import SpanGroups + +from ..attrs cimport ( + DEP, + ENT_ID, + ENT_IOB, + ENT_KB_ID, + ENT_TYPE, + HEAD, + IDX, + LEMMA, + LENGTH, + MORPH, + NORM, + ORTH, + POS, + SENT_START, + SPACY, + TAG, + attr_id_t, +) +from ..lexeme cimport EMPTY_LEXEME, Lexeme +from ..typedefs cimport attr_t +from .token cimport Token + +from .. import parts_of_speech, schemas, util +from ..attrs import IDS, intify_attr +from ..compat import copy_reg from ..errors import Errors, Warnings -from ..morphology import Morphology -from .. import util -from .. import parts_of_speech -from .. import schemas -from .underscore import Underscore, get_ext_args -from ._retokenize import Retokenizer -from ._serialize import ALL_ATTRS as DOCBIN_ALL_ATTRS from ..util import get_words_and_spaces +from ._retokenize import Retokenizer +from .underscore import Underscore, get_ext_args DEF PADDING = 5 +# We store the docbin attrs here rather than in _serialize to avoid +# import cycles. 
+ +# fmt: off +DOCBIN_ALL_ATTRS = ("ORTH", "NORM", "TAG", "HEAD", "DEP", "ENT_IOB", "ENT_TYPE", "ENT_KB_ID", "ENT_ID", "LEMMA", "MORPH", "POS", "SENT_START") +# fmt: on + cdef int bounds_check(int i, int length, int padding) except -1: if (i + padding) < 0: raise IndexError(Errors.E026.format(i=i, length=length)) @@ -591,13 +613,26 @@ cdef class Doc: """ if "similarity" in self.user_hooks: return self.user_hooks["similarity"](self, other) - if isinstance(other, (Lexeme, Token)) and self.length == 1: - if self.c[0].lex.orth == other.orth: + attr = getattr(self.vocab.vectors, "attr", ORTH) + cdef Token this_token + cdef Token other_token + cdef Lexeme other_lex + if len(self) == 1 and isinstance(other, Token): + this_token = self[0] + other_token = other + if Token.get_struct_attr(this_token.c, attr) == Token.get_struct_attr(other_token.c, attr): return 1.0 - elif isinstance(other, (Span, Doc)) and len(self) == len(other): + elif len(self) == 1 and isinstance(other, Lexeme): + this_token = self[0] + other_lex = other + if Token.get_struct_attr(this_token.c, attr) == Lexeme.get_struct_attr(other_lex.c, attr): + return 1.0 + elif isinstance(other, (Doc, Span)) and len(self) == len(other): similar = True - for i in range(self.length): - if self[i].orth != other[i].orth: + for i in range(len(self)): + this_token = self[i] + other_token = other[i] + if Token.get_struct_attr(this_token.c, attr) != Token.get_struct_attr(other_token.c, attr): similar = False break if similar: @@ -632,7 +667,8 @@ cdef class Doc: else: return False - property vector: + @property + def vector(self): """A real-valued meaning representation. Defaults to an average of the token vectors. @@ -641,48 +677,49 @@ cdef class Doc: DOCS: https://spacy.io/api/doc#vector """ - def __get__(self): - if "vector" in self.user_hooks: - return self.user_hooks["vector"](self) - if self._vector is not None: - return self._vector - xp = get_array_module(self.vocab.vectors.data) - if not len(self): - self._vector = xp.zeros((self.vocab.vectors_length,), dtype="f") - return self._vector - elif self.vocab.vectors.size > 0: - self._vector = sum(t.vector for t in self) / len(self) - return self._vector - elif self.tensor.size > 0: - self._vector = self.tensor.mean(axis=0) - return self._vector - else: - return xp.zeros((self.vocab.vectors_length,), dtype="float32") + if "vector" in self.user_hooks: + return self.user_hooks["vector"](self) + if self._vector is not None: + return self._vector + xp = get_array_module(self.vocab.vectors.data) + if not len(self): + self._vector = xp.zeros((self.vocab.vectors_length,), dtype="f") + return self._vector + elif self.vocab.vectors.size > 0: + self._vector = sum(t.vector for t in self) / len(self) + return self._vector + elif self.tensor.size > 0: + self._vector = self.tensor.mean(axis=0) + return self._vector + else: + return xp.zeros((self.vocab.vectors_length,), dtype="float32") - def __set__(self, value): - self._vector = value + @vector.setter + def vector(self, value): + self._vector = value - property vector_norm: + @property + def vector_norm(self): """The L2 norm of the document's vector representation. RETURNS (float): The L2 norm of the vector representation. 
DOCS: https://spacy.io/api/doc#vector_norm """ - def __get__(self): - if "vector_norm" in self.user_hooks: - return self.user_hooks["vector_norm"](self) - cdef float value - cdef double norm = 0 - if self._vector_norm is None: - norm = 0.0 - for value in self.vector: - norm += value * value - self._vector_norm = sqrt(norm) if norm != 0 else 0 - return self._vector_norm + if "vector_norm" in self.user_hooks: + return self.user_hooks["vector_norm"](self) + cdef float value + cdef double norm = 0 + if self._vector_norm is None: + norm = 0.0 + for value in self.vector: + norm += value * value + self._vector_norm = sqrt(norm) if norm != 0 else 0 + return self._vector_norm - def __set__(self, value): - self._vector_norm = value + @vector_norm.setter + def vector_norm(self, value): + self._vector_norm = value @property def text(self): @@ -701,7 +738,8 @@ cdef class Doc: """ return self.text - property ents: + @property + def ents(self): """The named entities in the document. Returns a tuple of named entity `Span` objects, if the entity recognizer has been applied. @@ -709,55 +747,55 @@ cdef class Doc: DOCS: https://spacy.io/api/doc#ents """ - def __get__(self): - cdef int i - cdef const TokenC* token - cdef int start = -1 - cdef attr_t label = 0 - cdef attr_t kb_id = 0 - cdef attr_t ent_id = 0 - output = [] - for i in range(self.length): - token = &self.c[i] - if token.ent_iob == 1: - if start == -1: - seq = [f"{t.text}|{t.ent_iob_}" for t in self[i-5:i+5]] - raise ValueError(Errors.E093.format(seq=" ".join(seq))) - elif token.ent_iob == 2 or token.ent_iob == 0 or \ - (token.ent_iob == 3 and token.ent_type == 0): - if start != -1: - output.append(Span(self, start, i, label=label, kb_id=kb_id, span_id=ent_id)) - start = -1 - label = 0 - kb_id = 0 - ent_id = 0 - elif token.ent_iob == 3: - if start != -1: - output.append(Span(self, start, i, label=label, kb_id=kb_id, span_id=ent_id)) - start = i - label = token.ent_type - kb_id = token.ent_kb_id - ent_id = token.ent_id - if start != -1: - output.append(Span(self, start, self.length, label=label, kb_id=kb_id, span_id=ent_id)) - # remove empty-label spans - output = [o for o in output if o.label_ != ""] - return tuple(output) + cdef int i + cdef const TokenC* token + cdef int start = -1 + cdef attr_t label = 0 + cdef attr_t kb_id = 0 + cdef attr_t ent_id = 0 + output = [] + for i in range(self.length): + token = &self.c[i] + if token.ent_iob == 1: + if start == -1: + seq = [f"{t.text}|{t.ent_iob_}" for t in self[i-5:i+5]] + raise ValueError(Errors.E093.format(seq=" ".join(seq))) + elif token.ent_iob == 2 or token.ent_iob == 0 or \ + (token.ent_iob == 3 and token.ent_type == 0): + if start != -1: + output.append(Span(self, start, i, label=label, kb_id=kb_id, span_id=ent_id)) + start = -1 + label = 0 + kb_id = 0 + ent_id = 0 + elif token.ent_iob == 3: + if start != -1: + output.append(Span(self, start, i, label=label, kb_id=kb_id, span_id=ent_id)) + start = i + label = token.ent_type + kb_id = token.ent_kb_id + ent_id = token.ent_id + if start != -1: + output.append(Span(self, start, self.length, label=label, kb_id=kb_id, span_id=ent_id)) + # remove empty-label spans + output = [o for o in output if o.label_ != ""] + return tuple(output) - def __set__(self, ents): - # TODO: - # 1. Test basic data-driven ORTH gazetteer - # 2. 
Test more nuanced date and currency regex - cdef attr_t entity_type, kb_id, ent_id - cdef int ent_start, ent_end - ent_spans = [] - for ent_info in ents: - entity_type_, kb_id, ent_start, ent_end, ent_id = get_entity_info(ent_info) - if isinstance(entity_type_, str): - self.vocab.strings.add(entity_type_) - span = Span(self, ent_start, ent_end, label=entity_type_, kb_id=kb_id, span_id=ent_id) - ent_spans.append(span) - self.set_ents(ent_spans, default=SetEntsDefault.outside) + @ents.setter + def ents(self, ents): + # TODO: + # 1. Test basic data-driven ORTH gazetteer + # 2. Test more nuanced date and currency regex + cdef attr_t kb_id, ent_id + cdef int ent_start, ent_end + ent_spans = [] + for ent_info in ents: + entity_type_, kb_id, ent_start, ent_end, ent_id = get_entity_info(ent_info) + if isinstance(entity_type_, str): + self.vocab.strings.add(entity_type_) + span = Span(self, ent_start, ent_end, label=entity_type_, kb_id=kb_id, span_id=ent_id) + ent_spans.append(span) + self.set_ents(ent_spans, default=SetEntsDefault.outside) def set_ents(self, entities, *, blocked=None, missing=None, outside=None, default=SetEntsDefault.outside): """Set entity annotation. @@ -951,7 +989,6 @@ cdef class Doc: >>> np_array = doc.to_array([LOWER, POS, ENT_TYPE, IS_ALPHA]) """ cdef int i, j - cdef attr_id_t feature cdef np.ndarray[attr_t, ndim=2] output # Handle scalar/list inputs of strings/ints for py_attr_ids # See also #3064 @@ -963,8 +1000,10 @@ cdef class Doc: py_attr_ids = [py_attr_ids] # Allow strings, e.g. 'lemma' or 'LEMMA' try: - py_attr_ids = [(IDS[id_.upper()] if hasattr(id_, "upper") else id_) - for id_ in py_attr_ids] + py_attr_ids = [ + (IDS[id_.upper()] if hasattr(id_, "upper") else id_) + for id_ in py_attr_ids + ] except KeyError as msg: keys = [k for k in IDS.keys() if not k.startswith("FLAG")] raise KeyError(Errors.E983.format(dict="IDS", key=msg, keys=keys)) from None @@ -994,8 +1033,6 @@ cdef class Doc: DOCS: https://spacy.io/api/doc#count_by """ cdef int i - cdef attr_t attr - cdef size_t count if counts is None: counts = Counter() @@ -1057,7 +1094,6 @@ cdef class Doc: cdef int i, col cdef int32_t abs_head_index cdef attr_id_t attr_id - cdef TokenC* tokens = self.c cdef int length = len(array) if length != len(self): raise ValueError(Errors.E971.format(array_length=length, doc_length=len(self))) @@ -1189,7 +1225,7 @@ cdef class Doc: span.label, span.kb_id, span.id, - span.text, # included as a check + span.text, # included as a check )) char_offset += len(doc.text) if len(doc) > 0 and ensure_whitespace and not doc[-1].is_space and not bool(doc[-1].whitespace_): @@ -1293,7 +1329,7 @@ cdef class Doc: path (str / Path): A path to a directory. Paths may be either strings or `Path`-like objects. - exclude (list): String names of serialization fields to exclude. + exclude (Iterable[str]): String names of serialization fields to exclude. RETURNS (Doc): The modified `Doc` object. DOCS: https://spacy.io/api/doc#from_disk @@ -1306,7 +1342,7 @@ cdef class Doc: def to_bytes(self, *, exclude=tuple()): """Serialize, i.e. export the document contents to a binary string. - exclude (list): String names of serialization fields to exclude. + exclude (Iterable[str]): String names of serialization fields to exclude. RETURNS (bytes): A losslessly serialized copy of the `Doc`, including all annotations. @@ -1318,7 +1354,7 @@ cdef class Doc: """Deserialize, i.e. import the document contents from a binary string. data (bytes): The string to load from. 
- exclude (list): String names of serialization fields to exclude. + exclude (Iterable[str]): String names of serialization fields to exclude. RETURNS (Doc): Itself. DOCS: https://spacy.io/api/doc#from_bytes @@ -1328,11 +1364,8 @@ cdef class Doc: def to_dict(self, *, exclude=tuple()): """Export the document contents to a dictionary for serialization. - exclude (list): String names of serialization fields to exclude. - RETURNS (bytes): A losslessly serialized copy of the `Doc`, including - all annotations. - - DOCS: https://spacy.io/api/doc#to_bytes + exclude (Iterable[str]): String names of serialization fields to exclude. + RETURNS (Dict[str, Any]): A dictionary representation of the `Doc` """ array_head = Doc._get_array_attrs() strings = set() @@ -1378,13 +1411,11 @@ cdef class Doc: return util.to_dict(serializers, exclude) def from_dict(self, msg, *, exclude=tuple()): - """Deserialize, i.e. import the document contents from a binary string. + """Deserialize the document contents from a dictionary representation. - data (bytes): The string to load from. - exclude (list): String names of serialization fields to exclude. + msg (Dict[str, Any]): The dictionary to load from. + exclude (Iterable[str]): String names of serialization fields to exclude. RETURNS (Doc): Itself. - - DOCS: https://spacy.io/api/doc#from_dict """ if self.length != 0: raise ValueError(Errors.E033.format(length=self.length)) @@ -1472,7 +1503,6 @@ cdef class Doc: attributes are inherited from the syntactic root of the span. RETURNS (Token): The first newly merged token. """ - cdef str tag, lemma, ent_type attr_len = len(attributes) span_len = len(spans) if not attr_len == span_len: @@ -1588,7 +1618,6 @@ cdef class Doc: for token in char_span[1:]: token.is_sent_start = False - for span_group in doc_json.get("spans", {}): spans = [] for span in doc_json["spans"][span_group]: @@ -1620,7 +1649,7 @@ cdef class Doc: start = token_by_char(self.c, self.length, token_data["start"]) value = token_data["value"] self[start]._.set(token_attr, value) - + for span_attr in doc_json.get("underscore_span", {}): if not Span.has_extension(span_attr): Span.set_extension(span_attr) @@ -1662,7 +1691,7 @@ cdef class Doc: token_data["dep"] = token.dep_ token_data["head"] = token.head.i data["tokens"].append(token_data) - + if self.spans: data["spans"] = {} for span_group in self.spans: @@ -1733,7 +1762,6 @@ cdef class Doc: output.fill(255) cdef int i, j, start_idx, end_idx cdef bytes byte_string - cdef unsigned char utf8_char for i, byte_string in enumerate(byte_strings): j = 0 start_idx = 0 @@ -1786,8 +1814,6 @@ cdef int token_by_char(const TokenC* tokens, int length, int char_idx) except -2 cdef int set_children_from_heads(TokenC* tokens, int start, int end) except -1: # note: end is exclusive - cdef TokenC* head - cdef TokenC* child cdef int i # Set number of left/right children to 0. We'll increment it in the loops. for i in range(start, end): @@ -1887,7 +1913,7 @@ cdef int _get_tokens_lca(Token token_j, Token token_k): return -1 -cdef int [:,:] _get_lca_matrix(Doc doc, int start, int end): +cdef int [:, :] _get_lca_matrix(Doc doc, int start, int end): """Given a doc and a start and end position defining a set of contiguous tokens within it, returns a matrix of Lowest Common Ancestors (LCA), where LCA[i, j] is the index of the lowest common ancestor among token i and j. 
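The docstring fixes above make explicit that `Doc.to_dict` and `Doc.from_dict` exchange a plain dictionary rather than a byte string, and that `exclude` accepts any iterable of field names. A minimal round-trip sketch, assuming a blank English pipeline (the text is illustrative; `from_dict` must be called on an empty `Doc`, as the `E033` check above enforces):

    import spacy
    from spacy.tokens import Doc

    nlp = spacy.blank("en")
    doc = nlp("Berlin is a city")

    msg = doc.to_dict()                      # Dict[str, Any], not bytes
    restored = Doc(nlp.vocab).from_dict(msg)
    assert restored.text == doc.text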
@@ -1900,7 +1926,7 @@ cdef int [:,:] _get_lca_matrix(Doc doc, int start, int end): RETURNS (int [:, :]): memoryview of numpy.array[ndim=2, dtype=numpy.int32], with shape (n, n), where n = len(doc). """ - cdef int [:,:] lca_matrix + cdef int [:, :] lca_matrix cdef int j, k n_tokens= end - start lca_mat = numpy.empty((n_tokens, n_tokens), dtype=numpy.int32) diff --git a/spacy/tokens/graph.pxd b/spacy/tokens/graph.pxd index 6f2f80656..083ef6522 100644 --- a/spacy/tokens/graph.pxd +++ b/spacy/tokens/graph.pxd @@ -1,7 +1,8 @@ -from libcpp.vector cimport vector from cymem.cymem cimport Pool +from libcpp.vector cimport vector from preshed.maps cimport PreshMap -from ..structs cimport GraphC, EdgeC + +from ..structs cimport EdgeC, GraphC cdef class Graph: diff --git a/spacy/tokens/graph.pyx b/spacy/tokens/graph.pyx index adc4d23c8..6c4ce6ce3 100644 --- a/spacy/tokens/graph.pyx +++ b/spacy/tokens/graph.pyx @@ -1,19 +1,26 @@ # cython: infer_types=True, cdivision=True, boundscheck=False, binding=True -from typing import List, Tuple, Generator -from libc.stdint cimport int32_t, int64_t +# cython: profile=False +from typing import Generator, List, Tuple + +cimport cython +from cython.operator cimport dereference +from libc.stdint cimport int32_t from libcpp.pair cimport pair from libcpp.unordered_map cimport unordered_map from libcpp.unordered_set cimport unordered_set -from cython.operator cimport dereference -cimport cython + import weakref -from preshed.maps cimport map_get_unless_missing + from murmurhash.mrmr cimport hash64 from .. import Errors + from ..typedefs cimport hash_t + from ..strings import get_string_id + from ..structs cimport EdgeC, GraphC + from .token import Token @@ -21,7 +28,7 @@ from .token import Token cdef class Edge: cdef readonly Graph graph cdef readonly int i - + def __init__(self, Graph graph, int i): self.graph = graph self.i = i @@ -37,7 +44,7 @@ cdef class Edge: @property def head(self) -> "Node": return Node(self.graph, self.graph.c.edges[self.i].head) - + @property def tail(self) -> "Tail": return Node(self.graph, self.graph.c.edges[self.i].tail) @@ -63,7 +70,7 @@ cdef class Node: def __init__(self, Graph graph, int i): """A reference to a node of an annotation graph. Each node is made up of an ordered set of zero or more token indices. - + Node references are usually created by the Graph object itself, or from the Node or Edge objects. You usually won't need to instantiate this class yourself. @@ -102,13 +109,13 @@ cdef class Node: @property def is_none(self) -> bool: """Whether the node is a special value, indicating 'none'. - + The NoneNode type is returned by the Graph, Edge and Node objects when there is no match to a query. It has the same API as Node, but it always returns NoneNode, NoneEdge or empty lists for its queries. """ return False - + @property def doc(self) -> "Doc": """The Doc object that the graph refers to.""" @@ -123,19 +130,19 @@ cdef class Node: def head(self, i=None, label=None) -> "Node": """Get the head of the first matching edge, searching by index, label, both or neither. - + For instance, `node.head(i=1)` will get the head of the second edge that this node is a tail of. `node.head(i=1, label="ARG0")` will further check that the second edge has the label `"ARG0"`. - + If no matching node can be found, the graph's NoneNode is returned. """ return self.headed(i=i, label=label) - + def tail(self, i=None, label=None) -> "Node": """Get the tail of the first matching edge, searching by index, label, both or neither. 
- + If no matching node can be found, the graph's NoneNode is returned. """ return self.tailed(i=i, label=label).tail @@ -164,7 +171,7 @@ cdef class Node: cdef vector[int] edge_indices self._find_edges(edge_indices, "head", label) return [Node(self.graph, self.graph.c.edges[i].head) for i in edge_indices] - + def tails(self, label=None) -> List["Node"]: """Find all matching tails of this node.""" cdef vector[int] edge_indices @@ -193,7 +200,7 @@ cdef class Node: return NoneEdge(self.graph) else: return Edge(self.graph, idx) - + def tailed(self, i=None, label=None) -> Edge: """Find the first matching edge tailed by this node. If no matching edge can be found, the graph's NoneEdge is returned. @@ -276,7 +283,7 @@ cdef class NoneEdge(Edge): def __init__(self, graph): self.graph = graph self.i = -1 - + @property def doc(self) -> "Doc": return self.graph.doc @@ -284,7 +291,7 @@ cdef class NoneEdge(Edge): @property def head(self) -> "NoneNode": return NoneNode(self.graph) - + @property def tail(self) -> "NoneNode": return NoneNode(self.graph) @@ -312,7 +319,7 @@ cdef class NoneNode(Node): def __len__(self): return 0 - + @property def is_none(self): return -1 @@ -333,14 +340,14 @@ cdef class NoneNode(Node): def walk_heads(self): yield from [] - + def walk_tails(self): yield from [] - + cdef class Graph: """A set of directed labelled relationships between sets of tokens. - + EXAMPLE: Construction 1 >>> graph = Graph(doc, name="srl") @@ -365,7 +372,9 @@ cdef class Graph: >>> assert graph.has_node((0,)) >>> assert graph.has_edge((0,), (1,3), label="agent") """ - def __init__(self, doc, *, name="", nodes=[], edges=[], labels=None, weights=None): + def __init__( + self, doc, *, name="", nodes=[], edges=[], labels=None, weights=None # no-cython-lint + ): """Create a Graph object. doc (Doc): The Doc object the graph will refer to. @@ -431,13 +440,11 @@ cdef class Graph: def add_edge(self, head, tail, *, label="", weight=None) -> Edge: """Add an edge to the graph, connecting two groups of tokens. - + If there is already an edge for the (head, tail, label) triple, it will be returned, and no new edge will be created. The weight of the edge will be updated if a weight is specified. """ - label_hash = self.doc.vocab.strings.as_int(label) - weight_float = weight if weight is not None else 0.0 edge_index = add_edge( &self.c, EdgeC( @@ -471,11 +478,11 @@ cdef class Graph: def has_edge(self, head, tail, label) -> bool: """Check whether a (head, tail, label) triple is an edge in the graph.""" return not self.get_edge(head, tail, label=label).is_none - + def add_node(self, indices) -> Node: """Add a node to the graph and return it. Nodes refer to ordered sets of token indices. - + This method is idempotent: if there is already a node for the given indices, it is returned without a new node being created. 
""" @@ -503,7 +510,7 @@ cdef class Graph: return NoneNode(self) else: return Node(self, node_index) - + def has_node(self, tuple indices) -> bool: """Check whether the graph has a node for the given indices.""" return not self.get_node(indices).is_none @@ -563,7 +570,7 @@ cdef int add_node(GraphC* graph, vector[int32_t]& node) nogil: graph.roots.insert(index) graph.node_map.insert(pair[hash_t, int](key, index)) return index - + cdef int get_node(const GraphC* graph, vector[int32_t] node) nogil: key = hash64(&node[0], node.size() * sizeof(node[0]), 0) diff --git a/spacy/tokens/morphanalysis.pxd b/spacy/tokens/morphanalysis.pxd index 9510875c9..728f0aaf7 100644 --- a/spacy/tokens/morphanalysis.pxd +++ b/spacy/tokens/morphanalysis.pxd @@ -1,6 +1,6 @@ -from ..vocab cimport Vocab -from ..typedefs cimport hash_t from ..structs cimport MorphAnalysisC +from ..typedefs cimport hash_t +from ..vocab cimport Vocab cdef class MorphAnalysis: diff --git a/spacy/tokens/morphanalysis.pyi b/spacy/tokens/morphanalysis.pyi index a5376e80d..b35ff36aa 100644 --- a/spacy/tokens/morphanalysis.pyi +++ b/spacy/tokens/morphanalysis.pyi @@ -1,4 +1,5 @@ from typing import Any, Dict, Iterator, List, Optional, Union + from ..vocab import Vocab class MorphAnalysis: diff --git a/spacy/tokens/morphanalysis.pyx b/spacy/tokens/morphanalysis.pyx index baa3800a1..ea5d07fa4 100644 --- a/spacy/tokens/morphanalysis.pyx +++ b/spacy/tokens/morphanalysis.pyx @@ -1,11 +1,13 @@ -from libc.string cimport memset +# cython: profile=False cimport numpy as np +from libc.string cimport memset from ..errors import Errors from ..morphology import Morphology + +from ..morphology cimport check_feature, get_by_field, list_features +from ..typedefs cimport attr_t, hash_t from ..vocab cimport Vocab -from ..typedefs cimport hash_t, attr_t -from ..morphology cimport list_features, check_feature, get_by_field cdef class MorphAnalysis: @@ -88,4 +90,3 @@ cdef class MorphAnalysis: def __repr__(self): return self.to_json() - diff --git a/spacy/tokens/span.pxd b/spacy/tokens/span.pxd index 78bee0a8c..d77bbea70 100644 --- a/spacy/tokens/span.pxd +++ b/spacy/tokens/span.pxd @@ -1,8 +1,8 @@ cimport numpy as np -from .doc cimport Doc -from ..typedefs cimport attr_t from ..structs cimport SpanC +from ..typedefs cimport attr_t +from .doc cimport Doc cdef class Span: diff --git a/spacy/tokens/span.pyx b/spacy/tokens/span.pyx index 29b8ce703..64b8d7c6c 100644 --- a/spacy/tokens/span.pyx +++ b/spacy/tokens/span.pyx @@ -1,22 +1,23 @@ +# cython: profile=False cimport numpy as np -from libc.math cimport sqrt + +import copy +import warnings import numpy from thinc.api import get_array_module -import warnings -import copy -from .doc cimport token_by_start, token_by_end, get_token_attr, _get_lca_matrix -from ..structs cimport TokenC, LexemeC -from ..typedefs cimport flags_t, attr_t, hash_t -from ..attrs cimport attr_id_t -from ..parts_of_speech cimport univ_pos_t from ..attrs cimport * +from ..attrs cimport ORTH, attr_id_t from ..lexeme cimport Lexeme +from ..structs cimport TokenC from ..symbols cimport dep +from ..typedefs cimport attr_t, hash_t +from .doc cimport _get_lca_matrix, get_token_attr +from .token cimport Token -from ..util import normalize_slice from ..errors import Errors, Warnings +from ..util import normalize_slice from .underscore import Underscore, get_ext_args @@ -126,14 +127,17 @@ cdef class Span: self._vector = vector self._vector_norm = vector_norm - def __richcmp__(self, Span other, int op): + def __richcmp__(self, object other, int op): if other 
is None: if op == 0 or op == 1 or op == 2: return False else: return True + if not isinstance(other, Span): + return False + cdef Span other_span = other self_tuple = (self.c.start_char, self.c.end_char, self.c.label, self.c.kb_id, self.id, self.doc) - other_tuple = (other.c.start_char, other.c.end_char, other.c.label, other.c.kb_id, other.id, other.doc) + other_tuple = (other_span.c.start_char, other_span.c.end_char, other_span.c.label, other_span.c.kb_id, other_span.id, other_span.doc) # < if op == 0: return self_tuple < other_tuple @@ -340,13 +344,26 @@ cdef class Span: """ if "similarity" in self.doc.user_span_hooks: return self.doc.user_span_hooks["similarity"](self, other) - if len(self) == 1 and hasattr(other, "orth"): - if self[0].orth == other.orth: + attr = getattr(self.doc.vocab.vectors, "attr", ORTH) + cdef Token this_token + cdef Token other_token + cdef Lexeme other_lex + if len(self) == 1 and isinstance(other, Token): + this_token = self[0] + other_token = other + if Token.get_struct_attr(this_token.c, attr) == Token.get_struct_attr(other_token.c, attr): + return 1.0 + elif len(self) == 1 and isinstance(other, Lexeme): + this_token = self[0] + other_lex = other + if Token.get_struct_attr(this_token.c, attr) == Lexeme.get_struct_attr(other_lex.c, attr): return 1.0 elif isinstance(other, (Doc, Span)) and len(self) == len(other): similar = True for i in range(len(self)): - if self[i].orth != getattr(other[i], "orth", None): + this_token = self[i] + other_token = other[i] + if Token.get_struct_attr(this_token.c, attr) != Token.get_struct_attr(other_token.c, attr): similar = False break if similar: @@ -580,7 +597,6 @@ cdef class Span: """ return "".join([t.text_with_ws for t in self]) - @property def noun_chunks(self): """Iterate over the base noun phrases in the span. 
Yields base @@ -741,78 +757,87 @@ cdef class Span: for word in self.rights: yield from word.subtree - property start: - def __get__(self): - return self.c.start + @property + def start(self): + return self.c.start - def __set__(self, int start): - if start < 0: - raise IndexError(Errors.E1032.format(var="start", forbidden="< 0", value=start)) - self.c.start = start + @start.setter + def start(self, int start): + if start < 0: + raise IndexError(Errors.E1032.format(var="start", forbidden="< 0", value=start)) + self.c.start = start - property end: - def __get__(self): - return self.c.end + @property + def end(self): + return self.c.end - def __set__(self, int end): - if end < 0: - raise IndexError(Errors.E1032.format(var="end", forbidden="< 0", value=end)) - self.c.end = end + @end.setter + def end(self, int end): + if end < 0: + raise IndexError(Errors.E1032.format(var="end", forbidden="< 0", value=end)) + self.c.end = end - property start_char: - def __get__(self): - return self.c.start_char + @property + def start_char(self): + return self.c.start_char - def __set__(self, int start_char): - if start_char < 0: - raise IndexError(Errors.E1032.format(var="start_char", forbidden="< 0", value=start_char)) - self.c.start_char = start_char + @start_char.setter + def start_char(self, int start_char): + if start_char < 0: + raise IndexError(Errors.E1032.format(var="start_char", forbidden="< 0", value=start_char)) + self.c.start_char = start_char - property end_char: - def __get__(self): - return self.c.end_char + @property + def end_char(self): + return self.c.end_char - def __set__(self, int end_char): - if end_char < 0: - raise IndexError(Errors.E1032.format(var="end_char", forbidden="< 0", value=end_char)) - self.c.end_char = end_char + @end_char.setter + def end_char(self, int end_char): + if end_char < 0: + raise IndexError(Errors.E1032.format(var="end_char", forbidden="< 0", value=end_char)) + self.c.end_char = end_char - property label: - def __get__(self): - return self.c.label + @property + def label(self): + return self.c.label - def __set__(self, attr_t label): - self.c.label = label + @label.setter + def label(self, attr_t label): + self.c.label = label - property kb_id: - def __get__(self): - return self.c.kb_id + @property + def kb_id(self): + return self.c.kb_id - def __set__(self, attr_t kb_id): - self.c.kb_id = kb_id + @kb_id.setter + def kb_id(self, attr_t kb_id): + self.c.kb_id = kb_id - property id: - def __get__(self): - return self.c.id + @property + def id(self): + return self.c.id - def __set__(self, attr_t id): - self.c.id = id + @id.setter + def id(self, attr_t id): + self.c.id = id - property ent_id: + @property + def ent_id(self): """RETURNS (uint64): The entity ID.""" - def __get__(self): - return self.root.ent_id + return self.root.ent_id - def __set__(self, hash_t key): - raise NotImplementedError(Errors.E200.format(attr="ent_id")) + @ent_id.setter + def ent_id(self, hash_t key): + raise NotImplementedError(Errors.E200.format(attr="ent_id")) - property ent_id_: + @property + def ent_id_(self): """RETURNS (str): The (string) entity ID.""" - def __get__(self): - return self.root.ent_id_ + return self.root.ent_id_ - def __set__(self, str key): - raise NotImplementedError(Errors.E200.format(attr="ent_id_")) + @ent_id_.setter + def ent_id_(self, str key): + raise NotImplementedError(Errors.E200.format(attr="ent_id_")) @property def orth_(self): @@ -827,29 +852,32 @@ cdef class Span: """RETURNS (str): The span's lemma.""" return "".join([t.lemma_ + t.whitespace_ for t in 
self]).strip() - property label_: + @property + def label_(self): """RETURNS (str): The span's label.""" - def __get__(self): - return self.doc.vocab.strings[self.label] + return self.doc.vocab.strings[self.label] - def __set__(self, str label_): - self.label = self.doc.vocab.strings.add(label_) + @label_.setter + def label_(self, str label_): + self.label = self.doc.vocab.strings.add(label_) - property kb_id_: + @property + def kb_id_(self): """RETURNS (str): The span's KB ID.""" - def __get__(self): - return self.doc.vocab.strings[self.kb_id] + return self.doc.vocab.strings[self.kb_id] - def __set__(self, str kb_id_): - self.kb_id = self.doc.vocab.strings.add(kb_id_) + @kb_id_.setter + def kb_id_(self, str kb_id_): + self.kb_id = self.doc.vocab.strings.add(kb_id_) - property id_: + @property + def id_(self): """RETURNS (str): The span's ID.""" - def __get__(self): - return self.doc.vocab.strings[self.id] + return self.doc.vocab.strings[self.id] - def __set__(self, str id_): - self.id = self.doc.vocab.strings.add(id_) + @id_.setter + def id_(self, str id_): + self.id = self.doc.vocab.strings.add(id_) cdef int _count_words_to_root(const TokenC* token, int sent_length) except -1: diff --git a/spacy/tokens/span_group.pxd b/spacy/tokens/span_group.pxd index 5074aa275..7f4145682 100644 --- a/spacy/tokens/span_group.pxd +++ b/spacy/tokens/span_group.pxd @@ -1,6 +1,8 @@ from libcpp.vector cimport vector + from ..structs cimport SpanC + cdef class SpanGroup: cdef public object _doc_ref cdef public str name diff --git a/spacy/tokens/span_group.pyx b/spacy/tokens/span_group.pyx index c748fa256..257c907bc 100644 --- a/spacy/tokens/span_group.pyx +++ b/spacy/tokens/span_group.pyx @@ -1,10 +1,13 @@ -from typing import Iterable, Tuple, Union, Optional, TYPE_CHECKING -import weakref +# cython: profile=False import struct +import weakref from copy import deepcopy +from typing import Iterable, Optional, Union + import srsly from spacy.errors import Errors + from .span cimport Span @@ -32,7 +35,7 @@ cdef class SpanGroup: DOCS: https://spacy.io/api/spangroup """ - def __init__(self, doc, *, name="", attrs={}, spans=[]): + def __init__(self, doc, *, name="", attrs={}, spans=[]): # no-cython-lint """Create a SpanGroup. doc (Doc): The reference Doc object. 
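The `label_`, `kb_id_` and `id_` properties rewritten above keep their setter behaviour: assigning a string interns it in the vocab's `StringStore` and stores the resulting hash on the underlying span struct. A small sketch, assuming a blank pipeline (the label and KB ID are made up for illustration):

    import spacy
    from spacy.tokens import Span

    nlp = spacy.blank("en")
    doc = nlp("San Francisco is foggy")

    span = Span(doc, 0, 2, label="GPE")
    span.kb_id_ = "Q62"          # interns "Q62" and stores its hash in span.kb_id
    assert span.label_ == "GPE"
    assert doc.vocab.strings[span.kb_id] == "Q62"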
@@ -309,7 +312,7 @@ cdef class SpanGroup: other_attrs = deepcopy(other_group.attrs) span_group.attrs.update({ - key: value for key, value in other_attrs.items() \ + key: value for key, value in other_attrs.items() if key not in span_group.attrs }) if len(other_group): diff --git a/spacy/tokens/token.pxd b/spacy/tokens/token.pxd index 58b727764..f4e4611df 100644 --- a/spacy/tokens/token.pxd +++ b/spacy/tokens/token.pxd @@ -1,14 +1,16 @@ from numpy cimport ndarray -from ..vocab cimport Vocab -from ..structs cimport TokenC + from ..attrs cimport * -from ..typedefs cimport attr_t, flags_t -from ..parts_of_speech cimport univ_pos_t -from .doc cimport Doc from ..lexeme cimport Lexeme +from ..parts_of_speech cimport univ_pos_t +from ..structs cimport TokenC +from ..typedefs cimport attr_t, flags_t +from ..vocab cimport Vocab +from .doc cimport Doc from ..errors import Errors + cdef int MISSING_DEP = 0 cdef class Token: @@ -24,7 +26,7 @@ cdef class Token: cdef Token self = Token.__new__(Token, vocab, doc, offset) return self - #cdef inline TokenC struct_from_attrs(Vocab vocab, attrs): + # cdef inline TokenC struct_from_attrs(Vocab vocab, attrs): # cdef TokenC token # attrs = normalize_attrs(attrs) @@ -96,12 +98,10 @@ cdef class Token: elif feat_name == SENT_START: token.sent_start = value - @staticmethod cdef inline int missing_dep(const TokenC* token) nogil: return token.dep == MISSING_DEP - @staticmethod cdef inline int missing_head(const TokenC* token) nogil: return Token.missing_dep(token) diff --git a/spacy/tokens/token.pyi b/spacy/tokens/token.pyi index bd585d034..435ace527 100644 --- a/spacy/tokens/token.pyi +++ b/spacy/tokens/token.pyi @@ -1,18 +1,12 @@ -from typing import ( - Callable, - Protocol, - Iterator, - Optional, - Union, - Tuple, - Any, -) +from typing import Any, Callable, Iterator, Optional, Protocol, Tuple, Union + from thinc.types import Floats1d, FloatsXd -from .doc import Doc -from .span import Span -from .morphanalysis import MorphAnalysis + from ..lexeme import Lexeme from ..vocab import Vocab +from .doc import Doc +from .morphanalysis import MorphAnalysis +from .span import Span from .underscore import Underscore class TokenMethod(Protocol): @@ -59,7 +53,12 @@ class Token: def __bytes__(self) -> bytes: ... def __str__(self) -> str: ... def __repr__(self) -> str: ... - def __richcmp__(self, other: Token, op: int) -> bool: ... + def __lt__(self, other: Any) -> bool: ... + def __le__(self, other: Any) -> bool: ... + def __eq__(self, other: Any) -> bool: ... + def __ne__(self, other: Any) -> bool: ... + def __gt__(self, other: Any) -> bool: ... + def __ge__(self, other: Any) -> bool: ... @property def _(self) -> Underscore: ... def nbor(self, i: int = ...) -> Token: ... diff --git a/spacy/tokens/token.pyx b/spacy/tokens/token.pyx index 7fff6b162..a3efd5886 100644 --- a/spacy/tokens/token.pyx +++ b/spacy/tokens/token.pyx @@ -1,26 +1,43 @@ # cython: infer_types=True +# cython: profile=False # Compiler crashes on memory view coercion without this. Should report bug. 
-from cython.view cimport array as cvarray cimport numpy as np + np.import_array() -import numpy -from thinc.api import get_array_module import warnings -from ..typedefs cimport hash_t +from thinc.api import get_array_module + +from ..attrs cimport ( + IS_ALPHA, + IS_ASCII, + IS_BRACKET, + IS_CURRENCY, + IS_DIGIT, + IS_LEFT_PUNCT, + IS_LOWER, + IS_PUNCT, + IS_QUOTE, + IS_RIGHT_PUNCT, + IS_SPACE, + IS_STOP, + IS_TITLE, + IS_UPPER, + LIKE_EMAIL, + LIKE_NUM, + LIKE_URL, + ORTH, +) from ..lexeme cimport Lexeme -from ..attrs cimport IS_ALPHA, IS_ASCII, IS_DIGIT, IS_LOWER, IS_PUNCT, IS_SPACE -from ..attrs cimport IS_BRACKET, IS_QUOTE, IS_LEFT_PUNCT, IS_RIGHT_PUNCT -from ..attrs cimport IS_TITLE, IS_UPPER, IS_CURRENCY, IS_STOP -from ..attrs cimport LIKE_URL, LIKE_NUM, LIKE_EMAIL from ..symbols cimport conj -from .morphanalysis cimport MorphAnalysis +from ..typedefs cimport hash_t from .doc cimport set_children_from_heads +from .morphanalysis cimport MorphAnalysis from .. import parts_of_speech -from ..errors import Errors, Warnings from ..attrs import IOB_STRINGS +from ..errors import Errors, Warnings from .underscore import Underscore, get_ext_args @@ -122,17 +139,20 @@ cdef class Token: def __repr__(self): return self.__str__() - def __richcmp__(self, Token other, int op): + def __richcmp__(self, object other, int op): # http://cython.readthedocs.io/en/latest/src/userguide/special_methods.html if other is None: if op in (0, 1, 2): return False else: return True + if not isinstance(other, Token): + return False + cdef Token other_token = other cdef Doc my_doc = self.doc - cdef Doc other_doc = other.doc + cdef Doc other_doc = other_token.doc my = self.idx - their = other.idx + their = other_token.idx if op == 0: return my < their elif op == 2: @@ -197,11 +217,17 @@ cdef class Token: """ if "similarity" in self.doc.user_token_hooks: return self.doc.user_token_hooks["similarity"](self, other) - if hasattr(other, "__len__") and len(other) == 1 and hasattr(other, "__getitem__"): - if self.c.lex.orth == getattr(other[0], "orth", None): + attr = getattr(self.doc.vocab.vectors, "attr", ORTH) + cdef Token this_token = self + cdef Token other_token + cdef Lexeme other_lex + if isinstance(other, Token): + other_token = other + if Token.get_struct_attr(this_token.c, attr) == Token.get_struct_attr(other_token.c, attr): return 1.0 - elif hasattr(other, "orth"): - if self.c.lex.orth == other.orth: + elif isinstance(other, Lexeme): + other_lex = other + if Token.get_struct_attr(this_token.c, attr) == Lexeme.get_struct_attr(other_lex.c, attr): return 1.0 if self.vocab.vectors.n_keys == 0: warnings.warn(Warnings.W007.format(obj="Token")) @@ -214,7 +240,7 @@ cdef class Token: result = xp.dot(vector, other.vector) / (self.vector_norm * other.vector_norm) # ensure we get a scalar back (numpy does this automatically but cupy doesn't) return result.item() - + def has_morph(self): """Check whether the token has annotated morph information. Return False when the morph annotation is unset/missing. 
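The `similarity` rewrite above (mirrored in `Doc.similarity` and `Span.similarity` earlier in this diff) compares tokens by whatever attribute the vocab's vectors are keyed on, falling back to `ORTH` when `vectors.attr` is not available. Identical keys still short-circuit to 1.0 before any vector lookup, so the fast path works even without vectors loaded; a quick sketch with a blank pipeline:

    import spacy

    nlp = spacy.blank("en")
    doc = nlp("the cat sat on the cat")

    # Both tokens share the same ORTH key, so no vector table is consulted.
    print(doc[1].similarity(doc[5]))   # 1.0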
@@ -223,15 +249,16 @@ cdef class Token: """ return not self.c.morph == 0 - property morph: - def __get__(self): - return MorphAnalysis.from_id(self.vocab, self.c.morph) + @property + def morph(self): + return MorphAnalysis.from_id(self.vocab, self.c.morph) - def __set__(self, MorphAnalysis morph): - # Check that the morph has the same vocab - if self.vocab != morph.vocab: - raise ValueError(Errors.E1013) - self.c.morph = morph.c.key + @morph.setter + def morph(self, MorphAnalysis morph): + # Check that the morph has the same vocab + if self.vocab != morph.vocab: + raise ValueError(Errors.E1013) + self.c.morph = morph.c.key def set_morph(self, features): cdef hash_t key @@ -351,39 +378,43 @@ cdef class Token: """ return self.c.lex.suffix - property lemma: + @property + def lemma(self): """RETURNS (uint64): ID of the base form of the word, with no inflectional suffixes. """ - def __get__(self): - return self.c.lemma + return self.c.lemma - def __set__(self, attr_t lemma): - self.c.lemma = lemma + @lemma.setter + def lemma(self, attr_t lemma): + self.c.lemma = lemma - property pos: + @property + def pos(self): """RETURNS (uint64): ID of coarse-grained part-of-speech tag.""" - def __get__(self): - return self.c.pos + return self.c.pos - def __set__(self, pos): - self.c.pos = pos + @pos.setter + def pos(self, pos): + self.c.pos = pos - property tag: + @property + def tag(self): """RETURNS (uint64): ID of fine-grained part-of-speech tag.""" - def __get__(self): - return self.c.tag + return self.c.tag - def __set__(self, attr_t tag): - self.c.tag = tag + @tag.setter + def tag(self, attr_t tag): + self.c.tag = tag - property dep: + @property + def dep(self): """RETURNS (uint64): ID of syntactic dependency label.""" - def __get__(self): - return self.c.dep + return self.c.dep - def __set__(self, attr_t label): - self.c.dep = label + @dep.setter + def dep(self, attr_t label): + self.c.dep = label @property def has_vector(self): @@ -398,7 +429,7 @@ cdef class Token: return self.doc.user_token_hooks["has_vector"](self) if self.vocab.vectors.size == 0 and self.doc.tensor.size != 0: return True - return self.vocab.has_vector(self.c.lex.orth) + return self.vocab.has_vector(Token.get_struct_attr(self.c, self.vocab.vectors.attr)) @property def vector(self): @@ -414,7 +445,7 @@ cdef class Token: if self.vocab.vectors.size == 0 and self.doc.tensor.size != 0: return self.doc.tensor[self.i] else: - return self.vocab.get_vector(self.c.lex.orth) + return self.vocab.get_vector(Token.get_struct_attr(self.c, self.vocab.vectors.attr)) @property def vector_norm(self): @@ -468,48 +499,51 @@ cdef class Token: return self.doc.user_token_hooks["sent"](self) return self.doc[self.i : self.i+1].sent - property sent_start: - def __get__(self): - """Deprecated: use Token.is_sent_start instead.""" - # Raising a deprecation warning here causes errors for autocomplete - # Handle broken backwards compatibility case: doc[0].sent_start - # was False. - if self.i == 0: - return False - else: - return self.c.sent_start + @property + def sent_start(self): + """Deprecated: use Token.is_sent_start instead.""" + # Raising a deprecation warning here causes errors for autocomplete + # Handle broken backwards compatibility case: doc[0].sent_start + # was False. 
+ if self.i == 0: + return False + else: + return self.c.sent_start - def __set__(self, value): - self.is_sent_start = value + @sent_start.setter + def sent_start(self, value): + self.is_sent_start = value - property is_sent_start: + @property + def is_sent_start(self): """A boolean value indicating whether the token starts a sentence. `None` if unknown. Defaults to `True` for the first token in the `Doc`. RETURNS (bool / None): Whether the token starts a sentence. None if unknown. """ - def __get__(self): - if self.c.sent_start == 0: - return None - elif self.c.sent_start < 0: - return False - else: - return True + if self.c.sent_start == 0: + return None + elif self.c.sent_start < 0: + return False + else: + return True - def __set__(self, value): - if self.doc.has_annotation("DEP"): - raise ValueError(Errors.E043) - if value is None: - self.c.sent_start = 0 - elif value is True: - self.c.sent_start = 1 - elif value is False: - self.c.sent_start = -1 - else: - raise ValueError(Errors.E044.format(value=value)) + @is_sent_start.setter + def is_sent_start(self, value): + if self.doc.has_annotation("DEP"): + raise ValueError(Errors.E043) + if value is None: + self.c.sent_start = 0 + elif value is True: + self.c.sent_start = 1 + elif value is False: + self.c.sent_start = -1 + else: + raise ValueError(Errors.E044.format(value=value)) - property is_sent_end: + @property + def is_sent_end(self): """A boolean value indicating whether the token ends a sentence. `None` if unknown. Defaults to `True` for the last token in the `Doc`. @@ -518,18 +552,18 @@ cdef class Token: DOCS: https://spacy.io/api/token#is_sent_end """ - def __get__(self): - if self.i + 1 == len(self.doc): - return True - elif self.doc[self.i+1].is_sent_start == None: - return None - elif self.doc[self.i+1].is_sent_start == True: - return True - else: - return False + if self.i + 1 == len(self.doc): + return True + elif self.doc[self.i+1].is_sent_start is None: + return None + elif self.doc[self.i+1].is_sent_start is True: + return True + else: + return False - def __set__(self, value): - raise ValueError(Errors.E196) + @is_sent_end.setter + def is_sent_end(self, value): + raise ValueError(Errors.E196) @property def lefts(self): @@ -656,41 +690,42 @@ cdef class Token: """ return not Token.missing_head(self.c) - property head: + @property + def head(self): """The syntactic parent, or "governor", of this token. If token.has_head() is `False`, this method will return itself. RETURNS (Token): The token predicted by the parser to be the head of the current token. 
""" - def __get__(self): - if not self.has_head(): - return self - else: - return self.doc[self.i + self.c.head] + if not self.has_head(): + return self + else: + return self.doc[self.i + self.c.head] - def __set__(self, Token new_head): - # This function sets the head of self to new_head and updates the - # counters for left/right dependents and left/right corner for the - # new and the old head - # Check that token is from the same document - if self.doc != new_head.doc: - raise ValueError(Errors.E191) - # Do nothing if old head is new head - if self.i + self.c.head == new_head.i: - return - # Find the widest l/r_edges of the roots of the two tokens involved - # to limit the number of tokens for set_children_from_heads - cdef Token self_root, new_head_root - self_root = ([self] + list(self.ancestors))[-1] - new_head_ancestors = list(new_head.ancestors) - new_head_root = new_head_ancestors[-1] if new_head_ancestors else new_head - start = self_root.c.l_edge if self_root.c.l_edge < new_head_root.c.l_edge else new_head_root.c.l_edge - end = self_root.c.r_edge if self_root.c.r_edge > new_head_root.c.r_edge else new_head_root.c.r_edge - # Set new head - self.c.head = new_head.i - self.i - # Adjust parse properties and sentence starts - set_children_from_heads(self.doc.c, start, end + 1) + @head.setter + def head(self, Token new_head): + # This function sets the head of self to new_head and updates the + # counters for left/right dependents and left/right corner for the + # new and the old head + # Check that token is from the same document + if self.doc != new_head.doc: + raise ValueError(Errors.E191) + # Do nothing if old head is new head + if self.i + self.c.head == new_head.i: + return + # Find the widest l/r_edges of the roots of the two tokens involved + # to limit the number of tokens for set_children_from_heads + cdef Token self_root, new_head_root + self_root = ([self] + list(self.ancestors))[-1] + new_head_ancestors = list(new_head.ancestors) + new_head_root = new_head_ancestors[-1] if new_head_ancestors else new_head + start = self_root.c.l_edge if self_root.c.l_edge < new_head_root.c.l_edge else new_head_root.c.l_edge + end = self_root.c.r_edge if self_root.c.r_edge > new_head_root.c.r_edge else new_head_root.c.r_edge + # Set new head + self.c.head = new_head.i - self.i + # Adjust parse properties and sentence starts + set_children_from_heads(self.doc.c, start, end + 1) @property def conjuncts(self): @@ -718,21 +753,23 @@ cdef class Token: queue.append(child) return tuple([w for w in output if w.i != self.i]) - property ent_type: + @property + def ent_type(self): """RETURNS (uint64): Named entity type.""" - def __get__(self): - return self.c.ent_type + return self.c.ent_type - def __set__(self, ent_type): - self.c.ent_type = ent_type + @ent_type.setter + def ent_type(self, ent_type): + self.c.ent_type = ent_type - property ent_type_: + @property + def ent_type_(self): """RETURNS (str): Named entity type.""" - def __get__(self): - return self.vocab.strings[self.c.ent_type] + return self.vocab.strings[self.c.ent_type] - def __set__(self, ent_type): - self.c.ent_type = self.vocab.strings.add(ent_type) + @ent_type_.setter + def ent_type_(self, ent_type): + self.c.ent_type = self.vocab.strings.add(ent_type) @property def ent_iob(self): @@ -758,41 +795,45 @@ cdef class Token: """ return self.iob_strings()[self.c.ent_iob] - property ent_id: + @property + def ent_id(self): """RETURNS (uint64): ID of the entity the token is an instance of, if any. 
""" - def __get__(self): - return self.c.ent_id + return self.c.ent_id - def __set__(self, hash_t key): - self.c.ent_id = key + @ent_id.setter + def ent_id(self, hash_t key): + self.c.ent_id = key - property ent_id_: + @property + def ent_id_(self): """RETURNS (str): ID of the entity the token is an instance of, if any. """ - def __get__(self): - return self.vocab.strings[self.c.ent_id] + return self.vocab.strings[self.c.ent_id] - def __set__(self, name): - self.c.ent_id = self.vocab.strings.add(name) + @ent_id_.setter + def ent_id_(self, name): + self.c.ent_id = self.vocab.strings.add(name) - property ent_kb_id: + @property + def ent_kb_id(self): """RETURNS (uint64): Named entity KB ID.""" - def __get__(self): - return self.c.ent_kb_id + return self.c.ent_kb_id - def __set__(self, attr_t ent_kb_id): - self.c.ent_kb_id = ent_kb_id + @ent_kb_id.setter + def ent_kb_id(self, attr_t ent_kb_id): + self.c.ent_kb_id = ent_kb_id - property ent_kb_id_: + @property + def ent_kb_id_(self): """RETURNS (str): Named entity KB ID.""" - def __get__(self): - return self.vocab.strings[self.c.ent_kb_id] + return self.vocab.strings[self.c.ent_kb_id] - def __set__(self, ent_kb_id): - self.c.ent_kb_id = self.vocab.strings.add(ent_kb_id) + @ent_kb_id_.setter + def ent_kb_id_(self, ent_kb_id): + self.c.ent_kb_id = self.vocab.strings.add(ent_kb_id) @property def whitespace_(self): @@ -814,16 +855,17 @@ cdef class Token: """ return self.vocab.strings[self.c.lex.lower] - property norm_: + @property + def norm_(self): """RETURNS (str): The token's norm, i.e. a normalised form of the token text. Usually set in the language's tokenizer exceptions or norm exceptions. """ - def __get__(self): - return self.vocab.strings[self.norm] + return self.vocab.strings[self.norm] - def __set__(self, str norm_): - self.c.norm = self.vocab.strings.add(norm_) + @norm_.setter + def norm_(self, str norm_): + self.c.norm = self.vocab.strings.add(norm_) @property def shape_(self): @@ -853,33 +895,36 @@ cdef class Token: """ return self.vocab.strings[self.c.lex.lang] - property lemma_: + @property + def lemma_(self): """RETURNS (str): The token lemma, i.e. the base form of the word, with no inflectional suffixes. """ - def __get__(self): - return self.vocab.strings[self.c.lemma] + return self.vocab.strings[self.c.lemma] - def __set__(self, str lemma_): - self.c.lemma = self.vocab.strings.add(lemma_) + @lemma_.setter + def lemma_(self, str lemma_): + self.c.lemma = self.vocab.strings.add(lemma_) - property pos_: + @property + def pos_(self): """RETURNS (str): Coarse-grained part-of-speech tag.""" - def __get__(self): - return parts_of_speech.NAMES[self.c.pos] + return parts_of_speech.NAMES[self.c.pos] - def __set__(self, pos_name): - if pos_name not in parts_of_speech.IDS: - raise ValueError(Errors.E1021.format(pp=pos_name)) - self.c.pos = parts_of_speech.IDS[pos_name] + @pos_.setter + def pos_(self, pos_name): + if pos_name not in parts_of_speech.IDS: + raise ValueError(Errors.E1021.format(pp=pos_name)) + self.c.pos = parts_of_speech.IDS[pos_name] - property tag_: + @property + def tag_(self): """RETURNS (str): Fine-grained part-of-speech tag.""" - def __get__(self): - return self.vocab.strings[self.c.tag] + return self.vocab.strings[self.c.tag] - def __set__(self, tag): - self.tag = self.vocab.strings.add(tag) + @tag_.setter + def tag_(self, tag): + self.tag = self.vocab.strings.add(tag) def has_dep(self): """Check whether the token has annotated dep information. 
@@ -889,13 +934,14 @@ cdef class Token: """ return not Token.missing_dep(self.c) - property dep_: + @property + def dep_(self): """RETURNS (str): The syntactic dependency label.""" - def __get__(self): - return self.vocab.strings[self.c.dep] + return self.vocab.strings[self.c.dep] - def __set__(self, str label): - self.c.dep = self.vocab.strings.add(label) + @dep_.setter + def dep_(self, str label): + self.c.dep = self.vocab.strings.add(label) @property def is_oov(self): diff --git a/spacy/tokens/underscore.py b/spacy/tokens/underscore.py index e9a4e1862..0aa0c1e6d 100644 --- a/spacy/tokens/underscore.py +++ b/spacy/tokens/underscore.py @@ -1,6 +1,7 @@ -from typing import Dict, Any, List, Optional, Tuple, Union, TYPE_CHECKING -import functools import copy +import functools +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union + from ..errors import Errors if TYPE_CHECKING: diff --git a/spacy/training/__init__.py b/spacy/training/__init__.py index a6f873f05..5c2ba9932 100644 --- a/spacy/training/__init__.py +++ b/spacy/training/__init__.py @@ -1,12 +1,43 @@ -from .corpus import Corpus, JsonlCorpus, PlainTextCorpus # noqa: F401 -from .example import Example, validate_examples, validate_get_examples # noqa: F401 from .alignment import Alignment # noqa: F401 from .augment import dont_augment, orth_variants_augmenter # noqa: F401 -from .iob_utils import iob_to_biluo, biluo_to_iob # noqa: F401 -from .iob_utils import offsets_to_biluo_tags, biluo_tags_to_offsets # noqa: F401 -from .iob_utils import biluo_tags_to_spans, tags_to_entities # noqa: F401 -from .iob_utils import split_bilu_label, remove_bilu_prefix # noqa: F401 -from .gold_io import docs_to_json, read_json_file # noqa: F401 from .batchers import minibatch_by_padded_size, minibatch_by_words # noqa: F401 -from .loggers import console_logger # noqa: F401 from .callbacks import create_copy_from_base_model # noqa: F401 +from .corpus import Corpus, JsonlCorpus, PlainTextCorpus # noqa: F401 +from .example import Example, validate_examples, validate_get_examples # noqa: F401 +from .gold_io import docs_to_json, read_json_file # noqa: F401 +from .iob_utils import ( # noqa: F401 + biluo_tags_to_offsets, + biluo_tags_to_spans, + biluo_to_iob, + iob_to_biluo, + offsets_to_biluo_tags, + remove_bilu_prefix, + split_bilu_label, + tags_to_entities, +) +from .loggers import console_logger # noqa: F401 + +__all__ = [ + "Alignment", + "Corpus", + "Example", + "JsonlCorpus", + "PlainTextCorpus", + "biluo_tags_to_offsets", + "biluo_tags_to_spans", + "biluo_to_iob", + "create_copy_from_base_model", + "docs_to_json", + "dont_augment", + "iob_to_biluo", + "minibatch_by_padded_size", + "minibatch_by_words", + "offsets_to_biluo_tags", + "orth_variants_augmenter", + "read_json_file", + "remove_bilu_prefix", + "split_bilu_label", + "tags_to_entities", + "validate_get_examples", + "validate_examples", +] diff --git a/spacy/training/align.pyx b/spacy/training/align.pyx index 0ef1fd35d..c68110e30 100644 --- a/spacy/training/align.pyx +++ b/spacy/training/align.pyx @@ -1,6 +1,7 @@ -from typing import List, Tuple -from itertools import chain +# cython: profile=False import re +from itertools import chain +from typing import List, Tuple from ..errors import Errors @@ -37,10 +38,14 @@ def get_alignments(A: List[str], B: List[str]) -> Tuple[List[List[int]], List[Li b2a.append(set()) # Process the alignment at the current position if A[token_idx_a] == B[token_idx_b] and \ - (char_idx_a == 0 or \ - char_to_token_a[char_idx_a - 1] < token_idx_a) and \ - 
(char_idx_b == 0 or \ - char_to_token_b[char_idx_b - 1] < token_idx_b): + ( + char_idx_a == 0 or + char_to_token_a[char_idx_a - 1] < token_idx_a + ) and \ + ( + char_idx_b == 0 or + char_to_token_b[char_idx_b - 1] < token_idx_b + ): # Current tokens are identical and both character offsets are the # start of a token (either at the beginning of the document or the # previous character belongs to a different token) diff --git a/spacy/training/alignment.py b/spacy/training/alignment.py index 6d24714bf..3f615d10b 100644 --- a/spacy/training/alignment.py +++ b/spacy/training/alignment.py @@ -1,5 +1,5 @@ -from typing import List from dataclasses import dataclass +from typing import List from .align import get_alignments from .alignment_array import AlignmentArray diff --git a/spacy/training/alignment_array.pxd b/spacy/training/alignment_array.pxd index 056f5bef3..bb28f3ac6 100644 --- a/spacy/training/alignment_array.pxd +++ b/spacy/training/alignment_array.pxd @@ -1,5 +1,6 @@ -from libcpp.vector cimport vector cimport numpy as np +from libcpp.vector cimport vector + cdef class AlignmentArray: cdef np.ndarray _data diff --git a/spacy/training/alignment_array.pyx b/spacy/training/alignment_array.pyx index 01e9d9bf8..f0eb5cf39 100644 --- a/spacy/training/alignment_array.pyx +++ b/spacy/training/alignment_array.pyx @@ -1,6 +1,10 @@ +# cython: profile=False from typing import List -from ..errors import Errors + import numpy + +from ..errors import Errors + from libc.stdint cimport int32_t diff --git a/spacy/training/augment.py b/spacy/training/augment.py index 2fe8c24fb..1ebd3313c 100644 --- a/spacy/training/augment.py +++ b/spacy/training/augment.py @@ -1,12 +1,11 @@ -from typing import Callable, Iterator, Dict, List, Tuple, TYPE_CHECKING -from typing import Optional -import random import itertools +import random from functools import partial +from typing import TYPE_CHECKING, Callable, Dict, Iterator, List, Optional, Tuple from ..util import registry from .example import Example -from .iob_utils import split_bilu_label, _doc_to_biluo_tags_with_partial +from .iob_utils import _doc_to_biluo_tags_with_partial, split_bilu_label if TYPE_CHECKING: from ..language import Language # noqa: F401 diff --git a/spacy/training/batchers.py b/spacy/training/batchers.py index f0b6c3123..050c3351b 100644 --- a/spacy/training/batchers.py +++ b/spacy/training/batchers.py @@ -1,10 +1,18 @@ -from typing import Union, Iterable, Sequence, TypeVar, List, Callable, Iterator -from typing import Optional, Any -from functools import partial import itertools +from functools import partial +from typing import ( + Any, + Callable, + Iterable, + Iterator, + List, + Optional, + Sequence, + TypeVar, + Union, +) -from ..util import registry, minibatch - +from ..util import minibatch, registry Sizing = Union[Sequence[int], int] ItemT = TypeVar("ItemT") diff --git a/spacy/training/callbacks.py b/spacy/training/callbacks.py index 7e2494f5b..21c3d56a1 100644 --- a/spacy/training/callbacks.py +++ b/spacy/training/callbacks.py @@ -1,14 +1,17 @@ -from typing import Callable, Optional +from typing import TYPE_CHECKING, Callable, Optional + from ..errors import Errors -from ..language import Language -from ..util import load_model, registry, logger +from ..util import load_model, logger, registry + +if TYPE_CHECKING: + from ..language import Language @registry.callbacks("spacy.copy_from_base_model.v1") def create_copy_from_base_model( tokenizer: Optional[str] = None, vocab: Optional[str] = None, -) -> Callable[[Language], Language]: +) -> 
Callable[["Language"], "Language"]: def copy_from_base_model(nlp): if tokenizer: logger.info("Copying tokenizer from: %s", tokenizer) diff --git a/spacy/training/converters/__init__.py b/spacy/training/converters/__init__.py index e91b6aaa6..8173da64c 100644 --- a/spacy/training/converters/__init__.py +++ b/spacy/training/converters/__init__.py @@ -1,4 +1,4 @@ -from .iob_to_docs import iob_to_docs # noqa: F401 from .conll_ner_to_docs import conll_ner_to_docs # noqa: F401 -from .json_to_docs import json_to_docs # noqa: F401 from .conllu_to_docs import conllu_to_docs # noqa: F401 +from .iob_to_docs import iob_to_docs # noqa: F401 +from .json_to_docs import json_to_docs # noqa: F401 diff --git a/spacy/training/converters/conll_ner_to_docs.py b/spacy/training/converters/conll_ner_to_docs.py index 28b21c5f0..b19d1791b 100644 --- a/spacy/training/converters/conll_ner_to_docs.py +++ b/spacy/training/converters/conll_ner_to_docs.py @@ -1,10 +1,10 @@ from wasabi import Printer -from .. import tags_to_entities -from ...training import iob_to_biluo -from ...tokens import Doc, Span from ...errors import Errors -from ...util import load_model, get_lang_class +from ...tokens import Doc, Span +from ...training import iob_to_biluo +from ...util import get_lang_class, load_model +from .. import tags_to_entities def conll_ner_to_docs( diff --git a/spacy/training/converters/conllu_to_docs.py b/spacy/training/converters/conllu_to_docs.py index 7052504cc..bda5c88c3 100644 --- a/spacy/training/converters/conllu_to_docs.py +++ b/spacy/training/converters/conllu_to_docs.py @@ -1,11 +1,12 @@ import re -from .conll_ner_to_docs import n_sents_info -from ...training import iob_to_biluo, biluo_tags_to_spans -from ...tokens import Doc, Token, Span -from ...vocab import Vocab from wasabi import Printer +from ...tokens import Doc, Span, Token +from ...training import biluo_tags_to_spans, iob_to_biluo +from ...vocab import Vocab +from .conll_ner_to_docs import n_sents_info + def conllu_to_docs( input_data, diff --git a/spacy/training/converters/iob_to_docs.py b/spacy/training/converters/iob_to_docs.py index 60fb7df61..45bb65692 100644 --- a/spacy/training/converters/iob_to_docs.py +++ b/spacy/training/converters/iob_to_docs.py @@ -1,11 +1,11 @@ from wasabi import Printer -from .conll_ner_to_docs import n_sents_info -from ...vocab import Vocab -from ...training import iob_to_biluo, tags_to_entities -from ...tokens import Doc, Span from ...errors import Errors +from ...tokens import Doc, Span +from ...training import iob_to_biluo, tags_to_entities from ...util import minibatch +from ...vocab import Vocab +from .conll_ner_to_docs import n_sents_info def iob_to_docs(input_data, n_sents=10, no_print=False, *args, **kwargs): diff --git a/spacy/training/converters/json_to_docs.py b/spacy/training/converters/json_to_docs.py index 4123839f2..b4beedd2f 100644 --- a/spacy/training/converters/json_to_docs.py +++ b/spacy/training/converters/json_to_docs.py @@ -1,9 +1,13 @@ import srsly -from ..gold_io import json_iterate, json_to_annotations -from ..example import annotations_to_doc -from ..example import _fix_legacy_dict_data, _parse_example_dict_data -from ...util import load_model + from ...lang.xx import MultiLanguage +from ...util import load_model +from ..example import ( + _fix_legacy_dict_data, + _parse_example_dict_data, + annotations_to_doc, +) +from ..gold_io import json_iterate, json_to_annotations def json_to_docs(input_data, model=None, **kwargs): diff --git a/spacy/training/corpus.py b/spacy/training/corpus.py index 
086ad831c..5cc2733a5 100644 --- a/spacy/training/corpus.py +++ b/spacy/training/corpus.py @@ -1,16 +1,16 @@ -import warnings -from typing import Union, List, Iterable, Iterator, TYPE_CHECKING, Callable -from typing import Optional -from pathlib import Path import random +import warnings +from pathlib import Path +from typing import TYPE_CHECKING, Callable, Iterable, Iterator, List, Optional, Union + import srsly from .. import util +from ..errors import Errors, Warnings +from ..tokens import Doc, DocBin +from ..vocab import Vocab from .augment import dont_augment from .example import Example -from ..errors import Warnings, Errors -from ..tokens import DocBin, Doc -from ..vocab import Vocab if TYPE_CHECKING: # This lets us add type hints for mypy etc. without causing circular imports @@ -63,7 +63,7 @@ def create_plain_text_reader( path: Optional[Path], min_length: int = 0, max_length: int = 0, -) -> Callable[["Language"], Iterable[Doc]]: +) -> Callable[["Language"], Iterable[Example]]: """Iterate Example objects from a file or directory of plain text UTF-8 files with one line per doc. diff --git a/spacy/training/example.pxd b/spacy/training/example.pxd index 49e239757..a7c71fa88 100644 --- a/spacy/training/example.pxd +++ b/spacy/training/example.pxd @@ -1,6 +1,7 @@ -from ..tokens.doc cimport Doc from libc.stdint cimport uint64_t +from ..tokens.doc cimport Doc + cdef class Example: cdef readonly Doc x diff --git a/spacy/training/example.pyi b/spacy/training/example.pyi new file mode 100644 index 000000000..06639d70c --- /dev/null +++ b/spacy/training/example.pyi @@ -0,0 +1,66 @@ +from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple + +from ..tokens import Doc, Span +from ..vocab import Vocab +from .alignment import Alignment + +def annotations_to_doc( + vocab: Vocab, + tok_annot: Dict[str, Any], + doc_annot: Dict[str, Any], +) -> Doc: ... +def validate_examples( + examples: Iterable[Example], + method: str, +) -> None: ... +def validate_get_examples( + get_examples: Callable[[], Iterable[Example]], + method: str, +): ... + +class Example: + x: Doc + y: Doc + + def __init__( + self, + predicted: Doc, + reference: Doc, + *, + alignment: Optional[Alignment] = None, + ): ... + def __len__(self) -> int: ... + @property + def predicted(self) -> Doc: ... + @predicted.setter + def predicted(self, doc: Doc) -> None: ... + @property + def reference(self) -> Doc: ... + @reference.setter + def reference(self, doc: Doc) -> None: ... + def copy(self) -> Example: ... + @classmethod + def from_dict(cls, predicted: Doc, example_dict: Dict[str, Any]) -> Example: ... + @property + def alignment(self) -> Alignment: ... + def get_aligned(self, field: str, as_string=False): ... + def get_aligned_parse(self, projectivize=True): ... + def get_aligned_sent_starts(self): ... + def get_aligned_spans_x2y( + self, x_spans: Iterable[Span], allow_overlap=False + ) -> List[Span]: ... + def get_aligned_spans_y2x( + self, y_spans: Iterable[Span], allow_overlap=False + ) -> List[Span]: ... + def get_aligned_ents_and_ner(self) -> Tuple[List[Span], List[str]]: ... + def get_aligned_ner(self) -> List[str]: ... + def get_matching_ents(self, check_label: bool = True) -> List[Span]: ... + def to_dict(self) -> Dict[str, Any]: ... + def split_sents(self) -> List[Example]: ... + @property + def text(self) -> str: ... + def __str__(self) -> str: ... + def __repr__(self) -> str: ... + +def _parse_example_dict_data(example_dict): ... +def _fix_legacy_dict_data(example_dict): ... 
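For orientation, a minimal usage sketch of the `Example` API that the new `example.pyi` stub above declares (`Example.from_dict`, `text`, `get_aligned_ner`); the sentence text, tokenization and entity offsets below are invented for illustration and are not part of the patch.

```python
# Minimal sketch of the Example API declared in example.pyi (illustrative only).
import spacy
from spacy.training import Example

nlp = spacy.blank("en")
predicted = nlp.make_doc("Apple is opening a store in San Francisco")

# Gold-standard annotations supplied as a dict; "entities" uses character offsets.
reference_annots = {
    "words": ["Apple", "is", "opening", "a", "store", "in", "San", "Francisco"],
    "entities": [(0, 5, "ORG"), (28, 41, "GPE")],
}
example = Example.from_dict(predicted, reference_annots)

print(example.text)               # text of the predicted doc
print(example.get_aligned_ner())  # BILUO tags projected onto the predicted tokens
```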
diff --git a/spacy/training/example.pyx b/spacy/training/example.pyx index 95b0f0de9..2c1ff34cf 100644 --- a/spacy/training/example.pyx +++ b/spacy/training/example.pyx @@ -1,19 +1,29 @@ +# cython: profile=False from collections.abc import Iterable as IterableInstance -import warnings + import numpy + from murmurhash.mrmr cimport hash64 from ..tokens.doc cimport Doc from ..tokens.span cimport Span -from ..tokens.span import Span + from ..attrs import IDS -from .alignment import Alignment -from .iob_utils import biluo_to_iob, offsets_to_biluo_tags, doc_to_biluo_tags -from .iob_utils import biluo_tags_to_spans, remove_bilu_prefix from ..errors import Errors, Warnings from ..pipeline._parser_internals import nonproj +from ..tokens.span import Span +from .alignment import Alignment +from .iob_utils import ( + biluo_tags_to_spans, + biluo_to_iob, + doc_to_biluo_tags, + offsets_to_biluo_tags, + remove_bilu_prefix, +) + from ..tokens.token cimport MISSING_DEP -from ..util import logger, to_ternary_int, all_equal + +from ..util import all_equal, logger, to_ternary_int cpdef Doc annotations_to_doc(vocab, tok_annot, doc_annot): @@ -21,9 +31,9 @@ cpdef Doc annotations_to_doc(vocab, tok_annot, doc_annot): attrs, array = _annot2array(vocab, tok_annot, doc_annot) output = Doc(vocab, words=tok_annot["ORTH"], spaces=tok_annot["SPACY"]) if "entities" in doc_annot: - _add_entities_to_doc(output, doc_annot["entities"]) + _add_entities_to_doc(output, doc_annot["entities"]) if "spans" in doc_annot: - _add_spans_to_doc(output, doc_annot["spans"]) + _add_spans_to_doc(output, doc_annot["spans"]) if array.size: output = output.from_array(attrs, array) # links are currently added with ENT_KB_ID on the token level @@ -78,23 +88,25 @@ cdef class Example: def __len__(self): return len(self.predicted) - property predicted: - def __get__(self): - return self.x + @property + def predicted(self): + return self.x - def __set__(self, doc): - self.x = doc - self._cached_alignment = None - self._cached_words_x = [t.text for t in doc] + @predicted.setter + def predicted(self, doc): + self.x = doc + self._cached_alignment = None + self._cached_words_x = [t.text for t in doc] - property reference: - def __get__(self): - return self.y + @property + def reference(self): + return self.y - def __set__(self, doc): - self.y = doc - self._cached_alignment = None - self._cached_words_y = [t.text for t in doc] + @reference.setter + def reference(self, doc): + self.y = doc + self._cached_alignment = None + self._cached_words_y = [t.text for t in doc] def copy(self): return Example( @@ -151,7 +163,6 @@ cdef class Example: self._y_sig = y_sig return self._cached_alignment - def _get_aligned_vectorized(self, align, gold_values): # Fast path for Doc attributes/fields that are predominantly a single value, # i.e., TAG, POS, MORPH. @@ -194,7 +205,6 @@ cdef class Example: return output.tolist() - def _get_aligned_non_vectorized(self, align, gold_values): # Slower path for fields that return multiple values (resulting # in ragged arrays that cannot be vectorized trivially). @@ -211,7 +221,6 @@ cdef class Example: return output - def get_aligned(self, field, as_string=False): """Return an aligned array for a token attribute.""" align = self.alignment.x2y @@ -320,7 +329,7 @@ cdef class Example: missing=None ) # Now fill the tokens we can align to O. 
- O = 2 # I=1, O=2, B=3 + O = 2 # I=1, O=2, B=3 # no-cython-lint: E741 for i, ent_iob in enumerate(self.get_aligned("ENT_IOB")): if x_tags[i] is None: if ent_iob == O: @@ -330,7 +339,7 @@ cdef class Example: return x_ents, x_tags def get_aligned_ner(self): - x_ents, x_tags = self.get_aligned_ents_and_ner() + _x_ents, x_tags = self.get_aligned_ents_and_ner() return x_tags def get_matching_ents(self, check_label=True): @@ -388,7 +397,6 @@ cdef class Example: return span_dict - def _links_to_dict(self): links = {} for ent in self.reference.ents: @@ -414,9 +422,9 @@ cdef class Example: seen_indices.update(indices) return output - property text: - def __get__(self): - return self.x.text + @property + def text(self): + return self.x.text def __str__(self): return str(self.to_dict()) @@ -579,6 +587,7 @@ def _fix_legacy_dict_data(example_dict): "doc_annotation": doc_dict } + def _has_field(annot, field): if field not in annot: return False @@ -615,6 +624,7 @@ def _parse_ner_tags(biluo_or_offsets, vocab, words, spaces): ent_types.append("") return ent_iobs, ent_types + def _parse_links(vocab, words, spaces, links): reference = Doc(vocab, words=words, spaces=spaces) starts = {token.idx: token.i for token in reference} diff --git a/spacy/training/gold_io.pyx b/spacy/training/gold_io.pyx index 69654e2c7..afbdf4631 100644 --- a/spacy/training/gold_io.pyx +++ b/spacy/training/gold_io.pyx @@ -1,10 +1,12 @@ +# cython: profile=False import warnings + import srsly + from .. import util from ..errors import Warnings from ..tokens import Doc -from .iob_utils import offsets_to_biluo_tags, tags_to_entities -import json +from .iob_utils import offsets_to_biluo_tags def docs_to_json(docs, doc_id=0, ner_missing_tag="O"): @@ -21,7 +23,13 @@ def docs_to_json(docs, doc_id=0, ner_missing_tag="O"): json_doc = {"id": doc_id, "paragraphs": []} for i, doc in enumerate(docs): raw = None if doc.has_unknown_spaces else doc.text - json_para = {'raw': raw, "sentences": [], "cats": [], "entities": [], "links": []} + json_para = { + 'raw': raw, + "sentences": [], + "cats": [], + "entities": [], + "links": [] + } for cat, val in doc.cats.items(): json_cat = {"label": cat, "value": val} json_para["cats"].append(json_cat) @@ -33,13 +41,17 @@ def docs_to_json(docs, doc_id=0, ner_missing_tag="O"): if ent.kb_id_: link_dict = {(ent.start_char, ent.end_char): {ent.kb_id_: 1.0}} json_para["links"].append(link_dict) - biluo_tags = offsets_to_biluo_tags(doc, json_para["entities"], missing=ner_missing_tag) + biluo_tags = offsets_to_biluo_tags( + doc, json_para["entities"], missing=ner_missing_tag + ) attrs = ("TAG", "POS", "MORPH", "LEMMA", "DEP", "ENT_IOB") include_annotation = {attr: doc.has_annotation(attr) for attr in attrs} for j, sent in enumerate(doc.sents): json_sent = {"tokens": [], "brackets": []} for token in sent: - json_token = {"id": token.i, "orth": token.text, "space": token.whitespace_} + json_token = { + "id": token.i, "orth": token.text, "space": token.whitespace_ + } if include_annotation["TAG"]: json_token["tag"] = token.tag_ if include_annotation["POS"]: @@ -123,9 +135,14 @@ def json_to_annotations(doc): else: sent_starts.append(-1) if "brackets" in sent: - brackets.extend((b["first"] + sent_start_i, - b["last"] + sent_start_i, b["label"]) - for b in sent["brackets"]) + brackets.extend( + ( + b["first"] + sent_start_i, + b["last"] + sent_start_i, + b["label"] + ) + for b in sent["brackets"] + ) example["token_annotation"] = dict( ids=ids, @@ -158,6 +175,7 @@ def json_to_annotations(doc): ) yield example + def 
json_iterate(bytes utf8_str): # We should've made these files jsonl...But since we didn't, parse out # the docs one-by-one to reduce memory usage. diff --git a/spacy/training/initialize.py b/spacy/training/initialize.py index 9cf759c55..062170221 100644 --- a/spacy/training/initialize.py +++ b/spacy/training/initialize.py @@ -1,24 +1,33 @@ -from typing import Union, Dict, Optional, Any, IO, TYPE_CHECKING -from thinc.api import Config, fix_random_seed, set_gpu_allocator -from thinc.api import ConfigValidationError -from pathlib import Path -import srsly -import numpy -import tarfile import gzip -import zipfile -import tqdm -from itertools import islice +import tarfile import warnings +import zipfile +from itertools import islice +from pathlib import Path +from typing import IO, TYPE_CHECKING, Any, Dict, Optional, Union + +import numpy +import srsly +import tqdm +from thinc.api import Config, ConfigValidationError, fix_random_seed, set_gpu_allocator -from .pretrain import get_tok2vec_ref -from ..lookups import Lookups -from ..vectors import Vectors, Mode as VectorsMode from ..errors import Errors, Warnings +from ..lookups import Lookups from ..schemas import ConfigSchemaTraining -from ..util import registry, load_model_from_config, resolve_dot_names, logger -from ..util import load_model, ensure_path, get_sourced_components -from ..util import OOV_RANK, DEFAULT_OOV_PROB +from ..util import ( + DEFAULT_OOV_PROB, + OOV_RANK, + ensure_path, + get_sourced_components, + load_model, + load_model_from_config, + logger, + registry, + resolve_dot_names, +) +from ..vectors import Mode as VectorsMode +from ..vectors import Vectors +from .pretrain import get_tok2vec_ref if TYPE_CHECKING: from ..language import Language # noqa: F401 @@ -67,7 +76,8 @@ def init_nlp(config: Config, *, use_gpu: int = -1) -> "Language": with nlp.select_pipes(enable=resume_components): logger.info("Resuming training for: %s", resume_components) nlp.resume_training(sgd=optimizer) - # Make sure that listeners are defined before initializing further + # Make sure that internal component names are synced and listeners are + # defined before initializing further nlp._link_components() with nlp.select_pipes(disable=[*frozen_components, *resume_components]): if T["max_epochs"] == -1: @@ -206,9 +216,14 @@ def convert_vectors( prune: int, name: Optional[str] = None, mode: str = VectorsMode.default, + attr: str = "ORTH", ) -> None: vectors_loc = ensure_path(vectors_loc) if vectors_loc and vectors_loc.parts[-1].endswith(".npz"): + if attr != "ORTH": + raise ValueError( + "ORTH is the only attribute supported for vectors in .npz format." 
+ ) nlp.vocab.vectors = Vectors( strings=nlp.vocab.strings, data=numpy.load(vectors_loc.open("rb")) ) @@ -236,11 +251,15 @@ def convert_vectors( nlp.vocab.vectors = Vectors( strings=nlp.vocab.strings, data=vectors_data, + attr=attr, **floret_settings, ) else: nlp.vocab.vectors = Vectors( - strings=nlp.vocab.strings, data=vectors_data, keys=vector_keys + strings=nlp.vocab.strings, + data=vectors_data, + keys=vector_keys, + attr=attr, ) nlp.vocab.deduplicate_vectors() if name is None: @@ -283,7 +302,7 @@ def read_vectors( shape = (truncate_vectors, shape[1]) vectors_data = numpy.zeros(shape=shape, dtype="f") vectors_keys = [] - for i, line in enumerate(tqdm.tqdm(f)): + for i, line in enumerate(tqdm.tqdm(f, disable=None)): line = line.rstrip() pieces = line.rsplit(" ", vectors_data.shape[1]) word = pieces.pop(0) diff --git a/spacy/training/iob_utils.py b/spacy/training/iob_utils.py index 0d4d246b0..64d02a1e2 100644 --- a/spacy/training/iob_utils.py +++ b/spacy/training/iob_utils.py @@ -1,8 +1,8 @@ -from typing import List, Dict, Tuple, Iterable, Union, Iterator, cast import warnings +from typing import Dict, Iterable, Iterator, List, Tuple, Union, cast from ..errors import Errors, Warnings -from ..tokens import Span, Doc +from ..tokens import Doc, Span def iob_to_biluo(tags: Iterable[str]) -> List[str]: diff --git a/spacy/training/loggers.py b/spacy/training/loggers.py index 7de31822e..1ec0b7b25 100644 --- a/spacy/training/loggers.py +++ b/spacy/training/loggers.py @@ -1,13 +1,14 @@ -from typing import TYPE_CHECKING, Dict, Any, Tuple, Callable, List, Optional, IO, Union -from wasabi import Printer -from pathlib import Path -import tqdm import sys -import srsly +from pathlib import Path +from typing import IO, TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union + +import srsly +import tqdm +from wasabi import Printer -from ..util import registry -from ..errors import Errors from .. 
import util +from ..errors import Errors +from ..util import registry if TYPE_CHECKING: from ..language import Language # noqa: F401 diff --git a/spacy/training/loop.py b/spacy/training/loop.py index eca40e3d9..56df53957 100644 --- a/spacy/training/loop.py +++ b/spacy/training/loop.py @@ -1,17 +1,28 @@ -from typing import List, Callable, Tuple, Dict, Iterable, Union, Any, IO -from typing import Optional, TYPE_CHECKING +import random +import shutil +import sys from pathlib import Path from timeit import default_timer as timer -from thinc.api import Optimizer, Config, constant, fix_random_seed, set_gpu_allocator -from wasabi import Printer -import random -import sys -import shutil +from typing import ( + IO, + TYPE_CHECKING, + Any, + Callable, + Dict, + Iterable, + List, + Optional, + Tuple, + Union, +) + +from thinc.api import Config, Optimizer, constant, fix_random_seed, set_gpu_allocator +from wasabi import Printer -from .example import Example -from ..schemas import ConfigSchemaTraining from ..errors import Errors -from ..util import resolve_dot_names, registry, logger +from ..schemas import ConfigSchemaTraining +from ..util import logger, registry, resolve_dot_names +from .example import Example if TYPE_CHECKING: from ..language import Language # noqa: F401 diff --git a/spacy/training/pretrain.py b/spacy/training/pretrain.py index ebbc5d837..14a813a09 100644 --- a/spacy/training/pretrain.py +++ b/spacy/training/pretrain.py @@ -1,20 +1,26 @@ -from typing import Optional, Callable, Iterable, Union, List -from thinc.api import Config, fix_random_seed, set_gpu_allocator, Model, Optimizer -from thinc.api import set_dropout_rate -from pathlib import Path -from collections import Counter -import srsly -import time import re +import time +from collections import Counter +from pathlib import Path +from typing import Callable, Iterable, List, Optional, Union +import srsly +from thinc.api import ( + Config, + Model, + Optimizer, + fix_random_seed, + set_dropout_rate, + set_gpu_allocator, +) from thinc.config import ConfigValidationError from wasabi import Printer -from .example import Example from ..errors import Errors -from ..tokens import Doc from ..schemas import ConfigSchemaPretrain -from ..util import registry, load_model_from_config, dot_to_object +from ..tokens import Doc +from ..util import dot_to_object, load_model_from_config, registry +from .example import Example def pretrain( diff --git a/spacy/ty.py b/spacy/ty.py index 7e79a3d4d..f389456c0 100644 --- a/spacy/ty.py +++ b/spacy/ty.py @@ -1,13 +1,21 @@ -from typing import TYPE_CHECKING -from typing import Optional, Any, Iterable, Dict, Callable, Sequence, List +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Iterable, + List, + Optional, + Sequence, +) + +from thinc.api import Model, Optimizer from .compat import Protocol, runtime_checkable -from thinc.api import Optimizer, Model - if TYPE_CHECKING: - from .training import Example from .language import Language + from .training import Example @runtime_checkable diff --git a/spacy/typedefs.pxd b/spacy/typedefs.pxd index 8cdc70e42..72d4d99ac 100644 --- a/spacy/typedefs.pxd +++ b/spacy/typedefs.pxd @@ -1,6 +1,4 @@ -from libc.stdint cimport uint16_t, uint32_t, uint64_t, uintptr_t, int32_t -from libc.stdint cimport uint8_t - +from libc.stdint cimport int32_t, uint8_t, uint16_t, uint32_t, uint64_t, uintptr_t ctypedef float weight_t ctypedef uint64_t hash_t diff --git a/spacy/typedefs.pyx b/spacy/typedefs.pyx index e69de29bb..61bf62038 100644 --- a/spacy/typedefs.pyx +++ 
b/spacy/typedefs.pyx @@ -0,0 +1 @@ +# cython: profile=False diff --git a/spacy/util.py b/spacy/util.py index 8cc89217d..c127be03c 100644 --- a/spacy/util.py +++ b/spacy/util.py @@ -1,38 +1,62 @@ -from typing import List, Mapping, NoReturn, Union, Dict, Any, Set, cast -from typing import Optional, Iterable, Callable, Tuple, Type -from typing import Iterator, Pattern, Generator, TYPE_CHECKING -from types import ModuleType -import os +import functools import importlib import importlib.util -import re -from pathlib import Path -import thinc -from thinc.api import NumpyOps, get_current_ops, Adam, Config, Optimizer -from thinc.api import ConfigValidationError, Model -import functools -import itertools -import numpy -import srsly -import catalogue -from catalogue import RegistryError, Registry -import langcodes -import sys -import warnings -from packaging.specifiers import SpecifierSet, InvalidSpecifier -from packaging.version import Version, InvalidVersion -from packaging.requirements import Requirement -import subprocess -from contextlib import contextmanager -from collections import defaultdict -import tempfile -import shutil -import shlex import inspect -import pkgutil +import itertools import logging +import os +import pkgutil +import re +import shlex +import shutil import socket import stat +import subprocess +import sys +import tempfile +import warnings +from collections import defaultdict +from contextlib import contextmanager +from pathlib import Path +from types import ModuleType +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Generator, + Iterable, + Iterator, + List, + Mapping, + NoReturn, + Optional, + Pattern, + Set, + Tuple, + Type, + Union, + cast, +) + +import catalogue +import langcodes +import numpy +import srsly +import thinc +from catalogue import Registry, RegistryError +from packaging.requirements import Requirement +from packaging.specifiers import InvalidSpecifier, SpecifierSet +from packaging.version import InvalidVersion, Version +from thinc.api import ( + Adam, + Config, + ConfigValidationError, + Model, + NumpyOps, + Optimizer, + get_current_ops, +) try: import cupy.random @@ -43,13 +67,12 @@ except ImportError: # and have since moved to Thinc. We're importing them here so people's code # doesn't break, but they should always be imported from Thinc from now on, # not from spacy.util. -from thinc.api import fix_random_seed, compounding, decaying # noqa: F401 +from thinc.api import compounding, decaying, fix_random_seed # noqa: F401 - -from .symbols import ORTH -from .compat import cupy, CudaStream, is_windows, importlib_metadata -from .errors import Errors, Warnings, OLD_MODEL_SHORTCUTS from . import about +from .compat import CudaStream, cupy, importlib_metadata, is_windows +from .errors import OLD_MODEL_SHORTCUTS, Errors, Warnings +from .symbols import ORTH if TYPE_CHECKING: # This lets us add type hints for mypy etc. 
without causing circular imports @@ -78,7 +101,6 @@ logger.addHandler(logger_stream_handler) class ENV_VARS: CONFIG_OVERRIDES = "SPACY_CONFIG_OVERRIDES" - PROJECT_USE_GIT_VERSION = "SPACY_PROJECT_USE_GIT_VERSION" class registry(thinc.registry): @@ -96,6 +118,7 @@ class registry(thinc.registry): augmenters = catalogue.create("spacy", "augmenters", entry_points=True) loggers = catalogue.create("spacy", "loggers", entry_points=True) scorers = catalogue.create("spacy", "scorers", entry_points=True) + vectors = catalogue.create("spacy", "vectors", entry_points=True) # These are factories registered via third-party packages and the # spacy_factories entry point. This registry only exists so we can easily # load them via the entry points. The "true" factories are added via the @@ -511,7 +534,7 @@ def load_model_from_path( if not meta: meta = get_model_meta(model_path) config_path = model_path / "config.cfg" - overrides = dict_to_dot(config) + overrides = dict_to_dot(config, for_overrides=True) config = load_config(config_path, overrides=overrides) nlp = load_model_from_config( config, @@ -871,7 +894,7 @@ def load_meta(path: Union[str, Path]) -> Dict[str, Any]: if "spacy_version" in meta: if not is_compatible_version(about.__version__, meta["spacy_version"]): lower_version = get_model_lower_version(meta["spacy_version"]) - lower_version = get_minor_version(lower_version) # type: ignore[arg-type] + lower_version = get_base_version(lower_version) # type: ignore[arg-type] if lower_version is not None: lower_version = "v" + lower_version elif "spacy_git_version" in meta: @@ -951,23 +974,12 @@ def replace_model_node(model: Model, target: Model, replacement: Model) -> None: def split_command(command: str) -> List[str]: """Split a string command using shlex. Handles platform compatibility. - command (str) : The command to split RETURNS (List[str]): The split command. """ return shlex.split(command, posix=not is_windows) -def join_command(command: List[str]) -> str: - """Join a command using shlex. shlex.join is only available for Python 3.8+, - so we're using a workaround here. - - command (List[str]): The command to join. - RETURNS (str): The joined command - """ - return " ".join(shlex.quote(cmd) for cmd in command) - - def run_command( command: Union[str, List[str]], *, @@ -976,7 +988,6 @@ def run_command( ) -> subprocess.CompletedProcess: """Run a command on the command line as a subprocess. If the subprocess returns a non-zero exit code, a system exit is performed. - command (str / List[str]): The command. If provided as a string, the string will be split using shlex.split. stdin (Optional[Any]): stdin to read from or None. @@ -1027,7 +1038,6 @@ def run_command( @contextmanager def working_dir(path: Union[str, Path]) -> Iterator[Path]: """Change current working directory and returns to previous on exit. - path (str / Path): The directory to navigate to. YIELDS (Path): The absolute path to the current working directory. This should be used if the block needs to perform actions within the working @@ -1046,7 +1056,6 @@ def working_dir(path: Union[str, Path]) -> Iterator[Path]: def make_tempdir() -> Generator[Path, None, None]: """Execute a block in a temporary directory and remove the directory and its contents at the end of the with block. - YIELDS (Path): The path of the temp directory. 
""" d = Path(tempfile.mkdtemp()) @@ -1059,35 +1068,47 @@ def make_tempdir() -> Generator[Path, None, None]: rmfunc(path) try: - shutil.rmtree(str(d), onerror=force_remove) + if sys.version_info >= (3, 12): + shutil.rmtree(str(d), onexc=force_remove) + else: + shutil.rmtree(str(d), onerror=force_remove) except PermissionError as e: warnings.warn(Warnings.W091.format(dir=d, msg=e)) -def is_cwd(path: Union[Path, str]) -> bool: - """Check whether a path is the current working directory. - - path (Union[Path, str]): The directory path. - RETURNS (bool): Whether the path is the current working directory. - """ - return str(Path(path).resolve()).lower() == str(Path.cwd().resolve()).lower() - - def is_in_jupyter() -> bool: - """Check if user is running spaCy from a Jupyter notebook by detecting the - IPython kernel. Mainly used for the displaCy visualizer. - RETURNS (bool): True if in Jupyter, False if not. + """Check if user is running spaCy from a Jupyter or Colab notebook by + detecting the IPython kernel. Mainly used for the displaCy visualizer. + RETURNS (bool): True if in Jupyter/Colab, False if not. """ # https://stackoverflow.com/a/39662359/6400719 + # https://stackoverflow.com/questions/15411967 try: - shell = get_ipython().__class__.__name__ # type: ignore[name-defined] - if shell == "ZMQInteractiveShell": + if get_ipython().__class__.__name__ == "ZMQInteractiveShell": # type: ignore[name-defined] return True # Jupyter notebook or qtconsole + if get_ipython().__class__.__module__ == "google.colab._shell": # type: ignore[name-defined] + return True # Colab notebook except NameError: - return False # Probably standard Python interpreter + pass # Probably standard Python interpreter + # additional check for Colab + try: + import google.colab + + return True # Colab notebook + except ImportError: + pass return False +def is_in_interactive() -> bool: + """Check if user is running spaCy from an interactive Python + shell. Will return True in Jupyter notebooks too. + RETURNS (bool): True if in interactive mode, False if not. + """ + # https://stackoverflow.com/questions/2356399/tell-if-python-is-in-interactive-mode + return hasattr(sys, "ps1") or hasattr(sys, "ps2") + + def get_object_name(obj: Any) -> str: """Get a human-readable name of a Python object, e.g. a pipeline component. @@ -1479,14 +1500,19 @@ def dot_to_dict(values: Dict[str, Any]) -> Dict[str, dict]: return result -def dict_to_dot(obj: Dict[str, dict]) -> Dict[str, Any]: +def dict_to_dot(obj: Dict[str, dict], *, for_overrides: bool = False) -> Dict[str, Any]: """Convert dot notation to a dict. For example: {"token": {"pos": True, "_": {"xyz": True }}} becomes {"token.pos": True, "token._.xyz": True}. - values (Dict[str, dict]): The dict to convert. + obj (Dict[str, dict]): The dict to convert. + for_overrides (bool): Whether to enable special handling for registered + functions in overrides. RETURNS (Dict[str, Any]): The key/value pairs. 
""" - return {".".join(key): value for key, value in walk_dict(obj)} + return { + ".".join(key): value + for key, value in walk_dict(obj, for_overrides=for_overrides) + } def dot_to_object(config: Config, section: str): @@ -1528,13 +1554,20 @@ def set_dot_to_object(config: Config, section: str, value: Any) -> None: def walk_dict( - node: Dict[str, Any], parent: List[str] = [] + node: Dict[str, Any], parent: List[str] = [], *, for_overrides: bool = False ) -> Iterator[Tuple[List[str], Any]]: - """Walk a dict and yield the path and values of the leaves.""" + """Walk a dict and yield the path and values of the leaves. + + for_overrides (bool): Whether to treat registered functions that start with + @ as final values rather than dicts to traverse. + """ for key, value in node.items(): key_parent = [*parent, key] - if isinstance(value, dict): - yield from walk_dict(value, key_parent) + if isinstance(value, dict) and ( + not for_overrides + or not any(value_key.startswith("@") for value_key in value) + ): + yield from walk_dict(value, key_parent, for_overrides=for_overrides) else: yield (key_parent, value) diff --git a/spacy/vectors.pyx b/spacy/vectors.pyx index be0f6db09..6ff99bb59 100644 --- a/spacy/vectors.pyx +++ b/spacy/vectors.pyx @@ -1,24 +1,32 @@ -cimport numpy as np -from libc.stdint cimport uint32_t, uint64_t +# cython: infer_types=True, binding=True +from typing import Callable + from cython.operator cimport dereference as deref +from libc.stdint cimport uint32_t, uint64_t from libcpp.set cimport set as cppset from murmurhash.mrmr cimport hash128_x64 -import functools -import numpy -from typing import cast import warnings from enum import Enum +from pathlib import Path +from typing import TYPE_CHECKING, Union, cast + +import numpy import srsly from thinc.api import Ops, get_array_module, get_current_ops from thinc.backends import get_array_ops from thinc.types import Floats2d +from .attrs cimport ORTH, attr_id_t from .strings cimport StringStore -from .strings import get_string_id -from .errors import Errors, Warnings from . import util +from .attrs import IDS +from .errors import Errors, Warnings +from .strings import get_string_id + +if TYPE_CHECKING: + from .vocab import Vocab # noqa: F401 # no-cython-lint def unpickle_vectors(bytes_data): @@ -34,7 +42,71 @@ class Mode(str, Enum): return list(cls.__members__.keys()) -cdef class Vectors: +cdef class BaseVectors: + def __init__(self, *, strings=None): + # Make sure abstract BaseVectors is not instantiated. 
+ if self.__class__ == BaseVectors: + raise TypeError( + Errors.E1046.format(cls_name=self.__class__.__name__) + ) + + def __getitem__(self, key): + raise NotImplementedError + + def __contains__(self, key): + raise NotImplementedError + + def is_full(self): + raise NotImplementedError + + def get_batch(self, keys): + raise NotImplementedError + + @property + def shape(self): + raise NotImplementedError + + def __len__(self): + raise NotImplementedError + + @property + def vectors_length(self): + raise NotImplementedError + + @property + def size(self): + raise NotImplementedError + + def add(self, key, *, vector=None): + raise NotImplementedError + + def to_ops(self, ops: Ops): + pass + + # add dummy methods for to_bytes, from_bytes, to_disk and from_disk to + # allow serialization + def to_bytes(self, **kwargs): + return b"" + + def from_bytes(self, data: bytes, **kwargs): + return self + + def to_disk(self, path: Union[str, Path], **kwargs): + return None + + def from_disk(self, path: Union[str, Path], **kwargs): + return self + + +@util.registry.vectors("spacy.Vectors.v1") +def create_mode_vectors() -> Callable[["Vocab"], BaseVectors]: + def vectors_factory(vocab: "Vocab") -> BaseVectors: + return Vectors(strings=vocab.strings) + + return vectors_factory + + +cdef class Vectors(BaseVectors): """Store, save and load word vectors. Vectors data is kept in the vectors.data attribute, which should be an @@ -63,8 +135,9 @@ cdef class Vectors: cdef readonly uint32_t hash_seed cdef readonly unicode bow cdef readonly unicode eow + cdef readonly attr_id_t attr - def __init__(self, *, strings=None, shape=None, data=None, keys=None, name=None, mode=Mode.default, minn=0, maxn=0, hash_count=1, hash_seed=0, bow="<", eow=">"): + def __init__(self, *, strings=None, shape=None, data=None, keys=None, name=None, mode=Mode.default, minn=0, maxn=0, hash_count=1, hash_seed=0, bow="<", eow=">", attr="ORTH"): """Create a new vector store. strings (StringStore): The string store. @@ -79,6 +152,8 @@ cdef class Vectors: hash_seed (int): The floret hash seed (default: 0). bow (str): The floret BOW string (default: "<"). eow (str): The floret EOW string (default: ">"). + attr (Union[int, str]): The token attribute for the vector keys + (default: "ORTH"). DOCS: https://spacy.io/api/vectors#init """ @@ -102,10 +177,18 @@ cdef class Vectors: self.hash_seed = hash_seed self.bow = bow self.eow = eow + if isinstance(attr, (int, long)): + self.attr = attr + else: + attr = attr.upper() + if attr == "TEXT": + attr = "ORTH" + self.attr = IDS.get(attr, ORTH) + if self.mode == Mode.default: if data is None: if shape is None: - shape = (0,0) + shape = (0, 0) ops = get_current_ops() data = ops.xp.zeros(shape, dtype="f") self._unset = cppset[int]({i for i in range(data.shape[0])}) @@ -246,11 +329,10 @@ cdef class Vectors: def __eq__(self, other): # Check for equality, with faster checks first return ( - self.shape == other.shape - and self.key2row == other.key2row - and self.to_bytes(exclude=["strings"]) - == other.to_bytes(exclude=["strings"]) - ) + self.shape == other.shape + and self.key2row == other.key2row + and self.to_bytes(exclude=["strings"]) == other.to_bytes(exclude=["strings"]) + ) def resize(self, shape, inplace=False): """Resize the underlying vectors array. If inplace=True, the memory @@ -506,11 +588,12 @@ cdef class Vectors: # vectors e.g. (10000, 300) # sims e.g. 
(1024, 10000) sims = xp.dot(batch, vectors.T) - best_rows[i:i+batch_size] = xp.argpartition(sims, -n, axis=1)[:,-n:] - scores[i:i+batch_size] = xp.partition(sims, -n, axis=1)[:,-n:] + best_rows[i:i+batch_size] = xp.argpartition(sims, -n, axis=1)[:, -n:] + scores[i:i+batch_size] = xp.partition(sims, -n, axis=1)[:, -n:] if sort and n >= 2: - sorted_index = xp.arange(scores.shape[0])[:,None][i:i+batch_size],xp.argsort(scores[i:i+batch_size], axis=1)[:,::-1] + sorted_index = xp.arange(scores.shape[0])[:, None][i:i+batch_size], \ + xp.argsort(scores[i:i+batch_size], axis=1)[:, ::-1] scores[i:i+batch_size] = scores[sorted_index] best_rows[i:i+batch_size] = best_rows[sorted_index] @@ -524,8 +607,12 @@ cdef class Vectors: numpy_rows = get_current_ops().to_numpy(best_rows) keys = xp.asarray( - [[row2key[row] for row in numpy_rows[i] if row in row2key] - for i in range(len(queries)) ], dtype="uint64") + [ + [row2key[row] for row in numpy_rows[i] if row in row2key] + for i in range(len(queries)) + ], + dtype="uint64" + ) return (keys, best_rows, scores) def to_ops(self, ops: Ops): @@ -545,6 +632,7 @@ cdef class Vectors: "hash_seed": self.hash_seed, "bow": self.bow, "eow": self.eow, + "attr": self.attr, } def _set_cfg(self, cfg): @@ -555,6 +643,7 @@ cdef class Vectors: self.hash_seed = cfg.get("hash_seed", 0) self.bow = cfg.get("bow", "<") self.eow = cfg.get("eow", ">") + self.attr = cfg.get("attr", ORTH) def to_disk(self, path, *, exclude=tuple()): """Save the current state to a directory. @@ -566,9 +655,9 @@ cdef class Vectors: """ xp = get_array_module(self.data) if xp is numpy: - save_array = lambda arr, file_: xp.save(file_, arr, allow_pickle=False) + save_array = lambda arr, file_: xp.save(file_, arr, allow_pickle=False) # no-cython-lint else: - save_array = lambda arr, file_: xp.save(file_, arr) + save_array = lambda arr, file_: xp.save(file_, arr) # no-cython-lint def save_vectors(path): # the source of numpy.save indicates that the file object is closed after use. diff --git a/spacy/vocab.pxd b/spacy/vocab.pxd index 9c951b2b7..43e47af1d 100644 --- a/spacy/vocab.pxd +++ b/spacy/vocab.pxd @@ -1,12 +1,12 @@ -from libcpp.vector cimport vector -from preshed.maps cimport PreshMap from cymem.cymem cimport Pool +from libcpp.vector cimport vector from murmurhash.mrmr cimport hash64 +from preshed.maps cimport PreshMap +from .morphology cimport Morphology +from .strings cimport StringStore from .structs cimport LexemeC, TokenC from .typedefs cimport attr_t, hash_t -from .strings cimport StringStore -from .morphology cimport Morphology cdef LexemeC EMPTY_LEXEME @@ -32,7 +32,7 @@ cdef class Vocab: cdef public object writing_system cdef public object get_noun_chunks cdef readonly int length - cdef public object _unused_object # TODO remove in v4, see #9150 + cdef public object _unused_object # TODO remove in v4, see #9150 cdef public object lex_attr_getters cdef public object cfg diff --git a/spacy/vocab.pyi b/spacy/vocab.pyi index 4cc359c47..b7ff20348 100644 --- a/spacy/vocab.pyi +++ b/spacy/vocab.pyi @@ -1,14 +1,15 @@ -from typing import Callable, Iterator, Optional, Union, List, Dict -from typing import Any, Iterable +from pathlib import Path +from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Union + from thinc.types import Floats1d, FloatsXd + from . 
import Language -from .strings import StringStore from .lexeme import Lexeme from .lookups import Lookups from .morphology import Morphology +from .strings import StringStore from .tokens import Doc, Span from .vectors import Vectors -from pathlib import Path def create_vocab( lang: Optional[str], defaults: Any, vectors_name: Optional[str] = ... diff --git a/spacy/vocab.pyx b/spacy/vocab.pyx index 27f8e5f98..19e6eb005 100644 --- a/spacy/vocab.pyx +++ b/spacy/vocab.pyx @@ -1,26 +1,23 @@ -# cython: profile=True -from libc.string cimport memcpy +import functools import numpy import srsly from thinc.api import get_array_module, get_current_ops -import functools -from .lexeme cimport EMPTY_LEXEME, OOV_RANK -from .lexeme cimport Lexeme -from .typedefs cimport attr_t -from .tokens.token cimport Token from .attrs cimport LANG, ORTH +from .lexeme cimport EMPTY_LEXEME, OOV_RANK, Lexeme +from .tokens.token cimport Token +from .typedefs cimport attr_t +from . import util +from .attrs import IS_STOP, NORM, intify_attrs from .compat import copy_reg from .errors import Errors -from .attrs import intify_attrs, NORM, IS_STOP -from .vectors import Vectors, Mode as VectorsMode -from .util import registry -from .lookups import Lookups -from . import util +from .lang.lex_attrs import LEX_ATTRS, get_lang, is_stop from .lang.norm_exceptions import BASE_NORMS -from .lang.lex_attrs import LEX_ATTRS, is_stop, get_lang +from .lookups import Lookups +from .vectors import Mode as VectorsMode +from .vectors import Vectors def create_vocab(lang, defaults, vectors_name=None): @@ -50,9 +47,17 @@ cdef class Vocab: DOCS: https://spacy.io/api/vocab """ - def __init__(self, lex_attr_getters=None, strings=tuple(), lookups=None, - oov_prob=-20., vectors_name=None, writing_system={}, - get_noun_chunks=None, **deprecated_kwargs): + def __init__( + self, + lex_attr_getters=None, + strings=tuple(), + lookups=None, + oov_prob=-20., + vectors_name=None, + writing_system={}, # no-cython-lint + get_noun_chunks=None, + **deprecated_kwargs + ): """Create the vocabulary. lex_attr_getters (dict): A dictionary mapping attribute IDs to @@ -83,15 +88,17 @@ cdef class Vocab: self.writing_system = writing_system self.get_noun_chunks = get_noun_chunks - property vectors: - def __get__(self): - return self._vectors + @property + def vectors(self): + return self._vectors - def __set__(self, vectors): + @vectors.setter + def vectors(self, vectors): + if hasattr(vectors, "strings"): for s in vectors.strings: self.strings.add(s) - self._vectors = vectors - self._vectors.strings = self.strings + self._vectors = vectors + self._vectors.strings = self.strings @property def lang(self): @@ -149,7 +156,6 @@ cdef class Vocab: cdef LexemeC* lex cdef hash_t key = self.strings[string] lex = self._by_orth.get(key) - cdef size_t addr if lex != NULL: assert lex.orth in self.strings if lex.orth != key: @@ -182,13 +188,13 @@ cdef class Vocab: # of the doc ownership). # TODO: Change the C API so that the mem isn't passed in here. 
mem = self.mem - #if len(string) < 3 or self.length < 10000: + # if len(string) < 3 or self.length < 10000: # mem = self.mem cdef bint is_oov = mem is not self.mem lex = mem.alloc(1, sizeof(LexemeC)) lex.orth = self.strings.add(string) lex.length = len(string) - if self.vectors is not None: + if self.vectors is not None and hasattr(self.vectors, "key2row"): lex.id = self.vectors.key2row.get(lex.orth, OOV_RANK) else: lex.id = OOV_RANK @@ -284,12 +290,17 @@ cdef class Vocab: @property def vectors_length(self): - return self.vectors.shape[1] + if hasattr(self.vectors, "shape"): + return self.vectors.shape[1] + else: + return -1 def reset_vectors(self, *, width=None, shape=None): """Drop the current vector table. Because all vectors must be the same width, you have to call this to change the size of the vectors. """ + if not isinstance(self.vectors, Vectors): + raise ValueError(Errors.E849.format(method="reset_vectors", vectors_type=type(self.vectors))) if width is not None and shape is not None: raise ValueError(Errors.E065.format(width=width, shape=shape)) elif shape is not None: @@ -299,6 +310,8 @@ cdef class Vocab: self.vectors = Vectors(strings=self.strings, shape=(self.vectors.shape[0], width)) def deduplicate_vectors(self): + if not isinstance(self.vectors, Vectors): + raise ValueError(Errors.E849.format(method="deduplicate_vectors", vectors_type=type(self.vectors))) if self.vectors.mode != VectorsMode.default: raise ValueError(Errors.E858.format( mode=self.vectors.mode, @@ -352,6 +365,8 @@ cdef class Vocab: DOCS: https://spacy.io/api/vocab#prune_vectors """ + if not isinstance(self.vectors, Vectors): + raise ValueError(Errors.E849.format(method="prune_vectors", vectors_type=type(self.vectors))) if self.vectors.mode != VectorsMode.default: raise ValueError(Errors.E858.format( mode=self.vectors.mode, @@ -364,8 +379,13 @@ cdef class Vocab: self[orth] # Make prob negative so it sorts by rank ascending # (key2row contains the rank) - priority = [(-lex.prob, self.vectors.key2row[lex.orth], lex.orth) - for lex in self if lex.orth in self.vectors.key2row] + priority = [] + cdef Lexeme lex + cdef attr_t value + for lex in self: + value = Lexeme.get_struct_attr(lex.c, self.vectors.attr) + if value in self.vectors.key2row: + priority.append((-lex.prob, self.vectors.key2row[value], value)) priority.sort() indices = xp.asarray([i for (prob, i, key) in priority], dtype="uint64") keys = xp.asarray([key for (prob, i, key) in priority], dtype="uint64") @@ -398,8 +418,10 @@ cdef class Vocab: """ if isinstance(orth, str): orth = self.strings.add(orth) - if self.has_vector(orth): - return self.vectors[orth] + cdef Lexeme lex = self[orth] + key = Lexeme.get_struct_attr(lex.c, self.vectors.attr) + if self.has_vector(key): + return self.vectors[key] xp = get_array_module(self.vectors.data) vectors = xp.zeros((self.vectors_length,), dtype="f") return vectors @@ -415,15 +437,16 @@ cdef class Vocab: """ if isinstance(orth, str): orth = self.strings.add(orth) - if self.vectors.is_full and orth not in self.vectors: + cdef Lexeme lex = self[orth] + key = Lexeme.get_struct_attr(lex.c, self.vectors.attr) + if self.vectors.is_full and key not in self.vectors: new_rows = max(100, int(self.vectors.shape[0]*1.3)) if self.vectors.shape[1] == 0: width = vector.size else: width = self.vectors.shape[1] self.vectors.resize((new_rows, width)) - lex = self[orth] # Add word to vocab if necessary - row = self.vectors.add(orth, vector=vector) + row = self.vectors.add(key, vector=vector) if row >= 0: lex.rank = row @@ -438,20 
+461,22 @@ cdef class Vocab: """ if isinstance(orth, str): orth = self.strings.add(orth) - return orth in self.vectors + cdef Lexeme lex = self[orth] + key = Lexeme.get_struct_attr(lex.c, self.vectors.attr) + return key in self.vectors - property lookups: - def __get__(self): - return self._lookups - - def __set__(self, lookups): - self._lookups = lookups - if lookups.has_table("lexeme_norm"): - self.lex_attr_getters[NORM] = util.add_lookups( - self.lex_attr_getters.get(NORM, LEX_ATTRS[NORM]), - self.lookups.get_table("lexeme_norm"), - ) + @property + def lookups(self): + return self._lookups + @lookups.setter + def lookups(self, lookups): + self._lookups = lookups + if lookups.has_table("lexeme_norm"): + self.lex_attr_getters[NORM] = util.add_lookups( + self.lex_attr_getters.get(NORM, LEX_ATTRS[NORM]), + self.lookups.get_table("lexeme_norm"), + ) def to_disk(self, path, *, exclude=tuple()): """Save the current state to a directory. @@ -465,7 +490,6 @@ cdef class Vocab: path = util.ensure_path(path) if not path.exists(): path.mkdir() - setters = ["strings", "vectors"] if "strings" not in exclude: self.strings.to_disk(path / "strings.json") if "vectors" not in exclude: @@ -484,7 +508,6 @@ cdef class Vocab: DOCS: https://spacy.io/api/vocab#to_disk """ path = util.ensure_path(path) - getters = ["strings", "vectors"] if "strings" not in exclude: self.strings.from_disk(path / "strings.json") # TODO: add exclude? if "vectors" not in exclude: diff --git a/website/docs/api/architectures.mdx b/website/docs/api/architectures.mdx index 268c04a07..956234ac0 100644 --- a/website/docs/api/architectures.mdx +++ b/website/docs/api/architectures.mdx @@ -78,16 +78,16 @@ subword features, and a [MaxoutWindowEncoder](/api/architectures#MaxoutWindowEncoder) encoding layer consisting of a CNN and a layer-normalized maxout activation function. -| Name | Description | -| -------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `width` | The width of the input and output. These are required to be the same, so that residual connections can be used. Recommended values are `96`, `128` or `300`. ~~int~~ | -| `depth` | The number of convolutional layers to use. Recommended values are between `2` and `8`. ~~int~~ | -| `embed_size` | The number of rows in the hash embedding tables. This can be surprisingly small, due to the use of the hash embeddings. Recommended values are between `2000` and `10000`. ~~int~~ | -| `window_size` | The number of tokens on either side to concatenate during the convolutions. The receptive field of the CNN will be `depth * (window_size * 2 + 1)`, so a 4-layer network with a window size of `2` will be sensitive to 20 words at a time. Recommended value is `1`. ~~int~~ | -| `maxout_pieces` | The number of pieces to use in the maxout non-linearity. If `1`, the [`Mish`](https://thinc.ai/docs/api-layers#mish) non-linearity is used instead. Recommended values are `1`-`3`. ~~int~~ | -| `subword_features` | Whether to also embed subword features, specifically the prefix, suffix and word shape. This is recommended for alphabetic languages like English, but not if single-character tokens are used for a language such as Chinese. ~~bool~~ | -| `pretrained_vectors` | Whether to also use static vectors. ~~bool~~ | -| **CREATES** | The model using the architecture. 
~~Model[List[Doc], List[Floats2d]]~~ | ### spacy.Tok2VecListener.v1 {id="Tok2VecListener"} @@ -303,7 +303,7 @@ mapped to a zero vector. See the documentation on | `nM` | The width of the static vectors. ~~Optional[int]~~ | | `dropout` | Optional dropout rate. If set, it's applied per dimension over the whole batch. Defaults to `None`. ~~Optional[float]~~ | | `init_W` | The [initialization function](https://thinc.ai/docs/api-initializers). Defaults to [`glorot_uniform_init`](https://thinc.ai/docs/api-initializers#glorot_uniform_init). ~~Callable[[Ops, Tuple[int, ...]]], FloatsXd]~~ | -| `key_attr` | Defaults to `"ORTH"`. ~~str~~ | +| `key_attr` | This setting is ignored in spaCy v3.6+. To set a custom key attribute for vectors, configure it through [`Vectors`](/api/vectors) or [`spacy init vectors`](/api/cli#init-vectors). Defaults to `"ORTH"`. ~~str~~ | | **CREATES** | The model using the architecture. ~~Model[List[Doc], Ragged]~~ | ### spacy.FeatureExtractor.v1 {id="FeatureExtractor"} @@ -481,6 +481,286 @@ The other arguments are shared between all versions. +## Curated Transformer architectures {id="curated-trf",source="https://github.com/explosion/spacy-curated-transformers/blob/main/spacy_curated_transformers/models/architectures.py"} + +The following architectures are provided by the package +[`spacy-curated-transformers`](https://github.com/explosion/spacy-curated-transformers). +See the [usage documentation](/usage/embeddings-transformers#transformers) for +how to integrate the architectures into your training config. + +When loading the model +[from the Hugging Face Hub](/api/curatedtransformer#hf_trfencoder_loader), the +model config's parameters must be the same as the hyperparameters used by the pre-trained model. 
The +[`init fill-curated-transformer`](/api/cli#init-fill-curated-transformer) CLI +command can be used to automatically fill in these values. + +### spacy-curated-transformers.AlbertTransformer.v1 + +Construct an ALBERT transformer model. + +| Name | Description | +| ------------------------------ | ---------------------------------------------------------------------------------------- | +| `vocab_size` | Vocabulary size. ~~int~~ | +| `with_spans` | Callback that constructs a span generator model. ~~Callable~~ | +| `piece_encoder` | The piece encoder to segment input tokens. ~~Model~~ | +| `attention_probs_dropout_prob` | Dropout probability of the self-attention layers. ~~float~~ | +| `embedding_width` | Width of the embedding representations. ~~int~~ | +| `hidden_act` | Activation used by the point-wise feed-forward layers. ~~str~~ | +| `hidden_dropout_prob` | Dropout probability of the point-wise feed-forward and embedding layers. ~~float~~ | +| `hidden_width` | Width of the final representations. ~~int~~ | +| `intermediate_width` | Width of the intermediate projection layer in the point-wise feed-forward layer. ~~int~~ | +| `layer_norm_eps` | Epsilon for layer normalization. ~~float~~ | +| `max_position_embeddings` | Maximum length of position embeddings. ~~int~~ | +| `model_max_length` | Maximum length of model inputs. ~~int~~ | +| `num_attention_heads` | Number of self-attention heads. ~~int~~ | +| `num_hidden_groups` | Number of layer groups whose constituents share parameters. ~~int~~ | +| `num_hidden_layers` | Number of hidden layers. ~~int~~ | +| `padding_idx` | Index of the padding meta-token. ~~int~~ | +| `type_vocab_size` | Type vocabulary size. ~~int~~ | +| `mixed_precision` | Use mixed-precision training. ~~bool~~ | +| `grad_scaler_config` | Configuration passed to the PyTorch gradient scaler. ~~dict~~ | +| **CREATES** | The model using the architecture ~~Model~~ | + +### spacy-curated-transformers.BertTransformer.v1 + +Construct a BERT transformer model. + +| Name | Description | +| ------------------------------ | ---------------------------------------------------------------------------------------- | +| `vocab_size` | Vocabulary size. ~~int~~ | +| `with_spans` | Callback that constructs a span generator model. ~~Callable~~ | +| `piece_encoder` | The piece encoder to segment input tokens. ~~Model~~ | +| `attention_probs_dropout_prob` | Dropout probability of the self-attention layers. ~~float~~ | +| `hidden_act` | Activation used by the point-wise feed-forward layers. ~~str~~ | +| `hidden_dropout_prob` | Dropout probability of the point-wise feed-forward and embedding layers. ~~float~~ | +| `hidden_width` | Width of the final representations. ~~int~~ | +| `intermediate_width` | Width of the intermediate projection layer in the point-wise feed-forward layer. ~~int~~ | +| `layer_norm_eps` | Epsilon for layer normalization. ~~float~~ | +| `max_position_embeddings` | Maximum length of position embeddings. ~~int~~ | +| `model_max_length` | Maximum length of model inputs. ~~int~~ | +| `num_attention_heads` | Number of self-attention heads. ~~int~~ | +| `num_hidden_layers` | Number of hidden layers. ~~int~~ | +| `padding_idx` | Index of the padding meta-token. ~~int~~ | +| `type_vocab_size` | Type vocabulary size. ~~int~~ | +| `mixed_precision` | Use mixed-precision training. ~~bool~~ | +| `grad_scaler_config` | Configuration passed to the PyTorch gradient scaler. 
~~dict~~ | +| **CREATES** | The model using the architecture ~~Model~~ | + +### spacy-curated-transformers.CamembertTransformer.v1 + +Construct a CamemBERT transformer model. + +| Name | Description | +| ------------------------------ | ---------------------------------------------------------------------------------------- | +| `vocab_size` | Vocabulary size. ~~int~~ | +| `with_spans` | Callback that constructs a span generator model. ~~Callable~~ | +| `piece_encoder` | The piece encoder to segment input tokens. ~~Model~~ | +| `attention_probs_dropout_prob` | Dropout probability of the self-attention layers. ~~float~~ | +| `hidden_act` | Activation used by the point-wise feed-forward layers. ~~str~~ | +| `hidden_dropout_prob` | Dropout probability of the point-wise feed-forward and embedding layers. ~~float~~ | +| `hidden_width` | Width of the final representations. ~~int~~ | +| `intermediate_width` | Width of the intermediate projection layer in the point-wise feed-forward layer. ~~int~~ | +| `layer_norm_eps` | Epsilon for layer normalization. ~~float~~ | +| `max_position_embeddings` | Maximum length of position embeddings. ~~int~~ | +| `model_max_length` | Maximum length of model inputs. ~~int~~ | +| `num_attention_heads` | Number of self-attention heads. ~~int~~ | +| `num_hidden_layers` | Number of hidden layers. ~~int~~ | +| `padding_idx` | Index of the padding meta-token. ~~int~~ | +| `type_vocab_size` | Type vocabulary size. ~~int~~ | +| `mixed_precision` | Use mixed-precision training. ~~bool~~ | +| `grad_scaler_config` | Configuration passed to the PyTorch gradient scaler. ~~dict~~ | +| **CREATES** | The model using the architecture ~~Model~~ | + +### spacy-curated-transformers.RobertaTransformer.v1 + +Construct a RoBERTa transformer model. + +| Name | Description | +| ------------------------------ | ---------------------------------------------------------------------------------------- | +| `vocab_size` | Vocabulary size. ~~int~~ | +| `with_spans` | Callback that constructs a span generator model. ~~Callable~~ | +| `piece_encoder` | The piece encoder to segment input tokens. ~~Model~~ | +| `attention_probs_dropout_prob` | Dropout probability of the self-attention layers. ~~float~~ | +| `hidden_act` | Activation used by the point-wise feed-forward layers. ~~str~~ | +| `hidden_dropout_prob` | Dropout probability of the point-wise feed-forward and embedding layers. ~~float~~ | +| `hidden_width` | Width of the final representations. ~~int~~ | +| `intermediate_width` | Width of the intermediate projection layer in the point-wise feed-forward layer. ~~int~~ | +| `layer_norm_eps` | Epsilon for layer normalization. ~~float~~ | +| `max_position_embeddings` | Maximum length of position embeddings. ~~int~~ | +| `model_max_length` | Maximum length of model inputs. ~~int~~ | +| `num_attention_heads` | Number of self-attention heads. ~~int~~ | +| `num_hidden_layers` | Number of hidden layers. ~~int~~ | +| `padding_idx` | Index of the padding meta-token. ~~int~~ | +| `type_vocab_size` | Type vocabulary size. ~~int~~ | +| `mixed_precision` | Use mixed-precision training. ~~bool~~ | +| `grad_scaler_config` | Configuration passed to the PyTorch gradient scaler. ~~dict~~ | +| **CREATES** | The model using the architecture ~~Model~~ | + +### spacy-curated-transformers.XlmrTransformer.v1 + +Construct a XLM-RoBERTa transformer model. 
+ +| Name | Description | +| ------------------------------ | ---------------------------------------------------------------------------------------- | +| `vocab_size` | Vocabulary size. ~~int~~ | +| `with_spans` | Callback that constructs a span generator model. ~~Callable~~ | +| `piece_encoder` | The piece encoder to segment input tokens. ~~Model~~ | +| `attention_probs_dropout_prob` | Dropout probability of the self-attention layers. ~~float~~ | +| `hidden_act` | Activation used by the point-wise feed-forward layers. ~~str~~ | +| `hidden_dropout_prob` | Dropout probability of the point-wise feed-forward and embedding layers. ~~float~~ | +| `hidden_width` | Width of the final representations. ~~int~~ | +| `intermediate_width` | Width of the intermediate projection layer in the point-wise feed-forward layer. ~~int~~ | +| `layer_norm_eps` | Epsilon for layer normalization. ~~float~~ | +| `max_position_embeddings` | Maximum length of position embeddings. ~~int~~ | +| `model_max_length` | Maximum length of model inputs. ~~int~~ | +| `num_attention_heads` | Number of self-attention heads. ~~int~~ | +| `num_hidden_layers` | Number of hidden layers. ~~int~~ | +| `padding_idx` | Index of the padding meta-token. ~~int~~ | +| `type_vocab_size` | Type vocabulary size. ~~int~~ | +| `mixed_precision` | Use mixed-precision training. ~~bool~~ | +| `grad_scaler_config` | Configuration passed to the PyTorch gradient scaler. ~~dict~~ | +| **CREATES** | The model using the architecture ~~Model~~ | + +### spacy-curated-transformers.ScalarWeight.v1 + +Construct a model that accepts a list of transformer layer outputs and returns a +weighted representation of the same. + +| Name | Description | +| -------------------- | ----------------------------------------------------------------------------- | +| `num_layers` | Number of transformer hidden layers. ~~int~~ | +| `dropout_prob` | Dropout probability. ~~float~~ | +| `mixed_precision` | Use mixed-precision training. ~~bool~~ | +| `grad_scaler_config` | Configuration passed to the PyTorch gradient scaler. ~~dict~~ | +| **CREATES** | The model using the architecture ~~Model[ScalarWeightInT, ScalarWeightOutT]~~ | + +### spacy-curated-transformers.TransformerLayersListener.v1 + +Construct a listener layer that communicates with one or more upstream +Transformer components. This layer extracts the output of the last transformer +layer and performs pooling over the individual pieces of each `Doc` token, +returning their corresponding representations. The upstream name should either +be the wildcard string '\*', or the name of the Transformer component. + +In almost all cases, the wildcard string will suffice as there'll only be one +upstream Transformer component. But in certain situations, e.g: you have +disjoint datasets for certain tasks, or you'd like to use a pre-trained pipeline +but a downstream task requires its own token representations, you could end up +with more than one Transformer component in the pipeline. + +| Name | Description | +| --------------- | ---------------------------------------------------------------------------------------------------------------------- | +| `layers` | The number of layers produced by the upstream transformer component, excluding the embedding layer. ~~int~~ | +| `width` | The width of the vectors produced by the upstream transformer component. ~~int~~ | +| `pooling` | Model that is used to perform pooling over the piece representations. 
~~Model~~ | +| `upstream_name` | A string to identify the 'upstream' Transformer component to communicate with. ~~str~~ | +| `grad_factor` | Factor to multiply gradients with. ~~float~~ | +| **CREATES** | A model that returns the relevant vectors from an upstream transformer component. ~~Model[List[Doc], List[Floats2d]]~~ | + +### spacy-curated-transformers.LastTransformerLayerListener.v1 + +Construct a listener layer that communicates with one or more upstream +Transformer components. This layer extracts the output of the last transformer +layer and performs pooling over the individual pieces of each Doc token, +returning their corresponding representations. The upstream name should either +be the wildcard string '\*', or the name of the Transformer component. + +In almost all cases, the wildcard string will suffice as there'll only be one +upstream Transformer component. But in certain situations, e.g: you have +disjoint datasets for certain tasks, or you'd like to use a pre-trained pipeline +but a downstream task requires its own token representations, you could end up +with more than one Transformer component in the pipeline. + +| Name | Description | +| --------------- | ---------------------------------------------------------------------------------------------------------------------- | +| `width` | The width of the vectors produced by the upstream transformer component. ~~int~~ | +| `pooling` | Model that is used to perform pooling over the piece representations. ~~Model~~ | +| `upstream_name` | A string to identify the 'upstream' Transformer component to communicate with. ~~str~~ | +| `grad_factor` | Factor to multiply gradients with. ~~float~~ | +| **CREATES** | A model that returns the relevant vectors from an upstream transformer component. ~~Model[List[Doc], List[Floats2d]]~~ | + +### spacy-curated-transformers.ScalarWeightingListener.v1 + +Construct a listener layer that communicates with one or more upstream +Transformer components. This layer calculates a weighted representation of all +transformer layer outputs and performs pooling over the individual pieces of +each Doc token, returning their corresponding representations. + +Requires its upstream Transformer components to return all layer outputs from +their models. The upstream name should either be the wildcard string '\*', or +the name of the Transformer component. + +In almost all cases, the wildcard string will suffice as there'll only be one +upstream Transformer component. But in certain situations, e.g: you have +disjoint datasets for certain tasks, or you'd like to use a pre-trained pipeline +but a downstream task requires its own token representations, you could end up +with more than one Transformer component in the pipeline. + +| Name | Description | +| --------------- | ---------------------------------------------------------------------------------------------------------------------- | +| `width` | The width of the vectors produced by the upstream transformer component. ~~int~~ | +| `weighting` | Model that is used to perform the weighting of the different layer outputs. ~~Model~~ | +| `pooling` | Model that is used to perform pooling over the piece representations. ~~Model~~ | +| `upstream_name` | A string to identify the 'upstream' Transformer component to communicate with. ~~str~~ | +| `grad_factor` | Factor to multiply gradients with. ~~float~~ | +| **CREATES** | A model that returns the relevant vectors from an upstream transformer component. 
~~Model[List[Doc], List[Floats2d]]~~ | + +### spacy-curated-transformers.BertWordpieceEncoder.v1 + +Construct a WordPiece piece encoder model that accepts a list of token sequences +or documents and returns a corresponding list of piece identifiers. This encoder +also splits each token on punctuation characters, as expected by most BERT +models. + +This model must be separately initialized using an appropriate loader. + +### spacy-curated-transformers.ByteBpeEncoder.v1 + +Construct a Byte-BPE piece encoder model that accepts a list of token sequences +or documents and returns a corresponding list of piece identifiers. + +This model must be separately initialized using an appropriate loader. + +### spacy-curated-transformers.CamembertSentencepieceEncoder.v1 + +Construct a SentencePiece piece encoder model that accepts a list of token +sequences or documents and returns a corresponding list of piece identifiers +with CamemBERT post-processing applied. + +This model must be separately initialized using an appropriate loader. + +### spacy-curated-transformers.CharEncoder.v1 + +Construct a character piece encoder model that accepts a list of token sequences +or documents and returns a corresponding list of piece identifiers. + +This model must be separately initialized using an appropriate loader. + +### spacy-curated-transformers.SentencepieceEncoder.v1 + +Construct a SentencePiece piece encoder model that accepts a list of token +sequences or documents and returns a corresponding list of piece identifiers. + +This model must be separately initialized using an appropriate loader. + +### spacy-curated-transformers.WordpieceEncoder.v1 + +Construct a WordPiece piece encoder model that accepts a list of token sequences +or documents and returns a corresponding list of piece identifiers. This encoder +also splits each token on punctuation characters, as expected by most BERT +models. + +This model must be separately initialized using an appropriate loader. + +### spacy-curated-transformers.XlmrSentencepieceEncoder.v1 + +Construct a SentencePiece piece encoder model that accepts a list of token +sequences or documents and returns a corresponding list of piece identifiers +with XLM-RoBERTa post-processing applied. + +This model must be separately initialized using an appropriate loader. + ## Pretraining architectures {id="pretrain",source="spacy/ml/models/multi_task.py"} The spacy `pretrain` command lets you initialize a `Tok2Vec` layer in your @@ -682,8 +962,9 @@ single-label use-cases where `exclusive_classes = true`, while the > nO = null > > [model.linear_model] -> @architectures = "spacy.TextCatBOW.v2" +> @architectures = "spacy.TextCatBOW.v3" > exclusive_classes = true +> length = 262144 > ngram_size = 1 > no_output_layer = false > @@ -737,54 +1018,15 @@ but used an internal `tok2vec` instead of taking it as argument: -### spacy.TextCatCNN.v2 {id="TextCatCNN"} +### spacy.TextCatBOW.v3 {id="TextCatBOW"} > #### Example Config > > ```ini > [model] -> @architectures = "spacy.TextCatCNN.v2" -> exclusive_classes = false -> nO = null -> -> [model.tok2vec] -> @architectures = "spacy.HashEmbedCNN.v2" -> pretrained_vectors = null -> width = 96 -> depth = 4 -> embed_size = 2000 -> window_size = 1 -> maxout_pieces = 3 -> subword_features = true -> ``` - -A neural network model where token vectors are calculated using a CNN. The -vectors are mean pooled and used as features in a feed-forward network. This -architecture is usually less accurate than the ensemble, but runs faster. 
- -| Name | Description | -| ------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `exclusive_classes` | Whether or not categories are mutually exclusive. ~~bool~~ | -| `tok2vec` | The [`tok2vec`](#tok2vec) layer of the model. ~~Model~~ | -| `nO` | Output dimension, determined by the number of different labels. If not set, the [`TextCategorizer`](/api/textcategorizer) component will set it when `initialize` is called. ~~Optional[int]~~ | -| **CREATES** | The model using the architecture. ~~Model[List[Doc], Floats2d]~~ | - - - -[TextCatCNN.v1](/api/legacy#TextCatCNN_v1) had the exact same signature, but was -not yet resizable. Since v2, new labels can be added to this component, even -after training. - - - -### spacy.TextCatBOW.v2 {id="TextCatBOW"} - -> #### Example Config -> -> ```ini -> [model] -> @architectures = "spacy.TextCatBOW.v2" +> @architectures = "spacy.TextCatBOW.v3" > exclusive_classes = false +> length = 262144 > ngram_size = 1 > no_output_layer = false > nO = null @@ -798,17 +1040,108 @@ the others, but may not be as accurate, especially if texts are short. | `exclusive_classes` | Whether or not categories are mutually exclusive. ~~bool~~ | | `ngram_size` | Determines the maximum length of the n-grams in the BOW model. For instance, `ngram_size=3` would give unigram, trigram and bigram features. ~~int~~ | | `no_output_layer` | Whether or not to add an output layer to the model (`Softmax` activation if `exclusive_classes` is `True`, else `Logistic`). ~~bool~~ | +| `length` | The size of the weights vector. The length will be rounded up to the next power of two if it is not a power of two. Defaults to `262144`. ~~int~~ | | `nO` | Output dimension, determined by the number of different labels. If not set, the [`TextCategorizer`](/api/textcategorizer) component will set it when `initialize` is called. ~~Optional[int]~~ | | **CREATES** | The model using the architecture. ~~Model[List[Doc], Floats2d]~~ | - + -[TextCatBOW.v1](/api/legacy#TextCatBOW_v1) had the exact same signature, but was -not yet resizable. Since v2, new labels can be added to this component, even -after training. +- [TextCatBOW.v1](/api/legacy#TextCatBOW_v1) was not yet resizable. Since v2, + new labels can be added to this component, even after training. +- [TextCatBOW.v1](/api/legacy#TextCatBOW_v1) and + [TextCatBOW.v2](/api/legacy#TextCatBOW_v2) used an erroneous sparse linear + layer that only used a small number of the allocated parameters. +- [TextCatBOW.v1](/api/legacy#TextCatBOW_v1) and + [TextCatBOW.v2](/api/legacy#TextCatBOW_v2) did not have the `length` argument. 
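+
+The architecture can also be selected directly from Python when adding a text
+classifier to a pipeline. The snippet below is a minimal sketch that loosely
+follows the example config above, but with `exclusive_classes` set to `True`
+so it can be used with the mutually exclusive `textcat` component; the blank
+English pipeline and the labels are placeholders, and all hyperparameters
+should be tuned for your own task.
+
+```python
+import spacy
+
+nlp = spacy.blank("en")
+# Override the default textcat model with the BOW v3 architecture. The
+# parameter values follow the example config above.
+config = {
+    "model": {
+        "@architectures": "spacy.TextCatBOW.v3",
+        "exclusive_classes": True,
+        "ngram_size": 1,
+        "no_output_layer": False,
+        # Rounded up to the next power of two if it isn't one already.
+        "length": 262144,
+    }
+}
+textcat = nlp.add_pipe("textcat", config=config)
+textcat.add_label("POSITIVE")
+textcat.add_label("NEGATIVE")
+```
+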
+### spacy.TextCatParametricAttention.v1 {id="TextCatParametricAttention"} + +> #### Example Config +> +> ```ini +> [model] +> @architectures = "spacy.TextCatParametricAttention.v1" +> exclusive_classes = true +> nO = null +> +> [model.tok2vec] +> @architectures = "spacy.Tok2Vec.v2" +> +> [model.tok2vec.embed] +> @architectures = "spacy.MultiHashEmbed.v2" +> width = 64 +> rows = [2000, 2000, 1000, 1000, 1000, 1000] +> attrs = ["ORTH", "LOWER", "PREFIX", "SUFFIX", "SHAPE", "ID"] +> include_static_vectors = false +> +> [model.tok2vec.encode] +> @architectures = "spacy.MaxoutWindowEncoder.v2" +> width = ${model.tok2vec.embed.width} +> window_size = 1 +> maxout_pieces = 3 +> depth = 2 +> ``` + +A neural network model that is built upon Tok2Vec and uses parametric attention +to attend to tokens that are relevant to text classification. + +| Name | Description | +| ------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `tok2vec` | The `tok2vec` layer to build the neural network upon. ~~Model[List[Doc], List[Floats2d]]~~ | +| `exclusive_classes` | Whether or not categories are mutually exclusive. ~~bool~~ | +| `nO` | Output dimension, determined by the number of different labels. If not set, the [`TextCategorizer`](/api/textcategorizer) component will set it when `initialize` is called. ~~Optional[int]~~ | +| **CREATES** | The model using the architecture. ~~Model[List[Doc], Floats2d]~~ | + +### spacy.TextCatReduce.v1 {id="TextCatReduce"} + +> #### Example Config +> +> ```ini +> [model] +> @architectures = "spacy.TextCatReduce.v1" +> exclusive_classes = false +> use_reduce_first = false +> use_reduce_last = false +> use_reduce_max = false +> use_reduce_mean = true +> nO = null +> +> [model.tok2vec] +> @architectures = "spacy.HashEmbedCNN.v2" +> pretrained_vectors = null +> width = 96 +> depth = 4 +> embed_size = 2000 +> window_size = 1 +> maxout_pieces = 3 +> subword_features = true +> ``` + +A classifier that pools token hidden representations of each `Doc` using first, +max or mean reduction and then applies a classification layer. Reductions are +concatenated when multiple reductions are used. + + + +`TextCatReduce` is a generalization of the older +[`TextCatCNN`](/api/legacy#TextCatCNN_v2) model. `TextCatCNN` always uses a mean +reduction, whereas `TextCatReduce` also supports first/max reductions. + + + +| Name | Description | +| ------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `exclusive_classes` | Whether or not categories are mutually exclusive. ~~bool~~ | +| `tok2vec` | The [`tok2vec`](#tok2vec) layer of the model. ~~Model~~ | +| `use_reduce_first` | Pool by using the hidden representation of the first token of a `Doc`. ~~bool~~ | +| `use_reduce_last` | Pool by using the hidden representation of the last token of a `Doc`. ~~bool~~ | +| `use_reduce_max` | Pool by taking the maximum values of the hidden representations of a `Doc`. ~~bool~~ | +| `use_reduce_mean` | Pool by taking the mean of all hidden representations of a `Doc`. ~~bool~~ | +| `nO` | Output dimension, determined by the number of different labels. If not set, the [`TextCategorizer`](/api/textcategorizer) component will set it when `initialize` is called. 
~~Optional[int]~~ | +| **CREATES** | The model using the architecture. ~~Model[List[Doc], Floats2d]~~ | + ## Span classification architectures {id="spancat",source="spacy/ml/models/spancat.py"} ### spacy.SpanCategorizer.v1 {id="SpanCategorizer"} diff --git a/website/docs/api/basevectors.mdx b/website/docs/api/basevectors.mdx new file mode 100644 index 000000000..993b9a33e --- /dev/null +++ b/website/docs/api/basevectors.mdx @@ -0,0 +1,143 @@ +--- +title: BaseVectors +teaser: Abstract class for word vectors +tag: class +source: spacy/vectors.pyx +version: 3.7 +--- + +`BaseVectors` is an abstract class to support the development of custom vectors +implementations. + +For use in training with [`StaticVectors`](/api/architectures#staticvectors), +`get_batch` must be implemented. For improved performance, use efficient +batching in `get_batch` and implement `to_ops` to copy the vector data to the +current device. See an example custom implementation for +[BPEmb subword embeddings](/usage/embeddings-transformers#custom-vectors). + +## BaseVectors.\_\_init\_\_ {id="init",tag="method"} + +Create a new vector store. + +| Name | Description | +| -------------- | --------------------------------------------------------------------------------------------------------------------- | +| _keyword-only_ | | +| `strings` | The string store. A new string store is created if one is not provided. Defaults to `None`. ~~Optional[StringStore]~~ | + +## BaseVectors.\_\_getitem\_\_ {id="getitem",tag="method"} + +Get a vector by key. If the key is not found in the table, a `KeyError` should +be raised. + +| Name | Description | +| ----------- | ---------------------------------------------------------------- | +| `key` | The key to get the vector for. ~~Union[int, str]~~ | +| **RETURNS** | The vector for the key. ~~numpy.ndarray[ndim=1, dtype=float32]~~ | + +## BaseVectors.\_\_len\_\_ {id="len",tag="method"} + +Return the number of vectors in the table. + +| Name | Description | +| ----------- | ------------------------------------------- | +| **RETURNS** | The number of vectors in the table. ~~int~~ | + +## BaseVectors.\_\_contains\_\_ {id="contains",tag="method"} + +Check whether there is a vector entry for the given key. + +| Name | Description | +| ----------- | -------------------------------------------- | +| `key` | The key to check. ~~int~~ | +| **RETURNS** | Whether the key has a vector entry. ~~bool~~ | + +## BaseVectors.add {id="add",tag="method"} + +Add a key to the table, if possible. If no keys can be added, return `-1`. + +| Name | Description | +| ----------- | ----------------------------------------------------------------------------------- | +| `key` | The key to add. ~~Union[str, int]~~ | +| **RETURNS** | The row the vector was added to, or `-1` if the operation is not supported. ~~int~~ | + +## BaseVectors.shape {id="shape",tag="property"} + +Get `(rows, dims)` tuples of number of rows and number of dimensions in the +vector table. + +| Name | Description | +| ----------- | ------------------------------------------ | +| **RETURNS** | A `(rows, dims)` pair. ~~Tuple[int, int]~~ | + +## BaseVectors.size {id="size",tag="property"} + +The vector size, i.e. `rows * dims`. + +| Name | Description | +| ----------- | ------------------------ | +| **RETURNS** | The vector size. ~~int~~ | + +## BaseVectors.is_full {id="is_full",tag="property"} + +Whether the vectors table is full and no slots are available for new keys. 
+ +| Name | Description | +| ----------- | ------------------------------------------- | +| **RETURNS** | Whether the vectors table is full. ~~bool~~ | + +## BaseVectors.get_batch {id="get_batch",tag="method",version="3.2"} + +Get the vectors for the provided keys efficiently as a batch. Required to use +the vectors with [`StaticVectors`](/api/architectures#StaticVectors) for +training. + +| Name | Description | +| ------ | --------------------------------------- | +| `keys` | The keys. ~~Iterable[Union[int, str]]~~ | + +## BaseVectors.to_ops {id="to_ops",tag="method"} + +Dummy method. Implement this to change the embedding matrix to use different +Thinc ops. + +| Name | Description | +| ----- | -------------------------------------------------------- | +| `ops` | The Thinc ops to switch the embedding matrix to. ~~Ops~~ | + +## BaseVectors.to_disk {id="to_disk",tag="method"} + +Dummy method to allow serialization. Implement to save vector data with the +pipeline. + +| Name | Description | +| ------ | ------------------------------------------------------------------------------------------------------------------------------------------ | +| `path` | A path to a directory, which will be created if it doesn't exist. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ | + +## BaseVectors.from_disk {id="from_disk",tag="method"} + +Dummy method to allow serialization. Implement to load vector data from a saved +pipeline. + +| Name | Description | +| ----------- | ----------------------------------------------------------------------------------------------- | +| `path` | A path to a directory. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ | +| **RETURNS** | The modified vectors object. ~~BaseVectors~~ | + +## BaseVectors.to_bytes {id="to_bytes",tag="method"} + +Dummy method to allow serialization. Implement to serialize vector data to a +binary string. + +| Name | Description | +| ----------- | ---------------------------------------------------- | +| **RETURNS** | The serialized form of the vectors object. ~~bytes~~ | + +## BaseVectors.from_bytes {id="from_bytes",tag="method"} + +Dummy method to allow serialization. Implement to load vector data from a binary +string. + +| Name | Description | +| ----------- | ----------------------------------- | +| `data` | The data to load from. ~~bytes~~ | +| **RETURNS** | The vectors object. ~~BaseVectors~~ | diff --git a/website/docs/api/cli.mdx b/website/docs/api/cli.mdx index 95bffd7f3..6c47c8f1e 100644 --- a/website/docs/api/cli.mdx +++ b/website/docs/api/cli.mdx @@ -7,6 +7,7 @@ menu: - ['info', 'info'] - ['validate', 'validate'] - ['init', 'init'] + - ['find-function', 'find-function'] - ['convert', 'convert'] - ['debug', 'debug'] - ['train', 'train'] @@ -185,6 +186,29 @@ $ python -m spacy init fill-config [base_path] [output_file] [--diff] | `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | | **CREATES** | Complete and auto-filled config file for training. | +### init fill-curated-transformer {id="init-fill-curated-transformer",version="3.7",tag="command"} + +Auto-fill the Hugging Face model hyperpameters and loader parameters of a +[Curated Transformer](/api/curatedtransformer) pipeline component in a +[.cfg file](/usage/training#config). The name and revision of the +[Hugging Face model](https://huggingface.co/models) can either be passed as +command-line arguments or read from the +`initialize.components.transformer.encoder_loader` config section. 
+ +```bash +$ python -m spacy init fill-curated-transformer [base_path] [output_file] [--model-name] [--model-revision] [--pipe-name] [--code] +``` + +| Name | Description | +| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `base_path` | Path to base config to fill, e.g. generated by the [quickstart widget](/usage/training#quickstart). ~~Path (positional)~~ | +| `output_file` | Path to output `.cfg` file or "-" to write to stdout so you can pipe it to a file. Defaults to "-" (stdout). ~~Path (positional)~~ | +| `--model-name`, `-m` | Name of the Hugging Face model. Defaults to the model name from the encoder loader config. ~~Optional[str] (option)~~ | +| `--model-revision`, `-r` | Revision of the Hugging Face model. Defaults to `main`. ~~Optional[str] (option)~~ | +| `--pipe-name`, `-n` | Name of the Curated Transformer pipe whose config is to be filled. Defaults to the first transformer pipe. ~~Optional[str] (option)~~ | +| `--code`, `-c` | Path to Python file with additional code to be imported. Allows [registering custom functions](/usage/training#custom-functions) for new architectures. ~~Optional[Path] \(option)~~ | +| **CREATES** | Complete and auto-filled config file for training. | + ### init vectors {id="init-vectors",version="3",tag="command"} Convert [word vectors](/usage/linguistic-features#vectors-similarity) for use @@ -211,7 +235,8 @@ $ python -m spacy init vectors [lang] [vectors_loc] [output_dir] [--prune] [--tr | `output_dir` | Pipeline output directory. Will be created if it doesn't exist. ~~Path (positional)~~ | | `--truncate`, `-t` | Number of vectors to truncate to when reading in vectors file. Defaults to `0` for no truncation. ~~int (option)~~ | | `--prune`, `-p` | Number of vectors to prune the vocabulary to. Defaults to `-1` for no pruning. ~~int (option)~~ | -| `--mode`, `-m` | Vectors mode: `default` or [`floret`](https://github.com/explosion/floret). Defaults to `default`. ~~Optional[str] \(option)~~ | +| `--mode`, `-m` | Vectors mode: `default` or [`floret`](https://github.com/explosion/floret). Defaults to `default`. ~~str \(option)~~ | +| `--attr`, `-a` | Token attribute to use for vectors, e.g. `LOWER` or `NORM`) Defaults to `ORTH`. ~~str \(option)~~ | | `--name`, `-n` | Name to assign to the word vectors in the `meta.json`, e.g. `en_core_web_md.vectors`. ~~Optional[str] \(option)~~ | | `--verbose`, `-V` | Print additional information and explanations. ~~bool (flag)~~ | | `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | @@ -250,6 +275,27 @@ $ python -m spacy init labels [config_path] [output_path] [--code] [--verbose] [ | overrides | Config parameters to override. Should be options starting with `--` that correspond to the config section and value to override, e.g. `--paths.train ./train.spacy`. ~~Any (option/flag)~~ | | **CREATES** | The label files. | +## find-function {id="find-function",version="3.7",tag="command"} + +Find the module, path and line number to the file for a given registered +function. This functionality is helpful to understand where registered +functions, as used in the config file, are defined. 
+ +```bash +$ python -m spacy find-function [func_name] [--registry] +``` + +> #### Example +> +> ```bash +> $ python -m spacy find-function spacy.TextCatBOW.v1 +> ``` + +| Name | Description | +| ------------------ | ----------------------------------------------------- | +| `func_name` | Name of the registered function. ~~str (positional)~~ | +| `--registry`, `-r` | Name of the catalogue registry. ~~str (option)~~ | + ## convert {id="convert",tag="command"} Convert files into spaCy's @@ -1018,6 +1064,42 @@ $ python -m spacy debug model ./config.cfg tagger -l "5,15" -DIM -PAR -P0 -P1 -P | overrides | Config parameters to override. Should be options starting with `--` that correspond to the config section and value to override, e.g. `--paths.train ./train.spacy`. ~~Any (option/flag)~~ | | **PRINTS** | Debugging information. | +### debug pieces {id="debug-pieces",version="3.7",tag="command"} + +Analyze word- or sentencepiece stats. + +```bash +$ python -m spacy debug pieces [config_path] [--code] [--name] [overrides] +``` + +| Name | Description | +| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `config_path` | Path to config file. ~~Union[Path, str] (positional)~~ | +| `--code`, `-c` | Path to Python file with additional code to be imported. Allows [registering custom functions](/usage/training#custom-functions) for new architectures. ~~Optional[Path] \(option)~~ | +| `--name`, `-n` | Name of the Curated Transformer pipe whose config is to be filled. Defaults to the first transformer pipe. ~~Optional[str] (option)~~ | +| overrides | Config parameters to override. Should be options starting with `--` that correspond to the config section and value to override, e.g. `--paths.train ./train.spacy`. ~~Any (option/flag)~~ | +| **PRINTS** | Debugging information. | + + + +```bash +$ python -m spacy debug pieces ./config.cfg +``` + +``` +========================= Training corpus statistics ========================= +Median token length: 1.0 +Mean token length: 1.54 +Token length range: [1, 13] + +======================= Development corpus statistics ======================= +Median token length: 1.0 +Mean token length: 1.44 +Token length range: [1, 8] +``` + + + ## train {id="train",tag="command"} Train a pipeline. Expects data in spaCy's @@ -1160,7 +1242,7 @@ skew. To render a sample of dependency parses in a HTML file using the `--displacy-path` argument. ```bash -$ python -m spacy benchmark accuracy [model] [data_path] [--output] [--code] [--gold-preproc] [--gpu-id] [--displacy-path] [--displacy-limit] +$ python -m spacy benchmark accuracy [model] [data_path] [--output] [--code] [--gold-preproc] [--gpu-id] [--displacy-path] [--displacy-limit] [--per-component] [--spans-key] ``` | Name | Description | @@ -1174,6 +1256,7 @@ $ python -m spacy benchmark accuracy [model] [data_path] [--output] [--code] [-- | `--displacy-path`, `-dp` | Directory to output rendered parses as HTML. If not set, no visualizations will be generated. ~~Optional[Path] \(option)~~ | | `--displacy-limit`, `-dl` | Number of parses to generate per file. Defaults to `25`. Keep in mind that a significantly higher number might cause the `.html` files to render slowly. ~~int (option)~~ | | `--per-component`, `-P` 3.6 | Whether to return the scores keyed by component name. Defaults to `False`. 
~~bool (flag)~~ | +| `--spans-key`, `-sk` 3.6.2 | Spans key to use when evaluating `Doc.spans`. Defaults to `sc`. ~~str (option)~~ | | `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | | **CREATES** | Training results and optional metrics and visualizations. | @@ -1185,20 +1268,21 @@ the [binary `.spacy` format](/api/data-formats#binary-training). The pipeline is warmed up before any measurements are taken. ```cli -$ python -m spacy benchmark speed [model] [data_path] [--batch_size] [--no-shuffle] [--gpu-id] [--batches] [--warmup] +$ python -m spacy benchmark speed [model] [data_path] [--code] [--batch_size] [--no-shuffle] [--gpu-id] [--batches] [--warmup] ``` -| Name | Description | -| -------------------- | -------------------------------------------------------------------------------------------------------- | -| `model` | Pipeline to benchmark the speed of. Can be a package or a path to a data directory. ~~str (positional)~~ | -| `data_path` | Location of benchmark data in spaCy's [binary format](/api/data-formats#training). ~~Path (positional)~~ | -| `--batch-size`, `-b` | Set the batch size. If not set, the pipeline's batch size is used. ~~Optional[int] \(option)~~ | -| `--no-shuffle` | Do not shuffle documents in the benchmark data. ~~bool (flag)~~ | -| `--gpu-id`, `-g` | GPU to use, if any. Defaults to `-1` for CPU. ~~int (option)~~ | -| `--batches` | Number of batches to benchmark on. Defaults to `50`. ~~Optional[int] \(option)~~ | -| `--warmup`, `-w` | Iterations over the benchmark data for warmup. Defaults to `3` ~~Optional[int] \(option)~~ | -| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | -| **PRINTS** | Pipeline speed in words per second with a 95% confidence interval. | +| Name | Description | +| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `model` | Pipeline to benchmark the speed of. Can be a package or a path to a data directory. ~~str (positional)~~ | +| `data_path` | Location of benchmark data in spaCy's [binary format](/api/data-formats#training). ~~Path (positional)~~ | +| `--code`, `-c` | Path to Python file with additional code to be imported. Allows [registering custom functions](/usage/training#custom-functions) for new architectures. ~~Optional[Path] \(option)~~ | +| `--batch-size`, `-b` | Set the batch size. If not set, the pipeline's batch size is used. ~~Optional[int] \(option)~~ | +| `--no-shuffle` | Do not shuffle documents in the benchmark data. ~~bool (flag)~~ | +| `--gpu-id`, `-g` | GPU to use, if any. Defaults to `-1` for CPU. ~~int (option)~~ | +| `--batches` | Number of batches to benchmark on. Defaults to `50`. ~~Optional[int] \(option)~~ | +| `--warmup`, `-w` | Iterations over the benchmark data for warmup. Defaults to `3` ~~Optional[int] \(option)~~ | +| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | +| **PRINTS** | Pipeline speed in words per second with a 95% confidence interval. | ## apply {id="apply", version="3.5", tag="command"} @@ -1213,6 +1297,9 @@ input formats are: When a directory is provided it is traversed recursively to collect all files. +When loading a .spacy file, any potential annotations stored on the `Doc` that are not overwritten by the pipeline will be preserved. 
+If you want to evaluate the pipeline on raw text only, make sure that the .spacy file does not contain any annotations. + ```bash $ python -m spacy apply [model] [data-path] [output-file] [--code] [--text-key] [--force-overwrite] [--gpu-id] [--batch-size] [--n-process] ``` @@ -1461,9 +1548,9 @@ obsolete files is left up to you. Remotes can be defined in the `remotes` section of the [`project.yml`](/usage/projects#project-yml). Under the hood, spaCy uses -[`Pathy`](https://github.com/justindujardin/pathy) to communicate with the -remote storages, so you can use any protocol that `Pathy` supports, including -[S3](https://aws.amazon.com/s3/), +[`cloudpathlib`](https://cloudpathlib.drivendata.org) to communicate with the +remote storages, so you can use any protocol that `cloudpathlib` supports, +including [S3](https://aws.amazon.com/s3/), [Google Cloud Storage](https://cloud.google.com/storage), and the local filesystem, although you may need to install extra dependencies to use certain protocols. diff --git a/website/docs/api/curatedtransformer.mdx b/website/docs/api/curatedtransformer.mdx new file mode 100644 index 000000000..3e63ef7c2 --- /dev/null +++ b/website/docs/api/curatedtransformer.mdx @@ -0,0 +1,580 @@ +--- +title: CuratedTransformer +teaser: + Pipeline component for multi-task learning with Curated Transformer models +tag: class +source: github.com/explosion/spacy-curated-transformers/blob/main/spacy_curated_transformers/pipeline/transformer.py +version: 3.7 +api_base_class: /api/pipe +api_string_name: curated_transformer +--- + + + +This component is available via the extension package +[`spacy-curated-transformers`](https://github.com/explosion/spacy-curated-transformers). +It exposes the component via entry points, so if you have the package installed, +using `factory = "curated_transformer"` in your +[training config](/usage/training#config) will work out-of-the-box. + + + +This pipeline component lets you use a curated set of transformer models in your +pipeline. spaCy Curated Transformers currently supports the following model +types: + +- ALBERT +- BERT +- CamemBERT +- RoBERTa +- XLM-RoBERT + +If you want to use another type of model, use +[spacy-transformers](/api/spacy-transformers), which allows you to use all +Hugging Face transformer models with spaCy. + +You will usually connect downstream components to a shared Curated Transformer +pipe using one of the Curated Transformer listener layers. This works similarly +to spaCy's [Tok2Vec](/api/tok2vec), and the +[Tok2VecListener](/api/architectures/#Tok2VecListener) sublayer. The component +assigns the output of the transformer to the `Doc`'s extension attributes. To +access the values, you can use the custom +[`Doc._.trf_data`](#assigned-attributes) attribute. + +For more details, see the [usage documentation](/usage/embeddings-transformers). + +## Assigned Attributes {id="assigned-attributes"} + +The component sets the following +[custom extension attribute](/usage/processing-pipeline#custom-components-attributes): + +| Location | Value | +| ---------------- | -------------------------------------------------------------------------- | +| `Doc._.trf_data` | Curated Transformer outputs for the `Doc` object. ~~DocTransformerOutput~~ | + +## Config and Implementation {id="config"} + +The default config is defined by the pipeline component factory and describes +how the component should be configured. 
You can override its settings via the +`config` argument on [`nlp.add_pipe`](/api/language#add_pipe) or in your +[`config.cfg` for training](/usage/training#config). See the +[model architectures](/api/architectures#curated-trf) documentation for details +on the curated transformer architectures and their arguments and +hyperparameters. + +> #### Example +> +> ```python +> from spacy_curated_transformers.pipeline.transformer import DEFAULT_CONFIG +> +> nlp.add_pipe("curated_transformer", config=DEFAULT_CONFIG) +> ``` + +| Setting | Description | +| ------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | The Thinc [`Model`](https://thinc.ai/docs/api-model) wrapping the transformer. Defaults to [`XlmrTransformer`](/api/architectures#curated-trf). ~~Model~~ | +| `frozen` | If `True`, the model's weights are frozen and no backpropagation is performed. ~~bool~~ | +| `all_layer_outputs` | If `True`, the model returns the outputs of all the layers. Otherwise, only the output of the last layer is returned. This must be set to `True` if any of the pipe's downstream listeners require the outputs of all transformer layers. ~~bool~~ | + +```python +https://github.com/explosion/spacy-curated-transformers/blob/main/spacy_curated_transformers/pipeline/transformer.py +``` + +## CuratedTransformer.\_\_init\_\_ {id="init",tag="method"} + +> #### Example +> +> ```python +> # Construction via add_pipe with default model +> trf = nlp.add_pipe("curated_transformer") +> +> # Construction via add_pipe with custom config +> config = { +> "model": { +> "@architectures": "spacy-curated-transformers.XlmrTransformer.v1", +> "vocab_size": 250002, +> "num_hidden_layers": 12, +> "hidden_width": 768, +> "piece_encoder": { +> "@architectures": "spacy-curated-transformers.XlmrSentencepieceEncoder.v1" +> } +> } +> } +> trf = nlp.add_pipe("curated_transformer", config=config) +> +> # Construction from class +> from spacy_curated_transformers import CuratedTransformer +> trf = CuratedTransformer(nlp.vocab, model) +> ``` + +Construct a `CuratedTransformer` component. One or more subsequent spaCy +components can use the transformer outputs as features in its model, with +gradients backpropagated to the single shared weights. The activations from the +transformer are saved in the [`Doc._.trf_data`](#assigned-attributes) extension +attribute. You can also provide a callback to set additional annotations. In +your application, you would normally use a shortcut for this and instantiate the +component using its string name and [`nlp.add_pipe`](/api/language#create_pipe). + +| Name | Description | +| ------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `vocab` | The shared vocabulary. ~~Vocab~~ | +| `model` | One of the supported pre-trained transformer models. ~~Model~~ | +| _keyword-only_ | | +| `name` | The component instance name. ~~str~~ | +| `frozen` | If `True`, the model's weights are frozen and no backpropagation is performed. ~~bool~~ | +| `all_layer_outputs` | If `True`, the model returns the outputs of all the layers. Otherwise, only the output of the last layer is returned. 
This must be set to `True` if any of the pipe's downstream listeners require the outputs of all transformer layers. ~~bool~~ | + +## CuratedTransformer.\_\_call\_\_ {id="call",tag="method"} + +Apply the pipe to one document. The document is modified in place, and returned. +This usually happens under the hood when the `nlp` object is called on a text +and all pipeline components are applied to the `Doc` in order. Both +[`__call__`](/api/curatedtransformer#call) and +[`pipe`](/api/curatedtransformer#pipe) delegate to the +[`predict`](/api/curatedtransformer#predict) and +[`set_annotations`](/api/curatedtransformer#set_annotations) methods. + +> #### Example +> +> ```python +> doc = nlp("This is a sentence.") +> trf = nlp.add_pipe("curated_transformer") +> # This usually happens under the hood +> processed = trf(doc) +> ``` + +| Name | Description | +| ----------- | -------------------------------- | +| `doc` | The document to process. ~~Doc~~ | +| **RETURNS** | The processed document. ~~Doc~~ | + +## CuratedTransformer.pipe {id="pipe",tag="method"} + +Apply the pipe to a stream of documents. This usually happens under the hood +when the `nlp` object is called on a text and all pipeline components are +applied to the `Doc` in order. Both [`__call__`](/api/curatedtransformer#call) +and [`pipe`](/api/curatedtransformer#pipe) delegate to the +[`predict`](/api/curatedtransformer#predict) and +[`set_annotations`](/api/curatedtransformer#set_annotations) methods. + +> #### Example +> +> ```python +> trf = nlp.add_pipe("curated_transformer") +> for doc in trf.pipe(docs, batch_size=50): +> pass +> ``` + +| Name | Description | +| -------------- | ------------------------------------------------------------- | +| `stream` | A stream of documents. ~~Iterable[Doc]~~ | +| _keyword-only_ | | +| `batch_size` | The number of documents to buffer. Defaults to `128`. ~~int~~ | +| **YIELDS** | The processed documents in order. ~~Doc~~ | + +## CuratedTransformer.initialize {id="initialize",tag="method"} + +Initialize the component for training and return an +[`Optimizer`](https://thinc.ai/docs/api-optimizers). `get_examples` should be a +function that returns an iterable of [`Example`](/api/example) objects. **At +least one example should be supplied.** The data examples are used to +**initialize the model** of the component and can either be the full training +data or a representative sample. Initialization includes validating the network, +[inferring missing shapes](https://thinc.ai/docs/usage-models#validation) and +setting up the label scheme based on the data. This method is typically called +by [`Language.initialize`](/api/language#initialize). + +> #### Example +> +> ```python +> trf = nlp.add_pipe("curated_transformer") +> trf.initialize(lambda: examples, nlp=nlp) +> ``` + +| Name | Description | +| ---------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `get_examples` | Function that returns gold-standard annotations in the form of [`Example`](/api/example) objects. Must contain at least one `Example`. ~~Callable[[], Iterable[Example]]~~ | +| _keyword-only_ | | +| `nlp` | The current `nlp` object. Defaults to `None`. ~~Optional[Language]~~ | +| `encoder_loader` | Initialization callback for the transformer model. ~~Optional[Callable]~~ | +| `piece_loader` | Initialization callback for the input piece encoder. 
~~Optional[Callable]~~ | + +## CuratedTransformer.predict {id="predict",tag="method"} + +Apply the component's model to a batch of [`Doc`](/api/doc) objects without +modifying them. + +> #### Example +> +> ```python +> trf = nlp.add_pipe("curated_transformer") +> scores = trf.predict([doc1, doc2]) +> ``` + +| Name | Description | +| ----------- | ------------------------------------------- | +| `docs` | The documents to predict. ~~Iterable[Doc]~~ | +| **RETURNS** | The model's prediction for each document. | + +## CuratedTransformer.set_annotations {id="set_annotations",tag="method"} + +Assign the extracted features to the `Doc` objects. By default, the +[`DocTransformerOutput`](/api/curatedtransformer#doctransformeroutput) object is +written to the [`Doc._.trf_data`](#assigned-attributes) attribute. Your +`set_extra_annotations` callback is then called, if provided. + +> #### Example +> +> ```python +> trf = nlp.add_pipe("curated_transformer") +> scores = trf.predict(docs) +> trf.set_annotations(docs, scores) +> ``` + +| Name | Description | +| -------- | ------------------------------------------------------------ | +| `docs` | The documents to modify. ~~Iterable[Doc]~~ | +| `scores` | The scores to set, produced by `CuratedTransformer.predict`. | + +## CuratedTransformer.update {id="update",tag="method"} + +Prepare for an update to the transformer. + +Like the [`Tok2Vec`](api/tok2vec) component, the `CuratedTransformer` component +is unusual in that it does not receive "gold standard" annotations to calculate +a weight update. The optimal output of the transformer data is unknown; it's a +hidden layer inside the network that is updated by backpropagating from output +layers. + +The `CuratedTransformer` component therefore does not perform a weight update +during its own `update` method. Instead, it runs its transformer model and +communicates the output and the backpropagation callback to any downstream +components that have been connected to it via the transformer listener sublayer. +If there are multiple listeners, the last layer will actually backprop to the +transformer and call the optimizer, while the others simply increment the +gradients. + +> #### Example +> +> ```python +> trf = nlp.add_pipe("curated_transformer") +> optimizer = nlp.initialize() +> losses = trf.update(examples, sgd=optimizer) +> ``` + +| Name | Description | +| -------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `examples` | A batch of [`Example`](/api/example) objects. Only the [`Example.predicted`](/api/example#predicted) `Doc` object is used, the reference `Doc` is ignored. ~~Iterable[Example]~~ | +| _keyword-only_ | | +| `drop` | The dropout rate. ~~float~~ | +| `sgd` | An optimizer. Will be created via [`create_optimizer`](#create_optimizer) if not set. ~~Optional[Optimizer]~~ | +| `losses` | Optional record of the loss during training. Updated using the component name as the key. ~~Optional[Dict[str, float]]~~ | +| **RETURNS** | The updated `losses` dictionary. ~~Dict[str, float]~~ | + +## CuratedTransformer.create_optimizer {id="create_optimizer",tag="method"} + +Create an optimizer for the pipeline component. + +> #### Example +> +> ```python +> trf = nlp.add_pipe("curated_transformer") +> optimizer = trf.create_optimizer() +> ``` + +| Name | Description | +| ----------- | ---------------------------- | +| **RETURNS** | The optimizer. 
~~Optimizer~~ | + +## CuratedTransformer.use_params {id="use_params",tag="method, contextmanager"} + +Modify the pipe's model to use the given parameter values. At the end of the +context, the original parameters are restored. + +> #### Example +> +> ```python +> trf = nlp.add_pipe("curated_transformer") +> with trf.use_params(optimizer.averages): +> trf.to_disk("/best_model") +> ``` + +| Name | Description | +| -------- | -------------------------------------------------- | +| `params` | The parameter values to use in the model. ~~dict~~ | + +## CuratedTransformer.to_disk {id="to_disk",tag="method"} + +Serialize the pipe to disk. + +> #### Example +> +> ```python +> trf = nlp.add_pipe("curated_transformer") +> trf.to_disk("/path/to/transformer") +> ``` + +| Name | Description | +| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------ | +| `path` | A path to a directory, which will be created if it doesn't exist. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ | +| _keyword-only_ | | +| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ | + +## CuratedTransformer.from_disk {id="from_disk",tag="method"} + +Load the pipe from disk. Modifies the object in place and returns it. + +> #### Example +> +> ```python +> trf = nlp.add_pipe("curated_transformer") +> trf.from_disk("/path/to/transformer") +> ``` + +| Name | Description | +| -------------- | ----------------------------------------------------------------------------------------------- | +| `path` | A path to a directory. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ | +| _keyword-only_ | | +| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ | +| **RETURNS** | The modified `CuratedTransformer` object. ~~CuratedTransformer~~ | + +## CuratedTransformer.to_bytes {id="to_bytes",tag="method"} + +> #### Example +> +> ```python +> trf = nlp.add_pipe("curated_transformer") +> trf_bytes = trf.to_bytes() +> ``` + +Serialize the pipe to a bytestring. + +| Name | Description | +| -------------- | ------------------------------------------------------------------------------------------- | +| _keyword-only_ | | +| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ | +| **RETURNS** | The serialized form of the `CuratedTransformer` object. ~~bytes~~ | + +## CuratedTransformer.from_bytes {id="from_bytes",tag="method"} + +Load the pipe from a bytestring. Modifies the object in place and returns it. + +> #### Example +> +> ```python +> trf_bytes = trf.to_bytes() +> trf = nlp.add_pipe("curated_transformer") +> trf.from_bytes(trf_bytes) +> ``` + +| Name | Description | +| -------------- | ------------------------------------------------------------------------------------------- | +| `bytes_data` | The data to load from. ~~bytes~~ | +| _keyword-only_ | | +| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ | +| **RETURNS** | The `CuratedTransformer` object. ~~CuratedTransformer~~ | + +## Serialization Fields {id="serialization-fields"} + +During serialization, spaCy will export several data fields used to restore +different aspects of the object. If needed, you can exclude them from +serialization by passing in the string names via the `exclude` argument. 
+
+> #### Example
+>
+> ```python
+> data = trf.to_disk("/path", exclude=["vocab"])
+> ```
+
+| Name    | Description                                                     |
+| ------- | --------------------------------------------------------------- |
+| `vocab` | The shared [`Vocab`](/api/vocab).                                |
+| `cfg`   | The config file. You usually don't want to exclude this.        |
+| `model` | The binary model data. You usually don't want to exclude this.  |
+
+## DocTransformerOutput {id="doctransformeroutput",tag="dataclass"}
+
+Curated Transformer outputs for one `Doc` object. Stores the dense
+representations generated by the transformer for each piece identifier. Piece
+identifiers are grouped by token. Instances of this class are typically
+assigned to the
+[`Doc._.trf_data`](/api/curatedtransformer#assigned-attributes) extension
+attribute.
+
+> #### Example
+>
+> ```python
+> # Get the last hidden layer output for "is" (token index 1)
+> doc = nlp("This is a text.")
+> tensors = doc._.trf_data.last_hidden_layer_state[1]
+> ```
+
+| Name              | Description                                                                                                                                                                            |
+| ----------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `all_outputs`     | List of `Ragged` tensors that correspond to the outputs of the different transformer layers. Each tensor element corresponds to a piece identifier's representation. ~~List[Ragged]~~ |
+| `last_layer_only` | If only the last transformer layer's outputs are preserved. ~~bool~~                                                                                                                   |
+
+### DocTransformerOutput.embedding_layer {id="doctransformeroutput-embeddinglayer",tag="property"}
+
+Return the output of the transformer's embedding layer or `None` if
+`last_layer_only` is `True`.
+
+| Name        | Description                                  |
+| ----------- | -------------------------------------------- |
+| **RETURNS** | Embedding layer output. ~~Optional[Ragged]~~ |
+
+### DocTransformerOutput.last_hidden_layer_state {id="doctransformeroutput-lasthiddenlayerstate",tag="property"}
+
+Return the output of the transformer's last hidden layer.
+
+| Name        | Description                          |
+| ----------- | ------------------------------------ |
+| **RETURNS** | Last hidden layer output. ~~Ragged~~ |
+
+### DocTransformerOutput.all_hidden_layer_states {id="doctransformeroutput-allhiddenlayerstates",tag="property"}
+
+Return the outputs of all transformer layers (excluding the embedding layer).
+
+| Name        | Description                            |
+| ----------- | -------------------------------------- |
+| **RETURNS** | Hidden layer outputs. ~~List[Ragged]~~ |
+
+### DocTransformerOutput.num_outputs {id="doctransformeroutput-numoutputs",tag="property"}
+
+Return the number of layer outputs stored in the `DocTransformerOutput`
+instance (including the embedding layer).
+
+| Name        | Description                |
+| ----------- | -------------------------- |
+| **RETURNS** | Number of outputs. ~~int~~ |
+
+## Span Getters {id="span_getters",source="github.com/explosion/spacy-transformers/blob/master/spacy_curated_transformers/span_getters.py"}
+
+Span getters are functions that take a batch of [`Doc`](/api/doc) objects and
+return a list of [`Span`](/api/span) objects for each doc to be processed by
+the transformer. This is used to manage long documents by cutting them into
+smaller sequences before running the transformer. The spans are allowed to
+overlap, and you can also omit sections of the `Doc` if they are not relevant.
+Span getters can be referenced in the
+`[components.transformer.model.with_spans]` block of the config to customize
+the sequences processed by the transformer.
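+
+To make the expected callback signature concrete, here is a minimal sketch of
+a custom span getter that simply hands each full document to the transformer
+as a single span. The function name is only illustrative, and how to register
+such a callback so it can be referenced from the config is covered in the
+usage documentation on [custom functions](/usage/training#custom-functions).
+For long documents you will usually prefer the built-in strided getter
+described below.
+
+```python
+from typing import Iterable, List
+
+from spacy.tokens import Doc, Span
+
+
+def whole_doc_spans(docs: Iterable[Doc]) -> List[List[Span]]:
+    # One span covering each full document. Fine for short texts; long
+    # documents are better cut into overlapping windows, e.g. with
+    # WithStridedSpans.v1 below.
+    return [[doc[:]] for doc in docs]
+```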
+ +| Name | Description | +| ----------- | ------------------------------------------------------------- | +| `docs` | A batch of `Doc` objects. ~~Iterable[Doc]~~ | +| **RETURNS** | The spans to process by the transformer. ~~List[List[Span]]~~ | + +### WithStridedSpans.v1 {id="strided_spans",tag="registered function"} + +> #### Example config +> +> ```ini +> [transformer.model.with_spans] +> @architectures = "spacy-curated-transformers.WithStridedSpans.v1" +> stride = 96 +> window = 128 +> ``` + +Create a span getter for strided spans. If you set the `window` and `stride` to +the same value, the spans will cover each token once. Setting `stride` lower +than `window` will allow for an overlap, so that some tokens are counted twice. +This can be desirable, because it allows all tokens to have both a left and +right context. + +| Name | Description | +| -------- | ------------------------ | +| `window` | The window size. ~~int~~ | +| `stride` | The stride size. ~~int~~ | + +## Model Loaders + +[Curated Transformer models](/api/architectures#curated-trf) are constructed +with default hyperparameters and randomized weights when the pipeline is +created. To load the weights of an existing pre-trained model into the pipeline, +one of the following loader callbacks can be used. The pre-trained model must +have the same hyperparameters as the model used by the pipeline. + +### HFTransformerEncoderLoader.v1 {id="hf_trfencoder_loader",tag="registered_function"} + +Construct a callback that initializes a supported transformer model with weights +from a corresponding HuggingFace model. + +| Name | Description | +| ---------- | ------------------------------------------ | +| `name` | Name of the HuggingFace model. ~~str~~ | +| `revision` | Name of the model revision/branch. ~~str~~ | + +### PyTorchCheckpointLoader.v1 {id="pytorch_checkpoint_loader",tag="registered_function"} + +Construct a callback that initializes a supported transformer model with weights +from a PyTorch checkpoint. + +| Name | Description | +| ------ | ---------------------------------------- | +| `path` | Path to the PyTorch checkpoint. ~~Path~~ | + +## Tokenizer Loaders + +[Curated Transformer models](/api/architectures#curated-trf) must be paired with +a matching tokenizer (piece encoder) model in a spaCy pipeline. As with the +transformer models, tokenizers are constructed with an empty vocabulary during +pipeline creation - They need to be initialized with an appropriate loader +before use in training/inference. + +### ByteBPELoader.v1 {id="bytebpe_loader",tag="registered_function"} + +Construct a callback that initializes a Byte-BPE piece encoder model. + +| Name | Description | +| ------------- | ------------------------------------- | +| `vocab_path` | Path to the vocabulary file. ~~Path~~ | +| `merges_path` | Path to the merges file. ~~Path~~ | + +### CharEncoderLoader.v1 {id="charencoder_loader",tag="registered_function"} + +Construct a callback that initializes a character piece encoder model. + +| Name | Description | +| ----------- | --------------------------------------------------------------------------- | +| `path` | Path to the serialized character model. ~~Path~~ | +| `bos_piece` | Piece used as a beginning-of-sentence token. Defaults to `"[BOS]"`. ~~str~~ | +| `eos_piece` | Piece used as a end-of-sentence token. Defaults to `"[EOS]"`. ~~str~~ | +| `unk_piece` | Piece used as a stand-in for unknown tokens. Defaults to `"[UNK]"`. ~~str~~ | +| `normalize` | Unicode normalization form to use. Defaults to `"NFKC"`. 
~~str~~ | + +### HFPieceEncoderLoader.v1 {id="hf_pieceencoder_loader",tag="registered_function"} + +Construct a callback that initializes a HuggingFace piece encoder model. Used in +conjunction with the HuggingFace model loader. + +| Name | Description | +| ---------- | ------------------------------------------ | +| `name` | Name of the HuggingFace model. ~~str~~ | +| `revision` | Name of the model revision/branch. ~~str~~ | + +### SentencepieceLoader.v1 {id="sentencepiece_loader",tag="registered_function"} + +Construct a callback that initializes a SentencePiece piece encoder model. + +| Name | Description | +| ------ | ---------------------------------------------------- | +| `path` | Path to the serialized SentencePiece model. ~~Path~~ | + +### WordpieceLoader.v1 {id="wordpiece_loader",tag="registered_function"} + +Construct a callback that initializes a WordPiece piece encoder model. + +| Name | Description | +| ------ | ------------------------------------------------ | +| `path` | Path to the serialized WordPiece model. ~~Path~~ | + +## Callbacks + +### gradual_transformer_unfreezing.v1 {id="gradual_transformer_unfreezing",tag="registered_function"} + +Construct a callback that can be used to gradually unfreeze the weights of one +or more Transformer components during training. This can be used to prevent +catastrophic forgetting during fine-tuning. + +| Name | Description | +| -------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `target_pipes` | A dictionary whose keys and values correspond to the names of Transformer components and the training step at which they should be unfrozen respectively. ~~Dict[str, int]~~ | diff --git a/website/docs/api/entitylinker.mdx b/website/docs/api/entitylinker.mdx index 7bf4ccf75..f4b83d88b 100644 --- a/website/docs/api/entitylinker.mdx +++ b/website/docs/api/entitylinker.mdx @@ -61,7 +61,7 @@ architectures and their arguments and hyperparameters. | `incl_context` | Whether or not to include the local context in the model. Defaults to `True`. ~~bool~~ | | `model` | The [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. Defaults to [EntityLinker](/api/architectures#EntityLinker). ~~Model~~ | | `entity_vector_length` | Size of encoding vectors in the KB. Defaults to `64`. ~~int~~ | -| `use_gold_ents` | Whether to copy entities from the gold docs or not. Defaults to `True`. If `False`, entities must be set in the training data or by an annotating component in the pipeline. ~~int~~ | +| `use_gold_ents` | Whether to copy entities from the gold docs or not. Defaults to `True`. If `False`, entities must be set in the training data or by an annotating component in the pipeline. ~~bool~~ | | `get_candidates` | Function that generates plausible candidates for a given `Span` object. Defaults to [CandidateGenerator](/api/architectures#CandidateGenerator), a function looking up exact, case-dependent aliases in the KB. ~~Callable[[KnowledgeBase, Span], Iterable[Candidate]]~~ | | `get_candidates_batch` 3.5 | Function that generates plausible candidates for a given batch of `Span` objects. Defaults to [CandidateBatchGenerator](/api/architectures#CandidateBatchGenerator), a function looking up exact, case-dependent aliases in the KB. ~~Callable[[KnowledgeBase, Iterable[Span]], Iterable[Iterable[Candidate]]]~~ | | `generate_empty_kb` 3.5.1 | Function that generates an empty `KnowledgeBase` object. 
Defaults to [`spacy.EmptyKB.v2`](/api/architectures#EmptyKB), which generates an empty [`InMemoryLookupKB`](/api/inmemorylookupkb). ~~Callable[[Vocab, int], KnowledgeBase]~~ | diff --git a/website/docs/api/language.mdx b/website/docs/api/language.mdx index de23156b9..068e8ea78 100644 --- a/website/docs/api/language.mdx +++ b/website/docs/api/language.mdx @@ -856,7 +856,7 @@ token-to-vector embedding component like [`Tok2Vec`](/api/tok2vec) or training a pipeline with components sourced from an existing pipeline: if multiple components (e.g. tagger, parser, NER) listen to the same token-to-vector component, but some of them are frozen and not updated, their -performance may degrade significally as the token-to-vector component is updated +performance may degrade significantly as the token-to-vector component is updated with new data. To prevent this, listeners can be replaced with a standalone token-to-vector layer that is owned by the component and doesn't change if the component isn't updated. diff --git a/website/docs/api/large-language-models.mdx b/website/docs/api/large-language-models.mdx new file mode 100644 index 000000000..cefd5c66e --- /dev/null +++ b/website/docs/api/large-language-models.mdx @@ -0,0 +1,1689 @@ +--- +title: Large Language Models +teaser: Integrating LLMs into structured NLP pipelines +menu: + - ['Config and implementation', 'config'] + - ['Tasks', 'tasks'] + - ['Models', 'models'] + - ['Cache', 'cache'] + - ['Various Functions', 'various-functions'] +--- + +[The `spacy-llm` package](https://github.com/explosion/spacy-llm) integrates +Large Language Models (LLMs) into spaCy, featuring a modular system for **fast +prototyping** and **prompting**, and turning unstructured responses into +**robust outputs** for various NLP tasks, **no training data** required. + +## Config and implementation {id="config"} + +An LLM component is implemented through the `LLMWrapper` class. It is accessible +through a generic `llm` +[component factory](https://spacy.io/usage/processing-pipelines#custom-components-factories) +as well as through task-specific component factories: `llm_ner`, `llm_spancat`, +`llm_rel`, `llm_textcat`, `llm_sentiment`, `llm_summarization`, +`llm_entity_linker`, `llm_raw` and `llm_translation`. For these factories, the +GPT-3-5 model from OpenAI is used by default, but this can be customized. + +> #### Example +> +> ```python +> # Construction via add_pipe with the default GPT 3.5 model and an explicitly defined task +> config = {"task": {"@llm_tasks": "spacy.NER.v3", "labels": ["PERSON", "ORGANISATION", "LOCATION"]}} +> llm = nlp.add_pipe("llm", config=config) +> +> # Construction via add_pipe with a task-specific factory and default GPT3.5 model +> llm = nlp.add_pipe("llm_ner") +> +> # Construction via add_pipe with a task-specific factory and custom model +> llm = nlp.add_pipe("llm_ner", config={"model": {"@llm_models": "spacy.Dolly.v1", "name": "dolly-v2-12b"}}) +> +> # Construction from class +> from spacy_llm.pipeline import LLMWrapper +> llm = LLMWrapper(vocab=nlp.vocab, task=task, model=model, cache=cache, save_io=True) +> ``` + +### LLMWrapper.\_\_init\_\_ {id="init",tag="method"} + +Create a new pipeline instance. In your application, you would normally use a +shortcut for this and instantiate the component using its string name and +[`nlp.add_pipe`](/api/language#add_pipe). 
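+
+The example below sketches both patterns. The `task`, `model` and `cache`
+objects are assumed to have been created beforehand, for instance via the
+registered functions documented in the sections that follow.
+
+> #### Example
+>
+> ```python
+> # Shortcut: add the component by one of its factory names
+> llm = nlp.add_pipe("llm_ner")
+>
+> # Direct construction from the class
+> from spacy_llm.pipeline import LLMWrapper
+> llm = LLMWrapper(vocab=nlp.vocab, task=task, model=model, cache=cache, save_io=True)
+> ```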
+ +| Name | Description | +| -------------- | -------------------------------------------------------------------------------------------------- | +| `name` | String name of the component instance. `llm` by default. ~~str~~ | +| _keyword-only_ | | +| `vocab` | The shared vocabulary. ~~Vocab~~ | +| `task` | An [LLM Task](#tasks) can generate prompts and parse LLM responses. ~~LLMTask~~ | +| `model` | The [LLM Model](#models) queries a specific LLM API.. ~~Callable[[Iterable[Any]], Iterable[Any]]~~ | +| `cache` | [Cache](#cache) to use for caching prompts and responses per doc. ~~Cache~~ | +| `save_io` | Whether to save LLM I/O (prompts and responses) in the `Doc._.llm_io` custom attribute. ~~bool~~ | + +### LLMWrapper.\_\_call\_\_ {id="call",tag="method"} + +Apply the pipe to one document. The document is modified in place and returned. +This usually happens under the hood when the `nlp` object is called on a text +and all pipeline components are applied to the `Doc` in order. + +> #### Example +> +> ```python +> doc = nlp("Ingrid visited Paris.") +> llm_ner = nlp.add_pipe("llm_ner") +> # This usually happens under the hood +> processed = llm_ner(doc) +> ``` + +| Name | Description | +| ----------- | -------------------------------- | +| `doc` | The document to process. ~~Doc~~ | +| **RETURNS** | The processed document. ~~Doc~~ | + +### LLMWrapper.pipe {id="pipe",tag="method"} + +Apply the pipe to a stream of documents. This usually happens under the hood +when the `nlp` object is called on a text and all pipeline components are +applied to the `Doc` in order. + +> #### Example +> +> ```python +> llm_ner = nlp.add_pipe("llm_ner") +> for doc in llm_ner.pipe(docs, batch_size=50): +> pass +> ``` + +| Name | Description | +| -------------- | ------------------------------------------------------------- | +| `docs` | A stream of documents. ~~Iterable[Doc]~~ | +| _keyword-only_ | | +| `batch_size` | The number of documents to buffer. Defaults to `128`. ~~int~~ | +| **YIELDS** | The processed documents in order. ~~Doc~~ | + +### LLMWrapper.add_label {id="add_label",tag="method"} + +Add a new label to the pipe's task. Alternatively, provide the labels upon the +[task](#task) definition, or through the `[initialize]` block of the +[config](#config). + +> #### Example +> +> ```python +> llm_ner = nlp.add_pipe("llm_ner") +> llm_ner.add_label("MY_LABEL") +> ``` + +| Name | Description | +| ----------- | ----------------------------------------------------------- | +| `label` | The label to add. ~~str~~ | +| **RETURNS** | `0` if the label is already present, otherwise `1`. ~~int~~ | + +### LLMWrapper.to_disk {id="to_disk",tag="method"} + +Serialize the pipe to disk. + +> #### Example +> +> ```python +> llm_ner = nlp.add_pipe("llm_ner") +> llm_ner.to_disk("/path/to/llm_ner") +> ``` + +| Name | Description | +| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------ | +| `path` | A path to a directory, which will be created if it doesn't exist. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ | +| _keyword-only_ | | +| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ | + +### LLMWrapper.from_disk {id="from_disk",tag="method"} + +Load the pipe from disk. Modifies the object in place and returns it. 
+ +> #### Example +> +> ```python +> llm_ner = nlp.add_pipe("llm_ner") +> llm_ner.from_disk("/path/to/llm_ner") +> ``` + +| Name | Description | +| -------------- | ----------------------------------------------------------------------------------------------- | +| `path` | A path to a directory. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ | +| _keyword-only_ | | +| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ | +| **RETURNS** | The modified `LLMWrapper` object. ~~LLMWrapper~~ | + +### LLMWrapper.to_bytes {id="to_bytes",tag="method"} + +> #### Example +> +> ```python +> llm_ner = nlp.add_pipe("llm_ner") +> ner_bytes = llm_ner.to_bytes() +> ``` + +Serialize the pipe to a bytestring. + +| Name | Description | +| -------------- | ------------------------------------------------------------------------------------------- | +| _keyword-only_ | | +| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ | +| **RETURNS** | The serialized form of the `LLMWrapper` object. ~~bytes~~ | + +### LLMWrapper.from_bytes {id="from_bytes",tag="method"} + +Load the pipe from a bytestring. Modifies the object in place and returns it. + +> #### Example +> +> ```python +> ner_bytes = llm_ner.to_bytes() +> llm_ner = nlp.add_pipe("llm_ner") +> llm_ner.from_bytes(ner_bytes) +> ``` + +| Name | Description | +| -------------- | ------------------------------------------------------------------------------------------- | +| `bytes_data` | The data to load from. ~~bytes~~ | +| _keyword-only_ | | +| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ | +| **RETURNS** | The `LLMWrapper` object. ~~LLMWrapper~~ | + +### LLMWrapper.labels {id="labels",tag="property"} + +The labels currently added to the component. Empty tuple if the LLM's task does +not require labels. + +> #### Example +> +> ```python +> llm_ner.add_label("MY_LABEL") +> assert "MY_LABEL" in llm_ner.labels +> ``` + +| Name | Description | +| ----------- | ------------------------------------------------------ | +| **RETURNS** | The labels added to the component. ~~Tuple[str, ...]~~ | + +## Tasks {id="tasks"} + +In `spacy-llm`, a _task_ defines an NLP problem or question and its solution +using an LLM. It does so by implementing the following responsibilities: + +1. Loading a prompt template and injecting documents' data into the prompt. + Optionally, include fewshot examples in the prompt. +2. Splitting the prompt into several pieces following a map-reduce paradigm, + _if_ the prompt is too long to fit into the model's context and the task + supports sharding prompts. +3. Parsing the LLM's responses back into structured information and validating + the parsed output. + +Two different task interfaces are supported: `ShardingLLMTask` and +`NonShardingLLMTask`. Only the former supports the sharding of documents, i. e. +splitting up prompts if they are too long. + +All tasks are registered in the `llm_tasks` registry. + +### On Sharding {id="task-sharding"} + +"Sharding" describes, generally speaking, the process of distributing parts of a +dataset across multiple storage units for easier processing and lookups. 
In +`spacy-llm` we use this term (synonymously: "mapping") to describe the splitting +up of prompts if they are too long for a model to handle, and "fusing" +(synonymously: "reducing") to describe how the model responses for several +shards are merged back together into a single document. + +Prompts are broken up in a manner that _always_ keeps the prompt in the template +intact, meaning that the instructions to the LLM will always stay complete. The +document content however will be split, if the length of the fully rendered +prompt exceeds a model context length. + +A toy example: let's assume a model has a context window of 25 tokens and the +prompt template for our fictional, sharding-supporting task looks like this: + +``` +Estimate the sentiment of this text: +"{text}" +Estimated sentiment: +``` + +Depending on how tokens are counted exactly (this is a config setting), we might +come up with `n = 12` tokens for the number of tokens in the prompt +instructions. Furthermore let's assume that our `text` is "This has been +amazing - I can't remember the last time I left the cinema so impressed." - +which has roughly 19 tokens. + +Considering we only have 13 tokens to add to our prompt before we hit the +context limit, we'll have to split our prompt into two parts. Thus `spacy-llm`, +assuming the task used supports sharding, will split the prompt into two (the +default splitting strategy splits by tokens, but alternative splitting +strategies splitting e. g. by sentences can be configured): + +_(Prompt 1/2)_ + +``` +Estimate the sentiment of this text: +"This has been amazing - I can't remember " +Estimated sentiment: +``` + +_(Prompt 2/2)_ + +``` +Estimate the sentiment of this text: +"the last time I left the cinema so impressed." +Estimated sentiment: +``` + +The reduction step is task-specific - a sentiment estimation task might e. g. do +a weighted average of the sentiment scores. Note that prompt sharding introduces +potential inaccuracies, as the LLM won't have access to the entire document at +once. Depending on your use case this might or might not be problematic. + +### `NonShardingLLMTask` {id="task-nonsharding"} + +#### task.generate_prompts {id="task-nonsharding-generate-prompts"} + +Takes a collection of documents, and returns a collection of "prompts", which +can be of type `Any`. Often, prompts are of type `str` - but this is not +enforced to allow for maximum flexibility in the framework. + +| Argument | Description | +| ----------- | ---------------------------------------- | +| `docs` | The input documents. ~~Iterable[Doc]~~ | +| **RETURNS** | The generated prompts. ~~Iterable[Any]~~ | + +#### task.parse_responses {id="task-non-sharding-parse-responses"} + +Takes a collection of LLM responses and the original documents, parses the +responses into structured information, and sets the annotations on the +documents. The `parse_responses` function is free to set the annotations in any +way, including `Doc` fields like `ents`, `spans` or `cats`, or using custom +defined fields. + +The `responses` are of type `Iterable[Any]`, though they will often be `str` +objects. This depends on the return type of the [model](#models). + +| Argument | Description | +| ----------- | ------------------------------------------------------ | +| `docs` | The input documents. ~~Iterable[Doc]~~ | +| `responses` | The responses received from the LLM. ~~Iterable[Any]~~ | +| **RETURNS** | The annotated documents. 
~~Iterable[Doc]~~ | + +### `ShardingLLMTask` {id="task-sharding"} + +#### task.generate_prompts {id="task-sharding-generate-prompts"} + +Takes a collection of documents, breaks them up into shards if necessary to fit +all content into the model's context, and returns a collection of collections of +"prompts" (i. e. each doc can have multiple shards, each of which have exactly +one prompt), which can be of type `Any`. Often, prompts are of type `str` - but +this is not enforced to allow for maximum flexibility in the framework. + +| Argument | Description | +| ----------- | -------------------------------------------------- | +| `docs` | The input documents. ~~Iterable[Doc]~~ | +| **RETURNS** | The generated prompts. ~~Iterable[Iterable[Any]]~~ | + +#### task.parse_responses {id="task-sharding-parse-responses"} + +Receives a collection of collections of LLM responses (i. e. each doc can have +multiple shards, each of which have exactly one prompt / prompt response) and +the original shards, parses the responses into structured information, sets the +annotations on the shards, and merges back doc shards into single docs. The +`parse_responses` function is free to set the annotations in any way, including +`Doc` fields like `ents`, `spans` or `cats`, or using custom defined fields. + +The `responses` are of type `Iterable[Iterable[Any]]`, though they will often be +`str` objects. This depends on the return type of the [model](#models). + +| Argument | Description | +| ----------- | ---------------------------------------------------------------- | +| `shards` | The input document shards. ~~Iterable[Iterable[Doc]]~~ | +| `responses` | The responses received from the LLM. ~~Iterable[Iterable[Any]]~~ | +| **RETURNS** | The annotated documents. ~~Iterable[Doc]~~ | + +### Translation {id="translation"} + +The translation task translates texts from a defined or inferred source to a +defined target language. + +#### spacy.Translation.v1 {id="translation-v1"} + +`spacy.Translation.v1` supports both zero-shot and few-shot prompting. + +> #### Example config +> +> ```ini +> [components.llm.task] +> @llm_tasks = "spacy.Translation.v1" +> examples = null +> target_lang = "Spanish" +> ``` + +| Argument | Description | +| --------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `template` | Custom prompt template to send to LLM model. Defaults to [translation.v1.jinja](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/translation.v1.jinja). ~~str~~ | +| `examples` | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~ | +| `parse_responses` (NEW) | Callable for parsing LLM responses for this task. Defaults to the internal parsing method for this task. ~~Optional[TaskResponseParser[TranslationTask]]~~ | +| `prompt_example_type` (NEW) | Type to use for fewshot examples. Defaults to `TranslationExample`. ~~Optional[Type[FewshotExample]]~~ | +| `source_lang` | Language to translate from. Doesn't have to be set. ~~Optional[str]~~ | +| `target_lang` | Language to translate to. No default value, has to be set. ~~str~~ | +| `field` | Name of extension attribute to store translation in (i. e. the translation will be available in `doc._.{field}`). Defaults to `translation`. 
~~str~~ | + +To perform [few-shot learning](/usage/large-language-models#few-shot-prompts), +you can write down a few examples in a separate file, and provide these to be +injected into the prompt to the LLM. The default reader `spacy.FewShotReader.v1` +supports `.yml`, `.yaml`, `.json` and `.jsonl`. + +```yaml +- text: 'Top of the morning to you!' + translation: '¡Muy buenos días!' +- text: 'The weather is great today.' + translation: 'El clima está fantástico hoy.' +- text: 'Do you know what will happen tomorrow?' + translation: '¿Sabes qué pasará mañana?' +``` + +```ini +[components.llm.task] +@llm_tasks = "spacy.Translation.v1" +target_lang = "Spanish" +[components.llm.task.examples] +@misc = "spacy.FewShotReader.v1" +path = "translation_examples.yml" +``` + +### Raw prompting {id="raw"} + +Different to all other tasks `spacy.Raw.vX` doesn't provide a specific prompt, +wrapping doc data, to the model. Instead it instructs the model to reply to the +doc content. This is handy for use cases like question answering (where each doc +contains one question) or if you want to include customized prompts for each +doc. + +#### spacy.Raw.v1 {id="raw-v1"} + +Note that since this task may request arbitrary information, it doesn't do any +parsing per se - the model response is stored in a custom `Doc` attribute (i. e. +can be accessed via `doc._.{field}`). + +It supports both zero-shot and few-shot prompting. + +> #### Example config +> +> ```ini +> [components.llm.task] +> @llm_tasks = "spacy.Raw.v1" +> examples = null +> ``` + +| Argument | Description | +| --------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `template` | Custom prompt template to send to LLM model. Defaults to [raw.v1.jinja](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/raw.v1.jinja). ~~str~~ | +| `examples` | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~ | +| `parse_responses` | Callable for parsing LLM responses for this task. Defaults to the internal parsing method for this task. ~~Optional[TaskResponseParser[RawTask]]~~ | +| `prompt_example_type` | Type to use for fewshot examples. Defaults to `RawExample`. ~~Optional[Type[FewshotExample]]~~ | +| `field` | Name of extension attribute to store model reply in (i. e. the reply will be available in `doc._.{field}`). Defaults to `reply`. ~~str~~ | + +To perform [few-shot learning](/usage/large-language-models#few-shot-prompts), +you can write down a few examples in a separate file, and provide these to be +injected into the prompt to the LLM. The default reader `spacy.FewShotReader.v1` +supports `.yml`, `.yaml`, `.json` and `.jsonl`. + +```yaml +# Each example can follow an arbitrary pattern. It might help the prompt performance though if the examples resemble +# the actual docs' content. +- text: "3 + 5 = x. What's x?" + reply: '8' + +- text: 'Write me a limerick.' + reply: + "There was an Old Man with a beard, Who said, 'It is just as I feared! Two + Owls and a Hen, Four Larks and a Wren, Have all built their nests in my + beard!" + +- text: "Analyse the sentiment of the text 'This is great'." + reply: "'This is great' expresses a very positive sentiment." 
+``` + +```ini +[components.llm.task] +@llm_tasks = "spacy.Raw.v1" +field = "llm_reply" +[components.llm.task.examples] +@misc = "spacy.FewShotReader.v1" +path = "raw_examples.yml" +``` + +### Summarization {id="summarization"} + +A summarization task takes a document as input and generates a summary that is +stored in an extension attribute. + +#### spacy.Summarization.v1 {id="summarization-v1"} + +The `spacy.Summarization.v1` task supports both zero-shot and few-shot +prompting. + +> #### Example config +> +> ```ini +> [components.llm.task] +> @llm_tasks = "spacy.Summarization.v1" +> examples = null +> max_n_words = null +> ``` + +| Argument | Description | +| --------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `template` | Custom prompt template to send to LLM model. Defaults to [summarization.v1.jinja](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/summarization.v1.jinja). ~~str~~ | +| `examples` | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~ | +| `parse_responses` (NEW) | Callable for parsing LLM responses for this task. Defaults to the internal parsing method for this task. ~~Optional[TaskResponseParser[SummarizationTask]]~~ | +| `prompt_example_type` (NEW) | Type to use for fewshot examples. Defaults to `SummarizationExample`. ~~Optional[Type[FewshotExample]]~~ | +| `max_n_words` | Maximum number of words to be used in summary. Note that this should not expected to work exactly. Defaults to `None`. ~~Optional[int]~~ | +| `field` | Name of extension attribute to store summary in (i. e. the summary will be available in `doc._.{field}`). Defaults to `summary`. ~~str~~ | + +The summarization task prompts the model for a concise summary of the provided +text. It optionally allows to limit the response to a certain number of tokens - +note that this requirement will be included in the prompt, but the task doesn't +perform a hard cut-off. It's hence possible that your summary exceeds +`max_n_words`. + +To perform [few-shot learning](/usage/large-language-models#few-shot-prompts), +you can write down a few examples in a separate file, and provide these to be +injected into the prompt to the LLM. The default reader `spacy.FewShotReader.v1` +supports `.yml`, `.yaml`, `.json` and `.jsonl`. + +```yaml +- text: > + The United Nations, referred to informally as the UN, is an + intergovernmental organization whose stated purposes are to maintain + international peace and security, develop friendly relations among nations, + achieve international cooperation, and serve as a centre for harmonizing the + actions of nations. It is the world's largest international organization. + The UN is headquartered on international territory in New York City, and the + organization has other offices in Geneva, Nairobi, Vienna, and The Hague, + where the International Court of Justice is headquartered.\n\n The UN was + established after World War II with the aim of preventing future world wars, + and succeeded the League of Nations, which was characterized as + ineffective. + summary: + 'The UN is an international organization that promotes global peace, + cooperation, and harmony. Established after WWII, its purpose is to prevent + future world wars.' 
+``` + +```ini +[components.llm.task] +@llm_tasks = "spacy.Summarization.v1" +max_n_words = 20 +[components.llm.task.examples] +@misc = "spacy.FewShotReader.v1" +path = "summarization_examples.yml" +``` + +### EL (Entity Linking) {id="nel"} + +The EL links recognized entities (see [NER](#ner)) to those in a knowledge base +(KB). The EL task prompts the LLM to select the most likely candidate from the +KB, whose structure can be arbitrary. + +Note that the documents processed by the entity linking task are expected to +have recognized entities in their `.ents` attribute. This can be achieved by +either running the [NER task](#ner), using a trained spaCy NER model or setting +the entities manually prior to running the EL task. + +In order to be able to pull data from the KB, an object implementing the +`CandidateSelector` protocol has to be provided. This requires two functions: +(1) `__call__()` to fetch candidate entities for entity mentions in the text +(assumed to be available in `Doc.ents`) and (2) `get_entity_description()` to +fetch descriptions for any given entity ID. Descriptions can be empty, but +ideally provide more context for entities stored in the KB. + +`spacy-llm` provides a `CandidateSelector` implementation +(`spacy.CandidateSelector.v1`) that leverages a spaCy knowledge base - as used +in an `entity_linking` component - to select candidates. This knowledge base can +be loaded from an existing spaCy pipeline (note that the pipeline's EL component +doesn't have to be trained) or from a separate .yaml file. + +#### spacy.EntityLinker.v1 {id="el-v1"} + +Supports zero- and few-shot prompting. Relies on a configurable component +suggesting viable entities before letting the LLM pick the most likely +candidate. + +> #### Example config for spacy.EntityLinker.v1 +> +> ```ini +> [paths] +> el_nlp = null +> +> ... +> +> [components.llm.task] +> @llm_tasks = "spacy.EntityLinker.v1" +> +> [initialize] +> [initialize.components] +> [initialize.components.llm] +> [initialize.components.llm.candidate_selector] +> @llm_misc = "spacy.CandidateSelector.v1" +> +> # Load a KB from a KB file. For loading KBs from spaCy pipelines see spacy.KBObjectLoader.v1. +> [initialize.components.llm.candidate_selector.kb_loader] +> @llm_misc = "spacy.KBFileLoader.v1" +> # Path to knowledge base .yaml file. +> path = ${paths.el_kb} +> ``` + +| Argument | Description | +| --------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `template` | Custom prompt template to send to LLM model. Defaults to [entity_linker.v1.jinja](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/entity_linker.v1.jinja). ~~str~~ | +| `parse_responses` | Callable for parsing LLM responses for this task. Defaults to the internal parsing method for this task. ~~Optional[TaskResponseParser[EntityLinkerTask]]~~ | +| `prompt_example_type` | Type to use for fewshot examples. Defaults to `ELExample`. ~~Optional[Type[FewshotExample]]~~ | +| `examples` | Optional callable that reads a file containing task examples for few-shot learning. If `None` is passed, zero-shot learning will be used. Defaults to `None`. ~~ExamplesConfigType~~ | +| `scorer` | Scorer function. Defaults to the metric used by spaCy to evaluate entity linking performance. 
~~Optional[Scorer]~~ | + +##### spacy.CandidateSelector.v1 {id="candidate-selector-v1"} + +`spacy.CandidateSelector.v1` is an implementation of the `CandidateSelector` +protocol required by [`spacy.EntityLinker.v1`](#el-v1). The built-in candidate +selector method allows loading existing knowledge bases in several ways, e. g. +loading from a spaCy pipeline with a (not necessarily trained) entity linking +component, and loading from a file describing the knowlege base as a .yaml file. +Either way the loaded data will be converted to a spaCy `InMemoryLookupKB` +instance. The KB's selection capabilities are used to select the most likely +entity candidates for the specified mentions. + +> #### Example config for spacy.CandidateSelector.v1 +> +> ```ini +> [initialize] +> [initialize.components] +> [initialize.components.llm] +> [initialize.components.llm.candidate_selector] +> @llm_misc = "spacy.CandidateSelector.v1" +> +> # Load a KB from a KB file. For loading KBs from spaCy pipelines see spacy.KBObjectLoader.v1. +> [initialize.components.llm.candidate_selector.kb_loader] +> @llm_misc = "spacy.KBFileLoader.v1" +> # Path to knowledge base .yaml file. +> path = ${paths.el_kb} +> ``` + +| Argument | Description | +| ----------- | ----------------------------------------------------------------- | +| `kb_loader` | KB loader object. ~~InMemoryLookupKBLoader~~ | +| `top_n` | Top-n candidates to include in the prompt. Defaults to 5. ~~int~~ | + +##### spacy.KBObjectLoader.v1 {id="kb-object-loader-v1"} + +Adheres to the `InMemoryLookupKBLoader` interface required by +[`spacy.CandidateSelector.v1`](#candidate-selector-v1). Loads a knowledge base +from an existing spaCy pipeline. + +> #### Example config for spacy.KBObjectLoader.v1 +> +> ```ini +> [initialize.components.llm.candidate_selector.kb_loader] +> @llm_misc = "spacy.KBObjectLoader.v1" +> # Path to knowledge base directory in serialized spaCy pipeline. +> path = ${paths.el_kb} +> # Path to spaCy pipeline. If this is not specified, spacy-llm tries to determine this automatically (but may fail). +> nlp_path = ${paths.el_nlp} +> # Path to file with descriptions for entity. +> desc_path = ${paths.el_desc} +> ``` + +| Argument | Description | +| ----------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `path` | Path to KB file. ~~Union[str, Path]~~ | +| `nlp_path` | Path to serialized NLP pipeline. If None, path will be guessed. ~~Optional[Union[Path, str]]~~ | +| `desc_path` | Path to file with descriptions for entities. ~~int~~ | +| `ent_desc_reader` | Entity description reader. Defaults to an internal method expecting a CSV file without header row, with ";" as delimiters, and with two columns - one for the entitys' IDs, one for their descriptions. ~~Optional[EntDescReader]~~ | + +##### spacy.KBFileLoader.v1 {id="kb-file-loader-v1"} + +Adheres to the `InMemoryLookupKBLoader` interface required by +[`spacy.CandidateSelector.v1`](#candidate-selector-v1). Loads a knowledge base +from a knowledge base file. The KB .yaml file has to stick to the following +format: + +```yaml +entities: + # The key should be whatever ID identifies this entity uniquely in your knowledge base. + ID1: + name: "..." + desc: "..." + ID2: + ... +# Data on aliases in your knowledge base - e. g. "Apple" for the entity "Apple Inc.". +aliases: + - alias: "..." 
+ # List of all entities that this alias refers to. + entities: ["ID1", "ID2", ...] + # Optional: prior probabilities that this alias refers to the n-th entity in the "entities" attribute. + probabilities: [0.5, 0.2, ...] + - alias: "..." + entities: [...] + probabilities: [...] + ... +``` + +See +[here](https://github.com/explosion/spacy-llm/blob/main/usage_examples/el_openai/el_kb_data.yml) +for a toy example of how such a KB file might look like. + +> #### Example config for spacy.KBFileLoader.v1 +> +> ```ini +> [initialize.components.llm.candidate_selector.kb_loader] +> @llm_misc = "spacy.KBFileLoader.v1" +> # Path to knowledge base file. +> path = ${paths.el_kb} +> ``` + +| Argument | Description | +| -------- | ------------------------------------- | +| `path` | Path to KB file. ~~Union[str, Path]~~ | + +### NER {id="ner"} + +The NER task identifies non-overlapping entities in text. + +#### spacy.NER.v3 {id="ner-v3"} + +Version 3 is fundamentally different to v1 and v2, as it implements +Chain-of-Thought prompting, based on the +[PromptNER paper](https://arxiv.org/pdf/2305.15444.pdf) by Ashok and Lipton +(2023). On an internal use-case, we have found this implementation to obtain +significant better accuracy - with an increase of F-score of up to 15 percentage +points. + +> #### Example config +> +> ```ini +> [components.llm.task] +> @llm_tasks = "spacy.NER.v3" +> labels = ["PERSON", "ORGANISATION", "LOCATION"] +> ``` + +When no examples are [specified](/usage/large-language-models#few-shot-prompts), +the v3 implementation will use a dummy example in the prompt. Technically this +means that the task will always perform few-shot prompting under the hood. + +| Argument | Description | +| --------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `template` | Custom prompt template to send to LLM model. Defaults to [ner.v3.jinja](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/ner.v3.jinja). ~~str~~ | +| `examples` | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~ | +| `parse_responses` (NEW) | Callable for parsing LLM responses for this task. Defaults to the internal parsing method for this task. ~~Optional[TaskResponseParser[NERTask]]~~ | +| `prompt_example_type` (NEW) | Type to use for fewshot examples. Defaults to `NERExample`. ~~Optional[Type[FewshotExample]]~~ | +| `scorer` | Scorer function that evaluates the task performance on provided examples. Defaults to the metric used by spaCy. ~~Optional[Scorer]~~ | +| `labels` | List of labels or str of comma-separated list of labels. ~~Union[List[str], str]~~ | +| `label_definitions` | Optional dict mapping a label to a description of that label. These descriptions are added to the prompt to help instruct the LLM on what to extract. Defaults to `None`. ~~Optional[Dict[str, str]]~~ | +| `description` (NEW) | A description of what to recognize or not recognize as entities. ~~str~~ | +| `normalizer` | Function that normalizes the labels as returned by the LLM. If `None`, defaults to `spacy.LowercaseNormalizer.v1`. Defaults to `None`. ~~Optional[Callable[[str], str]]~~ | +| `alignment_mode` | Alignment mode in case the LLM returns entities that do not align with token boundaries. Options are `"strict"`, `"contract"` or `"expand"`. Defaults to `"contract"`. 
~~str~~ | +| `case_sensitive_matching` | Whether to search without case sensitivity. Defaults to `False`. ~~bool~~ | + +Note that the `single_match` parameter, used in v1 and v2, is not supported +anymore, as the CoT parsing algorithm takes care of this automatically. + +New to v3 is the fact that you can provide an explicit description of what +entities should look like. You can use this feature in addition to +`label_definitions`. + +```ini +[components.llm.task] +@llm_tasks = "spacy.NER.v3" +labels = ["DISH", "INGREDIENT", "EQUIPMENT"] +description = Entities are the names food dishes, + ingredients, and any kind of cooking equipment. + Adjectives, verbs, adverbs are not entities. + Pronouns are not entities. + +[components.llm.task.label_definitions] +DISH = "Known food dishes, e.g. Lobster Ravioli, garlic bread" +INGREDIENT = "Individual parts of a food dish, including herbs and spices." +EQUIPMENT = "Any kind of cooking equipment. e.g. oven, cooking pot, grill" +``` + +To perform [few-shot learning](/usage/large-language-models#few-shot-prompts), +you can write down a few examples in a separate file, and provide these to be +injected into the prompt to the LLM. The default reader `spacy.FewShotReader.v1` +supports `.yml`, `.yaml`, `.json` and `.jsonl`. + +While not required, this task works best when both positive and negative +examples are provided. The format is different than the files required for v1 +and v2, as additional fields such as `is_entity` and `reason` should now be +provided. + +```json +[ + { + "text": "You can't get a great chocolate flavor with carob.", + "spans": [ + { + "text": "chocolate", + "is_entity": false, + "label": "==NONE==", + "reason": "is a flavor in this context, not an ingredient" + }, + { + "text": "carob", + "is_entity": true, + "label": "INGREDIENT", + "reason": "is an ingredient to add chocolate flavor" + } + ] + }, + ... +] +``` + +```ini +[components.llm.task.examples] +@misc = "spacy.FewShotReader.v1" +path = "${paths.examples}" +``` + +For a fully working example, see this +[usage example](https://github.com/explosion/spacy-llm/tree/main/usage_examples/ner_v3_openai). + +#### spacy.NER.v2 {id="ner-v2"} + +This version supports explicitly defining the provided labels with custom +descriptions, and further supports zero-shot and few-shot prompting just like +v1. + +> #### Example config +> +> ```ini +> [components.llm.task] +> @llm_tasks = "spacy.NER.v2" +> labels = ["PERSON", "ORGANISATION", "LOCATION"] +> examples = null +> ``` + +| Argument | Description | +| --------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `template` (NEW) | Custom prompt template to send to LLM model. Defaults to [ner.v2.jinja](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/ner.v2.jinja). ~~str~~ | +| `examples` | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~ | +| `parse_responses` (NEW) | Callable for parsing LLM responses for this task. Defaults to the internal parsing method for this task. ~~Optional[TaskResponseParser[NERTask]]~~ | +| `prompt_example_type` (NEW) | Type to use for fewshot examples. Defaults to `NERExample`. ~~Optional[Type[FewshotExample]]~~ | +| `scorer` (NEW) | Scorer function that evaluates the task performance on provided examples. 
Defaults to the metric used by spaCy. ~~Optional[Scorer]~~ | +| `labels` | List of labels or str of comma-separated list of labels. ~~Union[List[str], str]~~ | +| `label_definitions` (NEW) | Optional dict mapping a label to a description of that label. These descriptions are added to the prompt to help instruct the LLM on what to extract. Defaults to `None`. ~~Optional[Dict[str, str]]~~ | +| `normalizer` | Function that normalizes the labels as returned by the LLM. If `None`, defaults to `spacy.LowercaseNormalizer.v1`. Defaults to `None`. ~~Optional[Callable[[str], str]]~~ | +| `alignment_mode` | Alignment mode in case the LLM returns entities that do not align with token boundaries. Options are `"strict"`, `"contract"` or `"expand"`. Defaults to `"contract"`. ~~str~~ | +| `case_sensitive_matching` | Whether to search without case sensitivity. Defaults to `False`. ~~bool~~ | +| `single_match` | Whether to match an entity in the LLM's response only once (the first hit) or multiple times. Defaults to `False`. ~~bool~~ | + +The parameters `alignment_mode`, `case_sensitive_matching` and `single_match` +are identical to the [v1](#ner-v1) implementation. The format of few-shot +examples are also the same. + +> Label descriptions can also be used with explicit examples to give as much +> info to the LLM model as possible. + +New to v2 is the fact that you can write definitions for each label and provide +them via the `label_definitions` argument. This lets you tell the LLM exactly +what you're looking for rather than relying on the LLM to interpret its task +given just the label name. Label descriptions are freeform so you can write +whatever you want here, but a brief description along with some examples and +counter examples seems to work quite well. + +```ini +[components.llm.task] +@llm_tasks = "spacy.NER.v2" +labels = PERSON,SPORTS_TEAM + +[components.llm.task.label_definitions] +PERSON = "Extract any named individual in the text." +SPORTS_TEAM = "Extract the names of any professional sports team. e.g. Golden State Warriors, LA Lakers, Man City, Real Madrid" +``` + +For a fully working example, see this +[usage example](https://github.com/explosion/spacy-llm/tree/main/usage_examples/ner_dolly). + +#### spacy.NER.v1 {id="ner-v1"} + +The original version of the built-in NER task supports both zero-shot and +few-shot prompting. + +> #### Example config +> +> ```ini +> [components.llm.task] +> @llm_tasks = "spacy.NER.v1" +> labels = PERSON,ORGANISATION,LOCATION +> examples = null +> ``` + +| Argument | Description | +| --------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `examples` | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~ | +| `parse_responses` (NEW) | Callable for parsing LLM responses for this task. Defaults to the internal parsing method for this task. ~~Optional[TaskResponseParser[NERTask]]~~ | +| `prompt_example_type` (NEW) | Type to use for fewshot examples. Defaults to `NERExample`. ~~Optional[Type[FewshotExample]]~~ | +| `scorer` (NEW) | Scorer function that evaluates the task performance on provided examples. Defaults to the metric used by spaCy. ~~Optional[Scorer]~~ | +| `labels` | Comma-separated list of labels. ~~str~~ | +| `normalizer` | Function that normalizes the labels as returned by the LLM. 
If `None`, defaults to `spacy.LowercaseNormalizer.v1`. ~~Optional[Callable[[str], str]]~~ | +| `alignment_mode` | Alignment mode in case the LLM returns entities that do not align with token boundaries. Options are `"strict"`, `"contract"` or `"expand"`. Defaults to `"contract"`. ~~str~~ | +| `case_sensitive_matching` | Whether to search without case sensitivity. Defaults to `False`. ~~bool~~ | +| `single_match` | Whether to match an entity in the LLM's response only once (the first hit) or multiple times. Defaults to `False`. ~~bool~~ | + +The NER task implementation doesn't currently ask the LLM for specific offsets, +but simply expects a list of strings that represent the enties in the document. +This means that a form of string matching is required. This can be configured by +the following parameters: + +- The `single_match` parameter is typically set to `False` to allow for multiple + matches. For instance, the response from the LLM might only mention the entity + "Paris" once, but you'd still want to mark it every time it occurs in the + document. +- The case-sensitive matching is typically set to `False` to be robust against + case variances in the LLM's output. +- The `alignment_mode` argument is used to match entities as returned by the LLM + to the tokens from the original `Doc` - specifically it's used as argument in + the call to [`doc.char_span()`](/api/doc#char_span). The `"strict"` mode will + only keep spans that strictly adhere to the given token boundaries. + `"contract"` will only keep those tokens that are fully within the given + range, e.g. reducing `"New Y"` to `"New"`. Finally, `"expand"` will expand the + span to the next token boundaries, e.g. expanding `"New Y"` out to + `"New York"`. + +To perform [few-shot learning](/usage/large-language-models#few-shot-prompts), +you can write down a few examples in a separate file, and provide these to be +injected into the prompt to the LLM. The default reader `spacy.FewShotReader.v1` +supports `.yml`, `.yaml`, `.json` and `.jsonl`. + +```yaml +- text: Jack and Jill went up the hill. + entities: + PERSON: + - Jack + - Jill + LOCATION: + - hill +- text: Jack fell down and broke his crown. + entities: + PERSON: + - Jack +``` + +```ini +[components.llm.task.examples] +@misc = "spacy.FewShotReader.v1" +path = "ner_examples.yml" +``` + +### SpanCat {id="spancat"} + +The SpanCat task identifies potentially overlapping entities in text. + +#### spacy.SpanCat.v3 {id="spancat-v3"} + +The built-in SpanCat v3 task is a simple adaptation of the NER v3 task to +support overlapping entities and store its annotations in `doc.spans`. + +> #### Example config +> +> ```ini +> [components.llm.task] +> @llm_tasks = "spacy.SpanCat.v3" +> labels = ["PERSON", "ORGANISATION", "LOCATION"] +> examples = null +> ``` + +| Argument | Description | +| --------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `template` | Custom prompt template to send to LLM model. Defaults to [`spancat.v3.jinja`](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/spancat.v3.jinja). ~~str~~ | +| `examples` | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~ | +| `parse_responses` (NEW) | Callable for parsing LLM responses for this task. Defaults to the internal parsing method for this task. 
~~Optional[TaskResponseParser[SpanCatTask]]~~ | +| `prompt_example_type` (NEW) | Type to use for fewshot examples. Defaults to `SpanCatExample`. ~~Optional[Type[FewshotExample]]~~ | +| `scorer` (NEW) | Scorer function that evaluates the task performance on provided examples. Defaults to the metric used by spaCy. ~~Optional[Scorer]~~ | +| `labels` | List of labels or str of comma-separated list of labels. ~~Union[List[str], str]~~ | +| `label_definitions` | Optional dict mapping a label to a description of that label. These descriptions are added to the prompt to help instruct the LLM on what to extract. Defaults to `None`. ~~Optional[Dict[str, str]]~~ | +| `description` (NEW) | A description of what to recognize or not recognize as entities. ~~str~~ | +| `spans_key` | Key of the `Doc.spans` dict to save the spans under. Defaults to `"sc"`. ~~str~~ | +| `normalizer` | Function that normalizes the labels as returned by the LLM. If `None`, defaults to `spacy.LowercaseNormalizer.v1`. ~~Optional[Callable[[str], str]]~~ | +| `alignment_mode` | Alignment mode in case the LLM returns entities that do not align with token boundaries. Options are `"strict"`, `"contract"` or `"expand"`. Defaults to `"contract"`. ~~str~~ | +| `case_sensitive_matching` | Whether to search without case sensitivity. Defaults to `False`. ~~bool~~ | + +Note that the `single_match` parameter, used in v1 and v2, is not supported +anymore, as the CoT parsing algorithm takes care of this automatically. + +#### spacy.SpanCat.v2 {id="spancat-v2"} + +The built-in SpanCat v2 task is a simple adaptation of the NER v2 task to +support overlapping entities and store its annotations in `doc.spans`. + +> #### Example config +> +> ```ini +> [components.llm.task] +> @llm_tasks = "spacy.SpanCat.v2" +> labels = ["PERSON", "ORGANISATION", "LOCATION"] +> examples = null +> ``` + +| Argument | Description | +| --------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `template` (NEW) | Custom prompt template to send to LLM model. Defaults to [`spancat.v2.jinja`](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/spancat.v2.jinja). ~~str~~ | +| `examples` | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~ | +| `parse_responses` (NEW) | Callable for parsing LLM responses for this task. Defaults to the internal parsing method for this task. ~~Optional[TaskResponseParser[SpanCatTask]]~~ | +| `prompt_example_type` (NEW) | Type to use for fewshot examples. Defaults to `SpanCatExample`. ~~Optional[Type[FewshotExample]]~~ | +| `scorer` (NEW) | Scorer function that evaluates the task performance on provided examples. Defaults to the metric used by spaCy. ~~Optional[Scorer]~~ | +| `labels` | List of labels or str of comma-separated list of labels. ~~Union[List[str], str]~~ | +| `label_definitions` (NEW) | Optional dict mapping a label to a description of that label. These descriptions are added to the prompt to help instruct the LLM on what to extract. Defaults to `None`. ~~Optional[Dict[str, str]]~~ | +| `spans_key` | Key of the `Doc.spans` dict to save the spans under. Defaults to `"sc"`. ~~str~~ | +| `normalizer` | Function that normalizes the labels as returned by the LLM. If `None`, defaults to `spacy.LowercaseNormalizer.v1`. 
~~Optional[Callable[[str], str]]~~ | +| `alignment_mode` | Alignment mode in case the LLM returns entities that do not align with token boundaries. Options are `"strict"`, `"contract"` or `"expand"`. Defaults to `"contract"`. ~~str~~ | +| `case_sensitive_matching` | Whether to search without case sensitivity. Defaults to `False`. ~~bool~~ | +| `single_match` | Whether to match an entity in the LLM's response only once (the first hit) or multiple times. Defaults to `False`. ~~bool~~ | + +Except for the `spans_key` parameter, the SpanCat v2 task reuses the +configuration from the NER v2 task. Refer to [its documentation](#ner-v2) for +more insight. + +#### spacy.SpanCat.v1 {id="spancat-v1"} + +The original version of the built-in SpanCat task is a simple adaptation of the +v1 NER task to support overlapping entities and store its annotations in +`doc.spans`. + +> #### Example config +> +> ```ini +> [components.llm.task] +> @llm_tasks = "spacy.SpanCat.v1" +> labels = PERSON,ORGANISATION,LOCATION +> examples = null +> ``` + +| Argument | Description | +| --------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `examples` | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~ | +| `parse_responses` (NEW) | Callable for parsing LLM responses for this task. Defaults to the internal parsing method for this task. ~~Optional[TaskResponseParser[SpanCatTask]]~~ | +| `prompt_example_type` (NEW) | Type to use for fewshot examples. Defaults to `SpanCatExample`. ~~Optional[Type[FewshotExample]]~~ | +| `scorer` (NEW) | Scorer function that evaluates the task performance on provided examples. Defaults to the metric used by spaCy. ~~Optional[Scorer]~~ | +| `labels` | Comma-separated list of labels. ~~str~~ | +| `spans_key` | Key of the `Doc.spans` dict to save the spans under. Defaults to `"sc"`. ~~str~~ | +| `normalizer` | Function that normalizes the labels as returned by the LLM. If `None`, defaults to `spacy.LowercaseNormalizer.v1`. ~~Optional[Callable[[str], str]]~~ | +| `alignment_mode` | Alignment mode in case the LLM returns entities that do not align with token boundaries. Options are `"strict"`, `"contract"` or `"expand"`. Defaults to `"contract"`. ~~str~~ | +| `case_sensitive_matching` | Whether to search without case sensitivity. Defaults to `False`. ~~bool~~ | +| `single_match` | Whether to match an entity in the LLM's response only once (the first hit) or multiple times. Defaults to `False`. ~~bool~~ | + +Except for the `spans_key` parameter, the SpanCat v1 task reuses the +configuration from the NER v1 task. Refer to [its documentation](#ner-v1) for +more insight. + +### TextCat {id="textcat"} + +The TextCat task labels documents with relevant categories. + +#### spacy.TextCat.v3 {id="textcat-v3"} + +On top of the functionality from v2, version 3 of the built-in TextCat tasks +allows setting definitions of labels. Those definitions are included in the +prompt. + +> #### Example config +> +> ```ini +> [components.llm.task] +> @llm_tasks = "spacy.TextCat.v3" +> labels = ["COMPLIMENT", "INSULT"] +> +> [components.llm.task.label_definitions] +> "COMPLIMENT" = "a polite expression of praise or admiration.", +> "INSULT" = "a disrespectful or scornfully abusive remark or act." 
+> examples = null +> ``` + +| Argument | Description | +| --------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `template` | Custom prompt template to send to LLM model. Defaults to [`textcat.v3.jinja`](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/textcat.v3.jinja). ~~str~~ | +| `examples` | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~ | +| `parse_responses` (NEW) | Callable for parsing LLM responses for this task. Defaults to the internal parsing method for this task. ~~Optional[TaskResponseParser[SpanCatTask]]~~ | +| `prompt_example_type` (NEW) | Type to use for fewshot examples. Defaults to `TextCatExample`. ~~Optional[Type[FewshotExample]]~~ | +| `scorer` (NEW) | Scorer function that evaluates the task performance on provided examples. Defaults to the metric used by spaCy. ~~Optional[Scorer]~~ | +| `labels` | List of labels or str of comma-separated list of labels. ~~Union[List[str], str]~~ | +| `label_definitions` (NEW) | Dictionary of label definitions. Included in the prompt, if set. Defaults to `None`. ~~Optional[Dict[str, str]]~~ | +| `normalizer` | Function that normalizes the labels as returned by the LLM. If `None`, falls back to `spacy.LowercaseNormalizer.v1`. Defaults to `None`. ~~Optional[Callable[[str], str]]~~ | +| `exclusive_classes` | If set to `True`, only one label per document should be valid. If set to `False`, one document can have multiple labels. Defaults to `False`. ~~bool~~ | +| `allow_none` | When set to `True`, allows the LLM to not return any of the given label. The resulting dict in `doc.cats` will have `0.0` scores for all labels. Defaults to `True`. ~~bool~~ | +| `verbose` | If set to `True`, warnings will be generated when the LLM returns invalid responses. Defaults to `False`. ~~bool~~ | + +The formatting of few-shot examples is the same as those for the +[v1](#textcat-v1) implementation. + +#### spacy.TextCat.v2 {id="textcat-v2"} + +V2 includes all v1 functionality, with an improved prompt template. + +> #### Example config +> +> ```ini +> [components.llm.task] +> @llm_tasks = "spacy.TextCat.v2" +> labels = ["COMPLIMENT", "INSULT"] +> examples = null +> ``` + +| Argument | Description | +| --------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `template` (NEW) | Custom prompt template to send to LLM model. Defaults to [`textcat.v2.jinja`](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/textcat.v2.jinja). ~~str~~ | +| `examples` | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~ | +| `parse_responses` (NEW) | Callable for parsing LLM responses for this task. Defaults to the internal parsing method for this task. ~~Optional[TaskResponseParser[SpanCatTask]]~~ | +| `prompt_example_type` (NEW) | Type to use for fewshot examples. Defaults to `TextCatExample`. ~~Optional[Type[FewshotExample]]~~ | +| `scorer` (NEW) | Scorer function that evaluates the task performance on provided examples. Defaults to the metric used by spaCy. ~~Optional[Scorer]~~ | +| `labels` | List of labels or str of comma-separated list of labels. 
~~Union[List[str], str]~~ | +| `normalizer` | Function that normalizes the labels as returned by the LLM. If `None`, falls back to `spacy.LowercaseNormalizer.v1`. ~~Optional[Callable[[str], str]]~~ | +| `exclusive_classes` | If set to `True`, only one label per document should be valid. If set to `False`, one document can have multiple labels. Defaults to `False`. ~~bool~~ | +| `allow_none` | When set to `True`, allows the LLM to not return any of the given label. The resulting dict in `doc.cats` will have `0.0` scores for all labels. Defaults to `True`. ~~bool~~ | +| `verbose` | If set to `True`, warnings will be generated when the LLM returns invalid responses. Defaults to `False`. ~~bool~~ | + +The formatting of few-shot examples is the same as those for the +[v1](#textcat-v1) implementation. + +#### spacy.TextCat.v1 {id="textcat-v1"} + +Version 1 of the built-in TextCat task supports both zero-shot and few-shot +prompting. + +> #### Example config +> +> ```ini +> [components.llm.task] +> @llm_tasks = "spacy.TextCat.v1" +> labels = COMPLIMENT,INSULT +> examples = null +> ``` + +| Argument | Description | +| --------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `examples` | Optional function that generates examples for few-shot learning. Deafults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~ | +| `parse_responses` (NEW) | Callable for parsing LLM responses for this task. Defaults to the internal parsing method for this task. ~~Optional[TaskResponseParser[SpanCatTask]]~~ | +| `prompt_example_type` (NEW) | Type to use for fewshot examples. Defaults to `TextCatExample`. ~~Optional[Type[FewshotExample]]~~ | +| `scorer` (NEW) | Scorer function that evaluates the task performance on provided examples. Defaults to the metric used by spaCy. ~~Optional[Scorer]~~ | +| `labels` | Comma-separated list of labels. ~~str~~ | +| `normalizer` | Function that normalizes the labels as returned by the LLM. If `None`, falls back to `spacy.LowercaseNormalizer.v1`. ~~Optional[Callable[[str], str]]~~ | +| `exclusive_classes` | If set to `True`, only one label per document should be valid. If set to `False`, one document can have multiple labels. Defaults to `False`. ~~bool~~ | +| `allow_none` | When set to `True`, allows the LLM to not return any of the given label. The resulting dict in `doc.cats` will have `0.0` scores for all labels. Defaults to `True`. ~~bool~~ | +| `verbose` | If set to `True`, warnings will be generated when the LLM returns invalid responses. Defaults to `False`. ~~bool~~ | + +To perform [few-shot learning](/usage/large-language-models#few-shot-prompts), +you can write down a few examples in a separate file, and provide these to be +injected into the prompt to the LLM. The default reader `spacy.FewShotReader.v1` +supports `.yml`, `.yaml`, `.json` and `.jsonl`. + +```json +[ + { + "text": "You look great!", + "answer": "Compliment" + }, + { + "text": "You are not very clever at all.", + "answer": "Insult" + } +] +``` + +```ini +[components.llm.task.examples] +@misc = "spacy.FewShotReader.v1" +path = "textcat_examples.json" +``` + +If you want to perform few-shot learning with a binary classifier (i. e. a text +either should or should not be assigned to a given class), you can provide +positive and negative examples with answers of "POS" or "NEG". 
"POS" means that +this example should be assigned the class label defined in the configuration, +"NEG" means it shouldn't. E. g. for spam classification: + +```json +[ + { + "text": "You won the lottery! Wire a fee of 200$ to be able to withdraw your winnings.", + "answer": "POS" + }, + { + "text": "Your order #123456789 has arrived", + "answer": "NEG" + } +] +``` + +### REL {id="rel"} + +The REL task extracts relations between named entities. + +#### spacy.REL.v1 {id="rel-v1"} + +The built-in REL task supports both zero-shot and few-shot prompting. It relies +on an upstream NER component for entities extraction. + +> #### Example config +> +> ```ini +> [components.llm.task] +> @llm_tasks = "spacy.REL.v1" +> labels = ["LivesIn", "Visits"] +> ``` + +| Argument | Description | +| --------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `template` | Custom prompt template to send to LLM model. Defaults to [`rel.v3.jinja`](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/rel.v1.jinja). ~~str~~ | +| `examples` | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~ | +| `parse_responses` (NEW) | Callable for parsing LLM responses for this task. Defaults to the internal parsing method for this task. ~~Optional[TaskResponseParser[RELTask]]~~ | +| `prompt_example_type` (NEW) | Type to use for fewshot examples. Defaults to `RELExample`. ~~Optional[Type[FewshotExample]]~~ | +| `scorer` (NEW) | Scorer function that evaluates the task performance on provided examples. Defaults to the metric used by spaCy. ~~Optional[Scorer]~~ | +| `labels` | List of labels or str of comma-separated list of labels. ~~Union[List[str], str]~~ | +| `label_definitions` | Dictionary providing a description for each relation label. Defaults to `None`. ~~Optional[Dict[str, str]]~~ | +| `normalizer` | Function that normalizes the labels as returned by the LLM. If `None`, falls back to `spacy.LowercaseNormalizer.v1`. Defaults to `None`. ~~Optional[Callable[[str], str]]~~ | +| `verbose` | If set to `True`, warnings will be generated when the LLM returns invalid responses. Defaults to `False`. ~~bool~~ | + +To perform [few-shot learning](/usage/large-language-models#few-shot-prompts), +you can write down a few examples in a separate file, and provide these to be +injected into the prompt to the LLM. The default reader `spacy.FewShotReader.v1` +supports `.yml`, `.yaml`, `.json` and `.jsonl`. + +```json +{"text": "Laura bought a house in Boston with her husband Mark.", "ents": [{"start_char": 0, "end_char": 5, "label": "PERSON"}, {"start_char": 24, "end_char": 30, "label": "GPE"}, {"start_char": 48, "end_char": 52, "label": "PERSON"}], "relations": [{"dep": 0, "dest": 1, "relation": "LivesIn"}, {"dep": 2, "dest": 1, "relation": "LivesIn"}]} +{"text": "Michael travelled through South America by bike.", "ents": [{"start_char": 0, "end_char": 7, "label": "PERSON"}, {"start_char": 26, "end_char": 39, "label": "LOC"}], "relations": [{"dep": 0, "dest": 1, "relation": "Visits"}]} +``` + +```ini +[components.llm.task] +@llm_tasks = "spacy.REL.v1" +labels = ["LivesIn", "Visits"] + +[components.llm.task.examples] +@misc = "spacy.FewShotReader.v1" +path = "rel_examples.jsonl" +``` + +Note: the REL task relies on pre-extracted entities to make its prediction. 
+Hence, you'll need to add a component that populates `doc.ents` with recognized
+spans to your spaCy pipeline and put it _before_ the REL component.
+
+For a fully working example, see this
+[usage example](https://github.com/explosion/spacy-llm/tree/main/usage_examples/rel_openai).
+
+### Lemma {id="lemma"}
+
+The Lemma task lemmatizes the provided text and updates the `lemma_` attribute
+in the doc's tokens accordingly.
+
+#### spacy.Lemma.v1 {id="lemma-v1"}
+
+This task supports both zero-shot and few-shot prompting.
+
+> #### Example config
+>
+> ```ini
+> [components.llm.task]
+> @llm_tasks = "spacy.Lemma.v1"
+> examples = null
+> ```
+
+| Argument                    | Description                                                                                                                                                                      |
+| --------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `template`                  | Custom prompt template to send to LLM model. Defaults to [`lemma.v1.jinja`](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/lemma.v1.jinja). ~~str~~ |
+| `examples`                  | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~                                                  |
+| `parse_responses` (NEW)     | Callable for parsing LLM responses for this task. Defaults to the internal parsing method for this task. ~~Optional[TaskResponseParser[LemmaTask]]~~                            |
+| `prompt_example_type` (NEW) | Type to use for fewshot examples. Defaults to `LemmaExample`. ~~Optional[Type[FewshotExample]]~~                                                                                |
+| `scorer` (NEW)              | Scorer function that evaluates the task performance on provided examples. Defaults to the metric used by spaCy. ~~Optional[Scorer]~~                                            |
+
+The task prompts the LLM to lemmatize the passed text and return the lemmatized
+version as a list of tokens and their corresponding lemmas. E. g. the text
+`I'm buying ice cream for my friends.` should yield a response along the lines
+of
+
+```
+I: I
+'m: be
+buying: buy
+ice: ice
+cream: cream
+for: for
+my: my
+friends: friend
+.: .
+```
+
+If for any given text/doc instance the number of lemmas returned by the LLM
+doesn't match the number of tokens from the pipeline's tokenizer, no lemmas are
+stored in the corresponding doc's tokens. Otherwise, each token's `.lemma_`
+property is updated with the lemma suggested by the LLM.
+
+To perform [few-shot learning](/usage/large-language-models#few-shot-prompts),
+you can write down a few examples in a separate file, and provide these to be
+injected into the prompt to the LLM. The default reader `spacy.FewShotReader.v1`
+supports `.yml`, `.yaml`, `.json` and `.jsonl`.
+
+```yaml
+- text: I'm buying ice cream.
+  lemmas:
+    - 'I': 'I'
+    - "'m": 'be'
+    - 'buying': 'buy'
+    - 'ice': 'ice'
+    - 'cream': 'cream'
+    - '.': '.'
+
+- text: I've watered the plants.
+  lemmas:
+    - 'I': 'I'
+    - "'ve": 'have'
+    - 'watered': 'water'
+    - 'the': 'the'
+    - 'plants': 'plant'
+    - '.': '.'
+```
+
+```ini
+[components.llm.task]
+@llm_tasks = "spacy.Lemma.v1"
+[components.llm.task.examples]
+@misc = "spacy.FewShotReader.v1"
+path = "lemma_examples.yml"
+```
+
+### Sentiment {id="sentiment"}
+
+Performs sentiment analysis on provided texts. Scores between 0 and 1 are stored
+in `Doc._.sentiment` - the higher, the more positive. Note that in case of
+parsing issues (e. g. unexpected LLM responses) the value might be `None`.
+
+#### spacy.Sentiment.v1 {id="sentiment-v1"}
+
+This task supports both zero-shot and few-shot prompting.
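+
+As a minimal usage sketch (assuming a hypothetical `config.cfg` that sets up an
+`llm` component with this task and a model for which API credentials are
+available), the score can then be read from the doc:
+
+```python
+# Minimal sketch - assumes config.cfg configures an llm component with
+# spacy.Sentiment.v1 and a model whose API credentials are set.
+from spacy_llm.util import assemble
+
+nlp = assemble("config.cfg")
+doc = nlp("I'm looking forward to this!")
+# doc._.sentiment is None if the LLM response couldn't be parsed
+if doc._.sentiment is not None:
+    print(doc._.sentiment)
+```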
+
+> #### Example config
+>
+> ```ini
+> [components.llm.task]
+> @llm_tasks = "spacy.Sentiment.v1"
+> examples = null
+> ```
+
+| Argument                    | Description                                                                                                                                                                              |
+| --------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `template`                  | Custom prompt template to send to LLM model. Defaults to [`sentiment.v1.jinja`](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/sentiment.v1.jinja). ~~str~~ |
+| `examples`                  | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~                                                          |
+| `parse_responses` (NEW)     | Callable for parsing LLM responses for this task. Defaults to the internal parsing method for this task. ~~Optional[TaskResponseParser[SentimentTask]]~~                                |
+| `prompt_example_type` (NEW) | Type to use for fewshot examples. Defaults to `SentimentExample`. ~~Optional[Type[FewshotExample]]~~                                                                                    |
+| `scorer` (NEW)              | Scorer function that evaluates the task performance on provided examples. Defaults to the metric used by spaCy. ~~Optional[Scorer]~~                                                    |
+| `field`                     | Name of the extension attribute to store the sentiment score in (i. e. the score will be available in `doc._.{field}`). Defaults to `sentiment`. ~~str~~                                |
+
+To perform [few-shot learning](/usage/large-language-models#few-shot-prompts),
+you can write down a few examples in a separate file, and provide these to be
+injected into the prompt to the LLM. The default reader `spacy.FewShotReader.v1`
+supports `.yml`, `.yaml`, `.json` and `.jsonl`.
+
+```yaml
+- text: 'This is horrifying.'
+  score: 0
+- text: 'This is underwhelming.'
+  score: 0.25
+- text: 'This is ok.'
+  score: 0.5
+- text: "I'm looking forward to this!"
+  score: 1.0
+```
+
+```ini
+[components.llm.task]
+@llm_tasks = "spacy.Sentiment.v1"
+[components.llm.task.examples]
+@misc = "spacy.FewShotReader.v1"
+path = "sentiment_examples.yml"
+```
+
+### NoOp {id="noop"}
+
+This task is only useful for testing - it tells the LLM to do nothing, and does
+not set any fields on the `docs`.
+
+> #### Example config
+>
+> ```ini
+> [components.llm.task]
+> @llm_tasks = "spacy.NoOp.v1"
+> ```
+
+#### spacy.NoOp.v1 {id="noop-v1"}
+
+This task needs no further configuration.
+
+## Models {id="models"}
+
+A _model_ defines which LLM to query, and how to query it. It can be a simple
+function taking a collection of prompts (consistent with the output type
+of `task.generate_prompts()`) and returning a collection of responses
+(consistent with the expected input of `parse_responses`). Generally speaking,
+it's a function of type
+`Callable[[Iterable[Iterable[Any]]], Iterable[Iterable[Any]]]`, but specific
+implementations can have other signatures, like
+`Callable[[Iterable[Iterable[str]]], Iterable[Iterable[str]]]`.
+
+Note: the model signature expects a nested iterable so it's able to deal with
+sharded docs. Unsharded docs (i. e. those produced by
+[nonsharding tasks](/api/large-language-models#task-nonsharding)) are reshaped
+to fit the expected data structure.
+
+### Models via REST API {id="models-rest"}
+
+These models all take the same parameters, but note that the `config` should
+contain provider-specific keys and values, as it will be passed onwards to the
+provider's API.
+
+| Argument           | Description                                                                                                                                                                      |
+| ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| `name`             | Model name, i. e. 
any supported variant for this particular model. Default depends on the specific model (cf. below) ~~str~~ | +| `config` | Further configuration passed on to the model. Default depends on the specific model (cf. below). ~~Dict[Any, Any]~~ | +| `strict` | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~ | +| `max_tries` | Max. number of tries for API request. Defaults to `5`. ~~int~~ | +| `max_request_time` | Max. time (in seconds) to wait for request to terminate before raising an exception. Defaults to `30.0`. ~~float~~ | +| `interval` | Time interval (in seconds) for API retries in seconds. Defaults to `1.0`. ~~float~~ | +| `endpoint` | Endpoint URL. Defaults to the provider's standard URL, if available (which is not the case for providers with exclusively custom deployments, such as Azure) ~~Optional[str]~~ | + +> #### Example config: +> +> ```ini +> [components.llm.model] +> @llm_models = "spacy.GPT-4.v1" +> name = "gpt-4" +> config = {"temperature": 0.0} +> ``` + +Currently, these models are provided as part of the core library: + +| Model | Provider | Supported names | Default name | Default config | +| ----------------------------- | ----------------- | ------------------------------------------------------------------------------------------------------------------ | ---------------------- | ------------------------------------ | +| `spacy.GPT-4.v1` | OpenAI | `["gpt-4", "gpt-4-0314", "gpt-4-32k", "gpt-4-32k-0314"]` | `"gpt-4"` | `{}` | +| `spacy.GPT-4.v2` | OpenAI | `["gpt-4", "gpt-4-0314", "gpt-4-32k", "gpt-4-32k-0314"]` | `"gpt-4"` | `{temperature=0.0}` | +| `spacy.GPT-4.v3` | OpenAI | All names of [GPT-4 models](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) offered by OpenAI | `"gpt-4"` | `{temperature=0.0}` | +| `spacy.GPT-3-5.v1` | OpenAI | `["gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-0613-16k", "gpt-3.5-turbo-instruct"]` | `"gpt-3.5-turbo"` | `{}` | +| `spacy.GPT-3-5.v2` | OpenAI | `["gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-0613-16k", "gpt-3.5-turbo-instruct"]` | `"gpt-3.5-turbo"` | `{temperature=0.0}` | +| `spacy.GPT-3-5.v3` | OpenAI | All names of [GPT-3.5 models](https://platform.openai.com/docs/models/gpt-3-5) offered by OpenAI | `"gpt-3.5-turbo"` | `{temperature=0.0}` | +| `spacy.Davinci.v1` | OpenAI | `["davinci"]` | `"davinci"` | `{}` | +| `spacy.Davinci.v2` | OpenAI | `["davinci"]` | `"davinci"` | `{temperature=0.0, max_tokens=500}` | +| `spacy.Text-Davinci.v1` | OpenAI | `["text-davinci-003", "text-davinci-002"]` | `"text-davinci-003"` | `{}` | +| `spacy.Text-Davinci.v2` | OpenAI | `["text-davinci-003", "text-davinci-002"]` | `"text-davinci-003"` | `{temperature=0.0, max_tokens=1000}` | +| `spacy.Code-Davinci.v1` | OpenAI | `["code-davinci-002"]` | `"code-davinci-002"` | `{}` | +| `spacy.Code-Davinci.v2` | OpenAI | `["code-davinci-002"]` | `"code-davinci-002"` | `{temperature=0.0, max_tokens=500}` | +| `spacy.Curie.v1` | OpenAI | `["curie"]` | `"curie"` | `{}` | +| `spacy.Curie.v2` | OpenAI | `["curie"]` | `"curie"` | `{temperature=0.0, max_tokens=500}` | +| `spacy.Text-Curie.v1` | OpenAI | `["text-curie-001"]` | `"text-curie-001"` | `{}` | +| `spacy.Text-Curie.v2` | OpenAI | `["text-curie-001"]` | `"text-curie-001"` | `{temperature=0.0, max_tokens=500}` | +| `spacy.Babbage.v1` | OpenAI | `["babbage"]` | `"babbage"` | `{}` | +| `spacy.Babbage.v2` | OpenAI | `["babbage"]` | `"babbage"` | 
`{temperature=0.0, max_tokens=500}` | +| `spacy.Text-Babbage.v1` | OpenAI | `["text-babbage-001"]` | `"text-babbage-001"` | `{}` | +| `spacy.Text-Babbage.v2` | OpenAI | `["text-babbage-001"]` | `"text-babbage-001"` | `{temperature=0.0, max_tokens=500}` | +| `spacy.Ada.v1` | OpenAI | `["ada"]` | `"ada"` | `{}` | +| `spacy.Ada.v2` | OpenAI | `["ada"]` | `"ada"` | `{temperature=0.0, max_tokens=500}` | +| `spacy.Text-Ada.v1` | OpenAI | `["text-ada-001"]` | `"text-ada-001"` | `{}` | +| `spacy.Text-Ada.v2` | OpenAI | `["text-ada-001"]` | `"text-ada-001"` | `{temperature=0.0, max_tokens=500}` | +| `spacy.Azure.v1` | Microsoft, OpenAI | Arbitrary values | No default | `{temperature=0.0}` | +| `spacy.Command.v1` | Cohere | `["command", "command-light", "command-light-nightly", "command-nightly"]` | `"command"` | `{}` | +| `spacy.Claude-2-1.v1` | Anthropic | `["claude-2-1"]` | `"claude-2-1"` | `{}` | +| `spacy.Claude-2.v1` | Anthropic | `["claude-2", "claude-2-100k"]` | `"claude-2"` | `{}` | +| `spacy.Claude-1.v1` | Anthropic | `["claude-1", "claude-1-100k"]` | `"claude-1"` | `{}` | +| `spacy.Claude-1-0.v1` | Anthropic | `["claude-1.0"]` | `"claude-1.0"` | `{}` | +| `spacy.Claude-1-2.v1` | Anthropic | `["claude-1.2"]` | `"claude-1.2"` | `{}` | +| `spacy.Claude-1-3.v1` | Anthropic | `["claude-1.3", "claude-1.3-100k"]` | `"claude-1.3"` | `{}` | +| `spacy.Claude-instant-1.v1` | Anthropic | `["claude-instant-1", "claude-instant-1-100k"]` | `"claude-instant-1"` | `{}` | +| `spacy.Claude-instant-1-1.v1` | Anthropic | `["claude-instant-1.1", "claude-instant-1.1-100k"]` | `"claude-instant-1.1"` | `{}` | +| `spacy.PaLM.v1` | Google | `["chat-bison-001", "text-bison-001"]` | `"text-bison-001"` | `{temperature=0.0}` | + +To use these models, make sure that you've [set the relevant API](#api-keys) +keys as environment variables. + +**⚠️ A note on `spacy.Azure.v1`.** Working with Azure OpenAI is slightly +different than working with models from other providers: + +- In Azure LLMs have to be made available by creating a _deployment_ of a given + model (e. g. GPT-3.5). This deployment can have an arbitrary name. The `name` + argument, which everywhere else denotes the model name (e. g. `claude-1.0`, + `gpt-3.5`), here refers to the _deployment name_. +- Deployed Azure OpenAI models are reachable via a resource-specific base URL, + usually of the form `https://{resource}.openai.azure.com`. Hence the URL has + to be specified via the `base_url` argument. +- Azure further expects the _API version_ to be specified. The default value for + this, via the `api_version` argument, is currently `2023-05-15` but may be + updated in the future. +- Finally, since we can't infer information about the model from the deployment + name, `spacy-llm` requires the `model_type` to be set to either + `"completions"` or `"chat"`, depending on whether the deployed model is a + completion or chat model. + +#### API Keys {id="api-keys"} + +Note that when using hosted services, you have to ensure that the proper API +keys are set as environment variables as described by the corresponding +provider's documentation. + +E. g. when using OpenAI, you have to get an API key from openai.com, and ensure +that the keys are set as environmental variables: + +```shell +export OPENAI_API_KEY="sk-..." +export OPENAI_API_ORG="org-..." +``` + +For Cohere: + +```shell +export CO_API_KEY="..." +``` + +For Anthropic: + +```shell +export ANTHROPIC_API_KEY="..." +``` + +For PaLM: + +```shell +export PALM_API_KEY="..." 
+```
+
+### Models via HuggingFace {id="models-hf"}
+
+These models all take the same parameters:
+
+| Argument      | Description                                                                                                                            |
+| ------------- | -------------------------------------------------------------------------------------------------------------------------------------- |
+| `name`        | Model name, i. e. any supported variant for this particular model. ~~str~~                                                             |
+| `config_init` | Further configuration passed on to the construction of the model with `transformers.pipeline()`. Defaults to `{}`. ~~Dict[str, Any]~~  |
+| `config_run`  | Further configuration used during model inference. Defaults to `{}`. ~~Dict[str, Any]~~                                                |
+
+> #### Example config
+>
+> ```ini
+> [components.llm.model]
+> @llm_models = "spacy.Llama2.v1"
+> name = "Llama-2-7b-hf"
+> ```
+
+Currently, these models are provided as part of the core library:
+
+| Model                | Provider        | Supported names                                                                                                | HF directory                           |
+| -------------------- | --------------- | -------------------------------------------------------------------------------------------------------------- | -------------------------------------- |
+| `spacy.Dolly.v1`     | Databricks      | `["dolly-v2-3b", "dolly-v2-7b", "dolly-v2-12b"]`                                                               | https://huggingface.co/databricks      |
+| `spacy.Falcon.v1`    | TII             | `["falcon-rw-1b", "falcon-7b", "falcon-7b-instruct", "falcon-40b-instruct"]`                                   | https://huggingface.co/tiiuae          |
+| `spacy.Llama2.v1`    | Meta AI         | `["Llama-2-7b-hf", "Llama-2-13b-hf", "Llama-2-70b-hf"]`                                                        | https://huggingface.co/meta-llama      |
+| `spacy.Mistral.v1`   | Mistral AI      | `["Mistral-7B-v0.1", "Mistral-7B-Instruct-v0.1"]`                                                              | https://huggingface.co/mistralai       |
+| `spacy.StableLM.v1`  | Stability AI    | `["stablelm-base-alpha-3b", "stablelm-base-alpha-7b", "stablelm-tuned-alpha-3b", "stablelm-tuned-alpha-7b"]`   | https://huggingface.co/stabilityai     |
+| `spacy.OpenLLaMA.v1` | OpenLM Research | `["open_llama_3b", "open_llama_7b", "open_llama_7b_v2", "open_llama_13b"]`                                     | https://huggingface.co/openlm-research |
+
+
+
+Some models available on Hugging Face (HF), such as Llama 2, are _gated models_.
+That means that users have to fulfill certain requirements to be allowed access
+to these models. In the case of Llama 2 you'll need to request access and agree
+to Meta's Terms of Service while logged in with your HF account. After Meta
+grants you permission to use Llama 2, you'll be able to download and use the
+model.
+
+This requires that you are logged in with your HF account on your local
+machine - check out the HF quick start documentation. In a nutshell, you'll need
+to create an access token on HF and log in to HF using your access token, e. g.
+with `huggingface-cli login`.
+
+
+
+Note that Hugging Face will download the model the first time you use it - you
+can
+[define the cache directory](https://huggingface.co/docs/huggingface_hub/main/en/guides/manage-cache)
+by setting the environment variable `HF_HOME`.
+
+#### Installation with HuggingFace {id="install-hf"}
+
+To use models from HuggingFace, ideally you have a GPU enabled and have
+installed `transformers`, `torch` and CUDA in your virtual environment. This
+allows you to have the setting `device=cuda:0` in your config, which ensures
+that the model is loaded entirely on the GPU (and fails otherwise).
+
+You can do so with
+
+```shell
+python -m pip install "spacy-llm[transformers]" "transformers[sentencepiece]"
+```
+
+If you don't have access to a GPU, you can install `accelerate` and
+set `device_map=auto` instead, but be aware that this may result in some layers
+getting distributed to the CPU or even the hard drive, which may ultimately
+result in extremely slow queries.
+
+```shell
+python -m pip install "accelerate>=0.16.0,<1.0"
+```
+
+### LangChain models {id="langchain-models"}
+
+To use [LangChain](https://github.com/hwchase17/langchain) for the API retrieval
+part, make sure you have installed it first:
+
+```shell
+python -m pip install "langchain==0.0.191"
+# Or install with spacy-llm directly
+python -m pip install "spacy-llm[extras]"
+```
+
+Note that LangChain currently only supports Python 3.9 and beyond.
+
+LangChain models in `spacy-llm` work slightly differently. `langchain`'s models
+are parsed automatically - each LLM class in `langchain` has one entry in
+`spacy-llm`'s registry. As `langchain`'s design has one class per API and not
+per model, this results in registry entries like `langchain.OpenAI.v1` - i. e.
+there is one registry entry per API and not per model (family), as for the REST-
+and HuggingFace-based entries.
+
+The name of the model to be used has to be passed in via the `name` attribute.
+
+> #### Example config
+>
+> ```ini
+> [components.llm.model]
+> @llm_models = "langchain.OpenAI.v1"
+> name = "gpt-3.5-turbo"
+> query = {"@llm_queries": "spacy.CallLangChain.v1"}
+> config = {"temperature": 0.0}
+> ```
+
+| Argument | Description                                                                                                                                                              |
+| -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| `name`   | The name of a model supported by LangChain for this API. ~~str~~                                                                                                          |
+| `config` | Configuration passed on to the LangChain model. Defaults to `{}`. ~~Dict[Any, Any]~~                                                                                      |
+| `query`  | Function that executes the prompts. If `None`, defaults to `spacy.CallLangChain.v1`. ~~Optional[Callable[["langchain.llms.BaseLLM", Iterable[Any]], Iterable[Any]]]~~     |
+
+The default `query` (`spacy.CallLangChain.v1`) executes the prompts by running
+`model(text)` for each given textual prompt.
+
+## Cache {id="cache"}
+
+Interacting with LLMs, either through an external API or a local instance, is
+costly. Since developing an NLP pipeline generally means a lot of exploration
+and prototyping, `spacy-llm` implements a built-in cache that keeps batches of
+documents stored on disk, so the same documents aren't reprocessed at each run.
+
+> #### Example config
+>
+> ```ini
+> [components.llm.cache]
+> @llm_misc = "spacy.BatchCache.v1"
+> path = "path/to/cache"
+> batch_size = 64
+> max_batches_in_mem = 4
+> ```
+
+| Argument             | Description                                                                                                                                        |
+| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------ |
+| `path`               | Cache directory. If `None`, no caching is performed, and this component will act as a NoOp. Defaults to `None`. ~~Optional[Union[str, Path]]~~    |
+| `batch_size`         | Number of docs in one batch (file). Once a batch is full, it will be persisted to disk. Defaults to 64. ~~int~~                                    |
+| `max_batches_in_mem` | Max. number of batches to hold in memory. Allows you to limit the effect on your memory if you're handling a lot of docs. Defaults to 4. 
~~int~~ | + +When retrieving a document, the `BatchCache` will first figure out what batch +the document belongs to. If the batch isn't in memory it will try to load the +batch from disk and then move it into memory. + +Note that since the cache is generated by a registered function, you can also +provide your own registered function returning your own cache implementation. If +you wish to do so, ensure that your cache object adheres to the `Protocol` +defined in `spacy_llm.ty.Cache`. + +## Various functions {id="various-functions"} + +### spacy.FewShotReader.v1 {id="fewshotreader-v1"} + +This function is registered in spaCy's `misc` registry, and reads in examples +from a `.yml`, `.yaml`, `.json` or `.jsonl` file. It uses +[`srsly`](https://github.com/explosion/srsly) to read in these files and parses +them depending on the file extension. + +> #### Example config +> +> ```ini +> [components.llm.task.examples] +> @misc = "spacy.FewShotReader.v1" +> path = "ner_examples.yml" +> ``` + +| Argument | Description | +| -------- | ----------------------------------------------------------------------------------------------- | +| `path` | Path to an examples file with suffix `.yml`, `.yaml`, `.json` or `.jsonl`. ~~Union[str, Path]~~ | + +### spacy.FileReader.v1 {id="filereader-v1"} + +This function is registered in spaCy's `misc` registry, and reads a file +provided to the `path` to return a `str` representation of its contents. This +function is typically used to read +[Jinja](https://jinja.palletsprojects.com/en/3.1.x/) files containing the prompt +template. + +> #### Example config +> +> ```ini +> [components.llm.task.template] +> @misc = "spacy.FileReader.v1" +> path = "ner_template.jinja2" +> ``` + +| Argument | Description | +| -------- | ------------------------------------------------- | +| `path` | Path to the file to be read. ~~Union[str, Path]~~ | + +### Normalizer functions {id="normalizer-functions"} + +These functions provide simple normalizations for string comparisons, e.g. +between a list of specified labels and a label given in the raw text of the LLM +response. They are registered in spaCy's `misc` registry and have the signature +`Callable[[str], str]`. + +- `spacy.StripNormalizer.v1`: only apply `text.strip()` +- `spacy.LowercaseNormalizer.v1`: applies `text.strip().lower()` to compare + strings in a case-insensitive way. diff --git a/website/docs/api/legacy.mdx b/website/docs/api/legacy.mdx index ea6d3a899..b44df5387 100644 --- a/website/docs/api/legacy.mdx +++ b/website/docs/api/legacy.mdx @@ -162,7 +162,10 @@ network has an internal CNN Tok2Vec layer and uses attention. Since `spacy.TextCatCNN.v2`, this architecture has become resizable, which means that you can add labels to a previously trained textcat. `TextCatCNN` v1 did not -yet support that. +yet support that. `TextCatCNN` has been replaced by the more general +[`TextCatReduce`](/api/architectures#TextCatReduce) layer. `TextCatCNN` is +identical to `TextCatReduce` with `use_reduce_mean=true`, +`use_reduce_first=false`, `reduce_last=false` and `use_reduce_max=false`. > #### Example Config > @@ -194,11 +197,58 @@ architecture is usually less accurate than the ensemble, but runs faster. | `nO` | Output dimension, determined by the number of different labels. If not set, the [`TextCategorizer`](/api/textcategorizer) component will set it when `initialize` is called. ~~Optional[int]~~ | | **CREATES** | The model using the architecture. 
~~Model[List[Doc], Floats2d]~~ | +### spacy.TextCatCNN.v2 {id="TextCatCNN_v2"} + +> #### Example Config +> +> ```ini +> [model] +> @architectures = "spacy.TextCatCNN.v2" +> exclusive_classes = false +> nO = null +> +> [model.tok2vec] +> @architectures = "spacy.HashEmbedCNN.v2" +> pretrained_vectors = null +> width = 96 +> depth = 4 +> embed_size = 2000 +> window_size = 1 +> maxout_pieces = 3 +> subword_features = true +> ``` + +A neural network model where token vectors are calculated using a CNN. The +vectors are mean pooled and used as features in a feed-forward network. This +architecture is usually less accurate than the ensemble, but runs faster. + +`TextCatCNN` has been replaced by the more general +[`TextCatReduce`](/api/architectures#TextCatReduce) layer. `TextCatCNN` is +identical to `TextCatReduce` with `use_reduce_mean=true`, +`use_reduce_first=false`, `reduce_last=false` and `use_reduce_max=false`. + +| Name | Description | +| ------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `exclusive_classes` | Whether or not categories are mutually exclusive. ~~bool~~ | +| `tok2vec` | The [`tok2vec`](#tok2vec) layer of the model. ~~Model~~ | +| `nO` | Output dimension, determined by the number of different labels. If not set, the [`TextCategorizer`](/api/textcategorizer) component will set it when `initialize` is called. ~~Optional[int]~~ | +| **CREATES** | The model using the architecture. ~~Model[List[Doc], Floats2d]~~ | + + + +[TextCatCNN.v1](/api/legacy#TextCatCNN_v1) had the exact same signature, but was +not yet resizable. Since v2, new labels can be added to this component, even +after training. + + + ### spacy.TextCatBOW.v1 {id="TextCatBOW_v1"} Since `spacy.TextCatBOW.v2`, this architecture has become resizable, which means that you can add labels to a previously trained textcat. `TextCatBOW` v1 did not -yet support that. +yet support that. Versions of this model before `spacy.TextCatBOW.v3` used an +erroneous sparse linear layer that only used a small number of the allocated +parameters. > #### Example Config > @@ -222,6 +272,33 @@ the others, but may not be as accurate, especially if texts are short. | `nO` | Output dimension, determined by the number of different labels. If not set, the [`TextCategorizer`](/api/textcategorizer) component will set it when `initialize` is called. ~~Optional[int]~~ | | **CREATES** | The model using the architecture. ~~Model[List[Doc], Floats2d]~~ | +### spacy.TextCatBOW.v2 {id="TextCatBOW"} + +Versions of this model before `spacy.TextCatBOW.v3` used an erroneous sparse +linear layer that only used a small number of the allocated parameters. + +> #### Example Config +> +> ```ini +> [model] +> @architectures = "spacy.TextCatBOW.v2" +> exclusive_classes = false +> ngram_size = 1 +> no_output_layer = false +> nO = null +> ``` + +An n-gram "bag-of-words" model. This architecture should run much faster than +the others, but may not be as accurate, especially if texts are short. + +| Name | Description | +| ------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `exclusive_classes` | Whether or not categories are mutually exclusive. ~~bool~~ | +| `ngram_size` | Determines the maximum length of the n-grams in the BOW model. 
For instance, `ngram_size=3` would give unigram, trigram and bigram features. ~~int~~ | +| `no_output_layer` | Whether or not to add an output layer to the model (`Softmax` activation if `exclusive_classes` is `True`, else `Logistic`). ~~bool~~ | +| `nO` | Output dimension, determined by the number of different labels. If not set, the [`TextCategorizer`](/api/textcategorizer) component will set it when `initialize` is called. ~~Optional[int]~~ | +| **CREATES** | The model using the architecture. ~~Model[List[Doc], Floats2d]~~ | + ### spacy.TransitionBasedParser.v1 {id="TransitionBasedParser_v1"} Identical to diff --git a/website/docs/api/spancategorizer.mdx b/website/docs/api/spancategorizer.mdx index 81a473ac2..98a1948ee 100644 --- a/website/docs/api/spancategorizer.mdx +++ b/website/docs/api/spancategorizer.mdx @@ -67,7 +67,6 @@ architectures and their arguments and hyperparameters. > ```python > from spacy.pipeline.spancat import DEFAULT_SPANCAT_SINGLELABEL_MODEL > config = { -> "threshold": 0.5, > "spans_key": "labeled_spans", > "model": DEFAULT_SPANCAT_SINGLELABEL_MODEL, > "suggester": {"@misc": "spacy.ngram_suggester.v1", "sizes": [1, 2, 3]}, @@ -90,6 +89,21 @@ architectures and their arguments and hyperparameters. | `negative_weight` 3.5.1 | Multiplier for the loss terms. It can be used to downweight the negative samples if there are too many. It is only used when `add_negative_label` is `True`. Defaults to `1.0`. ~~float~~ | | `allow_overlap` 3.5.1 | If `True`, the data is assumed to contain overlapping spans. It is only available when `max_positive` is exactly 1. Defaults to `True`. ~~bool~~ | + + +If you set a non-default value for `spans_key`, you'll have to update +`[training.score_weights]` as well so that weights are computed properly. E. g. +for `spans_key == "myspankey"`, include this in your config: + +```ini +[training.score_weights] +spans_myspankey_f = 1.0 +spans_myspankey_p = 0.0 +spans_myspankey_r = 0.0 +``` + + + ```python %%GITHUB_SPACY/spacy/pipeline/spancat.py ``` @@ -522,7 +536,7 @@ has two columns, indicating the start and end position. | Name | Description | | ----------- | ---------------------------------------------------------------------------- | | `min_size` | The minimal phrase lengths to suggest (inclusive). ~~[int]~~ | -| `max_size` | The maximal phrase lengths to suggest (exclusive). ~~[int]~~ | +| `max_size` | The maximal phrase lengths to suggest (inclusive). ~~[int]~~ | | **CREATES** | The suggester function. ~~Callable[[Iterable[Doc], Optional[Ops]], Ragged]~~ | ### spacy.preset_spans_suggester.v1 {id="preset_spans_suggester"} diff --git a/website/docs/api/spanfinder.mdx b/website/docs/api/spanfinder.mdx index ca3104c85..ef4a6baa5 100644 --- a/website/docs/api/spanfinder.mdx +++ b/website/docs/api/spanfinder.mdx @@ -60,7 +60,7 @@ architectures and their arguments and hyperparameters. | `model` | A model instance that is given a list of documents and predicts a probability for each token. ~~Model[List[Doc], Floats2d]~~ | | `spans_key` | Key of the [`Doc.spans`](/api/doc#spans) dict to save the spans under. During initialization and training, the component will look for spans on the reference document under the same key. Defaults to `"sc"`. ~~str~~ | | `threshold` | Minimum probability to consider a prediction positive. Defaults to `0.5`. ~~float~~ | -| `max_length` | Maximum length of the produced spans, defaults to `None` meaning unlimited length. ~~Optional[int]~~ | +| `max_length` | Maximum length of the produced spans, defaults to `25`. 
~~Optional[int]~~ | | `min_length` | Minimum length of the produced spans, defaults to `None` meaning shortest span length is 1. ~~Optional[int]~~ | | `scorer` | The scoring method. Defaults to [`Scorer.score_spans`](/api/scorer#score_spans) for `Doc.spans[spans_key]` with overlapping spans allowed. ~~Optional[Callable]~~ | diff --git a/website/docs/api/spanruler.mdx b/website/docs/api/spanruler.mdx index d2d41f620..5889b1906 100644 --- a/website/docs/api/spanruler.mdx +++ b/website/docs/api/spanruler.mdx @@ -117,7 +117,7 @@ config. Any existing patterns are removed on initialization. > > [initialize.components.span_ruler.patterns] > @readers = "srsly.read_jsonl.v1" -> path = "corpus/span_ruler_patterns.jsonl +> path = "corpus/span_ruler_patterns.jsonl" > ``` | Name | Description | diff --git a/website/docs/api/top-level.mdx b/website/docs/api/top-level.mdx index 64ec342cd..9cdc0c8ab 100644 --- a/website/docs/api/top-level.mdx +++ b/website/docs/api/top-level.mdx @@ -68,7 +68,7 @@ weights, and returns it. cls = spacy.util.get_lang_class(lang) # 1. Get Language class, e.g. English nlp = cls() # 2. Initialize it for name in pipeline: - nlp.add_pipe(name) # 3. Add the component to the pipeline + nlp.add_pipe(name, config={...}) # 3. Add the component to the pipeline nlp.from_disk(data_path) # 4. Load in the binary data ``` @@ -343,6 +343,130 @@ use with the `manual=True` argument in `displacy.render`. | `options` | Span-specific visualisation options. ~~Dict[str, Any]~~ | | **RETURNS** | Generated entities keyed by text (original text) and ents. ~~dict~~ | +### Visualizer data structures {id="displacy_structures"} + +You can use displaCy's data format to manually render data. This can be useful +if you want to visualize output from other libraries. You can find examples of +displaCy's different data formats below. + +> #### DEP example data structure +> +> ```json +> { +> "words": [ +> { "text": "This", "tag": "DT" }, +> { "text": "is", "tag": "VBZ" }, +> { "text": "a", "tag": "DT" }, +> { "text": "sentence", "tag": "NN" } +> ], +> "arcs": [ +> { "start": 0, "end": 1, "label": "nsubj", "dir": "left" }, +> { "start": 2, "end": 3, "label": "det", "dir": "left" }, +> { "start": 1, "end": 3, "label": "attr", "dir": "right" } +> ] +> } +> ``` + +#### Dependency Visualizer data structure {id="structure-dep"} + +| Dictionary Key | Description | +| -------------- | ----------------------------------------------------------------------------------------------------------- | +| `words` | List of dictionaries describing a word token (see structure below). ~~List[Dict[str, Any]]~~ | +| `arcs` | List of dictionaries describing the relations between words (see structure below). ~~List[Dict[str, Any]]~~ | +| _Optional_ | | +| `title` | Title of the visualization. ~~Optional[str]~~ | +| `settings` | Dependency Visualizer options (see [here](/api/top-level#displacy_options)). ~~Dict[str, Any]~~ | + + + +| Dictionary Key | Description | +| -------------- | ---------------------------------------- | +| `text` | Text content of the word. ~~str~~ | +| `tag` | Fine-grained part-of-speech. ~~str~~ | +| `lemma` | Base form of the word. ~~Optional[str]~~ | + + + + + +| Dictionary Key | Description | +| -------------- | ---------------------------------------------------- | +| `start` | The index of the starting token. ~~int~~ | +| `end` | The index of the ending token. ~~int~~ | +| `label` | The type of dependency relation. ~~str~~ | +| `dir` | Direction of the relation (`left`, `right`). 
~~str~~ | + + + +> #### ENT example data structure +> +> ```json +> { +> "text": "But Google is starting from behind.", +> "ents": [{ "start": 4, "end": 10, "label": "ORG" }] +> } +> ``` + +#### Named Entity Recognition data structure {id="structure-ent"} + +| Dictionary Key | Description | +| -------------- | ------------------------------------------------------------------------------------------- | +| `text` | String representation of the document text. ~~str~~ | +| `ents` | List of dictionaries describing entities (see structure below). ~~List[Dict[str, Any]]~~ | +| _Optional_ | | +| `title` | Title of the visualization. ~~Optional[str]~~ | +| `settings` | Entity Visualizer options (see [here](/api/top-level#displacy_options)). ~~Dict[str, Any]~~ | + + + +| Dictionary Key | Description | +| -------------- | ---------------------------------------------------------------------- | +| `start` | The index of the first character of the entity. ~~int~~ | +| `end` | The index of the last character of the entity. (not inclusive) ~~int~~ | +| `label` | Label attached to the entity. ~~str~~ | +| _Optional_ | | +| `kb_id` | `KnowledgeBase` ID. ~~str~~ | +| `kb_url` | `KnowledgeBase` URL. ~~str~~ | + + + +> #### SPAN example data structure +> +> ```json +> { +> "text": "Welcome to the Bank of China.", +> "spans": [ +> { "start_token": 3, "end_token": 6, "label": "ORG" }, +> { "start_token": 5, "end_token": 6, "label": "GPE" } +> ], +> "tokens": ["Welcome", "to", "the", "Bank", "of", "China", "."] +> } +> ``` + +#### Span Classification data structure {id="structure-span"} + +| Dictionary Key | Description | +| -------------- | ----------------------------------------------------------------------------------------- | +| `text` | String representation of the document text. ~~str~~ | +| `spans` | List of dictionaries describing spans (see structure below). ~~List[Dict[str, Any]]~~ | +| `tokens` | List of word tokens. ~~List[str]~~ | +| _Optional_ | | +| `title` | Title of the visualization. ~~Optional[str]~~ | +| `settings` | Span Visualizer options (see [here](/api/top-level#displacy_options)). ~~Dict[str, Any]~~ | + + + +| Dictionary Key | Description | +| -------------- | ------------------------------------------------------------- | +| `start_token` | The index of the first token of the span in `tokens`. ~~int~~ | +| `end_token` | The index of the last token of the span in `tokens`. ~~int~~ | +| `label` | Label attached to the span. ~~str~~ | +| _Optional_ | | +| `kb_id` | `KnowledgeBase` ID. ~~str~~ | +| `kb_url` | `KnowledgeBase` URL. ~~str~~ | + + + ### Visualizer options {id="displacy_options"} The `options` argument lets you specify additional settings for each visualizer. diff --git a/website/docs/api/transformer.mdx b/website/docs/api/transformer.mdx index d3a4457e1..9dcafb557 100644 --- a/website/docs/api/transformer.mdx +++ b/website/docs/api/transformer.mdx @@ -397,6 +397,17 @@ are wrapped into the by this class. Instances of this class are typically assigned to the [`Doc._.trf_data`](/api/transformer#assigned-attributes) extension attribute. 
+> #### Example +> +> ```python +> # Get the last hidden layer output for "is" (token index 1) +> doc = nlp("This is a text.") +> indices = doc._.trf_data.align[1].data.flatten() +> last_hidden_state = doc._.trf_data.model_output.last_hidden_state +> dim = last_hidden_state.shape[-1] +> tensors = last_hidden_state.reshape(-1, dim)[indices] +> ``` + | Name | Description | | -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | | `tokens` | A slice of the tokens data produced by the tokenizer. This may have several fields, including the token IDs, the texts and the attention mask. See the [`transformers.BatchEncoding`](https://huggingface.co/transformers/main_classes/tokenizer.html#transformers.BatchEncoding) object for details. ~~dict~~ | diff --git a/website/docs/api/vectors.mdx b/website/docs/api/vectors.mdx index fcb505501..0e92eb12b 100644 --- a/website/docs/api/vectors.mdx +++ b/website/docs/api/vectors.mdx @@ -60,6 +60,7 @@ modified later. | `hash_seed` 3.2 | The floret hash seed (default: `0`). ~~int~~ | | `bow` 3.2 | The floret BOW string (default: `"<"`). ~~str~~ | | `eow` 3.2 | The floret EOW string (default: `">"`). ~~str~~ | +| `attr` 3.6 | The token attribute for the vector keys (default: `"ORTH"`). ~~Union[int, str]~~ | ## Vectors.\_\_getitem\_\_ {id="getitem",tag="method"} @@ -296,10 +297,9 @@ The vector size, i.e. `rows * dims`. ## Vectors.is_full {id="is_full",tag="property"} -Whether the vectors table is full and has no slots are available for new keys. -If a table is full, it can be resized using -[`Vectors.resize`](/api/vectors#resize). In `floret` mode, the table is always -full and cannot be resized. +Whether the vectors table is full and no slots are available for new keys. If a +table is full, it can be resized using [`Vectors.resize`](/api/vectors#resize). +In `floret` mode, the table is always full and cannot be resized. > #### Example > @@ -453,8 +453,9 @@ Load state from a binary string. ## Attributes {id="attributes"} -| Name | Description | -| --------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `data` | Stored vectors data. `numpy` is used for CPU vectors, `cupy` for GPU vectors. ~~Union[numpy.ndarray[ndim=1, dtype=float32], cupy.ndarray[ndim=1, dtype=float32]]~~ | -| `key2row` | Dictionary mapping word hashes to rows in the `Vectors.data` table. ~~Dict[int, int]~~ | -| `keys` | Array keeping the keys in order, such that `keys[vectors.key2row[key]] == key`. ~~Union[numpy.ndarray[ndim=1, dtype=float32], cupy.ndarray[ndim=1, dtype=float32]]~~ | +| Name | Description | +| ----------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `data` | Stored vectors data. `numpy` is used for CPU vectors, `cupy` for GPU vectors. ~~Union[numpy.ndarray[ndim=1, dtype=float32], cupy.ndarray[ndim=1, dtype=float32]]~~ | +| `key2row` | Dictionary mapping word hashes to rows in the `Vectors.data` table. ~~Dict[int, int]~~ | +| `keys` | Array keeping the keys in order, such that `keys[vectors.key2row[key]] == key`. 
~~Union[numpy.ndarray[ndim=1, dtype=float32], cupy.ndarray[ndim=1, dtype=float32]]~~ | +| `attr` 3.6 | The token attribute for the vector keys. ~~int~~ | diff --git a/website/docs/api/vocab.mdx b/website/docs/api/vocab.mdx index fe774d1a8..57618397d 100644 --- a/website/docs/api/vocab.mdx +++ b/website/docs/api/vocab.mdx @@ -13,7 +13,7 @@ between `Doc` objects. Note that a `Vocab` instance is not static. It increases in size as texts with -new tokens are processed. +new tokens are processed. Some models may have an empty vocab at initialization. @@ -93,6 +93,7 @@ given string, you need to look it up in > #### Example > > ```python +> nlp("I'm eating an apple") > apple = nlp.vocab.strings["apple"] > oov = nlp.vocab.strings["dskfodkfos"] > assert apple in nlp.vocab diff --git a/website/public/images/displacy-long2.html b/website/docs/images/displacy-long2.html similarity index 100% rename from website/public/images/displacy-long2.html rename to website/docs/images/displacy-long2.html diff --git a/website/docs/models/index.mdx b/website/docs/models/index.mdx index 366d44f0e..54f3c4906 100644 --- a/website/docs/models/index.mdx +++ b/website/docs/models/index.mdx @@ -108,12 +108,12 @@ In the `sm`/`md`/`lg` models: #### CNN/CPU pipelines with floret vectors -The Finnish, Korean and Swedish `md` and `lg` pipelines use -[floret vectors](/usage/v3-2#vectors) instead of default vectors. If you're -running a trained pipeline on texts and working with [`Doc`](/api/doc) objects, -you shouldn't notice any difference with floret vectors. With floret vectors no -tokens are out-of-vocabulary, so [`Token.is_oov`](/api/token#attributes) will -return `False` for all tokens. +The Croatian, Finnish, Korean, Slovenian, Swedish and Ukrainian `md` and `lg` +pipelines use [floret vectors](/usage/v3-2#vectors) instead of default vectors. +If you're running a trained pipeline on texts and working with [`Doc`](/api/doc) +objects, you shouldn't notice any difference with floret vectors. With floret +vectors no tokens are out-of-vocabulary, so +[`Token.is_oov`](/api/token#attributes) will return `False` for all tokens. If you access vectors directly for similarity comparisons, there are a few differences because floret vectors don't include a fixed word list like the @@ -132,10 +132,20 @@ vector keys for default vectors. ### Transformer pipeline design {id="design-trf"} -In the transformer (`trf`) models, the `tagger`, `parser` and `ner` (if present) -all listen to the `transformer` component. The `attribute_ruler` and +In the transformer (`trf`) pipelines, the `tagger`, `parser` and `ner` (if +present) all listen to the `transformer` component. The `attribute_ruler` and `lemmatizer` have the same configuration as in the CNN models. +For spaCy v3.0-v3.6, `trf` pipelines use +[`spacy-transformers`](https://github.com/explosion/spacy-transformers) and the +transformer output in `doc._.trf_data` is a +[`TransformerData`](/api/transformer#transformerdata) object. + +For spaCy v3.7+, `trf` pipelines use +[`spacy-curated-transformers`](https://github.com/explosion/spacy-curated-transformers) +and `doc._.trf_data` is a +[`DocTransformerOutput`](/api/curatedtransformer#doctransformeroutput) object. 
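+
+As a quick way to check which output type an installed pipeline produces, a
+sketch along these lines can help (it assumes a trained `trf` pipeline such as
+`en_core_web_trf` is installed):
+
+```python
+# Sketch: inspect the transformer output type stored on doc._.trf_data.
+# Assumes the en_core_web_trf pipeline is installed.
+import spacy
+
+nlp = spacy.load("en_core_web_trf")
+doc = nlp("This is a text.")
+# "TransformerData" for spaCy v3.0-v3.6 pipelines,
+# "DocTransformerOutput" for spaCy v3.7+ pipelines
+print(type(doc._.trf_data).__name__)
+```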
+ ### Modifying the default pipeline {id="design-modify"} For faster processing, you may only want to run a subset of the components in a diff --git a/website/docs/usage/101/_named-entities.mdx b/website/docs/usage/101/_named-entities.mdx index 9ae4134d8..da43c0ddd 100644 --- a/website/docs/usage/101/_named-entities.mdx +++ b/website/docs/usage/101/_named-entities.mdx @@ -31,8 +31,6 @@ for ent in doc.ents: Using spaCy's built-in [displaCy visualizer](/usage/visualizers), here's what our example sentence and its named entities look like: -