From e3173bd86d65a534f92578b85b0e5058a5c845f4 Mon Sep 17 00:00:00 2001 From: Paul O'Leary McCann Date: Fri, 18 Nov 2022 16:24:22 +0900 Subject: [PATCH 01/11] Remove spikex from Universe (#11825) --- website/meta/universe.json | 31 ------------------------------- 1 file changed, 31 deletions(-) diff --git a/website/meta/universe.json b/website/meta/universe.json index 661f5da12..57bf2d3e3 100644 --- a/website/meta/universe.json +++ b/website/meta/universe.json @@ -461,37 +461,6 @@ }, "category": ["standalone"] }, - { - "id": "spikex", - "title": "SpikeX - SpaCy Pipes for Knowledge Extraction", - "slogan": "Use SpikeX to build knowledge extraction tools with almost-zero effort", - "description": "SpikeX is a collection of pipes ready to be plugged in a spaCy pipeline. It aims to help in building knowledge extraction tools with almost-zero effort.", - "github": "erre-quadro/spikex", - "pip": "spikex", - "code_example": [ - "from spacy import load as spacy_load", - "from spikex.wikigraph import load as wg_load", - "from spikex.pipes import WikiPageX", - "", - "# load a spacy model and get a doc", - "nlp = spacy_load('en_core_web_sm')", - "doc = nlp('An apple a day keeps the doctor away')", - "# load a WikiGraph", - "wg = wg_load('simplewiki_core')", - "# get a WikiPageX and extract all pages", - "wikipagex = WikiPageX(wg)", - "doc = wikipagex(doc)", - "# see all pages extracted from the doc", - "for span in doc._.wiki_spans:", - " print(span._.wiki_pages)" - ], - "category": ["pipeline", "standalone"], - "author": "Erre Quadro", - "author_links": { - "github": "erre-quadro", - "website": "https://www.errequadrosrl.com" - } - }, { "id": "spacy-dbpedia-spotlight", "title": "DBpedia Spotlight for SpaCy", From 89bfd06fbd89cc00ca2007bf795326538126f937 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 18 Nov 2022 18:24:13 +0900 Subject: [PATCH 02/11] Auto-format code with black (#11826) Co-authored-by: explosion-bot --- spacy/pipeline/textcat.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/spacy/pipeline/textcat.py b/spacy/pipeline/textcat.py index a86eb99d2..9490e3cb1 100644 --- a/spacy/pipeline/textcat.py +++ b/spacy/pipeline/textcat.py @@ -293,7 +293,7 @@ class TextCategorizer(TrainablePipe): bp_scores(gradient) if sgd is not None: self.finish_update(sgd) - losses[self.name] += (gradient ** 2).sum() + losses[self.name] += (gradient**2).sum() return losses def _examples_to_truth( @@ -327,7 +327,7 @@ class TextCategorizer(TrainablePipe): not_missing = self.model.ops.asarray(not_missing) # type: ignore d_scores = scores - truths d_scores *= not_missing - mean_square_error = (d_scores ** 2).mean() + mean_square_error = (d_scores**2).mean() return float(mean_square_error), d_scores def add_label(self, label: str) -> int: From f0d8309a289015ae44f994e8c0207cdfe41583ec Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Mon, 21 Nov 2022 07:12:03 +0000 Subject: [PATCH 03/11] fix comparison of constants (#11834) Co-authored-by: MarcoGorelli <> --- .pre-commit-config.yaml | 2 +- spacy/tests/vocab_vectors/test_vocab_api.py | 21 +++++++++++++++++++++ spacy/vocab.pyx | 4 ++-- 3 files changed, 24 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index df59697b1..e2c5e98fd 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -5,7 +5,7 @@ repos: - id: black language_version: python3.7 
additional_dependencies: ['click==8.0.4'] -- repo: https://gitlab.com/pycqa/flake8 +- repo: https://github.com/pycqa/flake8 rev: 5.0.4 hooks: - id: flake8 diff --git a/spacy/tests/vocab_vectors/test_vocab_api.py b/spacy/tests/vocab_vectors/test_vocab_api.py index 16cf80a08..b9c386eb8 100644 --- a/spacy/tests/vocab_vectors/test_vocab_api.py +++ b/spacy/tests/vocab_vectors/test_vocab_api.py @@ -1,8 +1,13 @@ +import os + import pytest from spacy.attrs import IS_ALPHA, LEMMA, ORTH +from spacy.lang.en import English from spacy.parts_of_speech import NOUN, VERB from spacy.vocab import Vocab +from ..util import make_tempdir + @pytest.mark.issue(1868) def test_issue1868(): @@ -59,3 +64,19 @@ def test_vocab_api_contains(en_vocab, text): def test_vocab_writing_system(en_vocab): assert en_vocab.writing_system["direction"] == "ltr" assert en_vocab.writing_system["has_case"] is True + + +def test_to_disk(): + nlp = English() + with make_tempdir() as d: + nlp.vocab.to_disk(d) + assert "vectors" in os.listdir(d) + assert "lookups.bin" in os.listdir(d) + + +def test_to_disk_exclude(): + nlp = English() + with make_tempdir() as d: + nlp.vocab.to_disk(d, exclude=("vectors", "lookups")) + assert "vectors" not in os.listdir(d) + assert "lookups.bin" not in os.listdir(d) diff --git a/spacy/vocab.pyx b/spacy/vocab.pyx index 428cadd82..27f8e5f98 100644 --- a/spacy/vocab.pyx +++ b/spacy/vocab.pyx @@ -468,9 +468,9 @@ cdef class Vocab: setters = ["strings", "vectors"] if "strings" not in exclude: self.strings.to_disk(path / "strings.json") - if "vectors" not in "exclude": + if "vectors" not in exclude: self.vectors.to_disk(path, exclude=["strings"]) - if "lookups" not in "exclude": + if "lookups" not in exclude: self.lookups.to_disk(path) def from_disk(self, path, *, exclude=tuple()): From f1ddac187de7e67923e8ee63192787179f70fa4c Mon Sep 17 00:00:00 2001 From: Paul O'Leary McCann Date: Wed, 23 Nov 2022 18:51:31 +0900 Subject: [PATCH 04/11] Remove unused error object (#11837) --- spacy/language.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/spacy/language.py b/spacy/language.py index 836f3abf9..2789b6690 100644 --- a/spacy/language.py +++ b/spacy/language.py @@ -784,14 +784,6 @@ class Language: factory_name, source, name=name ) else: - if not self.has_factory(factory_name): - err = Errors.E002.format( - name=factory_name, - opts=", ".join(self.factory_names), - method="add_pipe", - lang=util.get_object_name(self), - lang_code=self.lang, - ) pipe_component = self.create_pipe( factory_name, name=name, From 8271cfb4cd8a907ff11f12841ee1ceb171b3f528 Mon Sep 17 00:00:00 2001 From: Paul O'Leary McCann Date: Wed, 23 Nov 2022 19:03:18 +0900 Subject: [PATCH 05/11] Remove Learning Path spaCy (#11846) --- website/meta/universe.json | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/website/meta/universe.json b/website/meta/universe.json index 57bf2d3e3..97b53e9c5 100644 --- a/website/meta/universe.json +++ b/website/meta/universe.json @@ -1993,17 +1993,6 @@ }, "category": ["books"] }, - { - "type": "education", - "id": "learning-path-spacy", - "title": "Learning Path: Mastering spaCy for Natural Language Processing", - "slogan": "O'Reilly, 2017", - "description": "spaCy, a fast, user-friendly library for teaching computers to understand text, simplifies NLP techniques, such as speech tagging and syntactic dependencies, so you can easily extract information, attributes, and objects from massive amounts of text to then document, measure, and analyze. 
This Learning Path is a hands-on introduction to using spaCy to discover insights through natural language processing. While end-to-end natural language processing solutions can be complex, you’ll learn the linguistics, algorithms, and machine learning skills to get the job done.", - "url": "https://www.safaribooksonline.com/library/view/learning-path-mastering/9781491986653/", - "thumb": "https://i.imgur.com/9MIgMAc.jpg", - "author": "Aaron Kramer", - "category": ["courses"] - }, { "type": "education", "id": "introduction-into-spacy-3", From 5ea14af32b4203bc3087dec63091e63fe4ac95b7 Mon Sep 17 00:00:00 2001 From: Madeesh Kannan Date: Wed, 23 Nov 2022 17:54:58 +0100 Subject: [PATCH 06/11] Add `training.before_update` callback (#11739) * Add `training.before_update` callback This callback can be used to implement training paradigms like gradual (un)freezing of components (e.g: the Transformer) after a certain number of training steps to mitigate catastrophic forgetting during fine-tuning. * Fix type annotation, default config value * Generalize arguments passed to the callback * Update schema * Pass `epoch` to callback, rename `current_step` to `step` * Add test * Simplify test * Replace config string with `spacy.blank` * Apply suggestions from code review Co-authored-by: Adriane Boyd * Cleanup imports Co-authored-by: Adriane Boyd --- spacy/default_config.cfg | 2 ++ spacy/schemas.py | 1 + spacy/tests/training/test_training.py | 40 ++++++++++++++++++++++++++- spacy/training/loop.py | 6 ++++ website/docs/api/data-formats.md | 1 + 5 files changed, 49 insertions(+), 1 deletion(-) diff --git a/spacy/default_config.cfg b/spacy/default_config.cfg index 86a72926e..694fb732f 100644 --- a/spacy/default_config.cfg +++ b/spacy/default_config.cfg @@ -90,6 +90,8 @@ dev_corpus = "corpora.dev" train_corpus = "corpora.train" # Optional callback before nlp object is saved to disk after training before_to_disk = null +# Optional callback that is invoked at the start of each training step +before_update = null [training.logger] @loggers = "spacy.ConsoleLogger.v1" diff --git a/spacy/schemas.py b/spacy/schemas.py index c824d76b9..e48fe1702 100644 --- a/spacy/schemas.py +++ b/spacy/schemas.py @@ -329,6 +329,7 @@ class ConfigSchemaTraining(BaseModel): frozen_components: List[str] = Field(..., title="Pipeline components that shouldn't be updated during training") annotating_components: List[str] = Field(..., title="Pipeline components that should set annotations during training") before_to_disk: Optional[Callable[["Language"], "Language"]] = Field(..., title="Optional callback to modify nlp object after training, before it's saved to disk") + before_update: Optional[Callable[["Language", Dict[str, Any]], None]] = Field(..., title="Optional callback that is invoked at the start of each training step") # fmt: on class Config: diff --git a/spacy/tests/training/test_training.py b/spacy/tests/training/test_training.py index 4384a796d..7933ea31f 100644 --- a/spacy/tests/training/test_training.py +++ b/spacy/tests/training/test_training.py @@ -2,6 +2,7 @@ import random import numpy import pytest +import spacy import srsly from spacy.lang.en import English from spacy.tokens import Doc, DocBin @@ -11,9 +12,10 @@ from spacy.training import offsets_to_biluo_tags from spacy.training.alignment_array import AlignmentArray from spacy.training.align import get_alignments from spacy.training.converters import json_to_docs +from spacy.training.loop import train_while_improving from spacy.util import get_words_and_spaces, 
load_model_from_path, minibatch from spacy.util import load_config_from_str -from thinc.api import compounding +from thinc.api import compounding, Adam from ..util import make_tempdir @@ -1112,3 +1114,39 @@ def test_retokenized_docs(doc): retokenizer.merge(doc1[0:2]) retokenizer.merge(doc1[5:7]) assert example.get_aligned("ORTH", as_string=True) == expected2 + + +def test_training_before_update(doc): + def before_update(nlp, args): + assert args["step"] == 0 + assert args["epoch"] == 1 + + # Raise an error here as the rest of the loop + # will not run to completion due to uninitialized + # models. + raise ValueError("ran_before_update") + + def generate_batch(): + yield 1, [Example(doc, doc)] + + nlp = spacy.blank("en") + nlp.add_pipe("tagger") + optimizer = Adam() + generator = train_while_improving( + nlp, + optimizer, + generate_batch(), + lambda: None, + dropout=0.1, + eval_frequency=100, + accumulate_gradient=10, + patience=10, + max_steps=100, + exclude=[], + annotating_components=[], + before_update=before_update, + ) + + with pytest.raises(ValueError, match="ran_before_update"): + for _ in generator: + pass diff --git a/spacy/training/loop.py b/spacy/training/loop.py index 06372cbb0..885257772 100644 --- a/spacy/training/loop.py +++ b/spacy/training/loop.py @@ -59,6 +59,7 @@ def train( batcher = T["batcher"] train_logger = T["logger"] before_to_disk = create_before_to_disk_callback(T["before_to_disk"]) + before_update = T["before_update"] # Helper function to save checkpoints. This is a closure for convenience, # to avoid passing in all the args all the time. @@ -89,6 +90,7 @@ def train( eval_frequency=T["eval_frequency"], exclude=frozen_components, annotating_components=annotating_components, + before_update=before_update, ) clean_output_dir(output_path) stdout.write(msg.info(f"Pipeline: {nlp.pipe_names}") + "\n") @@ -150,6 +152,7 @@ def train_while_improving( max_steps: int, exclude: List[str], annotating_components: List[str], + before_update: Optional[Callable[["Language", Dict[str, Any]], None]], ): """Train until an evaluation stops improving. Works as a generator, with each iteration yielding a tuple `(batch, info, is_best_checkpoint)`, @@ -198,6 +201,9 @@ def train_while_improving( words_seen = 0 start_time = timer() for step, (epoch, batch) in enumerate(train_data): + if before_update: + before_update_args = {"step": step, "epoch": epoch} + before_update(nlp, before_update_args) dropout = next(dropouts) # type: ignore for subbatch in subdivide_batch(batch, accumulate_gradient): nlp.update( diff --git a/website/docs/api/data-formats.md b/website/docs/api/data-formats.md index ce06c4ea8..768844cf3 100644 --- a/website/docs/api/data-formats.md +++ b/website/docs/api/data-formats.md @@ -186,6 +186,7 @@ process that are used when you run [`spacy train`](/api/cli#train). | `accumulate_gradient` | Whether to divide the batch up into substeps. Defaults to `1`. ~~int~~ | | `batcher` | Callable that takes an iterator of [`Doc`](/api/doc) objects and yields batches of `Doc`s. Defaults to [`batch_by_words`](/api/top-level#batch_by_words). ~~Callable[[Iterator[Doc], Iterator[List[Doc]]]]~~ | | `before_to_disk` | Optional callback to modify `nlp` object right before it is saved to disk during and after training. Can be used to remove or reset config values or disable components. Defaults to `null`. 
~~Optional[Callable[[Language], Language]]~~ | +| `before_update` | Optional callback that is invoked at the start of each training step with the `nlp` object and a `Dict` containing the following entries: `step`, `epoch`. Can be used to make deferred changes to components. Defaults to `null`. ~~Optional[Callable[[Language, Dict[str, Any]], None]]~~ | | `dev_corpus` | Dot notation of the config location defining the dev corpus. Defaults to `corpora.dev`. ~~str~~ | | `dropout` | The dropout rate. Defaults to `0.1`. ~~float~~ | | `eval_frequency` | How often to evaluate during training (steps). Defaults to `200`. ~~int~~ | From 8f062b849c846ecdf59263c82632b9fbd4eca9d0 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Thu, 24 Nov 2022 16:03:42 +0100 Subject: [PATCH 07/11] Fix Matcher cython profile=True header (#11867) --- spacy/matcher/matcher.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spacy/matcher/matcher.pyx b/spacy/matcher/matcher.pyx index e1dba01a2..c4a057ca0 100644 --- a/spacy/matcher/matcher.pyx +++ b/spacy/matcher/matcher.pyx @@ -1,4 +1,4 @@ -# cython: infer_types=True, cython: profile=True +# cython: infer_types=True, profile=True from typing import List, Iterable from libcpp.vector cimport vector From 30d31fd335306921aa7e8be081ecb396880aa14b Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Fri, 25 Nov 2022 11:12:46 +0100 Subject: [PATCH 08/11] Update Russian and Ukrainian lemmatizers (#11811) * pymorph2 issues #11620, #11626, #11625: - #11620: pymorphy2_lookup - #11626: handle multiple forms pointing to the same normal form + handling empty POS tag - #11625: matching DET that are labelled as PRON by pymorhp2 * Move lemmatizer algorithm changes back into RussianLemmatizer * Fix uk pymorphy3_lookup mode init * Move and update tests for ru/uk lookup lemmatizer modes * Fix typo * Remove traces of previous behavior for uninflected POS * Refactor to private generic-looking pymorphy methods * Remove xfailed uk lemmatizer cases * Update spacy/lang/ru/lemmatizer.py Co-authored-by: Richard Hudson Co-authored-by: Dmytro S Lituiev Co-authored-by: Richard Hudson --- spacy/lang/ru/lemmatizer.py | 51 ++++++++++++++++++-------- spacy/lang/uk/lemmatizer.py | 2 +- spacy/tests/conftest.py | 18 ++++----- spacy/tests/lang/ru/test_lemmatizer.py | 15 ++++++++ spacy/tests/lang/uk/test_lemmatizer.py | 18 ++++++--- 5 files changed, 73 insertions(+), 31 deletions(-) diff --git a/spacy/lang/ru/lemmatizer.py b/spacy/lang/ru/lemmatizer.py index c37a3a91a..f4a35de38 100644 --- a/spacy/lang/ru/lemmatizer.py +++ b/spacy/lang/ru/lemmatizer.py @@ -28,34 +28,39 @@ class RussianLemmatizer(Lemmatizer): from pymorphy2 import MorphAnalyzer except ImportError: raise ImportError( - "The Russian lemmatizer mode 'pymorphy2' requires the " - "pymorphy2 library. Install it with: pip install pymorphy2" + "The lemmatizer mode 'pymorphy2' requires the " + "pymorphy2 library and dictionaries. Install them with: " + "pip install pymorphy2" + "# for Ukrainian dictionaries:" + "pip install pymorphy2-dicts-uk" ) from None if getattr(self, "_morph", None) is None: - self._morph = MorphAnalyzer() - elif mode == "pymorphy3": + self._morph = MorphAnalyzer(lang="ru") + elif mode in {"pymorphy3", "pymorphy3_lookup"}: try: from pymorphy3 import MorphAnalyzer except ImportError: raise ImportError( - "The Russian lemmatizer mode 'pymorphy3' requires the " - "pymorphy3 library. Install it with: pip install pymorphy3" + "The lemmatizer mode 'pymorphy3' requires the " + "pymorphy3 library and dictionaries. 
Install them with: " + "pip install pymorphy3" + "# for Ukrainian dictionaries:" + "pip install pymorphy3-dicts-uk" ) from None if getattr(self, "_morph", None) is None: - self._morph = MorphAnalyzer() + self._morph = MorphAnalyzer(lang="ru") super().__init__( vocab, model, name, mode=mode, overwrite=overwrite, scorer=scorer ) - def pymorphy2_lemmatize(self, token: Token) -> List[str]: + def _pymorphy_lemmatize(self, token: Token) -> List[str]: string = token.text univ_pos = token.pos_ morphology = token.morph.to_dict() if univ_pos == "PUNCT": return [PUNCT_RULES.get(string, string)] if univ_pos not in ("ADJ", "DET", "NOUN", "NUM", "PRON", "PROPN", "VERB"): - # Skip unchangeable pos - return [string.lower()] + return self._pymorphy_lookup_lemmatize(token) analyses = self._morph.parse(string) filtered_analyses = [] for analysis in analyses: @@ -63,8 +68,10 @@ class RussianLemmatizer(Lemmatizer): # Skip suggested parse variant for unknown word for pymorphy continue analysis_pos, _ = oc2ud(str(analysis.tag)) - if analysis_pos == univ_pos or ( - analysis_pos in ("NOUN", "PROPN") and univ_pos in ("NOUN", "PROPN") + if ( + analysis_pos == univ_pos + or (analysis_pos in ("NOUN", "PROPN") and univ_pos in ("NOUN", "PROPN")) + or ((analysis_pos == "PRON") and (univ_pos == "DET")) ): filtered_analyses.append(analysis) if not len(filtered_analyses): @@ -107,15 +114,27 @@ class RussianLemmatizer(Lemmatizer): dict.fromkeys([analysis.normal_form for analysis in filtered_analyses]) ) - def pymorphy2_lookup_lemmatize(self, token: Token) -> List[str]: + def _pymorphy_lookup_lemmatize(self, token: Token) -> List[str]: string = token.text analyses = self._morph.parse(string) - if len(analyses) == 1: - return [analyses[0].normal_form] + # often multiple forms would derive from the same normal form + # thus check _unique_ normal forms + normal_forms = set([an.normal_form for an in analyses]) + if len(normal_forms) == 1: + return [next(iter(normal_forms))] return [string] + def pymorphy2_lemmatize(self, token: Token) -> List[str]: + return self._pymorphy_lemmatize(token) + + def pymorphy2_lookup_lemmatize(self, token: Token) -> List[str]: + return self._pymorphy_lookup_lemmatize(token) + def pymorphy3_lemmatize(self, token: Token) -> List[str]: - return self.pymorphy2_lemmatize(token) + return self._pymorphy_lemmatize(token) + + def pymorphy3_lookup_lemmatize(self, token: Token) -> List[str]: + return self._pymorphy_lookup_lemmatize(token) def oc2ud(oc_tag: str) -> Tuple[str, Dict[str, str]]: diff --git a/spacy/lang/uk/lemmatizer.py b/spacy/lang/uk/lemmatizer.py index 8337e7328..37015cc2a 100644 --- a/spacy/lang/uk/lemmatizer.py +++ b/spacy/lang/uk/lemmatizer.py @@ -29,7 +29,7 @@ class UkrainianLemmatizer(RussianLemmatizer): ) from None if getattr(self, "_morph", None) is None: self._morph = MorphAnalyzer(lang="uk") - elif mode == "pymorphy3": + elif mode in {"pymorphy3", "pymorphy3_lookup"}: try: from pymorphy3 import MorphAnalyzer except ImportError: diff --git a/spacy/tests/conftest.py b/spacy/tests/conftest.py index 0fc74243d..3a5c8e451 100644 --- a/spacy/tests/conftest.py +++ b/spacy/tests/conftest.py @@ -337,17 +337,17 @@ def ru_tokenizer(): return get_lang_class("ru")().tokenizer -@pytest.fixture +@pytest.fixture(scope="session") def ru_lemmatizer(): pytest.importorskip("pymorphy3") return get_lang_class("ru")().add_pipe("lemmatizer") -@pytest.fixture +@pytest.fixture(scope="session") def ru_lookup_lemmatizer(): - pytest.importorskip("pymorphy2") + pytest.importorskip("pymorphy3") return 
get_lang_class("ru")().add_pipe( - "lemmatizer", config={"mode": "pymorphy2_lookup"} + "lemmatizer", config={"mode": "pymorphy3_lookup"} ) @@ -423,19 +423,19 @@ def uk_tokenizer(): return get_lang_class("uk")().tokenizer -@pytest.fixture +@pytest.fixture(scope="session") def uk_lemmatizer(): pytest.importorskip("pymorphy3") pytest.importorskip("pymorphy3_dicts_uk") return get_lang_class("uk")().add_pipe("lemmatizer") -@pytest.fixture +@pytest.fixture(scope="session") def uk_lookup_lemmatizer(): - pytest.importorskip("pymorphy2") - pytest.importorskip("pymorphy2_dicts_uk") + pytest.importorskip("pymorphy3") + pytest.importorskip("pymorphy3_dicts_uk") return get_lang_class("uk")().add_pipe( - "lemmatizer", config={"mode": "pymorphy2_lookup"} + "lemmatizer", config={"mode": "pymorphy3_lookup"} ) diff --git a/spacy/tests/lang/ru/test_lemmatizer.py b/spacy/tests/lang/ru/test_lemmatizer.py index e82fd4f8c..9a5a9ad68 100644 --- a/spacy/tests/lang/ru/test_lemmatizer.py +++ b/spacy/tests/lang/ru/test_lemmatizer.py @@ -81,6 +81,7 @@ def test_ru_lemmatizer_punct(ru_lemmatizer): def test_ru_doc_lookup_lemmatization(ru_lookup_lemmatizer): + assert ru_lookup_lemmatizer.mode == "pymorphy3_lookup" words = ["мама", "мыла", "раму"] pos = ["NOUN", "VERB", "NOUN"] morphs = [ @@ -92,3 +93,17 @@ def test_ru_doc_lookup_lemmatization(ru_lookup_lemmatizer): doc = ru_lookup_lemmatizer(doc) lemmas = [token.lemma_ for token in doc] assert lemmas == ["мама", "мыла", "раму"] + + +@pytest.mark.parametrize( + "word,lemma", + ( + ("бременем", "бремя"), + ("будешь", "быть"), + ("какая-то", "какой-то"), + ), +) +def test_ru_lookup_lemmatizer(ru_lookup_lemmatizer, word, lemma): + assert ru_lookup_lemmatizer.mode == "pymorphy3_lookup" + doc = Doc(ru_lookup_lemmatizer.vocab, words=[word]) + assert ru_lookup_lemmatizer(doc)[0].lemma_ == lemma diff --git a/spacy/tests/lang/uk/test_lemmatizer.py b/spacy/tests/lang/uk/test_lemmatizer.py index 788744aa1..a65bb25e5 100644 --- a/spacy/tests/lang/uk/test_lemmatizer.py +++ b/spacy/tests/lang/uk/test_lemmatizer.py @@ -8,12 +8,20 @@ pytestmark = pytest.mark.filterwarnings("ignore::DeprecationWarning") def test_uk_lemmatizer(uk_lemmatizer): """Check that the default uk lemmatizer runs.""" doc = Doc(uk_lemmatizer.vocab, words=["a", "b", "c"]) + assert uk_lemmatizer.mode == "pymorphy3" uk_lemmatizer(doc) assert [token.lemma for token in doc] -def test_uk_lookup_lemmatizer(uk_lookup_lemmatizer): - """Check that the lookup uk lemmatizer runs.""" - doc = Doc(uk_lookup_lemmatizer.vocab, words=["a", "b", "c"]) - uk_lookup_lemmatizer(doc) - assert [token.lemma for token in doc] +@pytest.mark.parametrize( + "word,lemma", + ( + ("якийсь", "якийсь"), + ("розповідають", "розповідати"), + ("розповіси", "розповісти"), + ), +) +def test_uk_lookup_lemmatizer(uk_lookup_lemmatizer, word, lemma): + assert uk_lookup_lemmatizer.mode == "pymorphy3_lookup" + doc = Doc(uk_lookup_lemmatizer.vocab, words=[word]) + assert uk_lookup_lemmatizer(doc)[0].lemma_ == lemma From dece775279955e4aa84f718675a72ff34174a7ee Mon Sep 17 00:00:00 2001 From: kadarakos Date: Fri, 25 Nov 2022 11:31:28 +0100 Subject: [PATCH 09/11] correct ndim in docs (#11869) --- website/docs/api/vectors.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/api/vectors.md b/website/docs/api/vectors.md index 9636ea04c..d4702b592 100644 --- a/website/docs/api/vectors.md +++ b/website/docs/api/vectors.md @@ -50,7 +50,7 @@ modified later. | _keyword-only_ | | | `strings` | The string store. 
A new string store is created if one is not provided. Defaults to `None`. ~~Optional[StringStore]~~ | | `shape` | Size of the table as `(n_entries, n_columns)`, the number of entries and number of columns. Not required if you're initializing the object with `data` and `keys`. ~~Tuple[int, int]~~ | -| `data` | The vector data. ~~numpy.ndarray[ndim=1, dtype=float32]~~ | +| `data` | The vector data. ~~numpy.ndarray[ndim=2, dtype=float32]~~ | | `keys` | A sequence of keys aligned with the data. ~~Iterable[Union[str, int]]~~ | | `name` | A name to identify the vectors table. ~~str~~ | | `mode` 3.2 | Vectors mode: `"default"` or [`"floret"`](https://github.com/explosion/floret) (default: `"default"`). ~~str~~ | From c0fd8a2e71ce5eaad07e0b555fab8a152373fdc6 Mon Sep 17 00:00:00 2001 From: Raphael Mitsch Date: Fri, 25 Nov 2022 11:44:55 +0100 Subject: [PATCH 10/11] find-threshold: CLI command for multi-label classifier threshold tuning (#11280) * Add foundation for find-threshold CLI functionality. * Finish first draft for find-threshold. * Add tests. * Revert adjusted import statements. * Fix mypy errors. * Fix imports. * Harmonize arguments with spacy evaluate command. * Generalize component and threshold handling. Harmonize arguments with 'spacy evaluate' CLI. * Fix Spancat test. * Add beta parameter to Scorer and PRFScore. * Make beta a component scorer setting. * Remove beta. * Update nlp.config (workaround). * Reload pipeline on threshold change. Adjust tests. Remove confection reference. * Remove assumption of component being a Pipe object or having a .cfg attribute. * Adjust test output and reference values. * Remove beta references. Delete universe.json. * Reverting unnecessary changes. Removing unused default values. Renaming variables in find-cli tests. * Update spacy/cli/find_threshold.py Co-authored-by: Adriane Boyd * Remove adding labels in tests. * Remove unused error * Undo changes to PRFScorer * Change default value for n_trials. Log table iteratively. * Add warnings for pointless applications of find_threshold(). * Fix imports. * Adjust type check of TextCategorizer to exclude subclasses. * Change check of if there's only one unique value in scores. * Update spacy/cli/find_threshold.py Co-authored-by: Sofie Van Landeghem * Incorporate feedback. * Fix test issue. Update docstring. * Update docs & docstring. * Update spacy/tests/test_cli.py Co-authored-by: Adriane Boyd * Add examples to docs. Rename _nlp to nlp in tests. 
* Update spacy/cli/find_threshold.py Co-authored-by: Sofie Van Landeghem * Update spacy/cli/find_threshold.py Co-authored-by: Sofie Van Landeghem Co-authored-by: Adriane Boyd Co-authored-by: Sofie Van Landeghem --- spacy/cli/__init__.py | 1 + spacy/cli/find_threshold.py | 233 ++++++++++++++++++++++++++++++++++++ spacy/errors.py | 1 + spacy/pipeline/spancat.py | 4 +- spacy/tests/test_cli.py | 124 ++++++++++++++++++- website/docs/api/cli.md | 41 +++++++ 6 files changed, 399 insertions(+), 5 deletions(-) create mode 100644 spacy/cli/find_threshold.py diff --git a/spacy/cli/__init__.py b/spacy/cli/__init__.py index ce76ef9a9..aab2c8d12 100644 --- a/spacy/cli/__init__.py +++ b/spacy/cli/__init__.py @@ -27,6 +27,7 @@ from .project.dvc import project_update_dvc # noqa: F401 from .project.push import project_push # noqa: F401 from .project.pull import project_pull # noqa: F401 from .project.document import project_document # noqa: F401 +from .find_threshold import find_threshold # noqa: F401 @app.command("link", no_args_is_help=True, deprecated=True, hidden=True) diff --git a/spacy/cli/find_threshold.py b/spacy/cli/find_threshold.py new file mode 100644 index 000000000..efa664832 --- /dev/null +++ b/spacy/cli/find_threshold.py @@ -0,0 +1,233 @@ +import functools +import operator +from pathlib import Path +import logging +from typing import Optional, Tuple, Any, Dict, List + +import numpy +import wasabi.tables + +from ..pipeline import TextCategorizer, MultiLabel_TextCategorizer +from ..errors import Errors +from ..training import Corpus +from ._util import app, Arg, Opt, import_code, setup_gpu +from .. import util + +_DEFAULTS = { + "n_trials": 11, + "use_gpu": -1, + "gold_preproc": False, +} + + +@app.command( + "find-threshold", + context_settings={"allow_extra_args": False, "ignore_unknown_options": True}, +) +def find_threshold_cli( + # fmt: off + model: str = Arg(..., help="Model name or path"), + data_path: Path = Arg(..., help="Location of binary evaluation data in .spacy format", exists=True), + pipe_name: str = Arg(..., help="Name of pipe to examine thresholds for"), + threshold_key: str = Arg(..., help="Key of threshold attribute in component's configuration"), + scores_key: str = Arg(..., help="Metric to optimize"), + n_trials: int = Opt(_DEFAULTS["n_trials"], "--n_trials", "-n", help="Number of trials to determine optimal thresholds"), + code_path: Optional[Path] = Opt(None, "--code", "-c", help="Path to Python file with additional code (registered functions) to be imported"), + use_gpu: int = Opt(_DEFAULTS["use_gpu"], "--gpu-id", "-g", help="GPU ID or -1 for CPU"), + gold_preproc: bool = Opt(_DEFAULTS["gold_preproc"], "--gold-preproc", "-G", help="Use gold preprocessing"), + verbose: bool = Opt(False, "--silent", "-V", "-VV", help="Display more information for debugging purposes"), + # fmt: on +): + """ + Runs prediction trials for a trained model with varying tresholds to maximize + the specified metric. The search space for the threshold is traversed linearly + from 0 to 1 in `n_trials` steps. Results are displayed in a table on `stdout` + (the corresponding API call to `spacy.cli.find_threshold.find_threshold()` + returns all results). + + This is applicable only for components whose predictions are influenced by + thresholds - e.g. `textcat_multilabel` and `spancat`, but not `textcat`. Note + that the full path to the corresponding threshold attribute in the config has to + be provided. 
+ + DOCS: https://spacy.io/api/cli#find-threshold + """ + + util.logger.setLevel(logging.DEBUG if verbose else logging.INFO) + import_code(code_path) + find_threshold( + model=model, + data_path=data_path, + pipe_name=pipe_name, + threshold_key=threshold_key, + scores_key=scores_key, + n_trials=n_trials, + use_gpu=use_gpu, + gold_preproc=gold_preproc, + silent=False, + ) + + +def find_threshold( + model: str, + data_path: Path, + pipe_name: str, + threshold_key: str, + scores_key: str, + *, + n_trials: int = _DEFAULTS["n_trials"], # type: ignore + use_gpu: int = _DEFAULTS["use_gpu"], # type: ignore + gold_preproc: bool = _DEFAULTS["gold_preproc"], # type: ignore + silent: bool = True, +) -> Tuple[float, float, Dict[float, float]]: + """ + Runs prediction trials for models with varying tresholds to maximize the specified metric. + model (Union[str, Path]): Pipeline to evaluate. Can be a package or a path to a data directory. + data_path (Path): Path to file with DocBin with docs to use for threshold search. + pipe_name (str): Name of pipe to examine thresholds for. + threshold_key (str): Key of threshold attribute in component's configuration. + scores_key (str): Name of score to metric to optimize. + n_trials (int): Number of trials to determine optimal thresholds. + use_gpu (int): GPU ID or -1 for CPU. + gold_preproc (bool): Whether to use gold preprocessing. Gold preprocessing helps the annotations align to the + tokenization, and may result in sequences of more consistent length. However, it may reduce runtime accuracy due + to train/test skew. + silent (bool): Whether to print non-error-related output to stdout. + RETURNS (Tuple[float, float, Dict[float, float]]): Best found threshold, the corresponding score, scores for all + evaluated thresholds. + """ + + setup_gpu(use_gpu, silent=silent) + data_path = util.ensure_path(data_path) + if not data_path.exists(): + wasabi.msg.fail("Evaluation data not found", data_path, exits=1) + nlp = util.load_model(model) + + if pipe_name not in nlp.component_names: + raise AttributeError( + Errors.E001.format(name=pipe_name, opts=nlp.component_names) + ) + pipe = nlp.get_pipe(pipe_name) + if not hasattr(pipe, "scorer"): + raise AttributeError(Errors.E1045) + + if type(pipe) == TextCategorizer: + wasabi.msg.warn( + "The `textcat` component doesn't use a threshold as it's not applicable to the concept of " + "exclusive classes. All thresholds will yield the same results." + ) + + if not silent: + wasabi.msg.info( + title=f"Optimizing for {scores_key} for component '{pipe_name}' with {n_trials} " + f"trials." + ) + + # Load evaluation corpus. + corpus = Corpus(data_path, gold_preproc=gold_preproc) + dev_dataset = list(corpus(nlp)) + config_keys = threshold_key.split(".") + + def set_nested_item( + config: Dict[str, Any], keys: List[str], value: float + ) -> Dict[str, Any]: + """Set item in nested dictionary. Adapted from https://stackoverflow.com/a/54138200. + config (Dict[str, Any]): Configuration dictionary. + keys (List[Any]): Path to value to set. + value (float): Value to set. + RETURNS (Dict[str, Any]): Updated dictionary. + """ + functools.reduce(operator.getitem, keys[:-1], config)[keys[-1]] = value + return config + + def filter_config( + config: Dict[str, Any], keys: List[str], full_key: str + ) -> Dict[str, Any]: + """Filters provided config dictionary so that only the specified keys path remains. + config (Dict[str, Any]): Configuration dictionary. + keys (List[Any]): Path to value to set. + full_key (str): Full user-specified key. 
+ RETURNS (Dict[str, Any]): Filtered dictionary. + """ + if keys[0] not in config: + wasabi.msg.fail( + title=f"Failed to look up `{full_key}` in config: sub-key {[keys[0]]} not found.", + text=f"Make sure you specified {[keys[0]]} correctly. The following sub-keys are available instead: " + f"{list(config.keys())}", + exits=1, + ) + return { + keys[0]: filter_config(config[keys[0]], keys[1:], full_key) + if len(keys) > 1 + else config[keys[0]] + } + + # Evaluate with varying threshold values. + scores: Dict[float, float] = {} + config_keys_full = ["components", pipe_name, *config_keys] + table_col_widths = (10, 10) + thresholds = numpy.linspace(0, 1, n_trials) + print(wasabi.tables.row(["Threshold", f"{scores_key}"], widths=table_col_widths)) + for threshold in thresholds: + # Reload pipeline with overrides specifying the new threshold. + nlp = util.load_model( + model, + config=set_nested_item( + filter_config( + nlp.config, config_keys_full, ".".join(config_keys_full) + ).copy(), + config_keys_full, + threshold, + ), + ) + if hasattr(pipe, "cfg"): + setattr( + nlp.get_pipe(pipe_name), + "cfg", + set_nested_item(getattr(pipe, "cfg"), config_keys, threshold), + ) + + eval_scores = nlp.evaluate(dev_dataset) + if scores_key not in eval_scores: + wasabi.msg.fail( + title=f"Failed to look up score `{scores_key}` in evaluation results.", + text=f"Make sure you specified the correct value for `scores_key`. The following scores are " + f"available: {list(eval_scores.keys())}", + exits=1, + ) + scores[threshold] = eval_scores[scores_key] + + if not isinstance(scores[threshold], (float, int)): + wasabi.msg.fail( + f"Returned score for key '{scores_key}' is not numeric. Threshold optimization only works for numeric " + f"scores.", + exits=1, + ) + print( + wasabi.row( + [round(threshold, 3), round(scores[threshold], 3)], + widths=table_col_widths, + ) + ) + + best_threshold = max(scores.keys(), key=(lambda key: scores[key])) + + # If all scores are identical, emit warning. + if len(set(scores.values())) == 1: + wasabi.msg.warn( + title="All scores are identical. Verify that all settings are correct.", + text="" + if ( + not isinstance(pipe, MultiLabel_TextCategorizer) + or scores_key in ("cats_macro_f", "cats_micro_f") + ) + else "Use `cats_macro_f` or `cats_micro_f` when optimizing the threshold for `textcat_multilabel`.", + ) + + else: + if not silent: + print( + f"\nBest threshold: {round(best_threshold, ndigits=4)} with {scores_key} value of {scores[best_threshold]}." + ) + + return best_threshold, scores[best_threshold], scores diff --git a/spacy/errors.py b/spacy/errors.py index 1d29f0e17..a8de5fb90 100644 --- a/spacy/errors.py +++ b/spacy/errors.py @@ -956,6 +956,7 @@ class Errors(metaclass=ErrorsWithCodes): "sure it's overwritten on the subclass.") E1046 = ("{cls_name} is an abstract class and cannot be instantiated. 
If you are looking for spaCy's default " "knowledge base, use `InMemoryLookupKB`.") + E1047 = ("`find_threshold()` only supports components with a `scorer` attribute.") # Deprecated model shortcuts, only used in errors and warnings diff --git a/spacy/pipeline/spancat.py b/spacy/pipeline/spancat.py index 956bbb72c..0a84c72fd 100644 --- a/spacy/pipeline/spancat.py +++ b/spacy/pipeline/spancat.py @@ -1,7 +1,7 @@ -from typing import List, Dict, Callable, Tuple, Optional, Iterable, Any, cast +from typing import List, Dict, Callable, Tuple, Optional, Iterable, Any from thinc.api import Config, Model, get_current_ops, set_dropout_rate, Ops from thinc.api import Optimizer -from thinc.types import Ragged, Ints2d, Floats2d, Ints1d +from thinc.types import Ragged, Ints2d, Floats2d import numpy diff --git a/spacy/tests/test_cli.py b/spacy/tests/test_cli.py index 8225e14f1..1c4d0c98f 100644 --- a/spacy/tests/test_cli.py +++ b/spacy/tests/test_cli.py @@ -1,9 +1,10 @@ import os import math +from collections import Counter +from typing import Tuple, List, Dict, Any import pkg_resources -from random import sample -from typing import Counter +import numpy import pytest import srsly from click import NoSuchOption @@ -28,11 +29,12 @@ from spacy.cli.package import get_third_party_dependencies from spacy.cli.package import _is_permitted_package_name from spacy.cli.project.run import _check_requirements from spacy.cli.validate import get_model_pkgs +from spacy.cli.find_threshold import find_threshold from spacy.lang.en import English from spacy.lang.nl import Dutch from spacy.language import Language from spacy.schemas import ProjectConfigSchema, RecommendationSchema, validate -from spacy.tokens import Doc +from spacy.tokens import Doc, DocBin from spacy.tokens.span import Span from spacy.training import Example, docs_to_json, offsets_to_biluo_tags from spacy.training.converters import conll_ner_to_docs, conllu_to_docs @@ -859,6 +861,122 @@ def test_span_length_freq_dist_output_must_be_correct(): assert list(span_freqs.keys()) == [3, 1, 4, 5, 2] +def test_cli_find_threshold(capsys): + thresholds = numpy.linspace(0, 1, 10) + + def make_examples(nlp: Language) -> List[Example]: + docs: List[Example] = [] + + for t in [ + ( + "I am angry and confused in the Bank of America.", + { + "cats": {"ANGRY": 1.0, "CONFUSED": 1.0, "HAPPY": 0.0}, + "spans": {"sc": [(31, 46, "ORG")]}, + }, + ), + ( + "I am confused but happy in New York.", + { + "cats": {"ANGRY": 0.0, "CONFUSED": 1.0, "HAPPY": 1.0}, + "spans": {"sc": [(27, 35, "GPE")]}, + }, + ), + ]: + doc = nlp.make_doc(t[0]) + docs.append(Example.from_dict(doc, t[1])) + + return docs + + def init_nlp( + components: Tuple[Tuple[str, Dict[str, Any]], ...] = () + ) -> Tuple[Language, List[Example]]: + new_nlp = English() + new_nlp.add_pipe( # type: ignore + factory_name="textcat_multilabel", + name="tc_multi", + config={"threshold": 0.9}, + ) + + # Append additional components to pipeline. + for cfn, comp_config in components: + new_nlp.add_pipe(cfn, config=comp_config) + + new_examples = make_examples(new_nlp) + new_nlp.initialize(get_examples=lambda: new_examples) + for i in range(5): + new_nlp.update(new_examples) + + return new_nlp, new_examples + + with make_tempdir() as docs_dir: + # Check whether find_threshold() identifies lowest threshold above 0 as (first) ideal threshold, as this matches + # the current model behavior with the examples above. This can break once the model behavior changes and serves + # mostly as a smoke test. 
+ nlp, examples = init_nlp() + DocBin(docs=[example.reference for example in examples]).to_disk( + docs_dir / "docs.spacy" + ) + with make_tempdir() as nlp_dir: + nlp.to_disk(nlp_dir) + res = find_threshold( + model=nlp_dir, + data_path=docs_dir / "docs.spacy", + pipe_name="tc_multi", + threshold_key="threshold", + scores_key="cats_macro_f", + silent=True, + ) + assert res[0] != thresholds[0] + assert thresholds[0] < res[0] < thresholds[9] + assert res[1] == 1.0 + assert res[2][1.0] == 0.0 + + # Test with spancat. + nlp, _ = init_nlp((("spancat", {}),)) + with make_tempdir() as nlp_dir: + nlp.to_disk(nlp_dir) + res = find_threshold( + model=nlp_dir, + data_path=docs_dir / "docs.spacy", + pipe_name="spancat", + threshold_key="threshold", + scores_key="spans_sc_f", + silent=True, + ) + assert res[0] != thresholds[0] + assert thresholds[0] < res[0] < thresholds[8] + assert res[1] >= 0.6 + assert res[2][1.0] == 0.0 + + # Having multiple textcat_multilabel components should work, since the name has to be specified. + nlp, _ = init_nlp((("textcat_multilabel", {}),)) + with make_tempdir() as nlp_dir: + nlp.to_disk(nlp_dir) + assert find_threshold( + model=nlp_dir, + data_path=docs_dir / "docs.spacy", + pipe_name="tc_multi", + threshold_key="threshold", + scores_key="cats_macro_f", + silent=True, + ) + + # Specifying the name of an non-existing pipe should fail. + nlp, _ = init_nlp() + with make_tempdir() as nlp_dir: + nlp.to_disk(nlp_dir) + with pytest.raises(AttributeError): + find_threshold( + model=nlp_dir, + data_path=docs_dir / "docs.spacy", + pipe_name="_", + threshold_key="threshold", + scores_key="cats_macro_f", + silent=True, + ) + + @pytest.mark.parametrize( "reqs,output", [ diff --git a/website/docs/api/cli.md b/website/docs/api/cli.md index 6e581b903..b42ba8a4f 100644 --- a/website/docs/api/cli.md +++ b/website/docs/api/cli.md @@ -12,6 +12,7 @@ menu: - ['train', 'train'] - ['pretrain', 'pretrain'] - ['evaluate', 'evaluate'] + - ['find-threshold', 'find-threshold'] - ['assemble', 'assemble'] - ['package', 'package'] - ['project', 'project'] @@ -1161,6 +1162,46 @@ $ python -m spacy evaluate [model] [data_path] [--output] [--code] [--gold-prepr | `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | | **CREATES** | Training results and optional metrics and visualizations. | +## find-threshold {#find-threshold new="3.5" tag="command"} + +Runs prediction trials for a trained model with varying tresholds to maximize +the specified metric. The search space for the threshold is traversed linearly +from 0 to 1 in `n_trials` steps. Results are displayed in a table on `stdout` +(the corresponding API call to `spacy.cli.find_threshold.find_threshold()` +returns all results). + +This is applicable only for components whose predictions are influenced by +thresholds - e.g. `textcat_multilabel` and `spancat`, but not `textcat`. Note +that the full path to the corresponding threshold attribute in the config has to +be provided. + +> #### Examples +> +> ```cli +> # For textcat_multilabel: +> $ python -m spacy find-threshold my_nlp data.spacy textcat_multilabel threshold cats_macro_f +> ``` +> +> ```cli +> # For spancat: +> $ python -m spacy find-threshold my_nlp data.spacy spancat threshold spans_sc_f +> ``` + + +| Name | Description | +| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `model` | Pipeline to evaluate. 
Can be a package or a path to a data directory. ~~str (positional)~~ | +| `data_path` | Path to file with DocBin with docs to use for threshold search. ~~Path (positional)~~ | +| `pipe_name` | Name of pipe to examine thresholds for. ~~str (positional)~~ | +| `threshold_key` | Key of threshold attribute in component's configuration. ~~str (positional)~~ | +| `scores_key` | Name of score to metric to optimize. ~~str (positional)~~ | +| `--n_trials`, `-n` | Number of trials to determine optimal thresholds. ~~int (option)~~ | +| `--code`, `-c` | Path to Python file with additional code to be imported. Allows [registering custom functions](/usage/training#custom-functions) for new architectures. ~~Optional[Path] \(option)~~ | +| `--gpu-id`, `-g` | GPU to use, if any. Defaults to `-1` for CPU. ~~int (option)~~ | +| `--gold-preproc`, `-G` | Use gold preprocessing. ~~bool (flag)~~ | +| `--silent`, `-V`, `-VV` | GPU to use, if any. Defaults to `-1` for CPU. ~~int (option)~~ | +| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | + ## assemble {#assemble tag="command"} Assemble a pipeline from a config file without additional training. Expects a From 681ec209147ba476a4062e5fec2248c7e0c50d68 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Fri, 25 Nov 2022 13:00:57 +0100 Subject: [PATCH 11/11] Add smart_open requirement, update deprecated options (#11864) * Switch from deprecated `ignore_ext` to `compression` * Add upload/download test for local files --- requirements.txt | 1 + setup.cfg | 1 + spacy/cli/_util.py | 2 +- spacy/tests/test_cli.py | 16 ++++++++++++++++ 4 files changed, 19 insertions(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 23bfa6f14..dd2eff0c2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -11,6 +11,7 @@ srsly>=2.4.3,<3.0.0 catalogue>=2.0.6,<2.1.0 typer>=0.3.0,<0.8.0 pathy>=0.3.5 +smart-open>=5.2.1,<7.0.0 # Third party dependencies numpy>=1.15.0 requests>=2.13.0,<3.0.0 diff --git a/setup.cfg b/setup.cfg index 82d4d2758..330dc8205 100644 --- a/setup.cfg +++ b/setup.cfg @@ -53,6 +53,7 @@ install_requires = # Third-party dependencies typer>=0.3.0,<0.8.0 pathy>=0.3.5 + smart-open>=5.2.1,<7.0.0 tqdm>=4.38.0,<5.0.0 numpy>=1.15.0 requests>=2.13.0,<3.0.0 diff --git a/spacy/cli/_util.py b/spacy/cli/_util.py index 897964a88..872f69c88 100644 --- a/spacy/cli/_util.py +++ b/spacy/cli/_util.py @@ -358,7 +358,7 @@ def download_file(src: Union[str, "Pathy"], dest: Path, *, force: bool = False) if dest.exists() and not force: return None src = str(src) - with smart_open.open(src, mode="rb", ignore_ext=True) as input_file: + with smart_open.open(src, mode="rb", compression="disable") as input_file: with dest.open(mode="wb") as output_file: shutil.copyfileobj(input_file, output_file) diff --git a/spacy/tests/test_cli.py b/spacy/tests/test_cli.py index 1c4d0c98f..525c6d255 100644 --- a/spacy/tests/test_cli.py +++ b/spacy/tests/test_cli.py @@ -17,6 +17,7 @@ from spacy.cli._util import is_subpath_of, load_project_config from spacy.cli._util import parse_config_overrides, string_to_list from spacy.cli._util import substitute_project_variables from spacy.cli._util import validate_project_commands +from spacy.cli._util import upload_file, download_file from spacy.cli.debug_data import _compile_gold, _get_labels_from_model from spacy.cli.debug_data import _get_labels_from_spancat from spacy.cli.debug_data import _get_distribution, _get_kl_divergence @@ -1014,3 +1015,18 @@ def test_project_check_requirements(reqs, output): 
pkg_resources.require("spacyunknowndoesnotexist12345") except pkg_resources.DistributionNotFound: assert output == _check_requirements([req.strip() for req in reqs.split("\n")]) + + +def test_upload_download_local_file(): + with make_tempdir() as d1, make_tempdir() as d2: + filename = "f.txt" + content = "content" + local_file = d1 / filename + remote_file = d2 / filename + with local_file.open(mode="w") as file_: + file_.write(content) + upload_file(local_file, remote_file) + local_file.unlink() + download_file(remote_file, local_file) + with local_file.open(mode="r") as file_: + assert file_.read() == content
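
The patches above introduce the `training.before_update` callback (#11739) but show it only through its config default, schema entry, and docs table row. As a minimal sketch — not part of the patch series itself — the snippet below illustrates how such a callback could be registered; the registry name `"my_before_update.v1"`, the `log_every` parameter, and the factory function are hypothetical illustrations, and it assumes the callback is wired up through the `@callbacks` registry, analogous to how `before_to_disk` callbacks are registered.

```python
# Hypothetical sketch of a training.before_update callback (feature added in #11739).
# The registry name and parameters below are illustrative assumptions, not part of
# the patches above.
from typing import Any, Callable, Dict

import spacy
from spacy.language import Language


@spacy.registry.callbacks("my_before_update.v1")
def create_before_update(log_every: int = 100) -> Callable[[Language, Dict[str, Any]], None]:
    def before_update(nlp: Language, args: Dict[str, Any]) -> None:
        # The training loop passes the current step and epoch
        # (see the change to spacy/training/loop.py above).
        step, epoch = args["step"], args["epoch"]
        if step % log_every == 0:
            print(f"before_update called at step {step} (epoch {epoch}) for {nlp.pipe_names}")

    return before_update
```

In the training config this would then be referenced under `[training.before_update]` with `@callbacks = "my_before_update.v1"` (and, under this assumption, `log_every = 100`), mirroring the pattern used for the existing `before_to_disk` callback.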