From afd7a2476d2491af864d0723bff96191ea61b429 Mon Sep 17 00:00:00 2001
From: Damian Romero <12145757+damian-romero@users.noreply.github.com>
Date: Thu, 1 Dec 2022 07:06:28 -0500
Subject: [PATCH 01/20] Fix typo in vocab.md table (#11908)

* Fix typo in vocab.md table

Fixes explosion/spaCy/#11907

* Reformat vocab.md with Prettier
---
 website/docs/api/vocab.md | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/website/docs/api/vocab.md b/website/docs/api/vocab.md
index afbd1301d..5e4de219a 100644
--- a/website/docs/api/vocab.md
+++ b/website/docs/api/vocab.md
@@ -308,14 +308,14 @@ Load state from a binary string.
 > assert type(PERSON) == int
 > ```
 
-| Name                   | Description |
-| ---------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
-| `strings`              | A table managing the string-to-int mapping. ~~StringStore~~ |
-| `vectors`              | A table associating word IDs to word vectors. ~~Vectors~~ |
-| `vectors_length`       | Number of dimensions for each word vector. ~~int~~ |
-| `lookups`              | The available lookup tables in this vocab. ~~Lookups~~ |
-| `writing_system`       | A dict with information about the language's writing system. ~~Dict[str, Any]~~ |
-| `get_noun_chunks` 3.0  | A function that yields base noun phrases used for [`Doc.noun_chunks`](/ap/doc#noun_chunks). ~~Optional[Callable[[Union[Doc, Span], Iterator[Tuple[int, int, int]]]]]~~ |
+| Name                   | Description |
+| ---------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `strings`              | A table managing the string-to-int mapping. ~~StringStore~~ |
+| `vectors`              | A table associating word IDs to word vectors. ~~Vectors~~ |
+| `vectors_length`       | Number of dimensions for each word vector. ~~int~~ |
+| `lookups`              | The available lookup tables in this vocab. ~~Lookups~~ |
+| `writing_system`       | A dict with information about the language's writing system. ~~Dict[str, Any]~~ |
+| `get_noun_chunks` 3.0  | A function that yields base noun phrases used for [`Doc.noun_chunks`](/api/doc#noun_chunks). ~~Optional[Callable[[Union[Doc, Span], Iterator[Tuple[int, int, int]]]]]~~ |
 
 ## Serialization fields {#serialization-fields}
 

From 9cf3fa9711dfff94e88d6e137a52ebabdcceaad8 Mon Sep 17 00:00:00 2001
From: Zhangrp
Date: Thu, 1 Dec 2022 20:30:27 +0800
Subject: [PATCH 02/20] Add docs for biluo_to_iob and iob_to_biluo. (#11901)

* Add docs for biluo_to_iob and iob_to_biluo.

* Fix typos.

* Remove redundant links.
---
 website/docs/api/top-level.md | 48 +++++++++++++++++++++++++++++++++++
 1 file changed, 48 insertions(+)

diff --git a/website/docs/api/top-level.md b/website/docs/api/top-level.md
index 211affa4a..26a5d42f4 100644
--- a/website/docs/api/top-level.md
+++ b/website/docs/api/top-level.md
@@ -1004,6 +1004,54 @@ This method was previously available as `spacy.gold.spans_from_biluo_tags`.
 | `tags`      | A sequence of [BILUO](/usage/linguistic-features#accessing-ner) tags with each tag describing one token. Each tag string will be of the form of either `""`, `"O"` or `"{action}-{label}"`, where action is one of `"B"`, `"I"`, `"L"`, `"U"`. ~~List[str]~~ |
 | **RETURNS** | A sequence of `Span` objects with added entity labels. ~~List[Span]~~ |
 
+### training.biluo_to_iob {#biluo_to_iob tag="function"}
+
+Convert a sequence of [BILUO](/usage/linguistic-features#accessing-ner) tags to
+[IOB](/usage/linguistic-features#accessing-ner) tags. This is useful if you want
+to use the BILUO tags with a model that only supports IOB tags.
+
+> #### Example
+>
+> ```python
+> from spacy.training import biluo_to_iob
+>
+> tags = ["O", "O", "B-LOC", "I-LOC", "L-LOC", "O"]
+> iob_tags = biluo_to_iob(tags)
+> assert iob_tags == ["O", "O", "B-LOC", "I-LOC", "I-LOC", "O"]
+> ```
+
+| Name        | Description |
+| ----------- | ---------------------------------------------------------------------------------------- |
+| `tags`      | A sequence of [BILUO](/usage/linguistic-features#accessing-ner) tags. ~~Iterable[str]~~ |
+| **RETURNS** | A list of [IOB](/usage/linguistic-features#accessing-ner) tags. ~~List[str]~~ |
+
+### training.iob_to_biluo {#iob_to_biluo tag="function"}
+
+Convert a sequence of [IOB](/usage/linguistic-features#accessing-ner) tags to
+[BILUO](/usage/linguistic-features#accessing-ner) tags. This is useful if you
+want to use the IOB tags with a model that only supports BILUO tags.
+
+<Infobox title="Changed in v3.0" variant="warning" id="iob_to_biluo">
+
+This method was previously available as `spacy.gold.iob_to_biluo`.
+
+</Infobox>
+
+> #### Example
+>
+> ```python
+> from spacy.training import iob_to_biluo
+>
+> tags = ["O", "O", "B-LOC", "I-LOC", "O"]
+> biluo_tags = iob_to_biluo(tags)
+> assert biluo_tags == ["O", "O", "B-LOC", "L-LOC", "O"]
+> ```
+
+| Name        | Description |
+| ----------- | ------------------------------------------------------------------------------------- |
+| `tags`      | A sequence of [IOB](/usage/linguistic-features#accessing-ner) tags. ~~Iterable[str]~~ |
+| **RETURNS** | A list of [BILUO](/usage/linguistic-features#accessing-ner) tags. ~~List[str]~~ |
+
 ## Utility functions {#util source="spacy/util.py"}
 
 spaCy comes with a small collection of utility functions located in

From 445c670a2d537598b3d562fb7f444050164a260b Mon Sep 17 00:00:00 2001
From: Adriane Boyd
Date: Fri, 2 Dec 2022 09:33:52 +0100
Subject: [PATCH 03/20] Fix spancat for zero suggestions (#11860)

* Add test for spancat predict with zero suggestions

* Fix spancat for zero suggestions

* Undo changes to extract_spans

* Use .sum() as in update
---
 spacy/pipeline/spancat.py            |  5 +++-
 spacy/tests/pipeline/test_spancat.py | 43 ++++++++++++++++++++++------
 2 files changed, 38 insertions(+), 10 deletions(-)

diff --git a/spacy/pipeline/spancat.py b/spacy/pipeline/spancat.py
index 0a84c72fd..a3388e81a 100644
--- a/spacy/pipeline/spancat.py
+++ b/spacy/pipeline/spancat.py
@@ -272,7 +272,10 @@ class SpanCategorizer(TrainablePipe):
         DOCS: https://spacy.io/api/spancategorizer#predict
         """
         indices = self.suggester(docs, ops=self.model.ops)
-        scores = self.model.predict((docs, indices))  # type: ignore
+        if indices.lengths.sum() == 0:
+            scores = self.model.ops.alloc2f(0, 0)
+        else:
+            scores = self.model.predict((docs, indices))  # type: ignore
         return indices, scores
 
     def set_candidates(

diff --git a/spacy/tests/pipeline/test_spancat.py b/spacy/tests/pipeline/test_spancat.py
index 15256a763..e9db983d3 100644
--- a/spacy/tests/pipeline/test_spancat.py
+++ b/spacy/tests/pipeline/test_spancat.py
@@ -372,24 +372,39 @@ def test_overfitting_IO_overlapping():
 
 
 def test_zero_suggestions():
-    # Test with a suggester that returns 0 suggestions
+    # Test with a suggester that can return 0 suggestions
 
-    @registry.misc("test_zero_suggester")
-    def make_zero_suggester():
-        def zero_suggester(docs, *, ops=None):
+    @registry.misc("test_mixed_zero_suggester")
+    def make_mixed_zero_suggester():
+        def mixed_zero_suggester(docs, *, ops=None):
             if ops is None:
                 ops = get_current_ops()
-            return Ragged(
-                ops.xp.zeros((0, 0), dtype="i"), ops.xp.zeros((len(docs),), dtype="i")
-            )
+            spans = []
+            lengths = []
+            for doc in docs:
+                if len(doc) > 0 and len(doc) % 2 == 0:
+                    spans.append((0, 1))
+                    lengths.append(1)
+                else:
+                    lengths.append(0)
+            spans = ops.asarray2i(spans)
+            lengths_array = ops.asarray1i(lengths)
+            if len(spans) > 0:
+                output = Ragged(ops.xp.vstack(spans), lengths_array)
+            else:
+                output = Ragged(ops.xp.zeros((0, 0), dtype="i"), lengths_array)
+            return output
 
-        return zero_suggester
+        return mixed_zero_suggester
 
     fix_random_seed(0)
     nlp = English()
     spancat = nlp.add_pipe(
         "spancat",
-        config={"suggester": {"@misc": "test_zero_suggester"}, "spans_key": SPAN_KEY},
+        config={
+            "suggester": {"@misc": "test_mixed_zero_suggester"},
+            "spans_key": SPAN_KEY,
+        },
     )
     train_examples = make_examples(nlp)
     optimizer = nlp.initialize(get_examples=lambda: train_examples)
@@ -397,6 +412,16 @@ def test_zero_suggestions():
     assert set(spancat.labels) == {"LOC", "PERSON"}
     nlp.update(train_examples, sgd=optimizer)
 
+    # empty doc
+    nlp("")
+    # single doc with zero suggestions
+    nlp("one")
+    # single doc with one suggestion
+    nlp("two two")
+    # batch with mixed zero/one suggestions
+    list(nlp.pipe(["one", "two two", "three three three", "", "four four four four"]))
+    # batch with no suggestions
+    list(nlp.pipe(["", "one", "three three three"]))
 
 
 def test_set_candidates():

From f9d17a644b3d037924f715c03672ada6d12e4d86 Mon Sep 17 00:00:00 2001
From: Paul O'Leary McCann
Date: Fri, 2 Dec 2022 18:17:11 +0900
Subject: [PATCH 04/20] Config generation fails for GPU without transformers (#11899)

If you don't have spacy-transformers installed, but try to use `init
config` with the GPU flag, you'll get an error. The issue is that the
`use_transformers` flag in the config is conflated with the GPU flag,
and then there's an attempt to access transformers config info that may
not exist.

There may be a better way to do this, but this stops the error.
---
 spacy/cli/templates/quickstart_training.jinja | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/spacy/cli/templates/quickstart_training.jinja b/spacy/cli/templates/quickstart_training.jinja
index 58864883a..b961ac892 100644
--- a/spacy/cli/templates/quickstart_training.jinja
+++ b/spacy/cli/templates/quickstart_training.jinja
@@ -1,7 +1,7 @@
 {# This is a template for training configs used for the quickstart widget in
 the docs and the init config command. It encodes various best practices and
 can help generate the best possible configuration, given a user's
 requirements. #}
-{%- set use_transformer = hardware != "cpu" -%}
+{%- set use_transformer = hardware != "cpu" and transformer_data -%}
 {%- set transformer = transformer_data[optimize] if use_transformer else {} -%}
 {%- set listener_components = ["tagger", "morphologizer", "parser", "ner", "textcat", "textcat_multilabel", "entity_linker", "spancat", "trainable_lemmatizer"] -%}
 [paths]

From df0cb4b77be6e20a62143f5f65c3e165a4a45bcc Mon Sep 17 00:00:00 2001
From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com>
Date: Fri, 2 Dec 2022 14:49:12 +0100
Subject: [PATCH 05/20] Auto-format code with black (#11913)

Co-authored-by: explosion-bot
---
 spacy/util.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/spacy/util.py b/spacy/util.py
index cba403361..8d211a9a5 100644
--- a/spacy/util.py
+++ b/spacy/util.py
@@ -1643,7 +1643,9 @@ def _pipe(
     docs: Iterable["Doc"],
     proc: "PipeCallable",
     name: str,
-    default_error_handler: Callable[[str, "PipeCallable", List["Doc"], Exception], NoReturn],
+    default_error_handler: Callable[
+        [str, "PipeCallable", List["Doc"], Exception], NoReturn
+    ],
     kwargs: Mapping[str, Any],
 ) -> Iterator["Doc"]:
     if hasattr(proc, "pipe"):

From 4b2097a2713b548cba1c841fa5cb8f6f42e3e30f Mon Sep 17 00:00:00 2001
From: Sofie Van Landeghem
Date: Mon, 5 Dec 2022 08:29:13 +0100
Subject: [PATCH 06/20] fix links (#11927)

---
 website/docs/usage/v3-4.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/website/docs/usage/v3-4.md b/website/docs/usage/v3-4.md
index 597fc3cc8..e10110b71 100644
--- a/website/docs/usage/v3-4.md
+++ b/website/docs/usage/v3-4.md
@@ -66,8 +66,8 @@ The English CNN pipelines have new word vectors:
 | Package                                       | Model Version | TAG  | Parser LAS | NER F |
 | --------------------------------------------- | ------------- | ---: | ---------: | ----: |
 | [`en_core_web_md`](/models/en#en_core_web_md) | v3.3.0        | 97.3 |       90.1 |  84.6 |
-| [`en_core_web_md`](/models/en#en_core_web_lg) | v3.4.0        | 97.2 |       90.3 |  85.5 |
-| [`en_core_web_lg`](/models/en#en_core_web_md) | v3.3.0        | 97.4 |       90.1 |  85.3 |
+| [`en_core_web_md`](/models/en#en_core_web_md) | v3.4.0        | 97.2 |       90.3 |  85.5 |
+| [`en_core_web_lg`](/models/en#en_core_web_lg) | v3.3.0        | 97.4 |       90.1 |  85.3 |
 | [`en_core_web_lg`](/models/en#en_core_web_lg) | v3.4.0        | 97.3 |       90.2 |  85.6 |
 
 ## Notes about upgrading from v3.3 {#upgrading}

From 5848656b5e3287d77674ce678e321eadea52f68e Mon Sep 17 00:00:00 2001
From: Paul O'Leary McCann
Date: Mon, 5 Dec 2022 17:43:23 +0900
Subject: [PATCH 07/20] Switch ubuntu-latest to ubuntu-20.04 in main tests (#11928)

* Switch ubuntu-latest to ubuntu-20.04 in main tests

* Only use 20.04 for 3.6
---
 azure-pipelines.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 9c3b92f06..0f7ea91f9 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -41,7 +41,7 @@ jobs:
       matrix:
         # We're only running one platform per Python version to speed up builds
         Python36Linux:
-          imageName: "ubuntu-latest"
+          imageName: "ubuntu-20.04"
           python.version: "3.6"
         # Python36Windows:
         #  imageName: "windows-latest"
         #  python.version: "3.6"
         # Python36Mac:
         #  imageName: "macos-latest"
         #  python.version: "3.6"
         # Python37Linux:
-        #  imageName: "ubuntu-latest"
+        #  imageName: "ubuntu-20.04"
         #  python.version: "3.7"
         Python37Windows:
           imageName: "windows-latest"
           python.version: "3.7"

From 6d2ca1ab3a545491acbe058035677a263135e52a Mon Sep 17 00:00:00 2001
From: Darigov Research <30328618+darigovresearch@users.noreply.github.com>
Date: Mon, 5 Dec 2022 08:49:04 +0000
Subject: [PATCH 08/20] docs: Adds link to license in readme (#11924)

Would resolve https://github.com/explosion/spaCy/issues/11923 if merged
---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index abfc3da67..7595460fb 100644
--- a/README.md
+++ b/README.md
@@ -14,7 +14,7 @@ parsing, **named entity recognition**, **text classification** and more,
 multi-task learning with pretrained **transformers** like BERT, as well as a
 production-ready [**training system**](https://spacy.io/usage/training) and easy
 model packaging, deployment and workflow management. spaCy is commercial
-open-source software, released under the MIT license.
+open-source software, released under the [MIT license](https://github.com/explosion/spaCy/blob/master/LICENSE).
 
 💫 **Version 3.4 out now!**
 [Check out the release notes here.](https://github.com/explosion/spaCy/releases)

From 8afa8b5a7b8ee51eb42b83dabd0f3c1276369e73 Mon Sep 17 00:00:00 2001
From: Adriane Boyd
Date: Mon, 5 Dec 2022 10:00:00 +0100
Subject: [PATCH 09/20] Refactor kwargs in CLI msg for future wasabi compatibility (#11918)

Necessary for mypy with wasabi v1+.
---
 spacy/cli/project/run.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/spacy/cli/project/run.py b/spacy/cli/project/run.py
index a109c4a5a..6dd174902 100644
--- a/spacy/cli/project/run.py
+++ b/spacy/cli/project/run.py
@@ -101,8 +101,8 @@ def project_run(
         if not (project_dir / dep).exists():
             err = f"Missing dependency specified by command '{subcommand}': {dep}"
             err_help = "Maybe you forgot to run the 'project assets' command or a previous step?"
-            err_kwargs = {"exits": 1} if not dry else {}
-            msg.fail(err, err_help, **err_kwargs)
+            err_exits = 1 if not dry else None
+            msg.fail(err, err_help, exits=err_exits)
     check_spacy_commit = check_bool_env_var(ENV_VARS.PROJECT_USE_GIT_VERSION)
     with working_dir(project_dir) as current_dir:
         msg.divider(subcommand)

From 1aadcfcb37ba166558688782fabbcbe3e32ea020 Mon Sep 17 00:00:00 2001
From: Ryn Daniels <397565+ryndaniels@users.noreply.github.com>
Date: Mon, 5 Dec 2022 11:17:10 +0200
Subject: [PATCH 10/20] update lock-threads to v4 (#11930)

---
 .github/workflows/lock.yml | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/lock.yml b/.github/workflows/lock.yml
index c9833cdba..794adee85 100644
--- a/.github/workflows/lock.yml
+++ b/.github/workflows/lock.yml
@@ -15,11 +15,11 @@ jobs:
   action:
     runs-on: ubuntu-latest
     steps:
-      - uses: dessant/lock-threads@v3
+      - uses: dessant/lock-threads@v4
        with:
          process-only: 'issues'
          issue-inactive-days: '30'
-          issue-comment: >
-            This thread has been automatically locked since there
-            has not been any recent activity after it was closed.
+          issue-comment: > 
+            This thread has been automatically locked since there 
+            has not been any recent activity after it was closed. 
           Please open a new issue for related bugs.

From 23085ffef4bba62aff0de5993ff405cb3ff3528c Mon Sep 17 00:00:00 2001
From: Zhangrp
Date: Tue, 6 Dec 2022 16:42:12 +0800
Subject: [PATCH 11/20] Fix interpolation in directory names, see #11235. (#11914)

---
 spacy/cli/_util.py      |  8 ++++----
 spacy/tests/test_cli.py | 19 +++++++++++++++++++
 2 files changed, 23 insertions(+), 4 deletions(-)

diff --git a/spacy/cli/_util.py b/spacy/cli/_util.py
index 7ce006108..9b97a9f19 100644
--- a/spacy/cli/_util.py
+++ b/spacy/cli/_util.py
@@ -158,15 +158,15 @@ def load_project_config(
         sys.exit(1)
     validate_project_version(config)
     validate_project_commands(config)
+    if interpolate:
+        err = f"{PROJECT_FILE} validation error"
+        with show_validation_error(title=err, hint_fill=False):
+            config = substitute_project_variables(config, overrides)
     # Make sure directories defined in config exist
     for subdir in config.get("directories", []):
         dir_path = path / subdir
         if not dir_path.exists():
             dir_path.mkdir(parents=True)
-    if interpolate:
-        err = f"{PROJECT_FILE} validation error"
-        with show_validation_error(title=err, hint_fill=False):
-            config = substitute_project_variables(config, overrides)
     return config
 

diff --git a/spacy/tests/test_cli.py b/spacy/tests/test_cli.py
index 2e706458f..3104b49ff 100644
--- a/spacy/tests/test_cli.py
+++ b/spacy/tests/test_cli.py
@@ -123,6 +123,25 @@ def test_issue7055():
     assert "model" in filled_cfg["components"]["ner"]
 
 
+@pytest.mark.issue(11235)
+def test_issue11235():
+    """
+    Test that the cli handles interpolation in the directory names correctly when loading project config.
+    """
+    lang_var = "en"
+    variables = {"lang": lang_var}
+    commands = [{"name": "x", "script": ["hello ${vars.lang}"]}]
+    directories = ["cfg", "${vars.lang}_model"]
+    project = {"commands": commands, "vars": variables, "directories": directories}
+    with make_tempdir() as d:
+        srsly.write_yaml(d / "project.yml", project)
+        cfg = load_project_config(d)
+        # Check that the directories are interpolated and created correctly
+        assert os.path.exists(d / "cfg")
+        assert os.path.exists(d / f"{lang_var}_model")
+        assert cfg["commands"][0]["script"][0] == f"hello {lang_var}"
+
+
 def test_cli_info():
     nlp = Dutch()
     nlp.add_pipe("textcat")

From 27fac7df2e67a0dbfefd68834c14fb1f9505da49 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Dani=C3=ABl=20de=20Kok?=
Date: Wed, 7 Dec 2022 05:53:41 +0100
Subject: [PATCH 12/20] EditTreeLemmatizer: correctly add strings when initializing from labels (#11934)

Strings in replacement nodes were not added to the `StringStore` when
`EditTreeLemmatizer` was initialized from a set of labels. The
corresponding test did not capture this because it added the strings
through the examples that were passed to the initialization. This
change fixes both this bug in the initialization and the 'shadowing'
of the bug in the test.
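To make the failure mode concrete, here is a minimal sketch (illustrative only, not part of the commit; it uses the public `StringStore` API that the one-line fix below switches to):

```python
# Sketch of the bug: StringStore.__getitem__ hashes a string without
# interning it, so the hash cannot be resolved back to text later.
from spacy.strings import StringStore

strings = StringStore()
h = strings["s"]      # computes and returns the hash of "s", but stores nothing
# strings[h]          # would raise KeyError: the hash is unknown to the store

h = strings.add("s")  # interns the string and returns the same hash
assert strings[h] == "s"  # the hash now resolves back to "s"
```

This mirrors the change from `self.vocab.strings[...]` to `self.vocab.strings.add(...)` in the diff below.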
---
 spacy/pipeline/edit_tree_lemmatizer.py       |  4 +-
 .../pipeline/test_edit_tree_lemmatizer.py    | 37 ++++++++++++++++++-
 2 files changed, 38 insertions(+), 3 deletions(-)

diff --git a/spacy/pipeline/edit_tree_lemmatizer.py b/spacy/pipeline/edit_tree_lemmatizer.py
index 12f9b73a3..a56c9975e 100644
--- a/spacy/pipeline/edit_tree_lemmatizer.py
+++ b/spacy/pipeline/edit_tree_lemmatizer.py
@@ -328,9 +328,9 @@ class EditTreeLemmatizer(TrainablePipe):
 
             tree = dict(tree)
             if "orig" in tree:
-                tree["orig"] = self.vocab.strings[tree["orig"]]
+                tree["orig"] = self.vocab.strings.add(tree["orig"])
             if "orig" in tree:
-                tree["subst"] = self.vocab.strings[tree["subst"]]
+                tree["subst"] = self.vocab.strings.add(tree["subst"])
 
             trees.append(tree)

diff --git a/spacy/tests/pipeline/test_edit_tree_lemmatizer.py b/spacy/tests/pipeline/test_edit_tree_lemmatizer.py
index cf541e301..b12ca5dd4 100644
--- a/spacy/tests/pipeline/test_edit_tree_lemmatizer.py
+++ b/spacy/tests/pipeline/test_edit_tree_lemmatizer.py
@@ -60,10 +60,45 @@ def test_initialize_from_labels():
     nlp2 = Language()
     lemmatizer2 = nlp2.add_pipe("trainable_lemmatizer")
     lemmatizer2.initialize(
-        get_examples=lambda: train_examples,
+        # We want to check that the strings in replacement nodes are
+        # added to the string store. Avoid that they get added through
+        # the examples.
+        get_examples=lambda: train_examples[:1],
         labels=lemmatizer.label_data,
     )
     assert lemmatizer2.tree2label == {1: 0, 3: 1, 4: 2, 6: 3}
+    assert lemmatizer2.label_data == {
+        "trees": [
+            {"orig": "S", "subst": "s"},
+            {
+                "prefix_len": 1,
+                "suffix_len": 0,
+                "prefix_tree": 0,
+                "suffix_tree": 4294967295,
+            },
+            {"orig": "s", "subst": ""},
+            {
+                "prefix_len": 0,
+                "suffix_len": 1,
+                "prefix_tree": 4294967295,
+                "suffix_tree": 2,
+            },
+            {
+                "prefix_len": 0,
+                "suffix_len": 0,
+                "prefix_tree": 4294967295,
+                "suffix_tree": 4294967295,
+            },
+            {"orig": "E", "subst": "e"},
+            {
+                "prefix_len": 1,
+                "suffix_len": 0,
+                "prefix_tree": 5,
+                "suffix_tree": 4294967295,
+            },
+        ],
+        "labels": (1, 3, 4, 6),
+    }
 
 
 def test_no_data():

From 916191848ab7bf90e88f23401451695f61903112 Mon Sep 17 00:00:00 2001
From: Paul O'Leary McCann
Date: Wed, 7 Dec 2022 18:09:04 +0900
Subject: [PATCH 13/20] Update scattertext example code (#11937)

* Update scattertext example code

* Remove PMI Filter Threshold
---
 website/meta/universe.json | 25 +++++++++++++++++++------
 1 file changed, 19 insertions(+), 6 deletions(-)

diff --git a/website/meta/universe.json b/website/meta/universe.json
index 97b53e9c5..8ca657561 100644
--- a/website/meta/universe.json
+++ b/website/meta/universe.json
@@ -1468,13 +1468,26 @@
       "image": "https://jasonkessler.github.io/2012conventions0.0.2.2.png",
       "code_example": [
         "import spacy",
-        "import scattertext as st",
         "",
-        "nlp = spacy.load('en')",
-        "corpus = st.CorpusFromPandas(convention_df,",
-        "                             category_col='party',",
-        "                             text_col='text',",
-        "                             nlp=nlp).build()"
+        "from scattertext import SampleCorpora, produce_scattertext_explorer",
+        "from scattertext import produce_scattertext_html",
+        "from scattertext.CorpusFromPandas import CorpusFromPandas",
+        "",
+        "nlp = spacy.load('en_core_web_sm')",
+        "convention_df = SampleCorpora.ConventionData2012.get_data()",
+        "corpus = CorpusFromPandas(convention_df,",
+        "                          category_col='party',",
+        "                          text_col='text',",
+        "                          nlp=nlp).build()",
+        "",
+        "html = produce_scattertext_html(corpus,",
+        "                                category='democrat',",
+        "                                category_name='Democratic',",
+        "                                not_category_name='Republican',",
+        "                                minimum_term_frequency=5,",
+        "                                width_in_pixels=1000)",
+        "open('./simple.html', 'wb').write(html.encode('utf-8'))",
+        "print('Open ./simple.html in Chrome or Firefox.')"
       ],
       "author": "Jason Kessler",
       "author_links": {

From 5c3a60e8f4273aff7bd47bce01d62c8224967045 Mon Sep 17 00:00:00 2001
From: Paul O'Leary McCann
Date: Wed, 7 Dec 2022 23:52:35 +0900
Subject: [PATCH 14/20] Add in errors used in the beam code that were removed at some point (#11935)

I don't think there's any way to use the beam code at the moment, but
as long as it's around the errors it refers to should also be present.
---
 spacy/errors.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/spacy/errors.py b/spacy/errors.py
index e34614b0f..0e5ef91ed 100644
--- a/spacy/errors.py
+++ b/spacy/errors.py
@@ -345,6 +345,11 @@ class Errors(metaclass=ErrorsWithCodes):
             "clear the existing vectors and resize the table.")
     E074 = ("Error interpreting compiled match pattern: patterns are expected "
             "to end with the attribute {attr}. Got: {bad_attr}.")
+    E079 = ("Error computing states in beam: number of predicted beams "
+            "({pbeams}) does not equal number of gold beams ({gbeams}).")
+    E080 = ("Duplicate state found in beam: {key}.")
+    E081 = ("Error getting gradient in beam: number of histories ({n_hist}) "
+            "does not equal number of losses ({losses}).")
     E082 = ("Error deprojectivizing parse: number of heads ({n_heads}), "
             "projective heads ({n_proj_heads}) and labels ({n_labels}) do not "
             "match.")

From 73919336fb1b003425373a07d41e5541dc5c3c46 Mon Sep 17 00:00:00 2001
From: Paul O'Leary McCann
Date: Wed, 7 Dec 2022 23:56:03 +0900
Subject: [PATCH 15/20] Remove spacy-sentence-segmenter from Universe (#11932)

---
 website/meta/universe.json | 19 -------------------
 1 file changed, 19 deletions(-)

diff --git a/website/meta/universe.json b/website/meta/universe.json
index 8ca657561..db533c3b2 100644
--- a/website/meta/universe.json
+++ b/website/meta/universe.json
@@ -1023,25 +1023,6 @@
       },
       "category": ["pipeline"]
     },
-    {
-      "id": "spacy-sentence-segmenter",
-      "title": "Sentence Segmenter",
-      "slogan": "Custom sentence segmentation for spaCy",
-      "code_example": [
-        "from seg.newline.segmenter import NewLineSegmenter",
-        "import spacy",
-        "",
-        "nlseg = NewLineSegmenter()",
-        "nlp = spacy.load('en')",
-        "nlp.add_pipe(nlseg.set_sent_starts, name='sentence_segmenter', before='parser')",
-        "doc = nlp(my_doc_text)"
-      ],
-      "author": "tc64",
-      "author_links": {
-        "github": "tc64"
-      },
-      "category": ["pipeline"]
-    },
     {
       "id": "spacy_cld",
       "title": "spaCy-CLD",

From 6d2ca1ab3a545491acbe058035677a263135e52a Mon Sep 17 00:00:00 2001
From: vincent d warmerdam
Date: Wed, 7 Dec 2022 16:02:09 +0100
Subject: [PATCH 16/20] Update custom solutions links (#11903)

* Update custom solutions

Will now point to https://explosion.ai/custom-solutions

* added-sidebar

* added-analysis-to-readme

* update-landing-page
---
 README.md                      | 2 ++
 website/meta/sidebars.json     | 2 +-
 website/meta/site.json         | 2 +-
 website/src/widgets/landing.js | 4 ++--
 4 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index 7595460fb..195424551 100644
--- a/README.md
+++ b/README.md
@@ -46,6 +46,7 @@
 | 🛠 **[Changelog]**       | Changes and version history. |
 | 💝 **[Contribute]**      | How to contribute to the spaCy project and code base. |
 | spaCy Tailored Pipelines | Get a custom spaCy pipeline, tailor-made for your NLP problem by spaCy's core developers. Streamlined, production-ready, predictable and maintainable. Start by completing our 5-minute questionnaire to tell us what you need and we'll be in touch! **[Learn more →](https://explosion.ai/spacy-tailored-pipelines)** |
+| spaCy Tailored Pipelines | Bespoke advice for problem solving, strategy and analysis for applied NLP projects. Services include data strategy, code reviews, pipeline design and annotation coaching. Curious? Fill in our 5-minute questionnaire to tell us what you need and we'll be in touch! **[Learn more →](https://explosion.ai/spacy-tailored-analysis)** |
 
 [spacy 101]: https://spacy.io/usage/spacy-101
 [new in v3.0]: https://spacy.io/usage/v3
@@ -59,6 +60,7 @@
 [changelog]: https://spacy.io/usage#changelog
 [contribute]: https://github.com/explosion/spaCy/blob/master/CONTRIBUTING.md
 
+
 ## 💬 Where to ask questions
 
 The spaCy project is maintained by the [spaCy team](https://explosion.ai/about).

diff --git a/website/meta/sidebars.json b/website/meta/sidebars.json
index 2d8745d77..339e4085b 100644
--- a/website/meta/sidebars.json
+++ b/website/meta/sidebars.json
@@ -45,7 +45,7 @@
         { "text": "v2.x Documentation", "url": "https://v2.spacy.io" },
         {
           "text": "Custom Solutions",
-          "url": "https://explosion.ai/spacy-tailored-pipelines"
+          "url": "https://explosion.ai/custom-solutions"
         }
       ]
     }

diff --git a/website/meta/site.json b/website/meta/site.json
index 360a72178..fa79d3c69 100644
--- a/website/meta/site.json
+++ b/website/meta/site.json
@@ -51,7 +51,7 @@
       { "text": "Online Course", "url": "https://course.spacy.io" },
       {
         "text": "Custom Solutions",
-        "url": "https://explosion.ai/spacy-tailored-pipelines"
+        "url": "https://explosion.ai/custom-solutions"
       }
     ]
   },

diff --git a/website/src/widgets/landing.js b/website/src/widgets/landing.js
index b7ae35f6e..c3aaa8a22 100644
--- a/website/src/widgets/landing.js
+++ b/website/src/widgets/landing.js
@@ -105,13 +105,13 @@ const Landing = ({ data }) => {
-
+
           spaCy Tailored Pipelines

From f22fc7a1138545a2a75975909b5af554e8e1d616 Mon Sep 17 00:00:00 2001
From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com>
Date: Fri, 9 Dec 2022 10:15:52 +0100
Subject: [PATCH 17/20] Auto-format code with black (#11955)

Co-authored-by: explosion-bot
---
 spacy/tests/test_cli.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/spacy/tests/test_cli.py b/spacy/tests/test_cli.py
index 3104b49ff..42af08749 100644
--- a/spacy/tests/test_cli.py
+++ b/spacy/tests/test_cli.py
@@ -140,7 +140,7 @@ def test_issue11235():
         assert os.path.exists(d / "cfg")
         assert os.path.exists(d / f"{lang_var}_model")
         assert cfg["commands"][0]["script"][0] == f"hello {lang_var}"
-    
+
 
 def test_cli_info():

From 8c291ace0c0978e70257906438d3585022090e9f Mon Sep 17 00:00:00 2001
From: Adriane Boyd
Date: Mon, 12 Dec 2022 08:38:36 +0100
Subject: [PATCH 18/20] Extend to wasabi v1.1 (#11945)

* Extend to wasabi v1.1

* Temporarily run mypy and tests with newest wasabi

* Temporarily skip check requirements test

* Revert "Temporarily skip check requirements test"

This reverts commit 44f4ce20a8e8c92e8bfc8042cc68333589a96253.

* Revert "Temporarily run mypy and tests with newest wasabi"

This reverts commit e677a2257ced55e696cafc3a8e89eb2f7ddfc369.
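As an aside on what the widened pin admits, a small illustration (not part of the commit; the `packaging` library used here is an assumption, chosen purely for demonstration):

```python
# Sketch: the requirements change below relaxes the wasabi upper bound,
# allowing the v1.1.x series while still excluding v1.2 and later.
from packaging.specifiers import SpecifierSet
from packaging.version import Version

old_pin = SpecifierSet(">=0.9.1,<1.1.0")
new_pin = SpecifierSet(">=0.9.1,<1.2.0")

assert Version("1.1.0") not in old_pin  # previously excluded
assert Version("1.1.0") in new_pin      # now allowed
assert Version("1.2.0") not in new_pin  # still excluded
```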
---
 requirements.txt | 2 +-
 setup.cfg        | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/requirements.txt b/requirements.txt
index 778c05e21..0440835f2 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,7 +6,7 @@ preshed>=3.0.2,<3.1.0
 thinc>=8.1.0,<8.2.0
 ml_datasets>=0.2.0,<0.3.0
 murmurhash>=0.28.0,<1.1.0
-wasabi>=0.9.1,<1.1.0
+wasabi>=0.9.1,<1.2.0
 srsly>=2.4.3,<3.0.0
 catalogue>=2.0.6,<2.1.0
 typer>=0.3.0,<0.8.0

diff --git a/setup.cfg b/setup.cfg
index 5768c9d3e..cf6e6f84b 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -47,7 +47,7 @@ install_requires =
     cymem>=2.0.2,<2.1.0
     preshed>=3.0.2,<3.1.0
     thinc>=8.1.0,<8.2.0
-    wasabi>=0.9.1,<1.1.0
+    wasabi>=0.9.1,<1.2.0
     srsly>=2.4.3,<3.0.0
     catalogue>=2.0.6,<2.1.0
     # Third-party dependencies

From 0591e67265d7378769c0fc0df4020817f2d514ec Mon Sep 17 00:00:00 2001
From: Adriane Boyd
Date: Mon, 12 Dec 2022 08:45:35 +0100
Subject: [PATCH 19/20] Cast to uint64 for all array-based doc representations (#11933)

* Convert all individual values explicitly to uint64 for array-based doc representations

* Temporarily test with latest numpy v1.24.0rc

* Remove unnecessary conversion from attr_t

* Reduce number of individual casts

* Convert specifically from int32 to uint64

* Revert "Temporarily test with latest numpy v1.24.0rc"

This reverts commit eb0e3c5006515b9a7ff52bae59484c909b8a3f65.

* Also use int32 in tests
---
 spacy/tests/doc/test_array.py |  4 ++--
 spacy/tokens/doc.pyx          |  2 ++
 spacy/tokens/span.pyx         |  4 ++--
 spacy/training/example.pyx    | 15 ++++++++-------
 4 files changed, 14 insertions(+), 11 deletions(-)

diff --git a/spacy/tests/doc/test_array.py b/spacy/tests/doc/test_array.py
index c334cc6eb..1f2d7d999 100644
--- a/spacy/tests/doc/test_array.py
+++ b/spacy/tests/doc/test_array.py
@@ -123,14 +123,14 @@ def test_doc_from_array_heads_in_bounds(en_vocab):
 
     # head before start
     arr = doc.to_array(["HEAD"])
-    arr[0] = -1
+    arr[0] = numpy.int32(-1).astype(numpy.uint64)
     doc_from_array = Doc(en_vocab, words=words)
     with pytest.raises(ValueError):
         doc_from_array.from_array(["HEAD"], arr)
 
     # head after end
     arr = doc.to_array(["HEAD"])
-    arr[0] = 5
+    arr[0] = numpy.int32(5).astype(numpy.uint64)
     doc_from_array = Doc(en_vocab, words=words)
     with pytest.raises(ValueError):
         doc_from_array.from_array(["HEAD"], arr)

diff --git a/spacy/tokens/doc.pyx b/spacy/tokens/doc.pyx
index f2621292c..075bc4d15 100644
--- a/spacy/tokens/doc.pyx
+++ b/spacy/tokens/doc.pyx
@@ -359,6 +359,7 @@ cdef class Doc:
         for annot in annotations:
            if annot:
                 if annot is heads or annot is sent_starts or annot is ent_iobs:
+                    annot = numpy.array(annot, dtype=numpy.int32).astype(numpy.uint64)
                     for i in range(len(words)):
                         if attrs.ndim == 1:
                             attrs[i] = annot[i]
@@ -1558,6 +1559,7 @@ cdef class Doc:
 
         for j, (attr, annot) in enumerate(token_annotations.items()):
             if attr is HEAD:
+                annot = numpy.array(annot, dtype=numpy.int32).astype(numpy.uint64)
                 for i in range(len(words)):
                     array[i, j] = annot[i]
             elif attr is MORPH:

diff --git a/spacy/tokens/span.pyx b/spacy/tokens/span.pyx
index c3495f497..99a5f43bd 100644
--- a/spacy/tokens/span.pyx
+++ b/spacy/tokens/span.pyx
@@ -299,7 +299,7 @@ cdef class Span:
             for ancestor in ancestors:
                 ancestor_i = ancestor.i - self.c.start
                 if ancestor_i in range(length):
-                    array[i, head_col] = ancestor_i - i
+                    array[i, head_col] = numpy.int32(ancestor_i - i).astype(numpy.uint64)
 
         # if there is no appropriate ancestor, define a new artificial root
         value = array[i, head_col]
@@ -307,7 +307,7 @@ cdef class Span:
                 new_root = old_to_new_root.get(ancestor_i, None)
                 if new_root is not None:
                     # take the same artificial root as a previous token from the same sentence
-                    array[i, head_col] = new_root - i
+                    array[i, head_col] = numpy.int32(new_root - i).astype(numpy.uint64)
                 else:
                     # set this token as the new artificial root
                     array[i, head_col] = 0

diff --git a/spacy/training/example.pyx b/spacy/training/example.pyx
index dfd337b9e..95b0f0de9 100644
--- a/spacy/training/example.pyx
+++ b/spacy/training/example.pyx
@@ -443,26 +443,27 @@ def _annot2array(vocab, tok_annot, doc_annot):
             if key not in IDS:
                 raise ValueError(Errors.E974.format(obj="token", key=key))
             elif key in ["ORTH", "SPACY"]:
-                pass
+                continue
             elif key == "HEAD":
                 attrs.append(key)
-                values.append([h-i if h is not None else 0 for i, h in enumerate(value)])
+                row = [h-i if h is not None else 0 for i, h in enumerate(value)]
             elif key == "DEP":
                 attrs.append(key)
-                values.append([vocab.strings.add(h) if h is not None else MISSING_DEP for h in value])
+                row = [vocab.strings.add(h) if h is not None else MISSING_DEP for h in value]
             elif key == "SENT_START":
                 attrs.append(key)
-                values.append([to_ternary_int(v) for v in value])
+                row = [to_ternary_int(v) for v in value]
             elif key == "MORPH":
                 attrs.append(key)
-                values.append([vocab.morphology.add(v) for v in value])
+                row = [vocab.morphology.add(v) for v in value]
             else:
                 attrs.append(key)
                 if not all(isinstance(v, str) for v in value):
                     types = set([type(v) for v in value])
                     raise TypeError(Errors.E969.format(field=key, types=types)) from None
-                values.append([vocab.strings.add(v) for v in value])
-    array = numpy.asarray(values, dtype="uint64")
+                row = [vocab.strings.add(v) for v in value]
+            values.append([numpy.array(v, dtype=numpy.int32).astype(numpy.uint64) if v < 0 else v for v in row])
+    array = numpy.array(values, dtype=numpy.uint64)
     return attrs, array.T

From e5c7f3b0776d49c4f6aab7e02b503cdb84fb2134 Mon Sep 17 00:00:00 2001
From: Adriane Boyd
Date: Mon, 12 Dec 2022 10:13:10 +0100
Subject: [PATCH 20/20] CI: Install thinc-apple-ops through extra (#11963)

---
 .github/azure-steps.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/azure-steps.yml b/.github/azure-steps.yml
index 2f77706b8..d0db75f9a 100644
--- a/.github/azure-steps.yml
+++ b/.github/azure-steps.yml
@@ -107,7 +107,7 @@ steps:
     displayName: "Run CPU tests"
 
   - script: |
-      python -m pip install --pre thinc-apple-ops
+      python -m pip install 'spacy[apple]'
       python -m pytest --pyargs spacy
     displayName: "Run CPU tests with thinc-apple-ops"
     condition: and(startsWith(variables['imageName'], 'macos'), eq(variables['python.version'], '3.11'))
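A closing note on the int32-to-uint64 casts introduced in [PATCH 19/20] above. The sketch below (illustrative only, not from any diff; it assumes the numpy v1.24 behavior change that motivated the patch) shows why negative head offsets need an explicit conversion before they land in an unsigned attribute array:

```python
# Recent numpy rejects implicit negative-to-unsigned assignment, so the
# patch routes negative values through an explicit int32 -> uint64 cast,
# which wraps them via two's complement.
import numpy

arr = numpy.zeros((1,), dtype=numpy.uint64)
try:
    arr[0] = -1  # OverflowError on numpy v1.24+
except OverflowError:
    pass

arr[0] = numpy.int32(-1).astype(numpy.uint64)  # the pattern used in the patch
assert arr[0] == numpy.uint64(2**64 - 1)       # -1 stored as its wrapped value
```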