From 605f84938b6cea3d8c91b0aeb4dd6d41e375fe82 Mon Sep 17 00:00:00 2001 From: Gor Arakelyan Date: Fri, 10 Jun 2022 13:33:17 +0400 Subject: [PATCH 01/29] Add "Aim-spaCy" to spaCy Universe (#10943) * Add Aim-spaCy to spaCy universe * Update Aim thumbnail * Fix author links Co-authored-by: Paul O'Leary McCann --- website/meta/universe.json | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/website/meta/universe.json b/website/meta/universe.json index b7f340f52..9b644adf4 100644 --- a/website/meta/universe.json +++ b/website/meta/universe.json @@ -1,5 +1,28 @@ { "resources": [ + { + "id": "aim-spacy", + "title": "Aim-spaCy", + "slogan": "Aim-spaCy is an Aim-based spaCy experiment tracker.", + "description": "Aim-spaCy helps to easily collect, store and explore training logs for spaCy, including: hyper-parameters, metrics and displaCy visualizations", + "github": "aimhubio/aim-spacy", + "pip": "aim-spacy", + "code_example": [ + "https://github.com/aimhubio/aim-spacy/tree/master/examples" + ], + "code_language": "python", + "url": "https://aimstack.io/spacy", + "thumb": "https://user-images.githubusercontent.com/13848158/172912427-ee9327ea-3cd8-47fa-8427-6c0d36cd831f.png", + "image": "https://user-images.githubusercontent.com/13848158/136364717-0939222c-55b6-44f0-ad32-d9ab749546e4.png", + "author": "AimStack", + "author_links": { + "twitter": "aimstackio", + "github": "aimhubio", + "website": "https://aimstack.io" + }, + "category": ["visualizers"], + "tags": ["experiment-tracking", "visualization"] + }, { "id": "spacy-report", "title": "spacy-report", From 97e8a5041b14a5e125866245b3f789e1b8caf7b9 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 10 Jun 2022 13:21:33 +0200 Subject: [PATCH 02/29] Auto-format code with black (#10945) Co-authored-by: explosion-bot --- spacy/tests/parser/test_nonproj.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/spacy/tests/parser/test_nonproj.py b/spacy/tests/parser/test_nonproj.py index b420c300f..051d0ef0c 100644 --- a/spacy/tests/parser/test_nonproj.py +++ b/spacy/tests/parser/test_nonproj.py @@ -49,7 +49,9 @@ def test_parser_contains_cycle(tree, cyclic_tree, partial_tree, multirooted_tree assert contains_cycle(multirooted_tree) is None -def test_parser_is_nonproj_arc(cyclic_tree, nonproj_tree, partial_tree, multirooted_tree): +def test_parser_is_nonproj_arc( + cyclic_tree, nonproj_tree, partial_tree, multirooted_tree +): assert is_nonproj_arc(0, nonproj_tree) is False assert is_nonproj_arc(1, nonproj_tree) is False assert is_nonproj_arc(2, nonproj_tree) is False @@ -62,7 +64,9 @@ def test_parser_is_nonproj_arc(cyclic_tree, nonproj_tree, partial_tree, multiroo assert is_nonproj_arc(7, partial_tree) is False assert is_nonproj_arc(17, multirooted_tree) is False assert is_nonproj_arc(16, multirooted_tree) is True - with pytest.raises(ValueError, match=r'Found cycle in dependency graph: \[1, 2, 2, 4, 5, 3, 2\]'): + with pytest.raises( + ValueError, match=r"Found cycle in dependency graph: \[1, 2, 2, 4, 5, 3, 2\]" + ): is_nonproj_arc(6, cyclic_tree) @@ -73,7 +77,9 @@ def test_parser_is_nonproj_tree( assert is_nonproj_tree(nonproj_tree) is True assert is_nonproj_tree(partial_tree) is False assert is_nonproj_tree(multirooted_tree) is True - with pytest.raises(ValueError, match=r'Found cycle in dependency graph: \[1, 2, 2, 4, 5, 3, 2\]'): + with pytest.raises( + ValueError, match=r"Found cycle in dependency graph: \[1, 2, 2, 4, 5, 3, 2\]" + ): 
is_nonproj_tree(cyclic_tree) From a83a50119520ea8708f0ef0730f65f486556c273 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dani=C3=ABl=20de=20Kok?= Date: Fri, 10 Jun 2022 18:12:28 +0200 Subject: [PATCH 03/29] precomputable_biaffine: avoid concatenation (#10911) The `forward` of `precomputable_biaffine` performs matrix multiplication and then `vstack`s the result with padding. This creates a temporary array used for the output of matrix concatenation. This change avoids the temporary by pre-allocating an array that is large enough for the output of matrix multiplication plus padding and fills the array in-place. This gave me a small speedup (a bit over 100 WPS) on de_core_news_lg on M1 Max (after changing thinc-apple-ops to support in-place gemm as BLIS does). --- spacy/ml/_precomputable_affine.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/spacy/ml/_precomputable_affine.py b/spacy/ml/_precomputable_affine.py index b99de2d2b..007d68aca 100644 --- a/spacy/ml/_precomputable_affine.py +++ b/spacy/ml/_precomputable_affine.py @@ -22,9 +22,11 @@ def forward(model, X, is_train): nP = model.get_dim("nP") nI = model.get_dim("nI") W = model.get_param("W") - Yf = model.ops.gemm(X, W.reshape((nF * nO * nP, nI)), trans2=True) + # Preallocate array for layer output, including padding. + Yf = model.ops.alloc2f(X.shape[0] + 1, nF * nO * nP, zeros=False) + model.ops.gemm(X, W.reshape((nF * nO * nP, nI)), trans2=True, out=Yf[1:]) Yf = Yf.reshape((Yf.shape[0], nF, nO, nP)) - Yf = model.ops.xp.vstack((model.get_param("pad"), Yf)) + Yf[0] = model.get_param("pad") def backward(dY_ids): # This backprop is particularly tricky, because we get back a different From 126d1db1234295a901d57553e275a6d9adf593ab Mon Sep 17 00:00:00 2001 From: Madeesh Kannan Date: Mon, 13 Jun 2022 10:56:45 +0200 Subject: [PATCH 04/29] Add failing test: `test_matcher_extension_in_set_predicate` (#10948) --- spacy/tests/matcher/test_matcher_api.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/spacy/tests/matcher/test_matcher_api.py b/spacy/tests/matcher/test_matcher_api.py index a27baf130..e8c3d53e8 100644 --- a/spacy/tests/matcher/test_matcher_api.py +++ b/spacy/tests/matcher/test_matcher_api.py @@ -476,6 +476,17 @@ def test_matcher_extension_set_membership(en_vocab): assert len(matches) == 0 +@pytest.mark.xfail(reason="IN predicate must handle sequence values in extensions") +def test_matcher_extension_in_set_predicate(en_vocab): + matcher = Matcher(en_vocab) + Token.set_extension("ext", default=[]) + pattern = [{"_": {"ext": {"IN": ["A", "C"]}}}] + matcher.add("M", [pattern]) + doc = Doc(en_vocab, words=["a", "b", "c"]) + doc[0]._.ext = ["A", "B"] + assert len(matcher(doc)) == 1 + + def test_matcher_basic_check(en_vocab): matcher = Matcher(en_vocab) # Potential mistake: pass in pattern instead of list of patterns From 0d352c46ed74484429b53809370ca0041b139f12 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dani=C3=ABl=20de=20Kok?= Date: Wed, 15 Jun 2022 15:32:02 +0200 Subject: [PATCH 05/29] vectors: remove use of float as row number (#10955) The float -1 was returned rather than the integer -1 as the row for unknown keys. This doesn't introduce a realy bug, since such floats cast (without issues) to int in the conversion to NumPy arrays. Still, it's nice to to do the correct thing :). 
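To make the point above concrete, here is a minimal standalone sketch (not spaCy's actual `Vectors` internals; the toy `key2row` dict and keys are made up for illustration) showing why the float fallback was harmless in practice, while the integer fallback is still the cleaner choice:

```python
import numpy

# Toy stand-in for Vectors.key2row: maps keys to row indices.
key2row = {"cat": 0, "dog": 1}
keys = ["cat", "fish"]  # "fish" is an unknown key

rows_float = [key2row.get(key, -1.0) for key in keys]  # old behaviour: float fallback
rows_int = [key2row.get(key, -1) for key in keys]      # new behaviour: int fallback

# Both cast to the same int32 array, so there is no behavioural bug ...
assert (numpy.asarray(rows_float, dtype="i") == numpy.asarray(rows_int, dtype="i")).all()
# ... but the float default silently mixes int and float in the intermediate list.
print(rows_float, rows_int)  # [0, -1.0] [0, -1]
```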
--- spacy/vectors.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spacy/vectors.pyx b/spacy/vectors.pyx index bcba9d03f..93f6818ee 100644 --- a/spacy/vectors.pyx +++ b/spacy/vectors.pyx @@ -339,7 +339,7 @@ cdef class Vectors: return self.key2row.get(key, -1) elif keys is not None: keys = [get_string_id(key) for key in keys] - rows = [self.key2row.get(key, -1.) for key in keys] + rows = [self.key2row.get(key, -1) for key in keys] return xp.asarray(rows, dtype="i") else: row2key = {row: key for key, row in self.key2row.items()} From 3d3fbeda9f5fa3164a0aef983d606c67b677a744 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dani=C3=ABl=20de=20Kok?= Date: Thu, 16 Jun 2022 11:42:34 +0200 Subject: [PATCH 06/29] Update for CBlas changes in Thinc 8.1.0.dev2 (#10970) --- pyproject.toml | 2 +- requirements.txt | 2 +- setup.cfg | 4 ++-- spacy/ml/parser_model.pyx | 5 +++-- 4 files changed, 7 insertions(+), 6 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 14e09e30f..4fea41be2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ requires = [ "cymem>=2.0.2,<2.1.0", "preshed>=3.0.2,<3.1.0", "murmurhash>=0.28.0,<1.1.0", - "thinc>=8.1.0.dev0,<8.2.0", + "thinc>=8.1.0.dev2,<8.2.0", "pathy", "numpy>=1.15.0", ] diff --git a/requirements.txt b/requirements.txt index b2929145e..082ef1522 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ spacy-legacy>=3.0.9,<3.1.0 spacy-loggers>=1.0.0,<2.0.0 cymem>=2.0.2,<2.1.0 preshed>=3.0.2,<3.1.0 -thinc>=8.1.0.dev0,<8.2.0 +thinc>=8.1.0.dev2,<8.2.0 ml_datasets>=0.2.0,<0.3.0 murmurhash>=0.28.0,<1.1.0 wasabi>=0.9.1,<1.1.0 diff --git a/setup.cfg b/setup.cfg index c6036a8b3..110a2e4ee 100644 --- a/setup.cfg +++ b/setup.cfg @@ -38,7 +38,7 @@ setup_requires = cymem>=2.0.2,<2.1.0 preshed>=3.0.2,<3.1.0 murmurhash>=0.28.0,<1.1.0 - thinc>=8.1.0.dev0,<8.2.0 + thinc>=8.1.0.dev2,<8.2.0 install_requires = # Our libraries spacy-legacy>=3.0.9,<3.1.0 @@ -46,7 +46,7 @@ install_requires = murmurhash>=0.28.0,<1.1.0 cymem>=2.0.2,<2.1.0 preshed>=3.0.2,<3.1.0 - thinc>=8.1.0.dev0,<8.2.0 + thinc>=8.1.0.dev2,<8.2.0 wasabi>=0.9.1,<1.1.0 srsly>=2.4.3,<3.0.0 catalogue>=2.0.6,<2.1.0 diff --git a/spacy/ml/parser_model.pyx b/spacy/ml/parser_model.pyx index 57f933b07..e045dc3b7 100644 --- a/spacy/ml/parser_model.pyx +++ b/spacy/ml/parser_model.pyx @@ -4,6 +4,7 @@ from libc.math cimport exp from libc.string cimport memset, memcpy from libc.stdlib cimport calloc, free, realloc from thinc.backends.linalg cimport Vec, VecVec +from thinc.backends.cblas cimport saxpy, sgemm import numpy import numpy.random @@ -112,7 +113,7 @@ cdef void predict_states(CBlas cblas, ActivationsC* A, StateC** states, memcpy(A.scores, A.hiddens, n.states * n.classes * sizeof(float)) else: # Compute hidden-to-output - cblas.sgemm()(False, True, n.states, n.classes, n.hiddens, + sgemm(cblas)(False, True, n.states, n.classes, n.hiddens, 1.0, A.hiddens, n.hiddens, W.hidden_weights, n.hiddens, 0.0, A.scores, n.classes) @@ -147,7 +148,7 @@ cdef void sum_state_features(CBlas cblas, float* output, else: idx = token_ids[f] * id_stride + f*O feature = &cached[idx] - cblas.saxpy()(O, one, feature, 1, &output[b*O], 1) + saxpy(cblas)(O, one, feature, 1, &output[b*O], 1) token_ids += F From a7f6bc5dfb9df4f010d1748d7352f0ff75e7ac61 Mon Sep 17 00:00:00 2001 From: Raphael Mitsch Date: Fri, 17 Jun 2022 12:15:36 +0200 Subject: [PATCH 07/29] Workaround for Typer optional default values with Python calls (#10788) * Workaround for Typer optional default values with Python calls: added test and 
workaround. * @rmitsch Workaround for Typer optional default values with Python calls: reverting some black formatting changes. Co-authored-by: Sofie Van Landeghem * @rmitsch Workaround for Typer optional default values with Python calls: removing return type hint. Co-authored-by: Sofie Van Landeghem * Workaround for Typer optional default values with Python calls: fixed imports, added GitHub issue marker. * Workaround for Typer optional default values with Python calls: removed forcing of default values for optional arguments in init_config_cli(). Added default values for init_config(). Synchronized default values for init_config_cli() and init_config(). * Workaround for Typer optional default values with Python calls: removed unused import. * Workaround for Typer optional default values with Python calls: fixed usage of optimize in init_config_cli(). * Workaround for Typer optional default values with Pythhon calls: remove output_file from InitDefaultValues. * Workaround for Typer optional default values with Python calls: rename class for default init values. * Workaround for Typer optional default values with Python calls: remove newline. * remove introduced newlines * Remove test_init_config_from_python_without_optional_args(). * remove leftover import * reformat import * remove duplicate Co-authored-by: Sofie Van Landeghem --- spacy/cli/init_config.py | 37 ++++++++++++++++++++++++++----------- 1 file changed, 26 insertions(+), 11 deletions(-) diff --git a/spacy/cli/init_config.py b/spacy/cli/init_config.py index d4cd939c2..b634caa4c 100644 --- a/spacy/cli/init_config.py +++ b/spacy/cli/init_config.py @@ -10,6 +10,7 @@ from jinja2 import Template from .. import util from ..language import DEFAULT_CONFIG_PRETRAIN_PATH from ..schemas import RecommendationSchema +from ..util import SimpleFrozenList from ._util import init_cli, Arg, Opt, show_validation_error, COMMAND from ._util import string_to_list, import_code @@ -24,16 +25,30 @@ class Optimizations(str, Enum): accuracy = "accuracy" +class InitValues: + """ + Default values for initialization. Dedicated class to allow synchronized default values for init_config_cli() and + init_config(), i.e. initialization calls via CLI respectively Python. + """ + + lang = "en" + pipeline = SimpleFrozenList(["tagger", "parser", "ner"]) + optimize = Optimizations.efficiency + gpu = False + pretraining = False + force_overwrite = False + + @init_cli.command("config") def init_config_cli( # fmt: off output_file: Path = Arg(..., help="File to save the config to or - for stdout (will only output config and no additional logging info)", allow_dash=True), - lang: str = Opt("en", "--lang", "-l", help="Two-letter code of the language to use"), - pipeline: str = Opt("tagger,parser,ner", "--pipeline", "-p", help="Comma-separated names of trainable pipeline components to include (without 'tok2vec' or 'transformer')"), - optimize: Optimizations = Opt(Optimizations.efficiency.value, "--optimize", "-o", help="Whether to optimize for efficiency (faster inference, smaller model, lower memory consumption) or higher accuracy (potentially larger and slower model). This will impact the choice of architecture, pretrained weights and related hyperparameters."), - gpu: bool = Opt(False, "--gpu", "-G", help="Whether the model can run on GPU. 
This will impact the choice of architecture, pretrained weights and related hyperparameters."), - pretraining: bool = Opt(False, "--pretraining", "-pt", help="Include config for pretraining (with 'spacy pretrain')"), - force_overwrite: bool = Opt(False, "--force", "-F", help="Force overwriting the output file"), + lang: str = Opt(InitValues.lang, "--lang", "-l", help="Two-letter code of the language to use"), + pipeline: str = Opt(",".join(InitValues.pipeline), "--pipeline", "-p", help="Comma-separated names of trainable pipeline components to include (without 'tok2vec' or 'transformer')"), + optimize: Optimizations = Opt(InitValues.optimize, "--optimize", "-o", help="Whether to optimize for efficiency (faster inference, smaller model, lower memory consumption) or higher accuracy (potentially larger and slower model). This will impact the choice of architecture, pretrained weights and related hyperparameters."), + gpu: bool = Opt(InitValues.gpu, "--gpu", "-G", help="Whether the model can run on GPU. This will impact the choice of architecture, pretrained weights and related hyperparameters."), + pretraining: bool = Opt(InitValues.pretraining, "--pretraining", "-pt", help="Include config for pretraining (with 'spacy pretrain')"), + force_overwrite: bool = Opt(InitValues.force_overwrite, "--force", "-F", help="Force overwriting the output file"), # fmt: on ): """ @@ -133,11 +148,11 @@ def fill_config( def init_config( *, - lang: str, - pipeline: List[str], - optimize: str, - gpu: bool, - pretraining: bool = False, + lang: str = InitValues.lang, + pipeline: List[str] = InitValues.pipeline, + optimize: str = InitValues.optimize, + gpu: bool = InitValues.gpu, + pretraining: bool = InitValues.pretraining, silent: bool = True, ) -> Config: msg = Printer(no_print=silent) From d50668dbf054dddc42bb55dcf3431affc4660736 Mon Sep 17 00:00:00 2001 From: Raphael Mitsch Date: Fri, 17 Jun 2022 15:55:34 +0200 Subject: [PATCH 08/29] Made _initialize_X() methods private. 
(#10978) --- spacy/kb.pyx | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/spacy/kb.pyx b/spacy/kb.pyx index 9a765c8e4..ae1983a8d 100644 --- a/spacy/kb.pyx +++ b/spacy/kb.pyx @@ -93,14 +93,14 @@ cdef class KnowledgeBase: self.vocab = vocab self._create_empty_vectors(dummy_hash=self.vocab.strings[""]) - def initialize_entities(self, int64_t nr_entities): + def _initialize_entities(self, int64_t nr_entities): self._entry_index = PreshMap(nr_entities + 1) self._entries = entry_vec(nr_entities + 1) - def initialize_vectors(self, int64_t nr_entities): + def _initialize_vectors(self, int64_t nr_entities): self._vectors_table = float_matrix(nr_entities + 1) - def initialize_aliases(self, int64_t nr_aliases): + def _initialize_aliases(self, int64_t nr_aliases): self._alias_index = PreshMap(nr_aliases + 1) self._aliases_table = alias_vec(nr_aliases + 1) @@ -155,8 +155,8 @@ cdef class KnowledgeBase: raise ValueError(Errors.E140) nr_entities = len(set(entity_list)) - self.initialize_entities(nr_entities) - self.initialize_vectors(nr_entities) + self._initialize_entities(nr_entities) + self._initialize_vectors(nr_entities) i = 0 cdef KBEntryC entry @@ -388,9 +388,9 @@ cdef class KnowledgeBase: nr_entities = header[0] nr_aliases = header[1] entity_vector_length = header[2] - self.initialize_entities(nr_entities) - self.initialize_vectors(nr_entities) - self.initialize_aliases(nr_aliases) + self._initialize_entities(nr_entities) + self._initialize_vectors(nr_entities) + self._initialize_aliases(nr_aliases) self.entity_vector_length = entity_vector_length def deserialize_vectors(b): @@ -512,8 +512,8 @@ cdef class KnowledgeBase: cdef int64_t entity_vector_length reader.read_header(&nr_entities, &entity_vector_length) - self.initialize_entities(nr_entities) - self.initialize_vectors(nr_entities) + self._initialize_entities(nr_entities) + self._initialize_vectors(nr_entities) self.entity_vector_length = entity_vector_length # STEP 1: load entity vectors @@ -552,7 +552,7 @@ cdef class KnowledgeBase: # STEP 3: load aliases cdef int64_t nr_aliases reader.read_alias_length(&nr_aliases) - self.initialize_aliases(nr_aliases) + self._initialize_aliases(nr_aliases) cdef int64_t nr_candidates cdef vector[int64_t] entry_indices From 6313787fb65002328a5858c2c3f5c5db29ebe3e1 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 17 Jun 2022 19:41:55 +0100 Subject: [PATCH 09/29] Auto-format code with black (#10977) Co-authored-by: explosion-bot --- spacy/ml/_precomputable_affine.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spacy/ml/_precomputable_affine.py b/spacy/ml/_precomputable_affine.py index 007d68aca..7a25e7574 100644 --- a/spacy/ml/_precomputable_affine.py +++ b/spacy/ml/_precomputable_affine.py @@ -23,7 +23,7 @@ def forward(model, X, is_train): nI = model.get_dim("nI") W = model.get_param("W") # Preallocate array for layer output, including padding. 
- Yf = model.ops.alloc2f(X.shape[0] + 1, nF * nO * nP, zeros=False) + Yf = model.ops.alloc2f(X.shape[0] + 1, nF * nO * nP, zeros=False) model.ops.gemm(X, W.reshape((nF * nO * nP, nI)), trans2=True, out=Yf[1:]) Yf = Yf.reshape((Yf.shape[0], nF, nO, nP)) Yf[0] = model.get_param("pad") From eaeca5eb6a6e233b1f1f73c47fbfaf3f51720c18 Mon Sep 17 00:00:00 2001 From: Sofie Van Landeghem Date: Fri, 17 Jun 2022 20:02:37 +0100 Subject: [PATCH 10/29] account for NER labels with a hyphen in the name (#10960) * account for NER labels with a hyphen in the name * cleanup * fix docstring * add return type to helper method * shorter method and few more occurrences * user helper method across repo * fix circular import * partial revert to avoid circular import --- spacy/cli/debug_data.py | 8 +++---- .../pipeline/_parser_internals/arc_eager.pyx | 3 ++- spacy/pipeline/_parser_internals/ner.pyx | 3 ++- spacy/pipeline/dep_parser.pyx | 3 ++- spacy/pipeline/ner.pyx | 4 ++-- spacy/tests/parser/test_ner.py | 22 ++++++++++++++++--- spacy/tests/util.py | 3 ++- spacy/training/__init__.py | 1 + spacy/training/augment.py | 8 +++---- spacy/training/example.pyx | 4 ++-- spacy/training/iob_utils.py | 10 ++++++++- 11 files changed, 48 insertions(+), 21 deletions(-) diff --git a/spacy/cli/debug_data.py b/spacy/cli/debug_data.py index 0061515c6..8a6dde955 100644 --- a/spacy/cli/debug_data.py +++ b/spacy/cli/debug_data.py @@ -10,7 +10,7 @@ import math from ._util import app, Arg, Opt, show_validation_error, parse_config_overrides from ._util import import_code, debug_cli -from ..training import Example +from ..training import Example, remove_bilu_prefix from ..training.initialize import get_sourced_components from ..schemas import ConfigSchemaTraining from ..pipeline._parser_internals import nonproj @@ -758,9 +758,9 @@ def _compile_gold( # "Illegal" whitespace entity data["ws_ents"] += 1 if label.startswith(("B-", "U-")): - combined_label = label.split("-")[1] + combined_label = remove_bilu_prefix(label) data["ner"][combined_label] += 1 - if sent_starts[i] == True and label.startswith(("I-", "L-")): + if sent_starts[i] and label.startswith(("I-", "L-")): data["boundary_cross_ents"] += 1 elif label == "-": data["ner"]["-"] += 1 @@ -908,7 +908,7 @@ def _get_examples_without_label( for eg in data: if component == "ner": labels = [ - label.split("-")[1] + remove_bilu_prefix(label) for label in eg.get_aligned_ner() if label not in ("O", "-", None) ] diff --git a/spacy/pipeline/_parser_internals/arc_eager.pyx b/spacy/pipeline/_parser_internals/arc_eager.pyx index d60f1c3e6..257b5ef8a 100644 --- a/spacy/pipeline/_parser_internals/arc_eager.pyx +++ b/spacy/pipeline/_parser_internals/arc_eager.pyx @@ -10,6 +10,7 @@ from ...strings cimport hash_string from ...structs cimport TokenC from ...tokens.doc cimport Doc, set_children_from_heads from ...tokens.token cimport MISSING_DEP +from ...training import split_bilu_label from ...training.example cimport Example from .stateclass cimport StateClass from ._state cimport StateC, ArcC @@ -687,7 +688,7 @@ cdef class ArcEager(TransitionSystem): return self.c[name_or_id] name = name_or_id if '-' in name: - move_str, label_str = name.split('-', 1) + move_str, label_str = split_bilu_label(name) label = self.strings[label_str] else: move_str = name diff --git a/spacy/pipeline/_parser_internals/ner.pyx b/spacy/pipeline/_parser_internals/ner.pyx index 3edeff19a..fab872f00 100644 --- a/spacy/pipeline/_parser_internals/ner.pyx +++ b/spacy/pipeline/_parser_internals/ner.pyx @@ -13,6 +13,7 @@ from 
...typedefs cimport weight_t, attr_t from ...lexeme cimport Lexeme from ...attrs cimport IS_SPACE from ...structs cimport TokenC, SpanC +from ...training import split_bilu_label from ...training.example cimport Example from .stateclass cimport StateClass from ._state cimport StateC @@ -182,7 +183,7 @@ cdef class BiluoPushDown(TransitionSystem): if name == '-' or name == '' or name is None: return Transition(clas=0, move=MISSING, label=0, score=0) elif '-' in name: - move_str, label_str = name.split('-', 1) + move_str, label_str = split_bilu_label(name) # Deprecated, hacky way to denote 'not this entity' if label_str.startswith('!'): raise ValueError(Errors.E869.format(label=name)) diff --git a/spacy/pipeline/dep_parser.pyx b/spacy/pipeline/dep_parser.pyx index 50c57ee5b..e5f686158 100644 --- a/spacy/pipeline/dep_parser.pyx +++ b/spacy/pipeline/dep_parser.pyx @@ -12,6 +12,7 @@ from ..language import Language from ._parser_internals import nonproj from ._parser_internals.nonproj import DELIMITER from ..scorer import Scorer +from ..training import remove_bilu_prefix from ..util import registry @@ -314,7 +315,7 @@ cdef class DependencyParser(Parser): # Get the labels from the model by looking at the available moves for move in self.move_names: if "-" in move: - label = move.split("-")[1] + label = remove_bilu_prefix(move) if DELIMITER in label: label = label.split(DELIMITER)[1] labels.add(label) diff --git a/spacy/pipeline/ner.pyx b/spacy/pipeline/ner.pyx index 4835a8c4b..25f48c9f8 100644 --- a/spacy/pipeline/ner.pyx +++ b/spacy/pipeline/ner.pyx @@ -6,10 +6,10 @@ from thinc.api import Model, Config from ._parser_internals.transition_system import TransitionSystem from .transition_parser cimport Parser from ._parser_internals.ner cimport BiluoPushDown - from ..language import Language from ..scorer import get_ner_prf, PRFScore from ..util import registry +from ..training import remove_bilu_prefix default_model_config = """ @@ -242,7 +242,7 @@ cdef class EntityRecognizer(Parser): def labels(self): # Get the labels from the model by looking at the available moves, e.g. # B-PERSON, I-PERSON, L-PERSON, U-PERSON - labels = set(move.split("-")[1] for move in self.move_names + labels = set(remove_bilu_prefix(move) for move in self.move_names if move[0] in ("B", "I", "L", "U")) return tuple(sorted(labels)) diff --git a/spacy/tests/parser/test_ner.py b/spacy/tests/parser/test_ner.py index b3b29d1f9..53bb2d554 100644 --- a/spacy/tests/parser/test_ner.py +++ b/spacy/tests/parser/test_ner.py @@ -10,7 +10,7 @@ from spacy.lang.it import Italian from spacy.language import Language from spacy.lookups import Lookups from spacy.pipeline._parser_internals.ner import BiluoPushDown -from spacy.training import Example, iob_to_biluo +from spacy.training import Example, iob_to_biluo, split_bilu_label from spacy.tokens import Doc, Span from spacy.vocab import Vocab import logging @@ -110,6 +110,9 @@ def test_issue2385(): # maintain support for iob2 format tags3 = ("B-PERSON", "I-PERSON", "B-PERSON") assert iob_to_biluo(tags3) == ["B-PERSON", "L-PERSON", "U-PERSON"] + # ensure it works with hyphens in the name + tags4 = ("B-MULTI-PERSON", "I-MULTI-PERSON", "B-MULTI-PERSON") + assert iob_to_biluo(tags4) == ["B-MULTI-PERSON", "L-MULTI-PERSON", "U-MULTI-PERSON"] @pytest.mark.issue(2800) @@ -154,6 +157,19 @@ def test_issue3209(): assert ner2.move_names == move_names +def test_labels_from_BILUO(): + """Test that labels are inferred correctly when there's a - in label. 
+ """ + nlp = English() + ner = nlp.add_pipe("ner") + ner.add_label("LARGE-ANIMAL") + nlp.initialize() + move_names = ["O", "B-LARGE-ANIMAL", "I-LARGE-ANIMAL", "L-LARGE-ANIMAL", "U-LARGE-ANIMAL"] + labels = {"LARGE-ANIMAL"} + assert ner.move_names == move_names + assert set(ner.labels) == labels + + @pytest.mark.issue(4267) def test_issue4267(): """Test that running an entity_ruler after ner gives consistent results""" @@ -298,7 +314,7 @@ def test_oracle_moves_missing_B(en_vocab): elif tag == "O": moves.add_action(move_types.index("O"), "") else: - action, label = tag.split("-") + action, label = split_bilu_label(tag) moves.add_action(move_types.index("B"), label) moves.add_action(move_types.index("I"), label) moves.add_action(move_types.index("L"), label) @@ -324,7 +340,7 @@ def test_oracle_moves_whitespace(en_vocab): elif tag == "O": moves.add_action(move_types.index("O"), "") else: - action, label = tag.split("-") + action, label = split_bilu_label(tag) moves.add_action(move_types.index(action), label) moves.get_oracle_sequence(example) diff --git a/spacy/tests/util.py b/spacy/tests/util.py index 365ea4349..d5f3c39ff 100644 --- a/spacy/tests/util.py +++ b/spacy/tests/util.py @@ -5,6 +5,7 @@ import srsly from spacy.tokens import Doc from spacy.vocab import Vocab from spacy.util import make_tempdir # noqa: F401 +from spacy.training import split_bilu_label from thinc.api import get_current_ops @@ -40,7 +41,7 @@ def apply_transition_sequence(parser, doc, sequence): desired state.""" for action_name in sequence: if "-" in action_name: - move, label = action_name.split("-") + move, label = split_bilu_label(action_name) parser.add_label(label) with parser.step_through(doc) as stepwise: for transition in sequence: diff --git a/spacy/training/__init__.py b/spacy/training/__init__.py index a4feb01f4..71d1fa775 100644 --- a/spacy/training/__init__.py +++ b/spacy/training/__init__.py @@ -5,6 +5,7 @@ from .augment import dont_augment, orth_variants_augmenter # noqa: F401 from .iob_utils import iob_to_biluo, biluo_to_iob # noqa: F401 from .iob_utils import offsets_to_biluo_tags, biluo_tags_to_offsets # noqa: F401 from .iob_utils import biluo_tags_to_spans, tags_to_entities # noqa: F401 +from .iob_utils import split_bilu_label, remove_bilu_prefix # noqa: F401 from .gold_io import docs_to_json, read_json_file # noqa: F401 from .batchers import minibatch_by_padded_size, minibatch_by_words # noqa: F401 from .loggers import console_logger # noqa: F401 diff --git a/spacy/training/augment.py b/spacy/training/augment.py index 59a39c7ee..55d780ba4 100644 --- a/spacy/training/augment.py +++ b/spacy/training/augment.py @@ -3,10 +3,10 @@ from typing import Optional import random import itertools from functools import partial -from pydantic import BaseModel, StrictStr from ..util import registry from .example import Example +from .iob_utils import split_bilu_label if TYPE_CHECKING: from ..language import Language # noqa: F401 @@ -278,10 +278,8 @@ def make_whitespace_variant( ent_prev = doc_dict["entities"][position - 1] ent_next = doc_dict["entities"][position] if "-" in ent_prev and "-" in ent_next: - ent_iob_prev = ent_prev.split("-")[0] - ent_type_prev = ent_prev.split("-", 1)[1] - ent_iob_next = ent_next.split("-")[0] - ent_type_next = ent_next.split("-", 1)[1] + ent_iob_prev, ent_type_prev = split_bilu_label(ent_prev) + ent_iob_next, ent_type_next = split_bilu_label(ent_next) if ( ent_iob_prev in ("B", "I") and ent_iob_next in ("I", "L") diff --git a/spacy/training/example.pyx b/spacy/training/example.pyx 
index 3035388a6..045f0b483 100644 --- a/spacy/training/example.pyx +++ b/spacy/training/example.pyx @@ -9,7 +9,7 @@ from ..tokens.span import Span from ..attrs import IDS from .alignment import Alignment from .iob_utils import biluo_to_iob, offsets_to_biluo_tags, doc_to_biluo_tags -from .iob_utils import biluo_tags_to_spans +from .iob_utils import biluo_tags_to_spans, remove_bilu_prefix from ..errors import Errors, Warnings from ..pipeline._parser_internals import nonproj from ..tokens.token cimport MISSING_DEP @@ -519,7 +519,7 @@ def _parse_ner_tags(biluo_or_offsets, vocab, words, spaces): else: ent_iobs.append(iob_tag.split("-")[0]) if iob_tag.startswith("I") or iob_tag.startswith("B"): - ent_types.append(iob_tag.split("-", 1)[1]) + ent_types.append(remove_bilu_prefix(iob_tag)) else: ent_types.append("") return ent_iobs, ent_types diff --git a/spacy/training/iob_utils.py b/spacy/training/iob_utils.py index 64492c2bc..61f83a1c3 100644 --- a/spacy/training/iob_utils.py +++ b/spacy/training/iob_utils.py @@ -1,4 +1,4 @@ -from typing import List, Dict, Tuple, Iterable, Union, Iterator +from typing import List, Dict, Tuple, Iterable, Union, Iterator, cast import warnings from ..errors import Errors, Warnings @@ -218,6 +218,14 @@ def tags_to_entities(tags: Iterable[str]) -> List[Tuple[str, int, int]]: return entities +def split_bilu_label(label: str) -> Tuple[str, str]: + return cast(Tuple[str, str], label.split("-", 1)) + + +def remove_bilu_prefix(label: str) -> str: + return label.split("-", 1)[1] + + # Fallbacks to make backwards-compat easier offsets_from_biluo_tags = biluo_tags_to_offsets spans_from_biluo_tags = biluo_tags_to_spans From 4c058eb40a1843191352a1501bead0dc99526bed Mon Sep 17 00:00:00 2001 From: Raphael Mitsch Date: Fri, 17 Jun 2022 21:24:13 +0200 Subject: [PATCH 11/29] `enable` argument for spacy.load() (#10784) * Enable flag on spacy.load: foundation for include, enable arguments. * Enable flag on spacy.load: fixed tests. * Enable flag on spacy.load: switched from pretrained model to empty model with added pipes for tests. * Enable flag on spacy.load: switched to more consistent error on misspecification of component activity. Test refactoring. Added to default config. * Enable flag on spacy.load: added support for fields not in pipeline. * Enable flag on spacy.load: removed serialization fields from supported fields. * Enable flag on spacy.load: removed 'enable' from config again. * Enable flag on spacy.load: relaxed checks in _resolve_component_activation_status() to allow non-standard pipes. * Enable flag on spacy.load: fixed relaxed checks for _resolve_component_activation_status() to allow non-standard pipes. Extended tests. * Enable flag on spacy.load: comments w.r.t. resolution workarounds. * Enable flag on spacy.load: remove include fields. Update website docs. * Enable flag on spacy.load: updates w.r.t. changes in master. * Implement Doc.from_json(): update docstrings. Co-authored-by: Adriane Boyd * Implement Doc.from_json(): remove newline. Co-authored-by: Adriane Boyd * Implement Doc.from_json(): change error message for E1038. Co-authored-by: Adriane Boyd * Enable flag on spacy.load: wrapped docstring for _resolve_component_status() at 80 chars. * Enable flag on spacy.load: changed exmples for enable flag. * Remove newline. Co-authored-by: Sofie Van Landeghem * Fix docstring for Language._resolve_component_status(). * Rename E1038 to E1042. 
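As a quick orientation before the diff, a short usage sketch of the semantics this patch adds (assuming an installed `en_core_web_sm` pipeline; the website docs changed further down in this patch show the same examples):

```python
import spacy

# Keep only tok2vec and tagger enabled; every other component is disabled
# and can be re-enabled later with nlp.enable_pipe(...).
nlp = spacy.load("en_core_web_sm", enable=["tok2vec", "tagger"])

# Conflicting enable/disable settings raise a ValueError (error code E1042).
try:
    spacy.load("en_core_web_sm", enable=["ner"], disable=["ner"])
except ValueError as err:
    print(err)
```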
Co-authored-by: Adriane Boyd Co-authored-by: Sofie Van Landeghem --- spacy/__init__.py | 10 ++++- spacy/errors.py | 2 + spacy/language.py | 50 ++++++++++++++++++++- spacy/tests/pipeline/test_pipe_methods.py | 52 +++++++++++++++++++++- spacy/util.py | 37 ++++++++++++--- website/docs/api/top-level.md | 1 + website/docs/usage/processing-pipelines.md | 12 +++++ 7 files changed, 155 insertions(+), 9 deletions(-) diff --git a/spacy/__init__.py b/spacy/__init__.py index ca47edc94..069215fda 100644 --- a/spacy/__init__.py +++ b/spacy/__init__.py @@ -32,6 +32,7 @@ def load( *, vocab: Union[Vocab, bool] = True, disable: Iterable[str] = util.SimpleFrozenList(), + enable: Iterable[str] = util.SimpleFrozenList(), exclude: Iterable[str] = util.SimpleFrozenList(), config: Union[Dict[str, Any], Config] = util.SimpleFrozenDict(), ) -> Language: @@ -42,6 +43,8 @@ def load( disable (Iterable[str]): Names of pipeline components to disable. Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling nlp.enable_pipe. + enable (Iterable[str]): Names of pipeline components to enable. All other + pipes will be disabled (but can be enabled later using nlp.enable_pipe). exclude (Iterable[str]): Names of pipeline components to exclude. Excluded components won't be loaded. config (Dict[str, Any] / Config): Config overrides as nested dict or dict @@ -49,7 +52,12 @@ def load( RETURNS (Language): The loaded nlp object. """ return util.load_model( - name, vocab=vocab, disable=disable, exclude=exclude, config=config + name, + vocab=vocab, + disable=disable, + enable=enable, + exclude=exclude, + config=config, ) diff --git a/spacy/errors.py b/spacy/errors.py index 384a6a4d2..14010565b 100644 --- a/spacy/errors.py +++ b/spacy/errors.py @@ -932,6 +932,8 @@ class Errors(metaclass=ErrorsWithCodes): E1040 = ("Doc.from_json requires all tokens to have the same attributes. " "Some tokens do not contain annotation for: {partial_attrs}") E1041 = ("Expected a string, Doc, or bytes as input, but got: {type}") + E1042 = ("Function was called with `{arg1}`={arg1_values} and " + "`{arg2}`={arg2_values} but these arguments are conflicting.") # Deprecated model shortcuts, only used in errors and warnings diff --git a/spacy/language.py b/spacy/language.py index 42847823f..816bd6531 100644 --- a/spacy/language.py +++ b/spacy/language.py @@ -1,4 +1,4 @@ -from typing import Iterator, Optional, Any, Dict, Callable, Iterable +from typing import Iterator, Optional, Any, Dict, Callable, Iterable, Collection from typing import Union, Tuple, List, Set, Pattern, Sequence from typing import NoReturn, TYPE_CHECKING, TypeVar, cast, overload @@ -1694,6 +1694,7 @@ class Language: *, vocab: Union[Vocab, bool] = True, disable: Iterable[str] = SimpleFrozenList(), + enable: Iterable[str] = SimpleFrozenList(), exclude: Iterable[str] = SimpleFrozenList(), meta: Dict[str, Any] = SimpleFrozenDict(), auto_fill: bool = True, @@ -1708,6 +1709,8 @@ class Language: disable (Iterable[str]): Names of pipeline components to disable. Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling nlp.enable_pipe. + enable (Iterable[str]): Names of pipeline components to enable. All other + pipes will be disabled (and can be enabled using `nlp.enable_pipe`). exclude (Iterable[str]): Names of pipeline components to exclude. Excluded components won't be loaded. meta (Dict[str, Any]): Meta overrides for nlp.meta. 
@@ -1861,8 +1864,15 @@ class Language: # Restore the original vocab after sourcing if necessary if vocab_b is not None: nlp.vocab.from_bytes(vocab_b) - disabled_pipes = [*config["nlp"]["disabled"], *disable] + + # Resolve disabled/enabled settings. + disabled_pipes = cls._resolve_component_status( + [*config["nlp"]["disabled"], *disable], + [*config["nlp"].get("enabled", []), *enable], + config["nlp"]["pipeline"], + ) nlp._disabled = set(p for p in disabled_pipes if p not in exclude) + nlp.batch_size = config["nlp"]["batch_size"] nlp.config = filled if auto_fill else config if after_pipeline_creation is not None: @@ -2014,6 +2024,42 @@ class Language: serializers["vocab"] = lambda p: self.vocab.to_disk(p, exclude=exclude) util.to_disk(path, serializers, exclude) + @staticmethod + def _resolve_component_status( + disable: Iterable[str], enable: Iterable[str], pipe_names: Collection[str] + ) -> Tuple[str, ...]: + """Derives whether (1) `disable` and `enable` values are consistent and (2) + resolves those to a single set of disabled components. Raises an error in + case of inconsistency. + + disable (Iterable[str]): Names of components or serialization fields to disable. + enable (Iterable[str]): Names of pipeline components to enable. + pipe_names (Iterable[str]): Names of all pipeline components. + + RETURNS (Tuple[str, ...]): Names of components to exclude from pipeline w.r.t. + specified includes and excludes. + """ + + if disable is not None and isinstance(disable, str): + disable = [disable] + to_disable = disable + + if enable: + to_disable = [ + pipe_name for pipe_name in pipe_names if pipe_name not in enable + ] + if disable and disable != to_disable: + raise ValueError( + Errors.E1042.format( + arg1="enable", + arg2="disable", + arg1_values=enable, + arg2_values=disable, + ) + ) + + return tuple(to_disable) + def from_disk( self, path: Union[str, Path], diff --git a/spacy/tests/pipeline/test_pipe_methods.py b/spacy/tests/pipeline/test_pipe_methods.py index 4b8fb8ebc..6f00a1cd9 100644 --- a/spacy/tests/pipeline/test_pipe_methods.py +++ b/spacy/tests/pipeline/test_pipe_methods.py @@ -4,13 +4,14 @@ import numpy import pytest from thinc.api import get_current_ops +import spacy from spacy.lang.en import English from spacy.lang.en.syntax_iterators import noun_chunks from spacy.language import Language from spacy.pipeline import TrainablePipe from spacy.tokens import Doc from spacy.training import Example -from spacy.util import SimpleFrozenList, get_arg_names +from spacy.util import SimpleFrozenList, get_arg_names, make_tempdir from spacy.vocab import Vocab @@ -602,3 +603,52 @@ def test_update_with_annotates(): assert results[component] == "".join(eg.predicted.text for eg in examples) for component in components - set(components_to_annotate): assert results[component] == "" + + +def test_load_disable_enable() -> None: + """ + Tests spacy.load() with dis-/enabling components. + """ + + base_nlp = English() + for pipe in ("sentencizer", "tagger", "parser"): + base_nlp.add_pipe(pipe) + + with make_tempdir() as tmp_dir: + base_nlp.to_disk(tmp_dir) + to_disable = ["parser", "tagger"] + to_enable = ["tagger", "parser"] + + # Setting only `disable`. + nlp = spacy.load(tmp_dir, disable=to_disable) + assert all([comp_name in nlp.disabled for comp_name in to_disable]) + + # Setting only `enable`. 
+ nlp = spacy.load(tmp_dir, enable=to_enable) + assert all( + [ + (comp_name in nlp.disabled) is (comp_name not in to_enable) + for comp_name in nlp.component_names + ] + ) + + # Testing consistent enable/disable combination. + nlp = spacy.load( + tmp_dir, + enable=to_enable, + disable=[ + comp_name + for comp_name in nlp.component_names + if comp_name not in to_enable + ], + ) + assert all( + [ + (comp_name in nlp.disabled) is (comp_name not in to_enable) + for comp_name in nlp.component_names + ] + ) + + # Inconsistent enable/disable combination. + with pytest.raises(ValueError): + spacy.load(tmp_dir, enable=to_enable, disable=["parser"]) diff --git a/spacy/util.py b/spacy/util.py index 0111c839e..9b871b87b 100644 --- a/spacy/util.py +++ b/spacy/util.py @@ -1,6 +1,6 @@ from typing import List, Mapping, NoReturn, Union, Dict, Any, Set, cast from typing import Optional, Iterable, Callable, Tuple, Type -from typing import Iterator, Type, Pattern, Generator, TYPE_CHECKING +from typing import Iterator, Pattern, Generator, TYPE_CHECKING from types import ModuleType import os import importlib @@ -12,7 +12,6 @@ from thinc.api import NumpyOps, get_current_ops, Adam, Config, Optimizer from thinc.api import ConfigValidationError, Model import functools import itertools -import numpy.random import numpy import srsly import catalogue @@ -400,6 +399,7 @@ def load_model( *, vocab: Union["Vocab", bool] = True, disable: Iterable[str] = SimpleFrozenList(), + enable: Iterable[str] = SimpleFrozenList(), exclude: Iterable[str] = SimpleFrozenList(), config: Union[Dict[str, Any], Config] = SimpleFrozenDict(), ) -> "Language": @@ -409,11 +409,19 @@ def load_model( vocab (Vocab / True): Optional vocab to pass in on initialization. If True, a new Vocab object will be created. disable (Iterable[str]): Names of pipeline components to disable. + enable (Iterable[str]): Names of pipeline components to enable. All others will be disabled. + exclude (Iterable[str]): Names of pipeline components to exclude. config (Dict[str, Any] / Config): Config overrides as nested dict or dict keyed by section values in dot notation. RETURNS (Language): The loaded nlp object. """ - kwargs = {"vocab": vocab, "disable": disable, "exclude": exclude, "config": config} + kwargs = { + "vocab": vocab, + "disable": disable, + "enable": enable, + "exclude": exclude, + "config": config, + } if isinstance(name, str): # name or string path if name.startswith("blank:"): # shortcut for blank model return get_lang_class(name.replace("blank:", ""))() @@ -433,6 +441,7 @@ def load_model_from_package( *, vocab: Union["Vocab", bool] = True, disable: Iterable[str] = SimpleFrozenList(), + enable: Iterable[str] = SimpleFrozenList(), exclude: Iterable[str] = SimpleFrozenList(), config: Union[Dict[str, Any], Config] = SimpleFrozenDict(), ) -> "Language": @@ -444,6 +453,8 @@ def load_model_from_package( disable (Iterable[str]): Names of pipeline components to disable. Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling nlp.enable_pipe. + enable (Iterable[str]): Names of pipeline components to enable. All other + pipes will be disabled (and can be enabled using `nlp.enable_pipe`). exclude (Iterable[str]): Names of pipeline components to exclude. Excluded components won't be loaded. config (Dict[str, Any] / Config): Config overrides as nested dict or dict @@ -451,7 +462,7 @@ def load_model_from_package( RETURNS (Language): The loaded nlp object. 
""" cls = importlib.import_module(name) - return cls.load(vocab=vocab, disable=disable, exclude=exclude, config=config) # type: ignore[attr-defined] + return cls.load(vocab=vocab, disable=disable, enable=enable, exclude=exclude, config=config) # type: ignore[attr-defined] def load_model_from_path( @@ -460,6 +471,7 @@ def load_model_from_path( meta: Optional[Dict[str, Any]] = None, vocab: Union["Vocab", bool] = True, disable: Iterable[str] = SimpleFrozenList(), + enable: Iterable[str] = SimpleFrozenList(), exclude: Iterable[str] = SimpleFrozenList(), config: Union[Dict[str, Any], Config] = SimpleFrozenDict(), ) -> "Language": @@ -473,6 +485,8 @@ def load_model_from_path( disable (Iterable[str]): Names of pipeline components to disable. Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling nlp.enable_pipe. + enable (Iterable[str]): Names of pipeline components to enable. All other + pipes will be disabled (and can be enabled using `nlp.enable_pipe`). exclude (Iterable[str]): Names of pipeline components to exclude. Excluded components won't be loaded. config (Dict[str, Any] / Config): Config overrides as nested dict or dict @@ -487,7 +501,12 @@ def load_model_from_path( overrides = dict_to_dot(config) config = load_config(config_path, overrides=overrides) nlp = load_model_from_config( - config, vocab=vocab, disable=disable, exclude=exclude, meta=meta + config, + vocab=vocab, + disable=disable, + enable=enable, + exclude=exclude, + meta=meta, ) return nlp.from_disk(model_path, exclude=exclude, overrides=overrides) @@ -498,6 +517,7 @@ def load_model_from_config( meta: Dict[str, Any] = SimpleFrozenDict(), vocab: Union["Vocab", bool] = True, disable: Iterable[str] = SimpleFrozenList(), + enable: Iterable[str] = SimpleFrozenList(), exclude: Iterable[str] = SimpleFrozenList(), auto_fill: bool = False, validate: bool = True, @@ -512,6 +532,8 @@ def load_model_from_config( disable (Iterable[str]): Names of pipeline components to disable. Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling nlp.enable_pipe. + enable (Iterable[str]): Names of pipeline components to enable. All other + pipes will be disabled (and can be enabled using `nlp.enable_pipe`). exclude (Iterable[str]): Names of pipeline components to exclude. Excluded components won't be loaded. auto_fill (bool): Whether to auto-fill config with missing defaults. @@ -530,6 +552,7 @@ def load_model_from_config( config, vocab=vocab, disable=disable, + enable=enable, exclude=exclude, auto_fill=auto_fill, validate=validate, @@ -594,6 +617,7 @@ def load_model_from_init_py( *, vocab: Union["Vocab", bool] = True, disable: Iterable[str] = SimpleFrozenList(), + enable: Iterable[str] = SimpleFrozenList(), exclude: Iterable[str] = SimpleFrozenList(), config: Union[Dict[str, Any], Config] = SimpleFrozenDict(), ) -> "Language": @@ -605,6 +629,8 @@ def load_model_from_init_py( disable (Iterable[str]): Names of pipeline components to disable. Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling nlp.enable_pipe. + enable (Iterable[str]): Names of pipeline components to enable. All other + pipes will be disabled (and can be enabled using `nlp.enable_pipe`). exclude (Iterable[str]): Names of pipeline components to exclude. Excluded components won't be loaded. 
config (Dict[str, Any] / Config): Config overrides as nested dict or dict @@ -622,6 +648,7 @@ def load_model_from_init_py( vocab=vocab, meta=meta, disable=disable, + enable=enable, exclude=exclude, config=config, ) diff --git a/website/docs/api/top-level.md b/website/docs/api/top-level.md index 889c6437c..c96c571e9 100644 --- a/website/docs/api/top-level.md +++ b/website/docs/api/top-level.md @@ -51,6 +51,7 @@ specified separately using the new `exclude` keyword argument. | _keyword-only_ | | | `vocab` | Optional shared vocab to pass in on initialization. If `True` (default), a new `Vocab` object will be created. ~~Union[Vocab, bool]~~ | | `disable` | Names of pipeline components to [disable](/usage/processing-pipelines#disabling). Disabled pipes will be loaded but they won't be run unless you explicitly enable them by calling [nlp.enable_pipe](/api/language#enable_pipe). ~~List[str]~~ | +| `enable` | Names of pipeline components to [enable](/usage/processing-pipelines#disabling). All other pipes will be disabled. ~~List[str]~~ | | `exclude` 3 | Names of pipeline components to [exclude](/usage/processing-pipelines#disabling). Excluded components won't be loaded. ~~List[str]~~ | | `config` 3 | Optional config overrides, either as nested dict or dict keyed by section value in dot notation, e.g. `"components.name.value"`. ~~Union[Dict[str, Any], Config]~~ | | **RETURNS** | A `Language` object with the loaded pipeline. ~~Language~~ | diff --git a/website/docs/usage/processing-pipelines.md b/website/docs/usage/processing-pipelines.md index 4f75b5193..bd28810ae 100644 --- a/website/docs/usage/processing-pipelines.md +++ b/website/docs/usage/processing-pipelines.md @@ -362,6 +362,18 @@ nlp = spacy.load("en_core_web_sm", disable=["tagger", "parser"]) nlp.enable_pipe("tagger") ``` +In addition to `disable`, `spacy.load()` also accepts `enable`. If `enable` is +set, all components except for those in `enable` are disabled. 
+ +```python +# Load the complete pipeline, but disable all components except for tok2vec and tagger +nlp = spacy.load("en_core_web_sm", enable=["tok2vec", "tagger"]) +# Has the same effect, as NER is already not part of enabled set of components +nlp = spacy.load("en_core_web_sm", enable=["tok2vec", "tagger"], disable=["ner"]) +# Will raise an error, as the sets of enabled and disabled components are conflicting +nlp = spacy.load("en_core_web_sm", enable=["ner"], disable=["ner"]) +``` + As of v3.0, the `disable` keyword argument specifies components to load but From f00254ae276eca963991efb8a45748b2948b1c77 Mon Sep 17 00:00:00 2001 From: Sofie Van Landeghem Date: Mon, 20 Jun 2022 08:48:40 +0100 Subject: [PATCH 12/29] add counts to verbose list of NER labels (#10957) --- spacy/cli/debug_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spacy/cli/debug_data.py b/spacy/cli/debug_data.py index 8a6dde955..bd05471b1 100644 --- a/spacy/cli/debug_data.py +++ b/spacy/cli/debug_data.py @@ -361,7 +361,7 @@ def debug_data( if label != "-" ] labels_with_counts = _format_labels(labels_with_counts, counts=True) - msg.text(f"Labels in train data: {_format_labels(labels)}", show=verbose) + msg.text(f"Labels in train data: {labels_with_counts}", show=verbose) missing_labels = model_labels - labels if missing_labels: msg.warn( From cdad815c6854a5349abbde469f2478585b118e6a Mon Sep 17 00:00:00 2001 From: Lucaterre Date: Mon, 20 Jun 2022 14:28:49 +0200 Subject: [PATCH 13/29] updated spacy universe for spacyfishing --- .github/contributors/Lucaterre.md | 106 ++++++++++++++++++++++++++++++ website/meta/universe.json | 29 ++++++++ 2 files changed, 135 insertions(+) create mode 100644 .github/contributors/Lucaterre.md diff --git a/.github/contributors/Lucaterre.md b/.github/contributors/Lucaterre.md new file mode 100644 index 000000000..5da763b22 --- /dev/null +++ b/.github/contributors/Lucaterre.md @@ -0,0 +1,106 @@ +# spaCy contributor agreement + +This spaCy Contributor Agreement (**"SCA"**) is based on the +[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf). +The SCA applies to any contribution that you make to any product or project +managed by us (the **"project"**), and sets out the intellectual property rights +you grant to us in the contributed materials. The term **"us"** shall mean +[ExplosionAI GmbH](https://explosion.ai/legal). The term +**"you"** shall mean the person or entity identified below. + +If you agree to be bound by these terms, fill in the information requested +below and include the filled-in version with your first pull request, under the +folder [`.github/contributors/`](/.github/contributors/). The name of the file +should be your GitHub username, with the extension `.md`. For example, the user +example_user would create the file `.github/contributors/example_user.md`. + +Read this agreement carefully before signing. These terms and conditions +constitute a binding legal agreement. + +## Contributor Agreement + +1. The term "contribution" or "contributed materials" means any source code, +object code, patch, tool, sample, graphic, specification, manual, +documentation, or any other material posted or submitted by you to the project. + +2. 
With respect to any worldwide copyrights, or copyright applications and +registrations, in your contribution: + + * you hereby assign to us joint ownership, and to the extent that such + assignment is or becomes invalid, ineffective or unenforceable, you hereby + grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge, + royalty-free, unrestricted license to exercise all rights under those + copyrights. This includes, at our option, the right to sublicense these same + rights to third parties through multiple levels of sublicensees or other + licensing arrangements; + + * you agree that each of us can do all things in relation to your + contribution as if each of us were the sole owners, and if one of us makes + a derivative work of your contribution, the one who makes the derivative + work (or has it made will be the sole owner of that derivative work; + + * you agree that you will not assert any moral rights in your contribution + against us, our licensees or transferees; + + * you agree that we may register a copyright in your contribution and + exercise all ownership rights associated with it; and + + * you agree that neither of us has any duty to consult with, obtain the + consent of, pay or render an accounting to the other for any use or + distribution of your contribution. + +3. With respect to any patents you own, or that you can license without payment +to any third party, you hereby grant to us a perpetual, irrevocable, +non-exclusive, worldwide, no-charge, royalty-free license to: + + * make, have made, use, sell, offer to sell, import, and otherwise transfer + your contribution in whole or in part, alone or in combination with or + included in any product, work or materials arising out of the project to + which your contribution was submitted, and + + * at our option, to sublicense these same rights to third parties through + multiple levels of sublicensees or other licensing arrangements. + +4. Except as set out above, you keep all right, title, and interest in your +contribution. The rights that you grant to us under these terms are effective +on the date you first submitted a contribution to us, even if your submission +took place before the date you sign these terms. + +5. You covenant, represent, warrant and agree that: + + * Each contribution that you submit is and shall be an original work of + authorship and you can legally grant the rights set out in this SCA; + + * to the best of your knowledge, each contribution will not violate any + third party's copyrights, trademarks, patents, or other intellectual + property rights; and + + * each contribution shall be in compliance with U.S. export control laws and + other applicable export and import laws. You agree to notify us if you + become aware of any circumstance which would make any of the foregoing + representations inaccurate in any respect. We may publicly disclose your + participation in the project, including the fact that you have signed the SCA. + +6. This SCA is governed by the laws of the State of California and applicable +U.S. Federal law. Any choice of law rules will not apply. + +7. Please place an “x” on one of the applicable statement below. Please do NOT +mark both statements: + + * [x] I am signing on behalf of myself as an individual and no other person + or entity, including my employer, has or will have rights with respect to my + contributions. + + * [ ] I am signing on behalf of my employer or a legal entity and I have the + actual authority to contractually bind that entity. 
+ +## Contributor Details + +| Field | Entry | +|------------------------------- |---------------| +| Name | Lucas Terriel | +| Company name (if applicable) | | +| Title or role (if applicable) | | +| Date | 2022-06-20 | +| GitHub username | Lucaterre | +| Website (optional) | | \ No newline at end of file diff --git a/website/meta/universe.json b/website/meta/universe.json index 9b644adf4..ce2c63739 100644 --- a/website/meta/universe.json +++ b/website/meta/universe.json @@ -1,5 +1,34 @@ { "resources": [ + { + "id": "spacyfishing", + "title": "spaCy fishing", + "slogan": "Named entity disambiguation and linking on Wikidata in spaCy with Entity-Fishing.", + "description": "A spaCy wrapper of Entity-Fishing for named entity disambiguation and linking against a Wikidata knowledge base.", + "github": "Lucaterre/spacyfishing", + "pip": "spacyfishing", + "code_example": [ + "import spacy", + "text = 'Victor Hugo and Honoré de Balzac are French writers who lived in Paris.'", + "nlp = spacy.load('en_core_web_sm')", + "nlp.add_pipe('spacyfishing')", + "doc = nlp(text)", + "for span in doc.ents:", + " print((ent.text, ent.label_, ent._.kb_qid, ent._.url_wikidata, ent._.nerd_score))", + "# ('Victor Hugo', 'PERSON', 'Q535', 'https://www.wikidata.org/wiki/Q535', 0.972)", + "# ('Honoré de Balzac', 'PERSON', 'Q9711', 'https://www.wikidata.org/wiki/Q9711', 0.9724)", + "# ('French', 'NORP', 'Q121842', 'https://www.wikidata.org/wiki/Q121842', 0.3739)", + "# ('Paris', 'GPE', 'Q90', 'https://www.wikidata.org/wiki/Q90', 0.5652)", + "## Set parameter `extra_info` to `True` and check also span._.description, span._.src_description, span._.normal_term, span._.other_ids" + ], + "category": ["models", "pipeline"], + "tags": ["NER", "NEL"], + "author": "Lucas Terriel", + "author_links": { + "twitter": "TerreLuca", + "github": "Lucaterre" + } + }, { "id": "aim-spacy", "title": "Aim-spaCy", From 2820d7dd8daa66e12bb7c07b1dcfb31423741a72 Mon Sep 17 00:00:00 2001 From: Lucaterre Date: Mon, 20 Jun 2022 15:26:23 +0200 Subject: [PATCH 14/29] correct typo in universe.json for 'code_example' key : pipe name 'entityfishing' --- website/meta/universe.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/meta/universe.json b/website/meta/universe.json index ce2c63739..4a3ec6225 100644 --- a/website/meta/universe.json +++ b/website/meta/universe.json @@ -11,7 +11,7 @@ "import spacy", "text = 'Victor Hugo and Honoré de Balzac are French writers who lived in Paris.'", "nlp = spacy.load('en_core_web_sm')", - "nlp.add_pipe('spacyfishing')", + "nlp.add_pipe('entityfishing')", "doc = nlp(text)", "for span in doc.ents:", " print((ent.text, ent.label_, ent._.kb_qid, ent._.url_wikidata, ent._.nerd_score))", From a08ca064e53810cf1c7c0aa1ee7030654d11b5aa Mon Sep 17 00:00:00 2001 From: Victoria <80417010+victorialslocum@users.noreply.github.com> Date: Tue, 21 Jun 2022 01:03:41 -0500 Subject: [PATCH 15/29] Update linguistic-features.md (#10993) Change link for downloading fasttext word vectors --- website/docs/usage/linguistic-features.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/usage/linguistic-features.md b/website/docs/usage/linguistic-features.md index c547ec0bc..9dae6f2ee 100644 --- a/website/docs/usage/linguistic-features.md +++ b/website/docs/usage/linguistic-features.md @@ -1899,7 +1899,7 @@ access to some nice Latin vectors. 
You can then pass the directory path to > ``` ```cli -$ wget https://s3-us-west-1.amazonaws.com/fasttext-vectors/word-vectors-v2/cc.la.300.vec.gz +$ wget https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.la.300.vec.gz $ python -m spacy init vectors en cc.la.300.vec.gz /tmp/la_vectors_wiki_lg ``` From 0271306f1603a3f70870c1786e8783fe39e22bd2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dani=C3=ABl=20de=20Kok?= Date: Tue, 21 Jun 2022 08:26:59 +0200 Subject: [PATCH 16/29] Use thinc-apple-ops>=0.1.0.dev0 with `apple` extras (#10904) * Use thinc-apple-ops>=0.1.0.dev0 with `apple` extras Also test with thinc-apple-ops that is at least 0.1.0.dev0. * Check thinc-apple-ops on macOS with Python 3.10 Co-authored-by: Adriane Boyd * Use `pip install --pre` for installing thinc-apple-ops in CI Co-authored-by: Adriane Boyd --- .github/azure-steps.yml | 4 ++-- setup.cfg | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/azure-steps.yml b/.github/azure-steps.yml index 80c88b0b8..d7233328a 100644 --- a/.github/azure-steps.yml +++ b/.github/azure-steps.yml @@ -111,7 +111,7 @@ steps: condition: eq(variables['python_version'], '3.8') - script: | - ${{ parameters.prefix }} python -m pip install thinc-apple-ops + ${{ parameters.prefix }} python -m pip install --pre thinc-apple-ops ${{ parameters.prefix }} python -m pytest --pyargs spacy displayName: "Run CPU tests with thinc-apple-ops" - condition: and(startsWith(variables['imageName'], 'macos'), eq(variables['python.version'], '3.9')) + condition: and(startsWith(variables['imageName'], 'macos'), eq(variables['python.version'], '3.10')) diff --git a/setup.cfg b/setup.cfg index 110a2e4ee..d317847ba 100644 --- a/setup.cfg +++ b/setup.cfg @@ -104,7 +104,7 @@ cuda114 = cuda115 = cupy-cuda115>=5.0.0b4,<11.0.0 apple = - thinc-apple-ops>=0.0.4,<1.0.0 + thinc-apple-ops>=0.1.0.dev0,<1.0.0 # Language tokenizers with external dependencies ja = sudachipy>=0.5.2,!=0.6.1 From 0fa004c4cd718319d750abad896447c114f39106 Mon Sep 17 00:00:00 2001 From: Sofie Van Landeghem Date: Tue, 21 Jun 2022 21:00:07 +0100 Subject: [PATCH 17/29] the 'new' indicator wants a 'number' (#10997) --- website/docs/api/spanruler.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/api/spanruler.md b/website/docs/api/spanruler.md index a1c222714..b573f7c58 100644 --- a/website/docs/api/spanruler.md +++ b/website/docs/api/spanruler.md @@ -2,7 +2,7 @@ title: SpanRuler tag: class source: spacy/pipeline/span_ruler.py -new: 3.3.1 +new: 3.3 teaser: 'Pipeline component for rule-based span and named entity recognition' api_string_name: span_ruler api_trainable: false From bed23ff291f3e97f5ba6ee42f1a80db7c713b691 Mon Sep 17 00:00:00 2001 From: jademlc <68696651+jademlc@users.noreply.github.com> Date: Wed, 22 Jun 2022 20:45:26 +0200 Subject: [PATCH 18/29] Update serialization methods code block (#11004) * Update serialization methods code block * Update website/docs/usage/saving-loading.md Co-authored-by: Adriane Boyd --- website/docs/usage/saving-loading.md | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/website/docs/usage/saving-loading.md b/website/docs/usage/saving-loading.md index af140e7a7..0fd713a49 100644 --- a/website/docs/usage/saving-loading.md +++ b/website/docs/usage/saving-loading.md @@ -203,11 +203,14 @@ the data to and from a JSON file. 
```python ### {highlight="16-23,25-30"} +import json +from spacy import Language from spacy.util import ensure_path @Language.factory("my_component") class CustomComponent: - def __init__(self): + def __init__(self, nlp: Language, name: str = "my_component"): + self.name = name self.data = [] def __call__(self, doc): @@ -231,7 +234,7 @@ class CustomComponent: # This will receive the directory path + /my_component data_path = path / "data.json" with data_path.open("r", encoding="utf8") as f: - self.data = json.loads(f) + self.data = json.load(f) return self ``` From 3335bb9d0c9df99f20460ed18e07d8844200d7d7 Mon Sep 17 00:00:00 2001 From: Peter Baumgartner <5107405+pmbaumgartner@users.noreply.github.com> Date: Thu, 23 Jun 2022 02:15:28 -0400 Subject: [PATCH 19/29] remove `cuda116` extra from install widget (#11012) --- website/src/widgets/quickstart-install.js | 1 - 1 file changed, 1 deletion(-) diff --git a/website/src/widgets/quickstart-install.js b/website/src/widgets/quickstart-install.js index 926d76ae3..ccc6b56d9 100644 --- a/website/src/widgets/quickstart-install.js +++ b/website/src/widgets/quickstart-install.js @@ -24,7 +24,6 @@ const CUDA = { '11.3': 'cuda113', '11.4': 'cuda114', '11.5': 'cuda115', - '11.6': 'cuda116', } const LANG_EXTRAS = ['ja'] // only for languages with models From f1197d9175927b453312be633cd789157c17a6e7 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Thu, 23 Jun 2022 08:16:38 +0200 Subject: [PATCH 20/29] Add API docs for token attribute symbols (#10836) * Add API docs for token attribute symbols * Remove NBSP's * Fix typo * Rephrase Co-authored-by: svlandeg --- website/docs/api/attributes.md | 78 ++++++++++++++++++++++++++++++++++ website/meta/sidebars.json | 1 + 2 files changed, 79 insertions(+) create mode 100644 website/docs/api/attributes.md diff --git a/website/docs/api/attributes.md b/website/docs/api/attributes.md new file mode 100644 index 000000000..adacd3898 --- /dev/null +++ b/website/docs/api/attributes.md @@ -0,0 +1,78 @@ +--- +title: Attributes +teaser: Token attributes +source: spacy/attrs.pyx +--- + +[Token](/api/token) attributes are specified using internal IDs in many places +including: + +- [`Matcher` patterns](/api/matcher#patterns), +- [`Doc.to_array`](/api/doc#to_array) and + [`Doc.from_array`](/api/doc#from_array) +- [`Doc.has_annotation`](/api/doc#has_annotation) +- [`MultiHashEmbed`](/api/architectures#MultiHashEmbed) Tok2Vec architecture + `attrs` + +> ```python +> import spacy +> from spacy.attrs import DEP +> +> nlp = spacy.blank("en") +> doc = nlp("There are many attributes.") +> +> # DEP always has the same internal value +> assert DEP == 76 +> +> # "DEP" is automatically converted to DEP +> assert DEP == nlp.vocab.strings["DEP"] +> assert doc.has_annotation(DEP) == doc.has_annotation("DEP") +> +> # look up IDs in spacy.attrs.IDS +> from spacy.attrs import IDS +> assert IDS["DEP"] == DEP +> ``` + +All methods automatically convert between the string version of an ID (`"DEP"`) +and the internal integer symbols (`DEP`). The internal IDs can be imported from +`spacy.attrs` or retrieved from the [`StringStore`](/api/stringstore). A map +from string attribute names to internal attribute IDs is stored in +`spacy.attrs.IDS`. + +The corresponding [`Token` object attributes](/api/token#attributes) can be +accessed using the same names in lowercase, e.g. `token.orth` or `token.length`. +For attributes that represent string values, the internal integer ID is +accessed as `Token.attr`, e.g. 
`token.dep`, while the string value can be +retrieved by appending `_` as in `token.dep_`. + + +| Attribute | Description | +| ------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `DEP` | The token's dependency label. ~~str~~ | +| `ENT_ID` | The token's entity ID (`ent_id`). ~~str~~ | +| `ENT_IOB` | The IOB part of the token's entity tag. Uses custom integer vaues rather than the string store: unset is `0`, `I` is `1`, `O` is `2`, and `B` is `3`. ~~str~~ | +| `ENT_KB_ID` | The token's entity knowledge base ID. ~~str~~ | +| `ENT_TYPE` | The token's entity label. ~~str~~ | +| `IS_ALPHA` | Token text consists of alphabetic characters. ~~bool~~ | +| `IS_ASCII` | Token text consists of ASCII characters. ~~bool~~ | +| `IS_DIGIT` | Token text consists of digits. ~~bool~~ | +| `IS_LOWER` | Token text is in lowercase. ~~bool~~ | +| `IS_PUNCT` | Token is punctuation. ~~bool~~ | +| `IS_SPACE` | Token is whitespace. ~~bool~~ | +| `IS_STOP` | Token is a stop word. ~~bool~~ | +| `IS_TITLE` | Token text is in titlecase. ~~bool~~ | +| `IS_UPPER` | Token text is in uppercase. ~~bool~~ | +| `LEMMA` | The token's lemma. ~~str~~ | +| `LENGTH` | The length of the token text. ~~int~~ | +| `LIKE_EMAIL` | Token text resembles an email address. ~~bool~~ | +| `LIKE_NUM` | Token text resembles a number. ~~bool~~ | +| `LIKE_URL` | Token text resembles a URL. ~~bool~~ | +| `LOWER` | The lowercase form of the token text. ~~str~~ | +| `MORPH` | The token's morphological analysis. ~~MorphAnalysis~~ | +| `NORM` | The normalized form of the token text. ~~str~~ | +| `ORTH` | The exact verbatim text of a token. ~~str~~ | +| `POS` | The token's universal part of speech (UPOS). ~~str~~ | +| `SENT_START` | Token is start of sentence. ~~bool~~ | +| `SHAPE` | The token's shape. ~~str~~ | +| `SPACY` | Token has a trailing space. ~~bool~~ | +| `TAG` | The token's fine-grained part of speech. 
~~str~~ | diff --git a/website/meta/sidebars.json b/website/meta/sidebars.json index c23f0a255..1bc395a66 100644 --- a/website/meta/sidebars.json +++ b/website/meta/sidebars.json @@ -124,6 +124,7 @@ { "label": "Other", "items": [ + { "text": "Attributes", "url": "/api/attributes" }, { "text": "Corpus", "url": "/api/corpus" }, { "text": "KnowledgeBase", "url": "/api/kb" }, { "text": "Lookups", "url": "/api/lookups" }, From d4e3f43639a963125bad123abe9514a1e6da81fc Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Thu, 23 Jun 2022 09:50:25 +0200 Subject: [PATCH 21/29] Update thinc version to switch back to blis v0.7 (#11014) --- pyproject.toml | 2 +- requirements.txt | 2 +- setup.cfg | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 4fea41be2..4e388e54f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ requires = [ "cymem>=2.0.2,<2.1.0", "preshed>=3.0.2,<3.1.0", "murmurhash>=0.28.0,<1.1.0", - "thinc>=8.1.0.dev2,<8.2.0", + "thinc>=8.1.0.dev3,<8.2.0", "pathy", "numpy>=1.15.0", ] diff --git a/requirements.txt b/requirements.txt index 082ef1522..3b77140f6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ spacy-legacy>=3.0.9,<3.1.0 spacy-loggers>=1.0.0,<2.0.0 cymem>=2.0.2,<2.1.0 preshed>=3.0.2,<3.1.0 -thinc>=8.1.0.dev2,<8.2.0 +thinc>=8.1.0.dev3,<8.2.0 ml_datasets>=0.2.0,<0.3.0 murmurhash>=0.28.0,<1.1.0 wasabi>=0.9.1,<1.1.0 diff --git a/setup.cfg b/setup.cfg index d317847ba..ba5b46ff0 100644 --- a/setup.cfg +++ b/setup.cfg @@ -38,7 +38,7 @@ setup_requires = cymem>=2.0.2,<2.1.0 preshed>=3.0.2,<3.1.0 murmurhash>=0.28.0,<1.1.0 - thinc>=8.1.0.dev2,<8.2.0 + thinc>=8.1.0.dev3,<8.2.0 install_requires = # Our libraries spacy-legacy>=3.0.9,<3.1.0 @@ -46,7 +46,7 @@ install_requires = murmurhash>=0.28.0,<1.1.0 cymem>=2.0.2,<2.1.0 preshed>=3.0.2,<3.1.0 - thinc>=8.1.0.dev2,<8.2.0 + thinc>=8.1.0.dev3,<8.2.0 wasabi>=0.9.1,<1.1.0 srsly>=2.4.3,<3.0.0 catalogue>=2.0.6,<2.1.0 From f8116078ce2c5760ae218bc1657977ed116fcf18 Mon Sep 17 00:00:00 2001 From: Sofie Van Landeghem Date: Thu, 23 Jun 2022 09:57:46 +0100 Subject: [PATCH 22/29] disable failing test because Stanford servers are down (#11015) --- spacy/tests/training/test_readers.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/spacy/tests/training/test_readers.py b/spacy/tests/training/test_readers.py index 8c5c81625..eb07a52b1 100644 --- a/spacy/tests/training/test_readers.py +++ b/spacy/tests/training/test_readers.py @@ -60,11 +60,12 @@ def test_readers(): assert isinstance(extra_corpus, Callable) +# TODO: enable IMDB test once Stanford servers are back up and running @pytest.mark.slow @pytest.mark.parametrize( "reader,additional_config", [ - ("ml_datasets.imdb_sentiment.v1", {"train_limit": 10, "dev_limit": 10}), + # ("ml_datasets.imdb_sentiment.v1", {"train_limit": 10, "dev_limit": 10}), ("ml_datasets.dbpedia.v1", {"train_limit": 10, "dev_limit": 10}), ("ml_datasets.cmu_movies.v1", {"limit": 10, "freq_cutoff": 200, "split": 0.8}), ], From 4cd8b4cc222bebc2108eb52b4400eea562db4ac2 Mon Sep 17 00:00:00 2001 From: Dmytro Sadovnychyi Date: Thu, 23 Jun 2022 17:53:00 +0200 Subject: [PATCH 23/29] Fix some of the broken links on universe pages (#11011) Currently some of the "AUTHOR INFO" links (e.g. here[0]) are broken: ``` https://github.com/https://github.com/explosion ``` [0] https://spacy.io/universe/project/spacy-experimental Also one remains broken with `https://szegedai.github.io/`. 
--- website/meta/universe.json | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/website/meta/universe.json b/website/meta/universe.json index 4a3ec6225..ab64fe895 100644 --- a/website/meta/universe.json +++ b/website/meta/universe.json @@ -84,7 +84,7 @@ "code_language": "python", "author": "Leap Beyond", "author_links": { - "github": "https://github.com/LeapBeyond", + "github": "LeapBeyond", "website": "https://leapbeyond.ai" }, "code_example": [ @@ -107,8 +107,8 @@ "code_language": "python", "author": "Peter Baumgartner", "author_links": { - "twitter" : "https://twitter.com/pmbaumgartner", - "github": "https://github.com/pmbaumgartner", + "twitter" : "pmbaumgartner", + "github": "pmbaumgartner", "website": "https://www.peterbaumgartner.com/" }, "code_example": [ @@ -127,8 +127,8 @@ "code_language": "python", "author": "Explosion", "author_links": { - "twitter" : "https://twitter.com/explosion_ai", - "github": "https://github.com/explosion", + "twitter" : "explosion_ai", + "github": "explosion", "website": "https://explosion.ai/" }, "code_example": [ @@ -600,8 +600,8 @@ "code_language": "python", "author": "Keith Rozario", "author_links": { - "twitter" : "https://twitter.com/keithrozario", - "github": "https://github.com/keithrozario", + "twitter" : "keithrozario", + "github": "keithrozario", "website": "https://www.keithrozario.com" }, "code_example": [ @@ -2324,7 +2324,7 @@ "author": "Daniel Whitenack & Chris Benson", "author_links": { "website": "https://changelog.com/practicalai", - "twitter": "https://twitter.com/PracticalAIFM" + "twitter": "PracticalAIFM" }, "category": ["podcasts"] }, From 9738b69c0e3babb365cafaa26b872ca1028c9696 Mon Sep 17 00:00:00 2001 From: Peter Baumgartner <5107405+pmbaumgartner@users.noreply.github.com> Date: Fri, 24 Jun 2022 02:11:29 -0400 Subject: [PATCH 24/29] Update Code Conventions.md (#11018) --- extra/DEVELOPER_DOCS/Code Conventions.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/extra/DEVELOPER_DOCS/Code Conventions.md b/extra/DEVELOPER_DOCS/Code Conventions.md index 37cd8ff27..31a87d362 100644 --- a/extra/DEVELOPER_DOCS/Code Conventions.md +++ b/extra/DEVELOPER_DOCS/Code Conventions.md @@ -455,6 +455,10 @@ Regression tests are tests that refer to bugs reported in specific issues. They The test suite also provides [fixtures](https://github.com/explosion/spaCy/blob/master/spacy/tests/conftest.py) for different language tokenizers that can be used as function arguments of the same name and will be passed in automatically. Those should only be used for tests related to those specific languages. We also have [test utility functions](https://github.com/explosion/spaCy/blob/master/spacy/tests/util.py) for common operations, like creating a temporary file. +### Testing Cython Code + +If you're developing Cython code (`.pyx` files), those extensions will need to be built before the test runner can test that code - otherwise it's going to run the tests with stale code from the last time the extension was built. You can build the extensions locally with `python setup.py build_ext -i`. + ### Constructing objects and state Test functions usually follow the same simple structure: they set up some state, perform the operation you want to test and `assert` conditions that you expect to be true, usually before and after the operation. 
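As a quick illustration of the set-up, operate, assert structure described in the Code Conventions changes above, here is a minimal sketch of such a test. It is illustrative only: the `en_tokenizer` fixture name follows the per-language tokenizer fixture convention mentioned there, and the expected token counts are assumptions made up for this example rather than anything taken from the patches in this series.

```python
# Minimal sketch of the test structure described above: set up state, run the
# operation under test, then assert the conditions you expect to hold.
# The `en_tokenizer` fixture name follows the per-language fixture convention
# mentioned in the conventions document; the expected token counts are
# assumptions made up for this example.
import pytest


@pytest.mark.parametrize(
    "text,expected_len",
    [("Hello, world!", 4), ("spaCy", 1)],
)
def test_en_tokenizer_token_counts(en_tokenizer, text, expected_len):
    # Operation under test: tokenize the input text
    doc = en_tokenizer(text)
    # Assert conditions on the resulting state
    assert len(doc) == expected_len
    assert doc.text == text
```

Using the language-tokenizer fixture keeps a test like this independent of any trained pipeline, which matches the testing conventions outlined above.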
From bffe54d02b840a73f8dec4d8cd50056507695853 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Fri, 24 Jun 2022 08:48:58 +0200 Subject: [PATCH 25/29] Set version to v3.4.0 --- spacy/about.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spacy/about.py b/spacy/about.py index 03eabc2e9..ef0358e1a 100644 --- a/spacy/about.py +++ b/spacy/about.py @@ -1,6 +1,6 @@ # fmt: off __title__ = "spacy" -__version__ = "3.3.0" +__version__ = "3.4.0" __download_url__ = "https://github.com/explosion/spacy-models/releases/download" __compatibility__ = "https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json" __projects__ = "https://github.com/explosion/projects" From d9320db7db74b970b3751e38ed6f14de5b7d16d5 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Fri, 1 Apr 2022 10:42:25 +0200 Subject: [PATCH 26/29] Temporarily skip tests that require models/compat --- .github/azure-steps.yml | 34 +++++++++++++++++----------------- spacy/tests/test_cli.py | 2 ++ 2 files changed, 19 insertions(+), 17 deletions(-) diff --git a/.github/azure-steps.yml b/.github/azure-steps.yml index d7233328a..41f743feb 100644 --- a/.github/azure-steps.yml +++ b/.github/azure-steps.yml @@ -64,12 +64,12 @@ steps: displayName: "Run GPU tests" condition: eq(${{ parameters.gpu }}, true) - - script: | - python -m spacy download ca_core_news_sm - python -m spacy download ca_core_news_md - python -c "import spacy; nlp=spacy.load('ca_core_news_sm'); doc=nlp('test')" - displayName: 'Test download CLI' - condition: eq(variables['python_version'], '3.8') +# - script: | +# python -m spacy download ca_core_news_sm +# python -m spacy download ca_core_news_md +# python -c "import spacy; nlp=spacy.load('ca_core_news_sm'); doc=nlp('test')" +# displayName: 'Test download CLI' +# condition: eq(variables['python_version'], '3.8') - script: | python -m spacy convert extra/example_data/ner_example_data/ner-token-per-line-conll2003.json . 
@@ -93,17 +93,17 @@ steps: displayName: 'Test train CLI' condition: eq(variables['python_version'], '3.8') - - script: | - python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_sm'}; config.to_disk('ner_source_sm.cfg')" - PYTHONWARNINGS="error,ignore::DeprecationWarning" python -m spacy assemble ner_source_sm.cfg output_dir - displayName: 'Test assemble CLI' - condition: eq(variables['python_version'], '3.8') - - - script: | - python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_md'}; config.to_disk('ner_source_md.cfg')" - python -m spacy assemble ner_source_md.cfg output_dir 2>&1 | grep -q W113 - displayName: 'Test assemble CLI vectors warning' - condition: eq(variables['python_version'], '3.8') +# - script: | +# python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_sm'}; config.to_disk('ner_source_sm.cfg')" +# PYTHONWARNINGS="error,ignore::DeprecationWarning" python -m spacy assemble ner_source_sm.cfg output_dir +# displayName: 'Test assemble CLI' +# condition: eq(variables['python_version'], '3.8') +# +# - script: | +# python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_md'}; config.to_disk('ner_source_md.cfg')" +# python -m spacy assemble ner_source_md.cfg output_dir 2>&1 | grep -q W113 +# displayName: 'Test assemble CLI vectors warning' +# condition: eq(variables['python_version'], '3.8') - script: | python .github/validate_universe_json.py website/meta/universe.json diff --git a/spacy/tests/test_cli.py b/spacy/tests/test_cli.py index 838e00369..fe8b3a8a1 100644 --- a/spacy/tests/test_cli.py +++ b/spacy/tests/test_cli.py @@ -589,6 +589,7 @@ def test_string_to_list_intify(value): assert string_to_list(value, intify=True) == [1, 2, 3] +@pytest.mark.skip(reason="Temporarily skip for dev version") def test_download_compatibility(): spec = SpecifierSet("==" + about.__version__) spec.prereleases = False @@ -599,6 +600,7 @@ def test_download_compatibility(): assert get_minor_version(about.__version__) == get_minor_version(version) +@pytest.mark.skip(reason="Temporarily skip for dev version") def test_validate_compatibility_table(): spec = SpecifierSet("==" + about.__version__) spec.prereleases = False From 8f1ba4de582c5e5282c022a7713a56b47302cabe Mon Sep 17 00:00:00 2001 From: Madeesh Kannan Date: Fri, 24 Jun 2022 13:39:52 +0200 Subject: [PATCH 27/29] Backport parser/alignment optimizations from `feature/refactor-parser` (#10952) --- spacy/training/alignment_array.pyx | 20 +++-- spacy/training/example.pyx | 129 +++++++++++++++++++++++------ spacy/util.py | 7 ++ 3 files changed, 123 insertions(+), 33 deletions(-) diff --git a/spacy/training/alignment_array.pyx b/spacy/training/alignment_array.pyx index b58f08786..01e9d9bf8 100644 --- a/spacy/training/alignment_array.pyx +++ b/spacy/training/alignment_array.pyx @@ -1,33 +1,39 @@ from typing import List from ..errors import Errors import numpy +from libc.stdint cimport int32_t cdef class AlignmentArray: """AlignmentArray is similar to Thinc's Ragged with two simplfications: indexing returns numpy arrays and this type can only be used for CPU arrays. 
- However, these changes make AlginmentArray more efficient for indexing in a + However, these changes make AlignmentArray more efficient for indexing in a tight loop.""" __slots__ = [] def __init__(self, alignment: List[List[int]]): - self._lengths = None - self._starts_ends = numpy.zeros(len(alignment) + 1, dtype="i") - cdef int data_len = 0 cdef int outer_len cdef int idx + + self._starts_ends = numpy.zeros(len(alignment) + 1, dtype='int32') + cdef int32_t* starts_ends_ptr = self._starts_ends.data + for idx, outer in enumerate(alignment): outer_len = len(outer) - self._starts_ends[idx + 1] = self._starts_ends[idx] + outer_len + starts_ends_ptr[idx + 1] = starts_ends_ptr[idx] + outer_len data_len += outer_len - self._data = numpy.empty(data_len, dtype="i") + self._lengths = None + self._data = numpy.empty(data_len, dtype="int32") + idx = 0 + cdef int32_t* data_ptr = self._data.data + for outer in alignment: for inner in outer: - self._data[idx] = inner + data_ptr[idx] = inner idx += 1 def __getitem__(self, idx): diff --git a/spacy/training/example.pyx b/spacy/training/example.pyx index 045f0b483..473364f93 100644 --- a/spacy/training/example.pyx +++ b/spacy/training/example.pyx @@ -13,7 +13,7 @@ from .iob_utils import biluo_tags_to_spans, remove_bilu_prefix from ..errors import Errors, Warnings from ..pipeline._parser_internals import nonproj from ..tokens.token cimport MISSING_DEP -from ..util import logger, to_ternary_int +from ..util import logger, to_ternary_int, all_equal cpdef Doc annotations_to_doc(vocab, tok_annot, doc_annot): @@ -151,50 +151,127 @@ cdef class Example: self._y_sig = y_sig return self._cached_alignment + + def _get_aligned_vectorized(self, align, gold_values): + # Fast path for Doc attributes/fields that are predominantly a single value, + # i.e., TAG, POS, MORPH. + x2y_single_toks = [] + x2y_single_toks_i = [] + + x2y_multiple_toks = [] + x2y_multiple_toks_i = [] + + # Gather indices of gold tokens aligned to the candidate tokens into two buckets. + # Bucket 1: All tokens that have a one-to-one alignment. + # Bucket 2: All tokens that have a one-to-many alignment. + for idx, token in enumerate(self.predicted): + aligned_gold_i = align[token.i] + aligned_gold_len = len(aligned_gold_i) + + if aligned_gold_len == 1: + x2y_single_toks.append(aligned_gold_i.item()) + x2y_single_toks_i.append(idx) + elif aligned_gold_len > 1: + x2y_multiple_toks.append(aligned_gold_i) + x2y_multiple_toks_i.append(idx) + + # Map elements of the first bucket directly to the output array. + output = numpy.full(len(self.predicted), None) + output[x2y_single_toks_i] = gold_values[x2y_single_toks].squeeze() + + # Collapse many-to-one alignments into one-to-one alignments if they + # share the same value. Map to None in all other cases. + for i in range(len(x2y_multiple_toks)): + aligned_gold_values = gold_values[x2y_multiple_toks[i]] + + # If all aligned tokens have the same value, use it. + if all_equal(aligned_gold_values): + x2y_multiple_toks[i] = aligned_gold_values[0].item() + else: + x2y_multiple_toks[i] = None + + output[x2y_multiple_toks_i] = x2y_multiple_toks + + return output.tolist() + + + def _get_aligned_non_vectorized(self, align, gold_values): + # Slower path for fields that return multiple values (resulting + # in ragged arrays that cannot be vectorized trivially). 
+ output = [None] * len(self.predicted) + + for token in self.predicted: + aligned_gold_i = align[token.i] + values = gold_values[aligned_gold_i].ravel() + if len(values) == 1: + output[token.i] = values.item() + elif all_equal(values): + # If all aligned tokens have the same value, use it. + output[token.i] = values[0].item() + + return output + + def get_aligned(self, field, as_string=False): """Return an aligned array for a token attribute.""" align = self.alignment.x2y + gold_values = self.reference.to_array([field]) + + if len(gold_values.shape) == 1: + output = self._get_aligned_vectorized(align, gold_values) + else: + output = self._get_aligned_non_vectorized(align, gold_values) vocab = self.reference.vocab - gold_values = self.reference.to_array([field]) - output = [None] * len(self.predicted) - for token in self.predicted: - values = gold_values[align[token.i]] - values = values.ravel() - if len(values) == 0: - output[token.i] = None - elif len(values) == 1: - output[token.i] = values[0] - elif len(set(list(values))) == 1: - # If all aligned tokens have the same value, use it. - output[token.i] = values[0] - else: - output[token.i] = None if as_string and field not in ["ENT_IOB", "SENT_START"]: output = [vocab.strings[o] if o is not None else o for o in output] + return output def get_aligned_parse(self, projectivize=True): cand_to_gold = self.alignment.x2y gold_to_cand = self.alignment.y2x - aligned_heads = [None] * self.x.length - aligned_deps = [None] * self.x.length - has_deps = [token.has_dep() for token in self.y] - has_heads = [token.has_head() for token in self.y] heads = [token.head.i for token in self.y] deps = [token.dep_ for token in self.y] + if projectivize: proj_heads, proj_deps = nonproj.projectivize(heads, deps) + has_deps = [token.has_dep() for token in self.y] + has_heads = [token.has_head() for token in self.y] + # ensure that missing data remains missing heads = [h if has_heads[i] else heads[i] for i, h in enumerate(proj_heads)] deps = [d if has_deps[i] else deps[i] for i, d in enumerate(proj_deps)] - for cand_i in range(self.x.length): - if cand_to_gold.lengths[cand_i] == 1: - gold_i = cand_to_gold[cand_i][0] - if gold_to_cand.lengths[heads[gold_i]] == 1: - aligned_heads[cand_i] = int(gold_to_cand[heads[gold_i]][0]) - aligned_deps[cand_i] = deps[gold_i] - return aligned_heads, aligned_deps + + # Select all candidate tokens that are aligned to a single gold token. + c2g_single_toks = numpy.where(cand_to_gold.lengths == 1)[0] + + # Fetch all aligned gold token incides. + if c2g_single_toks.shape == cand_to_gold.lengths.shape: + # This the most likely case. + gold_i = cand_to_gold[:].squeeze() + else: + gold_i = numpy.vectorize(lambda x: cand_to_gold[int(x)][0])(c2g_single_toks).squeeze() + + # Fetch indices of all gold heads for the aligned gold tokens. + heads = numpy.asarray(heads, dtype='i') + gold_head_i = heads[gold_i] + + # Select all gold tokens that are heads of the previously selected + # gold tokens (and are aligned to a single candidate token). + g2c_len_heads = gold_to_cand.lengths[gold_head_i] + g2c_len_heads = numpy.where(g2c_len_heads == 1)[0] + g2c_i = numpy.vectorize(lambda x: gold_to_cand[int(x)][0])(gold_head_i[g2c_len_heads]).squeeze() + + # Update head/dep alignments with the above. 
+ aligned_heads = numpy.full((self.x.length), None) + aligned_heads[c2g_single_toks[g2c_len_heads]] = g2c_i + + deps = numpy.asarray(deps) + aligned_deps = numpy.full((self.x.length), None) + aligned_deps[c2g_single_toks] = deps[gold_i] + + return aligned_heads.tolist(), aligned_deps.tolist() def get_aligned_sent_starts(self): """Get list of SENT_START attributes aligned to the predicted tokenization. diff --git a/spacy/util.py b/spacy/util.py index 9b871b87b..4f21d618a 100644 --- a/spacy/util.py +++ b/spacy/util.py @@ -1716,3 +1716,10 @@ def packages_distributions() -> Dict[str, List[str]]: for pkg in (dist.read_text("top_level.txt") or "").split(): pkg_to_dist[pkg].append(dist.metadata["Name"]) return dict(pkg_to_dist) + + +def all_equal(iterable): + """Return True if all the elements are equal to each other + (or if the input is an empty sequence), False otherwise.""" + g = itertools.groupby(iterable) + return next(g, True) and not next(g, False) From 4155a59d470c231b5bfca26044a6d4f93bea7e48 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 27 Jun 2022 09:35:35 +0200 Subject: [PATCH 28/29] Auto-format code with black (#11022) Co-authored-by: explosion-bot --- spacy/tests/parser/test_ner.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/spacy/tests/parser/test_ner.py b/spacy/tests/parser/test_ner.py index 53bb2d554..00889efdc 100644 --- a/spacy/tests/parser/test_ner.py +++ b/spacy/tests/parser/test_ner.py @@ -158,13 +158,18 @@ def test_issue3209(): def test_labels_from_BILUO(): - """Test that labels are inferred correctly when there's a - in label. - """ + """Test that labels are inferred correctly when there's a - in label.""" nlp = English() ner = nlp.add_pipe("ner") ner.add_label("LARGE-ANIMAL") nlp.initialize() - move_names = ["O", "B-LARGE-ANIMAL", "I-LARGE-ANIMAL", "L-LARGE-ANIMAL", "U-LARGE-ANIMAL"] + move_names = [ + "O", + "B-LARGE-ANIMAL", + "I-LARGE-ANIMAL", + "L-LARGE-ANIMAL", + "U-LARGE-ANIMAL", + ] labels = {"LARGE-ANIMAL"} assert ner.move_names == move_names assert set(ner.labels) == labels From 308a612ec98f27098fe7f69ec20be0b5e88d51fa Mon Sep 17 00:00:00 2001 From: Eric Holscher <25510+ericholscher@users.noreply.github.com> Date: Mon, 27 Jun 2022 00:45:22 -0700 Subject: [PATCH 29/29] Remove `simply` (#11017) I was reading this page, and as a relative beginner, nothing about it was simple :) --- website/docs/api/architectures.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/docs/api/architectures.md b/website/docs/api/architectures.md index 2bddcb28c..2537faff6 100644 --- a/website/docs/api/architectures.md +++ b/website/docs/api/architectures.md @@ -587,7 +587,7 @@ consists of either two or three subnetworks: run once for each batch. - **lower**: Construct a feature-specific vector for each `(token, feature)` pair. This is also run once for each batch. Constructing the state - representation is then simply a matter of summing the component features and + representation is then a matter of summing the component features and applying the non-linearity. - **upper** (optional): A feed-forward network that predicts scores from the state representation. If not present, the output from the lower model is used @@ -628,7 +628,7 @@ same signature, but the `use_upper` argument was `True` by default. > ``` Build a tagger model, using a provided token-to-vector component. 
The tagger -model simply adds a linear layer with softmax activation to predict scores given +model adds a linear layer with softmax activation to predict scores given the token vectors. | Name | Description | @@ -920,5 +920,5 @@ A function that reads an existing `KnowledgeBase` from file. A function that takes as input a [`KnowledgeBase`](/api/kb) and a [`Span`](/api/span) object denoting a named entity, and returns a list of plausible [`Candidate`](/api/kb/#candidate) objects. The default -`CandidateGenerator` simply uses the text of a mention to find its potential +`CandidateGenerator` uses the text of a mention to find its potential aliases in the `KnowledgeBase`. Note that this function is case-dependent.