From c067b5264cfb42b38f46068af7316297411b43d5 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Tue, 27 Jun 2023 10:47:07 +0200 Subject: [PATCH 01/27] Address issues with source with component names and replacing listeners (#12701) When sourcing a component, the object from the original pipeline is added to the new pipeline as the same object. This creates a situation where there are several attributes that cannot be in sync between the original pipeline and the new pipeline at the same time for this one object: * component.name * component.listener_map / component.listening_components for tok2vec and transformer When running replace_listeners on a component, the config is not updated correctly if the state of the component is incorrect for the current pipeline (in particular changes that should be applied from model.attrs["replace_listener_cfg"] as used in spacy-transformers) due to the fact that: * find_listeners relies on component.name to set the name in the listener_map * replace_listeners relies on listener_map to determine how to modify the configs In addition, there are several places where pipeline components are modified and the listener map and/or internal component names aren't currently updated. In cases where there is a component shared by two pipelines that cannot be in sync, this PR chooses to prioritize the most recently modified or initialized pipeline. There is no actual solution with the current source behavior that will make both pipelines usable, so the current pipeline is updated whenever components are added/renamed/removed or the pipeline is initialized for training. --- spacy/language.py | 44 +++++++------- spacy/tests/pipeline/test_tok2vec.py | 91 ++++++++++++++++++++++++---- spacy/training/initialize.py | 3 +- 3 files changed, 105 insertions(+), 33 deletions(-) diff --git a/spacy/language.py b/spacy/language.py index 80077bf69..fd616483b 100644 --- a/spacy/language.py +++ b/spacy/language.py @@ -739,6 +739,11 @@ class Language: ) ) pipe = source.get_pipe(source_name) + # There is no actual solution here. Either the component has the right + # name for the source pipeline or the component has the right name for + # the current pipeline. This prioritizes the current pipeline. + if hasattr(pipe, "name"): + pipe.name = name # Make sure the source config is interpolated so we don't end up with # orphaned variables in our final config source_config = source.config.interpolate() @@ -816,6 +821,7 @@ class Language: pipe_index = self._get_pipe_index(before, after, first, last) self._pipe_meta[name] = self.get_factory_meta(factory_name) self._components.insert(pipe_index, (name, pipe_component)) + self._link_components() return pipe_component def _get_pipe_index( @@ -951,6 +957,7 @@ class Language: if old_name in self._config["initialize"]["components"]: init_cfg = self._config["initialize"]["components"].pop(old_name) self._config["initialize"]["components"][new_name] = init_cfg + self._link_components() def remove_pipe(self, name: str) -> Tuple[str, PipeCallable]: """Remove a component from the pipeline. @@ -974,6 +981,7 @@ class Language: # Make sure the name is also removed from the set of disabled components if name in self.disabled: self._disabled.remove(name) + self._link_components() return removed def disable_pipe(self, name: str) -> None: @@ -1702,8 +1710,16 @@ class Language: # The problem is we need to do it during deserialization...And the # components don't receive the pipeline then. 
So this does have to be # here :( + # First, fix up all the internal component names in case they have + # gotten out of sync due to sourcing components from different + # pipelines, since find_listeners uses proc2.name for the listener + # map. + for name, proc in self.pipeline: + if hasattr(proc, "name"): + proc.name = name for i, (name1, proc1) in enumerate(self.pipeline): if isinstance(proc1, ty.ListenedToComponent): + proc1.listener_map = {} for name2, proc2 in self.pipeline[i + 1 :]: proc1.find_listeners(proc2) @@ -1837,6 +1853,7 @@ class Language: raw_config=raw_config, ) else: + assert "source" in pipe_cfg # We need the sourced components to reference the same # vocab without modifying the current vocab state **AND** # we still want to load the source model vectors to perform @@ -1856,6 +1873,10 @@ class Language: source_name = pipe_cfg.get("component", pipe_name) listeners_replaced = False if "replace_listeners" in pipe_cfg: + # Make sure that the listened-to component has the + # state of the source pipeline listener map so that the + # replace_listeners method below works as intended. + source_nlps[model]._link_components() for name, proc in source_nlps[model].pipeline: if source_name in getattr(proc, "listening_components", []): source_nlps[model].replace_listeners( @@ -1867,6 +1888,8 @@ class Language: nlp.add_pipe( source_name, source=source_nlps[model], name=pipe_name ) + # At this point after nlp.add_pipe, the listener map + # corresponds to the new pipeline. if model not in source_nlp_vectors_hashes: source_nlp_vectors_hashes[model] = hash( source_nlps[model].vocab.vectors.to_bytes( @@ -1921,27 +1944,6 @@ class Language: raise ValueError( Errors.E942.format(name="pipeline_creation", value=type(nlp)) ) - # Detect components with listeners that are not frozen consistently - for name, proc in nlp.pipeline: - if isinstance(proc, ty.ListenedToComponent): - # Remove listeners not in the pipeline - listener_names = proc.listening_components - unused_listener_names = [ - ll for ll in listener_names if ll not in nlp.pipe_names - ] - for listener_name in unused_listener_names: - for listener in proc.listener_map.get(listener_name, []): - proc.remove_listener(listener, listener_name) - - for listener_name in proc.listening_components: - # e.g. tok2vec/transformer - # If it's a component sourced from another pipeline, we check if - # the tok2vec listeners should be replaced with standalone tok2vec - # models (e.g. 
so component can be frozen without its performance - # degrading when other components/tok2vec are updated) - paths = sourced.get(listener_name, {}).get("replace_listeners", []) - if paths: - nlp.replace_listeners(name, listener_name, paths) return nlp def replace_listeners( diff --git a/spacy/tests/pipeline/test_tok2vec.py b/spacy/tests/pipeline/test_tok2vec.py index 76c7d6f62..998f0472c 100644 --- a/spacy/tests/pipeline/test_tok2vec.py +++ b/spacy/tests/pipeline/test_tok2vec.py @@ -192,8 +192,7 @@ def test_tok2vec_listener(with_vectors): for tag in t[1]["tags"]: tagger.add_label(tag) - # Check that the Tok2Vec component finds it listeners - assert tok2vec.listeners == [] + # Check that the Tok2Vec component finds its listeners optimizer = nlp.initialize(lambda: train_examples) assert tok2vec.listeners == [tagger_tok2vec] @@ -221,7 +220,6 @@ def test_tok2vec_listener_callback(): assert nlp.pipe_names == ["tok2vec", "tagger"] tagger = nlp.get_pipe("tagger") tok2vec = nlp.get_pipe("tok2vec") - nlp._link_components() docs = [nlp.make_doc("A random sentence")] tok2vec.model.initialize(X=docs) gold_array = [[1.0 for tag in ["V", "Z"]] for word in docs] @@ -430,29 +428,46 @@ def test_replace_listeners_from_config(): nlp.to_disk(dir_path) base_model = str(dir_path) new_config = { - "nlp": {"lang": "en", "pipeline": ["tok2vec", "tagger", "ner"]}, + "nlp": { + "lang": "en", + "pipeline": ["tok2vec", "tagger2", "ner3", "tagger4"], + }, "components": { "tok2vec": {"source": base_model}, - "tagger": { + "tagger2": { "source": base_model, + "component": "tagger", "replace_listeners": ["model.tok2vec"], }, - "ner": {"source": base_model}, + "ner3": { + "source": base_model, + "component": "ner", + }, + "tagger4": { + "source": base_model, + "component": "tagger", + }, }, } new_nlp = util.load_model_from_config(new_config, auto_fill=True) new_nlp.initialize(lambda: examples) tok2vec = new_nlp.get_pipe("tok2vec") - tagger = new_nlp.get_pipe("tagger") - ner = new_nlp.get_pipe("ner") - assert tok2vec.listening_components == ["ner"] + tagger = new_nlp.get_pipe("tagger2") + ner = new_nlp.get_pipe("ner3") + assert "ner" not in new_nlp.pipe_names + assert "tagger" not in new_nlp.pipe_names + assert tok2vec.listening_components == ["ner3", "tagger4"] assert any(isinstance(node, Tok2VecListener) for node in ner.model.walk()) assert not any(isinstance(node, Tok2VecListener) for node in tagger.model.walk()) t2v_cfg = new_nlp.config["components"]["tok2vec"]["model"] assert t2v_cfg["@architectures"] == "spacy.Tok2Vec.v2" - assert new_nlp.config["components"]["tagger"]["model"]["tok2vec"] == t2v_cfg + assert new_nlp.config["components"]["tagger2"]["model"]["tok2vec"] == t2v_cfg assert ( - new_nlp.config["components"]["ner"]["model"]["tok2vec"]["@architectures"] + new_nlp.config["components"]["ner3"]["model"]["tok2vec"]["@architectures"] + == "spacy.Tok2VecListener.v1" + ) + assert ( + new_nlp.config["components"]["tagger4"]["model"]["tok2vec"]["@architectures"] == "spacy.Tok2VecListener.v1" ) @@ -544,3 +559,57 @@ def test_tok2vec_listeners_textcat(): assert cats1["imperative"] < 0.9 assert [t.tag_ for t in docs[0]] == ["V", "J", "N"] assert [t.tag_ for t in docs[1]] == ["N", "V", "J", "N"] + + +def test_tok2vec_listener_source_link_name(): + """The component's internal name and the tok2vec listener map correspond + to the most recently modified pipeline. 
+ """ + orig_config = Config().from_str(cfg_string_multi) + nlp1 = util.load_model_from_config(orig_config, auto_fill=True, validate=True) + assert nlp1.get_pipe("tok2vec").listening_components == ["tagger", "ner"] + + nlp2 = English() + nlp2.add_pipe("tok2vec", source=nlp1) + nlp2.add_pipe("tagger", name="tagger2", source=nlp1) + + # there is no way to have the component have the right name for both + # pipelines, right now the most recently modified pipeline is prioritized + assert nlp1.get_pipe("tagger").name == nlp2.get_pipe("tagger2").name == "tagger2" + + # there is no way to have the tok2vec have the right listener map for both + # pipelines, right now the most recently modified pipeline is prioritized + assert nlp2.get_pipe("tok2vec").listening_components == ["tagger2"] + nlp2.add_pipe("ner", name="ner3", source=nlp1) + assert nlp2.get_pipe("tok2vec").listening_components == ["tagger2", "ner3"] + nlp2.remove_pipe("ner3") + assert nlp2.get_pipe("tok2vec").listening_components == ["tagger2"] + nlp2.remove_pipe("tagger2") + assert nlp2.get_pipe("tok2vec").listening_components == [] + + # at this point the tok2vec component corresponds to nlp2 + assert nlp1.get_pipe("tok2vec").listening_components == [] + + # modifying the nlp1 pipeline syncs the tok2vec listener map back to nlp1 + nlp1.add_pipe("sentencizer") + assert nlp1.get_pipe("tok2vec").listening_components == ["tagger", "ner"] + + # modifying nlp2 syncs it back to nlp2 + nlp2.add_pipe("sentencizer") + assert nlp1.get_pipe("tok2vec").listening_components == [] + + +def test_tok2vec_listener_source_replace_listeners(): + orig_config = Config().from_str(cfg_string_multi) + nlp1 = util.load_model_from_config(orig_config, auto_fill=True, validate=True) + assert nlp1.get_pipe("tok2vec").listening_components == ["tagger", "ner"] + nlp1.replace_listeners("tok2vec", "tagger", ["model.tok2vec"]) + assert nlp1.get_pipe("tok2vec").listening_components == ["ner"] + + nlp2 = English() + nlp2.add_pipe("tok2vec", source=nlp1) + assert nlp2.get_pipe("tok2vec").listening_components == [] + nlp2.add_pipe("tagger", source=nlp1) + assert nlp2.get_pipe("tok2vec").listening_components == [] + nlp2.add_pipe("ner", name="ner2", source=nlp1) + assert nlp2.get_pipe("tok2vec").listening_components == ["ner2"] diff --git a/spacy/training/initialize.py b/spacy/training/initialize.py index 39dc06b9e..3a46b6632 100644 --- a/spacy/training/initialize.py +++ b/spacy/training/initialize.py @@ -76,7 +76,8 @@ def init_nlp(config: Config, *, use_gpu: int = -1) -> "Language": with nlp.select_pipes(enable=resume_components): logger.info("Resuming training for: %s", resume_components) nlp.resume_training(sgd=optimizer) - # Make sure that listeners are defined before initializing further + # Make sure that internal component names are synced and listeners are + # defined before initializing further nlp._link_components() with nlp.select_pipes(disable=[*frozen_components, *resume_components]): if T["max_epochs"] == -1: From 65f6c9cd10fb8d61590f954201d8609360c53eb1 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Tue, 27 Jun 2023 17:36:33 +0200 Subject: [PATCH 02/27] Support overriding registered functions in configs (#12623) Support overriding registered functions in configs. Previously the registry name was parsed as a section name rather than as a registry name. 
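As a rough usage sketch (mirroring the new test below; "pipeline_dir" is a
hypothetical path to a pipeline previously saved with an attribute_ruler
component), a registered function such as a component's scorer can now be
overridden directly when loading:

    import spacy

    # Override the registered scorer of the saved attribute_ruler at load
    # time. "pipeline_dir" is a placeholder for a directory created with
    # nlp.to_disk().
    nlp = spacy.load(
        "pipeline_dir",
        config={
            "components": {
                "attribute_ruler": {
                    "scorer": {"@scorers": "spacy.tagger_scorer.v1"}
                }
            }
        },
    )

Nested registered functions inside the overriding block (e.g. a spans_key
supplied via an @misc function, as in the second new test case) are resolved
as well.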
--- .../tests/serialize/test_serialize_config.py | 50 +++++++++++++++++++ spacy/tests/test_misc.py | 27 ++++++++++ spacy/util.py | 28 ++++++++--- 3 files changed, 97 insertions(+), 8 deletions(-) diff --git a/spacy/tests/serialize/test_serialize_config.py b/spacy/tests/serialize/test_serialize_config.py index 3e158ad8b..b36d3ad74 100644 --- a/spacy/tests/serialize/test_serialize_config.py +++ b/spacy/tests/serialize/test_serialize_config.py @@ -13,6 +13,7 @@ from spacy.ml.models import ( build_Tok2Vec_model, ) from spacy.schemas import ConfigSchema, ConfigSchemaPretrain +from spacy.training import Example from spacy.util import ( load_config, load_config_from_str, @@ -422,6 +423,55 @@ def test_config_overrides(): assert nlp.pipe_names == ["tok2vec", "tagger"] +@pytest.mark.filterwarnings("ignore:\\[W036") +def test_config_overrides_registered_functions(): + nlp = spacy.blank("en") + nlp.add_pipe("attribute_ruler") + with make_tempdir() as d: + nlp.to_disk(d) + nlp_re1 = spacy.load( + d, + config={ + "components": { + "attribute_ruler": { + "scorer": {"@scorers": "spacy.tagger_scorer.v1"} + } + } + }, + ) + assert ( + nlp_re1.config["components"]["attribute_ruler"]["scorer"]["@scorers"] + == "spacy.tagger_scorer.v1" + ) + + @registry.misc("test_some_other_key") + def misc_some_other_key(): + return "some_other_key" + + nlp_re2 = spacy.load( + d, + config={ + "components": { + "attribute_ruler": { + "scorer": { + "@scorers": "spacy.overlapping_labeled_spans_scorer.v1", + "spans_key": {"@misc": "test_some_other_key"}, + } + } + } + }, + ) + assert nlp_re2.config["components"]["attribute_ruler"]["scorer"][ + "spans_key" + ] == {"@misc": "test_some_other_key"} + # run dummy evaluation (will return None scores) in order to test that + # the spans_key value in the nested override is working as intended in + # the config + example = Example.from_dict(nlp_re2.make_doc("a b c"), {}) + scores = nlp_re2.evaluate([example]) + assert "spans_some_other_key_f" in scores + + def test_config_interpolation(): config = Config().from_str(nlp_config_string, interpolate=False) assert config["corpora"]["train"]["path"] == "${paths.train}" diff --git a/spacy/tests/test_misc.py b/spacy/tests/test_misc.py index 19163d350..438f458ec 100644 --- a/spacy/tests/test_misc.py +++ b/spacy/tests/test_misc.py @@ -252,6 +252,10 @@ def test_minor_version(a1, a2, b1, b2, is_match): {"training.batch_size": 128, "training.optimizer.learn_rate": 0.01}, {"training": {"batch_size": 128, "optimizer": {"learn_rate": 0.01}}}, ), + ( + {"attribute_ruler.scorer.@scorers": "spacy.tagger_scorer.v1"}, + {"attribute_ruler": {"scorer": {"@scorers": "spacy.tagger_scorer.v1"}}}, + ), ], ) def test_dot_to_dict(dot_notation, expected): @@ -260,6 +264,29 @@ def test_dot_to_dict(dot_notation, expected): assert util.dict_to_dot(result) == dot_notation +@pytest.mark.parametrize( + "dot_notation,expected", + [ + ( + {"token.pos": True, "token._.xyz": True}, + {"token": {"pos": True, "_": {"xyz": True}}}, + ), + ( + {"training.batch_size": 128, "training.optimizer.learn_rate": 0.01}, + {"training": {"batch_size": 128, "optimizer": {"learn_rate": 0.01}}}, + ), + ( + {"attribute_ruler.scorer": {"@scorers": "spacy.tagger_scorer.v1"}}, + {"attribute_ruler": {"scorer": {"@scorers": "spacy.tagger_scorer.v1"}}}, + ), + ], +) +def test_dot_to_dict_overrides(dot_notation, expected): + result = util.dot_to_dict(dot_notation) + assert result == expected + assert util.dict_to_dot(result, for_overrides=True) == dot_notation + + def test_set_dot_to_object(): config = 
{"foo": {"bar": 1, "baz": {"x": "y"}}, "test": {"a": {"b": "c"}}} with pytest.raises(KeyError): diff --git a/spacy/util.py b/spacy/util.py index ec6ab47c0..762699a97 100644 --- a/spacy/util.py +++ b/spacy/util.py @@ -534,7 +534,7 @@ def load_model_from_path( if not meta: meta = get_model_meta(model_path) config_path = model_path / "config.cfg" - overrides = dict_to_dot(config) + overrides = dict_to_dot(config, for_overrides=True) config = load_config(config_path, overrides=overrides) nlp = load_model_from_config( config, @@ -1502,14 +1502,19 @@ def dot_to_dict(values: Dict[str, Any]) -> Dict[str, dict]: return result -def dict_to_dot(obj: Dict[str, dict]) -> Dict[str, Any]: +def dict_to_dot(obj: Dict[str, dict], *, for_overrides: bool = False) -> Dict[str, Any]: """Convert dot notation to a dict. For example: {"token": {"pos": True, "_": {"xyz": True }}} becomes {"token.pos": True, "token._.xyz": True}. - values (Dict[str, dict]): The dict to convert. + obj (Dict[str, dict]): The dict to convert. + for_overrides (bool): Whether to enable special handling for registered + functions in overrides. RETURNS (Dict[str, Any]): The key/value pairs. """ - return {".".join(key): value for key, value in walk_dict(obj)} + return { + ".".join(key): value + for key, value in walk_dict(obj, for_overrides=for_overrides) + } def dot_to_object(config: Config, section: str): @@ -1551,13 +1556,20 @@ def set_dot_to_object(config: Config, section: str, value: Any) -> None: def walk_dict( - node: Dict[str, Any], parent: List[str] = [] + node: Dict[str, Any], parent: List[str] = [], *, for_overrides: bool = False ) -> Iterator[Tuple[List[str], Any]]: - """Walk a dict and yield the path and values of the leaves.""" + """Walk a dict and yield the path and values of the leaves. + + for_overrides (bool): Whether to treat registered functions that start with + @ as final values rather than dicts to traverse. 
+ """ for key, value in node.items(): key_parent = [*parent, key] - if isinstance(value, dict): - yield from walk_dict(value, key_parent) + if isinstance(value, dict) and ( + not for_overrides + or not any(value_key.startswith("@") for value_key in value) + ): + yield from walk_dict(value, key_parent, for_overrides=for_overrides) else: yield (key_parent, value) From 337a360cc7871f768988d60782d83b420fe24270 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Tue, 27 Jun 2023 19:32:17 +0200 Subject: [PATCH 03/27] Use spans_ prefix for default span finder scores (#12753) --- spacy/pipeline/span_finder.py | 8 ++++---- spacy/tests/pipeline/test_span_finder.py | 6 +++--- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/spacy/pipeline/span_finder.py b/spacy/pipeline/span_finder.py index 91be2f2ae..53f5c55be 100644 --- a/spacy/pipeline/span_finder.py +++ b/spacy/pipeline/span_finder.py @@ -53,9 +53,9 @@ DEFAULT_SPAN_FINDER_MODEL = Config().from_str(span_finder_default_config)["model "scorer": {"@scorers": "spacy.span_finder_scorer.v1"}, }, default_score_weights={ - f"span_finder_{DEFAULT_SPANS_KEY}_f": 1.0, - f"span_finder_{DEFAULT_SPANS_KEY}_p": 0.0, - f"span_finder_{DEFAULT_SPANS_KEY}_r": 0.0, + f"spans_{DEFAULT_SPANS_KEY}_f": 1.0, + f"spans_{DEFAULT_SPANS_KEY}_p": 0.0, + f"spans_{DEFAULT_SPANS_KEY}_r": 0.0, }, ) def make_span_finder( @@ -104,7 +104,7 @@ def make_span_finder_scorer(): def span_finder_score(examples: Iterable[Example], **kwargs) -> Dict[str, Any]: kwargs = dict(kwargs) - attr_prefix = "span_finder_" + attr_prefix = "spans_" key = kwargs["spans_key"] kwargs.setdefault("attr", f"{attr_prefix}{key}") kwargs.setdefault( diff --git a/spacy/tests/pipeline/test_span_finder.py b/spacy/tests/pipeline/test_span_finder.py index 1a8789fff..47a8a34a8 100644 --- a/spacy/tests/pipeline/test_span_finder.py +++ b/spacy/tests/pipeline/test_span_finder.py @@ -230,10 +230,10 @@ def test_overfitting_IO(): # Test scoring scores = nlp.evaluate(train_examples) - assert f"span_finder_{SPANS_KEY}_f" in scores + assert f"spans_{SPANS_KEY}_f" in scores # It's not perfect 1.0 F1 because it's designed to overgenerate for now. 
- assert scores[f"span_finder_{SPANS_KEY}_p"] == 0.75 - assert scores[f"span_finder_{SPANS_KEY}_r"] == 1.0 + assert scores[f"spans_{SPANS_KEY}_p"] == 0.75 + assert scores[f"spans_{SPANS_KEY}_r"] == 1.0 # also test that the spancat works for just a single entity in a sentence doc = nlp("London") From fb0da3e097b64c62e01d491338efe410264d9370 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Wed, 28 Jun 2023 09:43:14 +0200 Subject: [PATCH 04/27] Support custom token/lexeme attribute for vectors (#12625) * Support custom token/lexeme attribute for vectors * Fix imports * Back off to ORTH without Vectors.attr * Fallback if vectors.attr doesn't exist * Update docs --- spacy/cli/init_pipeline.py | 2 ++ spacy/errors.py | 3 +++ spacy/ml/staticvectors.py | 10 +++++--- spacy/tests/vocab_vectors/test_vectors.py | 30 +++++++++++++++++++++++ spacy/tokens/doc.pyx | 24 ++++++++++++++---- spacy/tokens/span.pyx | 22 ++++++++++++++--- spacy/tokens/token.pyx | 19 +++++++++----- spacy/training/initialize.py | 11 ++++++++- spacy/vectors.pyx | 17 ++++++++++++- spacy/vocab.pyx | 26 ++++++++++++++------ website/docs/api/architectures.mdx | 2 +- website/docs/api/cli.mdx | 3 ++- website/docs/api/vectors.mdx | 12 +++++---- 13 files changed, 146 insertions(+), 35 deletions(-) diff --git a/spacy/cli/init_pipeline.py b/spacy/cli/init_pipeline.py index e0d048c69..13202cb60 100644 --- a/spacy/cli/init_pipeline.py +++ b/spacy/cli/init_pipeline.py @@ -32,6 +32,7 @@ def init_vectors_cli( name: Optional[str] = Opt(None, "--name", "-n", help="Optional name for the word vectors, e.g. en_core_web_lg.vectors"), verbose: bool = Opt(False, "--verbose", "-V", "-VV", help="Display more information for debugging purposes"), jsonl_loc: Optional[Path] = Opt(None, "--lexemes-jsonl", "-j", help="Location of JSONL-formatted attributes file", hidden=True), + attr: str = Opt("ORTH", "--attr", "-a", help="Optional token attribute to use for vectors, e.g. LOWER or NORM"), # fmt: on ): """Convert word vectors for use with spaCy. Will export an nlp object that @@ -50,6 +51,7 @@ def init_vectors_cli( prune=prune, name=name, mode=mode, + attr=attr, ) msg.good(f"Successfully converted {len(nlp.vocab.vectors)} vectors") nlp.to_disk(output_dir) diff --git a/spacy/errors.py b/spacy/errors.py index a95f0c8a2..db1a886aa 100644 --- a/spacy/errors.py +++ b/spacy/errors.py @@ -216,6 +216,9 @@ class Warnings(metaclass=ErrorsWithCodes): W123 = ("Argument `enable` with value {enable} does not contain all values specified in the config option " "`enabled` ({enabled}). Be aware that this might affect other components in your pipeline.") W124 = ("{host}:{port} is already in use, using the nearest available port {serve_port} as an alternative.") + W125 = ("The StaticVectors key_attr is no longer used. 
To set a custom " + "key attribute for vectors, configure it through Vectors(attr=) or " + "'spacy init vectors --attr'") class Errors(metaclass=ErrorsWithCodes): diff --git a/spacy/ml/staticvectors.py b/spacy/ml/staticvectors.py index 6fcb13ad0..b75240c5d 100644 --- a/spacy/ml/staticvectors.py +++ b/spacy/ml/staticvectors.py @@ -1,3 +1,4 @@ +import warnings from typing import Callable, List, Optional, Sequence, Tuple, cast from thinc.api import Model, Ops, registry @@ -5,7 +6,8 @@ from thinc.initializers import glorot_uniform_init from thinc.types import Floats1d, Floats2d, Ints1d, Ragged from thinc.util import partial -from ..errors import Errors +from ..attrs import ORTH +from ..errors import Errors, Warnings from ..tokens import Doc from ..vectors import Mode from ..vocab import Vocab @@ -24,6 +26,8 @@ def StaticVectors( linear projection to control the dimensionality. If a dropout rate is specified, the dropout is applied per dimension over the whole batch. """ + if key_attr != "ORTH": + warnings.warn(Warnings.W125, DeprecationWarning) return Model( "static_vectors", forward, @@ -40,9 +44,9 @@ def forward( token_count = sum(len(doc) for doc in docs) if not token_count: return _handle_empty(model.ops, model.get_dim("nO")) - key_attr: int = model.attrs["key_attr"] - keys = model.ops.flatten([cast(Ints1d, doc.to_array(key_attr)) for doc in docs]) vocab: Vocab = docs[0].vocab + key_attr: int = getattr(vocab.vectors, "attr", ORTH) + keys = model.ops.flatten([cast(Ints1d, doc.to_array(key_attr)) for doc in docs]) W = cast(Floats2d, model.ops.as_contig(model.get_param("W"))) if vocab.vectors.mode == Mode.default: V = model.ops.asarray(vocab.vectors.data) diff --git a/spacy/tests/vocab_vectors/test_vectors.py b/spacy/tests/vocab_vectors/test_vectors.py index 70835816d..717291314 100644 --- a/spacy/tests/vocab_vectors/test_vectors.py +++ b/spacy/tests/vocab_vectors/test_vectors.py @@ -402,6 +402,7 @@ def test_vectors_serialize(): row_r = v_r.add("D", vector=OPS.asarray([10, 20, 30, 40], dtype="f")) assert row == row_r assert_equal(OPS.to_numpy(v.data), OPS.to_numpy(v_r.data)) + assert v.attr == v_r.attr def test_vector_is_oov(): @@ -646,3 +647,32 @@ def test_equality(): vectors1.resize((5, 9)) vectors2.resize((5, 9)) assert vectors1 == vectors2 + + +def test_vectors_attr(): + data = numpy.asarray([[0, 0, 0], [1, 2, 3], [9, 8, 7]], dtype="f") + # default ORTH + nlp = English() + nlp.vocab.vectors = Vectors(data=data, keys=["A", "B", "C"]) + assert nlp.vocab.strings["A"] in nlp.vocab.vectors.key2row + assert nlp.vocab.strings["a"] not in nlp.vocab.vectors.key2row + assert nlp.vocab["A"].has_vector is True + assert nlp.vocab["a"].has_vector is False + assert nlp("A")[0].has_vector is True + assert nlp("a")[0].has_vector is False + + # custom LOWER + nlp = English() + nlp.vocab.vectors = Vectors(data=data, keys=["a", "b", "c"], attr="LOWER") + assert nlp.vocab.strings["A"] not in nlp.vocab.vectors.key2row + assert nlp.vocab.strings["a"] in nlp.vocab.vectors.key2row + assert nlp.vocab["A"].has_vector is True + assert nlp.vocab["a"].has_vector is True + assert nlp("A")[0].has_vector is True + assert nlp("a")[0].has_vector is True + # add a new vectors entry + assert nlp.vocab["D"].has_vector is False + assert nlp.vocab["d"].has_vector is False + nlp.vocab.set_vector("D", numpy.asarray([4, 5, 6])) + assert nlp.vocab["D"].has_vector is True + assert nlp.vocab["d"].has_vector is True diff --git a/spacy/tokens/doc.pyx b/spacy/tokens/doc.pyx index 206253949..146b276e2 100644 --- a/spacy/tokens/doc.pyx 
+++ b/spacy/tokens/doc.pyx @@ -35,6 +35,7 @@ from ..attrs cimport ( LENGTH, MORPH, NORM, + ORTH, POS, SENT_START, SPACY, @@ -613,13 +614,26 @@ cdef class Doc: """ if "similarity" in self.user_hooks: return self.user_hooks["similarity"](self, other) - if isinstance(other, (Lexeme, Token)) and self.length == 1: - if self.c[0].lex.orth == other.orth: + attr = getattr(self.vocab.vectors, "attr", ORTH) + cdef Token this_token + cdef Token other_token + cdef Lexeme other_lex + if len(self) == 1 and isinstance(other, Token): + this_token = self[0] + other_token = other + if Token.get_struct_attr(this_token.c, attr) == Token.get_struct_attr(other_token.c, attr): return 1.0 - elif isinstance(other, (Span, Doc)) and len(self) == len(other): + elif len(self) == 1 and isinstance(other, Lexeme): + this_token = self[0] + other_lex = other + if Token.get_struct_attr(this_token.c, attr) == Lexeme.get_struct_attr(other_lex.c, attr): + return 1.0 + elif isinstance(other, (Doc, Span)) and len(self) == len(other): similar = True - for i in range(self.length): - if self[i].orth != other[i].orth: + for i in range(len(self)): + this_token = self[i] + other_token = other[i] + if Token.get_struct_attr(this_token.c, attr) != Token.get_struct_attr(other_token.c, attr): similar = False break if similar: diff --git a/spacy/tokens/span.pyx b/spacy/tokens/span.pyx index 73192b760..59ee21687 100644 --- a/spacy/tokens/span.pyx +++ b/spacy/tokens/span.pyx @@ -8,13 +8,14 @@ import numpy from thinc.api import get_array_module from ..attrs cimport * -from ..attrs cimport attr_id_t +from ..attrs cimport ORTH, attr_id_t from ..lexeme cimport Lexeme from ..parts_of_speech cimport univ_pos_t from ..structs cimport LexemeC, TokenC from ..symbols cimport dep from ..typedefs cimport attr_t, flags_t, hash_t from .doc cimport _get_lca_matrix, get_token_attr, token_by_end, token_by_start +from .token cimport Token from ..errors import Errors, Warnings from ..util import normalize_slice @@ -341,13 +342,26 @@ cdef class Span: """ if "similarity" in self.doc.user_span_hooks: return self.doc.user_span_hooks["similarity"](self, other) - if len(self) == 1 and hasattr(other, "orth"): - if self[0].orth == other.orth: + attr = getattr(self.doc.vocab.vectors, "attr", ORTH) + cdef Token this_token + cdef Token other_token + cdef Lexeme other_lex + if len(self) == 1 and isinstance(other, Token): + this_token = self[0] + other_token = other + if Token.get_struct_attr(this_token.c, attr) == Token.get_struct_attr(other_token.c, attr): + return 1.0 + elif len(self) == 1 and isinstance(other, Lexeme): + this_token = self[0] + other_lex = other + if Token.get_struct_attr(this_token.c, attr) == Lexeme.get_struct_attr(other_lex.c, attr): return 1.0 elif isinstance(other, (Doc, Span)) and len(self) == len(other): similar = True for i in range(len(self)): - if self[i].orth != getattr(other[i], "orth", None): + this_token = self[i] + other_token = other[i] + if Token.get_struct_attr(this_token.c, attr) != Token.get_struct_attr(other_token.c, attr): similar = False break if similar: diff --git a/spacy/tokens/token.pyx b/spacy/tokens/token.pyx index 8c384f417..6018c3112 100644 --- a/spacy/tokens/token.pyx +++ b/spacy/tokens/token.pyx @@ -28,6 +28,7 @@ from ..attrs cimport ( LIKE_EMAIL, LIKE_NUM, LIKE_URL, + ORTH, ) from ..lexeme cimport Lexeme from ..symbols cimport conj @@ -214,11 +215,17 @@ cdef class Token: """ if "similarity" in self.doc.user_token_hooks: return self.doc.user_token_hooks["similarity"](self, other) - if hasattr(other, "__len__") and 
len(other) == 1 and hasattr(other, "__getitem__"): - if self.c.lex.orth == getattr(other[0], "orth", None): + attr = getattr(self.doc.vocab.vectors, "attr", ORTH) + cdef Token this_token = self + cdef Token other_token + cdef Lexeme other_lex + if isinstance(other, Token): + other_token = other + if Token.get_struct_attr(this_token.c, attr) == Token.get_struct_attr(other_token.c, attr): return 1.0 - elif hasattr(other, "orth"): - if self.c.lex.orth == other.orth: + elif isinstance(other, Lexeme): + other_lex = other + if Token.get_struct_attr(this_token.c, attr) == Lexeme.get_struct_attr(other_lex.c, attr): return 1.0 if self.vocab.vectors.n_keys == 0: warnings.warn(Warnings.W007.format(obj="Token")) @@ -415,7 +422,7 @@ cdef class Token: return self.doc.user_token_hooks["has_vector"](self) if self.vocab.vectors.size == 0 and self.doc.tensor.size != 0: return True - return self.vocab.has_vector(self.c.lex.orth) + return self.vocab.has_vector(Token.get_struct_attr(self.c, self.vocab.vectors.attr)) @property def vector(self): @@ -431,7 +438,7 @@ cdef class Token: if self.vocab.vectors.size == 0 and self.doc.tensor.size != 0: return self.doc.tensor[self.i] else: - return self.vocab.get_vector(self.c.lex.orth) + return self.vocab.get_vector(Token.get_struct_attr(self.c, self.vocab.vectors.attr)) @property def vector_norm(self): diff --git a/spacy/training/initialize.py b/spacy/training/initialize.py index 3a46b6632..82d4ebf24 100644 --- a/spacy/training/initialize.py +++ b/spacy/training/initialize.py @@ -216,9 +216,14 @@ def convert_vectors( prune: int, name: Optional[str] = None, mode: str = VectorsMode.default, + attr: str = "ORTH", ) -> None: vectors_loc = ensure_path(vectors_loc) if vectors_loc and vectors_loc.parts[-1].endswith(".npz"): + if attr != "ORTH": + raise ValueError( + "ORTH is the only attribute supported for vectors in .npz format." + ) nlp.vocab.vectors = Vectors( strings=nlp.vocab.strings, data=numpy.load(vectors_loc.open("rb")) ) @@ -246,11 +251,15 @@ def convert_vectors( nlp.vocab.vectors = Vectors( strings=nlp.vocab.strings, data=vectors_data, + attr=attr, **floret_settings, ) else: nlp.vocab.vectors = Vectors( - strings=nlp.vocab.strings, data=vectors_data, keys=vector_keys + strings=nlp.vocab.strings, + data=vectors_data, + keys=vector_keys, + attr=attr, ) nlp.vocab.deduplicate_vectors() if name is None: diff --git a/spacy/vectors.pyx b/spacy/vectors.pyx index bc654252a..bf79481b8 100644 --- a/spacy/vectors.pyx +++ b/spacy/vectors.pyx @@ -15,9 +15,11 @@ from thinc.api import Ops, get_array_module, get_current_ops from thinc.backends import get_array_ops from thinc.types import Floats2d +from .attrs cimport ORTH, attr_id_t from .strings cimport StringStore from . import util +from .attrs import IDS from .errors import Errors, Warnings from .strings import get_string_id @@ -64,8 +66,9 @@ cdef class Vectors: cdef readonly uint32_t hash_seed cdef readonly unicode bow cdef readonly unicode eow + cdef readonly attr_id_t attr - def __init__(self, *, strings=None, shape=None, data=None, keys=None, name=None, mode=Mode.default, minn=0, maxn=0, hash_count=1, hash_seed=0, bow="<", eow=">"): + def __init__(self, *, strings=None, shape=None, data=None, keys=None, name=None, mode=Mode.default, minn=0, maxn=0, hash_count=1, hash_seed=0, bow="<", eow=">", attr="ORTH"): """Create a new vector store. strings (StringStore): The string store. @@ -80,6 +83,8 @@ cdef class Vectors: hash_seed (int): The floret hash seed (default: 0). bow (str): The floret BOW string (default: "<"). 
eow (str): The floret EOW string (default: ">"). + attr (Union[int, str]): The token attribute for the vector keys + (default: "ORTH"). DOCS: https://spacy.io/api/vectors#init """ @@ -103,6 +108,14 @@ cdef class Vectors: self.hash_seed = hash_seed self.bow = bow self.eow = eow + if isinstance(attr, (int, long)): + self.attr = attr + else: + attr = attr.upper() + if attr == "TEXT": + attr = "ORTH" + self.attr = IDS.get(attr, ORTH) + if self.mode == Mode.default: if data is None: if shape is None: @@ -546,6 +559,7 @@ cdef class Vectors: "hash_seed": self.hash_seed, "bow": self.bow, "eow": self.eow, + "attr": self.attr, } def _set_cfg(self, cfg): @@ -556,6 +570,7 @@ cdef class Vectors: self.hash_seed = cfg.get("hash_seed", 0) self.bow = cfg.get("bow", "<") self.eow = cfg.get("eow", ">") + self.attr = cfg.get("attr", ORTH) def to_disk(self, path, *, exclude=tuple()): """Save the current state to a directory. diff --git a/spacy/vocab.pyx b/spacy/vocab.pyx index d47122d08..520228b51 100644 --- a/spacy/vocab.pyx +++ b/spacy/vocab.pyx @@ -365,8 +365,13 @@ cdef class Vocab: self[orth] # Make prob negative so it sorts by rank ascending # (key2row contains the rank) - priority = [(-lex.prob, self.vectors.key2row[lex.orth], lex.orth) - for lex in self if lex.orth in self.vectors.key2row] + priority = [] + cdef Lexeme lex + cdef attr_t value + for lex in self: + value = Lexeme.get_struct_attr(lex.c, self.vectors.attr) + if value in self.vectors.key2row: + priority.append((-lex.prob, self.vectors.key2row[value], value)) priority.sort() indices = xp.asarray([i for (prob, i, key) in priority], dtype="uint64") keys = xp.asarray([key for (prob, i, key) in priority], dtype="uint64") @@ -399,8 +404,10 @@ cdef class Vocab: """ if isinstance(orth, str): orth = self.strings.add(orth) - if self.has_vector(orth): - return self.vectors[orth] + cdef Lexeme lex = self[orth] + key = Lexeme.get_struct_attr(lex.c, self.vectors.attr) + if self.has_vector(key): + return self.vectors[key] xp = get_array_module(self.vectors.data) vectors = xp.zeros((self.vectors_length,), dtype="f") return vectors @@ -416,15 +423,16 @@ cdef class Vocab: """ if isinstance(orth, str): orth = self.strings.add(orth) - if self.vectors.is_full and orth not in self.vectors: + cdef Lexeme lex = self[orth] + key = Lexeme.get_struct_attr(lex.c, self.vectors.attr) + if self.vectors.is_full and key not in self.vectors: new_rows = max(100, int(self.vectors.shape[0]*1.3)) if self.vectors.shape[1] == 0: width = vector.size else: width = self.vectors.shape[1] self.vectors.resize((new_rows, width)) - lex = self[orth] # Add word to vocab if necessary - row = self.vectors.add(orth, vector=vector) + row = self.vectors.add(key, vector=vector) if row >= 0: lex.rank = row @@ -439,7 +447,9 @@ cdef class Vocab: """ if isinstance(orth, str): orth = self.strings.add(orth) - return orth in self.vectors + cdef Lexeme lex = self[orth] + key = Lexeme.get_struct_attr(lex.c, self.vectors.attr) + return key in self.vectors property lookups: def __get__(self): diff --git a/website/docs/api/architectures.mdx b/website/docs/api/architectures.mdx index 268c04a07..bab24f13b 100644 --- a/website/docs/api/architectures.mdx +++ b/website/docs/api/architectures.mdx @@ -303,7 +303,7 @@ mapped to a zero vector. See the documentation on | `nM` | The width of the static vectors. ~~Optional[int]~~ | | `dropout` | Optional dropout rate. If set, it's applied per dimension over the whole batch. Defaults to `None`. 
~~Optional[float]~~ | | `init_W` | The [initialization function](https://thinc.ai/docs/api-initializers). Defaults to [`glorot_uniform_init`](https://thinc.ai/docs/api-initializers#glorot_uniform_init). ~~Callable[[Ops, Tuple[int, ...]]], FloatsXd]~~ | -| `key_attr` | Defaults to `"ORTH"`. ~~str~~ | +| `key_attr` | This setting is ignored in spaCy v3.6+. To set a custom key attribute for vectors, configure it through [`Vectors`](/api/vectors) or [`spacy init vectors`](/api/cli#init-vectors). Defaults to `"ORTH"`. ~~str~~ | | **CREATES** | The model using the architecture. ~~Model[List[Doc], Ragged]~~ | ### spacy.FeatureExtractor.v1 {id="FeatureExtractor"} diff --git a/website/docs/api/cli.mdx b/website/docs/api/cli.mdx index 5b4bca1ce..6a87f78b8 100644 --- a/website/docs/api/cli.mdx +++ b/website/docs/api/cli.mdx @@ -211,7 +211,8 @@ $ python -m spacy init vectors [lang] [vectors_loc] [output_dir] [--prune] [--tr | `output_dir` | Pipeline output directory. Will be created if it doesn't exist. ~~Path (positional)~~ | | `--truncate`, `-t` | Number of vectors to truncate to when reading in vectors file. Defaults to `0` for no truncation. ~~int (option)~~ | | `--prune`, `-p` | Number of vectors to prune the vocabulary to. Defaults to `-1` for no pruning. ~~int (option)~~ | -| `--mode`, `-m` | Vectors mode: `default` or [`floret`](https://github.com/explosion/floret). Defaults to `default`. ~~Optional[str] \(option)~~ | +| `--mode`, `-m` | Vectors mode: `default` or [`floret`](https://github.com/explosion/floret). Defaults to `default`. ~~str \(option)~~ | +| `--attr`, `-a` | Token attribute to use for vectors, e.g. `LOWER` or `NORM`) Defaults to `ORTH`. ~~str \(option)~~ | | `--name`, `-n` | Name to assign to the word vectors in the `meta.json`, e.g. `en_core_web_md.vectors`. ~~Optional[str] \(option)~~ | | `--verbose`, `-V` | Print additional information and explanations. ~~bool (flag)~~ | | `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | diff --git a/website/docs/api/vectors.mdx b/website/docs/api/vectors.mdx index d6033c096..fa4cd0c7a 100644 --- a/website/docs/api/vectors.mdx +++ b/website/docs/api/vectors.mdx @@ -60,6 +60,7 @@ modified later. | `hash_seed` 3.2 | The floret hash seed (default: `0`). ~~int~~ | | `bow` 3.2 | The floret BOW string (default: `"<"`). ~~str~~ | | `eow` 3.2 | The floret EOW string (default: `">"`). ~~str~~ | +| `attr` 3.6 | The token attribute for the vector keys (default: `"ORTH"`). ~~Union[int, str]~~ | ## Vectors.\_\_getitem\_\_ {id="getitem",tag="method"} @@ -453,8 +454,9 @@ Load state from a binary string. ## Attributes {id="attributes"} -| Name | Description | -| --------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `data` | Stored vectors data. `numpy` is used for CPU vectors, `cupy` for GPU vectors. ~~Union[numpy.ndarray[ndim=1, dtype=float32], cupy.ndarray[ndim=1, dtype=float32]]~~ | -| `key2row` | Dictionary mapping word hashes to rows in the `Vectors.data` table. ~~Dict[int, int]~~ | -| `keys` | Array keeping the keys in order, such that `keys[vectors.key2row[key]] == key`. 
~~Union[numpy.ndarray[ndim=1, dtype=float32], cupy.ndarray[ndim=1, dtype=float32]]~~ | +| Name | Description | +| ----------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `data` | Stored vectors data. `numpy` is used for CPU vectors, `cupy` for GPU vectors. ~~Union[numpy.ndarray[ndim=1, dtype=float32], cupy.ndarray[ndim=1, dtype=float32]]~~ | +| `key2row` | Dictionary mapping word hashes to rows in the `Vectors.data` table. ~~Dict[int, int]~~ | +| `keys` | Array keeping the keys in order, such that `keys[vectors.key2row[key]] == key`. ~~Union[numpy.ndarray[ndim=1, dtype=float32], cupy.ndarray[ndim=1, dtype=float32]]~~ | +| `attr` 3.6 | The token attribute for the vector keys. ~~int~~ | From 57a230c6e4844d368d4d12b09993877fc9e50946 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dani=C3=ABl=20de=20Kok?= Date: Wed, 28 Jun 2023 17:09:57 +0200 Subject: [PATCH 05/27] Remove section about parallel training with Ray (#12770) The Ray integration is currently broken, having these docs around suggest that this functionality is currently available. --- website/docs/usage/training.mdx | 72 --------------------------------- 1 file changed, 72 deletions(-) diff --git a/website/docs/usage/training.mdx b/website/docs/usage/training.mdx index 6caf2e94b..98333db72 100644 --- a/website/docs/usage/training.mdx +++ b/website/docs/usage/training.mdx @@ -11,7 +11,6 @@ menu: - ['Custom Functions', 'custom-functions'] - ['Initialization', 'initialization'] - ['Data Utilities', 'data'] - - ['Parallel Training', 'parallel-training'] - ['Internal API', 'api'] --- @@ -1565,77 +1564,6 @@ token-based annotations like the dependency parse or entity labels, you'll need to take care to adjust the `Example` object so its annotations match and remain valid. -## Parallel & distributed training with Ray {id="parallel-training"} - -> #### Installation -> -> ```bash -> $ pip install -U %%SPACY_PKG_NAME[ray]%%SPACY_PKG_FLAGS -> # Check that the CLI is registered -> $ python -m spacy ray --help -> ``` - -[Ray](https://ray.io/) is a fast and simple framework for building and running -**distributed applications**. You can use Ray to train spaCy on one or more -remote machines, potentially speeding up your training process. Parallel -training won't always be faster though – it depends on your batch size, models, -and hardware. - - - -To use Ray with spaCy, you need the -[`spacy-ray`](https://github.com/explosion/spacy-ray) package installed. -Installing the package will automatically add the `ray` command to the spaCy -CLI. - - - -The [`spacy ray train`](/api/cli#ray-train) command follows the same API as -[`spacy train`](/api/cli#train), with a few extra options to configure the Ray -setup. You can optionally set the `--address` option to point to your Ray -cluster. If it's not set, Ray will run locally. - -```bash -python -m spacy ray train config.cfg --n-workers 2 -``` - - - -Get started with parallel training using our project template. It trains a -simple model on a Universal Dependencies Treebank and lets you parallelize the -training with Ray. - - - -### How parallel training works {id="parallel-training-details"} - -Each worker receives a shard of the **data** and builds a copy of the **model -and optimizer** from the [`config.cfg`](#config). It also has a communication -channel to **pass gradients and parameters** to the other workers. 
Additionally, -each worker is given ownership of a subset of the parameter arrays. Every -parameter array is owned by exactly one worker, and the workers are given a -mapping so they know which worker owns which parameter. - -![Illustration of setup](/images/spacy-ray.svg) - -As training proceeds, every worker will be computing gradients for **all** of -the model parameters. When they compute gradients for parameters they don't own, -they'll **send them to the worker** that does own that parameter, along with a -version identifier so that the owner can decide whether to discard the gradient. -Workers use the gradients they receive and the ones they compute locally to -update the parameters they own, and then broadcast the updated array and a new -version ID to the other workers. - -This training procedure is **asynchronous** and **non-blocking**. Workers always -push their gradient increments and parameter updates, they do not have to pull -them and block on the result, so the transfers can happen in the background, -overlapped with the actual training work. The workers also do not have to stop -and wait for each other ("synchronize") at the start of each batch. This is very -useful for spaCy, because spaCy is often trained on long documents, which means -**batches can vary in size** significantly. Uneven workloads make synchronous -gradient descent inefficient, because if one batch is slow, all of the other -workers are stuck waiting for it to complete before they can continue. - ## Internal training API {id="api"} From bd239511a41c1b93fba1ad53b110d4ce07bf70a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marcus=20Bl=C3=A4ttermann?= Date: Mon, 3 Jul 2023 10:24:25 +0200 Subject: [PATCH 06/27] Fix problem with missing syntax highlighting languages causing runtime crash on the website (#12781) * Fix problem with universe pages using `docker` language * Fix problem with universe pages using `r` language * Add fallback, in case code language is unknown --- website/src/components/code.js | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/website/src/components/code.js b/website/src/components/code.js index 09c2fabfc..e733dba77 100644 --- a/website/src/components/code.js +++ b/website/src/components/code.js @@ -13,6 +13,8 @@ import 'prismjs/components/prism-json.min.js' import 'prismjs/components/prism-markdown.min.js' import 'prismjs/components/prism-python.min.js' import 'prismjs/components/prism-yaml.min.js' +import 'prismjs/components/prism-docker.min.js' +import 'prismjs/components/prism-r.min.js' import { isString } from './util' import Link, { OptionalLink } from './link' @@ -172,7 +174,7 @@ const convertLine = ({ line, prompt, lang }) => { return handlePromot({ lineFlat, prompt }) } - return lang === 'none' || !lineFlat ? ( + return lang === 'none' || !lineFlat || !(lang in Prism.languages) ? 
( lineFlat ) : ( Date: Tue, 4 Jul 2023 11:45:13 +0200 Subject: [PATCH 07/27] Use 'exclude' instead of 'disable' (#12783) as suggested by @svlandeg --- website/meta/universe.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/meta/universe.json b/website/meta/universe.json index cd3bedbff..75ec5fb5c 100644 --- a/website/meta/universe.json +++ b/website/meta/universe.json @@ -4372,7 +4372,7 @@ "code_example": [ "import spacy", "", - "nlp = spacy.load(\"en_core_web_sm\", disable=[\"ner\"])", + "nlp = spacy.load(\"en_core_web_sm\", exclude=[\"ner\"])", "nlp.add_pipe(\"span_marker\", config={\"model\": \"tomaarsen/span-marker-roberta-large-ontonotes5\"})", "", "text = \"\"\"Cleopatra VII, also known as Cleopatra the Great, was the last active ruler of the \\", From 830dcca3679bdc22c6c21a7321cae0862319970c Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Thu, 6 Jul 2023 09:55:34 +0200 Subject: [PATCH 08/27] SpanFinder: set default max_length to 25 (#12791) When the default `max_length` is not set and there are longer training documents, it can be difficult to train and evaluate the span finder due to memory limits and the time it takes to evaluate a huge number of predicted spans. --- spacy/cli/templates/quickstart_training.jinja | 4 ++-- spacy/pipeline/span_finder.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/spacy/cli/templates/quickstart_training.jinja b/spacy/cli/templates/quickstart_training.jinja index e3ca73cfb..1937ea935 100644 --- a/spacy/cli/templates/quickstart_training.jinja +++ b/spacy/cli/templates/quickstart_training.jinja @@ -130,7 +130,7 @@ grad_factor = 1.0 {% if "span_finder" in components -%} [components.span_finder] factory = "span_finder" -max_length = null +max_length = 25 min_length = null scorer = {"@scorers":"spacy.span_finder_scorer.v1"} spans_key = "sc" @@ -419,7 +419,7 @@ width = ${components.tok2vec.model.encode.width} {% if "span_finder" in components %} [components.span_finder] factory = "span_finder" -max_length = null +max_length = 25 min_length = null scorer = {"@scorers":"spacy.span_finder_scorer.v1"} spans_key = "sc" diff --git a/spacy/pipeline/span_finder.py b/spacy/pipeline/span_finder.py index 53f5c55be..a12d52911 100644 --- a/spacy/pipeline/span_finder.py +++ b/spacy/pipeline/span_finder.py @@ -48,7 +48,7 @@ DEFAULT_SPAN_FINDER_MODEL = Config().from_str(span_finder_default_config)["model "threshold": 0.5, "model": DEFAULT_SPAN_FINDER_MODEL, "spans_key": DEFAULT_SPANS_KEY, - "max_length": None, + "max_length": 25, "min_length": None, "scorer": {"@scorers": "spacy.span_finder_scorer.v1"}, }, From a1191146f5b4a47ff81a94bcc9b8a6acc8ed5568 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Thu, 6 Jul 2023 12:47:50 +0200 Subject: [PATCH 09/27] Revert "Temporarily skip tests for compat table" This reverts commit dd5e00c7355612b07550cb8ee3c5f72c26983bd1. 
--- spacy/tests/test_cli.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/spacy/tests/test_cli.py b/spacy/tests/test_cli.py index 9a2d7705f..8e1c9ca32 100644 --- a/spacy/tests/test_cli.py +++ b/spacy/tests/test_cli.py @@ -697,7 +697,6 @@ def test_string_to_list_intify(value): assert string_to_list(value, intify=True) == [1, 2, 3] -@pytest.mark.skip(reason="Temporarily skip before models are published") def test_download_compatibility(): spec = SpecifierSet("==" + about.__version__) spec.prereleases = False @@ -708,7 +707,6 @@ def test_download_compatibility(): assert get_minor_version(about.__version__) == get_minor_version(version) -@pytest.mark.skip(reason="Temporarily skip before models are published") def test_validate_compatibility_table(): spec = SpecifierSet("==" + about.__version__) spec.prereleases = False From 76329e1dde85e4b978aab5337b3a5f460b42e576 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Thu, 6 Jul 2023 12:48:06 +0200 Subject: [PATCH 10/27] Revert "Temporarily skip download CLI related tests in CI" This reverts commit 46ce66021a1f6c6f18914546051199b478e63040. --- .github/workflows/tests.yml | 54 ++++++++++++++++++------------------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index f177fbcb6..d60c90c1c 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -111,22 +111,22 @@ jobs: - name: Test import run: python -W error -c "import spacy" -# - name: "Test download CLI" -# run: | -# python -m spacy download ca_core_news_sm -# python -m spacy download ca_core_news_md -# python -c "import spacy; nlp=spacy.load('ca_core_news_sm'); doc=nlp('test')" -# if: matrix.python_version == '3.9' -# -# - name: "Test download_url in info CLI" -# run: | -# python -W error -m spacy info ca_core_news_sm | grep -q download_url -# if: matrix.python_version == '3.9' -# -# - name: "Test no warnings on load (#11713)" -# run: | -# python -W error -c "import ca_core_news_sm; nlp = ca_core_news_sm.load(); doc=nlp('test')" -# if: matrix.python_version == '3.9' + - name: "Test download CLI" + run: | + python -m spacy download ca_core_news_sm + python -m spacy download ca_core_news_md + python -c "import spacy; nlp=spacy.load('ca_core_news_sm'); doc=nlp('test')" + if: matrix.python_version == '3.9' + + - name: "Test download_url in info CLI" + run: | + python -W error -m spacy info ca_core_news_sm | grep -q download_url + if: matrix.python_version == '3.9' + + - name: "Test no warnings on load (#11713)" + run: | + python -W error -c "import ca_core_news_sm; nlp = ca_core_news_sm.load(); doc=nlp('test')" + if: matrix.python_version == '3.9' - name: "Test convert CLI" run: | @@ -150,17 +150,17 @@ jobs: python -m spacy train ner.cfg --paths.train ner-token-per-line-conll2003.spacy --paths.dev ner-token-per-line-conll2003.spacy --training.max_steps 10 --gpu-id -1 if: matrix.python_version == '3.9' -# - name: "Test assemble CLI" -# run: | -# python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_sm'}; config.to_disk('ner_source_sm.cfg')" -# PYTHONWARNINGS="error,ignore::DeprecationWarning" python -m spacy assemble ner_source_sm.cfg output_dir -# if: matrix.python_version == '3.9' -# -# - name: "Test assemble CLI vectors warning" -# run: | -# python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_md'}; config.to_disk('ner_source_md.cfg')" -# python -m spacy assemble 
ner_source_md.cfg output_dir 2>&1 | grep -q W113 -# if: matrix.python_version == '3.9' + - name: "Test assemble CLI" + run: | + python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_sm'}; config.to_disk('ner_source_sm.cfg')" + PYTHONWARNINGS="error,ignore::DeprecationWarning" python -m spacy assemble ner_source_sm.cfg output_dir + if: matrix.python_version == '3.9' + + - name: "Test assemble CLI vectors warning" + run: | + python -c "import spacy; config = spacy.util.load_config('ner.cfg'); config['components']['ner'] = {'source': 'ca_core_news_md'}; config.to_disk('ner_source_md.cfg')" + python -m spacy assemble ner_source_md.cfg output_dir 2>&1 | grep -q W113 + if: matrix.python_version == '3.9' - name: "Install test requirements" run: | From 4e19ec7eb81aacb7db0c700b098784c202643a34 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Thu, 6 Jul 2023 12:58:25 +0200 Subject: [PATCH 11/27] Docs for v3.6.0 (#12792) * Docs for v3.6.0 * Add sl performance * Add da trf note --- website/docs/usage/v3-6.mdx | 143 +++++++++++++++++++++++++++++++++ website/meta/languages.json | 9 ++- website/meta/sidebars.json | 3 +- website/src/templates/index.js | 4 +- 4 files changed, 154 insertions(+), 5 deletions(-) create mode 100644 website/docs/usage/v3-6.mdx diff --git a/website/docs/usage/v3-6.mdx b/website/docs/usage/v3-6.mdx new file mode 100644 index 000000000..eda46b365 --- /dev/null +++ b/website/docs/usage/v3-6.mdx @@ -0,0 +1,143 @@ +--- +title: What's New in v3.6 +teaser: New features and how to upgrade +menu: + - ['New Features', 'features'] + - ['Upgrading Notes', 'upgrading'] +--- + +## New features {id="features",hidden="true"} + +spaCy v3.6 adds the new [`SpanFinder`](/api/spanfinder) component to the core +spaCy library and new trained pipelines for Slovenian. + +### SpanFinder {id="spanfinder"} + +The [`SpanFinder`](/api/spanfinder) component identifies potentially +overlapping, unlabeled spans by identifying span start and end tokens. It is +intended for use in combination with a component like +[`SpanCategorizer`](/api/spancategorizer) that may further filter or label the +spans. See our +[Spancat blog post](https://explosion.ai/blog/spancat#span-finder) for a more +detailed introduction to the span finder. + +To train a pipeline with `span_finder` + `spancat`, remember to add +`span_finder` (and its `tok2vec` or `transformer` if required) to +`[training.annotating_components]` so that the `spancat` component can be +trained directly from its predictions: + +```ini +[nlp] +pipeline = ["tok2vec","span_finder","spancat"] + +[training] +annotating_components = ["tok2vec","span_finder"] +``` + +In practice it can be helpful to initially train the `span_finder` separately +before [sourcing](/usage/processing-pipelines#sourced-components) it (along with +its `tok2vec`) into the `spancat` pipeline for further training. Otherwise the +memory usage can spike for `spancat` in the first few training steps if the +`span_finder` makes a large number of predictions. + +### Additional features and improvements {id="additional-features-and-improvements"} + +- Language updates: + - Add initial support for Malay. + - Update Latin defaults to support noun chunks, update lexical/tokenizer + settings and add example sentences. +- Support `spancat_singlelabel` in `spacy debug data` CLI. +- Add `doc.spans` rendering to `spacy evaluate` CLI displaCy output. +- Support custom token/lexeme attribute for vectors. 
+- Add option to return scores separately keyed by component name with + `spacy evaluate --per-component`, `Language.evaluate(per_component=True)` and + `Scorer.score(per_component=True)`. This is useful when the pipeline contains + more than one of the same component like `textcat` that may have overlapping + scores keys. +- Typing updates for `PhraseMatcher` and `SpanGroup`. + +## Trained pipelines {id="pipelines"} + +### New trained pipelines {id="new-pipelines"} + +v3.6 introduces new pipelines for Slovenian, which use the trainable lemmatizer +and [floret vectors](https://github.com/explosion/floret). + +| Package | UPOS | Parser LAS | NER F | +| ------------------------------------------------- | ---: | ---------: | ----: | +| [`sl_core_news_sm`](/models/sl#sl_core_news_sm) | 96.9 | 82.1 | 62.9 | +| [`sl_core_news_md`](/models/sl#sl_core_news_md) | 97.6 | 84.3 | 73.5 | +| [`sl_core_news_lg`](/models/sl#sl_core_news_lg) | 97.7 | 84.3 | 79.0 | +| [`sl_core_news_trf`](/models/sl#sl_core_news_trf) | 99.0 | 91.7 | 90.0 | + +### Pipeline updates {id="pipeline-updates"} + +The English pipelines have been updated to improve handling of contractions with +various apostrophes and to lemmatize "get" as a passive auxiliary. + +The Danish pipeline `da_core_news_trf` has been updated to use +[`vesteinn/DanskBERT`](https://huggingface.co/vesteinn/DanskBERT) with +performance improvements across the board. + +## Notes about upgrading from v3.5 {id="upgrading"} + +### SpanGroup spans are now required to be from the same doc {id="spangroup-spans"} + +When initializing a `SpanGroup`, there is a new check to verify that all added +spans refer to the current doc. Without this check, it was possible to run into +string store or other errors. + +One place this may crop up is when creating `Example` objects for training with +custom spans: + +```diff + doc = Doc(nlp.vocab, words=tokens) # predicted doc + example = Example.from_dict(doc, {"ner": iob_tags}) + # use the reference doc when creating reference spans +- span = Span(doc, 0, 5, "ORG") ++ span = Span(example.reference, 0, 5, "ORG") + example.reference.spans[spans_key] = [span] +``` + +### Pipeline package version compatibility {id="version-compat"} + +> #### Using legacy implementations +> +> In spaCy v3, you'll still be able to load and reference legacy implementations +> via [`spacy-legacy`](https://github.com/explosion/spacy-legacy), even if the +> components or architectures change and newer versions are available in the +> core library. + +When you're loading a pipeline package trained with an earlier version of spaCy +v3, you will see a warning telling you that the pipeline may be incompatible. +This doesn't necessarily have to be true, but we recommend running your +pipelines against your test suite or evaluation data to make sure there are no +unexpected results. + +If you're using one of the [trained pipelines](/models) we provide, you should +run [`spacy download`](/api/cli#download) to update to the latest version. To +see an overview of all installed packages and their compatibility, you can run +[`spacy validate`](/api/cli#validate). 
+ +If you've trained your own custom pipeline and you've confirmed that it's still +working as expected, you can update the spaCy version requirements in the +[`meta.json`](/api/data-formats#meta): + +```diff +- "spacy_version": ">=3.5.0,<3.6.0", ++ "spacy_version": ">=3.5.0,<3.7.0", +``` + +### Updating v3.5 configs + +To update a config from spaCy v3.5 with the new v3.6 settings, run +[`init fill-config`](/api/cli#init-fill-config): + +```cli +$ python -m spacy init fill-config config-v3.5.cfg config-v3.6.cfg +``` + +In many cases ([`spacy train`](/api/cli#train), +[`spacy.load`](/api/top-level#spacy.load)), the new defaults will be filled in +automatically, but you'll need to fill in the new settings to run +[`debug config`](/api/cli#debug) and [`debug data`](/api/cli#debug-data). diff --git a/website/meta/languages.json b/website/meta/languages.json index f88d2b7bf..3305b840b 100644 --- a/website/meta/languages.json +++ b/website/meta/languages.json @@ -222,7 +222,9 @@ }, { "code": "la", - "name": "Latin" + "name": "Latin", + "example": "In principio creavit Deus caelum et terram.", + "has_examples": true }, { "code": "lb", @@ -339,7 +341,10 @@ }, { "code": "sl", - "name": "Slovenian" + "name": "Slovenian", + "example": "France Prešeren je umrl 8. februarja 1849 v Kranju", + "has_examples": true, + "models": ["sl_core_news_sm", "sl_core_news_md", "sl_core_news_lg", "sl_core_news_trf"] }, { "code": "sq", diff --git a/website/meta/sidebars.json b/website/meta/sidebars.json index 12c3fce35..04102095f 100644 --- a/website/meta/sidebars.json +++ b/website/meta/sidebars.json @@ -14,7 +14,8 @@ { "text": "New in v3.2", "url": "/usage/v3-2" }, { "text": "New in v3.3", "url": "/usage/v3-3" }, { "text": "New in v3.4", "url": "/usage/v3-4" }, - { "text": "New in v3.5", "url": "/usage/v3-5" } + { "text": "New in v3.5", "url": "/usage/v3-5" }, + { "text": "New in v3.6", "url": "/usage/v3-6" } ] }, { diff --git a/website/src/templates/index.js b/website/src/templates/index.js index 227b25be8..c8295593c 100644 --- a/website/src/templates/index.js +++ b/website/src/templates/index.js @@ -58,8 +58,8 @@ const AlertSpace = ({ nightly, legacy }) => { } const navAlert = ( - - 💥 Out now: spaCy v3.5 + + 💥 Out now: spaCy v3.6 ) From 30bb34533a9b947f8f120805ce9f662cacbf4e89 Mon Sep 17 00:00:00 2001 From: Basile Dura Date: Thu, 6 Jul 2023 16:49:43 +0200 Subject: [PATCH 12/27] feat: add example stubs (#12679) * feat: add example stubs * fix: add required annotations * fix: mypy issues * fix: use Py36-compatible Portocol * Minor reformatting --------- Co-authored-by: Adriane Boyd Co-authored-by: svlandeg --- spacy/tokens/doc.pyi | 8 +++++- spacy/training/corpus.py | 24 ++++++++++------ spacy/training/example.pyi | 59 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 81 insertions(+), 10 deletions(-) create mode 100644 spacy/training/example.pyi diff --git a/spacy/tokens/doc.pyi b/spacy/tokens/doc.pyi index 00c7a9d07..55222f8aa 100644 --- a/spacy/tokens/doc.pyi +++ b/spacy/tokens/doc.pyi @@ -8,6 +8,7 @@ from typing import ( List, Optional, Protocol, + Sequence, Tuple, Union, overload, @@ -134,7 +135,12 @@ class Doc: def text(self) -> str: ... @property def text_with_ws(self) -> str: ... - ents: Tuple[Span] + # Ideally the getter would output Tuple[Span] + # see https://github.com/python/mypy/issues/3004 + @property + def ents(self) -> Sequence[Span]: ... + @ents.setter + def ents(self, value: Sequence[Span]) -> None: ... 
def set_ents( self, entities: List[Span], diff --git a/spacy/training/corpus.py b/spacy/training/corpus.py index 6037c15e3..37af9e476 100644 --- a/spacy/training/corpus.py +++ b/spacy/training/corpus.py @@ -6,6 +6,7 @@ from typing import TYPE_CHECKING, Callable, Iterable, Iterator, List, Optional, import srsly from .. import util +from ..compat import Protocol from ..errors import Errors, Warnings from ..tokens import Doc, DocBin from ..vocab import Vocab @@ -19,6 +20,11 @@ if TYPE_CHECKING: FILE_TYPE = ".spacy" +class ReaderProtocol(Protocol): + def __call__(self, nlp: "Language") -> Iterable[Example]: + pass + + @util.registry.readers("spacy.Corpus.v1") def create_docbin_reader( path: Optional[Path], @@ -26,7 +32,7 @@ def create_docbin_reader( max_length: int = 0, limit: int = 0, augmenter: Optional[Callable] = None, -) -> Callable[["Language"], Iterable[Example]]: +) -> ReaderProtocol: if path is None: raise ValueError(Errors.E913) util.logger.debug("Loading corpus from path: %s", path) @@ -45,7 +51,7 @@ def create_jsonl_reader( min_length: int = 0, max_length: int = 0, limit: int = 0, -) -> Callable[["Language"], Iterable[Example]]: +) -> ReaderProtocol: return JsonlCorpus(path, min_length=min_length, max_length=max_length, limit=limit) @@ -63,7 +69,7 @@ def create_plain_text_reader( path: Optional[Path], min_length: int = 0, max_length: int = 0, -) -> Callable[["Language"], Iterable[Doc]]: +) -> ReaderProtocol: """Iterate Example objects from a file or directory of plain text UTF-8 files with one line per doc. @@ -144,7 +150,7 @@ class Corpus: self.augmenter = augmenter if augmenter is not None else dont_augment self.shuffle = shuffle - def __call__(self, nlp: "Language") -> Iterator[Example]: + def __call__(self, nlp: "Language") -> Iterable[Example]: """Yield examples from the data. nlp (Language): The current nlp object. @@ -182,7 +188,7 @@ class Corpus: def make_examples( self, nlp: "Language", reference_docs: Iterable[Doc] - ) -> Iterator[Example]: + ) -> Iterable[Example]: for reference in reference_docs: if len(reference) == 0: continue @@ -197,7 +203,7 @@ class Corpus: def make_examples_gold_preproc( self, nlp: "Language", reference_docs: Iterable[Doc] - ) -> Iterator[Example]: + ) -> Iterable[Example]: for reference in reference_docs: if reference.has_annotation("SENT_START"): ref_sents = [sent.as_doc() for sent in reference.sents] @@ -210,7 +216,7 @@ class Corpus: def read_docbin( self, vocab: Vocab, locs: Iterable[Union[str, Path]] - ) -> Iterator[Doc]: + ) -> Iterable[Doc]: """Yield training examples as example dicts""" i = 0 for loc in locs: @@ -257,7 +263,7 @@ class JsonlCorpus: self.max_length = max_length self.limit = limit - def __call__(self, nlp: "Language") -> Iterator[Example]: + def __call__(self, nlp: "Language") -> Iterable[Example]: """Yield examples from the data. nlp (Language): The current nlp object. @@ -307,7 +313,7 @@ class PlainTextCorpus: self.min_length = min_length self.max_length = max_length - def __call__(self, nlp: "Language") -> Iterator[Example]: + def __call__(self, nlp: "Language") -> Iterable[Example]: """Yield examples from the data. nlp (Language): The current nlp object. 
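For reference, the `ReaderProtocol` introduced above formalizes the call signature that custom corpus readers already follow. A minimal sketch of such a reader, assuming a hypothetical registry name `custom_line_reader.v1` and plain-text input with one doc per line (illustrative only):

```python
from pathlib import Path
from typing import Iterable

from spacy import util
from spacy.language import Language
from spacy.training import Example


@util.registry.readers("custom_line_reader.v1")  # illustrative name
def create_line_reader(path: Path):
    def reader(nlp: Language) -> Iterable[Example]:
        # One doc per non-empty line; no gold annotations attached.
        for line in Path(path).open(encoding="utf8"):
            text = line.strip()
            if text:
                doc = nlp.make_doc(text)
                yield Example.from_dict(doc, {})

    return reader
```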
diff --git a/spacy/training/example.pyi b/spacy/training/example.pyi new file mode 100644 index 000000000..9cd563465 --- /dev/null +++ b/spacy/training/example.pyi @@ -0,0 +1,59 @@ +from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple + +from ..tokens import Doc, Span +from ..vocab import Vocab +from .alignment import Alignment + +def annotations_to_doc( + vocab: Vocab, + tok_annot: Dict[str, Any], + doc_annot: Dict[str, Any], +) -> Doc: ... +def validate_examples( + examples: Iterable[Example], + method: str, +) -> None: ... +def validate_get_examples( + get_examples: Callable[[], Iterable[Example]], + method: str, +): ... + +class Example: + x: Doc + y: Doc + + def __init__( + self, + predicted: Doc, + reference: Doc, + *, + alignment: Optional[Alignment] = None, + ): ... + def __len__(self) -> int: ... + @property + def predicted(self) -> Doc: ... + @predicted.setter + def predicted(self, doc: Doc) -> None: ... + @property + def reference(self) -> Doc: ... + @reference.setter + def reference(self, doc: Doc) -> None: ... + def copy(self) -> Example: ... + @classmethod + def from_dict(cls, predicted: Doc, example_dict: Dict[str, Any]) -> Example: ... + @property + def alignment(self) -> Alignment: ... + def get_aligned(self, field: str, as_string=False): ... + def get_aligned_parse(self, projectivize=True): ... + def get_aligned_sent_starts(self): ... + def get_aligned_spans_x2y(self, x_spans: Sequence[Span], allow_overlap=False) -> List[Span]: ... + def get_aligned_spans_y2x(self, y_spans: Sequence[Span], allow_overlap=False) -> List[Span]: ... + def get_aligned_ents_and_ner(self) -> Tuple[List[Span], List[str]]: ... + def get_aligned_ner(self) -> List[str]: ... + def get_matching_ents(self, check_label: bool = True) -> List[Span]: ... + def to_dict(self) -> Dict[str, Any]: ... + def split_sents(self) -> List[Example]: ... + @property + def text(self) -> str: ... + def __str__(self) -> str: ... + def __repr__(self) -> str: ... From d26e4e08493aee2daf8c489435d94e8c01ce1638 Mon Sep 17 00:00:00 2001 From: svlandeg Date: Thu, 6 Jul 2023 17:02:38 +0200 Subject: [PATCH 13/27] Revert "feat: add example stubs (#12679)" This reverts commit 30bb34533a9b947f8f120805ce9f662cacbf4e89. --- spacy/tokens/doc.pyi | 8 +----- spacy/training/corpus.py | 24 ++++++---------- spacy/training/example.pyi | 59 -------------------------------------- 3 files changed, 10 insertions(+), 81 deletions(-) delete mode 100644 spacy/training/example.pyi diff --git a/spacy/tokens/doc.pyi b/spacy/tokens/doc.pyi index 55222f8aa..00c7a9d07 100644 --- a/spacy/tokens/doc.pyi +++ b/spacy/tokens/doc.pyi @@ -8,7 +8,6 @@ from typing import ( List, Optional, Protocol, - Sequence, Tuple, Union, overload, @@ -135,12 +134,7 @@ class Doc: def text(self) -> str: ... @property def text_with_ws(self) -> str: ... - # Ideally the getter would output Tuple[Span] - # see https://github.com/python/mypy/issues/3004 - @property - def ents(self) -> Sequence[Span]: ... - @ents.setter - def ents(self, value: Sequence[Span]) -> None: ... + ents: Tuple[Span] def set_ents( self, entities: List[Span], diff --git a/spacy/training/corpus.py b/spacy/training/corpus.py index 37af9e476..6037c15e3 100644 --- a/spacy/training/corpus.py +++ b/spacy/training/corpus.py @@ -6,7 +6,6 @@ from typing import TYPE_CHECKING, Callable, Iterable, Iterator, List, Optional, import srsly from .. 
import util -from ..compat import Protocol from ..errors import Errors, Warnings from ..tokens import Doc, DocBin from ..vocab import Vocab @@ -20,11 +19,6 @@ if TYPE_CHECKING: FILE_TYPE = ".spacy" -class ReaderProtocol(Protocol): - def __call__(self, nlp: "Language") -> Iterable[Example]: - pass - - @util.registry.readers("spacy.Corpus.v1") def create_docbin_reader( path: Optional[Path], @@ -32,7 +26,7 @@ def create_docbin_reader( max_length: int = 0, limit: int = 0, augmenter: Optional[Callable] = None, -) -> ReaderProtocol: +) -> Callable[["Language"], Iterable[Example]]: if path is None: raise ValueError(Errors.E913) util.logger.debug("Loading corpus from path: %s", path) @@ -51,7 +45,7 @@ def create_jsonl_reader( min_length: int = 0, max_length: int = 0, limit: int = 0, -) -> ReaderProtocol: +) -> Callable[["Language"], Iterable[Example]]: return JsonlCorpus(path, min_length=min_length, max_length=max_length, limit=limit) @@ -69,7 +63,7 @@ def create_plain_text_reader( path: Optional[Path], min_length: int = 0, max_length: int = 0, -) -> ReaderProtocol: +) -> Callable[["Language"], Iterable[Doc]]: """Iterate Example objects from a file or directory of plain text UTF-8 files with one line per doc. @@ -150,7 +144,7 @@ class Corpus: self.augmenter = augmenter if augmenter is not None else dont_augment self.shuffle = shuffle - def __call__(self, nlp: "Language") -> Iterable[Example]: + def __call__(self, nlp: "Language") -> Iterator[Example]: """Yield examples from the data. nlp (Language): The current nlp object. @@ -188,7 +182,7 @@ class Corpus: def make_examples( self, nlp: "Language", reference_docs: Iterable[Doc] - ) -> Iterable[Example]: + ) -> Iterator[Example]: for reference in reference_docs: if len(reference) == 0: continue @@ -203,7 +197,7 @@ class Corpus: def make_examples_gold_preproc( self, nlp: "Language", reference_docs: Iterable[Doc] - ) -> Iterable[Example]: + ) -> Iterator[Example]: for reference in reference_docs: if reference.has_annotation("SENT_START"): ref_sents = [sent.as_doc() for sent in reference.sents] @@ -216,7 +210,7 @@ class Corpus: def read_docbin( self, vocab: Vocab, locs: Iterable[Union[str, Path]] - ) -> Iterable[Doc]: + ) -> Iterator[Doc]: """Yield training examples as example dicts""" i = 0 for loc in locs: @@ -263,7 +257,7 @@ class JsonlCorpus: self.max_length = max_length self.limit = limit - def __call__(self, nlp: "Language") -> Iterable[Example]: + def __call__(self, nlp: "Language") -> Iterator[Example]: """Yield examples from the data. nlp (Language): The current nlp object. @@ -313,7 +307,7 @@ class PlainTextCorpus: self.min_length = min_length self.max_length = max_length - def __call__(self, nlp: "Language") -> Iterable[Example]: + def __call__(self, nlp: "Language") -> Iterator[Example]: """Yield examples from the data. nlp (Language): The current nlp object. diff --git a/spacy/training/example.pyi b/spacy/training/example.pyi deleted file mode 100644 index 9cd563465..000000000 --- a/spacy/training/example.pyi +++ /dev/null @@ -1,59 +0,0 @@ -from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple - -from ..tokens import Doc, Span -from ..vocab import Vocab -from .alignment import Alignment - -def annotations_to_doc( - vocab: Vocab, - tok_annot: Dict[str, Any], - doc_annot: Dict[str, Any], -) -> Doc: ... -def validate_examples( - examples: Iterable[Example], - method: str, -) -> None: ... -def validate_get_examples( - get_examples: Callable[[], Iterable[Example]], - method: str, -): ... 
- -class Example: - x: Doc - y: Doc - - def __init__( - self, - predicted: Doc, - reference: Doc, - *, - alignment: Optional[Alignment] = None, - ): ... - def __len__(self) -> int: ... - @property - def predicted(self) -> Doc: ... - @predicted.setter - def predicted(self, doc: Doc) -> None: ... - @property - def reference(self) -> Doc: ... - @reference.setter - def reference(self, doc: Doc) -> None: ... - def copy(self) -> Example: ... - @classmethod - def from_dict(cls, predicted: Doc, example_dict: Dict[str, Any]) -> Example: ... - @property - def alignment(self) -> Alignment: ... - def get_aligned(self, field: str, as_string=False): ... - def get_aligned_parse(self, projectivize=True): ... - def get_aligned_sent_starts(self): ... - def get_aligned_spans_x2y(self, x_spans: Sequence[Span], allow_overlap=False) -> List[Span]: ... - def get_aligned_spans_y2x(self, y_spans: Sequence[Span], allow_overlap=False) -> List[Span]: ... - def get_aligned_ents_and_ner(self) -> Tuple[List[Span], List[str]]: ... - def get_aligned_ner(self) -> List[str]: ... - def get_matching_ents(self, check_label: bool = True) -> List[Span]: ... - def to_dict(self) -> Dict[str, Any]: ... - def split_sents(self) -> List[Example]: ... - @property - def text(self) -> str: ... - def __str__(self) -> str: ... - def __repr__(self) -> str: ... From 41dba5bd344c2442906c7d3b74ad84e72b4a3847 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Fri, 7 Jul 2023 10:17:41 +0200 Subject: [PATCH 14/27] Update max_length default in span finder docs (#12803) --- website/docs/api/spanfinder.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/api/spanfinder.mdx b/website/docs/api/spanfinder.mdx index ca3104c85..ef4a6baa5 100644 --- a/website/docs/api/spanfinder.mdx +++ b/website/docs/api/spanfinder.mdx @@ -60,7 +60,7 @@ architectures and their arguments and hyperparameters. | `model` | A model instance that is given a list of documents and predicts a probability for each token. ~~Model[List[Doc], Floats2d]~~ | | `spans_key` | Key of the [`Doc.spans`](/api/doc#spans) dict to save the spans under. During initialization and training, the component will look for spans on the reference document under the same key. Defaults to `"sc"`. ~~str~~ | | `threshold` | Minimum probability to consider a prediction positive. Defaults to `0.5`. ~~float~~ | -| `max_length` | Maximum length of the produced spans, defaults to `None` meaning unlimited length. ~~Optional[int]~~ | +| `max_length` | Maximum length of the produced spans, defaults to `25`. ~~Optional[int]~~ | | `min_length` | Minimum length of the produced spans, defaults to `None` meaning shortest span length is 1. ~~Optional[int]~~ | | `scorer` | The scoring method. Defaults to [`Scorer.score_spans`](/api/scorer#score_spans) for `Doc.spans[spans_key]` with overlapping spans allowed. 
~~Optional[Callable]~~ | From 1a55661cfbb51d2dcbe2dbf725ea7c56aca80d7e Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Fri, 7 Jul 2023 10:52:33 +0200 Subject: [PATCH 15/27] Update website binder version to v3.6 (#12805) --- website/meta/site.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/meta/site.json b/website/meta/site.json index 3d4f2d5ee..08fcde62e 100644 --- a/website/meta/site.json +++ b/website/meta/site.json @@ -27,7 +27,7 @@ "indexName": "spacy" }, "binderUrl": "explosion/spacy-io-binder", - "binderVersion": "3.5", + "binderVersion": "3.6", "sections": [ { "id": "usage", "title": "Usage Documentation", "theme": "blue" }, { "id": "models", "title": "Models Documentation", "theme": "blue" }, From ddffd096024004f27a0dee3701dc248c4647b3a7 Mon Sep 17 00:00:00 2001 From: Sofie Van Landeghem Date: Fri, 7 Jul 2023 15:18:16 +0200 Subject: [PATCH 16/27] Trainable lemmatizer docs link (#12795) * add an anchor to the trainable lemmatizer section * add requirement for morphologizer,tagger to rule-based lemmatizer * morphologizer only --- website/docs/usage/linguistic-features.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/docs/usage/linguistic-features.mdx b/website/docs/usage/linguistic-features.mdx index 55d5680fe..90f305ada 100644 --- a/website/docs/usage/linguistic-features.mdx +++ b/website/docs/usage/linguistic-features.mdx @@ -113,7 +113,7 @@ print(doc[2].morph) # 'Case=Nom|Person=2|PronType=Prs' print(doc[2].pos_) # 'PRON' ``` -## Lemmatization {id="lemmatization",model="lemmatizer",version="3"} +## Lemmatization {id="lemmatization",version="3"} spaCy provides two pipeline components for lemmatization: @@ -170,7 +170,7 @@ nlp = spacy.blank("sv") nlp.add_pipe("lemmatizer", config={"mode": "lookup"}) ``` -### Rule-based lemmatizer {id="lemmatizer-rule"} +### Rule-based lemmatizer {id="lemmatizer-rule",model="morphologizer"} When training pipelines that include a component that assigns part-of-speech tags (a morphologizer or a tagger with a [POS mapping](#mappings-exceptions)), a @@ -194,7 +194,7 @@ information, without consulting the context of the token. The rule-based lemmatizer also accepts list-based exception files. For English, these are acquired from [WordNet](https://wordnet.princeton.edu/). -### Trainable lemmatizer +### Trainable lemmatizer {id="lemmatizer-train",model="trainable_lemmatizer"} The [`EditTreeLemmatizer`](/api/edittreelemmatizer) can learn form-to-lemma transformations from a training corpus that includes lemma annotations. This From 0566c3a166c7ccfb5a1bddb025dddf9c576a9ed2 Mon Sep 17 00:00:00 2001 From: Connor Brinton Date: Thu, 13 Jul 2023 11:33:05 -0400 Subject: [PATCH 17/27] =?UTF-8?q?=F0=9F=90=9B=20Escape=20annotated=20HTML?= =?UTF-8?q?=20tags=20in=20span=20renderer=20(#12817)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These changes add a missing call to `escape_html` in the displaCy span renderer. Previously span-annotated tokens would be inserted into the page markup without being escaped, resulting in potentially incorrect rendering. When I encountered this issue, it resulted in some docs and span underlines being superimposed on top of properly rendered docs and span underlines near the beginning of the visualization (due to an unescaped `` tag). 
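A minimal reproduction sketch of the escaping problem described in the commit message; it mirrors the regression test added below, and the `<TEST>` token and span label are illustrative:

```python
from spacy import displacy
from spacy.lang.en import English
from spacy.tokens import Doc, Span

nlp = English()
# A doc where the second token looks like an HTML tag
doc = Doc(nlp.vocab, words=["test", "<TEST>"])
doc.spans["sc"] = [Span(doc, 1, 2, label="test")]

html = displacy.render(doc, style="span")
# With the fix, the annotated token is escaped in the output markup
assert "&lt;TEST&gt;" in html
```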
--- spacy/displacy/render.py | 3 +-- spacy/tests/test_displacy.py | 19 +++++++++++++++++++ 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/spacy/displacy/render.py b/spacy/displacy/render.py index 86869e3b8..47407bcb7 100644 --- a/spacy/displacy/render.py +++ b/spacy/displacy/render.py @@ -1,4 +1,3 @@ -import itertools import uuid from typing import Any, Dict, List, Optional, Tuple, Union @@ -218,7 +217,7 @@ class SpanRenderer: + (self.offset_step * (len(entities) - 1)) ) markup += self.span_template.format( - text=token["text"], + text=escape_html(token["text"]), span_slices=slices, span_starts=starts, total_height=total_height, diff --git a/spacy/tests/test_displacy.py b/spacy/tests/test_displacy.py index ce103068a..1570f8d09 100644 --- a/spacy/tests/test_displacy.py +++ b/spacy/tests/test_displacy.py @@ -377,3 +377,22 @@ def test_displacy_manual_sorted_entities(): html = displacy.render(doc, style="ent", manual=True) assert html.find("FIRST") < html.find("SECOND") + + +@pytest.mark.issue(12816) +def test_issue12816(en_vocab) -> None: + """Test that displaCy's span visualizer escapes annotated HTML tags correctly.""" + # Create a doc containing an annotated word and an unannotated HTML tag + doc = Doc(en_vocab, words=["test", ""]) + doc.spans["sc"] = [Span(doc, 0, 1, label="test")] + + # Verify that the HTML tag is escaped when unannotated + html = displacy.render(doc, style="span") + assert "<TEST>" in html + + # Annotate the HTML tag + doc.spans["sc"].append(Span(doc, 1, 2, label="test")) + + # Verify that the HTML tag is still escaped + html = displacy.render(doc, style="span") + assert "<TEST>" in html From ef20e114e0c4984701e6bcd8af8b5cc5a12bc00a Mon Sep 17 00:00:00 2001 From: Ian Thompson Date: Fri, 14 Jul 2023 02:45:54 -0500 Subject: [PATCH 18/27] Typo fix in `Language.replace_listeners` docs (#12823) * modified: spacy/language.py - corrected typo in docstring for :method:`Language.replace_listeners` - added noqa comment on unused local variable assignment in :method:`Language.from_config` as I wasn't sure if it should be unassigned modified: website/docs/api/language.mdx - corrected typo in `Language.replace_listeners` markdown * modified: spacy/language.py - removed noqa comment --------- Co-authored-by: Ian Thompson --- spacy/language.py | 2 +- website/docs/api/language.mdx | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/spacy/language.py b/spacy/language.py index fd616483b..3b3e33991 100644 --- a/spacy/language.py +++ b/spacy/language.py @@ -1958,7 +1958,7 @@ class Language: useful when training a pipeline with components sourced from an existing pipeline: if multiple components (e.g. tagger, parser, NER) listen to the same tok2vec component, but some of them are frozen and not updated, - their performance may degrade significally as the tok2vec component is + their performance may degrade significantly as the tok2vec component is updated with new data. To prevent this, listeners can be replaced with a standalone tok2vec layer that is owned by the component and doesn't change if the component isn't updated. diff --git a/website/docs/api/language.mdx b/website/docs/api/language.mdx index de23156b9..068e8ea78 100644 --- a/website/docs/api/language.mdx +++ b/website/docs/api/language.mdx @@ -856,7 +856,7 @@ token-to-vector embedding component like [`Tok2Vec`](/api/tok2vec) or training a pipeline with components sourced from an existing pipeline: if multiple components (e.g. 
tagger, parser, NER) listen to the same token-to-vector component, but some of them are frozen and not updated, their -performance may degrade significally as the token-to-vector component is updated +performance may degrade significantly as the token-to-vector component is updated with new data. To prevent this, listeners can be replaced with a standalone token-to-vector layer that is owned by the component and doesn't change if the component isn't updated. From 95075298f5e6b28ecf41faef83af4bc247e12ed7 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Tue, 18 Jul 2023 09:29:04 +0200 Subject: [PATCH 19/27] Update pex Makefile defaults (#12832) * Update pex Makefile defaults - switch to python 3.8 - only install spacy-lookups-data for extra packages * Update website for pex defaults --- Makefile | 4 ++-- website/docs/usage/index.mdx | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 4de628663..c8f68be7f 100644 --- a/Makefile +++ b/Makefile @@ -1,11 +1,11 @@ SHELL := /bin/bash ifndef SPACY_EXTRAS -override SPACY_EXTRAS = spacy-lookups-data==1.0.2 jieba spacy-pkuseg==0.0.28 sudachipy sudachidict_core pymorphy2 +override SPACY_EXTRAS = spacy-lookups-data==1.0.3 endif ifndef PYVER -override PYVER = 3.6 +override PYVER = 3.8 endif VENV := ./env$(PYVER) diff --git a/website/docs/usage/index.mdx b/website/docs/usage/index.mdx index 4b06178d5..414968d42 100644 --- a/website/docs/usage/index.mdx +++ b/website/docs/usage/index.mdx @@ -261,7 +261,7 @@ source code and recompiling frequently. #### Visual Studio Code extension -![spaCy extension demo](/images/spacy-extension-demo.gif) +![spaCy extension demo](/images/spacy-extension-demo.gif) The [spaCy VSCode Extension](https://github.com/explosion/spacy-vscode) provides additional tooling and features for working with spaCy's config files. Version @@ -310,7 +310,7 @@ You can configure the build process with the following environment variables: | Variable | Description | | -------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `SPACY_EXTRAS` | Additional Python packages to install alongside spaCy with optional version specifications. Should be a string that can be passed to `pip install`. See [`Makefile`](%%GITHUB_SPACY/Makefile) for defaults. | -| `PYVER` | The Python version to build against. This version needs to be available on your build and runtime machines. Defaults to `3.6`. | +| `PYVER` | The Python version to build against. This version needs to be available on your build and runtime machines. Defaults to `3.8`. | | `WHEELHOUSE` | Directory to store the wheel files during compilation. Defaults to `./wheelhouse`. 
| ### Run tests {id="run-tests"} From 6bf7c65329a59055fa98e5a5493a4380397627b9 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Tue, 18 Jul 2023 10:00:07 +0200 Subject: [PATCH 20/27] Update matcher pattern validation tests (#12835) - parametrize over individual token patterns (as originally intended, as far as I can tell) - add a test for lowercase `in` in patterns --- spacy/tests/matcher/test_pattern_validation.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/spacy/tests/matcher/test_pattern_validation.py b/spacy/tests/matcher/test_pattern_validation.py index 21fa36865..45f9f4ee7 100644 --- a/spacy/tests/matcher/test_pattern_validation.py +++ b/spacy/tests/matcher/test_pattern_validation.py @@ -52,7 +52,8 @@ TEST_PATTERNS = [ @pytest.mark.parametrize( - "pattern", [[{"XX": "y"}, {"LENGTH": "2"}, {"TEXT": {"IN": 5}}]] + "pattern", + [[{"XX": "y"}], [{"LENGTH": "2"}], [{"TEXT": {"IN": 5}}], [{"text": {"in": 6}}]], ) def test_matcher_pattern_validation(en_vocab, pattern): matcher = Matcher(en_vocab, validate=True) From 1509c9669483abcd1b6c018cde5bc189dd04250b Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Tue, 18 Jul 2023 14:10:30 +0200 Subject: [PATCH 21/27] Clean up unused code in Language (#12836) Follow-up to #12701. --- spacy/language.py | 1 - 1 file changed, 1 deletion(-) diff --git a/spacy/language.py b/spacy/language.py index 3b3e33991..46f4a7996 100644 --- a/spacy/language.py +++ b/spacy/language.py @@ -1825,7 +1825,6 @@ class Language: # Later we replace the component config with the raw config again. interpolated = filled.interpolate() if not filled.is_interpolated else filled pipeline = interpolated.get("components", {}) - sourced = util.get_sourced_components(interpolated) # If components are loaded from a source (existing models), we cache # them here so they're only loaded once source_nlps = {} From b0228d8ea6f7cce9583c747fb84d9be8ae9a753e Mon Sep 17 00:00:00 2001 From: Basile Dura Date: Wed, 19 Jul 2023 12:03:31 +0200 Subject: [PATCH 22/27] ci: add cython linter (#12694) * chore: add cython-linter dev dependency * fix: lexeme.pyx * fix: morphology.pxd * fix: tokenizer.pxd * fix: vocab.pxd * fix: morphology.pxd (line length) * ci: add cython-lint * ci: fix cython-lint call * Fix kb/candidate.pyx. * Fix kb/kb.pyx. * Fix kb/kb_in_memory.pyx. * Fix kb. * Fix training/ partially. * Fix training/. Ignore trailing whitespaces and too long lines. * Fix ml/. * Fix matcher/. * Fix pipeline/. * Fix tokens/. * Fix build errors. Fix vocab.pyx. * Fix cython-lint install and run. * Fix lexeme.pyx, parts_of_speech.pxd, vectors.pyx. Temporarily disable cython-lint execution. * Fix attrs.pyx, lexeme.pyx, symbols.pxd, isort issues. * Make cython-lint install conditional. Fix tokenizer.pyx. * Fix remaining files. Reenable cython-lint check. * Readded parentheses. * Fix test_build_dependencies(). * Add explanatory comment to cython-lint execution. 
--------- Co-authored-by: Raphael Mitsch --- .github/workflows/tests.yml | 6 + requirements.txt | 1 + spacy/attrs.pxd | 2 +- spacy/attrs.pyx | 2 +- spacy/kb/candidate.pxd | 3 +- spacy/kb/candidate.pyx | 27 +- spacy/kb/kb.pyx | 49 +++- spacy/kb/kb_in_memory.pxd | 62 +++-- spacy/kb/kb_in_memory.pyx | 154 +++++++---- spacy/lexeme.pyx | 17 +- spacy/matcher/dependencymatcher.pyx | 10 +- spacy/matcher/matcher.pyx | 246 +++++++++++------- spacy/matcher/phrasematcher.pyx | 4 +- spacy/ml/parser_model.pxd | 17 +- spacy/ml/parser_model.pyx | 129 +++++---- spacy/morphology.pxd | 8 +- spacy/morphology.pyx | 10 +- spacy/parts_of_speech.pxd | 2 +- .../_edit_tree_internals/edit_trees.pxd | 17 +- .../_edit_tree_internals/edit_trees.pyx | 14 +- .../_parser_internals/_beam_utils.pyx | 6 +- spacy/pipeline/_parser_internals/_state.pxd | 1 - .../pipeline/_parser_internals/arc_eager.pyx | 18 +- spacy/pipeline/_parser_internals/ner.pyx | 17 +- spacy/pipeline/_parser_internals/nonproj.pyx | 12 +- .../pipeline/_parser_internals/stateclass.pyx | 24 +- .../_parser_internals/transition_system.pxd | 14 +- .../_parser_internals/transition_system.pyx | 3 - spacy/pipeline/dep_parser.pyx | 3 +- spacy/pipeline/morphologizer.pyx | 11 +- spacy/pipeline/multitask.pyx | 9 +- spacy/pipeline/ner.pyx | 5 +- spacy/pipeline/pipe.pyx | 6 +- spacy/pipeline/sentencizer.pyx | 28 +- spacy/pipeline/senter.pyx | 1 - spacy/pipeline/tagger.pyx | 11 +- spacy/pipeline/trainable_pipe.pyx | 14 +- spacy/pipeline/transition_parser.pxd | 18 +- spacy/pipeline/transition_parser.pyx | 64 ++--- spacy/strings.pyx | 5 +- spacy/structs.pxd | 2 +- spacy/symbols.pxd | 8 +- spacy/symbols.pyx | 8 +- spacy/tests/package/test_requirements.py | 1 + spacy/tokenizer.pxd | 76 ++++-- spacy/tokenizer.pyx | 37 ++- spacy/tokens/_retokenize.pyx | 27 +- spacy/tokens/doc.pxd | 3 +- spacy/tokens/doc.pyx | 32 +-- spacy/tokens/graph.pyx | 53 ++-- spacy/tokens/morphanalysis.pyx | 1 - spacy/tokens/span.pyx | 9 +- spacy/tokens/span_group.pyx | 6 +- spacy/tokens/token.pxd | 4 +- spacy/tokens/token.pyx | 8 +- spacy/training/align.pyx | 12 +- spacy/training/example.pyx | 15 +- spacy/training/gold_io.pyx | 31 ++- spacy/vectors.pyx | 32 +-- spacy/vocab.pxd | 2 +- spacy/vocab.pyx | 23 +- 61 files changed, 846 insertions(+), 594 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index d60c90c1c..4099b31e2 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -45,6 +45,12 @@ jobs: run: | python -m pip install flake8==5.0.4 python -m flake8 spacy --count --select=E901,E999,F821,F822,F823,W605 --show-source --statistics + - name: cython-lint + run: | + python -m pip install cython-lint -c requirements.txt + # E501: line too log, W291: trailing whitespace, E266: too many leading '#' for block comment + cython-lint spacy --ignore E501,W291,E266 + tests: name: Test needs: Validate diff --git a/requirements.txt b/requirements.txt index a007f495e..4a131d18c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -38,4 +38,5 @@ types-setuptools>=57.0.0 types-requests types-setuptools>=57.0.0 black==22.3.0 +cython-lint>=0.15.0; python_version >= "3.7" isort>=5.0,<6.0 diff --git a/spacy/attrs.pxd b/spacy/attrs.pxd index 6dc9ecaee..fbbac0ec2 100644 --- a/spacy/attrs.pxd +++ b/spacy/attrs.pxd @@ -96,4 +96,4 @@ cdef enum attr_id_t: ENT_ID = symbols.ENT_ID IDX - SENT_END \ No newline at end of file + SENT_END diff --git a/spacy/attrs.pyx b/spacy/attrs.pyx index dc8eed7c3..97b5d5e36 100644 --- a/spacy/attrs.pyx +++ b/spacy/attrs.pyx @@ -117,7 
+117,7 @@ def intify_attrs(stringy_attrs, strings_map=None, _do_deprecated=False): if "pos" in stringy_attrs: stringy_attrs["TAG"] = stringy_attrs.pop("pos") if "morph" in stringy_attrs: - morphs = stringy_attrs.pop("morph") + morphs = stringy_attrs.pop("morph") # no-cython-lint if "number" in stringy_attrs: stringy_attrs.pop("number") if "tenspect" in stringy_attrs: diff --git a/spacy/kb/candidate.pxd b/spacy/kb/candidate.pxd index 9fc4c4e9d..80fcbc459 100644 --- a/spacy/kb/candidate.pxd +++ b/spacy/kb/candidate.pxd @@ -4,7 +4,8 @@ from ..typedefs cimport hash_t from .kb cimport KnowledgeBase -# Object used by the Entity Linker that summarizes one entity-alias candidate combination. +# Object used by the Entity Linker that summarizes one entity-alias candidate +# combination. cdef class Candidate: cdef readonly KnowledgeBase kb cdef hash_t entity_hash diff --git a/spacy/kb/candidate.pyx b/spacy/kb/candidate.pyx index 4cd734f43..53fc9b036 100644 --- a/spacy/kb/candidate.pyx +++ b/spacy/kb/candidate.pyx @@ -8,15 +8,24 @@ from ..tokens import Span cdef class Candidate: - """A `Candidate` object refers to a textual mention (`alias`) that may or may not be resolved - to a specific `entity` from a Knowledge Base. This will be used as input for the entity linking - algorithm which will disambiguate the various candidates to the correct one. + """A `Candidate` object refers to a textual mention (`alias`) that may or + may not be resolved to a specific `entity` from a Knowledge Base. This + will be used as input for the entity linking algorithm which will + disambiguate the various candidates to the correct one. Each candidate (alias, entity) pair is assigned a certain prior probability. DOCS: https://spacy.io/api/kb/#candidate-init """ - def __init__(self, KnowledgeBase kb, entity_hash, entity_freq, entity_vector, alias_hash, prior_prob): + def __init__( + self, + KnowledgeBase kb, + entity_hash, + entity_freq, + entity_vector, + alias_hash, + prior_prob + ): self.kb = kb self.entity_hash = entity_hash self.entity_freq = entity_freq @@ -59,7 +68,8 @@ cdef class Candidate: def get_candidates(kb: KnowledgeBase, mention: Span) -> Iterable[Candidate]: """ - Return candidate entities for a given mention and fetching appropriate entries from the index. + Return candidate entities for a given mention and fetching appropriate + entries from the index. kb (KnowledgeBase): Knowledge base to query. mention (Span): Entity mention for which to identify candidates. RETURNS (Iterable[Candidate]): Identified candidates. @@ -67,9 +77,12 @@ def get_candidates(kb: KnowledgeBase, mention: Span) -> Iterable[Candidate]: return kb.get_candidates(mention) -def get_candidates_batch(kb: KnowledgeBase, mentions: Iterable[Span]) -> Iterable[Iterable[Candidate]]: +def get_candidates_batch( + kb: KnowledgeBase, mentions: Iterable[Span] +) -> Iterable[Iterable[Candidate]]: """ - Return candidate entities for the given mentions and fetching appropriate entries from the index. + Return candidate entities for the given mentions and fetching appropriate entries + from the index. kb (KnowledgeBase): Knowledge base to query. mention (Iterable[Span]): Entity mentions for which to identify candidates. RETURNS (Iterable[Iterable[Candidate]]): Identified candidates. 
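As a usage sketch of the candidate-lookup API whose docstrings are reformatted above (the entity ID, alias, frequency and vector values are illustrative assumptions):

```python
import spacy
from spacy.kb import InMemoryLookupKB

nlp = spacy.blank("en")
kb = InMemoryLookupKB(vocab=nlp.vocab, entity_vector_length=3)
kb.add_entity(entity="Q42", freq=12.0, entity_vector=[1.0, 2.0, 3.0])
kb.add_alias(alias="Douglas", entities=["Q42"], probabilities=[0.8])

# Candidates pair an alias with possible entities and prior probabilities
for candidate in kb.get_alias_candidates("Douglas"):
    print(candidate.entity_, candidate.prior_prob)
```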
diff --git a/spacy/kb/kb.pyx b/spacy/kb/kb.pyx index a88e18e1f..6ad4c3564 100644 --- a/spacy/kb/kb.pyx +++ b/spacy/kb/kb.pyx @@ -12,8 +12,9 @@ from .candidate import Candidate cdef class KnowledgeBase: - """A `KnowledgeBase` instance stores unique identifiers for entities and their textual aliases, - to support entity linking of named entities to real-world concepts. + """A `KnowledgeBase` instance stores unique identifiers for entities and + their textual aliases, to support entity linking of named entities to + real-world concepts. This is an abstract class and requires its operations to be implemented. DOCS: https://spacy.io/api/kb @@ -31,10 +32,13 @@ cdef class KnowledgeBase: self.entity_vector_length = entity_vector_length self.mem = Pool() - def get_candidates_batch(self, mentions: Iterable[Span]) -> Iterable[Iterable[Candidate]]: + def get_candidates_batch( + self, mentions: Iterable[Span] + ) -> Iterable[Iterable[Candidate]]: """ - Return candidate entities for specified texts. Each candidate defines the entity, the original alias, - and the prior probability of that alias resolving to that entity. + Return candidate entities for specified texts. Each candidate defines + the entity, the original alias, and the prior probability of that + alias resolving to that entity. If no candidate is found for a given text, an empty list is returned. mentions (Iterable[Span]): Mentions for which to get candidates. RETURNS (Iterable[Iterable[Candidate]]): Identified candidates. @@ -43,14 +47,17 @@ cdef class KnowledgeBase: def get_candidates(self, mention: Span) -> Iterable[Candidate]: """ - Return candidate entities for specified text. Each candidate defines the entity, the original alias, + Return candidate entities for specified text. Each candidate defines + the entity, the original alias, and the prior probability of that alias resolving to that entity. If the no candidate is found for a given text, an empty list is returned. mention (Span): Mention for which to get candidates. RETURNS (Iterable[Candidate]): Identified candidates. """ raise NotImplementedError( - Errors.E1045.format(parent="KnowledgeBase", method="get_candidates", name=self.__name__) + Errors.E1045.format( + parent="KnowledgeBase", method="get_candidates", name=self.__name__ + ) ) def get_vectors(self, entities: Iterable[str]) -> Iterable[Iterable[float]]: @@ -68,7 +75,9 @@ cdef class KnowledgeBase: RETURNS (Iterable[float]): Vector for specified entity. """ raise NotImplementedError( - Errors.E1045.format(parent="KnowledgeBase", method="get_vector", name=self.__name__) + Errors.E1045.format( + parent="KnowledgeBase", method="get_vector", name=self.__name__ + ) ) def to_bytes(self, **kwargs) -> bytes: @@ -76,7 +85,9 @@ cdef class KnowledgeBase: RETURNS (bytes): Current state as binary string. """ raise NotImplementedError( - Errors.E1045.format(parent="KnowledgeBase", method="to_bytes", name=self.__name__) + Errors.E1045.format( + parent="KnowledgeBase", method="to_bytes", name=self.__name__ + ) ) def from_bytes(self, bytes_data: bytes, *, exclude: Tuple[str] = tuple()): @@ -85,25 +96,35 @@ cdef class KnowledgeBase: exclude (Tuple[str]): Properties to exclude when restoring KB. 
""" raise NotImplementedError( - Errors.E1045.format(parent="KnowledgeBase", method="from_bytes", name=self.__name__) + Errors.E1045.format( + parent="KnowledgeBase", method="from_bytes", name=self.__name__ + ) ) - def to_disk(self, path: Union[str, Path], exclude: Iterable[str] = SimpleFrozenList()) -> None: + def to_disk( + self, path: Union[str, Path], exclude: Iterable[str] = SimpleFrozenList() + ) -> None: """ Write KnowledgeBase content to disk. path (Union[str, Path]): Target file path. exclude (Iterable[str]): List of components to exclude. """ raise NotImplementedError( - Errors.E1045.format(parent="KnowledgeBase", method="to_disk", name=self.__name__) + Errors.E1045.format( + parent="KnowledgeBase", method="to_disk", name=self.__name__ + ) ) - def from_disk(self, path: Union[str, Path], exclude: Iterable[str] = SimpleFrozenList()) -> None: + def from_disk( + self, path: Union[str, Path], exclude: Iterable[str] = SimpleFrozenList() + ) -> None: """ Load KnowledgeBase content from disk. path (Union[str, Path]): Target file path. exclude (Iterable[str]): List of components to exclude. """ raise NotImplementedError( - Errors.E1045.format(parent="KnowledgeBase", method="from_disk", name=self.__name__) + Errors.E1045.format( + parent="KnowledgeBase", method="from_disk", name=self.__name__ + ) ) diff --git a/spacy/kb/kb_in_memory.pxd b/spacy/kb/kb_in_memory.pxd index 08ec6b2a3..e0e33301a 100644 --- a/spacy/kb/kb_in_memory.pxd +++ b/spacy/kb/kb_in_memory.pxd @@ -55,23 +55,28 @@ cdef class InMemoryLookupKB(KnowledgeBase): # optional data, we can let users configure a DB as the backend for this. cdef object _features_table - cdef inline int64_t c_add_vector(self, vector[float] entity_vector) nogil: """Add an entity vector to the vectors table.""" cdef int64_t new_index = self._vectors_table.size() self._vectors_table.push_back(entity_vector) return new_index - - cdef inline int64_t c_add_entity(self, hash_t entity_hash, float freq, - int32_t vector_index, int feats_row) nogil: + cdef inline int64_t c_add_entity( + self, + hash_t entity_hash, + float freq, + int32_t vector_index, + int feats_row + ) nogil: """Add an entry to the vector of entries. - After calling this method, make sure to update also the _entry_index using the return value""" + After calling this method, make sure to update also the _entry_index + using the return value""" # This is what we'll map the entity hash key to. It's where the entry will sit # in the vector of entries, so we can get it later. cdef int64_t new_index = self._entries.size() - # Avoid struct initializer to enable nogil, cf https://github.com/cython/cython/issues/1642 + # Avoid struct initializer to enable nogil, cf. + # https://github.com/cython/cython/issues/1642 cdef KBEntryC entry entry.entity_hash = entity_hash entry.vector_index = vector_index @@ -81,11 +86,17 @@ cdef class InMemoryLookupKB(KnowledgeBase): self._entries.push_back(entry) return new_index - cdef inline int64_t c_add_aliases(self, hash_t alias_hash, vector[int64_t] entry_indices, vector[float] probs) nogil: - """Connect a mention to a list of potential entities with their prior probabilities . - After calling this method, make sure to update also the _alias_index using the return value""" - # This is what we'll map the alias hash key to. It's where the alias will be defined - # in the vector of aliases. 
+ cdef inline int64_t c_add_aliases( + self, + hash_t alias_hash, + vector[int64_t] entry_indices, + vector[float] probs + ) nogil: + """Connect a mention to a list of potential entities with their prior + probabilities. After calling this method, make sure to update also the + _alias_index using the return value""" + # This is what we'll map the alias hash key to. It's where the alias will be + # defined in the vector of aliases. cdef int64_t new_index = self._aliases_table.size() # Avoid struct initializer to enable nogil @@ -98,8 +109,9 @@ cdef class InMemoryLookupKB(KnowledgeBase): cdef inline void _create_empty_vectors(self, hash_t dummy_hash) nogil: """ - Initializing the vectors and making sure the first element of each vector is a dummy, - because the PreshMap maps pointing to indices in these vectors can not contain 0 as value + Initializing the vectors and making sure the first element of each vector is a + dummy, because the PreshMap maps pointing to indices in these vectors can not + contain 0 as value. cf. https://github.com/explosion/preshed/issues/17 """ cdef int32_t dummy_value = 0 @@ -130,12 +142,18 @@ cdef class InMemoryLookupKB(KnowledgeBase): cdef class Writer: cdef FILE* _fp - cdef int write_header(self, int64_t nr_entries, int64_t entity_vector_length) except -1 + cdef int write_header( + self, int64_t nr_entries, int64_t entity_vector_length + ) except -1 cdef int write_vector_element(self, float element) except -1 - cdef int write_entry(self, hash_t entry_hash, float entry_freq, int32_t vector_index) except -1 + cdef int write_entry( + self, hash_t entry_hash, float entry_freq, int32_t vector_index + ) except -1 cdef int write_alias_length(self, int64_t alias_length) except -1 - cdef int write_alias_header(self, hash_t alias_hash, int64_t candidate_length) except -1 + cdef int write_alias_header( + self, hash_t alias_hash, int64_t candidate_length + ) except -1 cdef int write_alias(self, int64_t entry_index, float prob) except -1 cdef int _write(self, void* value, size_t size) except -1 @@ -143,12 +161,18 @@ cdef class Writer: cdef class Reader: cdef FILE* _fp - cdef int read_header(self, int64_t* nr_entries, int64_t* entity_vector_length) except -1 + cdef int read_header( + self, int64_t* nr_entries, int64_t* entity_vector_length + ) except -1 cdef int read_vector_element(self, float* element) except -1 - cdef int read_entry(self, hash_t* entity_hash, float* freq, int32_t* vector_index) except -1 + cdef int read_entry( + self, hash_t* entity_hash, float* freq, int32_t* vector_index + ) except -1 cdef int read_alias_length(self, int64_t* alias_length) except -1 - cdef int read_alias_header(self, hash_t* alias_hash, int64_t* candidate_length) except -1 + cdef int read_alias_header( + self, hash_t* alias_hash, int64_t* candidate_length + ) except -1 cdef int read_alias(self, int64_t* entry_index, float* prob) except -1 cdef int _read(self, void* value, size_t size) except -1 diff --git a/spacy/kb/kb_in_memory.pyx b/spacy/kb/kb_in_memory.pyx index e991f7720..02773cbae 100644 --- a/spacy/kb/kb_in_memory.pyx +++ b/spacy/kb/kb_in_memory.pyx @@ -1,5 +1,5 @@ # cython: infer_types=True, profile=True -from typing import Any, Callable, Dict, Iterable, Union +from typing import Any, Callable, Dict, Iterable import srsly @@ -27,8 +27,9 @@ from .candidate import Candidate as Candidate cdef class InMemoryLookupKB(KnowledgeBase): - """An `InMemoryLookupKB` instance stores unique identifiers for entities and their textual aliases, - to support entity linking of named entities to 
real-world concepts. + """An `InMemoryLookupKB` instance stores unique identifiers for entities + and their textual aliases, to support entity linking of named entities to + real-world concepts. DOCS: https://spacy.io/api/inmemorylookupkb """ @@ -71,7 +72,8 @@ cdef class InMemoryLookupKB(KnowledgeBase): def add_entity(self, str entity, float freq, vector[float] entity_vector): """ - Add an entity to the KB, optionally specifying its log probability based on corpus frequency + Add an entity to the KB, optionally specifying its log probability + based on corpus frequency. Return the hash of the entity ID/name at the end. """ cdef hash_t entity_hash = self.vocab.strings.add(entity) @@ -83,14 +85,20 @@ cdef class InMemoryLookupKB(KnowledgeBase): # Raise an error if the provided entity vector is not of the correct length if len(entity_vector) != self.entity_vector_length: - raise ValueError(Errors.E141.format(found=len(entity_vector), required=self.entity_vector_length)) + raise ValueError( + Errors.E141.format( + found=len(entity_vector), required=self.entity_vector_length + ) + ) vector_index = self.c_add_vector(entity_vector=entity_vector) - new_index = self.c_add_entity(entity_hash=entity_hash, - freq=freq, - vector_index=vector_index, - feats_row=-1) # Features table currently not implemented + new_index = self.c_add_entity( + entity_hash=entity_hash, + freq=freq, + vector_index=vector_index, + feats_row=-1 + ) # Features table currently not implemented self._entry_index[entity_hash] = new_index return entity_hash @@ -115,7 +123,12 @@ cdef class InMemoryLookupKB(KnowledgeBase): else: entity_vector = vector_list[i] if len(entity_vector) != self.entity_vector_length: - raise ValueError(Errors.E141.format(found=len(entity_vector), required=self.entity_vector_length)) + raise ValueError( + Errors.E141.format( + found=len(entity_vector), + required=self.entity_vector_length + ) + ) entry.entity_hash = entity_hash entry.freq = freq_list[i] @@ -149,11 +162,15 @@ cdef class InMemoryLookupKB(KnowledgeBase): previous_alias_nr = self.get_size_aliases() # Throw an error if the length of entities and probabilities are not the same if not len(entities) == len(probabilities): - raise ValueError(Errors.E132.format(alias=alias, - entities_length=len(entities), - probabilities_length=len(probabilities))) + raise ValueError( + Errors.E132.format( + alias=alias, + entities_length=len(entities), + probabilities_length=len(probabilities)) + ) - # Throw an error if the probabilities sum up to more than 1 (allow for some rounding errors) + # Throw an error if the probabilities sum up to more than 1 (allow for + # some rounding errors) prob_sum = sum(probabilities) if prob_sum > 1.00001: raise ValueError(Errors.E133.format(alias=alias, sum=prob_sum)) @@ -170,40 +187,47 @@ cdef class InMemoryLookupKB(KnowledgeBase): for entity, prob in zip(entities, probabilities): entity_hash = self.vocab.strings[entity] - if not entity_hash in self._entry_index: + if entity_hash not in self._entry_index: raise ValueError(Errors.E134.format(entity=entity)) entry_index = self._entry_index.get(entity_hash) entry_indices.push_back(int(entry_index)) probs.push_back(float(prob)) - new_index = self.c_add_aliases(alias_hash=alias_hash, entry_indices=entry_indices, probs=probs) + new_index = self.c_add_aliases( + alias_hash=alias_hash, entry_indices=entry_indices, probs=probs + ) self._alias_index[alias_hash] = new_index if previous_alias_nr + 1 != self.get_size_aliases(): raise RuntimeError(Errors.E891.format(alias=alias)) return 
alias_hash - def append_alias(self, str alias, str entity, float prior_prob, ignore_warnings=False): + def append_alias( + self, str alias, str entity, float prior_prob, ignore_warnings=False + ): """ - For an alias already existing in the KB, extend its potential entities with one more. + For an alias already existing in the KB, extend its potential entities + with one more. Throw a warning if either the alias or the entity is unknown, or when the combination is already previously recorded. Throw an error if this entity+prior prob would exceed the sum of 1. - For efficiency, it's best to use the method `add_alias` as much as possible instead of this one. + For efficiency, it's best to use the method `add_alias` as much as + possible instead of this one. """ # Check if the alias exists in the KB cdef hash_t alias_hash = self.vocab.strings[alias] - if not alias_hash in self._alias_index: + if alias_hash not in self._alias_index: raise ValueError(Errors.E176.format(alias=alias)) # Check if the entity exists in the KB cdef hash_t entity_hash = self.vocab.strings[entity] - if not entity_hash in self._entry_index: + if entity_hash not in self._entry_index: raise ValueError(Errors.E134.format(entity=entity)) entry_index = self._entry_index.get(entity_hash) - # Throw an error if the prior probabilities (including the new one) sum up to more than 1 + # Throw an error if the prior probabilities (including the new one) + # sum up to more than 1 alias_index = self._alias_index.get(alias_hash) alias_entry = self._aliases_table[alias_index] current_sum = sum([p for p in alias_entry.probs]) @@ -236,12 +260,13 @@ cdef class InMemoryLookupKB(KnowledgeBase): def get_alias_candidates(self, str alias) -> Iterable[Candidate]: """ - Return candidate entities for an alias. Each candidate defines the entity, the original alias, - and the prior probability of that alias resolving to that entity. + Return candidate entities for an alias. Each candidate defines the + entity, the original alias, and the prior probability of that alias + resolving to that entity. If the alias is not known in the KB, and empty list is returned. 
""" cdef hash_t alias_hash = self.vocab.strings[alias] - if not alias_hash in self._alias_index: + if alias_hash not in self._alias_index: return [] alias_index = self._alias_index.get(alias_hash) alias_entry = self._aliases_table[alias_index] @@ -249,10 +274,14 @@ cdef class InMemoryLookupKB(KnowledgeBase): return [Candidate(kb=self, entity_hash=self._entries[entry_index].entity_hash, entity_freq=self._entries[entry_index].freq, - entity_vector=self._vectors_table[self._entries[entry_index].vector_index], + entity_vector=self._vectors_table[ + self._entries[entry_index].vector_index + ], alias_hash=alias_hash, prior_prob=prior_prob) - for (entry_index, prior_prob) in zip(alias_entry.entry_indices, alias_entry.probs) + for (entry_index, prior_prob) in zip( + alias_entry.entry_indices, alias_entry.probs + ) if entry_index != 0] def get_vector(self, str entity): @@ -266,8 +295,9 @@ cdef class InMemoryLookupKB(KnowledgeBase): return self._vectors_table[self._entries[entry_index].vector_index] def get_prior_prob(self, str entity, str alias): - """ Return the prior probability of a given alias being linked to a given entity, - or return 0.0 when this combination is not known in the knowledge base""" + """ Return the prior probability of a given alias being linked to a + given entity, or return 0.0 when this combination is not known in the + knowledge base.""" cdef hash_t alias_hash = self.vocab.strings[alias] cdef hash_t entity_hash = self.vocab.strings[entity] @@ -278,7 +308,9 @@ cdef class InMemoryLookupKB(KnowledgeBase): entry_index = self._entry_index[entity_hash] alias_entry = self._aliases_table[alias_index] - for (entry_index, prior_prob) in zip(alias_entry.entry_indices, alias_entry.probs): + for (entry_index, prior_prob) in zip( + alias_entry.entry_indices, alias_entry.probs + ): if self._entries[entry_index].entity_hash == entity_hash: return prior_prob @@ -288,13 +320,19 @@ cdef class InMemoryLookupKB(KnowledgeBase): """Serialize the current state to a binary string. """ def serialize_header(): - header = (self.get_size_entities(), self.get_size_aliases(), self.entity_vector_length) + header = ( + self.get_size_entities(), + self.get_size_aliases(), + self.entity_vector_length + ) return srsly.json_dumps(header) def serialize_entries(): i = 1 tuples = [] - for entry_hash, entry_index in sorted(self._entry_index.items(), key=lambda x: x[1]): + for entry_hash, entry_index in sorted( + self._entry_index.items(), key=lambda x: x[1] + ): entry = self._entries[entry_index] assert entry.entity_hash == entry_hash assert entry_index == i @@ -307,7 +345,9 @@ cdef class InMemoryLookupKB(KnowledgeBase): headers = [] indices_lists = [] probs_lists = [] - for alias_hash, alias_index in sorted(self._alias_index.items(), key=lambda x: x[1]): + for alias_hash, alias_index in sorted( + self._alias_index.items(), key=lambda x: x[1] + ): alias = self._aliases_table[alias_index] assert alias_index == i candidate_length = len(alias.entry_indices) @@ -365,7 +405,7 @@ cdef class InMemoryLookupKB(KnowledgeBase): indices = srsly.json_loads(all_data[1]) probs = srsly.json_loads(all_data[2]) for header, indices, probs in zip(headers, indices, probs): - alias_hash, candidate_length = header + alias_hash, _candidate_length = header alias.entry_indices = indices alias.probs = probs self._aliases_table[i] = alias @@ -414,10 +454,14 @@ cdef class InMemoryLookupKB(KnowledgeBase): writer.write_vector_element(element) i = i+1 - # dumping the entry records in the order in which they are in the _entries vector. 
- # index 0 is a dummy object not stored in the _entry_index and can be ignored. + # dumping the entry records in the order in which they are in the + # _entries vector. + # index 0 is a dummy object not stored in the _entry_index and can + # be ignored. i = 1 - for entry_hash, entry_index in sorted(self._entry_index.items(), key=lambda x: x[1]): + for entry_hash, entry_index in sorted( + self._entry_index.items(), key=lambda x: x[1] + ): entry = self._entries[entry_index] assert entry.entity_hash == entry_hash assert entry_index == i @@ -429,7 +473,9 @@ cdef class InMemoryLookupKB(KnowledgeBase): # dumping the aliases in the order in which they are in the _alias_index vector. # index 0 is a dummy object not stored in the _aliases_table and can be ignored. i = 1 - for alias_hash, alias_index in sorted(self._alias_index.items(), key=lambda x: x[1]): + for alias_hash, alias_index in sorted( + self._alias_index.items(), key=lambda x: x[1] + ): alias = self._aliases_table[alias_index] assert alias_index == i @@ -535,7 +581,8 @@ cdef class Writer: def __init__(self, path): assert isinstance(path, Path) content = bytes(path) - cdef bytes bytes_loc = content.encode('utf8') if type(content) == str else content + cdef bytes bytes_loc = content.encode('utf8') \ + if type(content) == str else content self._fp = fopen(bytes_loc, 'wb') if not self._fp: raise IOError(Errors.E146.format(path=path)) @@ -545,14 +592,18 @@ cdef class Writer: cdef size_t status = fclose(self._fp) assert status == 0 - cdef int write_header(self, int64_t nr_entries, int64_t entity_vector_length) except -1: + cdef int write_header( + self, int64_t nr_entries, int64_t entity_vector_length + ) except -1: self._write(&nr_entries, sizeof(nr_entries)) self._write(&entity_vector_length, sizeof(entity_vector_length)) cdef int write_vector_element(self, float element) except -1: self._write(&element, sizeof(element)) - cdef int write_entry(self, hash_t entry_hash, float entry_freq, int32_t vector_index) except -1: + cdef int write_entry( + self, hash_t entry_hash, float entry_freq, int32_t vector_index + ) except -1: self._write(&entry_hash, sizeof(entry_hash)) self._write(&entry_freq, sizeof(entry_freq)) self._write(&vector_index, sizeof(vector_index)) @@ -561,7 +612,9 @@ cdef class Writer: cdef int write_alias_length(self, int64_t alias_length) except -1: self._write(&alias_length, sizeof(alias_length)) - cdef int write_alias_header(self, hash_t alias_hash, int64_t candidate_length) except -1: + cdef int write_alias_header( + self, hash_t alias_hash, int64_t candidate_length + ) except -1: self._write(&alias_hash, sizeof(alias_hash)) self._write(&candidate_length, sizeof(candidate_length)) @@ -577,16 +630,19 @@ cdef class Writer: cdef class Reader: def __init__(self, path): content = bytes(path) - cdef bytes bytes_loc = content.encode('utf8') if type(content) == str else content + cdef bytes bytes_loc = content.encode('utf8') \ + if type(content) == str else content self._fp = fopen(bytes_loc, 'rb') if not self._fp: PyErr_SetFromErrno(IOError) - status = fseek(self._fp, 0, 0) # this can be 0 if there is no header + fseek(self._fp, 0, 0) # this can be 0 if there is no header def __dealloc__(self): fclose(self._fp) - cdef int read_header(self, int64_t* nr_entries, int64_t* entity_vector_length) except -1: + cdef int read_header( + self, int64_t* nr_entries, int64_t* entity_vector_length + ) except -1: status = self._read(nr_entries, sizeof(int64_t)) if status < 1: if feof(self._fp): @@ -606,7 +662,9 @@ cdef class Reader: return 0 # end 
of file raise IOError(Errors.E145.format(param="vector element")) - cdef int read_entry(self, hash_t* entity_hash, float* freq, int32_t* vector_index) except -1: + cdef int read_entry( + self, hash_t* entity_hash, float* freq, int32_t* vector_index + ) except -1: status = self._read(entity_hash, sizeof(hash_t)) if status < 1: if feof(self._fp): @@ -637,7 +695,9 @@ cdef class Reader: return 0 # end of file raise IOError(Errors.E145.format(param="alias length")) - cdef int read_alias_header(self, hash_t* alias_hash, int64_t* candidate_length) except -1: + cdef int read_alias_header( + self, hash_t* alias_hash, int64_t* candidate_length + ) except -1: status = self._read(alias_hash, sizeof(hash_t)) if status < 1: if feof(self._fp): diff --git a/spacy/lexeme.pyx b/spacy/lexeme.pyx index 00e2c6258..60d22e615 100644 --- a/spacy/lexeme.pyx +++ b/spacy/lexeme.pyx @@ -1,7 +1,6 @@ # cython: embedsignature=True # Compiler crashes on memory view coercion without this. Should report bug. cimport numpy as np -from cython.view cimport array as cvarray from libc.string cimport memset np.import_array() @@ -35,7 +34,7 @@ from .typedefs cimport attr_t, flags_t from .attrs import intify_attrs from .errors import Errors, Warnings -OOV_RANK = 0xffffffffffffffff # UINT64_MAX +OOV_RANK = 0xffffffffffffffff # UINT64_MAX memset(&EMPTY_LEXEME, 0, sizeof(LexemeC)) EMPTY_LEXEME.id = OOV_RANK @@ -105,7 +104,7 @@ cdef class Lexeme: if isinstance(value, float): continue elif isinstance(value, (int, long)): - Lexeme.set_struct_attr(self.c, attr, value) + Lexeme.set_struct_attr(self.c, attr, value) else: Lexeme.set_struct_attr(self.c, attr, self.vocab.strings.add(value)) @@ -137,10 +136,12 @@ cdef class Lexeme: if hasattr(other, "orth"): if self.c.orth == other.orth: return 1.0 - elif hasattr(other, "__len__") and len(other) == 1 \ - and hasattr(other[0], "orth"): - if self.c.orth == other[0].orth: - return 1.0 + elif ( + hasattr(other, "__len__") and len(other) == 1 + and hasattr(other[0], "orth") + and self.c.orth == other[0].orth + ): + return 1.0 if self.vector_norm == 0 or other.vector_norm == 0: warnings.warn(Warnings.W008.format(obj="Lexeme")) return 0.0 @@ -149,7 +150,7 @@ cdef class Lexeme: result = xp.dot(vector, other.vector) / (self.vector_norm * other.vector_norm) # ensure we get a scalar back (numpy does this automatically but cupy doesn't) return result.item() - + @property def has_vector(self): """RETURNS (bool): Whether a word vector is associated with the object. diff --git a/spacy/matcher/dependencymatcher.pyx b/spacy/matcher/dependencymatcher.pyx index a214c0668..348e000ff 100644 --- a/spacy/matcher/dependencymatcher.pyx +++ b/spacy/matcher/dependencymatcher.pyx @@ -108,7 +108,7 @@ cdef class DependencyMatcher: key (str): The match ID. RETURNS (bool): Whether the matcher contains rules for this match ID. 
""" - return self.has_key(key) + return self.has_key(key) # no-cython-lint: W601 def _validate_input(self, pattern, key): idx = 0 @@ -264,7 +264,7 @@ cdef class DependencyMatcher: def remove(self, key): key = self._normalize_key(key) - if not key in self._patterns: + if key not in self._patterns: raise ValueError(Errors.E175.format(key=key)) self._patterns.pop(key) self._raw_patterns.pop(key) @@ -382,7 +382,7 @@ cdef class DependencyMatcher: return [] return [doc[node].head] - def _gov(self,doc,node): + def _gov(self, doc, node): return list(doc[node].children) def _dep_chain(self, doc, node): @@ -443,7 +443,7 @@ cdef class DependencyMatcher: def _right_child(self, doc, node): return [child for child in doc[node].rights] - + def _left_child(self, doc, node): return [child for child in doc[node].lefts] @@ -461,7 +461,7 @@ cdef class DependencyMatcher: if doc[node].head.i > node: return [doc[node].head] return [] - + def _left_parent(self, doc, node): if doc[node].head.i < node: return [doc[node].head] diff --git a/spacy/matcher/matcher.pyx b/spacy/matcher/matcher.pyx index 3d03f37ae..167f85af4 100644 --- a/spacy/matcher/matcher.pyx +++ b/spacy/matcher/matcher.pyx @@ -12,31 +12,18 @@ import warnings import srsly -from ..attrs cimport ( - DEP, - ENT_IOB, - ID, - LEMMA, - MORPH, - NULL_ATTR, - ORTH, - POS, - TAG, - attr_id_t, -) +from ..attrs cimport DEP, ENT_IOB, ID, LEMMA, MORPH, NULL_ATTR, POS, TAG from ..structs cimport TokenC from ..tokens.doc cimport Doc, get_token_attr_for_matcher from ..tokens.morphanalysis cimport MorphAnalysis from ..tokens.span cimport Span from ..tokens.token cimport Token from ..typedefs cimport attr_t -from ..vocab cimport Vocab from ..attrs import IDS from ..errors import Errors, MatchPatternError, Warnings from ..schemas import validate_token_pattern from ..strings import get_string_id -from ..util import registry from .levenshtein import levenshtein_compare DEF PADDING = 5 @@ -87,9 +74,9 @@ cdef class Matcher: key (str): The match ID. RETURNS (bool): Whether the matcher contains rules for this match ID. """ - return self.has_key(key) + return self.has_key(key) # no-cython-lint: W601 - def add(self, key, patterns, *, on_match=None, greedy: str=None): + def add(self, key, patterns, *, on_match=None, greedy: str = None): """Add a match-rule to the matcher. A match-rule consists of: an ID key, an on_match callback, and one or more patterns. @@ -143,8 +130,13 @@ cdef class Matcher: key = self._normalize_key(key) for pattern in patterns: try: - specs = _preprocess_pattern(pattern, self.vocab, - self._extensions, self._extra_predicates, self._fuzzy_compare) + specs = _preprocess_pattern( + pattern, + self.vocab, + self._extensions, + self._extra_predicates, + self._fuzzy_compare + ) self.patterns.push_back(init_pattern(self.mem, key, specs)) for spec in specs: for attr, _ in spec[1]: @@ -168,7 +160,7 @@ cdef class Matcher: key (str): The ID of the match rule. 
""" norm_key = self._normalize_key(key) - if not norm_key in self._patterns: + if norm_key not in self._patterns: raise ValueError(Errors.E175.format(key=key)) self._patterns.pop(norm_key) self._callbacks.pop(norm_key) @@ -268,8 +260,15 @@ cdef class Matcher: if self.patterns.empty(): matches = [] else: - matches = find_matches(&self.patterns[0], self.patterns.size(), doclike, length, - extensions=self._extensions, predicates=self._extra_predicates, with_alignments=with_alignments) + matches = find_matches( + &self.patterns[0], + self.patterns.size(), + doclike, + length, + extensions=self._extensions, + predicates=self._extra_predicates, + with_alignments=with_alignments + ) final_matches = [] pairs_by_id = {} # For each key, either add all matches, or only the filtered, @@ -289,9 +288,9 @@ cdef class Matcher: memset(matched, 0, length * sizeof(matched[0])) span_filter = self._filter.get(key) if span_filter == "FIRST": - sorted_pairs = sorted(pairs, key=lambda x: (x[0], -x[1]), reverse=False) # sort by start + sorted_pairs = sorted(pairs, key=lambda x: (x[0], -x[1]), reverse=False) # sort by start elif span_filter == "LONGEST": - sorted_pairs = sorted(pairs, key=lambda x: (x[1]-x[0], -x[0]), reverse=True) # reverse sort by length + sorted_pairs = sorted(pairs, key=lambda x: (x[1]-x[0], -x[0]), reverse=True) # reverse sort by length else: raise ValueError(Errors.E947.format(expected=["FIRST", "LONGEST"], arg=span_filter)) for match in sorted_pairs: @@ -366,7 +365,6 @@ cdef find_matches(TokenPatternC** patterns, int n, object doclike, int length, e cdef vector[MatchC] matches cdef vector[vector[MatchAlignmentC]] align_states cdef vector[vector[MatchAlignmentC]] align_matches - cdef PatternStateC state cdef int i, j, nr_extra_attr cdef Pool mem = Pool() output = [] @@ -388,14 +386,22 @@ cdef find_matches(TokenPatternC** patterns, int n, object doclike, int length, e value = token.vocab.strings[value] extra_attr_values[i * nr_extra_attr + index] = value # Main loop - cdef int nr_predicate = len(predicates) for i in range(length): for j in range(n): states.push_back(PatternStateC(patterns[j], i, 0)) if with_alignments != 0: align_states.resize(states.size()) - transition_states(states, matches, align_states, align_matches, predicate_cache, - doclike[i], extra_attr_values, predicates, with_alignments) + transition_states( + states, + matches, + align_states, + align_matches, + predicate_cache, + doclike[i], + extra_attr_values, + predicates, + with_alignments + ) extra_attr_values += nr_extra_attr predicate_cache += len(predicates) # Handle matches that end in 0-width patterns @@ -421,18 +427,28 @@ cdef find_matches(TokenPatternC** patterns, int n, object doclike, int length, e return output -cdef void transition_states(vector[PatternStateC]& states, vector[MatchC]& matches, - vector[vector[MatchAlignmentC]]& align_states, vector[vector[MatchAlignmentC]]& align_matches, - int8_t* cached_py_predicates, - Token token, const attr_t* extra_attrs, py_predicates, bint with_alignments) except *: +cdef void transition_states( + vector[PatternStateC]& states, + vector[MatchC]& matches, + vector[vector[MatchAlignmentC]]& align_states, + vector[vector[MatchAlignmentC]]& align_matches, + int8_t* cached_py_predicates, + Token token, + const attr_t* extra_attrs, + py_predicates, + bint with_alignments +) except *: cdef int q = 0 cdef vector[PatternStateC] new_states cdef vector[vector[MatchAlignmentC]] align_new_states - cdef int nr_predicate = len(py_predicates) for i in range(states.size()): if 
states[i].pattern.nr_py >= 1: - update_predicate_cache(cached_py_predicates, - states[i].pattern, token, py_predicates) + update_predicate_cache( + cached_py_predicates, + states[i].pattern, + token, + py_predicates + ) action = get_action(states[i], token.c, extra_attrs, cached_py_predicates) if action == REJECT: @@ -468,8 +484,12 @@ cdef void transition_states(vector[PatternStateC]& states, vector[MatchC]& match align_new_states.push_back(align_states[q]) states[q].pattern += 1 if states[q].pattern.nr_py != 0: - update_predicate_cache(cached_py_predicates, - states[q].pattern, token, py_predicates) + update_predicate_cache( + cached_py_predicates, + states[q].pattern, + token, + py_predicates + ) action = get_action(states[q], token.c, extra_attrs, cached_py_predicates) # Update alignment before the transition of current state @@ -485,8 +505,12 @@ cdef void transition_states(vector[PatternStateC]& states, vector[MatchC]& match ent_id = get_ent_id(state.pattern) if action == MATCH: matches.push_back( - MatchC(pattern_id=ent_id, start=state.start, - length=state.length+1)) + MatchC( + pattern_id=ent_id, + start=state.start, + length=state.length+1 + ) + ) # `align_matches` always corresponds to `matches` 1:1 if with_alignments != 0: align_matches.push_back(align_states[q]) @@ -494,23 +518,35 @@ cdef void transition_states(vector[PatternStateC]& states, vector[MatchC]& match # push match without last token if length > 0 if state.length > 0: matches.push_back( - MatchC(pattern_id=ent_id, start=state.start, - length=state.length)) + MatchC( + pattern_id=ent_id, + start=state.start, + length=state.length + ) + ) # MATCH_DOUBLE emits matches twice, # add one more to align_matches in order to keep 1:1 relationship if with_alignments != 0: align_matches.push_back(align_states[q]) # push match with last token matches.push_back( - MatchC(pattern_id=ent_id, start=state.start, - length=state.length+1)) + MatchC( + pattern_id=ent_id, + start=state.start, + length=state.length + 1 + ) + ) # `align_matches` always corresponds to `matches` 1:1 if with_alignments != 0: align_matches.push_back(align_states[q]) elif action == MATCH_REJECT: matches.push_back( - MatchC(pattern_id=ent_id, start=state.start, - length=state.length)) + MatchC( + pattern_id=ent_id, + start=state.start, + length=state.length + ) + ) # `align_matches` always corresponds to `matches` 1:1 if with_alignments != 0: align_matches.push_back(align_states[q]) @@ -533,8 +569,12 @@ cdef void transition_states(vector[PatternStateC]& states, vector[MatchC]& match align_states.push_back(align_new_states[i]) -cdef int update_predicate_cache(int8_t* cache, - const TokenPatternC* pattern, Token token, predicates) except -1: +cdef int update_predicate_cache( + int8_t* cache, + const TokenPatternC* pattern, + Token token, + predicates +) except -1: # If the state references any extra predicates, check whether they match. # These are cached, so that we don't call these potentially expensive # Python functions more than we need to. @@ -580,10 +620,12 @@ cdef void finish_states(vector[MatchC]& matches, vector[PatternStateC]& states, else: state.pattern += 1 - -cdef action_t get_action(PatternStateC state, - const TokenC* token, const attr_t* extra_attrs, - const int8_t* predicate_matches) nogil: +cdef action_t get_action( + PatternStateC state, + const TokenC * token, + const attr_t * extra_attrs, + const int8_t * predicate_matches +) nogil: """We need to consider: a) Does the token match the specification? [Yes, No] b) What's the quantifier? [1, 0+, ?] 
@@ -649,53 +691,56 @@ cdef action_t get_action(PatternStateC state, is_match = not is_match quantifier = ONE if quantifier == ONE: - if is_match and is_final: - # Yes, final: 1000 - return MATCH - elif is_match and not is_final: - # Yes, non-final: 0100 - return ADVANCE - elif not is_match and is_final: - # No, final: 0000 - return REJECT - else: - return REJECT + if is_match and is_final: + # Yes, final: 1000 + return MATCH + elif is_match and not is_final: + # Yes, non-final: 0100 + return ADVANCE + elif not is_match and is_final: + # No, final: 0000 + return REJECT + else: + return REJECT elif quantifier == ZERO_PLUS: - if is_match and is_final: - # Yes, final: 1001 - return MATCH_EXTEND - elif is_match and not is_final: - # Yes, non-final: 0011 - return RETRY_EXTEND - elif not is_match and is_final: - # No, final 2000 (note: Don't include last token!) - return MATCH_REJECT - else: - # No, non-final 0010 - return RETRY + if is_match and is_final: + # Yes, final: 1001 + return MATCH_EXTEND + elif is_match and not is_final: + # Yes, non-final: 0011 + return RETRY_EXTEND + elif not is_match and is_final: + # No, final 2000 (note: Don't include last token!) + return MATCH_REJECT + else: + # No, non-final 0010 + return RETRY elif quantifier == ZERO_ONE: - if is_match and is_final: - # Yes, final: 3000 - # To cater for a pattern ending in "?", we need to add - # a match both with and without the last token - return MATCH_DOUBLE - elif is_match and not is_final: - # Yes, non-final: 0110 - # We need both branches here, consider a pair like: - # pattern: .?b string: b - # If we 'ADVANCE' on the .?, we miss the match. - return RETRY_ADVANCE - elif not is_match and is_final: - # No, final 2000 (note: Don't include last token!) - return MATCH_REJECT - else: - # No, non-final 0010 - return RETRY + if is_match and is_final: + # Yes, final: 3000 + # To cater for a pattern ending in "?", we need to add + # a match both with and without the last token + return MATCH_DOUBLE + elif is_match and not is_final: + # Yes, non-final: 0110 + # We need both branches here, consider a pair like: + # pattern: .?b string: b + # If we 'ADVANCE' on the .?, we miss the match. + return RETRY_ADVANCE + elif not is_match and is_final: + # No, final 2000 (note: Don't include last token!) 
+ return MATCH_REJECT + else: + # No, non-final 0010 + return RETRY -cdef int8_t get_is_match(PatternStateC state, - const TokenC* token, const attr_t* extra_attrs, - const int8_t* predicate_matches) nogil: +cdef int8_t get_is_match( + PatternStateC state, + const TokenC* token, + const attr_t* extra_attrs, + const int8_t* predicate_matches +) nogil: for i in range(state.pattern.nr_py): if predicate_matches[state.pattern.py_predicates[i]] == -1: return 0 @@ -860,7 +905,7 @@ class _FuzzyPredicate: self.is_extension = is_extension if self.predicate not in self.operators: raise ValueError(Errors.E126.format(good=self.operators, bad=self.predicate)) - fuzz = self.predicate[len("FUZZY"):] # number after prefix + fuzz = self.predicate[len("FUZZY"):] # number after prefix self.fuzzy = int(fuzz) if fuzz else -1 self.fuzzy_compare = fuzzy_compare self.key = _predicate_cache_key(self.attr, self.predicate, value, fuzzy=self.fuzzy) @@ -1082,7 +1127,7 @@ def _get_extra_predicates_dict(attr, value_dict, vocab, predicate_types, elif cls == _FuzzyPredicate: if isinstance(value, dict): # add predicates inside fuzzy operator - fuzz = type_[len("FUZZY"):] # number after prefix + fuzz = type_[len("FUZZY"):] # number after prefix fuzzy_val = int(fuzz) if fuzz else -1 output.extend(_get_extra_predicates_dict(attr, value, vocab, predicate_types, extra_predicates, seen_predicates, @@ -1101,8 +1146,9 @@ def _get_extra_predicates_dict(attr, value_dict, vocab, predicate_types, return output -def _get_extension_extra_predicates(spec, extra_predicates, predicate_types, - seen_predicates): +def _get_extension_extra_predicates( + spec, extra_predicates, predicate_types, seen_predicates +): output = [] for attr, value in spec.items(): if isinstance(value, dict): @@ -1131,7 +1177,7 @@ def _get_operators(spec): return (ONE,) elif spec["OP"] in lookup: return lookup[spec["OP"]] - #Min_max {n,m} + # Min_max {n,m} elif spec["OP"].startswith("{") and spec["OP"].endswith("}"): # {n} --> {n,n} exactly n ONE,(n) # {n,m}--> {n,m} min of n, max of m ONE,(n),ZERO_ONE,(m) @@ -1142,8 +1188,8 @@ def _get_operators(spec): min_max = min_max if "," in min_max else f"{min_max},{min_max}" n, m = min_max.split(",") - #1. Either n or m is a blank string and the other is numeric -->isdigit - #2. Both are numeric and n <= m + # 1. Either n or m is a blank string and the other is numeric -->isdigit + # 2. 
Both are numeric and n <= m if (not n.isdecimal() and not m.isdecimal()) or (n.isdecimal() and m.isdecimal() and int(n) > int(m)): keys = ", ".join(lookup.keys()) + ", {n}, {n,m}, {n,}, {,m} where n and m are integers and n <= m " raise ValueError(Errors.E011.format(op=spec["OP"], opts=keys)) diff --git a/spacy/matcher/phrasematcher.pyx b/spacy/matcher/phrasematcher.pyx index c407cf1cc..26633e6d6 100644 --- a/spacy/matcher/phrasematcher.pyx +++ b/spacy/matcher/phrasematcher.pyx @@ -1,14 +1,12 @@ # cython: infer_types=True, profile=True -from libc.stdint cimport uintptr_t from preshed.maps cimport map_clear, map_get, map_init, map_iter, map_set import warnings -from ..attrs cimport DEP, LEMMA, MORPH, ORTH, POS, TAG +from ..attrs cimport DEP, LEMMA, MORPH, POS, TAG from ..attrs import IDS -from ..structs cimport TokenC from ..tokens.span cimport Span from ..tokens.token cimport Token from ..typedefs cimport attr_t diff --git a/spacy/ml/parser_model.pxd b/spacy/ml/parser_model.pxd index ca31c1699..4d2d7b3fe 100644 --- a/spacy/ml/parser_model.pxd +++ b/spacy/ml/parser_model.pxd @@ -40,11 +40,16 @@ cdef ActivationsC alloc_activations(SizesC n) nogil cdef void free_activations(const ActivationsC* A) nogil -cdef void predict_states(CBlas cblas, ActivationsC* A, StateC** states, - const WeightsC* W, SizesC n) nogil - +cdef void predict_states( + CBlas cblas, ActivationsC* A, StateC** states, const WeightsC* W, SizesC n +) nogil + cdef int arg_max_if_valid(const weight_t* scores, const int* is_valid, int n) nogil -cdef void cpu_log_loss(float* d_scores, - const float* costs, const int* is_valid, const float* scores, int O) nogil - +cdef void cpu_log_loss( + float* d_scores, + const float* costs, + const int* is_valid, + const float* scores, + int O +) nogil diff --git a/spacy/ml/parser_model.pyx b/spacy/ml/parser_model.pyx index 5cffc4c2d..ae60972aa 100644 --- a/spacy/ml/parser_model.pyx +++ b/spacy/ml/parser_model.pyx @@ -8,13 +8,13 @@ from thinc.backends.linalg cimport Vec, VecVec import numpy import numpy.random -from thinc.api import CupyOps, Model, NumpyOps, get_ops +from thinc.api import CupyOps, Model, NumpyOps from .. 
import util from ..errors import Errors from ..pipeline._parser_internals.stateclass cimport StateClass -from ..typedefs cimport class_t, hash_t, weight_t +from ..typedefs cimport weight_t cdef WeightsC get_c_weights(model) except *: @@ -78,33 +78,48 @@ cdef void resize_activations(ActivationsC* A, SizesC n) nogil: A.is_valid = calloc(n.states * n.classes, sizeof(A.is_valid[0])) A._max_size = n.states else: - A.token_ids = realloc(A.token_ids, - n.states * n.feats * sizeof(A.token_ids[0])) - A.scores = realloc(A.scores, - n.states * n.classes * sizeof(A.scores[0])) - A.unmaxed = realloc(A.unmaxed, - n.states * n.hiddens * n.pieces * sizeof(A.unmaxed[0])) - A.hiddens = realloc(A.hiddens, - n.states * n.hiddens * sizeof(A.hiddens[0])) - A.is_valid = realloc(A.is_valid, - n.states * n.classes * sizeof(A.is_valid[0])) + A.token_ids = realloc( + A.token_ids, n.states * n.feats * sizeof(A.token_ids[0]) + ) + A.scores = realloc( + A.scores, n.states * n.classes * sizeof(A.scores[0]) + ) + A.unmaxed = realloc( + A.unmaxed, n.states * n.hiddens * n.pieces * sizeof(A.unmaxed[0]) + ) + A.hiddens = realloc( + A.hiddens, n.states * n.hiddens * sizeof(A.hiddens[0]) + ) + A.is_valid = realloc( + A.is_valid, n.states * n.classes * sizeof(A.is_valid[0]) + ) A._max_size = n.states A._curr_size = n.states -cdef void predict_states(CBlas cblas, ActivationsC* A, StateC** states, - const WeightsC* W, SizesC n) nogil: - cdef double one = 1.0 +cdef void predict_states( + CBlas cblas, ActivationsC* A, StateC** states, const WeightsC* W, SizesC n +) nogil: resize_activations(A, n) for i in range(n.states): states[i].set_context_tokens(&A.token_ids[i*n.feats], n.feats) memset(A.unmaxed, 0, n.states * n.hiddens * n.pieces * sizeof(float)) memset(A.hiddens, 0, n.states * n.hiddens * sizeof(float)) - sum_state_features(cblas, A.unmaxed, - W.feat_weights, A.token_ids, n.states, n.feats, n.hiddens * n.pieces) + sum_state_features( + cblas, + A.unmaxed, + W.feat_weights, + A.token_ids, + n.states, + n.feats, + n.hiddens * n.pieces + ) for i in range(n.states): - VecVec.add_i(&A.unmaxed[i*n.hiddens*n.pieces], - W.feat_bias, 1., n.hiddens * n.pieces) + VecVec.add_i( + &A.unmaxed[i*n.hiddens*n.pieces], + W.feat_bias, 1., + n.hiddens * n.pieces + ) for j in range(n.hiddens): index = i * n.hiddens * n.pieces + j * n.pieces which = Vec.arg_max(&A.unmaxed[index], n.pieces) @@ -114,14 +129,15 @@ cdef void predict_states(CBlas cblas, ActivationsC* A, StateC** states, memcpy(A.scores, A.hiddens, n.states * n.classes * sizeof(float)) else: # Compute hidden-to-output - sgemm(cblas)(False, True, n.states, n.classes, n.hiddens, + sgemm(cblas)( + False, True, n.states, n.classes, n.hiddens, 1.0, A.hiddens, n.hiddens, W.hidden_weights, n.hiddens, - 0.0, A.scores, n.classes) + 0.0, A.scores, n.classes + ) # Add bias for i in range(n.states): - VecVec.add_i(&A.scores[i*n.classes], - W.hidden_bias, 1., n.classes) + VecVec.add_i(&A.scores[i*n.classes], W.hidden_bias, 1., n.classes) # Set unseen classes to minimum value i = 0 min_ = A.scores[0] @@ -134,9 +150,16 @@ cdef void predict_states(CBlas cblas, ActivationsC* A, StateC** states, A.scores[i*n.classes+j] = min_ -cdef void sum_state_features(CBlas cblas, float* output, - const float* cached, const int* token_ids, int B, int F, int O) nogil: - cdef int idx, b, f, i +cdef void sum_state_features( + CBlas cblas, + float* output, + const float* cached, + const int* token_ids, + int B, + int F, + int O +) nogil: + cdef int idx, b, f cdef const float* feature padding = cached cached += F * O @@ 
-153,9 +176,13 @@ cdef void sum_state_features(CBlas cblas, float* output, token_ids += F -cdef void cpu_log_loss(float* d_scores, - const float* costs, const int* is_valid, const float* scores, - int O) nogil: +cdef void cpu_log_loss( + float* d_scores, + const float* costs, + const int* is_valid, + const float* scores, + int O +) nogil: """Do multi-label log loss""" cdef double max_, gmax, Z, gZ best = arg_max_if_gold(scores, costs, is_valid, O) @@ -179,8 +206,9 @@ cdef void cpu_log_loss(float* d_scores, d_scores[i] = exp(scores[i]-max_) / Z -cdef int arg_max_if_gold(const weight_t* scores, const weight_t* costs, - const int* is_valid, int n) nogil: +cdef int arg_max_if_gold( + const weight_t* scores, const weight_t* costs, const int* is_valid, int n +) nogil: # Find minimum cost cdef float cost = 1 for i in range(n): @@ -204,10 +232,17 @@ cdef int arg_max_if_valid(const weight_t* scores, const int* is_valid, int n) no return best - class ParserStepModel(Model): - def __init__(self, docs, layers, *, has_upper, unseen_classes=None, train=True, - dropout=0.1): + def __init__( + self, + docs, + layers, + *, + has_upper, + unseen_classes=None, + train=True, + dropout=0.1 + ): Model.__init__(self, name="parser_step_model", forward=step_forward) self.attrs["has_upper"] = has_upper self.attrs["dropout_rate"] = dropout @@ -268,8 +303,10 @@ class ParserStepModel(Model): return ids def backprop_step(self, token_ids, d_vector, get_d_tokvecs): - if isinstance(self.state2vec.ops, CupyOps) \ - and not isinstance(token_ids, self.state2vec.ops.xp.ndarray): + if ( + isinstance(self.state2vec.ops, CupyOps) + and not isinstance(token_ids, self.state2vec.ops.xp.ndarray) + ): # Move token_ids and d_vector to GPU, asynchronously self.backprops.append(( util.get_async(self.cuda_stream, token_ids), @@ -279,7 +316,6 @@ class ParserStepModel(Model): else: self.backprops.append((token_ids, d_vector, get_d_tokvecs)) - def finish_steps(self, golds): # Add a padding vector to the d_tokvecs gradient, so that missing # values don't affect the real gradient. 
@@ -292,14 +328,15 @@ class ParserStepModel(Model): ids = ids.flatten() d_state_features = d_state_features.reshape( (ids.size, d_state_features.shape[2])) - self.ops.scatter_add(d_tokvecs, ids, - d_state_features) + self.ops.scatter_add(d_tokvecs, ids, d_state_features) # Padded -- see update() self.bp_tokvecs(d_tokvecs[:-1]) return d_tokvecs + NUMPY_OPS = NumpyOps() + def step_forward(model: ParserStepModel, states, is_train): token_ids = model.get_token_ids(states) vector, get_d_tokvecs = model.state2vec(token_ids, is_train) @@ -312,7 +349,7 @@ def step_forward(model: ParserStepModel, states, is_train): scores, get_d_vector = model.vec2scores(vector, is_train) else: scores = NumpyOps().asarray(vector) - get_d_vector = lambda d_scores: d_scores + get_d_vector = lambda d_scores: d_scores # no-cython-lint: E731 # If the class is unseen, make sure its score is minimum scores[:, model._class_mask == 0] = numpy.nanmin(scores) @@ -448,9 +485,11 @@ cdef class precompute_hiddens: feat_weights = self.get_feat_weights() cdef int[:, ::1] ids = token_ids - sum_state_features(cblas, state_vector.data, - feat_weights, &ids[0,0], - token_ids.shape[0], self.nF, self.nO*self.nP) + sum_state_features( + cblas, state_vector.data, + feat_weights, &ids[0, 0], + token_ids.shape[0], self.nF, self.nO*self.nP + ) state_vector += self.bias state_vector, bp_nonlinearity = self._nonlinearity(state_vector) @@ -475,7 +514,7 @@ cdef class precompute_hiddens: def backprop_maxout(d_best): return self.ops.backprop_maxout(d_best, mask, self.nP) - + return state_vector, backprop_maxout def _relu_nonlinearity(self, state_vector): @@ -489,5 +528,5 @@ cdef class precompute_hiddens: def backprop_relu(d_best): d_best *= mask return d_best.reshape((d_best.shape + (1,))) - + return state_vector, backprop_relu diff --git a/spacy/morphology.pxd b/spacy/morphology.pxd index 968764b82..ee43aa4ec 100644 --- a/spacy/morphology.pxd +++ b/spacy/morphology.pxd @@ -11,7 +11,7 @@ from .typedefs cimport attr_t, hash_t cdef class Morphology: cdef readonly Pool mem cdef readonly StringStore strings - cdef PreshMap tags # Keyed by hash, value is pointer to tag + cdef PreshMap tags # Keyed by hash, value is pointer to tag cdef MorphAnalysisC create_morph_tag(self, field_feature_pairs) except * cdef int insert(self, MorphAnalysisC tag) except -1 @@ -20,4 +20,8 @@ cdef class Morphology: cdef int check_feature(const MorphAnalysisC* morph, attr_t feature) nogil cdef list list_features(const MorphAnalysisC* morph) cdef np.ndarray get_by_field(const MorphAnalysisC* morph, attr_t field) -cdef int get_n_by_field(attr_t* results, const MorphAnalysisC* morph, attr_t field) nogil +cdef int get_n_by_field( + attr_t* results, + const MorphAnalysisC* morph, + attr_t field, +) nogil diff --git a/spacy/morphology.pyx b/spacy/morphology.pyx index 1062fff09..ecbbed729 100644 --- a/spacy/morphology.pyx +++ b/spacy/morphology.pyx @@ -83,10 +83,11 @@ cdef class Morphology: features = self.normalize_attrs(features) string_features = {self.strings.as_string(field): self.strings.as_string(values) for field, values in features.items()} # normalized UFEATS string with sorted fields and values - norm_feats_string = self.FEATURE_SEP.join(sorted([ - self.FIELD_SEP.join([field, values]) - for field, values in string_features.items() - ])) + norm_feats_string = self.FEATURE_SEP.join( + sorted( + [self.FIELD_SEP.join([field, values]) for field, values in string_features.items()] + ) + ) return norm_feats_string or self.EMPTY_MORPH def normalize_attrs(self, attrs): @@ -192,6 
+193,7 @@ cdef int get_n_by_field(attr_t* results, const MorphAnalysisC* morph, attr_t fie n_results += 1 return n_results + def unpickle_morphology(strings, tags): cdef Morphology morphology = Morphology(strings) for tag in tags: diff --git a/spacy/parts_of_speech.pxd b/spacy/parts_of_speech.pxd index a0b2567f1..b5423d113 100644 --- a/spacy/parts_of_speech.pxd +++ b/spacy/parts_of_speech.pxd @@ -8,7 +8,7 @@ cpdef enum univ_pos_t: ADV AUX CONJ - CCONJ # U20 + CCONJ # U20 DET INTJ NOUN diff --git a/spacy/pipeline/_edit_tree_internals/edit_trees.pxd b/spacy/pipeline/_edit_tree_internals/edit_trees.pxd index 3d63af921..41acd2b07 100644 --- a/spacy/pipeline/_edit_tree_internals/edit_trees.pxd +++ b/spacy/pipeline/_edit_tree_internals/edit_trees.pxd @@ -46,11 +46,18 @@ cdef struct EditTreeC: bint is_match_node NodeC inner -cdef inline EditTreeC edittree_new_match(len_t prefix_len, len_t suffix_len, - uint32_t prefix_tree, uint32_t suffix_tree): - cdef MatchNodeC match_node = MatchNodeC(prefix_len=prefix_len, - suffix_len=suffix_len, prefix_tree=prefix_tree, - suffix_tree=suffix_tree) +cdef inline EditTreeC edittree_new_match( + len_t prefix_len, + len_t suffix_len, + uint32_t prefix_tree, + uint32_t suffix_tree +): + cdef MatchNodeC match_node = MatchNodeC( + prefix_len=prefix_len, + suffix_len=suffix_len, + prefix_tree=prefix_tree, + suffix_tree=suffix_tree + ) cdef NodeC inner = NodeC(match_node=match_node) return EditTreeC(is_match_node=True, inner=inner) diff --git a/spacy/pipeline/_edit_tree_internals/edit_trees.pyx b/spacy/pipeline/_edit_tree_internals/edit_trees.pyx index daab0d204..78cd25622 100644 --- a/spacy/pipeline/_edit_tree_internals/edit_trees.pyx +++ b/spacy/pipeline/_edit_tree_internals/edit_trees.pyx @@ -5,8 +5,6 @@ from libc.string cimport memset from libcpp.pair cimport pair from libcpp.vector cimport vector -from pathlib import Path - from ...typedefs cimport hash_t from ... import util @@ -25,17 +23,16 @@ cdef LCS find_lcs(str source, str target): target (str): The second string. RETURNS (LCS): The spans of the longest common subsequences. """ - cdef Py_ssize_t source_len = len(source) cdef Py_ssize_t target_len = len(target) - cdef size_t longest_align = 0; + cdef size_t longest_align = 0 cdef int source_idx, target_idx cdef LCS lcs cdef Py_UCS4 source_cp, target_cp memset(&lcs, 0, sizeof(lcs)) - cdef vector[size_t] prev_aligns = vector[size_t](target_len); - cdef vector[size_t] cur_aligns = vector[size_t](target_len); + cdef vector[size_t] prev_aligns = vector[size_t](target_len) + cdef vector[size_t] cur_aligns = vector[size_t](target_len) for (source_idx, source_cp) in enumerate(source): for (target_idx, target_cp) in enumerate(target): @@ -89,7 +86,7 @@ cdef class EditTrees: cdef LCS lcs = find_lcs(form, lemma) cdef EditTreeC tree - cdef uint32_t tree_id, prefix_tree, suffix_tree + cdef uint32_t prefix_tree, suffix_tree if lcs_is_empty(lcs): tree = edittree_new_subst(self.strings.add(form), self.strings.add(lemma)) else: @@ -108,7 +105,7 @@ cdef class EditTrees: return self._tree_id(tree) cdef uint32_t _tree_id(self, EditTreeC tree): - # If this tree has been constructed before, return its identifier. + # If this tree has been constructed before, return its identifier. 
cdef hash_t hash = edittree_hash(tree) cdef unordered_map[hash_t, uint32_t].iterator iter = self.map.find(hash) if iter != self.map.end(): @@ -289,6 +286,7 @@ def _tree2dict(tree): tree = tree["inner"]["subst_node"] return(dict(tree)) + def _dict2tree(tree): errors = validate_edit_tree(tree) if errors: diff --git a/spacy/pipeline/_parser_internals/_beam_utils.pyx b/spacy/pipeline/_parser_internals/_beam_utils.pyx index 04dd3f11e..de8f0bf7b 100644 --- a/spacy/pipeline/_parser_internals/_beam_utils.pyx +++ b/spacy/pipeline/_parser_internals/_beam_utils.pyx @@ -1,17 +1,14 @@ # cython: infer_types=True # cython: profile=True -cimport numpy as np - import numpy -from cpython.ref cimport Py_XDECREF, PyObject from thinc.extra.search cimport Beam from thinc.extra.search import MaxViolation from thinc.extra.search cimport MaxViolation -from ...typedefs cimport class_t, hash_t +from ...typedefs cimport class_t from .transition_system cimport Transition, TransitionSystem from ...errors import Errors @@ -146,7 +143,6 @@ def update_beam(TransitionSystem moves, states, golds, model, int width, beam_de cdef MaxViolation violn pbeam = BeamBatch(moves, states, golds, width=width, density=beam_density) gbeam = BeamBatch(moves, states, golds, width=width, density=0.0) - cdef StateClass state beam_maps = [] backprops = [] violns = [MaxViolation() for _ in range(len(states))] diff --git a/spacy/pipeline/_parser_internals/_state.pxd b/spacy/pipeline/_parser_internals/_state.pxd index 24acc350c..c063cf97c 100644 --- a/spacy/pipeline/_parser_internals/_state.pxd +++ b/spacy/pipeline/_parser_internals/_state.pxd @@ -277,7 +277,6 @@ cdef cppclass StateC: return n - int n_L(int head) nogil const: return n_arcs(this._left_arcs, head) diff --git a/spacy/pipeline/_parser_internals/arc_eager.pyx b/spacy/pipeline/_parser_internals/arc_eager.pyx index 2c9eb0ff5..bcb4626fb 100644 --- a/spacy/pipeline/_parser_internals/arc_eager.pyx +++ b/spacy/pipeline/_parser_internals/arc_eager.pyx @@ -9,7 +9,7 @@ from ...strings cimport hash_string from ...structs cimport TokenC from ...tokens.doc cimport Doc, set_children_from_heads from ...tokens.token cimport MISSING_DEP -from ...typedefs cimport attr_t, hash_t +from ...typedefs cimport attr_t from ...training import split_bilu_label @@ -68,8 +68,9 @@ cdef struct GoldParseStateC: weight_t pop_cost -cdef GoldParseStateC create_gold_state(Pool mem, const StateC* state, - heads, labels, sent_starts) except *: +cdef GoldParseStateC create_gold_state( + Pool mem, const StateC* state, heads, labels, sent_starts +) except *: cdef GoldParseStateC gs gs.length = len(heads) gs.stride = 1 @@ -82,7 +83,7 @@ cdef GoldParseStateC create_gold_state(Pool mem, const StateC* state, gs.n_kids_in_stack = mem.alloc(gs.length, sizeof(gs.n_kids_in_stack[0])) for i, is_sent_start in enumerate(sent_starts): - if is_sent_start == True: + if is_sent_start is True: gs.state_bits[i] = set_state_flag( gs.state_bits[i], IS_SENT_START, @@ -210,6 +211,7 @@ cdef class ArcEagerGold: def update(self, StateClass stcls): update_gold_state(&self.c, stcls.c) + def _get_aligned_sent_starts(example): """Get list of SENT_START attributes aligned to the predicted tokenization. If the reference has not sentence starts, return a list of None values. 
@@ -524,7 +526,6 @@ cdef class Break: """ @staticmethod cdef bint is_valid(const StateC* st, attr_t label) nogil: - cdef int i if st.buffer_length() < 2: return False elif st.B(1) != st.B(0) + 1: @@ -556,8 +557,8 @@ cdef class Break: cost -= 1 if gold.heads[si] == b0: cost -= 1 - if not is_sent_start(gold, state.B(1)) \ - and not is_sent_start_unknown(gold, state.B(1)): + if not is_sent_start(gold, state.B(1)) and\ + not is_sent_start_unknown(gold, state.B(1)): cost += 1 return cost @@ -803,7 +804,6 @@ cdef class ArcEager(TransitionSystem): raise TypeError(Errors.E909.format(name="ArcEagerGold")) cdef ArcEagerGold gold_ = gold gold_state = gold_.c - n_gold = 0 if self.c[i].is_valid(stcls.c, self.c[i].label): cost = self.c[i].get_cost(stcls.c, &gold_state, self.c[i].label) else: @@ -875,7 +875,7 @@ cdef class ArcEager(TransitionSystem): print("Gold") for token in example.y: print(token.i, token.text, token.dep_, token.head.text) - aligned_heads, aligned_labels = example.get_aligned_parse() + aligned_heads, _aligned_labels = example.get_aligned_parse() print("Aligned heads") for i, head in enumerate(aligned_heads): print(example.x[i], example.x[head] if head is not None else "__") diff --git a/spacy/pipeline/_parser_internals/ner.pyx b/spacy/pipeline/_parser_internals/ner.pyx index e1edb4464..6c4f8e245 100644 --- a/spacy/pipeline/_parser_internals/ner.pyx +++ b/spacy/pipeline/_parser_internals/ner.pyx @@ -1,6 +1,3 @@ -import os -import random - from cymem.cymem cimport Pool from libc.stdint cimport int32_t @@ -14,7 +11,7 @@ from ...tokens.span import Span from ...attrs cimport IS_SPACE from ...lexeme cimport Lexeme -from ...structs cimport SpanC, TokenC +from ...structs cimport SpanC from ...tokens.span cimport Span from ...typedefs cimport attr_t, weight_t @@ -141,11 +138,10 @@ cdef class BiluoPushDown(TransitionSystem): OUT: Counter() } actions[OUT][''] = 1 # Represents a token predicted to be outside of any entity - actions[UNIT][''] = 1 # Represents a token prohibited to be in an entity + actions[UNIT][''] = 1 # Represents a token prohibited to be in an entity for entity_type in kwargs.get('entity_types', []): for action in (BEGIN, IN, LAST, UNIT): actions[action][entity_type] = 1 - moves = ('M', 'B', 'I', 'L', 'U') for example in kwargs.get('examples', []): for token in example.y: ent_type = token.ent_type_ @@ -164,7 +160,7 @@ cdef class BiluoPushDown(TransitionSystem): if token.ent_type: labels.add(token.ent_type_) return labels - + def move_name(self, int move, attr_t label): if move == OUT: return 'O' @@ -325,7 +321,6 @@ cdef class BiluoPushDown(TransitionSystem): raise TypeError(Errors.E909.format(name="BiluoGold")) cdef BiluoGold gold_ = gold gold_state = gold_.c - n_gold = 0 if self.c[i].is_valid(stcls.c, self.c[i].label): cost = self.c[i].get_cost(stcls.c, &gold_state, self.c[i].label) else: @@ -486,10 +481,8 @@ cdef class In: @staticmethod cdef weight_t cost(const StateC* s, const void* _gold, attr_t label) nogil: gold = _gold - move = IN cdef int next_act = gold.ner[s.B(1)].move if s.B(1) >= 0 else OUT cdef int g_act = gold.ner[s.B(0)].move - cdef attr_t g_tag = gold.ner[s.B(0)].label cdef bint is_sunk = _entity_is_sunk(s, gold.ner) if g_act == MISSING: @@ -549,12 +542,10 @@ cdef class Last: @staticmethod cdef weight_t cost(const StateC* s, const void* _gold, attr_t label) nogil: gold = _gold - move = LAST b0 = s.B(0) ent_start = s.E(0) cdef int g_act = gold.ner[b0].move - cdef attr_t g_tag = gold.ner[b0].label cdef int cost = 0 @@ -650,7 +641,6 @@ cdef class Unit: cost += 1 
break return cost - cdef class Out: @@ -675,7 +665,6 @@ cdef class Out: cdef weight_t cost(const StateC* s, const void* _gold, attr_t label) nogil: gold = _gold cdef int g_act = gold.ner[s.B(0)].move - cdef attr_t g_tag = gold.ner[s.B(0)].label cdef weight_t cost = 0 if g_act == MISSING: pass diff --git a/spacy/pipeline/_parser_internals/nonproj.pyx b/spacy/pipeline/_parser_internals/nonproj.pyx index 66f423b3b..93ad14feb 100644 --- a/spacy/pipeline/_parser_internals/nonproj.pyx +++ b/spacy/pipeline/_parser_internals/nonproj.pyx @@ -125,14 +125,17 @@ def decompose(label): def is_decorated(label): return DELIMITER in label + def count_decorated_labels(gold_data): freqs = {} for example in gold_data: proj_heads, deco_deps = projectivize(example.get_aligned("HEAD"), example.get_aligned("DEP")) # set the label to ROOT for each root dependent - deco_deps = ['ROOT' if head == i else deco_deps[i] - for i, head in enumerate(proj_heads)] + deco_deps = [ + 'ROOT' if head == i else deco_deps[i] + for i, head in enumerate(proj_heads) + ] # count label frequencies for label in deco_deps: if is_decorated(label): @@ -160,9 +163,9 @@ def projectivize(heads, labels): cdef vector[int] _heads_to_c(heads): - cdef vector[int] c_heads; + cdef vector[int] c_heads for head in heads: - if head == None: + if head is None: c_heads.push_back(-1) else: assert head < len(heads) @@ -199,6 +202,7 @@ def _decorate(heads, proj_heads, labels): deco_labels.append(labels[tokenid]) return deco_labels + def get_smallest_nonproj_arc_slow(heads): cdef vector[int] c_heads = _heads_to_c(heads) return _get_smallest_nonproj_arc(c_heads) diff --git a/spacy/pipeline/_parser_internals/stateclass.pyx b/spacy/pipeline/_parser_internals/stateclass.pyx index 0a2657af1..fdb5004bb 100644 --- a/spacy/pipeline/_parser_internals/stateclass.pyx +++ b/spacy/pipeline/_parser_internals/stateclass.pyx @@ -1,6 +1,4 @@ # cython: infer_types=True -import numpy - from libcpp.vector cimport vector from ...tokens.doc cimport Doc @@ -38,11 +36,11 @@ cdef class StateClass: cdef vector[ArcC] arcs self.c.get_arcs(&arcs) return list(arcs) - #py_arcs = [] - #for arc in arcs: - # if arc.head != -1 and arc.child != -1: - # py_arcs.append((arc.head, arc.child, arc.label)) - #return arcs + # py_arcs = [] + # for arc in arcs: + # if arc.head != -1 and arc.child != -1: + # py_arcs.append((arc.head, arc.child, arc.label)) + # return arcs def add_arc(self, int head, int child, int label): self.c.add_arc(head, child, label) @@ -52,10 +50,10 @@ cdef class StateClass: def H(self, int child): return self.c.H(child) - + def L(self, int head, int idx): return self.c.L(head, idx) - + def R(self, int head, int idx): return self.c.R(head, idx) @@ -98,7 +96,7 @@ cdef class StateClass: def H(self, int i): return self.c.H(i) - + def E(self, int i): return self.c.E(i) @@ -116,7 +114,7 @@ cdef class StateClass: def H_(self, int i): return self.doc[self.c.H(i)] - + def E_(self, int i): return self.doc[self.c.E(i)] @@ -125,7 +123,7 @@ cdef class StateClass: def R_(self, int i, int idx): return self.doc[self.c.R(i, idx)] - + def empty(self): return self.c.empty() @@ -134,7 +132,7 @@ cdef class StateClass: def at_break(self): return False - #return self.c.at_break() + # return self.c.at_break() def has_head(self, int i): return self.c.has_head(i) diff --git a/spacy/pipeline/_parser_internals/transition_system.pxd b/spacy/pipeline/_parser_internals/transition_system.pxd index ce17480d4..04cd10d88 100644 --- a/spacy/pipeline/_parser_internals/transition_system.pxd +++ 
b/spacy/pipeline/_parser_internals/transition_system.pxd @@ -20,11 +20,15 @@ cdef struct Transition: int (*do)(StateC* state, attr_t label) nogil -ctypedef weight_t (*get_cost_func_t)(const StateC* state, const void* gold, - attr_tlabel) nogil -ctypedef weight_t (*move_cost_func_t)(const StateC* state, const void* gold) nogil -ctypedef weight_t (*label_cost_func_t)(const StateC* state, const void* - gold, attr_t label) nogil +ctypedef weight_t (*get_cost_func_t)( + const StateC* state, const void* gold, attr_tlabel +) nogil +ctypedef weight_t (*move_cost_func_t)( + const StateC* state, const void* gold +) nogil +ctypedef weight_t (*label_cost_func_t)( + const StateC* state, const void* gold, attr_t label +) nogil ctypedef int (*do_func_t)(StateC* state, attr_t label) nogil diff --git a/spacy/pipeline/_parser_internals/transition_system.pyx b/spacy/pipeline/_parser_internals/transition_system.pyx index 053c87f22..aabbdfa24 100644 --- a/spacy/pipeline/_parser_internals/transition_system.pyx +++ b/spacy/pipeline/_parser_internals/transition_system.pyx @@ -8,9 +8,7 @@ from collections import Counter import srsly from ...structs cimport TokenC -from ...tokens.doc cimport Doc from ...typedefs cimport attr_t, weight_t -from . cimport _beam_utils from .stateclass cimport StateClass from ... import util @@ -231,7 +229,6 @@ cdef class TransitionSystem: return self def to_bytes(self, exclude=tuple()): - transitions = [] serializers = { 'moves': lambda: srsly.json_dumps(self.labels), 'strings': lambda: self.strings.to_bytes(), diff --git a/spacy/pipeline/dep_parser.pyx b/spacy/pipeline/dep_parser.pyx index cb896c385..57f091788 100644 --- a/spacy/pipeline/dep_parser.pyx +++ b/spacy/pipeline/dep_parser.pyx @@ -1,6 +1,6 @@ # cython: infer_types=True, profile=True, binding=True from collections import defaultdict -from typing import Callable, Iterable, Optional +from typing import Callable, Optional from thinc.api import Config, Model @@ -124,6 +124,7 @@ def make_parser( scorer=scorer, ) + @Language.factory( "beam_parser", assigns=["token.dep", "token.head", "token.is_sent_start", "doc.sents"], diff --git a/spacy/pipeline/morphologizer.pyx b/spacy/pipeline/morphologizer.pyx index 4ca0ce165..7ca3908bd 100644 --- a/spacy/pipeline/morphologizer.pyx +++ b/spacy/pipeline/morphologizer.pyx @@ -2,7 +2,6 @@ from itertools import islice from typing import Callable, Dict, Optional, Union -import srsly from thinc.api import Config, Model, SequenceCategoricalCrossentropy from ..morphology cimport Morphology @@ -14,10 +13,8 @@ from ..errors import Errors from ..language import Language from ..parts_of_speech import IDS as POS_IDS from ..scorer import Scorer -from ..symbols import POS from ..training import validate_examples, validate_get_examples from ..util import registry -from .pipe import deserialize_config from .tagger import Tagger # See #9050 @@ -76,8 +73,11 @@ def morphologizer_score(examples, **kwargs): results = {} results.update(Scorer.score_token_attr(examples, "pos", **kwargs)) results.update(Scorer.score_token_attr(examples, "morph", getter=morph_key_getter, **kwargs)) - results.update(Scorer.score_token_attr_per_feat(examples, - "morph", getter=morph_key_getter, **kwargs)) + results.update( + Scorer.score_token_attr_per_feat( + examples, "morph", getter=morph_key_getter, **kwargs + ) + ) return results @@ -233,7 +233,6 @@ class Morphologizer(Tagger): if isinstance(docs, Doc): docs = [docs] cdef Doc doc - cdef Vocab vocab = self.vocab cdef bint overwrite = self.cfg["overwrite"] cdef bint extend = 
self.cfg["extend"] labels = self.labels diff --git a/spacy/pipeline/multitask.pyx b/spacy/pipeline/multitask.pyx index 6b62c0811..2a62a50d5 100644 --- a/spacy/pipeline/multitask.pyx +++ b/spacy/pipeline/multitask.pyx @@ -4,13 +4,10 @@ from typing import Optional import numpy from thinc.api import Config, CosineDistance, Model, set_dropout_rate, to_categorical -from ..tokens.doc cimport Doc - -from ..attrs import ID, POS +from ..attrs import ID from ..errors import Errors from ..language import Language from ..training import validate_examples -from ._parser_internals import nonproj from .tagger import Tagger from .trainable_pipe import TrainablePipe @@ -103,10 +100,9 @@ class MultitaskObjective(Tagger): cdef int idx = 0 correct = numpy.zeros((scores.shape[0],), dtype="i") guesses = scores.argmax(axis=1) - docs = [eg.predicted for eg in examples] for i, eg in enumerate(examples): # Handles alignment for tokenization differences - doc_annots = eg.get_aligned() # TODO + _doc_annots = eg.get_aligned() # TODO for j in range(len(eg.predicted)): tok_annots = {key: values[j] for key, values in tok_annots.items()} label = self.make_label(j, tok_annots) @@ -206,7 +202,6 @@ class ClozeMultitask(TrainablePipe): losses[self.name] = 0. set_dropout_rate(self.model, drop) validate_examples(examples, "ClozeMultitask.rehearse") - docs = [eg.predicted for eg in examples] predictions, bp_predictions = self.model.begin_update() loss, d_predictions = self.get_loss(examples, self.vocab.vectors.data, predictions) bp_predictions(d_predictions) diff --git a/spacy/pipeline/ner.pyx b/spacy/pipeline/ner.pyx index 8dd6c3c43..15c092ae9 100644 --- a/spacy/pipeline/ner.pyx +++ b/spacy/pipeline/ner.pyx @@ -1,6 +1,6 @@ # cython: infer_types=True, profile=True, binding=True from collections import defaultdict -from typing import Callable, Iterable, Optional +from typing import Callable, Optional from thinc.api import Config, Model @@ -10,7 +10,7 @@ from ._parser_internals.ner cimport BiluoPushDown from .transition_parser cimport Parser from ..language import Language -from ..scorer import PRFScore, get_ner_prf +from ..scorer import get_ner_prf from ..training import remove_bilu_prefix from ..util import registry @@ -100,6 +100,7 @@ def make_ner( scorer=scorer, ) + @Language.factory( "beam_ner", assigns=["doc.ents", "token.ent_iob", "token.ent_type"], diff --git a/spacy/pipeline/pipe.pyx b/spacy/pipeline/pipe.pyx index 42f518882..90775c465 100644 --- a/spacy/pipeline/pipe.pyx +++ b/spacy/pipeline/pipe.pyx @@ -1,6 +1,6 @@ # cython: infer_types=True, profile=True, binding=True import warnings -from typing import Callable, Dict, Iterable, Iterator, Optional, Tuple, Union +from typing import Callable, Dict, Iterable, Iterator, Tuple, Union import srsly @@ -40,7 +40,7 @@ cdef class Pipe: """ raise NotImplementedError(Errors.E931.format(parent="Pipe", method="__call__", name=self.name)) - def pipe(self, stream: Iterable[Doc], *, batch_size: int=128) -> Iterator[Doc]: + def pipe(self, stream: Iterable[Doc], *, batch_size: int = 128) -> Iterator[Doc]: """Apply the pipe to a stream of documents. This usually happens under the hood when the nlp object is called on a text and all components are applied to the Doc. @@ -59,7 +59,7 @@ cdef class Pipe: except Exception as e: error_handler(self.name, self, [doc], e) - def initialize(self, get_examples: Callable[[], Iterable[Example]], *, nlp: Language=None): + def initialize(self, get_examples: Callable[[], Iterable[Example]], *, nlp: Language = None): """Initialize the pipe. 
For non-trainable components, this method is optional. For trainable components, which should inherit from the subclass TrainablePipe, the provided data examples diff --git a/spacy/pipeline/sentencizer.pyx b/spacy/pipeline/sentencizer.pyx index 2fe7e1540..76f296644 100644 --- a/spacy/pipeline/sentencizer.pyx +++ b/spacy/pipeline/sentencizer.pyx @@ -7,13 +7,13 @@ from ..tokens.doc cimport Doc from .. import util from ..language import Language -from ..scorer import Scorer from .pipe import Pipe from .senter import senter_score # see #9050 BACKWARD_OVERWRITE = False + @Language.factory( "sentencizer", assigns=["token.is_sent_start", "doc.sents"], @@ -36,17 +36,19 @@ class Sentencizer(Pipe): DOCS: https://spacy.io/api/sentencizer """ - default_punct_chars = ['!', '.', '?', '։', '؟', '۔', '܀', '܁', '܂', '߹', - '।', '॥', '၊', '။', '።', '፧', '፨', '᙮', '᜵', '᜶', '᠃', '᠉', '᥄', - '᥅', '᪨', '᪩', '᪪', '᪫', '᭚', '᭛', '᭞', '᭟', '᰻', '᰼', '᱾', '᱿', - '‼', '‽', '⁇', '⁈', '⁉', '⸮', '⸼', '꓿', '꘎', '꘏', '꛳', '꛷', '꡶', - '꡷', '꣎', '꣏', '꤯', '꧈', '꧉', '꩝', '꩞', '꩟', '꫰', '꫱', '꯫', '﹒', - '﹖', '﹗', '!', '.', '?', '𐩖', '𐩗', '𑁇', '𑁈', '𑂾', '𑂿', '𑃀', - '𑃁', '𑅁', '𑅂', '𑅃', '𑇅', '𑇆', '𑇍', '𑇞', '𑇟', '𑈸', '𑈹', '𑈻', '𑈼', - '𑊩', '𑑋', '𑑌', '𑗂', '𑗃', '𑗉', '𑗊', '𑗋', '𑗌', '𑗍', '𑗎', '𑗏', '𑗐', - '𑗑', '𑗒', '𑗓', '𑗔', '𑗕', '𑗖', '𑗗', '𑙁', '𑙂', '𑜼', '𑜽', '𑜾', '𑩂', - '𑩃', '𑪛', '𑪜', '𑱁', '𑱂', '𖩮', '𖩯', '𖫵', '𖬷', '𖬸', '𖭄', '𛲟', '𝪈', - '。', '。'] + default_punct_chars = [ + '!', '.', '?', '։', '؟', '۔', '܀', '܁', '܂', '߹', + '।', '॥', '၊', '။', '።', '፧', '፨', '᙮', '᜵', '᜶', '᠃', '᠉', '᥄', + '᥅', '᪨', '᪩', '᪪', '᪫', '᭚', '᭛', '᭞', '᭟', '᰻', '᰼', '᱾', '᱿', + '‼', '‽', '⁇', '⁈', '⁉', '⸮', '⸼', '꓿', '꘎', '꘏', '꛳', '꛷', '꡶', + '꡷', '꣎', '꣏', '꤯', '꧈', '꧉', '꩝', '꩞', '꩟', '꫰', '꫱', '꯫', '﹒', + '﹖', '﹗', '!', '.', '?', '𐩖', '𐩗', '𑁇', '𑁈', '𑂾', '𑂿', '𑃀', + '𑃁', '𑅁', '𑅂', '𑅃', '𑇅', '𑇆', '𑇍', '𑇞', '𑇟', '𑈸', '𑈹', '𑈻', '𑈼', + '𑊩', '𑑋', '𑑌', '𑗂', '𑗃', '𑗉', '𑗊', '𑗋', '𑗌', '𑗍', '𑗎', '𑗏', '𑗐', + '𑗑', '𑗒', '𑗓', '𑗔', '𑗕', '𑗖', '𑗗', '𑙁', '𑙂', '𑜼', '𑜽', '𑜾', '𑩂', + '𑩃', '𑪛', '𑪜', '𑱁', '𑱂', '𖩮', '𖩯', '𖫵', '𖬷', '𖬸', '𖭄', '𛲟', '𝪈', + '。', '。' + ] def __init__( self, @@ -128,7 +130,6 @@ class Sentencizer(Pipe): if isinstance(docs, Doc): docs = [docs] cdef Doc doc - cdef int idx = 0 for i, doc in enumerate(docs): doc_tag_ids = batch_tag_ids[i] for j, tag_id in enumerate(doc_tag_ids): @@ -169,7 +170,6 @@ class Sentencizer(Pipe): path = path.with_suffix(".json") srsly.write_json(path, {"punct_chars": list(self.punct_chars), "overwrite": self.overwrite}) - def from_disk(self, path, *, exclude=tuple()): """Load the sentencizer from disk. diff --git a/spacy/pipeline/senter.pyx b/spacy/pipeline/senter.pyx index 26f98ba59..37ddcc3c0 100644 --- a/spacy/pipeline/senter.pyx +++ b/spacy/pipeline/senter.pyx @@ -2,7 +2,6 @@ from itertools import islice from typing import Callable, Optional -import srsly from thinc.api import Config, Model, SequenceCategoricalCrossentropy from ..tokens.doc cimport Doc diff --git a/spacy/pipeline/tagger.pyx b/spacy/pipeline/tagger.pyx index 47aae2bb7..4c5265a78 100644 --- a/spacy/pipeline/tagger.pyx +++ b/spacy/pipeline/tagger.pyx @@ -1,26 +1,18 @@ # cython: infer_types=True, profile=True, binding=True -import warnings from itertools import islice from typing import Callable, Optional import numpy -import srsly from thinc.api import Config, Model, SequenceCategoricalCrossentropy, set_dropout_rate -from thinc.types import Floats2d -from ..morphology cimport Morphology from ..tokens.doc cimport Doc -from ..vocab cimport Vocab from .. 
import util -from ..attrs import ID, POS -from ..errors import Errors, Warnings +from ..errors import Errors from ..language import Language -from ..parts_of_speech import X from ..scorer import Scorer from ..training import validate_examples, validate_get_examples from ..util import registry -from .pipe import deserialize_config from .trainable_pipe import TrainablePipe # See #9050 @@ -169,7 +161,6 @@ class Tagger(TrainablePipe): if isinstance(docs, Doc): docs = [docs] cdef Doc doc - cdef Vocab vocab = self.vocab cdef bint overwrite = self.cfg["overwrite"] labels = self.labels for i, doc in enumerate(docs): diff --git a/spacy/pipeline/trainable_pipe.pyx b/spacy/pipeline/trainable_pipe.pyx index 7aa91ac16..e5865e070 100644 --- a/spacy/pipeline/trainable_pipe.pyx +++ b/spacy/pipeline/trainable_pipe.pyx @@ -55,7 +55,7 @@ cdef class TrainablePipe(Pipe): except Exception as e: error_handler(self.name, self, [doc], e) - def pipe(self, stream: Iterable[Doc], *, batch_size: int=128) -> Iterator[Doc]: + def pipe(self, stream: Iterable[Doc], *, batch_size: int = 128) -> Iterator[Doc]: """Apply the pipe to a stream of documents. This usually happens under the hood when the nlp object is called on a text and all components are applied to the Doc. @@ -102,9 +102,9 @@ cdef class TrainablePipe(Pipe): def update(self, examples: Iterable["Example"], *, - drop: float=0.0, - sgd: Optimizer=None, - losses: Optional[Dict[str, float]]=None) -> Dict[str, float]: + drop: float = 0.0, + sgd: Optimizer = None, + losses: Optional[Dict[str, float]] = None) -> Dict[str, float]: """Learn from a batch of documents and gold-standard information, updating the pipe's model. Delegates to predict and get_loss. @@ -138,8 +138,8 @@ cdef class TrainablePipe(Pipe): def rehearse(self, examples: Iterable[Example], *, - sgd: Optimizer=None, - losses: Dict[str, float]=None, + sgd: Optimizer = None, + losses: Dict[str, float] = None, **config) -> Dict[str, float]: """Perform a "rehearsal" update from a batch of data. Rehearsal updates teach the current model to make predictions similar to an initial model, @@ -177,7 +177,7 @@ cdef class TrainablePipe(Pipe): """ return util.create_default_optimizer() - def initialize(self, get_examples: Callable[[], Iterable[Example]], *, nlp: Language=None): + def initialize(self, get_examples: Callable[[], Iterable[Example]], *, nlp: Language = None): """Initialize the pipe for training, using data examples if available. 
This method needs to be implemented by each TrainablePipe component, ensuring the internal model (if available) is initialized properly diff --git a/spacy/pipeline/transition_parser.pxd b/spacy/pipeline/transition_parser.pxd index e5e88d521..7ddb91e01 100644 --- a/spacy/pipeline/transition_parser.pxd +++ b/spacy/pipeline/transition_parser.pxd @@ -13,8 +13,18 @@ cdef class Parser(TrainablePipe): cdef readonly TransitionSystem moves cdef public object _multitasks - cdef void _parseC(self, CBlas cblas, StateC** states, - WeightsC weights, SizesC sizes) nogil + cdef void _parseC( + self, + CBlas cblas, + StateC** states, + WeightsC weights, + SizesC sizes + ) nogil - cdef void c_transition_batch(self, StateC** states, const float* scores, - int nr_class, int batch_size) nogil + cdef void c_transition_batch( + self, + StateC** states, + const float* scores, + int nr_class, + int batch_size + ) nogil diff --git a/spacy/pipeline/transition_parser.pyx b/spacy/pipeline/transition_parser.pyx index ef4d9b362..11c8fafc7 100644 --- a/spacy/pipeline/transition_parser.pyx +++ b/spacy/pipeline/transition_parser.pyx @@ -7,20 +7,15 @@ from cymem.cymem cimport Pool from itertools import islice from libc.stdlib cimport calloc, free -from libc.string cimport memcpy, memset +from libc.string cimport memset from libcpp.vector cimport vector import random -import srsly -from thinc.api import CupyOps, NumpyOps, get_ops, set_dropout_rate - -from thinc.extra.search cimport Beam - -import warnings - import numpy import numpy.random +import srsly +from thinc.api import CupyOps, NumpyOps, set_dropout_rate from ..ml.parser_model cimport ( ActivationsC, @@ -42,7 +37,7 @@ from .trainable_pipe import TrainablePipe from ._parser_internals cimport _beam_utils from .. import util -from ..errors import Errors, Warnings +from ..errors import Errors from ..training import validate_examples, validate_get_examples from ._parser_internals import _beam_utils @@ -258,7 +253,6 @@ cdef class Parser(TrainablePipe): except Exception as e: error_handler(self.name, self, batch_in_order, e) - def predict(self, docs): if isinstance(docs, Doc): docs = [docs] @@ -300,8 +294,6 @@ cdef class Parser(TrainablePipe): return batch def beam_parse(self, docs, int beam_width, float drop=0., beam_density=0.): - cdef Beam beam - cdef Doc doc self._ensure_labels_are_added(docs) batch = _beam_utils.BeamBatch( self.moves, @@ -321,16 +313,18 @@ cdef class Parser(TrainablePipe): del model return list(batch) - cdef void _parseC(self, CBlas cblas, StateC** states, - WeightsC weights, SizesC sizes) nogil: - cdef int i, j + cdef void _parseC( + self, CBlas cblas, StateC** states, WeightsC weights, SizesC sizes + ) nogil: + cdef int i cdef vector[StateC*] unfinished cdef ActivationsC activations = alloc_activations(sizes) while sizes.states >= 1: predict_states(cblas, &activations, states, &weights, sizes) # Validate actions, argmax, take action. 
- self.c_transition_batch(states, - activations.scores, sizes.classes, sizes.states) + self.c_transition_batch( + states, activations.scores, sizes.classes, sizes.states + ) for i in range(sizes.states): if not states[i].is_final(): unfinished.push_back(states[i]) @@ -342,7 +336,6 @@ cdef class Parser(TrainablePipe): def set_annotations(self, docs, states_or_beams): cdef StateClass state - cdef Beam beam cdef Doc doc states = _beam_utils.collect_states(states_or_beams, docs) for i, (state, doc) in enumerate(zip(states, docs)): @@ -359,8 +352,13 @@ cdef class Parser(TrainablePipe): self.c_transition_batch(&c_states[0], c_scores, scores.shape[1], scores.shape[0]) return [state for state in states if not state.c.is_final()] - cdef void c_transition_batch(self, StateC** states, const float* scores, - int nr_class, int batch_size) nogil: + cdef void c_transition_batch( + self, + StateC** states, + const float* scores, + int nr_class, + int batch_size + ) nogil: # n_moves should not be zero at this point, but make sure to avoid zero-length mem alloc with gil: assert self.moves.n_moves > 0, Errors.E924.format(name=self.name) @@ -380,7 +378,6 @@ cdef class Parser(TrainablePipe): free(is_valid) def update(self, examples, *, drop=0., sgd=None, losses=None): - cdef StateClass state if losses is None: losses = {} losses.setdefault(self.name, 0.) @@ -419,8 +416,7 @@ cdef class Parser(TrainablePipe): if not states: return losses model, backprop_tok2vec = self.model.begin_update([eg.x for eg in examples]) - - all_states = list(states) + states_golds = list(zip(states, golds)) n_moves = 0 while states_golds: @@ -500,8 +496,16 @@ cdef class Parser(TrainablePipe): del tutor return losses - def update_beam(self, examples, *, beam_width, - drop=0., sgd=None, losses=None, beam_density=0.0): + def update_beam( + self, + examples, + *, + beam_width, + drop=0., + sgd=None, + losses=None, + beam_density=0.0 + ): states, golds, _ = self.moves.init_gold_batch(examples) if not states: return losses @@ -531,8 +535,9 @@ cdef class Parser(TrainablePipe): is_valid = mem.alloc(self.moves.n_moves, sizeof(int)) costs = mem.alloc(self.moves.n_moves, sizeof(float)) - cdef np.ndarray d_scores = numpy.zeros((len(states), self.moves.n_moves), - dtype='f', order='C') + cdef np.ndarray d_scores = numpy.zeros( + (len(states), self.moves.n_moves), dtype='f', order='C' + ) c_d_scores = d_scores.data unseen_classes = self.model.attrs["unseen_classes"] for i, (state, gold) in enumerate(zip(states, golds)): @@ -542,8 +547,9 @@ cdef class Parser(TrainablePipe): for j in range(self.moves.n_moves): if costs[j] <= 0.0 and j in unseen_classes: unseen_classes.remove(j) - cpu_log_loss(c_d_scores, - costs, is_valid, &scores[i, 0], d_scores.shape[1]) + cpu_log_loss( + c_d_scores, costs, is_valid, &scores[i, 0], d_scores.shape[1] + ) c_d_scores += d_scores.shape[1] # Note that we don't normalize this. See comment in update() for why. if losses is not None: diff --git a/spacy/strings.pyx b/spacy/strings.pyx index 16c3e2b5b..b0799d6fc 100644 --- a/spacy/strings.pyx +++ b/spacy/strings.pyx @@ -2,7 +2,6 @@ cimport cython from libc.stdint cimport uint32_t from libc.string cimport memcpy -from libcpp.set cimport set from murmurhash.mrmr cimport hash32, hash64 import srsly @@ -20,9 +19,10 @@ cdef inline bint _try_coerce_to_hash(object key, hash_t* out_hash): try: out_hash[0] = key return True - except: + except: # no-cython-lint return False + def get_string_id(key): """Get a string ID, handling the reserved symbols correctly. 
If the key is already an ID, return it. @@ -87,7 +87,6 @@ cdef Utf8Str* _allocate(Pool mem, const unsigned char* chars, uint32_t length) e cdef int n_length_bytes cdef int i cdef Utf8Str* string = mem.alloc(1, sizeof(Utf8Str)) - cdef uint32_t ulength = length if length < sizeof(string.s): string.s[0] = length memcpy(&string.s[1], chars, length) diff --git a/spacy/structs.pxd b/spacy/structs.pxd index 9efb068fd..8cfcc2964 100644 --- a/spacy/structs.pxd +++ b/spacy/structs.pxd @@ -52,7 +52,7 @@ cdef struct TokenC: int sent_start int ent_iob - attr_t ent_type # TODO: Is there a better way to do this? Multiple sources of truth.. + attr_t ent_type # TODO: Is there a better way to do this? Multiple sources of truth.. attr_t ent_kb_id hash_t ent_id diff --git a/spacy/symbols.pxd b/spacy/symbols.pxd index bc15d9b80..73be19145 100644 --- a/spacy/symbols.pxd +++ b/spacy/symbols.pxd @@ -92,7 +92,7 @@ cdef enum symbol_t: ADV AUX CONJ - CCONJ # U20 + CCONJ # U20 DET INTJ NOUN @@ -418,7 +418,7 @@ cdef enum symbol_t: ccomp complm conj - cop # U20 + cop # U20 csubj csubjpass dep @@ -441,8 +441,8 @@ cdef enum symbol_t: num number oprd - obj # U20 - obl # U20 + obj # U20 + obl # U20 parataxis partmod pcomp diff --git a/spacy/symbols.pyx b/spacy/symbols.pyx index b0345c710..d1deeb0e7 100644 --- a/spacy/symbols.pyx +++ b/spacy/symbols.pyx @@ -96,7 +96,7 @@ IDS = { "ADV": ADV, "AUX": AUX, "CONJ": CONJ, - "CCONJ": CCONJ, # U20 + "CCONJ": CCONJ, # U20 "DET": DET, "INTJ": INTJ, "NOUN": NOUN, @@ -421,7 +421,7 @@ IDS = { "ccomp": ccomp, "complm": complm, "conj": conj, - "cop": cop, # U20 + "cop": cop, # U20 "csubj": csubj, "csubjpass": csubjpass, "dep": dep, @@ -444,8 +444,8 @@ IDS = { "num": num, "number": number, "oprd": oprd, - "obj": obj, # U20 - "obl": obl, # U20 + "obj": obj, # U20 + "obl": obl, # U20 "parataxis": parataxis, "partmod": partmod, "pcomp": pcomp, diff --git a/spacy/tests/package/test_requirements.py b/spacy/tests/package/test_requirements.py index 9e83d5fb1..fab1e8218 100644 --- a/spacy/tests/package/test_requirements.py +++ b/spacy/tests/package/test_requirements.py @@ -12,6 +12,7 @@ def test_build_dependencies(): "flake8", "hypothesis", "pre-commit", + "cython-lint", "black", "isort", "mypy", diff --git a/spacy/tokenizer.pxd b/spacy/tokenizer.pxd index f7585b45a..a902ebad9 100644 --- a/spacy/tokenizer.pxd +++ b/spacy/tokenizer.pxd @@ -31,24 +31,58 @@ cdef class Tokenizer: cdef Doc _tokenize_affixes(self, str string, bint with_special_cases) cdef int _apply_special_cases(self, Doc doc) except -1 - cdef void _filter_special_spans(self, vector[SpanC] &original, - vector[SpanC] &filtered, int doc_len) nogil - cdef object _prepare_special_spans(self, Doc doc, - vector[SpanC] &filtered) - cdef int _retokenize_special_spans(self, Doc doc, TokenC* tokens, - object span_data) - cdef int _try_specials_and_cache(self, hash_t key, Doc tokens, - int* has_special, - bint with_special_cases) except -1 - cdef int _tokenize(self, Doc tokens, str span, hash_t key, - int* has_special, bint with_special_cases) except -1 - cdef str _split_affixes(self, Pool mem, str string, - vector[LexemeC*] *prefixes, - vector[LexemeC*] *suffixes, int* has_special, - bint with_special_cases) - cdef int _attach_tokens(self, Doc tokens, str string, - vector[LexemeC*] *prefixes, - vector[LexemeC*] *suffixes, int* has_special, - bint with_special_cases) except -1 - cdef int _save_cached(self, const TokenC* tokens, hash_t key, - int* has_special, int n) except -1 + cdef void _filter_special_spans( + self, + vector[SpanC] &original, + 
vector[SpanC] &filtered, + int doc_len, + ) nogil + cdef object _prepare_special_spans( + self, + Doc doc, + vector[SpanC] &filtered, + ) + cdef int _retokenize_special_spans( + self, + Doc doc, + TokenC* tokens, + object span_data, + ) + cdef int _try_specials_and_cache( + self, + hash_t key, + Doc tokens, + int* has_special, + bint with_special_cases, + ) except -1 + cdef int _tokenize( + self, + Doc tokens, + str span, + hash_t key, + int* has_special, + bint with_special_cases, + ) except -1 + cdef str _split_affixes( + self, + Pool mem, + str string, + vector[LexemeC*] *prefixes, + vector[LexemeC*] *suffixes, int* has_special, + bint with_special_cases, + ) + cdef int _attach_tokens( + self, + Doc tokens, + str string, + vector[LexemeC*] *prefixes, + vector[LexemeC*] *suffixes, int* has_special, + bint with_special_cases, + ) except -1 + cdef int _save_cached( + self, + const TokenC* tokens, + hash_t key, + int* has_special, + int n, + ) except -1 diff --git a/spacy/tokenizer.pyx b/spacy/tokenizer.pyx index 3861b1cee..8fc95bea0 100644 --- a/spacy/tokenizer.pyx +++ b/spacy/tokenizer.pyx @@ -8,20 +8,18 @@ from libcpp.set cimport set as stdset from preshed.maps cimport PreshMap import re -import warnings - from .lexeme cimport EMPTY_LEXEME from .strings cimport hash_string from .tokens.doc cimport Doc from . import util from .attrs import intify_attrs -from .errors import Errors, Warnings +from .errors import Errors from .scorer import Scorer from .symbols import NORM, ORTH from .tokens import Span from .training import validate_examples -from .util import get_words_and_spaces, registry +from .util import get_words_and_spaces cdef class Tokenizer: @@ -324,7 +322,7 @@ cdef class Tokenizer: cdef int span_start cdef int span_end while i < doc.length: - if not i in span_data: + if i not in span_data: tokens[i + offset] = doc.c[i] i += 1 else: @@ -395,12 +393,15 @@ cdef class Tokenizer: self._save_cached(&tokens.c[orig_size], orig_key, has_special, tokens.length - orig_size) - cdef str _split_affixes(self, Pool mem, str string, - vector[const LexemeC*] *prefixes, - vector[const LexemeC*] *suffixes, - int* has_special, - bint with_special_cases): - cdef size_t i + cdef str _split_affixes( + self, + Pool mem, + str string, + vector[const LexemeC*] *prefixes, + vector[const LexemeC*] *suffixes, + int* has_special, + bint with_special_cases + ): cdef str prefix cdef str suffix cdef str minus_pre @@ -445,10 +446,6 @@ cdef class Tokenizer: vector[const LexemeC*] *suffixes, int* has_special, bint with_special_cases) except -1: - cdef bint specials_hit = 0 - cdef bint cache_hit = 0 - cdef int split, end - cdef const LexemeC* const* lexemes cdef const LexemeC* lexeme cdef str span cdef int i @@ -458,9 +455,11 @@ cdef class Tokenizer: if string: if self._try_specials_and_cache(hash_string(string), tokens, has_special, with_special_cases): pass - elif (self.token_match and self.token_match(string)) or \ - (self.url_match and \ - self.url_match(string)): + elif ( + (self.token_match and self.token_match(string)) or + (self.url_match and self.url_match(string)) + ): + # We're always saying 'no' to spaces here -- the caller will # fix up the outermost one, with reference to the original. 
# See Issue #859 @@ -821,7 +820,7 @@ cdef class Tokenizer: self.infix_finditer = None self.token_match = None self.url_match = None - msg = util.from_bytes(bytes_data, deserializers, exclude) + util.from_bytes(bytes_data, deserializers, exclude) if "prefix_search" in data and isinstance(data["prefix_search"], str): self.prefix_search = re.compile(data["prefix_search"]).search if "suffix_search" in data and isinstance(data["suffix_search"], str): diff --git a/spacy/tokens/_retokenize.pyx b/spacy/tokens/_retokenize.pyx index 8ed707ab9..f28d2e088 100644 --- a/spacy/tokens/_retokenize.pyx +++ b/spacy/tokens/_retokenize.pyx @@ -1,7 +1,6 @@ # cython: infer_types=True, bounds_check=False, profile=True from cymem.cymem cimport Pool -from libc.stdlib cimport free, malloc -from libc.string cimport memcpy, memset +from libc.string cimport memset import numpy from thinc.api import get_array_module @@ -10,7 +9,7 @@ from ..attrs cimport MORPH, NORM from ..lexeme cimport EMPTY_LEXEME, Lexeme from ..structs cimport LexemeC, TokenC from ..vocab cimport Vocab -from .doc cimport Doc, set_children_from_heads, token_by_end, token_by_start +from .doc cimport Doc, set_children_from_heads, token_by_start from .span cimport Span from .token cimport Token @@ -147,7 +146,7 @@ def _merge(Doc doc, merges): syntactic root of the span. RETURNS (Token): The first newly merged token. """ - cdef int i, merge_index, start, end, token_index, current_span_index, current_offset, offset, span_index + cdef int i, merge_index, start, token_index, current_span_index, current_offset, offset, span_index cdef Span span cdef const LexemeC* lex cdef TokenC* token @@ -165,7 +164,6 @@ def _merge(Doc doc, merges): merges.sort(key=_get_start) for merge_index, (span, attributes) in enumerate(merges): start = span.start - end = span.end spans.append(span) # House the new merged token where it starts token = &doc.c[start] @@ -203,8 +201,9 @@ def _merge(Doc doc, merges): # for the merged region. To do this, we create a boolean array indicating # whether the row is to be deleted, then use numpy.delete if doc.tensor is not None and doc.tensor.size != 0: - doc.tensor = _resize_tensor(doc.tensor, - [(m[0].start, m[0].end) for m in merges]) + doc.tensor = _resize_tensor( + doc.tensor, [(m[0].start, m[0].end) for m in merges] + ) # Memorize span roots and sets dependencies of the newly merged # tokens to the dependencies of their roots. 
span_roots = [] @@ -267,11 +266,11 @@ def _merge(Doc doc, merges): span_index += 1 if span_index < len(spans) and i == spans[span_index].start: # First token in a span - doc.c[i - offset] = doc.c[i] # move token to its place + doc.c[i - offset] = doc.c[i] # move token to its place offset += (spans[span_index].end - spans[span_index].start) - 1 in_span = True if not in_span: - doc.c[i - offset] = doc.c[i] # move token to its place + doc.c[i - offset] = doc.c[i] # move token to its place for i in range(doc.length - offset, doc.length): memset(&doc.c[i], 0, sizeof(TokenC)) @@ -345,7 +344,11 @@ def _split(Doc doc, int token_index, orths, heads, attrs): if to_process_tensor: xp = get_array_module(doc.tensor) if xp is numpy: - doc.tensor = xp.append(doc.tensor, xp.zeros((nb_subtokens,doc.tensor.shape[1]), dtype="float32"), axis=0) + doc.tensor = xp.append( + doc.tensor, + xp.zeros((nb_subtokens, doc.tensor.shape[1]), dtype="float32"), + axis=0 + ) else: shape = (doc.tensor.shape[0] + nb_subtokens, doc.tensor.shape[1]) resized_array = xp.zeros(shape, dtype="float32") @@ -367,7 +370,8 @@ def _split(Doc doc, int token_index, orths, heads, attrs): token.norm = 0 # reset norm if to_process_tensor: # setting the tensors of the split tokens to array of zeros - doc.tensor[token_index + i:token_index + i + 1] = xp.zeros((1,doc.tensor.shape[1]), dtype="float32") + doc.tensor[token_index + i:token_index + i + 1] = \ + xp.zeros((1, doc.tensor.shape[1]), dtype="float32") # Update the character offset of the subtokens if i != 0: token.idx = orig_token.idx + idx_offset @@ -455,7 +459,6 @@ def normalize_token_attrs(Vocab vocab, attrs): def set_token_attrs(Token py_token, attrs): cdef TokenC* token = py_token.c cdef const LexemeC* lex = token.lex - cdef Doc doc = py_token.doc # Assign attributes for attr_name, attr_value in attrs.items(): if attr_name == "_": # Set extension attributes diff --git a/spacy/tokens/doc.pxd b/spacy/tokens/doc.pxd index d7f092c94..d9719609c 100644 --- a/spacy/tokens/doc.pxd +++ b/spacy/tokens/doc.pxd @@ -31,7 +31,7 @@ cdef int token_by_start(const TokenC* tokens, int length, int start_char) except cdef int token_by_end(const TokenC* tokens, int length, int end_char) except -2 -cdef int [:,:] _get_lca_matrix(Doc, int start, int end) +cdef int [:, :] _get_lca_matrix(Doc, int start, int end) cdef class Doc: @@ -61,7 +61,6 @@ cdef class Doc: cdef int length cdef int max_length - cdef public object noun_chunks_iterator cdef object __weakref__ diff --git a/spacy/tokens/doc.pyx b/spacy/tokens/doc.pyx index 146b276e2..8fc2c4b3c 100644 --- a/spacy/tokens/doc.pyx +++ b/spacy/tokens/doc.pyx @@ -43,14 +43,13 @@ from ..attrs cimport ( attr_id_t, ) from ..lexeme cimport EMPTY_LEXEME, Lexeme -from ..typedefs cimport attr_t, flags_t +from ..typedefs cimport attr_t from .token cimport Token from .. import parts_of_speech, schemas, util from ..attrs import IDS, intify_attr -from ..compat import copy_reg, pickle +from ..compat import copy_reg from ..errors import Errors, Warnings -from ..morphology import Morphology from ..util import get_words_and_spaces from ._retokenize import Retokenizer from .underscore import Underscore, get_ext_args @@ -784,7 +783,7 @@ cdef class Doc: # TODO: # 1. Test basic data-driven ORTH gazetteer # 2. 
Test more nuanced date and currency regex - cdef attr_t entity_type, kb_id, ent_id + cdef attr_t kb_id, ent_id cdef int ent_start, ent_end ent_spans = [] for ent_info in ents: @@ -987,7 +986,6 @@ cdef class Doc: >>> np_array = doc.to_array([LOWER, POS, ENT_TYPE, IS_ALPHA]) """ cdef int i, j - cdef attr_id_t feature cdef np.ndarray[attr_t, ndim=2] output # Handle scalar/list inputs of strings/ints for py_attr_ids # See also #3064 @@ -999,8 +997,10 @@ cdef class Doc: py_attr_ids = [py_attr_ids] # Allow strings, e.g. 'lemma' or 'LEMMA' try: - py_attr_ids = [(IDS[id_.upper()] if hasattr(id_, "upper") else id_) - for id_ in py_attr_ids] + py_attr_ids = [ + (IDS[id_.upper()] if hasattr(id_, "upper") else id_) + for id_ in py_attr_ids + ] except KeyError as msg: keys = [k for k in IDS.keys() if not k.startswith("FLAG")] raise KeyError(Errors.E983.format(dict="IDS", key=msg, keys=keys)) from None @@ -1030,8 +1030,6 @@ cdef class Doc: DOCS: https://spacy.io/api/doc#count_by """ cdef int i - cdef attr_t attr - cdef size_t count if counts is None: counts = Counter() @@ -1093,7 +1091,6 @@ cdef class Doc: cdef int i, col cdef int32_t abs_head_index cdef attr_id_t attr_id - cdef TokenC* tokens = self.c cdef int length = len(array) if length != len(self): raise ValueError(Errors.E971.format(array_length=length, doc_length=len(self))) @@ -1225,7 +1222,7 @@ cdef class Doc: span.label, span.kb_id, span.id, - span.text, # included as a check + span.text, # included as a check )) char_offset += len(doc.text) if len(doc) > 0 and ensure_whitespace and not doc[-1].is_space and not bool(doc[-1].whitespace_): @@ -1508,7 +1505,6 @@ cdef class Doc: attributes are inherited from the syntactic root of the span. RETURNS (Token): The first newly merged token. """ - cdef str tag, lemma, ent_type attr_len = len(attributes) span_len = len(spans) if not attr_len == span_len: @@ -1624,7 +1620,6 @@ cdef class Doc: for token in char_span[1:]: token.is_sent_start = False - for span_group in doc_json.get("spans", {}): spans = [] for span in doc_json["spans"][span_group]: @@ -1656,7 +1651,7 @@ cdef class Doc: start = token_by_char(self.c, self.length, token_data["start"]) value = token_data["value"] self[start]._.set(token_attr, value) - + for span_attr in doc_json.get("underscore_span", {}): if not Span.has_extension(span_attr): Span.set_extension(span_attr) @@ -1698,7 +1693,7 @@ cdef class Doc: token_data["dep"] = token.dep_ token_data["head"] = token.head.i data["tokens"].append(token_data) - + if self.spans: data["spans"] = {} for span_group in self.spans: @@ -1769,7 +1764,6 @@ cdef class Doc: output.fill(255) cdef int i, j, start_idx, end_idx cdef bytes byte_string - cdef unsigned char utf8_char for i, byte_string in enumerate(byte_strings): j = 0 start_idx = 0 @@ -1822,8 +1816,6 @@ cdef int token_by_char(const TokenC* tokens, int length, int char_idx) except -2 cdef int set_children_from_heads(TokenC* tokens, int start, int end) except -1: # note: end is exclusive - cdef TokenC* head - cdef TokenC* child cdef int i # Set number of left/right children to 0. We'll increment it in the loops. 
for i in range(start, end): @@ -1923,7 +1915,7 @@ cdef int _get_tokens_lca(Token token_j, Token token_k): return -1 -cdef int [:,:] _get_lca_matrix(Doc doc, int start, int end): +cdef int [:, :] _get_lca_matrix(Doc doc, int start, int end): """Given a doc and a start and end position defining a set of contiguous tokens within it, returns a matrix of Lowest Common Ancestors (LCA), where LCA[i, j] is the index of the lowest common ancestor among token i and j. @@ -1936,7 +1928,7 @@ cdef int [:,:] _get_lca_matrix(Doc doc, int start, int end): RETURNS (int [:, :]): memoryview of numpy.array[ndim=2, dtype=numpy.int32], with shape (n, n), where n = len(doc). """ - cdef int [:,:] lca_matrix + cdef int [:, :] lca_matrix cdef int j, k n_tokens= end - start lca_mat = numpy.empty((n_tokens, n_tokens), dtype=numpy.int32) diff --git a/spacy/tokens/graph.pyx b/spacy/tokens/graph.pyx index 47f0a20d4..1cbec09f4 100644 --- a/spacy/tokens/graph.pyx +++ b/spacy/tokens/graph.pyx @@ -3,7 +3,7 @@ from typing import Generator, List, Tuple cimport cython from cython.operator cimport dereference -from libc.stdint cimport int32_t, int64_t +from libc.stdint cimport int32_t from libcpp.pair cimport pair from libcpp.unordered_map cimport unordered_map from libcpp.unordered_set cimport unordered_set @@ -11,7 +11,6 @@ from libcpp.unordered_set cimport unordered_set import weakref from murmurhash.mrmr cimport hash64 -from preshed.maps cimport map_get_unless_missing from .. import Errors @@ -28,7 +27,7 @@ from .token import Token cdef class Edge: cdef readonly Graph graph cdef readonly int i - + def __init__(self, Graph graph, int i): self.graph = graph self.i = i @@ -44,7 +43,7 @@ cdef class Edge: @property def head(self) -> "Node": return Node(self.graph, self.graph.c.edges[self.i].head) - + @property def tail(self) -> "Tail": return Node(self.graph, self.graph.c.edges[self.i].tail) @@ -70,7 +69,7 @@ cdef class Node: def __init__(self, Graph graph, int i): """A reference to a node of an annotation graph. Each node is made up of an ordered set of zero or more token indices. - + Node references are usually created by the Graph object itself, or from the Node or Edge objects. You usually won't need to instantiate this class yourself. @@ -109,13 +108,13 @@ cdef class Node: @property def is_none(self) -> bool: """Whether the node is a special value, indicating 'none'. - + The NoneNode type is returned by the Graph, Edge and Node objects when there is no match to a query. It has the same API as Node, but it always returns NoneNode, NoneEdge or empty lists for its queries. """ return False - + @property def doc(self) -> "Doc": """The Doc object that the graph refers to.""" @@ -130,19 +129,19 @@ cdef class Node: def head(self, i=None, label=None) -> "Node": """Get the head of the first matching edge, searching by index, label, both or neither. - + For instance, `node.head(i=1)` will get the head of the second edge that this node is a tail of. `node.head(i=1, label="ARG0")` will further check that the second edge has the label `"ARG0"`. - + If no matching node can be found, the graph's NoneNode is returned. """ return self.headed(i=i, label=label) - + def tail(self, i=None, label=None) -> "Node": """Get the tail of the first matching edge, searching by index, label, both or neither. - + If no matching node can be found, the graph's NoneNode is returned. 
""" return self.tailed(i=i, label=label).tail @@ -171,7 +170,7 @@ cdef class Node: cdef vector[int] edge_indices self._find_edges(edge_indices, "head", label) return [Node(self.graph, self.graph.c.edges[i].head) for i in edge_indices] - + def tails(self, label=None) -> List["Node"]: """Find all matching tails of this node.""" cdef vector[int] edge_indices @@ -200,7 +199,7 @@ cdef class Node: return NoneEdge(self.graph) else: return Edge(self.graph, idx) - + def tailed(self, i=None, label=None) -> Edge: """Find the first matching edge tailed by this node. If no matching edge can be found, the graph's NoneEdge is returned. @@ -283,7 +282,7 @@ cdef class NoneEdge(Edge): def __init__(self, graph): self.graph = graph self.i = -1 - + @property def doc(self) -> "Doc": return self.graph.doc @@ -291,7 +290,7 @@ cdef class NoneEdge(Edge): @property def head(self) -> "NoneNode": return NoneNode(self.graph) - + @property def tail(self) -> "NoneNode": return NoneNode(self.graph) @@ -319,7 +318,7 @@ cdef class NoneNode(Node): def __len__(self): return 0 - + @property def is_none(self): return -1 @@ -340,14 +339,14 @@ cdef class NoneNode(Node): def walk_heads(self): yield from [] - + def walk_tails(self): yield from [] - + cdef class Graph: """A set of directed labelled relationships between sets of tokens. - + EXAMPLE: Construction 1 >>> graph = Graph(doc, name="srl") @@ -372,7 +371,9 @@ cdef class Graph: >>> assert graph.has_node((0,)) >>> assert graph.has_edge((0,), (1,3), label="agent") """ - def __init__(self, doc, *, name="", nodes=[], edges=[], labels=None, weights=None): + def __init__( + self, doc, *, name="", nodes=[], edges=[], labels=None, weights=None # no-cython-lint + ): """Create a Graph object. doc (Doc): The Doc object the graph will refer to. @@ -438,13 +439,11 @@ cdef class Graph: def add_edge(self, head, tail, *, label="", weight=None) -> Edge: """Add an edge to the graph, connecting two groups of tokens. - + If there is already an edge for the (head, tail, label) triple, it will be returned, and no new edge will be created. The weight of the edge will be updated if a weight is specified. """ - label_hash = self.doc.vocab.strings.as_int(label) - weight_float = weight if weight is not None else 0.0 edge_index = add_edge( &self.c, EdgeC( @@ -478,11 +477,11 @@ cdef class Graph: def has_edge(self, head, tail, label) -> bool: """Check whether a (head, tail, label) triple is an edge in the graph.""" return not self.get_edge(head, tail, label=label).is_none - + def add_node(self, indices) -> Node: """Add a node to the graph and return it. Nodes refer to ordered sets of token indices. - + This method is idempotent: if there is already a node for the given indices, it is returned without a new node being created. 
""" @@ -510,7 +509,7 @@ cdef class Graph: return NoneNode(self) else: return Node(self, node_index) - + def has_node(self, tuple indices) -> bool: """Check whether the graph has a node for the given indices.""" return not self.get_node(indices).is_none @@ -570,7 +569,7 @@ cdef int add_node(GraphC* graph, vector[int32_t]& node) nogil: graph.roots.insert(index) graph.node_map.insert(pair[hash_t, int](key, index)) return index - + cdef int get_node(const GraphC* graph, vector[int32_t] node) nogil: key = hash64(&node[0], node.size() * sizeof(node[0]), 0) diff --git a/spacy/tokens/morphanalysis.pyx b/spacy/tokens/morphanalysis.pyx index 0992a0b66..ba7c638f6 100644 --- a/spacy/tokens/morphanalysis.pyx +++ b/spacy/tokens/morphanalysis.pyx @@ -89,4 +89,3 @@ cdef class MorphAnalysis: def __repr__(self): return self.to_json() - diff --git a/spacy/tokens/span.pyx b/spacy/tokens/span.pyx index 59ee21687..cf90e416b 100644 --- a/spacy/tokens/span.pyx +++ b/spacy/tokens/span.pyx @@ -1,5 +1,4 @@ cimport numpy as np -from libc.math cimport sqrt import copy import warnings @@ -10,11 +9,10 @@ from thinc.api import get_array_module from ..attrs cimport * from ..attrs cimport ORTH, attr_id_t from ..lexeme cimport Lexeme -from ..parts_of_speech cimport univ_pos_t -from ..structs cimport LexemeC, TokenC +from ..structs cimport TokenC from ..symbols cimport dep -from ..typedefs cimport attr_t, flags_t, hash_t -from .doc cimport _get_lca_matrix, get_token_attr, token_by_end, token_by_start +from ..typedefs cimport attr_t, hash_t +from .doc cimport _get_lca_matrix, get_token_attr from .token cimport Token from ..errors import Errors, Warnings @@ -595,7 +593,6 @@ cdef class Span: """ return "".join([t.text_with_ws for t in self]) - @property def noun_chunks(self): """Iterate over the base noun phrases in the span. Yields base diff --git a/spacy/tokens/span_group.pyx b/spacy/tokens/span_group.pyx index 48ad4a516..d245a1425 100644 --- a/spacy/tokens/span_group.pyx +++ b/spacy/tokens/span_group.pyx @@ -1,7 +1,7 @@ import struct import weakref from copy import deepcopy -from typing import TYPE_CHECKING, Iterable, Optional, Tuple, Union +from typing import Iterable, Optional, Union import srsly @@ -34,7 +34,7 @@ cdef class SpanGroup: DOCS: https://spacy.io/api/spangroup """ - def __init__(self, doc, *, name="", attrs={}, spans=[]): + def __init__(self, doc, *, name="", attrs={}, spans=[]): # no-cython-lint """Create a SpanGroup. doc (Doc): The reference Doc object. 
@@ -311,7 +311,7 @@ cdef class SpanGroup: other_attrs = deepcopy(other_group.attrs) span_group.attrs.update({ - key: value for key, value in other_attrs.items() \ + key: value for key, value in other_attrs.items() if key not in span_group.attrs }) if len(other_group): diff --git a/spacy/tokens/token.pxd b/spacy/tokens/token.pxd index fc02ff624..f4e4611df 100644 --- a/spacy/tokens/token.pxd +++ b/spacy/tokens/token.pxd @@ -26,7 +26,7 @@ cdef class Token: cdef Token self = Token.__new__(Token, vocab, doc, offset) return self - #cdef inline TokenC struct_from_attrs(Vocab vocab, attrs): + # cdef inline TokenC struct_from_attrs(Vocab vocab, attrs): # cdef TokenC token # attrs = normalize_attrs(attrs) @@ -98,12 +98,10 @@ cdef class Token: elif feat_name == SENT_START: token.sent_start = value - @staticmethod cdef inline int missing_dep(const TokenC* token) nogil: return token.dep == MISSING_DEP - @staticmethod cdef inline int missing_head(const TokenC* token) nogil: return Token.missing_dep(token) diff --git a/spacy/tokens/token.pyx b/spacy/tokens/token.pyx index 6018c3112..de967ba25 100644 --- a/spacy/tokens/token.pyx +++ b/spacy/tokens/token.pyx @@ -1,13 +1,11 @@ # cython: infer_types=True # Compiler crashes on memory view coercion without this. Should report bug. cimport numpy as np -from cython.view cimport array as cvarray np.import_array() import warnings -import numpy from thinc.api import get_array_module from ..attrs cimport ( @@ -238,7 +236,7 @@ cdef class Token: result = xp.dot(vector, other.vector) / (self.vector_norm * other.vector_norm) # ensure we get a scalar back (numpy does this automatically but cupy doesn't) return result.item() - + def has_morph(self): """Check whether the token has annotated morph information. Return False when the morph annotation is unset/missing. 
@@ -545,9 +543,9 @@ cdef class Token: def __get__(self): if self.i + 1 == len(self.doc): return True - elif self.doc[self.i+1].is_sent_start == None: + elif self.doc[self.i+1].is_sent_start is None: return None - elif self.doc[self.i+1].is_sent_start == True: + elif self.doc[self.i+1].is_sent_start is True: return True else: return False diff --git a/spacy/training/align.pyx b/spacy/training/align.pyx index 8bd43b048..79fec73c4 100644 --- a/spacy/training/align.pyx +++ b/spacy/training/align.pyx @@ -37,10 +37,14 @@ def get_alignments(A: List[str], B: List[str]) -> Tuple[List[List[int]], List[Li b2a.append(set()) # Process the alignment at the current position if A[token_idx_a] == B[token_idx_b] and \ - (char_idx_a == 0 or \ - char_to_token_a[char_idx_a - 1] < token_idx_a) and \ - (char_idx_b == 0 or \ - char_to_token_b[char_idx_b - 1] < token_idx_b): + ( + char_idx_a == 0 or + char_to_token_a[char_idx_a - 1] < token_idx_a + ) and \ + ( + char_idx_b == 0 or + char_to_token_b[char_idx_b - 1] < token_idx_b + ): # Current tokens are identical and both character offsets are the # start of a token (either at the beginning of the document or the # previous character belongs to a different token) diff --git a/spacy/training/example.pyx b/spacy/training/example.pyx index abdac23ea..3f0cf5ade 100644 --- a/spacy/training/example.pyx +++ b/spacy/training/example.pyx @@ -1,4 +1,3 @@ -import warnings from collections.abc import Iterable as IterableInstance import numpy @@ -31,9 +30,9 @@ cpdef Doc annotations_to_doc(vocab, tok_annot, doc_annot): attrs, array = _annot2array(vocab, tok_annot, doc_annot) output = Doc(vocab, words=tok_annot["ORTH"], spaces=tok_annot["SPACY"]) if "entities" in doc_annot: - _add_entities_to_doc(output, doc_annot["entities"]) + _add_entities_to_doc(output, doc_annot["entities"]) if "spans" in doc_annot: - _add_spans_to_doc(output, doc_annot["spans"]) + _add_spans_to_doc(output, doc_annot["spans"]) if array.size: output = output.from_array(attrs, array) # links are currently added with ENT_KB_ID on the token level @@ -161,7 +160,6 @@ cdef class Example: self._y_sig = y_sig return self._cached_alignment - def _get_aligned_vectorized(self, align, gold_values): # Fast path for Doc attributes/fields that are predominantly a single value, # i.e., TAG, POS, MORPH. @@ -204,7 +202,6 @@ cdef class Example: return output.tolist() - def _get_aligned_non_vectorized(self, align, gold_values): # Slower path for fields that return multiple values (resulting # in ragged arrays that cannot be vectorized trivially). @@ -221,7 +218,6 @@ cdef class Example: return output - def get_aligned(self, field, as_string=False): """Return an aligned array for a token attribute.""" align = self.alignment.x2y @@ -330,7 +326,7 @@ cdef class Example: missing=None ) # Now fill the tokens we can align to O. 
- O = 2 # I=1, O=2, B=3 + O = 2 # I=1, O=2, B=3 # no-cython-lint: E741 for i, ent_iob in enumerate(self.get_aligned("ENT_IOB")): if x_tags[i] is None: if ent_iob == O: @@ -340,7 +336,7 @@ cdef class Example: return x_ents, x_tags def get_aligned_ner(self): - x_ents, x_tags = self.get_aligned_ents_and_ner() + _x_ents, x_tags = self.get_aligned_ents_and_ner() return x_tags def get_matching_ents(self, check_label=True): @@ -398,7 +394,6 @@ cdef class Example: return span_dict - def _links_to_dict(self): links = {} for ent in self.reference.ents: @@ -589,6 +584,7 @@ def _fix_legacy_dict_data(example_dict): "doc_annotation": doc_dict } + def _has_field(annot, field): if field not in annot: return False @@ -625,6 +621,7 @@ def _parse_ner_tags(biluo_or_offsets, vocab, words, spaces): ent_types.append("") return ent_iobs, ent_types + def _parse_links(vocab, words, spaces, links): reference = Doc(vocab, words=words, spaces=spaces) starts = {token.idx: token.i for token in reference} diff --git a/spacy/training/gold_io.pyx b/spacy/training/gold_io.pyx index 1e7b3681d..2fc36e41f 100644 --- a/spacy/training/gold_io.pyx +++ b/spacy/training/gold_io.pyx @@ -1,4 +1,3 @@ -import json import warnings import srsly @@ -6,7 +5,7 @@ import srsly from .. import util from ..errors import Warnings from ..tokens import Doc -from .iob_utils import offsets_to_biluo_tags, tags_to_entities +from .iob_utils import offsets_to_biluo_tags def docs_to_json(docs, doc_id=0, ner_missing_tag="O"): @@ -23,7 +22,13 @@ def docs_to_json(docs, doc_id=0, ner_missing_tag="O"): json_doc = {"id": doc_id, "paragraphs": []} for i, doc in enumerate(docs): raw = None if doc.has_unknown_spaces else doc.text - json_para = {'raw': raw, "sentences": [], "cats": [], "entities": [], "links": []} + json_para = { + 'raw': raw, + "sentences": [], + "cats": [], + "entities": [], + "links": [] + } for cat, val in doc.cats.items(): json_cat = {"label": cat, "value": val} json_para["cats"].append(json_cat) @@ -35,13 +40,17 @@ def docs_to_json(docs, doc_id=0, ner_missing_tag="O"): if ent.kb_id_: link_dict = {(ent.start_char, ent.end_char): {ent.kb_id_: 1.0}} json_para["links"].append(link_dict) - biluo_tags = offsets_to_biluo_tags(doc, json_para["entities"], missing=ner_missing_tag) + biluo_tags = offsets_to_biluo_tags( + doc, json_para["entities"], missing=ner_missing_tag + ) attrs = ("TAG", "POS", "MORPH", "LEMMA", "DEP", "ENT_IOB") include_annotation = {attr: doc.has_annotation(attr) for attr in attrs} for j, sent in enumerate(doc.sents): json_sent = {"tokens": [], "brackets": []} for token in sent: - json_token = {"id": token.i, "orth": token.text, "space": token.whitespace_} + json_token = { + "id": token.i, "orth": token.text, "space": token.whitespace_ + } if include_annotation["TAG"]: json_token["tag"] = token.tag_ if include_annotation["POS"]: @@ -125,9 +134,14 @@ def json_to_annotations(doc): else: sent_starts.append(-1) if "brackets" in sent: - brackets.extend((b["first"] + sent_start_i, - b["last"] + sent_start_i, b["label"]) - for b in sent["brackets"]) + brackets.extend( + ( + b["first"] + sent_start_i, + b["last"] + sent_start_i, + b["label"] + ) + for b in sent["brackets"] + ) example["token_annotation"] = dict( ids=ids, @@ -160,6 +174,7 @@ def json_to_annotations(doc): ) yield example + def json_iterate(bytes utf8_str): # We should've made these files jsonl...But since we didn't, parse out # the docs one-by-one to reduce memory usage. 
diff --git a/spacy/vectors.pyx b/spacy/vectors.pyx index bf79481b8..a88f380f9 100644 --- a/spacy/vectors.pyx +++ b/spacy/vectors.pyx @@ -1,10 +1,8 @@ -cimport numpy as np from cython.operator cimport dereference as deref from libc.stdint cimport uint32_t, uint64_t from libcpp.set cimport set as cppset from murmurhash.mrmr cimport hash128_x64 -import functools import warnings from enum import Enum from typing import cast @@ -119,7 +117,7 @@ cdef class Vectors: if self.mode == Mode.default: if data is None: if shape is None: - shape = (0,0) + shape = (0, 0) ops = get_current_ops() data = ops.xp.zeros(shape, dtype="f") self._unset = cppset[int]({i for i in range(data.shape[0])}) @@ -260,11 +258,10 @@ cdef class Vectors: def __eq__(self, other): # Check for equality, with faster checks first return ( - self.shape == other.shape - and self.key2row == other.key2row - and self.to_bytes(exclude=["strings"]) - == other.to_bytes(exclude=["strings"]) - ) + self.shape == other.shape + and self.key2row == other.key2row + and self.to_bytes(exclude=["strings"]) == other.to_bytes(exclude=["strings"]) + ) def resize(self, shape, inplace=False): """Resize the underlying vectors array. If inplace=True, the memory @@ -520,11 +517,12 @@ cdef class Vectors: # vectors e.g. (10000, 300) # sims e.g. (1024, 10000) sims = xp.dot(batch, vectors.T) - best_rows[i:i+batch_size] = xp.argpartition(sims, -n, axis=1)[:,-n:] - scores[i:i+batch_size] = xp.partition(sims, -n, axis=1)[:,-n:] + best_rows[i:i+batch_size] = xp.argpartition(sims, -n, axis=1)[:, -n:] + scores[i:i+batch_size] = xp.partition(sims, -n, axis=1)[:, -n:] if sort and n >= 2: - sorted_index = xp.arange(scores.shape[0])[:,None][i:i+batch_size],xp.argsort(scores[i:i+batch_size], axis=1)[:,::-1] + sorted_index = xp.arange(scores.shape[0])[:, None][i:i+batch_size], \ + xp.argsort(scores[i:i+batch_size], axis=1)[:, ::-1] scores[i:i+batch_size] = scores[sorted_index] best_rows[i:i+batch_size] = best_rows[sorted_index] @@ -538,8 +536,12 @@ cdef class Vectors: numpy_rows = get_current_ops().to_numpy(best_rows) keys = xp.asarray( - [[row2key[row] for row in numpy_rows[i] if row in row2key] - for i in range(len(queries)) ], dtype="uint64") + [ + [row2key[row] for row in numpy_rows[i] if row in row2key] + for i in range(len(queries)) + ], + dtype="uint64" + ) return (keys, best_rows, scores) def to_ops(self, ops: Ops): @@ -582,9 +584,9 @@ cdef class Vectors: """ xp = get_array_module(self.data) if xp is numpy: - save_array = lambda arr, file_: xp.save(file_, arr, allow_pickle=False) + save_array = lambda arr, file_: xp.save(file_, arr, allow_pickle=False) # no-cython-lint else: - save_array = lambda arr, file_: xp.save(file_, arr) + save_array = lambda arr, file_: xp.save(file_, arr) # no-cython-lint def save_vectors(path): # the source of numpy.save indicates that the file object is closed after use. 
diff --git a/spacy/vocab.pxd b/spacy/vocab.pxd index 3b0173e3e..43e47af1d 100644 --- a/spacy/vocab.pxd +++ b/spacy/vocab.pxd @@ -32,7 +32,7 @@ cdef class Vocab: cdef public object writing_system cdef public object get_noun_chunks cdef readonly int length - cdef public object _unused_object # TODO remove in v4, see #9150 + cdef public object _unused_object # TODO remove in v4, see #9150 cdef public object lex_attr_getters cdef public object cfg diff --git a/spacy/vocab.pyx b/spacy/vocab.pyx index 520228b51..d1edc8533 100644 --- a/spacy/vocab.pyx +++ b/spacy/vocab.pyx @@ -1,6 +1,4 @@ # cython: profile=True -from libc.string cimport memcpy - import functools import numpy @@ -19,7 +17,6 @@ from .errors import Errors from .lang.lex_attrs import LEX_ATTRS, get_lang, is_stop from .lang.norm_exceptions import BASE_NORMS from .lookups import Lookups -from .util import registry from .vectors import Mode as VectorsMode from .vectors import Vectors @@ -51,9 +48,17 @@ cdef class Vocab: DOCS: https://spacy.io/api/vocab """ - def __init__(self, lex_attr_getters=None, strings=tuple(), lookups=None, - oov_prob=-20., vectors_name=None, writing_system={}, - get_noun_chunks=None, **deprecated_kwargs): + def __init__( + self, + lex_attr_getters=None, + strings=tuple(), + lookups=None, + oov_prob=-20., + vectors_name=None, + writing_system={}, # no-cython-lint + get_noun_chunks=None, + **deprecated_kwargs + ): """Create the vocabulary. lex_attr_getters (dict): A dictionary mapping attribute IDs to @@ -150,7 +155,6 @@ cdef class Vocab: cdef LexemeC* lex cdef hash_t key = self.strings[string] lex = self._by_orth.get(key) - cdef size_t addr if lex != NULL: assert lex.orth in self.strings if lex.orth != key: @@ -183,7 +187,7 @@ cdef class Vocab: # of the doc ownership). # TODO: Change the C API so that the mem isn't passed in here. mem = self.mem - #if len(string) < 3 or self.length < 10000: + # if len(string) < 3 or self.length < 10000: # mem = self.mem cdef bint is_oov = mem is not self.mem lex = mem.alloc(1, sizeof(LexemeC)) @@ -463,7 +467,6 @@ cdef class Vocab: self.lookups.get_table("lexeme_norm"), ) - def to_disk(self, path, *, exclude=tuple()): """Save the current state to a directory. @@ -476,7 +479,6 @@ cdef class Vocab: path = util.ensure_path(path) if not path.exists(): path.mkdir() - setters = ["strings", "vectors"] if "strings" not in exclude: self.strings.to_disk(path / "strings.json") if "vectors" not in exclude: @@ -495,7 +497,6 @@ cdef class Vocab: DOCS: https://spacy.io/api/vocab#to_disk """ path = util.ensure_path(path) - getters = ["strings", "vectors"] if "strings" not in exclude: self.strings.from_disk(path / "strings.json") # TODO: add exclude? 
if "vectors" not in exclude: From 47a82c61649c39001cad3e2d1c5010f478f14ab6 Mon Sep 17 00:00:00 2001 From: svlandeg Date: Wed, 19 Jul 2023 16:38:29 +0200 Subject: [PATCH 23/27] merge fixes --- .../_parser_internals/_beam_utils.pyx | 6 ----- spacy/pipeline/morphologizer.pyx | 2 +- spacy/pipeline/sentencizer.pyx | 1 - spacy/pipeline/transition_parser.pyx | 25 +++++++++++++------ spacy/tests/pipeline/test_tok2vec.py | 2 +- .../tests/serialize/test_serialize_config.py | 1 + spacy/tokens/span.pyx | 2 +- spacy/tokens/token.pyx | 3 +-- spacy/vectors.pyx | 2 +- 9 files changed, 23 insertions(+), 21 deletions(-) diff --git a/spacy/pipeline/_parser_internals/_beam_utils.pyx b/spacy/pipeline/_parser_internals/_beam_utils.pyx index 9b91459bd..fff8d63e9 100644 --- a/spacy/pipeline/_parser_internals/_beam_utils.pyx +++ b/spacy/pipeline/_parser_internals/_beam_utils.pyx @@ -2,12 +2,6 @@ # cython: profile=True import numpy -from thinc.extra.search cimport Beam - -from thinc.extra.search import MaxViolation - -from thinc.extra.search cimport MaxViolation - from ...typedefs cimport class_t from .transition_system cimport Transition, TransitionSystem diff --git a/spacy/pipeline/morphologizer.pyx b/spacy/pipeline/morphologizer.pyx index cd93422ec..d6ebfc98c 100644 --- a/spacy/pipeline/morphologizer.pyx +++ b/spacy/pipeline/morphologizer.pyx @@ -17,7 +17,7 @@ from ..parts_of_speech import IDS as POS_IDS from ..scorer import Scorer from ..training import validate_examples, validate_get_examples from ..util import registry -from .tagger import Tagger +from .tagger import ActivationsT, Tagger # See #9050 BACKWARD_OVERWRITE = True diff --git a/spacy/pipeline/sentencizer.pyx b/spacy/pipeline/sentencizer.pyx index f7b47ec3e..28cf5d6b4 100644 --- a/spacy/pipeline/sentencizer.pyx +++ b/spacy/pipeline/sentencizer.pyx @@ -11,7 +11,6 @@ from .pipe import Pipe from .senter import senter_score - @Language.factory( "sentencizer", assigns=["token.is_sent_start", "doc.sents"], diff --git a/spacy/pipeline/transition_parser.pyx b/spacy/pipeline/transition_parser.pyx index 8241a75ba..95cd21f9b 100644 --- a/spacy/pipeline/transition_parser.pyx +++ b/spacy/pipeline/transition_parser.pyx @@ -6,28 +6,32 @@ from typing import Dict, Iterable, List, Optional, Tuple cimport numpy as np from cymem.cymem cimport Pool -from itertools import islice - import contextlib import random +from itertools import islice import numpy import numpy.random import srsly - -from thinc.api import CupyOps, NumpyOps, set_dropout_rate +from thinc.api import ( + CupyOps, + NumpyOps, + Optimizer, + get_array_module, + get_ops, + set_dropout_rate, +) from thinc.types import Floats2d, Ints1d from ..ml.tb_framework import TransitionModelInputs from ..tokens.doc cimport Doc -from ._parser_internals cimport _beam_utils -from ._parser_internals.stateclass cimport StateC, StateClass -from .trainable_pipe cimport TrainablePipe - from ..typedefs cimport weight_t from ..vocab cimport Vocab +from ._parser_internals cimport _beam_utils +from ._parser_internals.stateclass cimport StateC, StateClass from ._parser_internals.transition_system cimport Transition, TransitionSystem +from .trainable_pipe cimport TrainablePipe from .. import util from ..errors import Errors @@ -38,6 +42,11 @@ from ..training import ( ) from ._parser_internals import _beam_utils + +# TODO: Remove when we switch to Cython 3. 
+cdef extern from "" namespace "std" nogil: + bint equal[InputIt1, InputIt2](InputIt1 first1, InputIt1 last1, InputIt2 first2) except + + NUMPY_OPS = NumpyOps() diff --git a/spacy/tests/pipeline/test_tok2vec.py b/spacy/tests/pipeline/test_tok2vec.py index b6e865325..f6cefbc1f 100644 --- a/spacy/tests/pipeline/test_tok2vec.py +++ b/spacy/tests/pipeline/test_tok2vec.py @@ -695,4 +695,4 @@ def test_tok2vec_listener_source_replace_listeners(): nlp2.add_pipe("tagger", source=nlp1) assert nlp2.get_pipe("tok2vec").listening_components == [] nlp2.add_pipe("ner", name="ner2", source=nlp1) - assert nlp2.get_pipe("tok2vec").listening_components == ["ner2"] \ No newline at end of file + assert nlp2.get_pipe("tok2vec").listening_components == ["ner2"] diff --git a/spacy/tests/serialize/test_serialize_config.py b/spacy/tests/serialize/test_serialize_config.py index 646ce0f5d..b351ea801 100644 --- a/spacy/tests/serialize/test_serialize_config.py +++ b/spacy/tests/serialize/test_serialize_config.py @@ -18,6 +18,7 @@ from spacy.ml.models import ( build_Tok2Vec_model, ) from spacy.schemas import ConfigSchema, ConfigSchemaDistill, ConfigSchemaPretrain +from spacy.training import Example from spacy.util import ( load_config, load_config_from_str, diff --git a/spacy/tokens/span.pyx b/spacy/tokens/span.pyx index f0ab486a0..683be9d0a 100644 --- a/spacy/tokens/span.pyx +++ b/spacy/tokens/span.pyx @@ -13,7 +13,7 @@ from ..lexeme cimport Lexeme from ..structs cimport TokenC from ..symbols cimport dep from ..typedefs cimport attr_t -from .doc cimport _get_lca_matrix, get_token_attr +from .doc cimport _get_lca_matrix, get_token_attr, token_by_end, token_by_start from .token cimport Token from ..errors import Errors, Warnings diff --git a/spacy/tokens/token.pyx b/spacy/tokens/token.pyx index 14a40c96d..ff1120b7b 100644 --- a/spacy/tokens/token.pyx +++ b/spacy/tokens/token.pyx @@ -26,7 +26,6 @@ from ..attrs cimport ( LIKE_EMAIL, LIKE_NUM, LIKE_URL, - ORTH, ) from ..lexeme cimport Lexeme from ..symbols cimport conj @@ -426,7 +425,7 @@ cdef class Token: if "vector" in self.doc.user_token_hooks: return self.doc.user_token_hooks["vector"](self) else: - return self.vocab.get_vector(Token.get_struct_attr(self.c, self.vocab.vectors.attr)) + return self.vocab.get_vector(self.c.lex.orth) @property def vector_norm(self): diff --git a/spacy/vectors.pyx b/spacy/vectors.pyx index 050b9743c..783e6d00a 100644 --- a/spacy/vectors.pyx +++ b/spacy/vectors.pyx @@ -65,7 +65,7 @@ cdef class Vectors: cdef readonly unicode eow cdef readonly attr_id_t attr - def __init__(self, *, strings=None, shape=None, data=None, keys=None, mode=Mode.default, minn=0, maxn=0, hash_count=1, hash_seed=0, bow="<", eow=">"): + def __init__(self, *, strings=None, shape=None, data=None, keys=None, mode=Mode.default, minn=0, maxn=0, hash_count=1, hash_seed=0, bow="<", eow=">", attr="ORTH"): """Create a new vector store. strings (StringStore): The string store. 
From 846472129cbabaa08d793165b38f7d9c1eaf52e5 Mon Sep 17 00:00:00 2001 From: svlandeg Date: Wed, 19 Jul 2023 16:38:37 +0200 Subject: [PATCH 24/27] merge fixes (2) --- spacy/attrs.pxd | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/spacy/attrs.pxd b/spacy/attrs.pxd index 4a987b2b2..b8972cb71 100644 --- a/spacy/attrs.pxd +++ b/spacy/attrs.pxd @@ -47,5 +47,4 @@ cdef enum attr_id_t: MORPH = symbols.MORPH ENT_ID = symbols.ENT_ID - IDX - SENT_END + IDX = symbols.IDX From 96f2e30c4bba1f2c4b6d74fd444d5626bb3a1e17 Mon Sep 17 00:00:00 2001 From: svlandeg Date: Wed, 19 Jul 2023 17:41:29 +0200 Subject: [PATCH 25/27] cython fixes and cleanup --- spacy/matcher/phrasematcher.pyx | 2 - spacy/ml/tb_framework.pyx | 55 ++++++++++--------- spacy/morphology.pyx | 6 +- spacy/parts_of_speech.pxd | 2 +- spacy/pipeline/_parser_internals/ner.pyx | 1 - spacy/pipeline/_parser_internals/search.pxd | 1 - spacy/pipeline/_parser_internals/search.pyx | 12 ++-- .../_parser_internals/transition_system.pxd | 4 +- .../_parser_internals/transition_system.pyx | 21 ++++--- spacy/pipeline/morphologizer.pyx | 3 +- spacy/pipeline/pipe.pyx | 3 +- spacy/pipeline/trainable_pipe.pyx | 17 +++--- spacy/pipeline/transition_parser.pyx | 55 ++++++++++--------- spacy/strings.pyx | 9 +-- spacy/tests/parser/_search.pyx | 49 +++++++++-------- spacy/tokens/doc.pyx | 2 +- spacy/tokens/morphanalysis.pyx | 1 - spacy/tokens/span.pyx | 3 +- 18 files changed, 118 insertions(+), 128 deletions(-) diff --git a/spacy/matcher/phrasematcher.pyx b/spacy/matcher/phrasematcher.pyx index c848f4033..eb9ca675f 100644 --- a/spacy/matcher/phrasematcher.pyx +++ b/spacy/matcher/phrasematcher.pyx @@ -158,7 +158,6 @@ cdef class PhraseMatcher: del self._callbacks[key] del self._docs[key] - def _add_from_arrays(self, key, specs, *, on_match=None): """Add a preprocessed list of specs, with an optional callback. @@ -194,7 +193,6 @@ cdef class PhraseMatcher: result = internal_node map_set(self.mem, result, self.vocab.strings[key], NULL) - def add(self, key, docs, *, on_match=None): """Add a match-rule to the phrase-matcher. A match-rule consists of: an ID key, a list of one or more patterns, and (optionally) an on_match callback. 
diff --git a/spacy/ml/tb_framework.pyx b/spacy/ml/tb_framework.pyx index fd0af12ce..ed04045a6 100644 --- a/spacy/ml/tb_framework.pyx +++ b/spacy/ml/tb_framework.pyx @@ -1,5 +1,5 @@ # cython: infer_types=True, cdivision=True, boundscheck=False -from typing import Any, List, Optional, Tuple, TypeVar, cast +from typing import Any, List, Optional, Tuple, cast from libc.stdlib cimport calloc, free, realloc from libc.string cimport memcpy, memset @@ -23,7 +23,7 @@ from thinc.api import ( from thinc.backends.cblas cimport CBlas, saxpy, sgemm -from thinc.types import Floats1d, Floats2d, Floats3d, Floats4d, Ints1d, Ints2d +from thinc.types import Floats2d, Floats3d, Floats4d, Ints1d, Ints2d from ..errors import Errors from ..pipeline._parser_internals import _beam_utils @@ -136,7 +136,7 @@ def init( Y: Optional[Tuple[List[State], List[Floats2d]]] = None, ): if X is not None: - docs, moves = X + docs, _ = X model.get_ref("tok2vec").initialize(X=docs) else: model.get_ref("tok2vec").initialize() @@ -145,7 +145,7 @@ def init( current_nO = model.maybe_get_dim("nO") if current_nO is None or current_nO != inferred_nO: model.attrs["resize_output"](model, inferred_nO) - nO = model.get_dim("nO") + # nO = model.get_dim("nO") nP = model.get_dim("nP") nH = model.get_dim("nH") nI = model.get_dim("nI") @@ -192,9 +192,10 @@ class TransitionModelInputs: self, docs: List[Doc], moves: TransitionSystem, - actions: Optional[List[Ints1d]]=None, - max_moves: int=0, - states: Optional[List[State]]=None): + actions: Optional[List[Ints1d]] = None, + max_moves: int = 0, + states: Optional[List[State]] = None, + ): """ actions (Optional[List[Ints1d]]): actions to apply for each Doc. docs (List[Doc]): Docs to predict transition sequences for. @@ -234,12 +235,12 @@ def forward(model, inputs: TransitionModelInputs, is_train: bool): return _forward_greedy_cpu(model, moves, states, feats, seen_mask, actions=actions) else: return _forward_fallback(model, moves, states, tokvecs, backprop_tok2vec, - feats, backprop_feats, seen_mask, is_train, actions=actions, - max_moves=inputs.max_moves) + feats, backprop_feats, seen_mask, is_train, actions=actions, + max_moves=inputs.max_moves) def _forward_greedy_cpu(model: Model, TransitionSystem moves, states: List[StateClass], np.ndarray feats, - np.ndarray[np.npy_bool, ndim=1] seen_mask, actions: Optional[List[Ints1d]]=None): + np.ndarray[np.npy_bool, ndim = 1] seen_mask, actions: Optional[List[Ints1d]] = None): cdef vector[StateC*] c_states cdef StateClass state for state in states: @@ -257,9 +258,10 @@ def _forward_greedy_cpu(model: Model, TransitionSystem moves, states: List[State return (states, scores), backprop + cdef list _parse_batch(CBlas cblas, TransitionSystem moves, StateC** states, WeightsC weights, SizesC sizes, actions: Optional[List[Ints1d]]=None): - cdef int i, j + cdef int i cdef vector[StateC *] unfinished cdef ActivationsC activations = _alloc_activations(sizes) cdef np.ndarray step_scores @@ -276,7 +278,7 @@ cdef list _parse_batch(CBlas cblas, TransitionSystem moves, StateC** states, if actions is None: # Validate actions, argmax, take action. 
c_transition_batch(moves, states, step_scores.data, sizes.classes, - sizes.states) + sizes.states) else: c_apply_actions(moves, states, step_actions.data, sizes.states) for i in range(sizes.states): @@ -302,8 +304,8 @@ def _forward_fallback( backprop_feats, seen_mask, is_train: bool, - actions: Optional[List[Ints1d]]=None, - max_moves: int=0): + actions: Optional[List[Ints1d]] = None, + max_moves: int = 0): nF = model.get_dim("nF") output = model.get_ref("output") hidden_b = model.get_param("hidden_b") @@ -371,7 +373,7 @@ def _forward_fallback( for clas in set(model.attrs["unseen_classes"]): if (d_scores[:, clas] < 0).any(): model.attrs["unseen_classes"].remove(clas) - d_scores *= seen_mask == False + d_scores *= seen_mask == False # no-cython-lint # Calculate the gradients for the parameters of the output layer. # The weight gemm is (nS, nO) @ (nS, nH).T output.inc_grad("b", d_scores.sum(axis=0)) @@ -571,13 +573,13 @@ cdef void _resize_activations(ActivationsC* A, SizesC n) nogil: A._max_size = n.states else: A.token_ids = realloc(A.token_ids, - n.states * n.feats * sizeof(A.token_ids[0])) + n.states * n.feats * sizeof(A.token_ids[0])) A.unmaxed = realloc(A.unmaxed, - n.states * n.hiddens * n.pieces * sizeof(A.unmaxed[0])) + n.states * n.hiddens * n.pieces * sizeof(A.unmaxed[0])) A.hiddens = realloc(A.hiddens, - n.states * n.hiddens * sizeof(A.hiddens[0])) + n.states * n.hiddens * sizeof(A.hiddens[0])) A.is_valid = realloc(A.is_valid, - n.states * n.classes * sizeof(A.is_valid[0])) + n.states * n.classes * sizeof(A.is_valid[0])) A._max_size = n.states A._curr_size = n.states @@ -599,9 +601,9 @@ cdef void _predict_states(CBlas cblas, ActivationsC* A, float* scores, StateC** else: # Compute hidden-to-output sgemm(cblas)(False, True, n.states, n.classes, n.hiddens, - 1.0, A.hiddens, n.hiddens, - W.hidden_weights, n.hiddens, - 0.0, scores, n.classes) + 1.0, A.hiddens, n.hiddens, + W.hidden_weights, n.hiddens, + 0.0, scores, n.classes) # Add bias for i in range(n.states): saxpy(cblas)(n.classes, 1., W.hidden_bias, 1, &scores[i*n.classes], 1) @@ -617,12 +619,12 @@ cdef void _predict_states(CBlas cblas, ActivationsC* A, float* scores, StateC** scores[i*n.classes+j] = min_ -cdef void _sum_state_features(CBlas cblas, float* output, - const float* cached, const int* token_ids, SizesC n) nogil: - cdef int idx, b, f, i +cdef void _sum_state_features(CBlas cblas, float* output, const float* cached, + const int* token_ids, SizesC n) nogil: + cdef int idx, b, f cdef const float* feature cdef int B = n.states - cdef int O = n.hiddens * n.pieces + cdef int O = n.hiddens * n.pieces # no-cython-lint cdef int F = n.feats cdef int T = n.tokens padding = cached + (T * F * O) @@ -637,4 +639,3 @@ cdef void _sum_state_features(CBlas cblas, float* output, feature = &cached[idx] saxpy(cblas)(O, one, feature, 1, &output[b*O], 1) token_ids += F - diff --git a/spacy/morphology.pyx b/spacy/morphology.pyx index 9a8b8bb51..665e964bf 100644 --- a/spacy/morphology.pyx +++ b/spacy/morphology.pyx @@ -80,15 +80,13 @@ cdef class Morphology: out.sort(key=lambda x: x[0]) return dict(out) - def _normalized_feat_dict_to_str(self, feats: Dict[str, str]) -> str: norm_feats_string = self.FEATURE_SEP.join([ - self.FIELD_SEP.join([field, self.VALUE_SEP.join(values) if isinstance(values, list) else values]) + self.FIELD_SEP.join([field, self.VALUE_SEP.join(values) if isinstance(values, list) else values]) for field, values in feats.items() - ]) + ]) return norm_feats_string or self.EMPTY_MORPH - cdef hash_t _add(self, features): 
"""Insert a morphological analysis in the morphology table, if not already present. The morphological analysis may be provided in the UD diff --git a/spacy/parts_of_speech.pxd b/spacy/parts_of_speech.pxd index 01f116ea6..22a571be7 100644 --- a/spacy/parts_of_speech.pxd +++ b/spacy/parts_of_speech.pxd @@ -8,7 +8,7 @@ cpdef enum univ_pos_t: ADV = symbols.ADV AUX = symbols.AUX CONJ = symbols.CONJ - CCONJ = symbols.CCONJ # U20 + CCONJ = symbols.CCONJ # U20 DET = symbols.DET INTJ = symbols.INTJ NOUN = symbols.NOUN diff --git a/spacy/pipeline/_parser_internals/ner.pyx b/spacy/pipeline/_parser_internals/ner.pyx index 58f6b2a93..9220bb522 100644 --- a/spacy/pipeline/_parser_internals/ner.pyx +++ b/spacy/pipeline/_parser_internals/ner.pyx @@ -1,5 +1,4 @@ from cymem.cymem cimport Pool -from libc.stdint cimport int32_t from libcpp.memory cimport shared_ptr from libcpp.vector cimport vector diff --git a/spacy/pipeline/_parser_internals/search.pxd b/spacy/pipeline/_parser_internals/search.pxd index 462649633..ad68dc5c7 100644 --- a/spacy/pipeline/_parser_internals/search.pxd +++ b/spacy/pipeline/_parser_internals/search.pxd @@ -57,7 +57,6 @@ cdef class Beam: cdef int advance(self, trans_func_t transition_func, hash_func_t hash_func, void* extra_args) except -1 cdef int check_done(self, finish_func_t finish_func, void* extra_args) except -1 - cdef inline void set_cell(self, int i, int j, weight_t score, int is_valid, weight_t cost) nogil: self.scores[i][j] = score diff --git a/spacy/pipeline/_parser_internals/search.pyx b/spacy/pipeline/_parser_internals/search.pyx index 251eaa805..578299b56 100644 --- a/spacy/pipeline/_parser_internals/search.pyx +++ b/spacy/pipeline/_parser_internals/search.pyx @@ -1,11 +1,8 @@ # cython: profile=True, experimental_cpp_class_def=True, cdivision=True, infer_types=True cimport cython -from libc.math cimport exp, log -from libc.string cimport memcpy, memset - -import math - from cymem.cymem cimport Pool +from libc.math cimport exp +from libc.string cimport memcpy, memset from preshed.maps cimport PreshMap @@ -70,7 +67,7 @@ cdef class Beam: self.costs[i][j] = costs[j] cdef int set_table(self, weight_t** scores, int** is_valid, weight_t** costs) except -1: - cdef int i, j + cdef int i for i in range(self.width): memcpy(self.scores[i], scores[i], sizeof(weight_t) * self.nr_class) memcpy(self.is_valid[i], is_valid[i], sizeof(bint) * self.nr_class) @@ -176,7 +173,6 @@ cdef class Beam: beam-width, and n is the number of classes. """ cdef Entry entry - cdef weight_t score cdef _State* s cdef int i, j, move_id assert self.size >= 1 @@ -269,7 +265,7 @@ cdef class MaxViolation: # This can happen from non-monotonic actions # If we find a better gold analysis this way, be sure to keep it. 
elif pred._states[i].loss <= 0 \ - and tuple(pred.histories[i]) not in seen_golds: + and tuple(pred.histories[i]) not in seen_golds: g_scores.append(pred._states[i].score) g_hist.append(list(pred.histories[i])) for i in range(gold.size): diff --git a/spacy/pipeline/_parser_internals/transition_system.pxd b/spacy/pipeline/_parser_internals/transition_system.pxd index 66cc7747b..08baed932 100644 --- a/spacy/pipeline/_parser_internals/transition_system.pxd +++ b/spacy/pipeline/_parser_internals/transition_system.pxd @@ -60,7 +60,7 @@ cdef class TransitionSystem: cdef void c_apply_actions(TransitionSystem moves, StateC** states, const int* actions, - int batch_size) nogil + int batch_size) nogil cdef void c_transition_batch(TransitionSystem moves, StateC** states, const float* scores, - int nr_class, int batch_size) nogil + int nr_class, int batch_size) nogil diff --git a/spacy/pipeline/_parser_internals/transition_system.pyx b/spacy/pipeline/_parser_internals/transition_system.pyx index 48517c3f5..aaafe2aa0 100644 --- a/spacy/pipeline/_parser_internals/transition_system.pyx +++ b/spacy/pipeline/_parser_internals/transition_system.pyx @@ -291,19 +291,19 @@ cdef class TransitionSystem: cdef void c_apply_actions(TransitionSystem moves, StateC** states, const int* actions, - int batch_size) nogil: - cdef int i - cdef Transition action - cdef StateC* state - for i in range(batch_size): - state = states[i] - action = moves.c[actions[i]] - action.do(state, action.label) - state.history.push_back(action.clas) + int batch_size) nogil: + cdef int i + cdef Transition action + cdef StateC* state + for i in range(batch_size): + state = states[i] + action = moves.c[actions[i]] + action.do(state, action.label) + state.history.push_back(action.clas) cdef void c_transition_batch(TransitionSystem moves, StateC** states, const float* scores, - int nr_class, int batch_size) nogil: + int nr_class, int batch_size) nogil: is_valid = calloc(moves.n_moves, sizeof(int)) cdef int i, guess cdef Transition action @@ -319,4 +319,3 @@ cdef void c_transition_batch(TransitionSystem moves, StateC** states, const floa action.do(states[i], action.label) states[i].history.push_back(guess) free(is_valid) - diff --git a/spacy/pipeline/morphologizer.pyx b/spacy/pipeline/morphologizer.pyx index d6ebfc98c..4b95f1ff7 100644 --- a/spacy/pipeline/morphologizer.pyx +++ b/spacy/pipeline/morphologizer.pyx @@ -1,8 +1,7 @@ # cython: infer_types=True, profile=True, binding=True from itertools import islice -from typing import Callable, Dict, Iterable, List, Optional, Union +from typing import Callable, Dict, Iterable, Optional, Union -import srsly from thinc.api import Config, Model from thinc.legacy import LegacySequenceCategoricalCrossentropy diff --git a/spacy/pipeline/pipe.pyx b/spacy/pipeline/pipe.pyx index e52859002..8409e64c3 100644 --- a/spacy/pipeline/pipe.pyx +++ b/spacy/pipeline/pipe.pyx @@ -1,12 +1,11 @@ # cython: infer_types=True, profile=True, binding=True -import warnings from typing import Callable, Dict, Iterable, Iterator, Tuple, Union import srsly from ..tokens.doc cimport Doc -from ..errors import Errors, Warnings +from ..errors import Errors from ..language import Language from ..training import Example from ..util import raise_error diff --git a/spacy/pipeline/trainable_pipe.pyx b/spacy/pipeline/trainable_pipe.pyx index f17ba5969..42394c907 100644 --- a/spacy/pipeline/trainable_pipe.pyx +++ b/spacy/pipeline/trainable_pipe.pyx @@ -1,5 +1,4 @@ # cython: infer_types=True, profile=True, binding=True -import warnings from 
typing import Callable, Dict, Iterable, Iterator, Optional, Tuple import srsly @@ -8,7 +7,7 @@ from thinc.api import Model, Optimizer, set_dropout_rate from ..tokens.doc cimport Doc from .. import util -from ..errors import Errors, Warnings +from ..errors import Errors from ..language import Language from ..training import Example, validate_distillation_examples, validate_examples from ..vocab import Vocab @@ -56,14 +55,14 @@ cdef class TrainablePipe(Pipe): except Exception as e: error_handler(self.name, self, [doc], e) - def distill(self, - teacher_pipe: Optional["TrainablePipe"], - examples: Iterable["Example"], - *, - drop: float=0.0, - sgd: Optional[Optimizer]=None, - losses: Optional[Dict[str, float]]=None) -> Dict[str, float]: + teacher_pipe: Optional["TrainablePipe"], + examples: Iterable["Example"], + *, + drop: float = 0.0, + sgd: Optional[Optimizer] = None, + losses: Optional[Dict[str, float]] = None + ) -> Dict[str, float]: """Train a pipe (the student) on the predictions of another pipe (the teacher). The student is typically trained on the probability distribution of the teacher, but details may differ per pipe. diff --git a/spacy/pipeline/transition_parser.pyx b/spacy/pipeline/transition_parser.pyx index 95cd21f9b..2496d1376 100644 --- a/spacy/pipeline/transition_parser.pyx +++ b/spacy/pipeline/transition_parser.pyx @@ -222,12 +222,13 @@ class Parser(TrainablePipe): raise NotImplementedError def distill(self, - teacher_pipe: Optional[TrainablePipe], - examples: Iterable["Example"], - *, - drop: float=0.0, - sgd: Optional[Optimizer]=None, - losses: Optional[Dict[str, float]]=None): + teacher_pipe: Optional[TrainablePipe], + examples: Iterable["Example"], + *, + drop: float = 0.0, + sgd: Optional[Optimizer] = None, + losses: Optional[Dict[str, float]] = None + ): """Train a pipe (the student) on the predictions of another pipe (the teacher). The student is trained on the transition probabilities of the teacher. @@ -277,11 +278,13 @@ class Parser(TrainablePipe): # teacher's distributions. student_inputs = TransitionModelInputs(docs=student_docs, - states=[state.copy() for state in states], moves=self.moves, max_moves=max_moves) + states=[state.copy() for state in states], + moves=self.moves, + max_moves=max_moves) (student_states, student_scores), backprop_scores = self.model.begin_update(student_inputs) actions = _states_diff_to_actions(states, student_states) teacher_inputs = TransitionModelInputs(docs=[eg.reference for eg in examples], - states=states, moves=teacher_pipe.moves, actions=actions) + states=states, moves=teacher_pipe.moves, actions=actions) (_, teacher_scores) = teacher_pipe.model.predict(teacher_inputs) loss, d_scores = self.get_teacher_student_loss(teacher_scores, student_scores) @@ -294,10 +297,9 @@ class Parser(TrainablePipe): return losses - def get_teacher_student_loss( - self, teacher_scores: List[Floats2d], student_scores: List[Floats2d], - normalize: bool=False, + self, teacher_scores: List[Floats2d], student_scores: List[Floats2d], + normalize: bool = False, ) -> Tuple[float, List[Floats2d]]: """Calculate the loss and its gradient for a batch of student scores, relative to teacher scores. @@ -320,9 +322,9 @@ class Parser(TrainablePipe): # ourselves. 
teacher_scores = self.model.ops.softmax(self.model.ops.xp.vstack(teacher_scores), - axis=-1, inplace=True) + axis=-1, inplace=True) student_scores = self.model.ops.softmax(self.model.ops.xp.vstack(student_scores), - axis=-1, inplace=True) + axis=-1, inplace=True) assert teacher_scores.shape == student_scores.shape @@ -436,13 +438,15 @@ class Parser(TrainablePipe): else: init_states, gold_states, _ = self.moves.init_gold_batch(examples) - inputs = TransitionModelInputs(docs=docs, moves=self.moves, - max_moves=max_moves, states=[state.copy() for state in init_states]) + inputs = TransitionModelInputs(docs=docs, + moves=self.moves, + max_moves=max_moves, + states=[state.copy() for state in init_states]) (pred_states, scores), backprop_scores = self.model.begin_update(inputs) if sum(s.shape[0] for s in scores) == 0: return losses d_scores = self.get_loss((gold_states, init_states, pred_states, scores), - examples, max_moves) + examples, max_moves) backprop_scores((pred_states, d_scores)) if sgd not in (None, False): self.finish_update(sgd) @@ -483,9 +487,7 @@ class Parser(TrainablePipe): cdef TransitionSystem moves = self.moves cdef StateClass state cdef int clas - cdef int nF = self.model.get_dim("nF") cdef int nO = moves.n_moves - cdef int nS = sum([len(history) for history in histories]) cdef Pool mem = Pool() cdef np.ndarray costs_i is_valid = mem.alloc(nO, sizeof(int)) @@ -552,8 +554,8 @@ class Parser(TrainablePipe): return losses - def update_beam(self, examples, *, beam_width, - drop=0., sgd=None, losses=None, beam_density=0.0): + def update_beam(self, examples, *, beam_width, drop=0., + sgd=None, losses=None, beam_density=0.0): raise NotImplementedError def set_output(self, nO): @@ -678,9 +680,10 @@ class Parser(TrainablePipe): return states # Parse the states that are too long with the teacher's parsing model. - teacher_inputs = TransitionModelInputs(docs=docs, moves=moves, - states=[state.copy() for state in to_cut]) - (teacher_states, _ ) = teacher_pipe.model.predict(teacher_inputs) + teacher_inputs = TransitionModelInputs(docs=docs, + moves=moves, + states=[state.copy() for state in to_cut]) + (teacher_states, _) = teacher_pipe.model.predict(teacher_inputs) # Step through the teacher's actions and store every state after # each multiple of max_length. 
@@ -778,6 +781,7 @@ def _states_to_actions(states: List[StateClass]) -> List[Ints1d]: return actions + def _states_diff_to_actions( before_states: List[StateClass], after_states: List[StateClass] @@ -798,8 +802,9 @@ def _states_diff_to_actions( c_state_before = before_state.c c_state_after = after_state.c - assert equal(c_state_before.history.begin(), c_state_before.history.end(), - c_state_after.history.begin()) + assert equal(c_state_before.history.begin(), + c_state_before.history.end(), + c_state_after.history.begin()) actions = [] while True: diff --git a/spacy/strings.pyx b/spacy/strings.pyx index 05f1c7353..62ab9c20d 100644 --- a/spacy/strings.pyx +++ b/spacy/strings.pyx @@ -1,7 +1,6 @@ # cython: infer_types=True -from typing import Any, Callable, Iterable, Iterator, List, Optional, Tuple, Union +from typing import Iterable, Iterator, List, Optional, Tuple, Union -cimport cython from libc.stdint cimport uint32_t from libc.string cimport memcpy from murmurhash.mrmr cimport hash64 @@ -243,7 +242,6 @@ cdef class StringStore: cdef int n_length_bytes cdef int i cdef Utf8Str* string = self.mem.alloc(1, sizeof(Utf8Str)) - cdef uint32_t ulength = length if length < sizeof(string.s): string.s[0] = length memcpy(&string.s[1], chars, length) @@ -301,7 +299,7 @@ cpdef hash_t get_string_id(object string_or_hash) except -1: try: return hash_string(string_or_hash) - except: + except: # no-cython-lint if _try_coerce_to_hash(string_or_hash, &str_hash): # Coerce the integral key to the expected primitive hash type. # This ensures that custom/overloaded "primitive" data types @@ -318,6 +316,5 @@ cdef inline bint _try_coerce_to_hash(object key, hash_t* out_hash): try: out_hash[0] = key return True - except: + except: # no-cython-lint return False - diff --git a/spacy/tests/parser/_search.pyx b/spacy/tests/parser/_search.pyx index 0983159b7..cd9e6b2f5 100644 --- a/spacy/tests/parser/_search.pyx +++ b/spacy/tests/parser/_search.pyx @@ -2,7 +2,7 @@ from cymem.cymem cimport Pool from spacy.pipeline._parser_internals.search cimport Beam, MaxViolation -from spacy.typedefs cimport class_t, weight_t +from spacy.typedefs cimport class_t import pytest @@ -42,32 +42,35 @@ cdef int destroy(Pool mem, void* state, void* extra_args) except -1: state = state mem.free(state) + @cytest @pytest.mark.parametrize("nr_class,beam_width", - [ - (2, 3), - (3, 6), - (4, 20), - ] -) + [ + (2, 3), + (3, 6), + (4, 20), + ] + ) def test_init(nr_class, beam_width): b = Beam(nr_class, beam_width) assert b.size == 1 assert b.width == beam_width assert b.nr_class == nr_class + @cytest def test_init_violn(): MaxViolation() + @cytest @pytest.mark.parametrize("nr_class,beam_width,length", - [ - (2, 3, 3), - (3, 6, 15), - (4, 20, 32), - ] -) + [ + (2, 3, 3), + (3, 6, 15), + (4, 20, 32), + ] + ) def test_initialize(nr_class, beam_width, length): b = Beam(nr_class, beam_width) b.initialize(initialize, destroy, length, NULL) @@ -79,11 +82,11 @@ def test_initialize(nr_class, beam_width, length): @cytest @pytest.mark.parametrize("nr_class,beam_width,length,extra", - [ - (2, 3, 4, None), - (3, 6, 15, u"test beam 1"), - ] -) + [ + (2, 3, 4, None), + (3, 6, 15, u"test beam 1"), + ] + ) def test_initialize_extra(nr_class, beam_width, length, extra): b = Beam(nr_class, beam_width) if extra is None: @@ -97,11 +100,11 @@ def test_initialize_extra(nr_class, beam_width, length, extra): @cytest @pytest.mark.parametrize("nr_class,beam_width,length", - [ - (3, 6, 15), - (4, 20, 32), - ] -) + [ + (3, 6, 15), + (4, 20, 32), + ] + ) def 
test_transition(nr_class, beam_width, length): b = Beam(nr_class, beam_width) b.initialize(initialize, destroy, length, NULL) diff --git a/spacy/tokens/doc.pyx b/spacy/tokens/doc.pyx index 58f098cf1..df012a28a 100644 --- a/spacy/tokens/doc.pyx +++ b/spacy/tokens/doc.pyx @@ -1759,7 +1759,7 @@ cdef class Doc: data["underscore_span"] = {} if attr not in data["underscore_span"]: data["underscore_span"][attr] = [] - data["underscore_span"][attr].append({"start": start, "end": end, "value": value, "label": _label, "kb_id": _kb_id, "id":_span_id}) + data["underscore_span"][attr].append({"start": start, "end": end, "value": value, "label": _label, "kb_id": _kb_id, "id": _span_id}) for attr in underscore: if attr not in user_keys: diff --git a/spacy/tokens/morphanalysis.pyx b/spacy/tokens/morphanalysis.pyx index ba73f2eb2..7ff08c4bd 100644 --- a/spacy/tokens/morphanalysis.pyx +++ b/spacy/tokens/morphanalysis.pyx @@ -1,5 +1,4 @@ cimport numpy as np -from libc.string cimport memset from ..errors import Errors from ..morphology import Morphology diff --git a/spacy/tokens/span.pyx b/spacy/tokens/span.pyx index 683be9d0a..26e5920c0 100644 --- a/spacy/tokens/span.pyx +++ b/spacy/tokens/span.pyx @@ -225,8 +225,8 @@ cdef class Span: @property def _(self): - cdef SpanC* span_c = self.span_c() """Custom extension attributes registered via `set_extension`.""" + cdef SpanC* span_c = self.span_c() return Underscore(Underscore.span_extensions, self, start=span_c.start_char, end=span_c.end_char, label=self.label, kb_id=self.kb_id, span_id=self.id) @@ -933,7 +933,6 @@ cdef class Span: self.id_ = ent_id_ - cdef int _count_words_to_root(const TokenC* token, int sent_length) except -1: # Don't allow spaces to be the root, if there are # better candidates From 4f37e4031c227231d4a547d2e4fc306435a98a22 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Thu, 20 Jul 2023 09:59:19 +0200 Subject: [PATCH 26/27] Update spacy/ml/tb_framework.pyx Co-authored-by: Raphael Mitsch --- spacy/ml/tb_framework.pyx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/spacy/ml/tb_framework.pyx b/spacy/ml/tb_framework.pyx index ed04045a6..a48c6b901 100644 --- a/spacy/ml/tb_framework.pyx +++ b/spacy/ml/tb_framework.pyx @@ -305,7 +305,8 @@ def _forward_fallback( seen_mask, is_train: bool, actions: Optional[List[Ints1d]] = None, - max_moves: int = 0): + max_moves: int = 0, +): nF = model.get_dim("nF") output = model.get_ref("output") hidden_b = model.get_param("hidden_b") From f293386d3e230fb68b52335113256991564e2ce2 Mon Sep 17 00:00:00 2001 From: Sofie Van Landeghem Date: Thu, 20 Jul 2023 14:08:29 +0200 Subject: [PATCH 27/27] remove unnecessary line Co-authored-by: Adriane Boyd --- spacy/ml/tb_framework.pyx | 1 - 1 file changed, 1 deletion(-) diff --git a/spacy/ml/tb_framework.pyx b/spacy/ml/tb_framework.pyx index a48c6b901..6c5c29d85 100644 --- a/spacy/ml/tb_framework.pyx +++ b/spacy/ml/tb_framework.pyx @@ -145,7 +145,6 @@ def init( current_nO = model.maybe_get_dim("nO") if current_nO is None or current_nO != inferred_nO: model.attrs["resize_output"](model, inferred_nO) - # nO = model.get_dim("nO") nP = model.get_dim("nP") nH = model.get_dim("nH") nI = model.get_dim("nI")