From 6ae7618418d1a2514d55bff041fb196957844de6 Mon Sep 17 00:00:00 2001
From: Adriane Boyd
Date: Sun, 19 Mar 2023 23:41:20 +0100
Subject: [PATCH] Clean up Vocab constructor (#12290)

* Clean up Vocab constructor

* Change effective type of `strings` from `Iterable[str]` to `Optional[StringStore]`

* Don't automatically add strings to vocab

* Change default values to `None`

* Remove `**deprecated_kwargs`

* Format
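
Since the constructor no longer accepts a plain list of strings and no
longer interns strings one by one, callers should wrap any existing list
in a `StringStore`. A minimal migration sketch (the `words` list is
illustrative, not part of this changeset):

    from spacy.strings import StringStore
    from spacy.vocab import Vocab

    words = ["hello", "world"]
    # Before this change, Vocab(strings=words) added each string to a
    # fresh StringStore; now the Vocab uses the given StringStore directly.
    vocab = Vocab(strings=StringStore(words))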
---
 spacy/strings.pyi                             |  2 +-
 spacy/tests/pipeline/test_pipe_methods.py     |  3 ++-
 .../serialize/test_serialize_vocab_strings.py | 27 +++++++++++--------
 spacy/tests/vocab_vectors/test_lexeme.py      |  2 +-
 spacy/vocab.pyi                               |  2 +-
 spacy/vocab.pyx                               | 18 +++++++------
 website/docs/api/vocab.mdx                    |  5 ++--
 7 files changed, 34 insertions(+), 25 deletions(-)

diff --git a/spacy/strings.pyi b/spacy/strings.pyi
index d9509ff57..38dee7034 100644
--- a/spacy/strings.pyi
+++ b/spacy/strings.pyi
@@ -2,7 +2,7 @@ from typing import List, Optional, Iterable, Iterator, Union, Any, Tuple, overload
 from pathlib import Path
 
 class StringStore:
-    def __init__(self, strings: Optional[Iterable[str]]) -> None: ...
+    def __init__(self, strings: Optional[Iterable[str]] = None) -> None: ...
     @overload
     def __getitem__(self, string_or_hash: str) -> int: ...
     @overload
diff --git a/spacy/tests/pipeline/test_pipe_methods.py b/spacy/tests/pipeline/test_pipe_methods.py
index 9b9786f04..39611a742 100644
--- a/spacy/tests/pipeline/test_pipe_methods.py
+++ b/spacy/tests/pipeline/test_pipe_methods.py
@@ -9,6 +9,7 @@ from spacy.lang.en import English
 from spacy.lang.en.syntax_iterators import noun_chunks
 from spacy.language import Language
 from spacy.pipeline import TrainablePipe
+from spacy.strings import StringStore
 from spacy.tokens import Doc
 from spacy.training import Example
 from spacy.util import SimpleFrozenList, get_arg_names, make_tempdir
@@ -131,7 +132,7 @@ def test_issue5458():
     # Test that the noun chuncker does not generate overlapping spans
     # fmt: off
     words = ["In", "an", "era", "where", "markets", "have", "brought", "prosperity", "and", "empowerment", "."]
-    vocab = Vocab(strings=words)
+    vocab = Vocab(strings=StringStore(words))
     deps = ["ROOT", "det", "pobj", "advmod", "nsubj", "aux", "relcl", "dobj", "cc", "conj", "punct"]
     pos = ["ADP", "DET", "NOUN", "ADV", "NOUN", "AUX", "VERB", "NOUN", "CCONJ", "NOUN", "PUNCT"]
     heads = [0, 2, 0, 9, 6, 6, 2, 6, 7, 7, 0]
diff --git a/spacy/tests/serialize/test_serialize_vocab_strings.py b/spacy/tests/serialize/test_serialize_vocab_strings.py
index fd80c3d8e..f6356ac9e 100644
--- a/spacy/tests/serialize/test_serialize_vocab_strings.py
+++ b/spacy/tests/serialize/test_serialize_vocab_strings.py
@@ -13,8 +13,11 @@ from spacy.vocab import Vocab
 
 from ..util import make_tempdir
 
-test_strings = [([], []), (["rats", "are", "cute"], ["i", "like", "rats"])]
-test_strings_attrs = [(["rats", "are", "cute"], "Hello")]
+test_strings = [
+    (StringStore(), StringStore()),
+    (StringStore(["rats", "are", "cute"]), StringStore(["i", "like", "rats"])),
+]
+test_strings_attrs = [(StringStore(["rats", "are", "cute"]), "Hello")]
 
 
 @pytest.mark.issue(599)
@@ -81,7 +84,7 @@ def test_serialize_vocab_roundtrip_bytes(strings1, strings2):
     vocab2 = Vocab(strings=strings2)
     vocab1_b = vocab1.to_bytes()
     vocab2_b = vocab2.to_bytes()
-    if strings1 == strings2:
+    if strings1.to_bytes() == strings2.to_bytes():
         assert vocab1_b == vocab2_b
     else:
         assert vocab1_b != vocab2_b
@@ -117,11 +120,12 @@ def test_serialize_vocab_roundtrip_disk(strings1, strings2):
 def test_serialize_vocab_lex_attrs_bytes(strings, lex_attr):
     vocab1 = Vocab(strings=strings)
     vocab2 = Vocab()
-    vocab1[strings[0]].norm_ = lex_attr
-    assert vocab1[strings[0]].norm_ == lex_attr
-    assert vocab2[strings[0]].norm_ != lex_attr
+    s = next(iter(vocab1.strings))
+    vocab1[s].norm_ = lex_attr
+    assert vocab1[s].norm_ == lex_attr
+    assert vocab2[s].norm_ != lex_attr
     vocab2 = vocab2.from_bytes(vocab1.to_bytes())
-    assert vocab2[strings[0]].norm_ == lex_attr
+    assert vocab2[s].norm_ == lex_attr
 
 
 @pytest.mark.parametrize("strings,lex_attr", test_strings_attrs)
@@ -136,14 +140,15 @@ def test_deserialize_vocab_seen_entries(strings, lex_attr):
 def test_serialize_vocab_lex_attrs_disk(strings, lex_attr):
     vocab1 = Vocab(strings=strings)
     vocab2 = Vocab()
-    vocab1[strings[0]].norm_ = lex_attr
-    assert vocab1[strings[0]].norm_ == lex_attr
-    assert vocab2[strings[0]].norm_ != lex_attr
+    s = next(iter(vocab1.strings))
+    vocab1[s].norm_ = lex_attr
+    assert vocab1[s].norm_ == lex_attr
+    assert vocab2[s].norm_ != lex_attr
     with make_tempdir() as d:
         file_path = d / "vocab"
         vocab1.to_disk(file_path)
         vocab2 = vocab2.from_disk(file_path)
-    assert vocab2[strings[0]].norm_ == lex_attr
+    assert vocab2[s].norm_ == lex_attr
 
 
 @pytest.mark.parametrize("strings1,strings2", test_strings)
diff --git a/spacy/tests/vocab_vectors/test_lexeme.py b/spacy/tests/vocab_vectors/test_lexeme.py
index d91f41db3..cd7f954ae 100644
--- a/spacy/tests/vocab_vectors/test_lexeme.py
+++ b/spacy/tests/vocab_vectors/test_lexeme.py
@@ -17,7 +17,7 @@ def test_issue361(en_vocab, text1, text2):
 
 
 @pytest.mark.issue(600)
 def test_issue600():
-    vocab = Vocab(tag_map={"NN": {"pos": "NOUN"}})
+    vocab = Vocab()
     doc = Doc(vocab, words=["hello"])
     doc[0].tag_ = "NN"
diff --git a/spacy/vocab.pyi b/spacy/vocab.pyi
index 871044fff..e4a88bfd8 100644
--- a/spacy/vocab.pyi
+++ b/spacy/vocab.pyi
@@ -26,7 +26,7 @@ class Vocab:
     def __init__(
         self,
         lex_attr_getters: Optional[Dict[str, Callable[[str], Any]]] = ...,
-        strings: Optional[Union[List[str], StringStore]] = ...,
+        strings: Optional[StringStore] = ...,
         lookups: Optional[Lookups] = ...,
         oov_prob: float = ...,
         writing_system: Dict[str, Any] = ...,
diff --git a/spacy/vocab.pyx b/spacy/vocab.pyx
index f3c3595ef..0d3c9c883 100644
--- a/spacy/vocab.pyx
+++ b/spacy/vocab.pyx
@@ -49,9 +49,8 @@ cdef class Vocab:
 
     DOCS: https://spacy.io/api/vocab
     """
-    def __init__(self, lex_attr_getters=None, strings=tuple(), lookups=None,
-                 oov_prob=-20., writing_system={}, get_noun_chunks=None,
-                 **deprecated_kwargs):
+    def __init__(self, lex_attr_getters=None, strings=None, lookups=None,
+                 oov_prob=-20., writing_system=None, get_noun_chunks=None):
         """Create the vocabulary.
 
         lex_attr_getters (dict): A dictionary mapping attribute IDs to
@@ -69,16 +68,19 @@ cdef class Vocab:
         self.cfg = {'oov_prob': oov_prob}
         self.mem = Pool()
         self._by_orth = PreshMap()
-        self.strings = StringStore()
         self.length = 0
-        if strings:
-            for string in strings:
-                _ = self[string]
+        if strings is None:
+            self.strings = StringStore()
+        else:
+            self.strings = strings
         self.lex_attr_getters = lex_attr_getters
         self.morphology = Morphology(self.strings)
         self.vectors = Vectors(strings=self.strings)
         self.lookups = lookups
-        self.writing_system = writing_system
+        if writing_system is None:
+            self.writing_system = {}
+        else:
+            self.writing_system = writing_system
         self.get_noun_chunks = get_noun_chunks
diff --git a/website/docs/api/vocab.mdx b/website/docs/api/vocab.mdx
index 3faf1f1a0..304040f9c 100644
--- a/website/docs/api/vocab.mdx
+++ b/website/docs/api/vocab.mdx
@@ -17,14 +17,15 @@ Create the vocabulary.
 > #### Example
 >
 > ```python
+> from spacy.strings import StringStore
 > from spacy.vocab import Vocab
-> vocab = Vocab(strings=["hello", "world"])
+> vocab = Vocab(strings=StringStore(["hello", "world"]))
 > ```
 
 | Name               | Description                                                                                                                                                                |
 | ------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
 | `lex_attr_getters` | A dictionary mapping attribute IDs to functions to compute them. Defaults to `None`. ~~Optional[Dict[str, Callable[[str], Any]]]~~ |
-| `strings`          | A [`StringStore`](/api/stringstore) that maps strings to hash values, and vice versa, or a list of strings. ~~Union[List[str], StringStore]~~ |
+| `strings`          | A [`StringStore`](/api/stringstore) that maps strings to hash values. ~~Optional[StringStore]~~ |
 | `lookups`          | A [`Lookups`](/api/lookups) that stores the `lexeme_norm` and other large lookup tables. Defaults to `None`. ~~Optional[Lookups]~~ |
 | `oov_prob`         | The default OOV probability. Defaults to `-20.0`. ~~float~~ |
 | `writing_system`   | A dictionary describing the language's writing system. Typically provided by [`Language.Defaults`](/api/language#defaults). ~~Dict[str, Any]~~ |
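
A note on the test changes above: with `StringStore` fixtures instead of
lists, the tests can no longer index `strings[0]`, so they pull a stored
string back out of `vocab.strings` instead. A standalone sketch of that
pattern (illustrative values, not part of the patch):

    from spacy.strings import StringStore
    from spacy.vocab import Vocab

    vocab = Vocab(strings=StringStore(["rats", "are", "cute"]))
    # Iterating a StringStore yields its stored strings; take any one entry.
    s = next(iter(vocab.strings))
    vocab[s].norm_ = "Hello"
    assert vocab[s].norm_ == "Hello"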