From 684a77870b478228dbb3d5ab45a2798ef83c9b1a Mon Sep 17 00:00:00 2001 From: Matthew Honnibal Date: Thu, 1 Oct 2020 22:17:26 +0200 Subject: [PATCH 01/25] Allow CharacterEmbed to specify feature --- spacy/ml/models/tok2vec.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/spacy/ml/models/tok2vec.py b/spacy/ml/models/tok2vec.py index fec478e21..888dc9caa 100644 --- a/spacy/ml/models/tok2vec.py +++ b/spacy/ml/models/tok2vec.py @@ -1,4 +1,4 @@ -from typing import Optional, List +from typing import Optional, List, Union from thinc.api import chain, clone, concatenate, with_array, with_padded from thinc.api import Model, noop, list2ragged, ragged2list from thinc.api import FeatureExtractor, HashEmbed @@ -165,7 +165,8 @@ def MultiHashEmbed( @registry.architectures.register("spacy.CharacterEmbed.v1") def CharacterEmbed( - width: int, rows: int, nM: int, nC: int, also_use_static_vectors: bool + width: int, rows: int, nM: int, nC: int, also_use_static_vectors: bool, + feature: Union[int, str]="NORM" ): """Construct an embedded representation based on character embeddings, using a feed-forward network. A fixed number of UTF-8 byte characters are used for @@ -183,7 +184,8 @@ def CharacterEmbed( also concatenated on, and the result is then passed through a feed-forward network to construct a single vector to represent the information. - width (int): The width of the output vector and the NORM hash embedding. + feature (int or str): An attribute to embed, to concatenate with the characters. + width (int): The width of the output vector and the feature embedding. rows (int): The number of rows in the NORM hash embedding table. nM (int): The dimensionality of the character embeddings. Recommended values are between 16 and 64. @@ -193,12 +195,15 @@ def CharacterEmbed( also_use_static_vectors (bool): Whether to also use static word vectors. Requires a vectors table to be loaded in the Doc objects' vocab. """ + feature = intify_attr(feature) + if feature is None: + raise ValueError("Invalid feature: Must be a token attribute.") if also_use_static_vectors: model = chain( concatenate( chain(_character_embed.CharacterEmbed(nM=nM, nC=nC), list2ragged()), chain( - FeatureExtractor([NORM]), + FeatureExtractor([feature]), list2ragged(), with_array(HashEmbed(nO=width, nV=rows, column=0, seed=5)), ), @@ -214,7 +219,7 @@ def CharacterEmbed( concatenate( chain(_character_embed.CharacterEmbed(nM=nM, nC=nC), list2ragged()), chain( - FeatureExtractor([NORM]), + FeatureExtractor([feature]), list2ragged(), with_array(HashEmbed(nO=width, nV=rows, column=0, seed=5)), ), From b854bca15c0e4cf62d2e1c0f896dc1e6a454c099 Mon Sep 17 00:00:00 2001 From: Matthew Honnibal Date: Thu, 1 Oct 2020 22:17:58 +0200 Subject: [PATCH 02/25] Default to LOWER in character embed --- spacy/ml/models/tok2vec.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spacy/ml/models/tok2vec.py b/spacy/ml/models/tok2vec.py index 888dc9caa..907a7a293 100644 --- a/spacy/ml/models/tok2vec.py +++ b/spacy/ml/models/tok2vec.py @@ -166,7 +166,7 @@ def MultiHashEmbed( @registry.architectures.register("spacy.CharacterEmbed.v1") def CharacterEmbed( width: int, rows: int, nM: int, nC: int, also_use_static_vectors: bool, - feature: Union[int, str]="NORM" + feature: Union[int, str]="LOWER" ): """Construct an embedded representation based on character embeddings, using a feed-forward network. 
A fixed number of UTF-8 byte characters are used for From 300e5a9928fd226dfddbf7d5c22558f696bfa1af Mon Sep 17 00:00:00 2001 From: Matthew Honnibal Date: Thu, 1 Oct 2020 23:05:55 +0200 Subject: [PATCH 03/25] Avoid relying on NORM in default v3 models (#6176) * Allow CharacterEmbed to specify feature * Default to LOWER in character embed * Update tok2vec * Use LOWER, not NORM --- spacy/ml/models/tok2vec.py | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/spacy/ml/models/tok2vec.py b/spacy/ml/models/tok2vec.py index 95f9c66df..120e9b02c 100644 --- a/spacy/ml/models/tok2vec.py +++ b/spacy/ml/models/tok2vec.py @@ -1,4 +1,4 @@ -from typing import Optional, List +from typing import Optional, List, Union from thinc.types import Floats2d from thinc.api import chain, clone, concatenate, with_array, with_padded from thinc.api import Model, noop, list2ragged, ragged2list, HashEmbed @@ -10,7 +10,7 @@ from ...ml import _character_embed from ..staticvectors import StaticVectors from ..featureextractor import FeatureExtractor from ...pipeline.tok2vec import Tok2VecListener -from ...attrs import ORTH, NORM, PREFIX, SUFFIX, SHAPE +from ...attrs import ORTH, LOWER, PREFIX, SUFFIX, SHAPE, intify_attr @registry.architectures.register("spacy.Tok2VecListener.v1") @@ -98,7 +98,7 @@ def MultiHashEmbed( attributes using hash embedding, concatenates the results, and passes it through a feed-forward subnetwork to build a mixed representations. - The features used are the NORM, PREFIX, SUFFIX and SHAPE, which can have + The features used are the LOWER, PREFIX, SUFFIX and SHAPE, which can have varying definitions depending on the Vocab of the Doc object passed in. Vectors from pretrained static vectors can also be incorporated into the concatenated representation. @@ -115,7 +115,7 @@ def MultiHashEmbed( also_use_static_vectors (bool): Whether to also use static word vectors. Requires a vectors table to be loaded in the Doc objects' vocab. """ - cols = [NORM, PREFIX, SUFFIX, SHAPE, ORTH] + cols = [LOWER, PREFIX, SUFFIX, SHAPE, ORTH] seed = 7 def make_hash_embed(feature): @@ -123,7 +123,7 @@ def MultiHashEmbed( seed += 1 return HashEmbed( width, - rows if feature == NORM else rows // 2, + rows if feature == LOWER else rows // 2, column=cols.index(feature), seed=seed, dropout=0.0, @@ -131,13 +131,13 @@ def MultiHashEmbed( if also_embed_subwords: embeddings = [ - make_hash_embed(NORM), + make_hash_embed(LOWER), make_hash_embed(PREFIX), make_hash_embed(SUFFIX), make_hash_embed(SHAPE), ] else: - embeddings = [make_hash_embed(NORM)] + embeddings = [make_hash_embed(LOWER)] concat_size = width * (len(embeddings) + also_use_static_vectors) if also_use_static_vectors: model = chain( @@ -165,7 +165,8 @@ def MultiHashEmbed( @registry.architectures.register("spacy.CharacterEmbed.v1") def CharacterEmbed( - width: int, rows: int, nM: int, nC: int, also_use_static_vectors: bool + width: int, rows: int, nM: int, nC: int, also_use_static_vectors: bool, + feature: Union[int, str]="LOWER" ): """Construct an embedded representation based on character embeddings, using a feed-forward network. A fixed number of UTF-8 byte characters are used for @@ -179,12 +180,13 @@ def CharacterEmbed( of being in an arbitrary position depending on the word length. The characters are embedded in a embedding table with a given number of rows, - and the vectors concatenated. A hash-embedded vector of the NORM of the word is + and the vectors concatenated. 
A hash-embedded vector of the LOWER of the word is also concatenated on, and the result is then passed through a feed-forward network to construct a single vector to represent the information. - width (int): The width of the output vector and the NORM hash embedding. - rows (int): The number of rows in the NORM hash embedding table. + feature (int or str): An attribute to embed, to concatenate with the characters. + width (int): The width of the output vector and the feature embedding. + rows (int): The number of rows in the LOWER hash embedding table. nM (int): The dimensionality of the character embeddings. Recommended values are between 16 and 64. nC (int): The number of UTF-8 bytes to embed per word. Recommended values @@ -193,12 +195,15 @@ def CharacterEmbed( also_use_static_vectors (bool): Whether to also use static word vectors. Requires a vectors table to be loaded in the Doc objects' vocab. """ + feature = intify_attr(feature) + if feature is None: + raise ValueError("Invalid feature: Must be a token attribute.") if also_use_static_vectors: model = chain( concatenate( chain(_character_embed.CharacterEmbed(nM=nM, nC=nC), list2ragged()), chain( - FeatureExtractor([NORM]), + FeatureExtractor([feature]), list2ragged(), with_array(HashEmbed(nO=width, nV=rows, column=0, seed=5)), ), @@ -214,7 +219,7 @@ def CharacterEmbed( concatenate( chain(_character_embed.CharacterEmbed(nM=nM, nC=nC), list2ragged()), chain( - FeatureExtractor([NORM]), + FeatureExtractor([feature]), list2ragged(), with_array(HashEmbed(nO=width, nV=rows, column=0, seed=5)), ), From 6b94cee4687e70514fc30f8295bf13ea3fd2c194 Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Fri, 2 Oct 2020 01:11:19 +0200 Subject: [PATCH 04/25] Fix docs [ci skip] --- website/docs/api/top-level.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/website/docs/api/top-level.md b/website/docs/api/top-level.md index 68d7a3039..22de0ea83 100644 --- a/website/docs/api/top-level.md +++ b/website/docs/api/top-level.md @@ -269,11 +269,11 @@ If a setting is not present in the options, the default value will be used. > displacy.serve(doc, style="ent", options=options) > ``` -| Name | Description | -| --------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `ents` | Entity types to highlight or `None` for all types (default). ~~Optional[List[str]]~~ | -| `colors` | Color overrides. Entity types should be mapped to color names or values. ~~Dict[str, str]~~ | -| `template` 2.2 | Optional template to overwrite the HTML used to render entity spans. Should be a format string and can use `{bg}`, `{text}` and `{label}`. See [`templates.py`](GITHUB_SPACY/spacy/displacy/templates.py) for examples. ~~Optional[str]~~ | +| Name | Description | +| --------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `ents` | Entity types to highlight or `None` for all types (default). ~~Optional[List[str]]~~ | +| `colors` | Color overrides. Entity types should be mapped to color names or values. ~~Dict[str, str]~~ | +| `template` 2.2 | Optional template to overwrite the HTML used to render entity spans. 
Should be a format string and can use `{bg}`, `{text}` and `{label}`. See [`templates.py`](%%GITHUB_SPACY/spacy/displacy/templates.py) for examples. ~~Optional[str]~~ | By default, displaCy comes with colors for all entity types used by [spaCy's trained pipelines](/models). If you're using custom entity types, you From e59ecb12c0d6298c75b713ad9cc2f4a1a1682227 Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Fri, 2 Oct 2020 01:12:30 +0200 Subject: [PATCH 05/25] Auto-format --- spacy/ml/featureextractor.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/spacy/ml/featureextractor.py b/spacy/ml/featureextractor.py index dcf212628..3d189008a 100644 --- a/spacy/ml/featureextractor.py +++ b/spacy/ml/featureextractor.py @@ -9,7 +9,9 @@ def FeatureExtractor(columns: List[Union[int, str]]) -> Model[List[Doc], List[In return Model("extract_features", forward, attrs={"columns": columns}) -def forward(model: Model[List[Doc], List[Ints2d]], docs, is_train: bool) -> Tuple[List[Ints2d], Callable]: +def forward( + model: Model[List[Doc], List[Ints2d]], docs, is_train: bool +) -> Tuple[List[Ints2d], Callable]: columns = model.attrs["columns"] features: List[Ints2d] = [] for doc in docs: From af282ae73259dc966bc741de632ee5cab41633a9 Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Fri, 2 Oct 2020 01:12:34 +0200 Subject: [PATCH 06/25] Fix import --- spacy/ml/featureextractor.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/spacy/ml/featureextractor.py b/spacy/ml/featureextractor.py index 3d189008a..ed2918f02 100644 --- a/spacy/ml/featureextractor.py +++ b/spacy/ml/featureextractor.py @@ -1,7 +1,8 @@ from typing import List, Union, Callable, Tuple -from thinc.types import Ints2d, Doc +from thinc.types import Ints2d from thinc.api import Model, registry +from ..tokens import Doc @registry.layers("spacy.FeatureExtractor.v1") From 01c1538c720f529f433163d495c351ecbd13ccc2 Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Fri, 2 Oct 2020 01:36:06 +0200 Subject: [PATCH 07/25] Integrate file readers --- pyproject.toml | 2 +- requirements.txt | 4 +- setup.cfg | 6 +- spacy/default_config_pretraining.cfg | 2 +- spacy/errors.py | 6 - spacy/tests/training/test_training.py | 6 +- spacy/training/augment.py | 40 +++--- spacy/training/corpus.py | 10 +- spacy/util.py | 4 - website/docs/api/corpus.md | 16 +-- website/docs/api/data-formats.md | 4 +- website/docs/api/top-level.md | 115 ++++++++++++------ website/docs/usage/embeddings-transformers.md | 2 +- website/meta/type-annotations.json | 2 +- 14 files changed, 126 insertions(+), 93 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index e88ba7db9..611a95d27 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,7 +6,7 @@ requires = [ "cymem>=2.0.2,<2.1.0", "preshed>=3.0.2,<3.1.0", "murmurhash>=0.28.0,<1.1.0", - "thinc>=8.0.0a42,<8.0.0a50", + "thinc>=8.0.0a43,<8.0.0a50", "blis>=0.4.0,<0.5.0", "pytokenizations", "pathy" diff --git a/requirements.txt b/requirements.txt index 064efed42..44dad38e3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,12 +1,12 @@ # Our libraries cymem>=2.0.2,<2.1.0 preshed>=3.0.2,<3.1.0 -thinc>=8.0.0a42,<8.0.0a50 +thinc>=8.0.0a43,<8.0.0a50 blis>=0.4.0,<0.5.0 ml_datasets==0.2.0a0 murmurhash>=0.28.0,<1.1.0 wasabi>=0.8.0,<1.1.0 -srsly>=2.1.0,<3.0.0 +srsly>=2.3.0,<3.0.0 catalogue>=2.0.1,<2.1.0 typer>=0.3.0,<0.4.0 pathy diff --git a/setup.cfg b/setup.cfg index 36ab64bd9..7a3a2cb30 100644 --- a/setup.cfg +++ b/setup.cfg @@ -34,16 +34,16 @@ setup_requires = cymem>=2.0.2,<2.1.0 
preshed>=3.0.2,<3.1.0 murmurhash>=0.28.0,<1.1.0 - thinc>=8.0.0a42,<8.0.0a50 + thinc>=8.0.0a43,<8.0.0a50 install_requires = # Our libraries murmurhash>=0.28.0,<1.1.0 cymem>=2.0.2,<2.1.0 preshed>=3.0.2,<3.1.0 - thinc>=8.0.0a42,<8.0.0a50 + thinc>=8.0.0a43,<8.0.0a50 blis>=0.4.0,<0.5.0 wasabi>=0.8.0,<1.1.0 - srsly>=2.1.0,<3.0.0 + srsly>=2.3.0,<3.0.0 catalogue>=2.0.1,<2.1.0 typer>=0.3.0,<0.4.0 pathy diff --git a/spacy/default_config_pretraining.cfg b/spacy/default_config_pretraining.cfg index 4011159a4..66987171a 100644 --- a/spacy/default_config_pretraining.cfg +++ b/spacy/default_config_pretraining.cfg @@ -34,7 +34,7 @@ learn_rate = 0.001 [corpora] [corpora.pretrain] -@readers = "spacy.JsonlReader.v1" +@readers = "spacy.JsonlCorpus.v1" path = ${paths.raw_text} min_length = 5 max_length = 500 diff --git a/spacy/errors.py b/spacy/errors.py index 5236992e9..881a697f6 100644 --- a/spacy/errors.py +++ b/spacy/errors.py @@ -477,12 +477,6 @@ class Errors: E201 = ("Span index out of range.") # TODO: fix numbering after merging develop into master - E912 = ("No orth_variants lookups table for data augmentation available for " - "language '{lang}'. If orth_variants are available in " - "spacy-lookups-data, make sure the package is installed and the " - "table is loaded in the [initialize.lookups] block of your config. " - "Alternatively, you can provide your own Lookups object with a " - "table orth_variants as the argument 'lookuos' of the augmenter.") E913 = ("Corpus path can't be None. Maybe you forgot to define it in your " "config.cfg or override it on the CLI?") E914 = ("Executing {name} callback failed. Expected the function to " diff --git a/spacy/tests/training/test_training.py b/spacy/tests/training/test_training.py index 405801f62..c53042ef1 100644 --- a/spacy/tests/training/test_training.py +++ b/spacy/tests/training/test_training.py @@ -504,9 +504,9 @@ def test_make_orth_variants(doc): {"tags": [":"], "variants": ["-", "—", "–", "--", "---", "——"]}, ] } - lookups = Lookups() - lookups.add_table("orth_variants", orth_variants) - augmenter = create_orth_variants_augmenter(level=0.2, lower=0.5, lookups=lookups) + augmenter = create_orth_variants_augmenter( + level=0.2, lower=0.5, orth_variants=orth_variants + ) with make_tempdir() as tmpdir: output_file = tmpdir / "roundtrip.spacy" DocBin(docs=[doc]).to_disk(output_file) diff --git a/spacy/training/augment.py b/spacy/training/augment.py index 176530a1c..8965c5457 100644 --- a/spacy/training/augment.py +++ b/spacy/training/augment.py @@ -1,27 +1,43 @@ -from typing import Callable, Iterator, Dict, List, Tuple, Optional, TYPE_CHECKING +from typing import Callable, Iterator, Dict, List, Tuple, TYPE_CHECKING import random import itertools import copy from functools import partial +from pydantic import BaseModel, StrictStr from ..util import registry, logger from ..tokens import Doc from .example import Example -from ..lookups import Lookups -from ..errors import Errors if TYPE_CHECKING: from ..language import Language # noqa: F401 +class OrthVariantsSingle(BaseModel): + tags: List[StrictStr] + variants: List[StrictStr] + + +class OrthVariantsPaired(BaseModel): + tags: List[StrictStr] + variants: List[List[StrictStr]] + + +class OrthVariants(BaseModel): + paired: List[OrthVariantsPaired] = {} + single: List[OrthVariantsSingle] = {} + + @registry.augmenters("spacy.orth_variants.v1") def create_orth_variants_augmenter( - level: float, lower: float, lookups: Optional[Lookups] = None, + level: float, lower: float, orth_variants: OrthVariants, ) -> 
Callable[["Language", Example], Iterator[Example]]: """Create a data augmentation callback that uses orth-variant replacement. The callback can be added to a corpus or other data iterator during training. """ - return partial(orth_variants_augmenter, level=level, lower=lower, lookups=lookups) + return partial( + orth_variants_augmenter, orth_variants=orth_variants, level=level, lower=lower + ) def dont_augment(nlp: "Language", example: Example) -> Iterator[Example]: @@ -31,20 +47,11 @@ def dont_augment(nlp: "Language", example: Example) -> Iterator[Example]: def orth_variants_augmenter( nlp: "Language", example: Example, + orth_variants: dict, *, level: float = 0.0, lower: float = 0.0, - lookups: Optional[Lookups] = None, ) -> Iterator[Example]: - table_name = "orth_variants" - if lookups is not None: - orth_variants = lookups.get_table(table_name, {}) - logger.debug("Using data augmentation orth variants from provided lookups") - else: - orth_variants = nlp.vocab.lookups.get_table(table_name, {}) - logger.debug("Using data augmentation orth variants from default vocab lookups") - if not orth_variants: - raise ValueError(Errors.E912.format(lang=nlp.lang)) if random.random() >= level: yield example else: @@ -74,13 +81,14 @@ def make_orth_variants( nlp: "Language", raw: str, token_dict: Dict[str, List[str]], - orth_variants: Dict[str, list], + orth_variants: Dict[str, List[Dict[str, List[str]]]], *, lower: bool = False, ) -> Tuple[str, Dict[str, List[str]]]: orig_token_dict = copy.deepcopy(token_dict) ndsv = orth_variants.get("single", []) ndpv = orth_variants.get("paired", []) + logger.debug(f"Data augmentation: {len(ndsv)} single / {len(ndpv)} paired variants") words = token_dict.get("words", []) tags = token_dict.get("tags", []) # keep unmodified if words or tags are not defined diff --git a/spacy/training/corpus.py b/spacy/training/corpus.py index 57787cf76..b3ff30e66 100644 --- a/spacy/training/corpus.py +++ b/spacy/training/corpus.py @@ -38,11 +38,11 @@ def create_docbin_reader( ) -@util.registry.readers("spacy.JsonlReader.v1") +@util.registry.readers("spacy.JsonlCorpus.v1") def create_jsonl_reader( path: Path, min_length: int = 0, max_length: int = 0, limit: int = 0 ) -> Callable[["Language"], Iterable[Doc]]: - return JsonlTexts(path, min_length=min_length, max_length=max_length, limit=limit) + return JsonlCorpus(path, min_length=min_length, max_length=max_length, limit=limit) @util.registry.readers("spacy.read_labels.v1") @@ -193,7 +193,7 @@ class Corpus: break -class JsonlTexts: +class JsonlCorpus: """Iterate Doc objects from a file or directory of jsonl formatted raw text files. @@ -206,7 +206,7 @@ class JsonlTexts: limit (int): Limit corpus to a subset of examples, e.g. for debugging. Defaults to 0, which indicates no limit. - DOCS: https://nightly.spacy.io/api/corpus#jsonltexts + DOCS: https://nightly.spacy.io/api/corpus#jsonlcorpus """ file_type = "jsonl" @@ -230,7 +230,7 @@ class JsonlTexts: nlp (Language): The current nlp object. YIELDS (Example): The example objects. - DOCS: https://nightly.spacy.io/api/corpus#jsonltexts-call + DOCS: https://nightly.spacy.io/api/corpus#jsonlcorpus-call """ for loc in walk_corpus(self.path, ".jsonl"): records = srsly.read_jsonl(loc) diff --git a/spacy/util.py b/spacy/util.py index 8a96ba4fe..f234927d6 100644 --- a/spacy/util.py +++ b/spacy/util.py @@ -103,10 +103,6 @@ class registry(thinc.registry): cli = catalogue.create("spacy", "cli", entry_points=True) -# We want json loading in the registry, so manually register srsly.read_json. 
-registry.readers("srsly.read_json.v0", srsly.read_json) - - class SimpleFrozenDict(dict): """Simplified implementation of a frozen dict, mainly used as default function or method argument (for arguments that should default to empty diff --git a/website/docs/api/corpus.md b/website/docs/api/corpus.md index 58006a19b..986c6f458 100644 --- a/website/docs/api/corpus.md +++ b/website/docs/api/corpus.md @@ -100,7 +100,7 @@ Yield examples from the data. | `nlp` | The current `nlp` object. ~~Language~~ | | **YIELDS** | The examples. ~~Example~~ | -## JsonlTexts {#jsonltexts tag="class"} +## JsonlCorpus {#jsonlcorpus tag="class"} Iterate Doc objects from a file or directory of JSONL (newline-delimited JSON) formatted raw text files. Can be used to read the raw text corpus for language @@ -126,22 +126,22 @@ file. {"text": "My cynical view on this is that it will never be free to the public. Reason: what would be the draw of joining the military? Right now their selling point is free Healthcare and Education. Ironically both are run horribly and most, that I've talked to, come out wishing they never went in."} ``` -### JsonlTexts.\_\init\_\_ {#jsonltexts-init tag="method"} +### JsonlCorpus.\_\init\_\_ {#jsonlcorpus tag="method"} Initialize the reader. > #### Example > > ```python -> from spacy.training import JsonlTexts +> from spacy.training import JsonlCorpus > -> corpus = JsonlTexts("./data/texts.jsonl") +> corpus = JsonlCorpus("./data/texts.jsonl") > ``` > > ```ini > ### Example config > [corpora.pretrain] -> @readers = "spacy.JsonlReader.v1" +> @readers = "spacy.JsonlCorpus.v1" > path = "corpus/raw_text.jsonl" > min_length = 0 > max_length = 0 @@ -156,17 +156,17 @@ Initialize the reader. | `max_length` | Maximum document length (in tokens). Longer documents will be skipped. Defaults to `0`, which indicates no limit. ~~int~~ | | `limit` | Limit corpus to a subset of examples, e.g. for debugging. Defaults to `0` for no limit. ~~int~~ | -### JsonlTexts.\_\_call\_\_ {#jsonltexts-call tag="method"} +### JsonlCorpus.\_\_call\_\_ {#jsonlcorpus-call tag="method"} Yield examples from the data. > #### Example > > ```python -> from spacy.training import JsonlTexts +> from spacy.training import JsonlCorpus > import spacy > -> corpus = JsonlTexts("./texts.jsonl") +> corpus = JsonlCorpus("./texts.jsonl") > nlp = spacy.blank("en") > data = corpus(nlp) > ``` diff --git a/website/docs/api/data-formats.md b/website/docs/api/data-formats.md index 22a0076cd..c1b9bfef4 100644 --- a/website/docs/api/data-formats.md +++ b/website/docs/api/data-formats.md @@ -135,7 +135,7 @@ $ python -m spacy train config.cfg --paths.train ./corpus/train.spacy > path = ${paths:dev} > > [corpora.pretrain] -> @readers = "spacy.JsonlReader.v1" +> @readers = "spacy.JsonlCorpus.v1" > path = ${paths.raw} > > [corpora.my_custom_data] @@ -146,7 +146,7 @@ This section defines a **dictionary** mapping of string keys to functions. Each function takes an `nlp` object and yields [`Example`](/api/example) objects. By default, the two keys `train` and `dev` are specified and each refer to a [`Corpus`](/api/top-level#Corpus). When pretraining, an additional `pretrain` -section is added that defaults to a [`JsonlReader`](/api/top-level#JsonlReader). +section is added that defaults to a [`JsonlCorpus`](/api/top-level#JsonlCorpus). You can also register custom functions that return a callable. 
| Name | Description | diff --git a/website/docs/api/top-level.md b/website/docs/api/top-level.md index 22de0ea83..876006774 100644 --- a/website/docs/api/top-level.md +++ b/website/docs/api/top-level.md @@ -327,7 +327,7 @@ factories. | `losses` | Registry for functions that create [losses](https://thinc.ai/docs/api-loss). | | `misc` | Registry for miscellaneous functions that return data assets, knowledge bases or anything else you may need. | | `optimizers` | Registry for functions that create [optimizers](https://thinc.ai/docs/api-optimizers). | -| `readers` | Registry for training and evaluation data readers like [`Corpus`](/api/corpus). | +| `readers` | Registry for file and data readers, including training and evaluation data readers like [`Corpus`](/api/corpus). | | `schedules` | Registry for functions that create [schedules](https://thinc.ai/docs/api-schedules). | | `tokenizers` | Registry for tokenizer factories. Registered functions should return a callback that receives the `nlp` object and returns a [`Tokenizer`](/api/tokenizer) or a custom callable. | @@ -470,7 +470,65 @@ logging the results. -## Readers {#readers source="spacy/training/corpus.py" new="3"} +## Readers {#readers} + +### File readers {#file-readers source="github.com/explosion/srsly" new="3"} + +The following file readers are provided by our serialization library +[`srsly`](https://github.com/explosion/srsly). All registered functions take one +argument `path`, pointing to the file path to load. + +> #### Example config +> +> ```ini +> [corpora.train.augmenter.orth_variants] +> @readers = "srsly.read_json.v1" +> path = "corpus/en_orth_variants.json" +> ``` + +| Name | Description | +| ----------------------- | ----------------------------------------------------- | +| `srsly.read_json.v1` | Read data from a JSON file. | +| `srsly.read_jsonl.v1` | Read data from a JSONL (newline-delimited JSON) file. | +| `srsly.read_yaml.v1` | Read data from a YAML file. | +| `srsly.read_msgpack.v1` | Read data from a binary MessagePack file. | + + + +Since the file readers expect a local path, you should only use them in config +blocks that are **not executed at runtime** – for example, in `[training]` and +`[corpora]` (to load data or resources like data augmentation tables) or in +`[initialize]` (to pass data to pipeline components). + + + +#### spacy.read_labels.v1 {#read_labels tag="registered function"} + +Read a JSON-formatted labels file generated with +[`init labels`](/api/cli#init-labels). Typically used in the +[`[initialize]`](/api/data-formats#config-initialize) block of the training +config to speed up the model initialization process and provide pre-generated +label sets. + +> #### Example config +> +> ```ini +> [initialize.components] +> +> [initialize.components.ner] +> +> [initialize.components.ner.labels] +> @readers = "spacy.read_labels.v1" +> path = "corpus/labels/ner.json" +> ``` + +| Name | Description | +| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `path` | The path to the labels file generated with [`init labels`](/api/cli#init-labels). ~~Path~~ | +| `require` | Whether to require the file to exist. If set to `False` and the labels file doesn't exist, the loader will return `None` and the `initialize` method will extract the labels from the data. Defaults to `False`. 
~~bool~~ | +| **CREATES** | The | + +### Corpus readers {#corpus-readers source="spacy/training/corpus.py" new="3"} Corpus readers are registered functions that load data and return a function that takes the current `nlp` object and yields [`Example`](/api/example) objects @@ -480,7 +538,7 @@ with your own registered function in the [`@readers` registry](/api/top-level#registry) to customize the data loading and streaming. -### spacy.Corpus.v1 {#corpus tag="registered function"} +#### spacy.Corpus.v1 {#corpus tag="registered function"} The `Corpus` reader manages annotated corpora and can be used for training and development datasets in the [DocBin](/api/docbin) (`.spacy`) format. Also see @@ -509,12 +567,12 @@ the [`Corpus`](/api/corpus) class. | `augmenter` | Apply some simply data augmentation, where we replace tokens with variations. This is especially useful for punctuation and case replacement, to help generalize beyond corpora that don't have smart-quotes, or only have smart quotes, etc. Defaults to `None`. ~~Optional[Callable]~~ | | **CREATES** | The corpus reader. ~~Corpus~~ | -### spacy.JsonlReader.v1 {#jsonlreader tag="registered function"} +#### spacy.JsonlCorpus.v1 {#jsonlcorpus tag="registered function"} Create [`Example`](/api/example) objects from a JSONL (newline-delimited JSON) file of texts keyed by `"text"`. Can be used to read the raw text corpus for language model [pretraining](/usage/embeddings-transformers#pretraining) from a -JSONL file. Also see the [`JsonlReader`](/api/corpus#jsonlreader) class. +JSONL file. Also see the [`JsonlCorpus`](/api/corpus#jsonlcorpus) class. > #### Example config > @@ -523,7 +581,7 @@ JSONL file. Also see the [`JsonlReader`](/api/corpus#jsonlreader) class. > pretrain = "corpus/raw_text.jsonl" > > [corpora.pretrain] -> @readers = "spacy.JsonlReader.v1" +> @readers = "spacy.JsonlCorpus.v1" > path = ${paths.pretrain} > min_length = 0 > max_length = 0 @@ -536,33 +594,7 @@ JSONL file. Also see the [`JsonlReader`](/api/corpus#jsonlreader) class. | `min_length` | Minimum document length (in tokens). Shorter documents will be skipped. Defaults to `0`, which indicates no limit. ~~int~~ | | `max_length` | Maximum document length (in tokens). Longer documents will be skipped. Defaults to `0`, which indicates no limit. ~~int~~ | | `limit` | Limit corpus to a subset of examples, e.g. for debugging. Defaults to `0` for no limit. ~~int~~ | -| **CREATES** | The corpus reader. ~~JsonlTexts~~ | - -### spacy.read_labels.v1 {#read_labels tag="registered function"} - -Read a JSON-formatted labels file generated with -[`init labels`](/api/cli#init-labels). Typically used in the -[`[initialize]`](/api/data-formats#config-initialize) block of the training -config to speed up the model initialization process and provide pre-generated -label sets. - -> #### Example config -> -> ```ini -> [initialize.components] -> -> [initialize.components.ner] -> -> [initialize.components.ner.labels] -> @readers = "spacy.read_labels.v1" -> path = "corpus/labels/ner.json" -> ``` - -| Name | Description | -| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `path` | The path to the labels file generated with [`init labels`](/api/cli#init-labels). ~~Path~~ | -| `require` | Whether to require the file to exist. 
If set to `False` and the labels file doesn't exist, the loader will return `None` and the `initialize` method will extract the labels from the data. Defaults to `False`. ~~bool~~ | -| **CREATES** | The | +| **CREATES** | The corpus reader. ~~JsonlCorpus~~ | ## Batchers {#batchers source="spacy/training/batchers.py" new="3"} @@ -664,7 +696,10 @@ sequences in the batch. > @augmenters = "spacy.orth_variants.v1" > level = 0.1 > lower = 0.5 -> lookups = null +> +> [corpora.train.augmenter.orth_variants] +> @readers = "srsly.read_json.v1" +> path = "corpus/en_orth_variants.json" > ``` Create a data augmentation callback that uses orth-variant replacement. The @@ -672,12 +707,12 @@ callback can be added to a corpus or other data iterator during training. This is especially useful for punctuation and case replacement, to help generalize beyond corpora that don't have smart quotes, or only have smart quotes etc. -| Name | Description | -| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `level` | The percentage of texts that will be augmented. ~~float~~ | -| `lower` | The percentage of texts that will be lowercased. ~~float~~ | -| `lookups` | Lookups table containing the orth variants to use. See [`orth_variants.json`](https://github.com/explosion/spacy-lookups-data/blob/master/spacy_lookups_data/data/en_orth_variants.json) for an example. If not set, tables from [`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data) are used if available and added in the [`[initialize]`](/api/data-formats#config-initialize) block of the config. If no orth variants are found, spaCy will raise an error. Defaults to `None`. ~~Optional[Lookups]~~ | -| **CREATES** | A function that takes the current `nlp` object and an [`Example`](/api/example) and yields augmented `Example` objects. ~~Callable[[Language, Example], Iterator[Example]]~~ | +| Name | Description | +| --------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `level` | The percentage of texts that will be augmented. ~~float~~ | +| `lower` | The percentage of texts that will be lowercased. ~~float~~ | +| `orth_variants` | A dictionary containing the single and paired orth variants. Typically loaded from a JSON file. See [`en_orth_variants.json`](https://github.com/explosion/spacy-lookups-data/blob/master/spacy_lookups_data/data/en_orth_variants.json) for an example. ~~Dict[str, Dict[List[Union[str, List[str]]]]]~~ | +| **CREATES** | A function that takes the current `nlp` object and an [`Example`](/api/example) and yields augmented `Example` objects. 
~~Callable[[Language, Example], Iterator[Example]]~~ | ## Training data and alignment {#gold source="spacy/training"} diff --git a/website/docs/usage/embeddings-transformers.md b/website/docs/usage/embeddings-transformers.md index 1b78b8dc5..c615097d6 100644 --- a/website/docs/usage/embeddings-transformers.md +++ b/website/docs/usage/embeddings-transformers.md @@ -622,7 +622,7 @@ that are familiar from the training block: the `[pretraining.batcher]`, `[pretraining.optimizer]` and `[pretraining.corpus]` all work the same way and expect the same types of objects, although for pretraining your corpus does not need to have any annotations, so you will often use a different reader, such as -the [`JsonlReader`](/api/top-level#jsonlreader). +the [`JsonlCorpus`](/api/top-level#jsonlcorpus). > #### Raw text format > diff --git a/website/meta/type-annotations.json b/website/meta/type-annotations.json index 43a524e93..acbc88ae2 100644 --- a/website/meta/type-annotations.json +++ b/website/meta/type-annotations.json @@ -24,7 +24,7 @@ "TransformerData": "/api/transformer#transformerdata", "FullTransformerBatch": "/api/transformer#fulltransformerbatch", "Corpus": "/api/corpus", - "JsonlTexts": "/api/corpus#jsonltexts", + "JsonlCorpus": "/api/corpus#jsonlcorpus", "LexemeC": "/api/cython-structs#lexemec", "TokenC": "/api/cython-structs#tokenc", "Config": "https://thinc.ai/docs/api-config#config", From 568768643e62dbc00662dd64f33b8919de6e4b13 Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Fri, 2 Oct 2020 01:50:13 +0200 Subject: [PATCH 08/25] Increment version [ci skip] --- spacy/about.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spacy/about.py b/spacy/about.py index 18fc77184..acf386ace 100644 --- a/spacy/about.py +++ b/spacy/about.py @@ -1,6 +1,6 @@ # fmt: off __title__ = "spacy-nightly" -__version__ = "3.0.0a28" +__version__ = "3.0.0a29" __release__ = True __download_url__ = "https://github.com/explosion/spacy-models/releases/download" __compatibility__ = "https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json" From 77e08c398f7242f62d8c25cb6814e057b2786bb3 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Fri, 2 Oct 2020 08:25:15 +0200 Subject: [PATCH 09/25] Switch reset value for set_morph to None --- spacy/tokens/token.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spacy/tokens/token.pyx b/spacy/tokens/token.pyx index 8099abd92..322c9a54c 100644 --- a/spacy/tokens/token.pyx +++ b/spacy/tokens/token.pyx @@ -223,7 +223,7 @@ cdef class Token: def set_morph(self, features): cdef hash_t key - if features is 0: + if features is None: self.c.morph = 0 else: if isinstance(features, int): From 65dfaa4f4b94a2602bb5af74677d7edae6d88ff6 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Fri, 2 Oct 2020 08:33:43 +0200 Subject: [PATCH 10/25] Also accept MorphAnalysis in set_morph --- spacy/tokens/token.pyx | 2 ++ 1 file changed, 2 insertions(+) diff --git a/spacy/tokens/token.pyx b/spacy/tokens/token.pyx index 322c9a54c..2075c3cc8 100644 --- a/spacy/tokens/token.pyx +++ b/spacy/tokens/token.pyx @@ -225,6 +225,8 @@ cdef class Token: cdef hash_t key if features is None: self.c.morph = 0 + elif isinstance(features, MorphAnalysis): + self.morph = features else: if isinstance(features, int): features = self.vocab.strings[features] From fd09e6b140c1334f6fc110f32dec8d2f93c927b1 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Fri, 2 Oct 2020 08:48:28 +0200 Subject: [PATCH 11/25] Update docs for Token.morph / Token.set_morph --- 
website/docs/api/token.md | 156 ++++++++++++---------- website/docs/usage/linguistic-features.md | 13 +- 2 files changed, 92 insertions(+), 77 deletions(-) diff --git a/website/docs/api/token.md b/website/docs/api/token.md index 068a1d2d2..b3bb63d6c 100644 --- a/website/docs/api/token.md +++ b/website/docs/api/token.md @@ -172,6 +172,25 @@ Get a neighboring token. | `i` | The relative position of the token to get. Defaults to `1`. ~~int~~ | | **RETURNS** | The token at position `self.doc[self.i+i]`. ~~Token~~ | +## Token.set_morph {#set_morph tag="method"} + +Set the morphological analysis from a UD FEATS string, hash value of a UD FEATS +string, features dict or `MorphAnalysis`. The value `None` can be used to reset +the morph to an unset state. + +> #### Example +> +> ```python +> doc = nlp("Give it back! He pleaded.") +> doc[0].set_morph("Mood=Imp|VerbForm=Fin") +> assert "Mood=Imp" in doc[0].morph +> assert doc[0].morph.get("Mood") == ["Imp"] +> ``` + +| Name | Description | +| -------- | --------------------------------------------------------------------------------- | +| features | The morphological features to set. ~~Union[int, dict, str, MorphAnalysis, None]~~ | + ## Token.is_ancestor {#is_ancestor tag="method" model="parser"} Check whether this token is a parent, grandparent, etc. of another in the @@ -392,74 +411,73 @@ The L2 norm of the token's vector representation. ## Attributes {#attributes} -| Name | Description | -| -------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `doc` | The parent document. ~~Doc~~ | -| `lex` 3 | The underlying lexeme. ~~Lexeme~~ | -| `sent` 2.0.12 | The sentence span that this token is a part of. ~~Span~~ | -| `text` | Verbatim text content. ~~str~~ | -| `text_with_ws` | Text content, with trailing space character if present. ~~str~~ | -| `whitespace_` | Trailing space character if present. ~~str~~ | -| `orth` | ID of the verbatim text content. ~~int~~ | -| `orth_` | Verbatim text content (identical to `Token.text`). Exists mostly for consistency with the other attributes. ~~str~~ | -| `vocab` | The vocab object of the parent `Doc`. ~~vocab~~ | -| `tensor` 2.1.7 | The tokens's slice of the parent `Doc`'s tensor. ~~numpy.ndarray~~ | -| `head` | The syntactic parent, or "governor", of this token. ~~Token~~ | -| `left_edge` | The leftmost token of this token's syntactic descendants. ~~Token~~ | -| `right_edge` | The rightmost token of this token's syntactic descendants. ~~Token~~ | -| `i` | The index of the token within the parent document. ~~int~~ | -| `ent_type` | Named entity type. ~~int~~ | -| `ent_type_` | Named entity type. ~~str~~ | -| `ent_iob` | IOB code of named entity tag. `3` means the token begins an entity, `2` means it is outside an entity, `1` means it is inside an entity, and `0` means no entity tag is set. ~~int~~ | -| `ent_iob_` | IOB code of named entity tag. "B" means the token begins an entity, "I" means it is inside an entity, "O" means it is outside an entity, and "" means no entity tag is set. ~~str~~ | -| `ent_kb_id` 2.2 | Knowledge base ID that refers to the named entity this token is a part of, if any. ~~int~~ | -| `ent_kb_id_` 2.2 | Knowledge base ID that refers to the named entity this token is a part of, if any. 
~~str~~ | -| `ent_id` | ID of the entity the token is an instance of, if any. Currently not used, but potentially for coreference resolution. ~~int~~ | -| `ent_id_` | ID of the entity the token is an instance of, if any. Currently not used, but potentially for coreference resolution. ~~str~~ | -| `lemma` | Base form of the token, with no inflectional suffixes. ~~int~~ | -| `lemma_` | Base form of the token, with no inflectional suffixes. ~~str~~ | -| `norm` | The token's norm, i.e. a normalized form of the token text. Can be set in the language's [tokenizer exceptions](/usage/adding-languages#tokenizer-exceptions). ~~int~~ | -| `norm_` | The token's norm, i.e. a normalized form of the token text. Can be set in the language's [tokenizer exceptions](/usage/adding-languages#tokenizer-exceptions). ~~str~~ | -| `lower` | Lowercase form of the token. ~~int~~ | -| `lower_` | Lowercase form of the token text. Equivalent to `Token.text.lower()`. ~~str~~ | +| Name | Description | +| -------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `doc` | The parent document. ~~Doc~~ | +| `lex` 3 | The underlying lexeme. ~~Lexeme~~ | +| `sent` 2.0.12 | The sentence span that this token is a part of. ~~Span~~ | +| `text` | Verbatim text content. ~~str~~ | +| `text_with_ws` | Text content, with trailing space character if present. ~~str~~ | +| `whitespace_` | Trailing space character if present. ~~str~~ | +| `orth` | ID of the verbatim text content. ~~int~~ | +| `orth_` | Verbatim text content (identical to `Token.text`). Exists mostly for consistency with the other attributes. ~~str~~ | +| `vocab` | The vocab object of the parent `Doc`. ~~vocab~~ | +| `tensor` 2.1.7 | The tokens's slice of the parent `Doc`'s tensor. ~~numpy.ndarray~~ | +| `head` | The syntactic parent, or "governor", of this token. ~~Token~~ | +| `left_edge` | The leftmost token of this token's syntactic descendants. ~~Token~~ | +| `right_edge` | The rightmost token of this token's syntactic descendants. ~~Token~~ | +| `i` | The index of the token within the parent document. ~~int~~ | +| `ent_type` | Named entity type. ~~int~~ | +| `ent_type_` | Named entity type. ~~str~~ | +| `ent_iob` | IOB code of named entity tag. `3` means the token begins an entity, `2` means it is outside an entity, `1` means it is inside an entity, and `0` means no entity tag is set. ~~int~~ | +| `ent_iob_` | IOB code of named entity tag. "B" means the token begins an entity, "I" means it is inside an entity, "O" means it is outside an entity, and "" means no entity tag is set. ~~str~~ | +| `ent_kb_id` 2.2 | Knowledge base ID that refers to the named entity this token is a part of, if any. ~~int~~ | +| `ent_kb_id_` 2.2 | Knowledge base ID that refers to the named entity this token is a part of, if any. ~~str~~ | +| `ent_id` | ID of the entity the token is an instance of, if any. Currently not used, but potentially for coreference resolution. ~~int~~ | +| `ent_id_` | ID of the entity the token is an instance of, if any. Currently not used, but potentially for coreference resolution. ~~str~~ | +| `lemma` | Base form of the token, with no inflectional suffixes. ~~int~~ | +| `lemma_` | Base form of the token, with no inflectional suffixes. ~~str~~ | +| `norm` | The token's norm, i.e. a normalized form of the token text. 
Can be set in the language's [tokenizer exceptions](/usage/adding-languages#tokenizer-exceptions). ~~int~~ | +| `norm_` | The token's norm, i.e. a normalized form of the token text. Can be set in the language's [tokenizer exceptions](/usage/adding-languages#tokenizer-exceptions). ~~str~~ | +| `lower` | Lowercase form of the token. ~~int~~ | +| `lower_` | Lowercase form of the token text. Equivalent to `Token.text.lower()`. ~~str~~ | | `shape` | Transform of the tokens's string to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example,`"Xxxx"`or`"dd"`. ~~int~~ | | `shape_` | Transform of the tokens's string to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example,`"Xxxx"`or`"dd"`. ~~str~~ | -| `prefix` | Hash value of a length-N substring from the start of the token. Defaults to `N=1`. ~~int~~ | -| `prefix_` | A length-N substring from the start of the token. Defaults to `N=1`. ~~str~~ | -| `suffix` | Hash value of a length-N substring from the end of the token. Defaults to `N=3`. ~~int~~ | -| `suffix_` | Length-N substring from the end of the token. Defaults to `N=3`. ~~str~~ | -| `is_alpha` | Does the token consist of alphabetic characters? Equivalent to `token.text.isalpha()`. ~~bool~~ | -| `is_ascii` | Does the token consist of ASCII characters? Equivalent to `all(ord(c) < 128 for c in token.text)`. ~~bool~~ | -| `is_digit` | Does the token consist of digits? Equivalent to `token.text.isdigit()`. ~~bool~~ | -| `is_lower` | Is the token in lowercase? Equivalent to `token.text.islower()`. ~~bool~~ | -| `is_upper` | Is the token in uppercase? Equivalent to `token.text.isupper()`. ~~bool~~ | -| `is_title` | Is the token in titlecase? Equivalent to `token.text.istitle()`. ~~bool~~ | -| `is_punct` | Is the token punctuation? ~~bool~~ | -| `is_left_punct` | Is the token a left punctuation mark, e.g. `"("` ? ~~bool~~ | -| `is_right_punct` | Is the token a right punctuation mark, e.g. `")"` ? ~~bool~~ | -| `is_space` | Does the token consist of whitespace characters? Equivalent to `token.text.isspace()`. ~~bool~~ | -| `is_bracket` | Is the token a bracket? ~~bool~~ | -| `is_quote` | Is the token a quotation mark? ~~bool~~ | -| `is_currency` 2.0.8 | Is the token a currency symbol? ~~bool~~ | -| `like_url` | Does the token resemble a URL? ~~bool~~ | -| `like_num` | Does the token represent a number? e.g. "10.9", "10", "ten", etc. ~~bool~~ | -| `like_email` | Does the token resemble an email address? ~~bool~~ | -| `is_oov` | Does the token have a word vector? ~~bool~~ | -| `is_stop` | Is the token part of a "stop list"? ~~bool~~ | -| `pos` | Coarse-grained part-of-speech from the [Universal POS tag set](https://universaldependencies.org/docs/u/pos/). ~~int~~ | -| `pos_` | Coarse-grained part-of-speech from the [Universal POS tag set](https://universaldependencies.org/docs/u/pos/). ~~str~~ | -| `tag` | Fine-grained part-of-speech. ~~int~~ | -| `tag_` | Fine-grained part-of-speech. ~~str~~ | -| `morph` 3 | Morphological analysis. ~~MorphAnalysis~~ | -| `morph_` 3 | Morphological analysis in the Universal Dependencies [FEATS](https://universaldependencies.org/format.html#morphological-annotation) format. ~~str~~ | -| `dep` | Syntactic dependency relation. ~~int~~ | -| `dep_` | Syntactic dependency relation. 
~~str~~ | -| `lang` | Language of the parent document's vocabulary. ~~int~~ | -| `lang_` | Language of the parent document's vocabulary. ~~str~~ | -| `prob` | Smoothed log probability estimate of token's word type (context-independent entry in the vocabulary). ~~float~~ | -| `idx` | The character offset of the token within the parent document. ~~int~~ | -| `sentiment` | A scalar value indicating the positivity or negativity of the token. ~~float~~ | -| `lex_id` | Sequential ID of the token's lexical type, used to index into tables, e.g. for word vectors. ~~int~~ | -| `rank` | Sequential ID of the token's lexical type, used to index into tables, e.g. for word vectors. ~~int~~ | -| `cluster` | Brown cluster ID. ~~int~~ | -| `_` | User space for adding custom [attribute extensions](/usage/processing-pipelines#custom-components-attributes). ~~Underscore~~ | +| `prefix` | Hash value of a length-N substring from the start of the token. Defaults to `N=1`. ~~int~~ | +| `prefix_` | A length-N substring from the start of the token. Defaults to `N=1`. ~~str~~ | +| `suffix` | Hash value of a length-N substring from the end of the token. Defaults to `N=3`. ~~int~~ | +| `suffix_` | Length-N substring from the end of the token. Defaults to `N=3`. ~~str~~ | +| `is_alpha` | Does the token consist of alphabetic characters? Equivalent to `token.text.isalpha()`. ~~bool~~ | +| `is_ascii` | Does the token consist of ASCII characters? Equivalent to `all(ord(c) < 128 for c in token.text)`. ~~bool~~ | +| `is_digit` | Does the token consist of digits? Equivalent to `token.text.isdigit()`. ~~bool~~ | +| `is_lower` | Is the token in lowercase? Equivalent to `token.text.islower()`. ~~bool~~ | +| `is_upper` | Is the token in uppercase? Equivalent to `token.text.isupper()`. ~~bool~~ | +| `is_title` | Is the token in titlecase? Equivalent to `token.text.istitle()`. ~~bool~~ | +| `is_punct` | Is the token punctuation? ~~bool~~ | +| `is_left_punct` | Is the token a left punctuation mark, e.g. `"("` ? ~~bool~~ | +| `is_right_punct` | Is the token a right punctuation mark, e.g. `")"` ? ~~bool~~ | +| `is_space` | Does the token consist of whitespace characters? Equivalent to `token.text.isspace()`. ~~bool~~ | +| `is_bracket` | Is the token a bracket? ~~bool~~ | +| `is_quote` | Is the token a quotation mark? ~~bool~~ | +| `is_currency` 2.0.8 | Is the token a currency symbol? ~~bool~~ | +| `like_url` | Does the token resemble a URL? ~~bool~~ | +| `like_num` | Does the token represent a number? e.g. "10.9", "10", "ten", etc. ~~bool~~ | +| `like_email` | Does the token resemble an email address? ~~bool~~ | +| `is_oov` | Does the token have a word vector? ~~bool~~ | +| `is_stop` | Is the token part of a "stop list"? ~~bool~~ | +| `pos` | Coarse-grained part-of-speech from the [Universal POS tag set](https://universaldependencies.org/docs/u/pos/). ~~int~~ | +| `pos_` | Coarse-grained part-of-speech from the [Universal POS tag set](https://universaldependencies.org/docs/u/pos/). ~~str~~ | +| `tag` | Fine-grained part-of-speech. ~~int~~ | +| `tag_` | Fine-grained part-of-speech. ~~str~~ | +| `morph` 3 | Morphological analysis. ~~MorphAnalysis~~ | +| `dep` | Syntactic dependency relation. ~~int~~ | +| `dep_` | Syntactic dependency relation. ~~str~~ | +| `lang` | Language of the parent document's vocabulary. ~~int~~ | +| `lang_` | Language of the parent document's vocabulary. ~~str~~ | +| `prob` | Smoothed log probability estimate of token's word type (context-independent entry in the vocabulary). 
~~float~~ | +| `idx` | The character offset of the token within the parent document. ~~int~~ | +| `sentiment` | A scalar value indicating the positivity or negativity of the token. ~~float~~ | +| `lex_id` | Sequential ID of the token's lexical type, used to index into tables, e.g. for word vectors. ~~int~~ | +| `rank` | Sequential ID of the token's lexical type, used to index into tables, e.g. for word vectors. ~~int~~ | +| `cluster` | Brown cluster ID. ~~int~~ | +| `_` | User space for adding custom [attribute extensions](/usage/processing-pipelines#custom-components-attributes). ~~Underscore~~ | diff --git a/website/docs/usage/linguistic-features.md b/website/docs/usage/linguistic-features.md index 25b6c2fac..7b9aaa0b9 100644 --- a/website/docs/usage/linguistic-features.md +++ b/website/docs/usage/linguistic-features.md @@ -56,16 +56,13 @@ create a surface form. Here are some examples: Morphological features are stored in the [`MorphAnalysis`](/api/morphanalysis) under `Token.morph`, which allows you to access individual morphological -features. The attribute `Token.morph_` provides the morphological analysis in -the Universal Dependencies -[FEATS](https://universaldependencies.org/format.html#morphological-annotation) -format. +features. > #### 📝 Things to try > > 1. Change "I" to "She". You should see that the morphological features change > and express that it's a pronoun in the third person. -> 2. Inspect `token.morph_` for the other tokens. +> 2. Inspect `token.morph` for the other tokens. ```python ### {executable="true"} @@ -75,7 +72,7 @@ nlp = spacy.load("en_core_web_sm") print("Pipeline:", nlp.pipe_names) doc = nlp("I was reading the paper.") token = doc[0] # 'I' -print(token.morph_) # 'Case=Nom|Number=Sing|Person=1|PronType=Prs' +print(token.morph) # 'Case=Nom|Number=Sing|Person=1|PronType=Prs' print(token.morph.get("PronType")) # ['Prs'] ``` @@ -91,7 +88,7 @@ import spacy nlp = spacy.load("de_core_news_sm") doc = nlp("Wo bist du?") # English: 'Where are you?' -print(doc[2].morph_) # 'Case=Nom|Number=Sing|Person=2|PronType=Prs' +print(doc[2].morph) # 'Case=Nom|Number=Sing|Person=2|PronType=Prs' print(doc[2].pos_) # 'PRON' ``` @@ -117,7 +114,7 @@ import spacy nlp = spacy.load("en_core_web_sm") doc = nlp("Where are you?") -print(doc[2].morph_) # 'Case=Nom|Person=2|PronType=Prs' +print(doc[2].morph) # 'Case=Nom|Person=2|PronType=Prs' print(doc[2].pos_) # 'PRON' ``` From 3908fff8994f43d23eed1d6cdbb3d37d46bbc3b4 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Fri, 2 Oct 2020 09:07:55 +0200 Subject: [PATCH 12/25] Remove tag map sidebar --- website/docs/usage/training.md | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/website/docs/usage/training.md b/website/docs/usage/training.md index c6c05ac5b..a7c23baa7 100644 --- a/website/docs/usage/training.md +++ b/website/docs/usage/training.md @@ -1124,17 +1124,6 @@ a dictionary with keyword arguments specifying the annotations, like `tags` or annotations, the model can be updated to learn a sentence of three words with their assigned part-of-speech tags. -> #### About the tag map -> -> The tag map is part of the vocabulary and defines the annotation scheme. 
If -> you're training a new pipeline, this will let you map the tags present in the -> treebank you train on to spaCy's tag scheme: -> -> ```python -> tag_map = {"N": {"pos": "NOUN"}, "V": {"pos": "VERB"}} -> vocab = Vocab(tag_map=tag_map) -> ``` - ```python words = ["I", "like", "stuff"] tags = ["NOUN", "VERB", "NOUN"] From 7670df04dd4838d55573abe32a734e51f78648f3 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Fri, 2 Oct 2020 10:09:03 +0200 Subject: [PATCH 13/25] Update Chinese usage docs --- website/docs/usage/models.md | 50 +++++++++++++++++------------------- 1 file changed, 24 insertions(+), 26 deletions(-) diff --git a/website/docs/usage/models.md b/website/docs/usage/models.md index 9b686c947..5e9bd688f 100644 --- a/website/docs/usage/models.md +++ b/website/docs/usage/models.md @@ -85,7 +85,8 @@ import the `MultiLanguage` class directly, or call ### Chinese language support {#chinese new=2.3} -The Chinese language class supports three word segmentation options: +The Chinese language class supports three word segmentation options, `char`, +`jieba` and `pkuseg`: > ```python > from spacy.lang.zh import Chinese @@ -95,11 +96,12 @@ The Chinese language class supports three word segmentation options: > > # Jieba > cfg = {"segmenter": "jieba"} -> nlp = Chinese(meta={"tokenizer": {"config": cfg}}) +> nlp = Chinese.from_config({"nlp": {"tokenizer": cfg}}) > > # PKUSeg with "default" model provided by pkuseg -> cfg = {"segmenter": "pkuseg", "pkuseg_model": "default"} -> nlp = Chinese(meta={"tokenizer": {"config": cfg}}) +> cfg = {"segmenter": "pkuseg"} +> nlp = Chinese.from_config({"nlp": {"tokenizer": cfg}}) +> nlp.tokenizer.initialize(pkuseg_model="default") > ``` 1. **Character segmentation:** Character segmentation is the default @@ -116,41 +118,34 @@ The Chinese language class supports three word segmentation options: In spaCy v3.0, the default Chinese word segmenter has switched from Jieba to -character segmentation. Also note that -[`pkuseg`](https://github.com/lancopku/pkuseg-python) doesn't yet ship with -pre-compiled wheels for Python 3.8. If you're running Python 3.8, you can -install it from our fork and compile it locally: - -```bash -$ pip install https://github.com/honnibal/pkuseg-python/archive/master.zip -``` +character segmentation. -The `meta` argument of the `Chinese` language class supports the following -following tokenizer config settings: +The `initialize` method for the Chinese tokenizer class supports the following +config settings for loading pkuseg models: -| Name | Description | -| ------------------ | --------------------------------------------------------------------------------------------------------------- | -| `segmenter` | Word segmenter: `char`, `jieba` or `pkuseg`. Defaults to `char`. ~~str~~ | -| `pkuseg_model` | **Required for `pkuseg`:** Name of a model provided by `pkuseg` or the path to a local model directory. ~~str~~ | -| `pkuseg_user_dict` | Optional path to a file with one word per line which overrides the default `pkuseg` user dictionary. ~~str~~ | +| Name | Description | +| ------------------ | ------------------------------------------------------------------------------------------------------------------------------------- | +| `pkuseg_model` | Name of a model provided by `pkuseg` or the path to a local model directory. ~~str~~ | +| `pkuseg_user_dict` | Optional path to a file with one word per line which overrides the default `pkuseg` user dictionary. Defaults to `"default"`. 
~~str~~ | ```python ### Examples +# Initialize the pkuseg tokenizer +cfg = {"segmenter": "pkuseg"} +nlp = Chinese.from_config({"nlp": {"tokenizer": cfg}}) + # Load "default" model -cfg = {"segmenter": "pkuseg", "pkuseg_model": "default"} -nlp = Chinese(config={"tokenizer": {"config": cfg}}) +nlp.tokenizer.initialize(pkuseg_model="default") # Load local model -cfg = {"segmenter": "pkuseg", "pkuseg_model": "/path/to/pkuseg_model"} -nlp = Chinese(config={"tokenizer": {"config": cfg}}) +nlp.tokenizer.initialize(pkuseg_model="/path/to/pkuseg_model") # Override the user directory -cfg = {"segmenter": "pkuseg", "pkuseg_model": "default", "pkuseg_user_dict": "/path"} -nlp = Chinese(config={"tokenizer": {"config": cfg}}) +nlp.tokenizer.initialize(pkuseg_model="default", pkuseg_user_dict="/path/to/user_dict") ``` You can also modify the user dictionary on-the-fly: @@ -185,8 +180,11 @@ from spacy.lang.zh import Chinese # Train pkuseg model pkuseg.train("train.utf8", "test.utf8", "/path/to/pkuseg_model") + # Load pkuseg model in spaCy Chinese tokenizer -nlp = Chinese(meta={"tokenizer": {"config": {"pkuseg_model": "/path/to/pkuseg_model", "require_pkuseg": True}}}) +cfg = {"segmenter": "pkuseg"} +nlp = Chinese.from_config({"nlp": {"tokenizer": cfg}}) +nlp.tokenizer.initialize(pkuseg_model="/path/to/pkuseg_model") ``` From 351f352cdc7ffe2d6c41675e45c1d75ec84180c8 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Fri, 2 Oct 2020 10:12:44 +0200 Subject: [PATCH 14/25] Update Japanese docs and pin for sudachipy --- setup.cfg | 2 +- website/docs/usage/models.md | 9 ++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/setup.cfg b/setup.cfg index 36ab64bd9..babe5fe8b 100644 --- a/setup.cfg +++ b/setup.cfg @@ -84,7 +84,7 @@ cuda102 = cupy-cuda102>=5.0.0b4,<9.0.0 # Language tokenizers with external dependencies ja = - sudachipy>=0.4.5 + sudachipy>=0.4.9 sudachidict_core>=20200330 ko = natto-py==0.9.0 diff --git a/website/docs/usage/models.md b/website/docs/usage/models.md index 5e9bd688f..6792f691c 100644 --- a/website/docs/usage/models.md +++ b/website/docs/usage/models.md @@ -199,20 +199,19 @@ nlp.tokenizer.initialize(pkuseg_model="/path/to/pkuseg_model") > > # Load SudachiPy with split mode B > cfg = {"split_mode": "B"} -> nlp = Japanese(meta={"tokenizer": {"config": cfg}}) +> nlp = Japanese.from_config({"nlp": {"tokenizer": cfg}}) > ``` The Japanese language class uses [SudachiPy](https://github.com/WorksApplications/SudachiPy) for word segmentation and part-of-speech tagging. The default Japanese language class and -the provided Japanese pipelines use SudachiPy split mode `A`. The `meta` -argument of the `Japanese` language class can be used to configure the split -mode to `A`, `B` or `C`. +the provided Japanese pipelines use SudachiPy split mode `A`. The tokenizer +config can be used to configure the split mode to `A`, `B` or `C`. If you run into errors related to `sudachipy`, which is currently under active -development, we suggest downgrading to `sudachipy==0.4.5`, which is the version +development, we suggest downgrading to `sudachipy==0.4.9`, which is the version used for training the current [Japanese pipelines](/models/ja). 
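The split mode switch described above can be tried out directly in Python. This is only a quick sketch: it assumes `sudachipy` and `sudachidict_core` are installed, and the compound noun is an arbitrary example, since the exact segmentation depends on the SudachiPy dictionary in use.

```python
from spacy.lang.ja import Japanese

# Default tokenizer uses split mode A (requires sudachipy + sudachidict_core)
nlp_a = Japanese()
# Same language class, configured for split mode B
nlp_b = Japanese.from_config({"nlp": {"tokenizer": {"split_mode": "B"}}})

text = "選挙管理委員会"  # "election administration committee"
print([t.text for t in nlp_a(text)])
print([t.text for t in nlp_b(text)])
```

The two configurations may segment compounds differently, so it usually makes sense to use the same split mode for training and inference.
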
From f83dfe62dadfce31697989d4c078500ae941a244 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Fri, 2 Oct 2020 10:17:26 +0200 Subject: [PATCH 15/25] Fix test --- spacy/tests/doc/test_morphanalysis.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spacy/tests/doc/test_morphanalysis.py b/spacy/tests/doc/test_morphanalysis.py index 56c80dd66..b44b13d4c 100644 --- a/spacy/tests/doc/test_morphanalysis.py +++ b/spacy/tests/doc/test_morphanalysis.py @@ -77,7 +77,7 @@ def test_morph_property(tokenizer): assert doc.to_array(["MORPH"])[0] != 0 # unset with token.morph - doc[0].set_morph(0) + doc[0].set_morph(None) assert doc.to_array(["MORPH"])[0] == 0 # empty morph is equivalent to "_" From c41a4332e4f21627db1a7c5e057c3cfd70f5fea7 Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Fri, 2 Oct 2020 11:37:56 +0200 Subject: [PATCH 16/25] Add test for custom data augmentation --- spacy/tests/training/test_training.py | 35 ++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/spacy/tests/training/test_training.py b/spacy/tests/training/test_training.py index c53042ef1..7d41c8908 100644 --- a/spacy/tests/training/test_training.py +++ b/spacy/tests/training/test_training.py @@ -7,11 +7,11 @@ from spacy.training.converters import json_to_docs from spacy.training.augment import create_orth_variants_augmenter from spacy.lang.en import English from spacy.tokens import Doc, DocBin -from spacy.lookups import Lookups from spacy.util import get_words_and_spaces, minibatch from thinc.api import compounding import pytest import srsly +import random from ..util import make_tempdir @@ -515,6 +515,39 @@ def test_make_orth_variants(doc): list(reader(nlp)) +@pytest.mark.filterwarnings("ignore::UserWarning") +def test_custom_data_augmentation(doc): + def create_spongebob_augmenter(randomize: bool = False): + def augment(nlp, example): + text = example.text + if randomize: + ch = [c.lower() if random.random() < 0.5 else c.upper() for c in text] + else: + ch = [c.lower() if i % 2 else c.upper() for i, c in enumerate(text)] + example_dict = example.to_dict() + doc = nlp.make_doc("".join(ch)) + example_dict["token_annotation"]["ORTH"] = [t.text for t in doc] + yield example + yield example.from_dict(doc, example_dict) + + return augment + + nlp = English() + with make_tempdir() as tmpdir: + output_file = tmpdir / "roundtrip.spacy" + DocBin(docs=[doc]).to_disk(output_file) + reader = Corpus(output_file, augmenter=create_spongebob_augmenter()) + corpus = list(reader(nlp)) + orig_text = "Sarah 's sister flew to Silicon Valley via London . " + augmented = "SaRaH 's sIsTeR FlEw tO SiLiCoN VaLlEy vIa lOnDoN . 
" + assert corpus[0].text == orig_text + assert corpus[0].reference.text == orig_text + assert corpus[0].predicted.text == orig_text + assert corpus[1].text == augmented + assert corpus[1].reference.text == augmented + assert corpus[1].predicted.text == augmented + + @pytest.mark.skip("Outdated") @pytest.mark.parametrize( "tokens_a,tokens_b,expected", From 32cdc1c4f45148e16b5b166b7c7b50077679cb47 Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Fri, 2 Oct 2020 11:38:03 +0200 Subject: [PATCH 17/25] Update docs [ci skip] --- website/docs/api/top-level.md | 6 +- website/docs/usage/training.md | 125 ++++++++++++++++++++++++++++++++- 2 files changed, 129 insertions(+), 2 deletions(-) diff --git a/website/docs/api/top-level.md b/website/docs/api/top-level.md index 876006774..a65a279a9 100644 --- a/website/docs/api/top-level.md +++ b/website/docs/api/top-level.md @@ -685,7 +685,11 @@ sequences in the batch. ## Augmenters {#augmenters source="spacy/training/augment.py" new="3"} - +Data augmentation is the process of applying small modifications to the training +data. It can be especially useful for punctuation and case replacement – for +example, if your corpus only uses smart quotes and you want to include +variations using regular quotes, or to make the model less sensitive to +capitalization by including a mix of capitalized and lowercase examples. See the [usage guide](/usage/training#data-augmentation) for details and examples. ### spacy.orth_variants.v1 {#orth_variants tag="registered function"} diff --git a/website/docs/usage/training.md b/website/docs/usage/training.md index c6c05ac5b..5c584cfd3 100644 --- a/website/docs/usage/training.md +++ b/website/docs/usage/training.md @@ -1011,9 +1011,132 @@ def filter_batch(size: int) -> Callable[[Iterable[Example]], Iterator[List[Examp +### Data augmentation {#data-augmentation} + +Data augmentation is the process of applying small **modifications** to the +training data. It can be especially useful for punctuation and case replacement +– for example, if your corpus only uses smart quotes and you want to include +variations using regular quotes, or to make the model less sensitive to +capitalization by including a mix of capitalized and lowercase examples. + +The easiest way to use data augmentation during training is to provide an +`augmenter` to the training corpus, e.g. in the `[corpora.train]` section of +your config. The built-in [`orth_variants`](/api/top-level#orth_variants) +augmenter creates a data augmentation callback that uses orth-variant +replacement. + +```ini +### config.cfg (excerpt) {highlight="8,14"} +[corpora.train] +@readers = "spacy.Corpus.v1" +path = ${paths.train} +gold_preproc = false +max_length = 0 +limit = 0 + +[corpora.train.augmenter] +@augmenters = "spacy.orth_variants.v1" +# Percentage of texts that will be augmented / lowercased +level = 0.1 +lower = 0.5 + +[corpora.train.augmenter.orth_variants] +@readers = "srsly.read_json.v1" +path = "corpus/orth_variants.json" +``` + +The `orth_variants` argument lets you pass in a dictionary of replacement rules, +typically loaded from a JSON file. There are two types of orth variant rules: +`"single"` for single tokens that should be replaced (e.g. hyphens) and +`"paired"` for pairs of tokens (e.g. quotes). 
+ + +```json +### orth_variants.json +{ + "single": [{ "tags": ["NFP"], "variants": ["…", "..."] }], + "paired": [{ "tags": ["``", "''"], "variants": [["'", "'"], ["‘", "’"]] }] +} +``` + + + +```json +https://github.com/explosion/spacy-lookups-data/blob/master/spacy_lookups_data/data/en_orth_variants.json +``` + +```json +https://github.com/explosion/spacy-lookups-data/blob/master/spacy_lookups_data/data/de_orth_variants.json +``` + + + + + +When adding data augmentation, keep in mind that it typically only makes sense +to apply it to the **training corpus**, not the development data. + + + +#### Writing custom data augmenters {#data-augmentation-custom} + +Using the [`@spacy.augmenters`](/api/top-level#registry) registry, you can also +register your own data augmentation callbacks. The callback should be a function +that takes the current `nlp` object and a training [`Example`](/api/example) and +yields `Example` objects. Keep in mind that the augmenter should yield **all +examples** you want to use in your corpus, not only the augmented examples +(unless you want to augment all examples). + +Here'a an example of a custom augmentation callback that produces text variants +in ["SpOnGeBoB cAsE"](https://knowyourmeme.com/memes/mocking-spongebob). The +registered function takes one argument `randomize` that can be set via the +config and decides whether the uppercase/lowercase transformation is applied +randomly or not. The augmenter yields two `Example` objects: the original +example and the augmented example. + +> #### config.cfg +> +> ```ini +> [corpora.train.augmenter] +> @augmenters = "spongebob_augmenter.v1" +> randomize = false +> ``` + +```python +import spacy +import random + +@spacy.registry.augmenters("spongebob_augmenter.v1") +def create_augmenter(randomize: bool = False): + def augment(nlp, example): + text = example.text + if randomize: + # Randomly uppercase/lowercase characters + chars = [c.lower() if random.random() < 0.5 else c.upper() for c in text] + else: + # Uppercase followed by lowercase + chars = [c.lower() if i % 2 else c.upper() for i, c in enumerate(text)] + # Create augmented training example + example_dict = example.to_dict() + doc = nlp.make_doc("".join(chars)) + example_dict["token_annotation"]["ORTH"] = [t.text for t in doc] + # Original example followed by augmented example + yield example + yield example.from_dict(doc, example_dict) + + return augment +``` + +An easy way to create modified `Example` objects is to use the +[`Example.from_dict`](/api/example#from_dict) method with a new reference +[`Doc`](/api/doc) created from the modified text. In this case, only the +capitalization changes, so only the `ORTH` values of the tokens will be +different between the original and augmented examples. 
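For quick experiments outside the training config, the same callback can also be
passed straight to a [`Corpus`](/api/corpus) reader in Python. This is a minimal
sketch: the `.spacy` path is a placeholder, and `create_augmenter` is the
registered function from the example above.

```python
from spacy.lang.en import English
from spacy.training import Corpus

nlp = English()
# Placeholder path to a .spacy file created with DocBin
corpus = Corpus("./corpus/train.spacy", augmenter=create_augmenter(randomize=False))
for example in corpus(nlp):
    print(example.reference.text)
```

Because the augmenter yields both the original and the augmented example, each
text should appear twice: once unchanged and once in alternating case.
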
+ + + ## Parallel & distributed training with Ray {#parallel-training} > #### Installation From df06f7a7921533fd6dd05b8ff17d8cb46867c603 Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Fri, 2 Oct 2020 13:24:33 +0200 Subject: [PATCH 18/25] Update docs [ci skip] --- website/docs/api/language.md | 10 +-- website/docs/api/lemmatizer.md | 3 +- website/docs/api/token.md | 4 +- website/docs/api/tokenizer.md | 15 ++--- website/docs/api/top-level.md | 2 +- website/docs/usage/models.md | 74 ++++++++++++++++------ website/docs/usage/processing-pipelines.md | 4 ++ website/docs/usage/saving-loading.md | 2 +- website/docs/usage/spacy-101.md | 4 +- website/docs/usage/training.md | 14 ++-- 10 files changed, 88 insertions(+), 44 deletions(-) diff --git a/website/docs/api/language.md b/website/docs/api/language.md index 9f0612b2b..6257199c9 100644 --- a/website/docs/api/language.md +++ b/website/docs/api/language.md @@ -8,8 +8,8 @@ source: spacy/language.py Usually you'll load this once per process as `nlp` and pass the instance around your application. The `Language` class is created when you call [`spacy.load`](/api/top-level#spacy.load) and contains the shared vocabulary and -[language data](/usage/adding-languages), optional binary weights, e.g. provided -by a [trained pipeline](/models), and the +[language data](/usage/linguistic-features#language-data), optional binary +weights, e.g. provided by a [trained pipeline](/models), and the [processing pipeline](/usage/processing-pipelines) containing components like the tagger or parser that are called on a document in order. You can also add your own processing pipeline components that take a `Doc` object, modify it and @@ -210,7 +210,9 @@ settings defined in the [`[initialize]`](/api/data-formats#config-initialize) config block to set up the vocabulary, load in vectors and tok2vec weights and pass optional arguments to the `initialize` methods implemented by pipeline components or the tokenizer. This method is typically called automatically when -you run [`spacy train`](/api/cli#train). +you run [`spacy train`](/api/cli#train). See the usage guide on the +[config lifecycle](/usage/training#config-lifecycle) and +[initialization](/usage/training#initialization) for details. `get_examples` should be a function that returns an iterable of [`Example`](/api/example) objects. The data examples can either be the full @@ -928,7 +930,7 @@ Serialize the current state to a binary string. Load state from a binary string. Note that this method is commonly used via the subclasses like `English` or `German` to make language-specific functionality -like the [lexical attribute getters](/usage/adding-languages#lex-attrs) +like the [lexical attribute getters](/usage/linguistic-features#language-data) available to the loaded object. > #### Example diff --git a/website/docs/api/lemmatizer.md b/website/docs/api/lemmatizer.md index 3693429c4..f980756e5 100644 --- a/website/docs/api/lemmatizer.md +++ b/website/docs/api/lemmatizer.md @@ -130,8 +130,7 @@ applied to the `Doc` in order. ## Lemmatizer.lookup_lemmatize {#lookup_lemmatize tag="method"} Lemmatize a token using a lookup-based approach. If no lemma is found, the -original string is returned. Languages can provide a -[lookup table](/usage/adding-languages#lemmatizer) via the `Lookups`. +original string is returned. 
| Name | Description | | ----------- | --------------------------------------------------- | diff --git a/website/docs/api/token.md b/website/docs/api/token.md index b3bb63d6c..e7e66e931 100644 --- a/website/docs/api/token.md +++ b/website/docs/api/token.md @@ -437,8 +437,8 @@ The L2 norm of the token's vector representation. | `ent_id_` | ID of the entity the token is an instance of, if any. Currently not used, but potentially for coreference resolution. ~~str~~ | | `lemma` | Base form of the token, with no inflectional suffixes. ~~int~~ | | `lemma_` | Base form of the token, with no inflectional suffixes. ~~str~~ | -| `norm` | The token's norm, i.e. a normalized form of the token text. Can be set in the language's [tokenizer exceptions](/usage/adding-languages#tokenizer-exceptions). ~~int~~ | -| `norm_` | The token's norm, i.e. a normalized form of the token text. Can be set in the language's [tokenizer exceptions](/usage/adding-languages#tokenizer-exceptions). ~~str~~ | +| `norm` | The token's norm, i.e. a normalized form of the token text. Can be set in the language's [tokenizer exceptions](/usage/linguistic-features#language-data). ~~int~~ | +| `norm_` | The token's norm, i.e. a normalized form of the token text. Can be set in the language's [tokenizer exceptions](/usage/linguistic-features#language-data). ~~str~~ | | `lower` | Lowercase form of the token. ~~int~~ | | `lower_` | Lowercase form of the token text. Equivalent to `Token.text.lower()`. ~~str~~ | | `shape` | Transform of the tokens's string to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example,`"Xxxx"`or`"dd"`. ~~int~~ | diff --git a/website/docs/api/tokenizer.md b/website/docs/api/tokenizer.md index 8ea5a1f65..8809c10bc 100644 --- a/website/docs/api/tokenizer.md +++ b/website/docs/api/tokenizer.md @@ -22,9 +22,8 @@ like punctuation and special case rules from the ## Tokenizer.\_\_init\_\_ {#init tag="method"} -Create a `Tokenizer` to create `Doc` objects given unicode text. For examples -of how to construct a custom tokenizer with different tokenization rules, see -the +Create a `Tokenizer` to create `Doc` objects given unicode text. For examples of +how to construct a custom tokenizer with different tokenization rules, see the [usage documentation](https://spacy.io/usage/linguistic-features#native-tokenizers). > #### Example @@ -87,7 +86,7 @@ Tokenize a stream of texts. | ------------ | ------------------------------------------------------------------------------------ | | `texts` | A sequence of unicode texts. ~~Iterable[str]~~ | | `batch_size` | The number of texts to accumulate in an internal buffer. Defaults to `1000`. ~~int~~ | -| **YIELDS** | The tokenized `Doc` objects, in order. ~~Doc~~ | +| **YIELDS** | The tokenized `Doc` objects, in order. ~~Doc~~ | ## Tokenizer.find_infix {#find_infix tag="method"} @@ -121,10 +120,10 @@ if no suffix rules match. ## Tokenizer.add_special_case {#add_special_case tag="method"} Add a special-case tokenization rule. This mechanism is also used to add custom -tokenizer exceptions to the language data. See the usage guide on -[adding languages](/usage/adding-languages#tokenizer-exceptions) and -[linguistic features](/usage/linguistic-features#special-cases) for more details -and examples. +tokenizer exceptions to the language data. 
See the usage guide on the +[languages data](/usage/linguistic-features#language-data) and +[tokenizer special cases](/usage/linguistic-features#special-cases) for more +details and examples. > #### Example > diff --git a/website/docs/api/top-level.md b/website/docs/api/top-level.md index a65a279a9..d7273b651 100644 --- a/website/docs/api/top-level.md +++ b/website/docs/api/top-level.md @@ -827,7 +827,7 @@ utilities. ### util.get_lang_class {#util.get_lang_class tag="function"} Import and load a `Language` class. Allows lazy-loading -[language data](/usage/adding-languages) and importing languages using the +[language data](/usage/linguistic-features#language-data) and importing languages using the two-letter language code. To add a language code for a custom language class, you can register it using the [`@registry.languages`](/api/top-level#registry) decorator. diff --git a/website/docs/usage/models.md b/website/docs/usage/models.md index 6792f691c..dc41385f2 100644 --- a/website/docs/usage/models.md +++ b/website/docs/usage/models.md @@ -30,7 +30,7 @@ import QuickstartModels from 'widgets/quickstart-models.js' ## Language support {#languages} spaCy currently provides support for the following languages. You can help by -[improving the existing language data](/usage/adding-languages#language-data) +improving the existing [language data](/usage/linguistic-features#language-data) and extending the tokenization patterns. [See here](https://github.com/explosion/spaCy/issues/3056) for details on how to contribute to development. @@ -83,55 +83,81 @@ To train a pipeline using the neutral multi-language class, you can set import the `MultiLanguage` class directly, or call [`spacy.blank("xx")`](/api/top-level#spacy.blank) for lazy-loading. -### Chinese language support {#chinese new=2.3} +### Chinese language support {#chinese new="2.3"} The Chinese language class supports three word segmentation options, `char`, -`jieba` and `pkuseg`: +`jieba` and `pkuseg`. +> #### Manual setup +> > ```python > from spacy.lang.zh import Chinese > > # Character segmentation (default) > nlp = Chinese() -> > # Jieba > cfg = {"segmenter": "jieba"} > nlp = Chinese.from_config({"nlp": {"tokenizer": cfg}}) -> > # PKUSeg with "default" model provided by pkuseg > cfg = {"segmenter": "pkuseg"} > nlp = Chinese.from_config({"nlp": {"tokenizer": cfg}}) > nlp.tokenizer.initialize(pkuseg_model="default") > ``` -1. **Character segmentation:** Character segmentation is the default - segmentation option. It's enabled when you create a new `Chinese` language - class or call `spacy.blank("zh")`. -2. **Jieba:** `Chinese` uses [Jieba](https://github.com/fxsjy/jieba) for word - segmentation with the tokenizer option `{"segmenter": "jieba"}`. -3. **PKUSeg**: As of spaCy v2.3.0, support for - [PKUSeg](https://github.com/lancopku/PKUSeg-python) has been added to support - better segmentation for Chinese OntoNotes and the provided - [Chinese pipelines](/models/zh). Enable PKUSeg with the tokenizer option - `{"segmenter": "pkuseg"}`. 
+```ini +### config.cfg +[nlp.tokenizer] +@tokenizers = "spacy.zh.ChineseTokenizer" +segmenter = "char" +``` - +| Segmenter | Description | +| --------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `char` | **Character segmentation:** Character segmentation is the default segmentation option. It's enabled when you create a new `Chinese` language class or call `spacy.blank("zh")`. | +| `jieba` | **Jieba:** to use [Jieba](https://github.com/fxsjy/jieba) for word segmentation, you can set the option `segmenter` to `"jieba"`. | +| `pkuseg` | **PKUSeg**: As of spaCy v2.3.0, support for [PKUSeg](https://github.com/lancopku/PKUSeg-python) has been added to support better segmentation for Chinese OntoNotes and the provided [Chinese pipelines](/models/zh). Enable PKUSeg by setting tokenizer option `segmenter` to `"pkuseg"`. | -In spaCy v3.0, the default Chinese word segmenter has switched from Jieba to -character segmentation. + + +In v3.0, the default word segmenter has switched from Jieba to character +segmentation. Because the `pkuseg` segmenter depends on a model that can be +loaded from a file, the model is loaded on +[initialization](/usage/training#config-lifecycle) (typically before training). +This ensures that your packaged Chinese model doesn't depend on a local path at +runtime. The `initialize` method for the Chinese tokenizer class supports the following -config settings for loading pkuseg models: +config settings for loading `pkuseg` models: | Name | Description | | ------------------ | ------------------------------------------------------------------------------------------------------------------------------------- | | `pkuseg_model` | Name of a model provided by `pkuseg` or the path to a local model directory. ~~str~~ | | `pkuseg_user_dict` | Optional path to a file with one word per line which overrides the default `pkuseg` user dictionary. Defaults to `"default"`. ~~str~~ | +The initialization settings are typically provided in the +[training config](/usage/training#config) and the data is loaded in before +training and serialized with the model. This allows you to load the data from a +local path and save out your pipeline and config, without requiring the same +local path at runtime. See the usage guide on the +[config lifecycle](/usage/training#config-lifecycle) for more background on +this. + +```ini +### config.cfg +[initialize] + +[initialize.tokenizer] +pkuseg_model = "/path/to/model" +pkuseg_user_dict = "default" +``` + +You can also initialize the tokenizer for a blank language class by calling its +`initialize` method: + ```python ### Examples # Initialize the pkuseg tokenizer @@ -191,12 +217,13 @@ nlp.tokenizer.initialize(pkuseg_model="/path/to/pkuseg_model") ### Japanese language support {#japanese new=2.3} +> #### Manual setup +> > ```python > from spacy.lang.ja import Japanese > > # Load SudachiPy with split mode A (default) > nlp = Japanese() -> > # Load SudachiPy with split mode B > cfg = {"split_mode": "B"} > nlp = Japanese.from_config({"nlp": {"tokenizer": cfg}}) @@ -208,6 +235,13 @@ segmentation and part-of-speech tagging. The default Japanese language class and the provided Japanese pipelines use SudachiPy split mode `A`. The tokenizer config can be used to configure the split mode to `A`, `B` or `C`. 
+```ini +### config.cfg +[nlp.tokenizer] +@tokenizers = "spacy.ja.JapaneseTokenizer" +split_mode = "A" +``` + If you run into errors related to `sudachipy`, which is currently under active diff --git a/website/docs/usage/processing-pipelines.md b/website/docs/usage/processing-pipelines.md index 334ed03bd..c98bd08bc 100644 --- a/website/docs/usage/processing-pipelines.md +++ b/website/docs/usage/processing-pipelines.md @@ -895,6 +895,10 @@ the name. Registered functions can also take **arguments** by the way that can be defined in the config as well – you can read more about this in the docs on [training with custom code](/usage/training#custom-code). +### Initializing components with data {#initialization} + + + ### Python type hints and pydantic validation {#type-hints new="3"} spaCy's configs are powered by our machine learning library Thinc's diff --git a/website/docs/usage/saving-loading.md b/website/docs/usage/saving-loading.md index 06fb18591..f8a5eea2a 100644 --- a/website/docs/usage/saving-loading.md +++ b/website/docs/usage/saving-loading.md @@ -291,7 +291,7 @@ installed in the same environment – that's it. | Entry point | Description | | ------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | [`spacy_factories`](#entry-points-components) | Group of entry points for pipeline component factories, keyed by component name. Can be used to expose custom components defined by another package. | -| [`spacy_languages`](#entry-points-languages) | Group of entry points for custom [`Language` subclasses](/usage/adding-languages), keyed by language shortcut. | +| [`spacy_languages`](#entry-points-languages) | Group of entry points for custom [`Language` subclasses](/usage/linguistic-features#language-data), keyed by language shortcut. | | `spacy_lookups` 2.2 | Group of entry points for custom [`Lookups`](/api/lookups), including lemmatizer data. Used by spaCy's [`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data) package. | | [`spacy_displacy_colors`](#entry-points-displacy) 2.2 | Group of entry points of custom label colors for the [displaCy visualizer](/usage/visualizers#ent). The key name doesn't matter, but it should point to a dict of labels and color values. Useful for custom models that predict different entity types. | diff --git a/website/docs/usage/spacy-101.md b/website/docs/usage/spacy-101.md index cd1b2cb0c..5d7c7d7a5 100644 --- a/website/docs/usage/spacy-101.md +++ b/website/docs/usage/spacy-101.md @@ -200,7 +200,7 @@ import Tokenization101 from 'usage/101/\_tokenization.md' To learn more about how spaCy's tokenization rules work in detail, how to **customize and replace** the default tokenizer and how to **add language-specific data**, see the usage guides on -[adding languages](/usage/adding-languages) and +[language data](/usage/linguistic-features#language-data) and [customizing the tokenizer](/usage/linguistic-features#tokenization). @@ -479,7 +479,7 @@ find a "Suggest edits" link at the bottom of each page that points you to the source. 
Another way of getting involved is to help us improve the -[language data](/usage/adding-languages#language-data) – especially if you +[language data](/usage/linguistic-features#language-data) – especially if you happen to speak one of the languages currently in [alpha support](/usage/models#languages). Even adding simple tokenizer exceptions, stop words or lemmatizer data can make a big difference. It will diff --git a/website/docs/usage/training.md b/website/docs/usage/training.md index 54daa6a15..1dd57fd4a 100644 --- a/website/docs/usage/training.md +++ b/website/docs/usage/training.md @@ -216,7 +216,9 @@ The initialization settings are only loaded and used when [`nlp.initialize`](/api/language#initialize) is called (typically right before training). This allows you to set up your pipeline using local data resources and custom functions, and preserve the information in your config – but without -requiring it to be available at runtime +requiring it to be available at runtime. You can also use this mechanism to +provide data paths to custom pipeline components and custom tokenizers – see the +section on [custom initialization](#initialization) for details. ### Overwriting config settings on the command line {#config-overrides} @@ -815,9 +817,9 @@ def MyModel(output_width: int) -> Model[List[Doc], List[Floats2d]]: return create_model(output_width) ``` - + + ## Data utilities {#data} @@ -1135,7 +1137,11 @@ An easy way to create modified `Example` objects is to use the capitalization changes, so only the `ORTH` values of the tokens will be different between the original and augmented examples. - +Note that if your data augmentation strategy involves changing the tokenization +(for instance, removing or adding tokens) and your training examples include +token-based annotations like the dependency parse or entity labels, you'll need +to take care to adjust the `Example` object so its annotations match and remain +valid. 
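One conservative way to keep the annotations valid, following the same
`to_dict` / `from_dict` pattern as above, is to only yield an augmented variant
if the modified text still produces the same number of tokens. The registry
name and the character replacement below are placeholders rather than part of
spaCy.

```python
import spacy

@spacy.registry.augmenters("dash_augmenter.v1")
def create_dash_augmenter():
    def augment(nlp, example):
        # Always keep the original example
        yield example
        # Placeholder character-level replacement: en dash -> plain hyphen
        doc = nlp.make_doc(example.text.replace("–", "-"))
        example_dict = example.to_dict()
        # Only yield the variant if the tokenization is unchanged, so tags,
        # heads and entity labels still refer to the right tokens
        if len(doc) == len(example_dict["token_annotation"]["ORTH"]):
            example_dict["token_annotation"]["ORTH"] = [t.text for t in doc]
            yield example.from_dict(doc, example_dict)

    return augment
```

If the tokenization does change, the token-level annotations need to be rebuilt
against the new tokens instead of reusing the original values.
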
## Parallel & distributed training with Ray {#parallel-training} From 22158dc24a8f78775d82060767788cdafc392aac Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Fri, 2 Oct 2020 15:06:16 +0200 Subject: [PATCH 19/25] Add morphologizer to quickstart template --- spacy/cli/templates/quickstart_training.jinja | 31 ++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/spacy/cli/templates/quickstart_training.jinja b/spacy/cli/templates/quickstart_training.jinja index 69dac0aa1..3bd237b0a 100644 --- a/spacy/cli/templates/quickstart_training.jinja +++ b/spacy/cli/templates/quickstart_training.jinja @@ -37,6 +37,22 @@ tokenizer_config = {"use_fast": true} window = 128 stride = 96 +{% if "morphologizer" in components %} +[components.morphologizer] +factory = "morphologizer" + +[components.morphologizer.model] +@architectures = "spacy.Tagger.v1" +nO = null + +[components.morphologizer.model.tok2vec] +@architectures = "spacy-transformers.TransformerListener.v1" +grad_factor = 1.0 + +[components.morphologizer.model.tok2vec.pooling] +@layers = "reduce_mean.v1" +{%- endif %} + {% if "tagger" in components %} [components.tagger] factory = "tagger" @@ -166,6 +182,19 @@ depth = {{ 4 if optimize == "efficiency" else 8 }} window_size = 1 maxout_pieces = 3 +{% if "morphologizer" in components %} +[components.morphologizer] +factory = "morphologizer" + +[components.morphologizer.model] +@architectures = "spacy.Tagger.v1" +nO = null + +[components.morphologizer.model.tok2vec] +@architectures = "spacy.Tok2VecListener.v1" +width = ${components.tok2vec.model.encode.width} +{%- endif %} + {% if "tagger" in components %} [components.tagger] factory = "tagger" @@ -257,7 +286,7 @@ no_output_layer = false {% endif %} {% for pipe in components %} -{% if pipe not in ["tagger", "parser", "ner", "textcat", "entity_linker"] %} +{% if pipe not in ["tagger", "morphologizer", "parser", "ner", "textcat", "entity_linker"] %} {# Other components defined by the user: we just assume they're factories #} [components.{{ pipe }}] factory = "{{ pipe }}" From f0b30aedade0d9b3cebc7cb7fabd905b9eecd52d Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Fri, 2 Oct 2020 15:42:36 +0200 Subject: [PATCH 20/25] Make lemmatizers use initialize logic (#6182) * Make lemmatizer use initialize logic and tidy up * Fix typo * Raise for uninitialized tables --- spacy/errors.py | 15 +- spacy/lang/bn/__init__.py | 14 +- spacy/lang/el/__init__.py | 14 +- spacy/lang/en/__init__.py | 14 +- spacy/lang/fa/__init__.py | 14 +- spacy/lang/fr/__init__.py | 14 +- spacy/lang/nb/__init__.py | 14 +- spacy/lang/nl/__init__.py | 15 +- spacy/lang/pl/__init__.py | 13 +- spacy/lang/ru/__init__.py | 13 +- spacy/lang/sv/__init__.py | 14 +- spacy/lang/uk/__init__.py | 13 +- spacy/pipeline/lemmatizer.py | 182 ++++++++++++------------ spacy/tests/lang/test_lemmatizers.py | 23 ++- spacy/tests/pipeline/test_lemmatizer.py | 86 +++++------ website/docs/api/lemmatizer.md | 59 ++++++-- 16 files changed, 236 insertions(+), 281 deletions(-) diff --git a/spacy/errors.py b/spacy/errors.py index 881a697f6..4edd1cbae 100644 --- a/spacy/errors.py +++ b/spacy/errors.py @@ -477,6 +477,8 @@ class Errors: E201 = ("Span index out of range.") # TODO: fix numbering after merging develop into master + E912 = ("Failed to initialize lemmatizer. Missing lemmatizer table(s) found " + "for mode '{mode}'. Required tables: {tables}. Found: {found}.") E913 = ("Corpus path can't be None. 
Maybe you forgot to define it in your " "config.cfg or override it on the CLI?") E914 = ("Executing {name} callback failed. Expected the function to " @@ -556,10 +558,10 @@ class Errors: E953 = ("Mismatched IDs received by the Tok2Vec listener: {id1} vs. {id2}") E954 = ("The Tok2Vec listener did not receive any valid input from an upstream " "component.") - E955 = ("Can't find table(s) '{table}' for language '{lang}' in " - "spacy-lookups-data. If you want to initialize a blank nlp object, " - "make sure you have the spacy-lookups-data package installed or " - "remove the [initialize.lookups] block from your config.") + E955 = ("Can't find table(s) {table} for language '{lang}' in " + "spacy-lookups-data. Make sure you have the package installed or " + "provide your own lookup tables if no default lookups are available " + "for your language.") E956 = ("Can't find component '{name}' in [components] block in the config. " "Available components: {opts}") E957 = ("Writing directly to Language.factories isn't needed anymore in " @@ -685,9 +687,8 @@ class Errors: E1002 = ("Span index out of range.") E1003 = ("Unsupported lemmatizer mode '{mode}'.") E1004 = ("Missing lemmatizer table(s) found for lemmatizer mode '{mode}'. " - "Required tables '{tables}', found '{found}'. If you are not " - "providing custom lookups, make sure you have the package " - "spacy-lookups-data installed.") + "Required tables: {tables}. Found: {found}. Maybe you forgot to " + "call nlp.initialize() to load in the data?") E1005 = ("Unable to set attribute '{attr}' in tokenizer exception for " "'{chunk}'. Tokenizer exceptions are only allowed to specify " "`ORTH` and `NORM`.") diff --git a/spacy/lang/bn/__init__.py b/spacy/lang/bn/__init__.py index 923e29a17..879229888 100644 --- a/spacy/lang/bn/__init__.py +++ b/spacy/lang/bn/__init__.py @@ -4,7 +4,6 @@ from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES, TOKENIZER_INFIXES from .stop_words import STOP_WORDS from ...language import Language -from ...lookups import Lookups from ...pipeline import Lemmatizer @@ -24,18 +23,11 @@ class Bengali(Language): @Bengali.factory( "lemmatizer", assigns=["token.lemma"], - default_config={"model": None, "mode": "rule", "lookups": None}, + default_config={"model": None, "mode": "rule"}, default_score_weights={"lemma_acc": 1.0}, ) -def make_lemmatizer( - nlp: Language, - model: Optional[Model], - name: str, - mode: str, - lookups: Optional[Lookups], -): - lookups = Lemmatizer.load_lookups(nlp.lang, mode, lookups) - return Lemmatizer(nlp.vocab, model, name, mode=mode, lookups=lookups) +def make_lemmatizer(nlp: Language, model: Optional[Model], name: str, mode: str): + return Lemmatizer(nlp.vocab, model, name, mode=mode) __all__ = ["Bengali"] diff --git a/spacy/lang/el/__init__.py b/spacy/lang/el/__init__.py index 1a7b19914..53069334e 100644 --- a/spacy/lang/el/__init__.py +++ b/spacy/lang/el/__init__.py @@ -7,7 +7,6 @@ from .lex_attrs import LEX_ATTRS from .syntax_iterators import SYNTAX_ITERATORS from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES, TOKENIZER_INFIXES from .lemmatizer import GreekLemmatizer -from ...lookups import Lookups from ...language import Language @@ -29,18 +28,11 @@ class Greek(Language): @Greek.factory( "lemmatizer", assigns=["token.lemma"], - default_config={"model": None, "mode": "rule", "lookups": None}, + default_config={"model": None, "mode": "rule"}, default_score_weights={"lemma_acc": 1.0}, ) -def make_lemmatizer( - nlp: Language, 
- model: Optional[Model], - name: str, - mode: str, - lookups: Optional[Lookups], -): - lookups = GreekLemmatizer.load_lookups(nlp.lang, mode, lookups) - return GreekLemmatizer(nlp.vocab, model, name, mode=mode, lookups=lookups) +def make_lemmatizer(nlp: Language, model: Optional[Model], name: str, mode: str): + return GreekLemmatizer(nlp.vocab, model, name, mode=mode) __all__ = ["Greek"] diff --git a/spacy/lang/en/__init__.py b/spacy/lang/en/__init__.py index cc01f1aea..3a3ebeefd 100644 --- a/spacy/lang/en/__init__.py +++ b/spacy/lang/en/__init__.py @@ -8,7 +8,6 @@ from .syntax_iterators import SYNTAX_ITERATORS from .punctuation import TOKENIZER_INFIXES from .lemmatizer import EnglishLemmatizer from ...language import Language -from ...lookups import Lookups class EnglishDefaults(Language.Defaults): @@ -27,18 +26,11 @@ class English(Language): @English.factory( "lemmatizer", assigns=["token.lemma"], - default_config={"model": None, "mode": "rule", "lookups": None}, + default_config={"model": None, "mode": "rule"}, default_score_weights={"lemma_acc": 1.0}, ) -def make_lemmatizer( - nlp: Language, - model: Optional[Model], - name: str, - mode: str, - lookups: Optional[Lookups], -): - lookups = EnglishLemmatizer.load_lookups(nlp.lang, mode, lookups) - return EnglishLemmatizer(nlp.vocab, model, name, mode=mode, lookups=lookups) +def make_lemmatizer(nlp: Language, model: Optional[Model], name: str, mode: str): + return EnglishLemmatizer(nlp.vocab, model, name, mode=mode) __all__ = ["English"] diff --git a/spacy/lang/fa/__init__.py b/spacy/lang/fa/__init__.py index f3a6635dc..77ee3bca3 100644 --- a/spacy/lang/fa/__init__.py +++ b/spacy/lang/fa/__init__.py @@ -6,7 +6,6 @@ from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS from .punctuation import TOKENIZER_SUFFIXES from .syntax_iterators import SYNTAX_ITERATORS from ...language import Language -from ...lookups import Lookups from ...pipeline import Lemmatizer @@ -27,18 +26,11 @@ class Persian(Language): @Persian.factory( "lemmatizer", assigns=["token.lemma"], - default_config={"model": None, "mode": "rule", "lookups": None}, + default_config={"model": None, "mode": "rule"}, default_score_weights={"lemma_acc": 1.0}, ) -def make_lemmatizer( - nlp: Language, - model: Optional[Model], - name: str, - mode: str, - lookups: Optional[Lookups], -): - lookups = Lemmatizer.load_lookups(nlp.lang, mode, lookups) - return Lemmatizer(nlp.vocab, model, name, mode=mode, lookups=lookups) +def make_lemmatizer(nlp: Language, model: Optional[Model], name: str, mode: str): + return Lemmatizer(nlp.vocab, model, name, mode=mode) __all__ = ["Persian"] diff --git a/spacy/lang/fr/__init__.py b/spacy/lang/fr/__init__.py index 72e641d1f..1e0011fba 100644 --- a/spacy/lang/fr/__init__.py +++ b/spacy/lang/fr/__init__.py @@ -9,7 +9,6 @@ from .stop_words import STOP_WORDS from .lex_attrs import LEX_ATTRS from .syntax_iterators import SYNTAX_ITERATORS from .lemmatizer import FrenchLemmatizer -from ...lookups import Lookups from ...language import Language @@ -32,18 +31,11 @@ class French(Language): @French.factory( "lemmatizer", assigns=["token.lemma"], - default_config={"model": None, "mode": "rule", "lookups": None}, + default_config={"model": None, "mode": "rule"}, default_score_weights={"lemma_acc": 1.0}, ) -def make_lemmatizer( - nlp: Language, - model: Optional[Model], - name: str, - mode: str, - lookups: Optional[Lookups], -): - lookups = FrenchLemmatizer.load_lookups(nlp.lang, mode, lookups) - return FrenchLemmatizer(nlp.vocab, model, name, mode=mode, lookups=lookups) 
+def make_lemmatizer(nlp: Language, model: Optional[Model], name: str, mode: str): + return FrenchLemmatizer(nlp.vocab, model, name, mode=mode) __all__ = ["French"] diff --git a/spacy/lang/nb/__init__.py b/spacy/lang/nb/__init__.py index 9672dfd6e..62d7707f3 100644 --- a/spacy/lang/nb/__init__.py +++ b/spacy/lang/nb/__init__.py @@ -6,7 +6,6 @@ from .punctuation import TOKENIZER_SUFFIXES from .stop_words import STOP_WORDS from .syntax_iterators import SYNTAX_ITERATORS from ...language import Language -from ...lookups import Lookups from ...pipeline import Lemmatizer @@ -27,18 +26,11 @@ class Norwegian(Language): @Norwegian.factory( "lemmatizer", assigns=["token.lemma"], - default_config={"model": None, "mode": "rule", "lookups": None}, + default_config={"model": None, "mode": "rule"}, default_score_weights={"lemma_acc": 1.0}, ) -def make_lemmatizer( - nlp: Language, - model: Optional[Model], - name: str, - mode: str, - lookups: Optional[Lookups], -): - lookups = Lemmatizer.load_lookups(nlp.lang, mode, lookups) - return Lemmatizer(nlp.vocab, model, name, mode=mode, lookups=lookups) +def make_lemmatizer(nlp: Language, model: Optional[Model], name: str, mode: str): + return Lemmatizer(nlp.vocab, model, name, mode=mode) __all__ = ["Norwegian"] diff --git a/spacy/lang/nl/__init__.py b/spacy/lang/nl/__init__.py index 15b6b9de2..a3591f1bf 100644 --- a/spacy/lang/nl/__init__.py +++ b/spacy/lang/nl/__init__.py @@ -1,5 +1,4 @@ from typing import Optional - from thinc.api import Model from .stop_words import STOP_WORDS @@ -8,7 +7,6 @@ from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_INFIXES from .punctuation import TOKENIZER_SUFFIXES from .lemmatizer import DutchLemmatizer -from ...lookups import Lookups from ...language import Language @@ -29,18 +27,11 @@ class Dutch(Language): @Dutch.factory( "lemmatizer", assigns=["token.lemma"], - default_config={"model": None, "mode": "rule", "lookups": None}, + default_config={"model": None, "mode": "rule"}, default_score_weights={"lemma_acc": 1.0}, ) -def make_lemmatizer( - nlp: Language, - model: Optional[Model], - name: str, - mode: str, - lookups: Optional[Lookups], -): - lookups = DutchLemmatizer.load_lookups(nlp.lang, mode, lookups) - return DutchLemmatizer(nlp.vocab, model, name, mode=mode, lookups=lookups) +def make_lemmatizer(nlp: Language, model: Optional[Model], name: str, mode: str): + return DutchLemmatizer(nlp.vocab, model, name, mode=mode) __all__ = ["Dutch"] diff --git a/spacy/lang/pl/__init__.py b/spacy/lang/pl/__init__.py index 573dbc6f9..f7be8a6c2 100644 --- a/spacy/lang/pl/__init__.py +++ b/spacy/lang/pl/__init__.py @@ -34,18 +34,11 @@ class Polish(Language): @Polish.factory( "lemmatizer", assigns=["token.lemma"], - default_config={"model": None, "mode": "pos_lookup", "lookups": None}, + default_config={"model": None, "mode": "pos_lookup"}, default_score_weights={"lemma_acc": 1.0}, ) -def make_lemmatizer( - nlp: Language, - model: Optional[Model], - name: str, - mode: str, - lookups: Optional[Lookups], -): - lookups = PolishLemmatizer.load_lookups(nlp.lang, mode, lookups) - return PolishLemmatizer(nlp.vocab, model, name, mode=mode, lookups=lookups) +def make_lemmatizer(nlp: Language, model: Optional[Model], name: str, mode: str): + return PolishLemmatizer(nlp.vocab, model, name, mode=mode) __all__ = ["Polish"] diff --git a/spacy/lang/ru/__init__.py b/spacy/lang/ru/__init__.py index 6436ae0c7..1d59ca043 100644 --- a/spacy/lang/ru/__init__.py +++ b/spacy/lang/ru/__init__.py @@ -6,7 
+6,6 @@ from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS from .lex_attrs import LEX_ATTRS from .lemmatizer import RussianLemmatizer from ...language import Language -from ...lookups import Lookups class RussianDefaults(Language.Defaults): @@ -23,17 +22,11 @@ class Russian(Language): @Russian.factory( "lemmatizer", assigns=["token.lemma"], - default_config={"model": None, "mode": "pymorphy2", "lookups": None}, + default_config={"model": None, "mode": "pymorphy2"}, default_score_weights={"lemma_acc": 1.0}, ) -def make_lemmatizer( - nlp: Language, - model: Optional[Model], - name: str, - mode: str, - lookups: Optional[Lookups], -): - return RussianLemmatizer(nlp.vocab, model, name, mode=mode, lookups=lookups) +def make_lemmatizer(nlp: Language, model: Optional[Model], name: str, mode: str): + return RussianLemmatizer(nlp.vocab, model, name, mode=mode) __all__ = ["Russian"] diff --git a/spacy/lang/sv/__init__.py b/spacy/lang/sv/__init__.py index ea314f487..2490eb9ec 100644 --- a/spacy/lang/sv/__init__.py +++ b/spacy/lang/sv/__init__.py @@ -5,7 +5,6 @@ from .stop_words import STOP_WORDS from .lex_attrs import LEX_ATTRS from .syntax_iterators import SYNTAX_ITERATORS from ...language import Language -from ...lookups import Lookups from ...pipeline import Lemmatizer @@ -30,18 +29,11 @@ class Swedish(Language): @Swedish.factory( "lemmatizer", assigns=["token.lemma"], - default_config={"model": None, "mode": "rule", "lookups": None}, + default_config={"model": None, "mode": "rule"}, default_score_weights={"lemma_acc": 1.0}, ) -def make_lemmatizer( - nlp: Language, - model: Optional[Model], - name: str, - mode: str, - lookups: Optional[Lookups], -): - lookups = Lemmatizer.load_lookups(nlp.lang, mode, lookups) - return Lemmatizer(nlp.vocab, model, name, mode=mode, lookups=lookups) +def make_lemmatizer(nlp: Language, model: Optional[Model], name: str, mode: str): + return Lemmatizer(nlp.vocab, model, name, mode=mode) __all__ = ["Swedish"] diff --git a/spacy/lang/uk/__init__.py b/spacy/lang/uk/__init__.py index 006a1cf7f..73c065379 100644 --- a/spacy/lang/uk/__init__.py +++ b/spacy/lang/uk/__init__.py @@ -7,7 +7,6 @@ from .stop_words import STOP_WORDS from .lex_attrs import LEX_ATTRS from .lemmatizer import UkrainianLemmatizer from ...language import Language -from ...lookups import Lookups class UkrainianDefaults(Language.Defaults): @@ -24,17 +23,11 @@ class Ukrainian(Language): @Ukrainian.factory( "lemmatizer", assigns=["token.lemma"], - default_config={"model": None, "mode": "pymorphy2", "lookups": None}, + default_config={"model": None, "mode": "pymorphy2"}, default_score_weights={"lemma_acc": 1.0}, ) -def make_lemmatizer( - nlp: Language, - model: Optional[Model], - name: str, - mode: str, - lookups: Optional[Lookups], -): - return UkrainianLemmatizer(nlp.vocab, model, name, mode=mode, lookups=lookups) +def make_lemmatizer(nlp: Language, model: Optional[Model], name: str, mode: str): + return UkrainianLemmatizer(nlp.vocab, model, name, mode=mode) __all__ = ["Ukrainian"] diff --git a/spacy/pipeline/lemmatizer.py b/spacy/pipeline/lemmatizer.py index 391769604..9be596868 100644 --- a/spacy/pipeline/lemmatizer.py +++ b/spacy/pipeline/lemmatizer.py @@ -1,26 +1,25 @@ -from typing import Optional, List, Dict, Any +from typing import Optional, List, Dict, Any, Callable, Iterable, Iterator, Union +from typing import Tuple from thinc.api import Model +from pathlib import Path from .pipe import Pipe from ..errors import Errors from ..language import Language +from ..training import Example from 
..lookups import Lookups, load_lookups from ..scorer import Scorer from ..tokens import Doc, Token from ..vocab import Vocab from ..training import validate_examples +from ..util import logger, SimpleFrozenList from .. import util @Language.factory( "lemmatizer", assigns=["token.lemma"], - default_config={ - "model": None, - "mode": "lookup", - "lookups": None, - "overwrite": False, - }, + default_config={"model": None, "mode": "lookup", "overwrite": False}, default_score_weights={"lemma_acc": 1.0}, ) def make_lemmatizer( @@ -28,13 +27,9 @@ def make_lemmatizer( model: Optional[Model], name: str, mode: str, - lookups: Optional[Lookups], overwrite: bool = False, ): - lookups = Lemmatizer.load_lookups(nlp.lang, mode, lookups) - return Lemmatizer( - nlp.vocab, model, name, mode=mode, lookups=lookups, overwrite=overwrite - ) + return Lemmatizer(nlp.vocab, model, name, mode=mode, overwrite=overwrite) class Lemmatizer(Pipe): @@ -46,59 +41,19 @@ class Lemmatizer(Pipe): """ @classmethod - def get_lookups_config(cls, mode: str) -> Dict: + def get_lookups_config(cls, mode: str) -> Tuple[List[str], List[str]]: """Returns the lookups configuration settings for a given mode for use in Lemmatizer.load_lookups. mode (str): The lemmatizer mode. - RETURNS (dict): The lookups configuration settings for this mode. - - DOCS: https://nightly.spacy.io/api/lemmatizer#get_lookups_config + RETURNS (Tuple[List[str], List[str]]): The required and optional + lookup tables for this mode. """ if mode == "lookup": - return { - "required_tables": ["lemma_lookup"], - } + return (["lemma_lookup"], []) elif mode == "rule": - return { - "required_tables": ["lemma_rules"], - "optional_tables": ["lemma_exc", "lemma_index"], - } - return {} - - @classmethod - def load_lookups(cls, lang: str, mode: str, lookups: Optional[Lookups]) -> Lookups: - """Load and validate lookups tables. If the provided lookups is None, - load the default lookups tables according to the language and mode - settings. Confirm that all required tables for the language and mode - are present. - - lang (str): The language code. - mode (str): The lemmatizer mode. - lookups (Lookups): The provided lookups, may be None if the default - lookups should be loaded. - RETURNS (Lookups): The Lookups object. - - DOCS: https://nightly.spacy.io/api/lemmatizer#get_lookups_config - """ - config = cls.get_lookups_config(mode) - required_tables = config.get("required_tables", []) - optional_tables = config.get("optional_tables", []) - if lookups is None: - lookups = load_lookups(lang=lang, tables=required_tables) - optional_lookups = load_lookups( - lang=lang, tables=optional_tables, strict=False - ) - for table in optional_lookups.tables: - lookups.set_table(table, optional_lookups.get_table(table)) - for table in required_tables: - if table not in lookups: - raise ValueError( - Errors.E1004.format( - mode=mode, tables=required_tables, found=lookups.tables - ) - ) - return lookups + return (["lemma_rules"], ["lemma_exc", "lemma_index"]) + return ([], []) def __init__( self, @@ -107,7 +62,6 @@ class Lemmatizer(Pipe): name: str = "lemmatizer", *, mode: str = "lookup", - lookups: Optional[Lookups] = None, overwrite: bool = False, ) -> None: """Initialize a Lemmatizer. @@ -116,9 +70,6 @@ class Lemmatizer(Pipe): model (Model): A model (not yet implemented). name (str): The component name. Defaults to "lemmatizer". mode (str): The lemmatizer mode: "lookup", "rule". Defaults to "lookup". 
- lookups (Lookups): The lookups object containing the (optional) tables - such as "lemma_rules", "lemma_index", "lemma_exc" and - "lemma_lookup". Defaults to None overwrite (bool): Whether to overwrite existing lemmas. Defaults to `False`. @@ -128,8 +79,9 @@ class Lemmatizer(Pipe): self.model = model self.name = name self._mode = mode - self.lookups = lookups if lookups is not None else Lookups() + self.lookups = Lookups() self.overwrite = overwrite + self._validated = False if self.mode == "lookup": self.lemmatize = self.lookup_lemmatize elif self.mode == "rule": @@ -153,12 +105,56 @@ class Lemmatizer(Pipe): DOCS: https://nightly.spacy.io/api/lemmatizer#call """ + if not self._validated: + self._validate_tables(Errors.E1004) for token in doc: if self.overwrite or token.lemma == 0: token.lemma_ = self.lemmatize(token)[0] return doc - def pipe(self, stream, *, batch_size=128): + def initialize( + self, + get_examples: Optional[Callable[[], Iterable[Example]]] = None, + *, + nlp: Optional[Language] = None, + lookups: Optional[Lookups] = None, + ): + """Initialize the lemmatizer and load in data. + + get_examples (Callable[[], Iterable[Example]]): Function that + returns a representative sample of gold-standard Example objects. + nlp (Language): The current nlp object the component is part of. + lookups (Lookups): The lookups object containing the (optional) tables + such as "lemma_rules", "lemma_index", "lemma_exc" and + "lemma_lookup". Defaults to None. + """ + required_tables, optional_tables = self.get_lookups_config(self.mode) + if lookups is None: + logger.debug("Lemmatizer: loading tables from spacy-lookups-data") + lookups = load_lookups(lang=self.vocab.lang, tables=required_tables) + optional_lookups = load_lookups( + lang=self.vocab.lang, tables=optional_tables, strict=False + ) + for table in optional_lookups.tables: + lookups.set_table(table, optional_lookups.get_table(table)) + self.lookups = lookups + self._validate_tables(Errors.E1004) + + def _validate_tables(self, error_message: str = Errors.E912) -> None: + """Check that the lookups are correct for the current mode.""" + required_tables, optional_tables = self.get_lookups_config(self.mode) + for table in required_tables: + if table not in self.lookups: + raise ValueError( + error_message.format( + mode=self.mode, + tables=required_tables, + found=self.lookups.tables, + ) + ) + self._validated = True + + def pipe(self, stream: Iterable[Doc], *, batch_size: int = 128) -> Iterator[Doc]: """Apply the pipe to a stream of documents. This usually happens under the hood when the nlp object is called on a text and all components are applied to the Doc. @@ -263,7 +259,7 @@ class Lemmatizer(Pipe): """ return False - def score(self, examples, **kwargs) -> Dict[str, Any]: + def score(self, examples: Iterable[Example], **kwargs) -> Dict[str, Any]: """Score a batch of examples. examples (Iterable[Example]): The examples to score. @@ -274,58 +270,66 @@ class Lemmatizer(Pipe): validate_examples(examples, "Lemmatizer.score") return Scorer.score_token_attr(examples, "lemma", **kwargs) - def to_disk(self, path, *, exclude=tuple()): - """Save the current state to a directory. + def to_disk( + self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList() + ): + """Serialize the pipe to disk. - path (unicode or Path): A path to a directory, which will be created if - it doesn't exist. - exclude (list): String names of serialization fields to exclude. + path (str / Path): Path to a directory. 
+ exclude (Iterable[str]): String names of serialization fields to exclude. - DOCS: https://nightly.spacy.io/api/vocab#to_disk + DOCS: https://nightly.spacy.io/api/lemmatizer#to_disk """ serialize = {} serialize["vocab"] = lambda p: self.vocab.to_disk(p) serialize["lookups"] = lambda p: self.lookups.to_disk(p) util.to_disk(path, serialize, exclude) - def from_disk(self, path, *, exclude=tuple()): - """Loads state from a directory. Modifies the object in place and - returns it. + def from_disk( + self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList() + ) -> "Lemmatizer": + """Load the pipe from disk. Modifies the object in place and returns it. - path (unicode or Path): A path to a directory. - exclude (list): String names of serialization fields to exclude. - RETURNS (Vocab): The modified `Vocab` object. + path (str / Path): Path to a directory. + exclude (Iterable[str]): String names of serialization fields to exclude. + RETURNS (Lemmatizer): The modified Lemmatizer object. - DOCS: https://nightly.spacy.io/api/vocab#to_disk + DOCS: https://nightly.spacy.io/api/lemmatizer#from_disk """ deserialize = {} deserialize["vocab"] = lambda p: self.vocab.from_disk(p) deserialize["lookups"] = lambda p: self.lookups.from_disk(p) util.from_disk(path, deserialize, exclude) + self._validate_tables() + return self - def to_bytes(self, *, exclude=tuple()) -> bytes: - """Serialize the current state to a binary string. + def to_bytes(self, *, exclude: Iterable[str] = SimpleFrozenList()) -> bytes: + """Serialize the pipe to a bytestring. - exclude (list): String names of serialization fields to exclude. - RETURNS (bytes): The serialized form of the `Vocab` object. + exclude (Iterable[str]): String names of serialization fields to exclude. + RETURNS (bytes): The serialized object. - DOCS: https://nightly.spacy.io/api/vocab#to_bytes + DOCS: https://nightly.spacy.io/api/lemmatizer#to_bytes """ serialize = {} serialize["vocab"] = self.vocab.to_bytes serialize["lookups"] = self.lookups.to_bytes return util.to_bytes(serialize, exclude) - def from_bytes(self, bytes_data: bytes, *, exclude=tuple()): - """Load state from a binary string. + def from_bytes( + self, bytes_data: bytes, *, exclude: Iterable[str] = SimpleFrozenList() + ) -> "Lemmatizer": + """Load the pipe from a bytestring. - bytes_data (bytes): The data to load from. - exclude (list): String names of serialization fields to exclude. - RETURNS (Vocab): The `Vocab` object. + bytes_data (bytes): The serialized pipe. + exclude (Iterable[str]): String names of serialization fields to exclude. + RETURNS (Lemmatizer): The loaded Lemmatizer. 
- DOCS: https://nightly.spacy.io/api/vocab#from_bytes + DOCS: https://nightly.spacy.io/api/lemmatizer#from_bytes """ deserialize = {} deserialize["vocab"] = lambda b: self.vocab.from_bytes(b) deserialize["lookups"] = lambda b: self.lookups.from_bytes(b) util.from_bytes(bytes_data, deserialize, exclude) + self._validate_tables() + return self diff --git a/spacy/tests/lang/test_lemmatizers.py b/spacy/tests/lang/test_lemmatizers.py index 6e7f82341..5f45664eb 100644 --- a/spacy/tests/lang/test_lemmatizers.py +++ b/spacy/tests/lang/test_lemmatizers.py @@ -17,16 +17,31 @@ def test_lemmatizer_initialize(lang, capfd): @registry.misc("lemmatizer_init_lookups") def lemmatizer_init_lookups(): lookups = Lookups() - lookups.add_table("lemma_lookup", {"cope": "cope"}) + lookups.add_table("lemma_lookup", {"cope": "cope", "x": "y"}) lookups.add_table("lemma_index", {"verb": ("cope", "cop")}) lookups.add_table("lemma_exc", {"verb": {"coping": ("cope",)}}) lookups.add_table("lemma_rules", {"verb": [["ing", ""]]}) return lookups - """Test that languages can be initialized.""" + # Test that languages can be initialized nlp = get_lang_class(lang)() - nlp.add_pipe("lemmatizer", config={"lookups": {"@misc": "lemmatizer_init_lookups"}}) + lemmatizer = nlp.add_pipe("lemmatizer", config={"mode": "lookup"}) + assert not lemmatizer.lookups.tables + nlp.config["initialize"]["components"]["lemmatizer"] = { + "lookups": {"@misc": "lemmatizer_init_lookups"} + } + with pytest.raises(ValueError): + nlp("x") + nlp.initialize() + assert lemmatizer.lookups.tables + doc = nlp("x") # Check for stray print statements (see #3342) - doc = nlp("test") # noqa: F841 captured = capfd.readouterr() assert not captured.out + assert doc[0].lemma_ == "y" + + # Test initialization by calling .initialize() directly + nlp = get_lang_class(lang)() + lemmatizer = nlp.add_pipe("lemmatizer", config={"mode": "lookup"}) + lemmatizer.initialize(lookups=lemmatizer_init_lookups()) + assert nlp("x")[0].lemma_ == "y" diff --git a/spacy/tests/pipeline/test_lemmatizer.py b/spacy/tests/pipeline/test_lemmatizer.py index 05e15bc16..d37c87059 100644 --- a/spacy/tests/pipeline/test_lemmatizer.py +++ b/spacy/tests/pipeline/test_lemmatizer.py @@ -8,61 +8,52 @@ from ..util import make_tempdir @pytest.fixture def nlp(): - return English() - - -@pytest.fixture -def lemmatizer(nlp): @registry.misc("cope_lookups") def cope_lookups(): lookups = Lookups() - lookups.add_table("lemma_lookup", {"cope": "cope"}) + lookups.add_table("lemma_lookup", {"cope": "cope", "coped": "cope"}) lookups.add_table("lemma_index", {"verb": ("cope", "cop")}) lookups.add_table("lemma_exc", {"verb": {"coping": ("cope",)}}) lookups.add_table("lemma_rules", {"verb": [["ing", ""]]}) return lookups - lemmatizer = nlp.add_pipe( - "lemmatizer", config={"mode": "rule", "lookups": {"@misc": "cope_lookups"}} - ) - return lemmatizer + nlp = English() + nlp.config["initialize"]["components"]["lemmatizer"] = { + "lookups": {"@misc": "cope_lookups"} + } + return nlp def test_lemmatizer_init(nlp): - @registry.misc("cope_lookups") - def cope_lookups(): - lookups = Lookups() - lookups.add_table("lemma_lookup", {"cope": "cope"}) - lookups.add_table("lemma_index", {"verb": ("cope", "cop")}) - lookups.add_table("lemma_exc", {"verb": {"coping": ("cope",)}}) - lookups.add_table("lemma_rules", {"verb": [["ing", ""]]}) - return lookups - - lemmatizer = nlp.add_pipe( - "lemmatizer", config={"mode": "lookup", "lookups": {"@misc": "cope_lookups"}} - ) + lemmatizer = nlp.add_pipe("lemmatizer", config={"mode": 
"lookup"}) assert isinstance(lemmatizer.lookups, Lookups) + assert not lemmatizer.lookups.tables assert lemmatizer.mode == "lookup" + with pytest.raises(ValueError): + nlp("test") + nlp.initialize() + assert lemmatizer.lookups.tables + assert nlp("cope")[0].lemma_ == "cope" + assert nlp("coped")[0].lemma_ == "cope" # replace any tables from spacy-lookups-data lemmatizer.lookups = Lookups() - doc = nlp("coping") # lookup with no tables sets text as lemma - assert doc[0].lemma_ == "coping" - + assert nlp("cope")[0].lemma_ == "cope" + assert nlp("coped")[0].lemma_ == "coped" nlp.remove_pipe("lemmatizer") - - @registry.misc("empty_lookups") - def empty_lookups(): - return Lookups() - + lemmatizer = nlp.add_pipe("lemmatizer", config={"mode": "lookup"}) with pytest.raises(ValueError): - nlp.add_pipe( - "lemmatizer", - config={"mode": "lookup", "lookups": {"@misc": "empty_lookups"}}, - ) + # Can't initialize without required tables + lemmatizer.initialize(lookups=Lookups()) + lookups = Lookups() + lookups.add_table("lemma_lookup", {}) + lemmatizer.initialize(lookups=lookups) -def test_lemmatizer_config(nlp, lemmatizer): +def test_lemmatizer_config(nlp): + lemmatizer = nlp.add_pipe("lemmatizer", config={"mode": "rule"}) + nlp.initialize() + doc = nlp.make_doc("coping") doc[0].pos_ = "VERB" assert doc[0].lemma_ == "" @@ -78,20 +69,21 @@ def test_lemmatizer_config(nlp, lemmatizer): assert doc[0].lemma_ == "cope" -def test_lemmatizer_serialize(nlp, lemmatizer): - @registry.misc("cope_lookups") +def test_lemmatizer_serialize(nlp): + lemmatizer = nlp.add_pipe("lemmatizer", config={"mode": "rule"}) + nlp.initialize() + def cope_lookups(): lookups = Lookups() - lookups.add_table("lemma_lookup", {"cope": "cope"}) + lookups.add_table("lemma_lookup", {"cope": "cope", "coped": "cope"}) lookups.add_table("lemma_index", {"verb": ("cope", "cop")}) lookups.add_table("lemma_exc", {"verb": {"coping": ("cope",)}}) lookups.add_table("lemma_rules", {"verb": [["ing", ""]]}) return lookups nlp2 = English() - lemmatizer2 = nlp2.add_pipe( - "lemmatizer", config={"mode": "rule", "lookups": {"@misc": "cope_lookups"}} - ) + lemmatizer2 = nlp2.add_pipe("lemmatizer", config={"mode": "rule"}) + lemmatizer2.initialize(lookups=cope_lookups()) lemmatizer2.from_bytes(lemmatizer.to_bytes()) assert lemmatizer.to_bytes() == lemmatizer2.to_bytes() assert lemmatizer.lookups.tables == lemmatizer2.lookups.tables @@ -100,9 +92,9 @@ def test_lemmatizer_serialize(nlp, lemmatizer): with make_tempdir() as tmp_dir: nlp.to_disk(tmp_dir) nlp2 = util.load_model_from_path(tmp_dir) - doc2 = nlp2.make_doc("coping") - doc2[0].pos_ = "VERB" - assert doc2[0].lemma_ == "" - doc2 = lemmatizer(doc2) - assert doc2[0].text == "coping" - assert doc2[0].lemma_ == "cope" + doc2 = nlp2.make_doc("coping") + doc2[0].pos_ = "VERB" + assert doc2[0].lemma_ == "" + doc2 = lemmatizer(doc2) + assert doc2[0].text == "coping" + assert doc2[0].lemma_ == "cope" diff --git a/website/docs/api/lemmatizer.md b/website/docs/api/lemmatizer.md index f980756e5..27ea04432 100644 --- a/website/docs/api/lemmatizer.md +++ b/website/docs/api/lemmatizer.md @@ -48,12 +48,11 @@ data format used by the lookup and rule-based lemmatizers, see > nlp.add_pipe("lemmatizer", config=config) > ``` -| Setting | Description | -| ----------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `mode` | The lemmatizer mode, e.g. `"lookup"` or `"rule"`. Defaults to `"lookup"`. ~~str~~ | -| `lookups` | The lookups object containing the tables such as `"lemma_rules"`, `"lemma_index"`, `"lemma_exc"` and `"lemma_lookup"`. If `None`, default tables are loaded from [`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data). Defaults to `None`. ~~Optional[Lookups]~~ | -| `overwrite` | Whether to overwrite existing lemmas. Defaults to `False`. ~~bool~~ | -| `model` | **Not yet implemented:** the model to use. ~~Model~~ | +| Setting | Description | +| ----------- | --------------------------------------------------------------------------------- | +| `mode` | The lemmatizer mode, e.g. `"lookup"` or `"rule"`. Defaults to `"lookup"`. ~~str~~ | +| `overwrite` | Whether to overwrite existing lemmas. Defaults to `False`. ~~bool~~ | +| `model` | **Not yet implemented:** the model to use. ~~Model~~ | ```python %%GITHUB_SPACY/spacy/pipeline/lemmatizer.py @@ -76,15 +75,14 @@ Create a new pipeline instance. In your application, you would normally use a shortcut for this and instantiate the component using its string name and [`nlp.add_pipe`](/api/language#add_pipe). -| Name | Description | -| -------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `vocab` | The shared vocabulary. ~~Vocab~~ | -| `model` | **Not yet implemented:** The model to use. ~~Model~~ | -| `name` | String name of the component instance. Used to add entries to the `losses` during training. ~~str~~ | -| _keyword-only_ | | -| mode | The lemmatizer mode, e.g. `"lookup"` or `"rule"`. Defaults to `"lookup"`. ~~str~~ | -| lookups | A lookups object containing the tables such as `"lemma_rules"`, `"lemma_index"`, `"lemma_exc"` and `"lemma_lookup"`. Defaults to `None`. ~~Optional[Lookups]~~ | -| overwrite | Whether to overwrite existing lemmas. ~~bool~ | +| Name | Description | +| -------------- | --------------------------------------------------------------------------------------------------- | +| `vocab` | The shared vocabulary. ~~Vocab~~ | +| `model` | **Not yet implemented:** The model to use. ~~Model~~ | +| `name` | String name of the component instance. Used to add entries to the `losses` during training. ~~str~~ | +| _keyword-only_ | | +| mode | The lemmatizer mode, e.g. `"lookup"` or `"rule"`. Defaults to `"lookup"`. ~~str~~ | +| overwrite | Whether to overwrite existing lemmas. ~~bool~ | ## Lemmatizer.\_\_call\_\_ {#call tag="method"} @@ -127,6 +125,37 @@ applied to the `Doc` in order. | `batch_size` | The number of documents to buffer. Defaults to `128`. ~~int~~ | | **YIELDS** | The processed documents in order. ~~Doc~~ | +## Lemmatizer.initialize {#initialize tag="method"} + +Initialize the lemmatizer and load any data resources. This method is typically +called by [`Language.initialize`](/api/language#initialize) and lets you +customize arguments it receives via the +[`[initialize.components]`](/api/data-formats#config-initialize) block in the +config. The loading only happens during initialization, typically before +training. At runtime, all data is loaded from disk. 
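For illustration, a registered `@misc` loader such as the hypothetical `load_my_lookups.v1`
referenced in the config example below only needs to build and return a `Lookups` object; the
function name and the local JSON file used here are assumptions for the sketch, not part of spaCy.

```python
import srsly
from spacy import registry
from spacy.lookups import Lookups


@registry.misc("load_my_lookups.v1")
def load_my_lookups() -> Lookups:
    # mode="lookup" only requires the "lemma_lookup" table; rule mode
    # would also expect "lemma_rules", "lemma_index" and "lemma_exc".
    lookups = Lookups()
    lookups.add_table("lemma_lookup", srsly.read_json("./lemma_lookup.json"))
    return lookups
```

When `nlp.initialize()` runs, the `@misc` reference in `[initialize.components.lemmatizer.lookups]`
resolves to this function and its return value is passed to `Lemmatizer.initialize` as `lookups`.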
+ +> #### Example +> +> ```python +> lemmatizer = nlp.add_pipe("lemmatizer") +> lemmatizer.initialize(lookups=lookups) +> ``` +> +> ```ini +> ### config.cfg +> [initialize.components.lemmatizer] +> +> [initialize.components.lemmatizer.lookups] +> @misc = "load_my_lookups.v1" +> ``` + +| Name | Description | +| -------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `get_examples` | Function that returns gold-standard annotations in the form of [`Example`](/api/example) objects. Defaults to `None`. ~~Optional[Callable[[], Iterable[Example]]]~~ | +| _keyword-only_ | | +| `nlp` | The current `nlp` object. Defaults to `None`. ~~Optional[Language]~~ | +| `lookups` | The lookups object containing the tables such as `"lemma_rules"`, `"lemma_index"`, `"lemma_exc"` and `"lemma_lookup"`. If `None`, default tables are loaded from [`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data). Defaults to `None`. ~~Optional[Lookups]~~ | + ## Lemmatizer.lookup_lemmatize {#lookup_lemmatize tag="method"} Lemmatize a token using a lookup-based approach. If no lemma is found, the From 09dcb75076e39eca904e54c21e22c25491a82a02 Mon Sep 17 00:00:00 2001 From: Sofie Van Landeghem Date: Fri, 2 Oct 2020 15:43:32 +0200 Subject: [PATCH 21/25] small UX fix for DocBin (#6167) * add informative warning when messing up store_user_data DocBin flags * add informative warning when messing up store_user_data DocBin flags * cleanup test * rename to patterns_path --- spacy/errors.py | 2 +- spacy/tests/serialize/test_serialize_doc.py | 20 +++++++++++++ spacy/tokens/_serialize.py | 31 +++++++++++++++------ website/docs/api/docbin.md | 2 +- 4 files changed, 44 insertions(+), 11 deletions(-) diff --git a/spacy/errors.py b/spacy/errors.py index 4edd1cbae..dbb25479d 100644 --- a/spacy/errors.py +++ b/spacy/errors.py @@ -419,7 +419,7 @@ class Errors: E164 = ("x is neither increasing nor decreasing: {}.") E165 = ("Only one class present in y_true. 
ROC AUC score is not defined in " "that case.") - E166 = ("Can only merge DocBins with the same pre-defined attributes.\n" + E166 = ("Can only merge DocBins with the same value for '{param}'.\n" "Current DocBin: {current}\nOther DocBin: {other}") E169 = ("Can't find module: {module}") E170 = ("Cannot apply transition {name}: invalid for the current state.") diff --git a/spacy/tests/serialize/test_serialize_doc.py b/spacy/tests/serialize/test_serialize_doc.py index 4a976fc02..8b6adb83b 100644 --- a/spacy/tests/serialize/test_serialize_doc.py +++ b/spacy/tests/serialize/test_serialize_doc.py @@ -1,3 +1,6 @@ +import pytest +from spacy.tokens.doc import Underscore + import spacy from spacy.lang.en import English from spacy.tokens import Doc, DocBin @@ -86,3 +89,20 @@ def test_serialize_doc_bin_unknown_spaces(en_vocab): assert re_doc1.text == "that 's " assert not re_doc2.has_unknown_spaces assert re_doc2.text == "that's" + + +@pytest.mark.parametrize( + "writer_flag,reader_flag,reader_value", [(True, True, "bar"), (True, False, "bar"), (False, True, "nothing"), (False, False, "nothing")] +) +def test_serialize_custom_extension(en_vocab, writer_flag, reader_flag, reader_value): + """Test that custom extensions are correctly serialized in DocBin.""" + Doc.set_extension("foo", default="nothing") + doc = Doc(en_vocab, words=["hello", "world"]) + doc._.foo = "bar" + doc_bin_1 = DocBin(store_user_data=writer_flag) + doc_bin_1.add(doc) + doc_bin_bytes = doc_bin_1.to_bytes() + doc_bin_2 = DocBin(store_user_data=reader_flag).from_bytes(doc_bin_bytes) + doc_2 = list(doc_bin_2.get_docs(en_vocab))[0] + assert doc_2._.foo == reader_value + Underscore.doc_extensions = {} diff --git a/spacy/tokens/_serialize.py b/spacy/tokens/_serialize.py index ed283a86b..11eb75821 100644 --- a/spacy/tokens/_serialize.py +++ b/spacy/tokens/_serialize.py @@ -58,7 +58,7 @@ class DocBin: attrs (Iterable[str]): List of attributes to serialize. 'orth' and 'spacy' are always serialized, so they're not required. - store_user_data (bool): Whether to include the `Doc.user_data`. + store_user_data (bool): Whether to write the `Doc.user_data` to bytes/file. docs (Iterable[Doc]): Docs to add. DOCS: https://nightly.spacy.io/api/docbin#init @@ -106,11 +106,12 @@ class DocBin: self.strings.add(token.ent_type_) self.strings.add(token.ent_kb_id_) self.cats.append(doc.cats) - if self.store_user_data: - self.user_data.append(srsly.msgpack_dumps(doc.user_data)) + self.user_data.append(srsly.msgpack_dumps(doc.user_data)) def get_docs(self, vocab: Vocab) -> Iterator[Doc]: """Recover Doc objects from the annotations, using the given vocab. + Note that the user data of each doc will be read (if available) and returned, + regardless of the setting of 'self.store_user_data'. vocab (Vocab): The shared vocab. YIELDS (Doc): The Doc objects. @@ -129,7 +130,7 @@ class DocBin: doc = Doc(vocab, words=tokens[:, orth_col], spaces=spaces) doc = doc.from_array(self.attrs, tokens) doc.cats = self.cats[i] - if self.store_user_data: + if i < len(self.user_data) and self.user_data[i] is not None: user_data = srsly.msgpack_loads(self.user_data[i], use_list=False) doc.user_data.update(user_data) yield doc @@ -137,21 +138,31 @@ class DocBin: def merge(self, other: "DocBin") -> None: """Extend the annotations of this DocBin with the annotations from another. Will raise an error if the pre-defined attrs of the two - DocBins don't match. + DocBins don't match, or if they differ in whether or not to store + user data. 
other (DocBin): The DocBin to merge into the current bin. DOCS: https://nightly.spacy.io/api/docbin#merge """ if self.attrs != other.attrs: - raise ValueError(Errors.E166.format(current=self.attrs, other=other.attrs)) + raise ValueError( + Errors.E166.format(param="attrs", current=self.attrs, other=other.attrs) + ) + if self.store_user_data != other.store_user_data: + raise ValueError( + Errors.E166.format( + param="store_user_data", + current=self.store_user_data, + other=other.store_user_data, + ) + ) self.tokens.extend(other.tokens) self.spaces.extend(other.spaces) self.strings.update(other.strings) self.cats.extend(other.cats) self.flags.extend(other.flags) - if self.store_user_data: - self.user_data.extend(other.user_data) + self.user_data.extend(other.user_data) def to_bytes(self) -> bytes: """Serialize the DocBin's annotations to a bytestring. @@ -200,8 +211,10 @@ class DocBin: self.spaces = NumpyOps().unflatten(flat_spaces, lengths) self.cats = msg["cats"] self.flags = msg.get("flags", [{} for _ in lengths]) - if self.store_user_data and "user_data" in msg: + if "user_data" in msg: self.user_data = list(msg["user_data"]) + else: + self.user_data = [None] * len(self) for tokens in self.tokens: assert len(tokens.shape) == 2, tokens.shape # this should never happen return self diff --git a/website/docs/api/docbin.md b/website/docs/api/docbin.md index 03aff2f6e..3625ed790 100644 --- a/website/docs/api/docbin.md +++ b/website/docs/api/docbin.md @@ -47,7 +47,7 @@ Create a `DocBin` object to hold serialized annotations. | Argument | Description | | ----------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `attrs` | List of attributes to serialize. `ORTH` (hash of token text) and `SPACY` (whether the token is followed by whitespace) are always serialized, so they're not required. Defaults to `("ORTH", "TAG", "HEAD", "DEP", "ENT_IOB", "ENT_TYPE", "ENT_KB_ID", "LEMMA", "MORPH", "POS")`. ~~Iterable[str]~~ | -| `store_user_data` | Whether to include the `Doc.user_data` and the values of custom extension attributes. Defaults to `False`. ~~bool~~ | +| `store_user_data` | Whether to write the `Doc.user_data` and the values of custom extension attributes to file/bytes. Defaults to `False`. ~~bool~~ | | `docs` | `Doc` objects to add on initialization. 
~~Iterable[Doc]~~ | ## DocBin.\_\len\_\_ {#len tag="method"} From 62ccd5c4dfbcfbf8248b00696eebc97427444e8a Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Fri, 2 Oct 2020 16:37:21 +0200 Subject: [PATCH 22/25] Relax model meta performance schema (#6185) Allow more embedded per_x in `ModelMetaSchema` --- spacy/schemas.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spacy/schemas.py b/spacy/schemas.py index 1125fa7da..591b7e134 100644 --- a/spacy/schemas.py +++ b/spacy/schemas.py @@ -282,7 +282,7 @@ class ModelMetaSchema(BaseModel): sources: Optional[Union[List[StrictStr], List[Dict[str, str]]]] = Field(None, title="Training data sources") vectors: Dict[str, Any] = Field({}, title="Included word vectors") labels: Dict[str, List[str]] = Field({}, title="Component labels, keyed by component name") - performance: Dict[str, Union[float, Dict[str, float]]] = Field({}, title="Accuracy and speed numbers") + performance: Dict[str, Union[float, Dict[str, Union[float, dict]]]] = Field({}, title="Accuracy and speed numbers") spacy_git_version: StrictStr = Field("", title="Commit of spaCy version used") # fmt: on From 6965cdf16dd043913a815781ef77e90d565f6073 Mon Sep 17 00:00:00 2001 From: Matthew Honnibal Date: Fri, 2 Oct 2020 17:26:21 +0200 Subject: [PATCH 23/25] Fix comment --- spacy/ml/models/tok2vec.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/spacy/ml/models/tok2vec.py b/spacy/ml/models/tok2vec.py index 84b54f029..120e9b02c 100644 --- a/spacy/ml/models/tok2vec.py +++ b/spacy/ml/models/tok2vec.py @@ -186,11 +186,7 @@ def CharacterEmbed( feature (int or str): An attribute to embed, to concatenate with the characters. width (int): The width of the output vector and the feature embedding. -<<<<<<< HEAD - rows (int): The number of rows in the NORM hash embedding table. -======= rows (int): The number of rows in the LOWER hash embedding table. ->>>>>>> 300e5a9928fd226dfddbf7d5c22558f696bfa1af nM (int): The dimensionality of the character embeddings. Recommended values are between 16 and 64. nC (int): The number of UTF-8 bytes to embed per word. Recommended values From 52e4586ec11bf6ef13680cf80c5bdc33499be2c1 Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Sat, 3 Oct 2020 11:13:00 +0200 Subject: [PATCH 24/25] Add transformers to extras_require [ci skip] --- setup.cfg | 2 ++ 1 file changed, 2 insertions(+) diff --git a/setup.cfg b/setup.cfg index 963ce60ca..7192ba9d4 100644 --- a/setup.cfg +++ b/setup.cfg @@ -66,6 +66,8 @@ console_scripts = [options.extras_require] lookups = spacy_lookups_data==1.0.0rc0 +transformers = + spacy_transformers>=1.0.0a17,<1.0.0 cuda = cupy>=5.0.0b4,<9.0.0 cuda80 = From eb9b3ff9c5a2bc779412d85e77e840b5049e4209 Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Sat, 3 Oct 2020 11:35:42 +0200 Subject: [PATCH 25/25] Update install docs and quickstarts [ci skip] --- website/docs/usage/index.md | 60 ++++--- website/src/components/quickstart.js | 74 ++++---- website/src/styles/quickstart.module.sass | 36 +++- website/src/widgets/quickstart-install.js | 208 +++++++++++++--------- website/src/widgets/quickstart-models.js | 132 ++++++++------ 5 files changed, 309 insertions(+), 201 deletions(-) diff --git a/website/docs/usage/index.md b/website/docs/usage/index.md index ad2614175..e0a4fdb07 100644 --- a/website/docs/usage/index.md +++ b/website/docs/usage/index.md @@ -8,10 +8,7 @@ menu: - ['Changelog', 'changelog'] --- -spaCy is compatible with **64-bit CPython 3.6+** and runs on **Unix/Linux**, -**macOS/OS X** and **Windows**. 
The latest spaCy releases are available over -[pip](https://pypi.python.org/pypi/spacy) and -[conda](https://anaconda.org/conda-forge/spacy). +## Quickstart {hidden="true"} > #### 📖 Looking for the old docs? > @@ -19,21 +16,22 @@ spaCy is compatible with **64-bit CPython 3.6+** and runs on **Unix/Linux**, > website to [**v2.spacy.io**](https://v2.spacy.io/docs). To see what's changed > and how to migrate, see the guide on [v3.0 guide](/usage/v3). -## Quickstart {hidden="true"} - import QuickstartInstall from 'widgets/quickstart-install.js' - + ## Installation instructions {#installation} +spaCy is compatible with **64-bit CPython 3.6+** and runs on **Unix/Linux**, +**macOS/OS X** and **Windows**. The latest spaCy releases are available over +[pip](https://pypi.python.org/pypi/spacy) and +[conda](https://anaconda.org/conda-forge/spacy). + ### pip {#pip} Using pip, spaCy releases are available as source packages and binary wheels. - -```bash -$ pip install -U spacy -``` +Before you install spaCy and its dependencies, make sure that your `pip`, +`setuptools` and `wheel` are up to date. > #### Download pipelines > @@ -47,16 +45,10 @@ $ pip install -U spacy > >>> nlp = spacy.load("en_core_web_sm") > ``` - - -To install additional data tables for lemmatization you can run -`pip install spacy[lookups]` or install -[`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data) -separately. The lookups package is needed to provide normalization and -lemmatization data for new models and to lemmatize in languages that don't yet -come with trained pipelines and aren't powered by third-party libraries. - - +```bash +$ pip install -U pip setuptools wheel +$ pip install -U spacy +``` When using pip it is generally recommended to install packages in a virtual environment to avoid modifying system state: @@ -64,9 +56,28 @@ environment to avoid modifying system state: ```bash $ python -m venv .env $ source .env/bin/activate +$ pip install -U pip setuptools wheel $ pip install spacy ``` +spaCy also lets you install extra dependencies by specifying the following +keywords in brackets, e.g. `spacy[ja]` or `spacy[lookups,transformers]` (with +multiple comma-separated extras). See the `[options.extras_require]` section in +spaCy's [`setup.cfg`](%%GITHUB_SPACY/setup.cfg) for details on what's included. + +> #### Example +> +> ```bash +> $ pip install spacy[lookups,transformers] +> ``` + +| Name | Description | +| ---------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `lookups` | Install [`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data) for data tables for lemmatization and lexeme normalization. The data is serialized with trained pipelines, so you only need this package if you want to train your own models. | +| `transformers` | Install [`spacy-transformers`](https://github.com/explosion/spacy-transformers). The package will be installed automatically when you install a transformer-based pipeline. | +| `cuda`, ... | Install spaCy with GPU support provided by [CuPy](https://cupy.chainer.org) for your given CUDA version. See the GPU [installation instructions](#gpu) for details and options. | +| `ja`, `ko`, `th` | Install additional dependencies required for tokenization for the [languages](/usage/models#languages). 
| + ### conda {#conda} Thanks to our great community, we've been able to re-add conda support. You can @@ -112,10 +123,9 @@ $ python -m spacy validate ### Run spaCy with GPU {#gpu new="2.0.14"} As of v2.0, spaCy comes with neural network models that are implemented in our -machine learning library, [Thinc](https://github.com/explosion/thinc). For GPU -support, we've been grateful to use the work of Chainer's -[CuPy](https://cupy.chainer.org) module, which provides a numpy-compatible -interface for GPU arrays. +machine learning library, [Thinc](https://thinc.ai). For GPU support, we've been +grateful to use the work of Chainer's [CuPy](https://cupy.chainer.org) module, +which provides a numpy-compatible interface for GPU arrays. spaCy can be installed on GPU by specifying `spacy[cuda]`, `spacy[cuda90]`, `spacy[cuda91]`, `spacy[cuda92]`, `spacy[cuda100]`, `spacy[cuda101]` or diff --git a/website/src/components/quickstart.js b/website/src/components/quickstart.js index 64f828c2f..e47e02e35 100644 --- a/website/src/components/quickstart.js +++ b/website/src/components/quickstart.js @@ -24,6 +24,7 @@ const Quickstart = ({ rawContent = null, id = 'quickstart', setters = {}, + showDropdown = {}, hidePrompts, small, codeLang, @@ -107,6 +108,8 @@ const Quickstart = ({ }) => { // Optional function that's called with the value const setterFunc = setters[id] || (() => {}) + // Check if dropdown should be shown + const dropdownGetter = showDropdown[id] || (() => true) return (