Merge branch 'develop' into nightly.spacy.io

Ines Montani 2020-10-03 11:36:08 +02:00
commit 6cdc090e0e
53 changed files with 1148 additions and 773 deletions

View File

@ -6,7 +6,7 @@ requires = [
"cymem>=2.0.2,<2.1.0",
"preshed>=3.0.2,<3.1.0",
"murmurhash>=0.28.0,<1.1.0",
"thinc>=8.0.0a42,<8.0.0a50",
"thinc>=8.0.0a43,<8.0.0a50",
"blis>=0.4.0,<0.5.0",
"pytokenizations",
"pathy"

View File

@ -1,12 +1,12 @@
# Our libraries
cymem>=2.0.2,<2.1.0
preshed>=3.0.2,<3.1.0
thinc>=8.0.0a42,<8.0.0a50
thinc>=8.0.0a43,<8.0.0a50
blis>=0.4.0,<0.5.0
ml_datasets==0.2.0a0
murmurhash>=0.28.0,<1.1.0
wasabi>=0.8.0,<1.1.0
srsly>=2.1.0,<3.0.0
srsly>=2.3.0,<3.0.0
catalogue>=2.0.1,<2.1.0
typer>=0.3.0,<0.4.0
pathy

View File

@ -34,16 +34,16 @@ setup_requires =
cymem>=2.0.2,<2.1.0
preshed>=3.0.2,<3.1.0
murmurhash>=0.28.0,<1.1.0
thinc>=8.0.0a42,<8.0.0a50
thinc>=8.0.0a43,<8.0.0a50
install_requires =
# Our libraries
murmurhash>=0.28.0,<1.1.0
cymem>=2.0.2,<2.1.0
preshed>=3.0.2,<3.1.0
thinc>=8.0.0a42,<8.0.0a50
thinc>=8.0.0a43,<8.0.0a50
blis>=0.4.0,<0.5.0
wasabi>=0.8.0,<1.1.0
srsly>=2.1.0,<3.0.0
srsly>=2.3.0,<3.0.0
catalogue>=2.0.1,<2.1.0
typer>=0.3.0,<0.4.0
pathy
@ -66,6 +66,8 @@ console_scripts =
[options.extras_require]
lookups =
spacy_lookups_data==1.0.0rc0
transformers =
spacy_transformers>=1.0.0a17,<1.0.0
cuda =
cupy>=5.0.0b4,<9.0.0
cuda80 =
@ -84,7 +86,7 @@ cuda102 =
cupy-cuda102>=5.0.0b4,<9.0.0
# Language tokenizers with external dependencies
ja =
sudachipy>=0.4.5
sudachipy>=0.4.9
sudachidict_core>=20200330
ko =
natto-py==0.9.0

View File

@ -1,6 +1,6 @@
# fmt: off
__title__ = "spacy-nightly"
__version__ = "3.0.0a28"
__version__ = "3.0.0a29"
__release__ = True
__download_url__ = "https://github.com/explosion/spacy-models/releases/download"
__compatibility__ = "https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json"

View File

@ -37,6 +37,22 @@ tokenizer_config = {"use_fast": true}
window = 128
stride = 96
{% if "morphologizer" in components %}
[components.morphologizer]
factory = "morphologizer"
[components.morphologizer.model]
@architectures = "spacy.Tagger.v1"
nO = null
[components.morphologizer.model.tok2vec]
@architectures = "spacy-transformers.TransformerListener.v1"
grad_factor = 1.0
[components.morphologizer.model.tok2vec.pooling]
@layers = "reduce_mean.v1"
{%- endif %}
{% if "tagger" in components %}
[components.tagger]
factory = "tagger"
@ -166,6 +182,19 @@ depth = {{ 4 if optimize == "efficiency" else 8 }}
window_size = 1
maxout_pieces = 3
{% if "morphologizer" in components %}
[components.morphologizer]
factory = "morphologizer"
[components.morphologizer.model]
@architectures = "spacy.Tagger.v1"
nO = null
[components.morphologizer.model.tok2vec]
@architectures = "spacy.Tok2VecListener.v1"
width = ${components.tok2vec.model.encode.width}
{%- endif %}
{% if "tagger" in components %}
[components.tagger]
factory = "tagger"
@ -257,7 +286,7 @@ no_output_layer = false
{% endif %}
{% for pipe in components %}
{% if pipe not in ["tagger", "parser", "ner", "textcat", "entity_linker"] %}
{% if pipe not in ["tagger", "morphologizer", "parser", "ner", "textcat", "entity_linker"] %}
{# Other components defined by the user: we just assume they're factories #}
[components.{{ pipe }}]
factory = "{{ pipe }}"

View File

@ -34,7 +34,7 @@ learn_rate = 0.001
[corpora]
[corpora.pretrain]
@readers = "spacy.JsonlReader.v1"
@readers = "spacy.JsonlCorpus.v1"
path = ${paths.raw_text}
min_length = 5
max_length = 500

View File

@ -419,7 +419,7 @@ class Errors:
E164 = ("x is neither increasing nor decreasing: {}.")
E165 = ("Only one class present in y_true. ROC AUC score is not defined in "
"that case.")
E166 = ("Can only merge DocBins with the same pre-defined attributes.\n"
E166 = ("Can only merge DocBins with the same value for '{param}'.\n"
"Current DocBin: {current}\nOther DocBin: {other}")
E169 = ("Can't find module: {module}")
E170 = ("Cannot apply transition {name}: invalid for the current state.")
@ -477,12 +477,8 @@ class Errors:
E201 = ("Span index out of range.")
# TODO: fix numbering after merging develop into master
E912 = ("No orth_variants lookups table for data augmentation available for "
"language '{lang}'. If orth_variants are available in "
"spacy-lookups-data, make sure the package is installed and the "
"table is loaded in the [initialize.lookups] block of your config. "
"Alternatively, you can provide your own Lookups object with a "
"table orth_variants as the argument 'lookuos' of the augmenter.")
E912 = ("Failed to initialize lemmatizer. Missing lemmatizer table(s) found "
"for mode '{mode}'. Required tables: {tables}. Found: {found}.")
E913 = ("Corpus path can't be None. Maybe you forgot to define it in your "
"config.cfg or override it on the CLI?")
E914 = ("Executing {name} callback failed. Expected the function to "
@ -562,10 +558,10 @@ class Errors:
E953 = ("Mismatched IDs received by the Tok2Vec listener: {id1} vs. {id2}")
E954 = ("The Tok2Vec listener did not receive any valid input from an upstream "
"component.")
E955 = ("Can't find table(s) '{table}' for language '{lang}' in "
"spacy-lookups-data. If you want to initialize a blank nlp object, "
"make sure you have the spacy-lookups-data package installed or "
"remove the [initialize.lookups] block from your config.")
E955 = ("Can't find table(s) {table} for language '{lang}' in "
"spacy-lookups-data. Make sure you have the package installed or "
"provide your own lookup tables if no default lookups are available "
"for your language.")
E956 = ("Can't find component '{name}' in [components] block in the config. "
"Available components: {opts}")
E957 = ("Writing directly to Language.factories isn't needed anymore in "
@ -691,9 +687,8 @@ class Errors:
E1002 = ("Span index out of range.")
E1003 = ("Unsupported lemmatizer mode '{mode}'.")
E1004 = ("Missing lemmatizer table(s) found for lemmatizer mode '{mode}'. "
"Required tables '{tables}', found '{found}'. If you are not "
"providing custom lookups, make sure you have the package "
"spacy-lookups-data installed.")
"Required tables: {tables}. Found: {found}. Maybe you forgot to "
"call nlp.initialize() to load in the data?")
E1005 = ("Unable to set attribute '{attr}' in tokenizer exception for "
"'{chunk}'. Tokenizer exceptions are only allowed to specify "
"`ORTH` and `NORM`.")

View File

@ -4,7 +4,6 @@ from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES, TOKENIZER_INFIXES
from .stop_words import STOP_WORDS
from ...language import Language
from ...lookups import Lookups
from ...pipeline import Lemmatizer
@ -24,18 +23,11 @@ class Bengali(Language):
@Bengali.factory(
"lemmatizer",
assigns=["token.lemma"],
default_config={"model": None, "mode": "rule", "lookups": None},
default_config={"model": None, "mode": "rule"},
default_score_weights={"lemma_acc": 1.0},
)
def make_lemmatizer(
nlp: Language,
model: Optional[Model],
name: str,
mode: str,
lookups: Optional[Lookups],
):
lookups = Lemmatizer.load_lookups(nlp.lang, mode, lookups)
return Lemmatizer(nlp.vocab, model, name, mode=mode, lookups=lookups)
def make_lemmatizer(nlp: Language, model: Optional[Model], name: str, mode: str):
return Lemmatizer(nlp.vocab, model, name, mode=mode)
__all__ = ["Bengali"]

View File

@ -7,7 +7,6 @@ from .lex_attrs import LEX_ATTRS
from .syntax_iterators import SYNTAX_ITERATORS
from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES, TOKENIZER_INFIXES
from .lemmatizer import GreekLemmatizer
from ...lookups import Lookups
from ...language import Language
@ -29,18 +28,11 @@ class Greek(Language):
@Greek.factory(
"lemmatizer",
assigns=["token.lemma"],
default_config={"model": None, "mode": "rule", "lookups": None},
default_config={"model": None, "mode": "rule"},
default_score_weights={"lemma_acc": 1.0},
)
def make_lemmatizer(
nlp: Language,
model: Optional[Model],
name: str,
mode: str,
lookups: Optional[Lookups],
):
lookups = GreekLemmatizer.load_lookups(nlp.lang, mode, lookups)
return GreekLemmatizer(nlp.vocab, model, name, mode=mode, lookups=lookups)
def make_lemmatizer(nlp: Language, model: Optional[Model], name: str, mode: str):
return GreekLemmatizer(nlp.vocab, model, name, mode=mode)
__all__ = ["Greek"]

View File

@ -8,7 +8,6 @@ from .syntax_iterators import SYNTAX_ITERATORS
from .punctuation import TOKENIZER_INFIXES
from .lemmatizer import EnglishLemmatizer
from ...language import Language
from ...lookups import Lookups
class EnglishDefaults(Language.Defaults):
@ -27,18 +26,11 @@ class English(Language):
@English.factory(
"lemmatizer",
assigns=["token.lemma"],
default_config={"model": None, "mode": "rule", "lookups": None},
default_config={"model": None, "mode": "rule"},
default_score_weights={"lemma_acc": 1.0},
)
def make_lemmatizer(
nlp: Language,
model: Optional[Model],
name: str,
mode: str,
lookups: Optional[Lookups],
):
lookups = EnglishLemmatizer.load_lookups(nlp.lang, mode, lookups)
return EnglishLemmatizer(nlp.vocab, model, name, mode=mode, lookups=lookups)
def make_lemmatizer(nlp: Language, model: Optional[Model], name: str, mode: str):
return EnglishLemmatizer(nlp.vocab, model, name, mode=mode)
__all__ = ["English"]

View File

@ -6,7 +6,6 @@ from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .punctuation import TOKENIZER_SUFFIXES
from .syntax_iterators import SYNTAX_ITERATORS
from ...language import Language
from ...lookups import Lookups
from ...pipeline import Lemmatizer
@ -27,18 +26,11 @@ class Persian(Language):
@Persian.factory(
"lemmatizer",
assigns=["token.lemma"],
default_config={"model": None, "mode": "rule", "lookups": None},
default_config={"model": None, "mode": "rule"},
default_score_weights={"lemma_acc": 1.0},
)
def make_lemmatizer(
nlp: Language,
model: Optional[Model],
name: str,
mode: str,
lookups: Optional[Lookups],
):
lookups = Lemmatizer.load_lookups(nlp.lang, mode, lookups)
return Lemmatizer(nlp.vocab, model, name, mode=mode, lookups=lookups)
def make_lemmatizer(nlp: Language, model: Optional[Model], name: str, mode: str):
return Lemmatizer(nlp.vocab, model, name, mode=mode)
__all__ = ["Persian"]

View File

@ -9,7 +9,6 @@ from .stop_words import STOP_WORDS
from .lex_attrs import LEX_ATTRS
from .syntax_iterators import SYNTAX_ITERATORS
from .lemmatizer import FrenchLemmatizer
from ...lookups import Lookups
from ...language import Language
@ -32,18 +31,11 @@ class French(Language):
@French.factory(
"lemmatizer",
assigns=["token.lemma"],
default_config={"model": None, "mode": "rule", "lookups": None},
default_config={"model": None, "mode": "rule"},
default_score_weights={"lemma_acc": 1.0},
)
def make_lemmatizer(
nlp: Language,
model: Optional[Model],
name: str,
mode: str,
lookups: Optional[Lookups],
):
lookups = FrenchLemmatizer.load_lookups(nlp.lang, mode, lookups)
return FrenchLemmatizer(nlp.vocab, model, name, mode=mode, lookups=lookups)
def make_lemmatizer(nlp: Language, model: Optional[Model], name: str, mode: str):
return FrenchLemmatizer(nlp.vocab, model, name, mode=mode)
__all__ = ["French"]

View File

@ -6,7 +6,6 @@ from .punctuation import TOKENIZER_SUFFIXES
from .stop_words import STOP_WORDS
from .syntax_iterators import SYNTAX_ITERATORS
from ...language import Language
from ...lookups import Lookups
from ...pipeline import Lemmatizer
@ -27,18 +26,11 @@ class Norwegian(Language):
@Norwegian.factory(
"lemmatizer",
assigns=["token.lemma"],
default_config={"model": None, "mode": "rule", "lookups": None},
default_config={"model": None, "mode": "rule"},
default_score_weights={"lemma_acc": 1.0},
)
def make_lemmatizer(
nlp: Language,
model: Optional[Model],
name: str,
mode: str,
lookups: Optional[Lookups],
):
lookups = Lemmatizer.load_lookups(nlp.lang, mode, lookups)
return Lemmatizer(nlp.vocab, model, name, mode=mode, lookups=lookups)
def make_lemmatizer(nlp: Language, model: Optional[Model], name: str, mode: str):
return Lemmatizer(nlp.vocab, model, name, mode=mode)
__all__ = ["Norwegian"]

View File

@ -1,5 +1,4 @@
from typing import Optional
from thinc.api import Model
from .stop_words import STOP_WORDS
@ -8,7 +7,6 @@ from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_INFIXES
from .punctuation import TOKENIZER_SUFFIXES
from .lemmatizer import DutchLemmatizer
from ...lookups import Lookups
from ...language import Language
@ -29,18 +27,11 @@ class Dutch(Language):
@Dutch.factory(
"lemmatizer",
assigns=["token.lemma"],
default_config={"model": None, "mode": "rule", "lookups": None},
default_config={"model": None, "mode": "rule"},
default_score_weights={"lemma_acc": 1.0},
)
def make_lemmatizer(
nlp: Language,
model: Optional[Model],
name: str,
mode: str,
lookups: Optional[Lookups],
):
lookups = DutchLemmatizer.load_lookups(nlp.lang, mode, lookups)
return DutchLemmatizer(nlp.vocab, model, name, mode=mode, lookups=lookups)
def make_lemmatizer(nlp: Language, model: Optional[Model], name: str, mode: str):
return DutchLemmatizer(nlp.vocab, model, name, mode=mode)
__all__ = ["Dutch"]

View File

@ -34,18 +34,11 @@ class Polish(Language):
@Polish.factory(
"lemmatizer",
assigns=["token.lemma"],
default_config={"model": None, "mode": "pos_lookup", "lookups": None},
default_config={"model": None, "mode": "pos_lookup"},
default_score_weights={"lemma_acc": 1.0},
)
def make_lemmatizer(
nlp: Language,
model: Optional[Model],
name: str,
mode: str,
lookups: Optional[Lookups],
):
lookups = PolishLemmatizer.load_lookups(nlp.lang, mode, lookups)
return PolishLemmatizer(nlp.vocab, model, name, mode=mode, lookups=lookups)
def make_lemmatizer(nlp: Language, model: Optional[Model], name: str, mode: str):
return PolishLemmatizer(nlp.vocab, model, name, mode=mode)
__all__ = ["Polish"]

View File

@ -6,7 +6,6 @@ from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .lex_attrs import LEX_ATTRS
from .lemmatizer import RussianLemmatizer
from ...language import Language
from ...lookups import Lookups
class RussianDefaults(Language.Defaults):
@ -23,17 +22,11 @@ class Russian(Language):
@Russian.factory(
"lemmatizer",
assigns=["token.lemma"],
default_config={"model": None, "mode": "pymorphy2", "lookups": None},
default_config={"model": None, "mode": "pymorphy2"},
default_score_weights={"lemma_acc": 1.0},
)
def make_lemmatizer(
nlp: Language,
model: Optional[Model],
name: str,
mode: str,
lookups: Optional[Lookups],
):
return RussianLemmatizer(nlp.vocab, model, name, mode=mode, lookups=lookups)
def make_lemmatizer(nlp: Language, model: Optional[Model], name: str, mode: str):
return RussianLemmatizer(nlp.vocab, model, name, mode=mode)
__all__ = ["Russian"]

View File

@ -5,7 +5,6 @@ from .stop_words import STOP_WORDS
from .lex_attrs import LEX_ATTRS
from .syntax_iterators import SYNTAX_ITERATORS
from ...language import Language
from ...lookups import Lookups
from ...pipeline import Lemmatizer
@ -30,18 +29,11 @@ class Swedish(Language):
@Swedish.factory(
"lemmatizer",
assigns=["token.lemma"],
default_config={"model": None, "mode": "rule", "lookups": None},
default_config={"model": None, "mode": "rule"},
default_score_weights={"lemma_acc": 1.0},
)
def make_lemmatizer(
nlp: Language,
model: Optional[Model],
name: str,
mode: str,
lookups: Optional[Lookups],
):
lookups = Lemmatizer.load_lookups(nlp.lang, mode, lookups)
return Lemmatizer(nlp.vocab, model, name, mode=mode, lookups=lookups)
def make_lemmatizer(nlp: Language, model: Optional[Model], name: str, mode: str):
return Lemmatizer(nlp.vocab, model, name, mode=mode)
__all__ = ["Swedish"]

View File

@ -7,7 +7,6 @@ from .stop_words import STOP_WORDS
from .lex_attrs import LEX_ATTRS
from .lemmatizer import UkrainianLemmatizer
from ...language import Language
from ...lookups import Lookups
class UkrainianDefaults(Language.Defaults):
@ -24,17 +23,11 @@ class Ukrainian(Language):
@Ukrainian.factory(
"lemmatizer",
assigns=["token.lemma"],
default_config={"model": None, "mode": "pymorphy2", "lookups": None},
default_config={"model": None, "mode": "pymorphy2"},
default_score_weights={"lemma_acc": 1.0},
)
def make_lemmatizer(
nlp: Language,
model: Optional[Model],
name: str,
mode: str,
lookups: Optional[Lookups],
):
return UkrainianLemmatizer(nlp.vocab, model, name, mode=mode, lookups=lookups)
def make_lemmatizer(nlp: Language, model: Optional[Model], name: str, mode: str):
return UkrainianLemmatizer(nlp.vocab, model, name, mode=mode)
__all__ = ["Ukrainian"]

View File

@ -1,7 +1,8 @@
from typing import List, Union, Callable, Tuple
from thinc.types import Ints2d, Doc
from thinc.types import Ints2d
from thinc.api import Model, registry
from ..tokens import Doc
@registry.layers("spacy.FeatureExtractor.v1")
@ -9,7 +10,9 @@ def FeatureExtractor(columns: List[Union[int, str]]) -> Model[List[Doc], List[In
return Model("extract_features", forward, attrs={"columns": columns})
def forward(model: Model[List[Doc], List[Ints2d]], docs, is_train: bool) -> Tuple[List[Ints2d], Callable]:
def forward(
model: Model[List[Doc], List[Ints2d]], docs, is_train: bool
) -> Tuple[List[Ints2d], Callable]:
columns = model.attrs["columns"]
features: List[Ints2d] = []
for doc in docs:

View File

@ -1,4 +1,4 @@
from typing import Optional, List
from typing import Optional, List, Union
from thinc.types import Floats2d
from thinc.api import chain, clone, concatenate, with_array, with_padded
from thinc.api import Model, noop, list2ragged, ragged2list, HashEmbed
@ -10,7 +10,7 @@ from ...ml import _character_embed
from ..staticvectors import StaticVectors
from ..featureextractor import FeatureExtractor
from ...pipeline.tok2vec import Tok2VecListener
from ...attrs import ORTH, NORM, PREFIX, SUFFIX, SHAPE
from ...attrs import ORTH, LOWER, PREFIX, SUFFIX, SHAPE, intify_attr
@registry.architectures.register("spacy.Tok2VecListener.v1")
@ -98,7 +98,7 @@ def MultiHashEmbed(
attributes using hash embedding, concatenates the results, and passes it
through a feed-forward subnetwork to build a mixed representations.
The features used are the NORM, PREFIX, SUFFIX and SHAPE, which can have
The features used are the LOWER, PREFIX, SUFFIX and SHAPE, which can have
varying definitions depending on the Vocab of the Doc object passed in.
Vectors from pretrained static vectors can also be incorporated into the
concatenated representation.
@ -115,7 +115,7 @@ def MultiHashEmbed(
also_use_static_vectors (bool): Whether to also use static word vectors.
Requires a vectors table to be loaded in the Doc objects' vocab.
"""
cols = [NORM, PREFIX, SUFFIX, SHAPE, ORTH]
cols = [LOWER, PREFIX, SUFFIX, SHAPE, ORTH]
seed = 7
def make_hash_embed(feature):
@ -123,7 +123,7 @@ def MultiHashEmbed(
seed += 1
return HashEmbed(
width,
rows if feature == NORM else rows // 2,
rows if feature == LOWER else rows // 2,
column=cols.index(feature),
seed=seed,
dropout=0.0,
@ -131,13 +131,13 @@ def MultiHashEmbed(
if also_embed_subwords:
embeddings = [
make_hash_embed(NORM),
make_hash_embed(LOWER),
make_hash_embed(PREFIX),
make_hash_embed(SUFFIX),
make_hash_embed(SHAPE),
]
else:
embeddings = [make_hash_embed(NORM)]
embeddings = [make_hash_embed(LOWER)]
concat_size = width * (len(embeddings) + also_use_static_vectors)
if also_use_static_vectors:
model = chain(
@ -165,7 +165,8 @@ def MultiHashEmbed(
@registry.architectures.register("spacy.CharacterEmbed.v1")
def CharacterEmbed(
width: int, rows: int, nM: int, nC: int, also_use_static_vectors: bool
width: int, rows: int, nM: int, nC: int, also_use_static_vectors: bool,
feature: Union[int, str]="LOWER"
):
"""Construct an embedded representation based on character embeddings, using
a feed-forward network. A fixed number of UTF-8 byte characters are used for
@ -179,12 +180,13 @@ def CharacterEmbed(
of being in an arbitrary position depending on the word length.
The characters are embedded in a embedding table with a given number of rows,
and the vectors concatenated. A hash-embedded vector of the NORM of the word is
and the vectors concatenated. A hash-embedded vector of the LOWER of the word is
also concatenated on, and the result is then passed through a feed-forward
network to construct a single vector to represent the information.
width (int): The width of the output vector and the NORM hash embedding.
rows (int): The number of rows in the NORM hash embedding table.
feature (int or str): An attribute to embed, to concatenate with the characters.
width (int): The width of the output vector and the feature embedding.
rows (int): The number of rows in the LOWER hash embedding table.
nM (int): The dimensionality of the character embeddings. Recommended values
are between 16 and 64.
nC (int): The number of UTF-8 bytes to embed per word. Recommended values
@ -193,12 +195,15 @@ def CharacterEmbed(
also_use_static_vectors (bool): Whether to also use static word vectors.
Requires a vectors table to be loaded in the Doc objects' vocab.
"""
feature = intify_attr(feature)
if feature is None:
raise ValueError("Invalid feature: Must be a token attribute.")
if also_use_static_vectors:
model = chain(
concatenate(
chain(_character_embed.CharacterEmbed(nM=nM, nC=nC), list2ragged()),
chain(
FeatureExtractor([NORM]),
FeatureExtractor([feature]),
list2ragged(),
with_array(HashEmbed(nO=width, nV=rows, column=0, seed=5)),
),
@ -214,7 +219,7 @@ def CharacterEmbed(
concatenate(
chain(_character_embed.CharacterEmbed(nM=nM, nC=nC), list2ragged()),
chain(
FeatureExtractor([NORM]),
FeatureExtractor([feature]),
list2ragged(),
with_array(HashEmbed(nO=width, nV=rows, column=0, seed=5)),
),
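
A hedged sketch of constructing the updated `spacy.CharacterEmbed.v1` architecture with the new `feature` argument; the sizes below are illustrative, not spaCy's defaults:

```python
from spacy.ml.models.tok2vec import CharacterEmbed

# "LOWER" mirrors the new default; any attribute accepted by intify_attr works.
model = CharacterEmbed(
    width=128, rows=7000, nM=64, nC=8,
    also_use_static_vectors=False, feature="LOWER",
)
```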

View File

@ -1,26 +1,25 @@
from typing import Optional, List, Dict, Any
from typing import Optional, List, Dict, Any, Callable, Iterable, Iterator, Union
from typing import Tuple
from thinc.api import Model
from pathlib import Path
from .pipe import Pipe
from ..errors import Errors
from ..language import Language
from ..training import Example
from ..lookups import Lookups, load_lookups
from ..scorer import Scorer
from ..tokens import Doc, Token
from ..vocab import Vocab
from ..training import validate_examples
from ..util import logger, SimpleFrozenList
from .. import util
@Language.factory(
"lemmatizer",
assigns=["token.lemma"],
default_config={
"model": None,
"mode": "lookup",
"lookups": None,
"overwrite": False,
},
default_config={"model": None, "mode": "lookup", "overwrite": False},
default_score_weights={"lemma_acc": 1.0},
)
def make_lemmatizer(
@ -28,13 +27,9 @@ def make_lemmatizer(
model: Optional[Model],
name: str,
mode: str,
lookups: Optional[Lookups],
overwrite: bool = False,
):
lookups = Lemmatizer.load_lookups(nlp.lang, mode, lookups)
return Lemmatizer(
nlp.vocab, model, name, mode=mode, lookups=lookups, overwrite=overwrite
)
return Lemmatizer(nlp.vocab, model, name, mode=mode, overwrite=overwrite)
class Lemmatizer(Pipe):
@ -46,59 +41,19 @@ class Lemmatizer(Pipe):
"""
@classmethod
def get_lookups_config(cls, mode: str) -> Dict:
def get_lookups_config(cls, mode: str) -> Tuple[List[str], List[str]]:
"""Returns the lookups configuration settings for a given mode for use
in Lemmatizer.load_lookups.
mode (str): The lemmatizer mode.
RETURNS (dict): The lookups configuration settings for this mode.
DOCS: https://nightly.spacy.io/api/lemmatizer#get_lookups_config
RETURNS (Tuple[List[str], List[str]]): The required and optional
lookup tables for this mode.
"""
if mode == "lookup":
return {
"required_tables": ["lemma_lookup"],
}
return (["lemma_lookup"], [])
elif mode == "rule":
return {
"required_tables": ["lemma_rules"],
"optional_tables": ["lemma_exc", "lemma_index"],
}
return {}
@classmethod
def load_lookups(cls, lang: str, mode: str, lookups: Optional[Lookups]) -> Lookups:
"""Load and validate lookups tables. If the provided lookups is None,
load the default lookups tables according to the language and mode
settings. Confirm that all required tables for the language and mode
are present.
lang (str): The language code.
mode (str): The lemmatizer mode.
lookups (Lookups): The provided lookups, may be None if the default
lookups should be loaded.
RETURNS (Lookups): The Lookups object.
DOCS: https://nightly.spacy.io/api/lemmatizer#get_lookups_config
"""
config = cls.get_lookups_config(mode)
required_tables = config.get("required_tables", [])
optional_tables = config.get("optional_tables", [])
if lookups is None:
lookups = load_lookups(lang=lang, tables=required_tables)
optional_lookups = load_lookups(
lang=lang, tables=optional_tables, strict=False
)
for table in optional_lookups.tables:
lookups.set_table(table, optional_lookups.get_table(table))
for table in required_tables:
if table not in lookups:
raise ValueError(
Errors.E1004.format(
mode=mode, tables=required_tables, found=lookups.tables
)
)
return lookups
return (["lemma_rules"], ["lemma_exc", "lemma_index"])
return ([], [])
def __init__(
self,
@ -107,7 +62,6 @@ class Lemmatizer(Pipe):
name: str = "lemmatizer",
*,
mode: str = "lookup",
lookups: Optional[Lookups] = None,
overwrite: bool = False,
) -> None:
"""Initialize a Lemmatizer.
@ -116,9 +70,6 @@ class Lemmatizer(Pipe):
model (Model): A model (not yet implemented).
name (str): The component name. Defaults to "lemmatizer".
mode (str): The lemmatizer mode: "lookup", "rule". Defaults to "lookup".
lookups (Lookups): The lookups object containing the (optional) tables
such as "lemma_rules", "lemma_index", "lemma_exc" and
"lemma_lookup". Defaults to None
overwrite (bool): Whether to overwrite existing lemmas. Defaults to
`False`.
@ -128,8 +79,9 @@ class Lemmatizer(Pipe):
self.model = model
self.name = name
self._mode = mode
self.lookups = lookups if lookups is not None else Lookups()
self.lookups = Lookups()
self.overwrite = overwrite
self._validated = False
if self.mode == "lookup":
self.lemmatize = self.lookup_lemmatize
elif self.mode == "rule":
@ -153,12 +105,56 @@ class Lemmatizer(Pipe):
DOCS: https://nightly.spacy.io/api/lemmatizer#call
"""
if not self._validated:
self._validate_tables(Errors.E1004)
for token in doc:
if self.overwrite or token.lemma == 0:
token.lemma_ = self.lemmatize(token)[0]
return doc
def pipe(self, stream, *, batch_size=128):
def initialize(
self,
get_examples: Optional[Callable[[], Iterable[Example]]] = None,
*,
nlp: Optional[Language] = None,
lookups: Optional[Lookups] = None,
):
"""Initialize the lemmatizer and load in data.
get_examples (Callable[[], Iterable[Example]]): Function that
returns a representative sample of gold-standard Example objects.
nlp (Language): The current nlp object the component is part of.
lookups (Lookups): The lookups object containing the (optional) tables
such as "lemma_rules", "lemma_index", "lemma_exc" and
"lemma_lookup". Defaults to None.
"""
required_tables, optional_tables = self.get_lookups_config(self.mode)
if lookups is None:
logger.debug("Lemmatizer: loading tables from spacy-lookups-data")
lookups = load_lookups(lang=self.vocab.lang, tables=required_tables)
optional_lookups = load_lookups(
lang=self.vocab.lang, tables=optional_tables, strict=False
)
for table in optional_lookups.tables:
lookups.set_table(table, optional_lookups.get_table(table))
self.lookups = lookups
self._validate_tables(Errors.E1004)
def _validate_tables(self, error_message: str = Errors.E912) -> None:
"""Check that the lookups are correct for the current mode."""
required_tables, optional_tables = self.get_lookups_config(self.mode)
for table in required_tables:
if table not in self.lookups:
raise ValueError(
error_message.format(
mode=self.mode,
tables=required_tables,
found=self.lookups.tables,
)
)
self._validated = True
def pipe(self, stream: Iterable[Doc], *, batch_size: int = 128) -> Iterator[Doc]:
"""Apply the pipe to a stream of documents. This usually happens under
the hood when the nlp object is called on a text and all components are
applied to the Doc.
@ -263,7 +259,7 @@ class Lemmatizer(Pipe):
"""
return False
def score(self, examples, **kwargs) -> Dict[str, Any]:
def score(self, examples: Iterable[Example], **kwargs) -> Dict[str, Any]:
"""Score a batch of examples.
examples (Iterable[Example]): The examples to score.
@ -274,58 +270,66 @@ class Lemmatizer(Pipe):
validate_examples(examples, "Lemmatizer.score")
return Scorer.score_token_attr(examples, "lemma", **kwargs)
def to_disk(self, path, *, exclude=tuple()):
"""Save the current state to a directory.
def to_disk(
self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList()
):
"""Serialize the pipe to disk.
path (unicode or Path): A path to a directory, which will be created if
it doesn't exist.
exclude (list): String names of serialization fields to exclude.
path (str / Path): Path to a directory.
exclude (Iterable[str]): String names of serialization fields to exclude.
DOCS: https://nightly.spacy.io/api/vocab#to_disk
DOCS: https://nightly.spacy.io/api/lemmatizer#to_disk
"""
serialize = {}
serialize["vocab"] = lambda p: self.vocab.to_disk(p)
serialize["lookups"] = lambda p: self.lookups.to_disk(p)
util.to_disk(path, serialize, exclude)
def from_disk(self, path, *, exclude=tuple()):
"""Loads state from a directory. Modifies the object in place and
returns it.
def from_disk(
self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList()
) -> "Lemmatizer":
"""Load the pipe from disk. Modifies the object in place and returns it.
path (unicode or Path): A path to a directory.
exclude (list): String names of serialization fields to exclude.
RETURNS (Vocab): The modified `Vocab` object.
path (str / Path): Path to a directory.
exclude (Iterable[str]): String names of serialization fields to exclude.
RETURNS (Lemmatizer): The modified Lemmatizer object.
DOCS: https://nightly.spacy.io/api/vocab#to_disk
DOCS: https://nightly.spacy.io/api/lemmatizer#from_disk
"""
deserialize = {}
deserialize["vocab"] = lambda p: self.vocab.from_disk(p)
deserialize["lookups"] = lambda p: self.lookups.from_disk(p)
util.from_disk(path, deserialize, exclude)
self._validate_tables()
return self
def to_bytes(self, *, exclude=tuple()) -> bytes:
"""Serialize the current state to a binary string.
def to_bytes(self, *, exclude: Iterable[str] = SimpleFrozenList()) -> bytes:
"""Serialize the pipe to a bytestring.
exclude (list): String names of serialization fields to exclude.
RETURNS (bytes): The serialized form of the `Vocab` object.
exclude (Iterable[str]): String names of serialization fields to exclude.
RETURNS (bytes): The serialized object.
DOCS: https://nightly.spacy.io/api/vocab#to_bytes
DOCS: https://nightly.spacy.io/api/lemmatizer#to_bytes
"""
serialize = {}
serialize["vocab"] = self.vocab.to_bytes
serialize["lookups"] = self.lookups.to_bytes
return util.to_bytes(serialize, exclude)
def from_bytes(self, bytes_data: bytes, *, exclude=tuple()):
"""Load state from a binary string.
def from_bytes(
self, bytes_data: bytes, *, exclude: Iterable[str] = SimpleFrozenList()
) -> "Lemmatizer":
"""Load the pipe from a bytestring.
bytes_data (bytes): The data to load from.
exclude (list): String names of serialization fields to exclude.
RETURNS (Vocab): The `Vocab` object.
bytes_data (bytes): The serialized pipe.
exclude (Iterable[str]): String names of serialization fields to exclude.
RETURNS (Lemmatizer): The loaded Lemmatizer.
DOCS: https://nightly.spacy.io/api/vocab#from_bytes
DOCS: https://nightly.spacy.io/api/lemmatizer#from_bytes
"""
deserialize = {}
deserialize["vocab"] = lambda b: self.vocab.from_bytes(b)
deserialize["lookups"] = lambda b: self.lookups.from_bytes(b)
util.from_bytes(bytes_data, deserialize, exclude)
self._validate_tables()
return self
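
Instead of the removed `lookups` config setting, custom tables can now be passed to `initialize` directly. A sketch mirroring the updated tests, with an illustrative one-entry table:

```python
from spacy.lang.en import English
from spacy.lookups import Lookups

lookups = Lookups()
lookups.add_table("lemma_lookup", {"dogs": "dog"})  # illustrative table

nlp = English()
lemmatizer = nlp.add_pipe("lemmatizer", config={"mode": "lookup"})
# Calling the pipeline before initialization raises E1004 via _validate_tables.
lemmatizer.initialize(lookups=lookups)
assert nlp("dogs")[0].lemma_ == "dog"
```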

View File

@ -282,7 +282,7 @@ class ModelMetaSchema(BaseModel):
sources: Optional[Union[List[StrictStr], List[Dict[str, str]]]] = Field(None, title="Training data sources")
vectors: Dict[str, Any] = Field({}, title="Included word vectors")
labels: Dict[str, List[str]] = Field({}, title="Component labels, keyed by component name")
performance: Dict[str, Union[float, Dict[str, float]]] = Field({}, title="Accuracy and speed numbers")
performance: Dict[str, Union[float, Dict[str, Union[float, dict]]]] = Field({}, title="Accuracy and speed numbers")
spacy_git_version: StrictStr = Field("", title="Commit of spaCy version used")
# fmt: on

View File

@ -77,7 +77,7 @@ def test_morph_property(tokenizer):
assert doc.to_array(["MORPH"])[0] != 0
# unset with token.morph
doc[0].set_morph(0)
doc[0].set_morph(None)
assert doc.to_array(["MORPH"])[0] == 0
# empty morph is equivalent to "_"

View File

@ -17,16 +17,31 @@ def test_lemmatizer_initialize(lang, capfd):
@registry.misc("lemmatizer_init_lookups")
def lemmatizer_init_lookups():
lookups = Lookups()
lookups.add_table("lemma_lookup", {"cope": "cope"})
lookups.add_table("lemma_lookup", {"cope": "cope", "x": "y"})
lookups.add_table("lemma_index", {"verb": ("cope", "cop")})
lookups.add_table("lemma_exc", {"verb": {"coping": ("cope",)}})
lookups.add_table("lemma_rules", {"verb": [["ing", ""]]})
return lookups
"""Test that languages can be initialized."""
# Test that languages can be initialized
nlp = get_lang_class(lang)()
nlp.add_pipe("lemmatizer", config={"lookups": {"@misc": "lemmatizer_init_lookups"}})
lemmatizer = nlp.add_pipe("lemmatizer", config={"mode": "lookup"})
assert not lemmatizer.lookups.tables
nlp.config["initialize"]["components"]["lemmatizer"] = {
"lookups": {"@misc": "lemmatizer_init_lookups"}
}
with pytest.raises(ValueError):
nlp("x")
nlp.initialize()
assert lemmatizer.lookups.tables
doc = nlp("x")
# Check for stray print statements (see #3342)
doc = nlp("test") # noqa: F841
captured = capfd.readouterr()
assert not captured.out
assert doc[0].lemma_ == "y"
# Test initialization by calling .initialize() directly
nlp = get_lang_class(lang)()
lemmatizer = nlp.add_pipe("lemmatizer", config={"mode": "lookup"})
lemmatizer.initialize(lookups=lemmatizer_init_lookups())
assert nlp("x")[0].lemma_ == "y"

View File

@ -8,61 +8,52 @@ from ..util import make_tempdir
@pytest.fixture
def nlp():
return English()
@pytest.fixture
def lemmatizer(nlp):
@registry.misc("cope_lookups")
def cope_lookups():
lookups = Lookups()
lookups.add_table("lemma_lookup", {"cope": "cope"})
lookups.add_table("lemma_lookup", {"cope": "cope", "coped": "cope"})
lookups.add_table("lemma_index", {"verb": ("cope", "cop")})
lookups.add_table("lemma_exc", {"verb": {"coping": ("cope",)}})
lookups.add_table("lemma_rules", {"verb": [["ing", ""]]})
return lookups
lemmatizer = nlp.add_pipe(
"lemmatizer", config={"mode": "rule", "lookups": {"@misc": "cope_lookups"}}
)
return lemmatizer
nlp = English()
nlp.config["initialize"]["components"]["lemmatizer"] = {
"lookups": {"@misc": "cope_lookups"}
}
return nlp
def test_lemmatizer_init(nlp):
@registry.misc("cope_lookups")
def cope_lookups():
lookups = Lookups()
lookups.add_table("lemma_lookup", {"cope": "cope"})
lookups.add_table("lemma_index", {"verb": ("cope", "cop")})
lookups.add_table("lemma_exc", {"verb": {"coping": ("cope",)}})
lookups.add_table("lemma_rules", {"verb": [["ing", ""]]})
return lookups
lemmatizer = nlp.add_pipe(
"lemmatizer", config={"mode": "lookup", "lookups": {"@misc": "cope_lookups"}}
)
lemmatizer = nlp.add_pipe("lemmatizer", config={"mode": "lookup"})
assert isinstance(lemmatizer.lookups, Lookups)
assert not lemmatizer.lookups.tables
assert lemmatizer.mode == "lookup"
with pytest.raises(ValueError):
nlp("test")
nlp.initialize()
assert lemmatizer.lookups.tables
assert nlp("cope")[0].lemma_ == "cope"
assert nlp("coped")[0].lemma_ == "cope"
# replace any tables from spacy-lookups-data
lemmatizer.lookups = Lookups()
doc = nlp("coping")
# lookup with no tables sets text as lemma
assert doc[0].lemma_ == "coping"
assert nlp("cope")[0].lemma_ == "cope"
assert nlp("coped")[0].lemma_ == "coped"
nlp.remove_pipe("lemmatizer")
@registry.misc("empty_lookups")
def empty_lookups():
return Lookups()
lemmatizer = nlp.add_pipe("lemmatizer", config={"mode": "lookup"})
with pytest.raises(ValueError):
nlp.add_pipe(
"lemmatizer",
config={"mode": "lookup", "lookups": {"@misc": "empty_lookups"}},
)
# Can't initialize without required tables
lemmatizer.initialize(lookups=Lookups())
lookups = Lookups()
lookups.add_table("lemma_lookup", {})
lemmatizer.initialize(lookups=lookups)
def test_lemmatizer_config(nlp, lemmatizer):
def test_lemmatizer_config(nlp):
lemmatizer = nlp.add_pipe("lemmatizer", config={"mode": "rule"})
nlp.initialize()
doc = nlp.make_doc("coping")
doc[0].pos_ = "VERB"
assert doc[0].lemma_ == ""
@ -78,20 +69,21 @@ def test_lemmatizer_config(nlp, lemmatizer):
assert doc[0].lemma_ == "cope"
def test_lemmatizer_serialize(nlp, lemmatizer):
@registry.misc("cope_lookups")
def test_lemmatizer_serialize(nlp):
lemmatizer = nlp.add_pipe("lemmatizer", config={"mode": "rule"})
nlp.initialize()
def cope_lookups():
lookups = Lookups()
lookups.add_table("lemma_lookup", {"cope": "cope"})
lookups.add_table("lemma_lookup", {"cope": "cope", "coped": "cope"})
lookups.add_table("lemma_index", {"verb": ("cope", "cop")})
lookups.add_table("lemma_exc", {"verb": {"coping": ("cope",)}})
lookups.add_table("lemma_rules", {"verb": [["ing", ""]]})
return lookups
nlp2 = English()
lemmatizer2 = nlp2.add_pipe(
"lemmatizer", config={"mode": "rule", "lookups": {"@misc": "cope_lookups"}}
)
lemmatizer2 = nlp2.add_pipe("lemmatizer", config={"mode": "rule"})
lemmatizer2.initialize(lookups=cope_lookups())
lemmatizer2.from_bytes(lemmatizer.to_bytes())
assert lemmatizer.to_bytes() == lemmatizer2.to_bytes()
assert lemmatizer.lookups.tables == lemmatizer2.lookups.tables

View File

@ -1,3 +1,6 @@
import pytest
from spacy.tokens.doc import Underscore
import spacy
from spacy.lang.en import English
from spacy.tokens import Doc, DocBin
@ -86,3 +89,20 @@ def test_serialize_doc_bin_unknown_spaces(en_vocab):
assert re_doc1.text == "that 's "
assert not re_doc2.has_unknown_spaces
assert re_doc2.text == "that's"
@pytest.mark.parametrize(
"writer_flag,reader_flag,reader_value", [(True, True, "bar"), (True, False, "bar"), (False, True, "nothing"), (False, False, "nothing")]
)
def test_serialize_custom_extension(en_vocab, writer_flag, reader_flag, reader_value):
"""Test that custom extensions are correctly serialized in DocBin."""
Doc.set_extension("foo", default="nothing")
doc = Doc(en_vocab, words=["hello", "world"])
doc._.foo = "bar"
doc_bin_1 = DocBin(store_user_data=writer_flag)
doc_bin_1.add(doc)
doc_bin_bytes = doc_bin_1.to_bytes()
doc_bin_2 = DocBin(store_user_data=reader_flag).from_bytes(doc_bin_bytes)
doc_2 = list(doc_bin_2.get_docs(en_vocab))[0]
assert doc_2._.foo == reader_value
Underscore.doc_extensions = {}

View File

@ -7,11 +7,11 @@ from spacy.training.converters import json_to_docs
from spacy.training.augment import create_orth_variants_augmenter
from spacy.lang.en import English
from spacy.tokens import Doc, DocBin
from spacy.lookups import Lookups
from spacy.util import get_words_and_spaces, minibatch
from thinc.api import compounding
import pytest
import srsly
import random
from ..util import make_tempdir
@ -504,9 +504,9 @@ def test_make_orth_variants(doc):
{"tags": [":"], "variants": ["-", "", "", "--", "---", "——"]},
]
}
lookups = Lookups()
lookups.add_table("orth_variants", orth_variants)
augmenter = create_orth_variants_augmenter(level=0.2, lower=0.5, lookups=lookups)
augmenter = create_orth_variants_augmenter(
level=0.2, lower=0.5, orth_variants=orth_variants
)
with make_tempdir() as tmpdir:
output_file = tmpdir / "roundtrip.spacy"
DocBin(docs=[doc]).to_disk(output_file)
@ -515,6 +515,39 @@ def test_make_orth_variants(doc):
list(reader(nlp))
@pytest.mark.filterwarnings("ignore::UserWarning")
def test_custom_data_augmentation(doc):
def create_spongebob_augmenter(randomize: bool = False):
def augment(nlp, example):
text = example.text
if randomize:
ch = [c.lower() if random.random() < 0.5 else c.upper() for c in text]
else:
ch = [c.lower() if i % 2 else c.upper() for i, c in enumerate(text)]
example_dict = example.to_dict()
doc = nlp.make_doc("".join(ch))
example_dict["token_annotation"]["ORTH"] = [t.text for t in doc]
yield example
yield example.from_dict(doc, example_dict)
return augment
nlp = English()
with make_tempdir() as tmpdir:
output_file = tmpdir / "roundtrip.spacy"
DocBin(docs=[doc]).to_disk(output_file)
reader = Corpus(output_file, augmenter=create_spongebob_augmenter())
corpus = list(reader(nlp))
orig_text = "Sarah 's sister flew to Silicon Valley via London . "
augmented = "SaRaH 's sIsTeR FlEw tO SiLiCoN VaLlEy vIa lOnDoN . "
assert corpus[0].text == orig_text
assert corpus[0].reference.text == orig_text
assert corpus[0].predicted.text == orig_text
assert corpus[1].text == augmented
assert corpus[1].reference.text == augmented
assert corpus[1].predicted.text == augmented
@pytest.mark.skip("Outdated")
@pytest.mark.parametrize(
"tokens_a,tokens_b,expected",

View File

@ -58,7 +58,7 @@ class DocBin:
attrs (Iterable[str]): List of attributes to serialize. 'orth' and
'spacy' are always serialized, so they're not required.
store_user_data (bool): Whether to include the `Doc.user_data`.
store_user_data (bool): Whether to write the `Doc.user_data` to bytes/file.
docs (Iterable[Doc]): Docs to add.
DOCS: https://nightly.spacy.io/api/docbin#init
@ -106,11 +106,12 @@ class DocBin:
self.strings.add(token.ent_type_)
self.strings.add(token.ent_kb_id_)
self.cats.append(doc.cats)
if self.store_user_data:
self.user_data.append(srsly.msgpack_dumps(doc.user_data))
def get_docs(self, vocab: Vocab) -> Iterator[Doc]:
"""Recover Doc objects from the annotations, using the given vocab.
Note that the user data of each doc will be read (if available) and returned,
regardless of the setting of 'self.store_user_data'.
vocab (Vocab): The shared vocab.
YIELDS (Doc): The Doc objects.
@ -129,7 +130,7 @@ class DocBin:
doc = Doc(vocab, words=tokens[:, orth_col], spaces=spaces)
doc = doc.from_array(self.attrs, tokens)
doc.cats = self.cats[i]
if self.store_user_data:
if i < len(self.user_data) and self.user_data[i] is not None:
user_data = srsly.msgpack_loads(self.user_data[i], use_list=False)
doc.user_data.update(user_data)
yield doc
@ -137,20 +138,30 @@ class DocBin:
def merge(self, other: "DocBin") -> None:
"""Extend the annotations of this DocBin with the annotations from
another. Will raise an error if the pre-defined attrs of the two
DocBins don't match.
DocBins don't match, or if they differ in whether or not to store
user data.
other (DocBin): The DocBin to merge into the current bin.
DOCS: https://nightly.spacy.io/api/docbin#merge
"""
if self.attrs != other.attrs:
raise ValueError(Errors.E166.format(current=self.attrs, other=other.attrs))
raise ValueError(
Errors.E166.format(param="attrs", current=self.attrs, other=other.attrs)
)
if self.store_user_data != other.store_user_data:
raise ValueError(
Errors.E166.format(
param="store_user_data",
current=self.store_user_data,
other=other.store_user_data,
)
)
self.tokens.extend(other.tokens)
self.spaces.extend(other.spaces)
self.strings.update(other.strings)
self.cats.extend(other.cats)
self.flags.extend(other.flags)
if self.store_user_data:
self.user_data.extend(other.user_data)
def to_bytes(self) -> bytes:
@ -200,8 +211,10 @@ class DocBin:
self.spaces = NumpyOps().unflatten(flat_spaces, lengths)
self.cats = msg["cats"]
self.flags = msg.get("flags", [{} for _ in lengths])
if self.store_user_data and "user_data" in msg:
if "user_data" in msg:
self.user_data = list(msg["user_data"])
else:
self.user_data = [None] * len(self)
for tokens in self.tokens:
assert len(tokens.shape) == 2, tokens.shape # this should never happen
return self
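
A short sketch of the new `store_user_data` check in `DocBin.merge` (doc contents are illustrative):

```python
from spacy.lang.en import English
from spacy.tokens import DocBin

nlp = English()
doc = nlp.make_doc("hello world")
bin_a = DocBin(store_user_data=True, docs=[doc])
bin_b = DocBin(store_user_data=False, docs=[doc])
try:
    bin_a.merge(bin_b)
except ValueError as err:
    print(err)  # E166, reported for 'store_user_data'
```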

View File

@ -223,8 +223,10 @@ cdef class Token:
def set_morph(self, features):
cdef hash_t key
if features is 0:
if features is None:
self.c.morph = 0
elif isinstance(features, MorphAnalysis):
self.morph = features
else:
if isinstance(features, int):
features = self.vocab.strings[features]
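
A small sketch of the behavior change: `None` (rather than `0`) now resets the morphological analysis:

```python
import spacy

nlp = spacy.blank("en")
doc = nlp("They run")
doc[1].set_morph("Number=Plur|Person=3")
assert doc.to_array(["MORPH"])[1] != 0
doc[1].set_morph(None)  # resets the analysis to unset
assert doc.to_array(["MORPH"])[1] == 0
```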

View File

@ -1,27 +1,43 @@
from typing import Callable, Iterator, Dict, List, Tuple, Optional, TYPE_CHECKING
from typing import Callable, Iterator, Dict, List, Tuple, TYPE_CHECKING
import random
import itertools
import copy
from functools import partial
from pydantic import BaseModel, StrictStr
from ..util import registry, logger
from ..tokens import Doc
from .example import Example
from ..lookups import Lookups
from ..errors import Errors
if TYPE_CHECKING:
from ..language import Language # noqa: F401
class OrthVariantsSingle(BaseModel):
tags: List[StrictStr]
variants: List[StrictStr]
class OrthVariantsPaired(BaseModel):
tags: List[StrictStr]
variants: List[List[StrictStr]]
class OrthVariants(BaseModel):
paired: List[OrthVariantsPaired] = {}
single: List[OrthVariantsSingle] = {}
@registry.augmenters("spacy.orth_variants.v1")
def create_orth_variants_augmenter(
level: float, lower: float, lookups: Optional[Lookups] = None,
level: float, lower: float, orth_variants: OrthVariants,
) -> Callable[["Language", Example], Iterator[Example]]:
"""Create a data augmentation callback that uses orth-variant replacement.
The callback can be added to a corpus or other data iterator during training.
"""
return partial(orth_variants_augmenter, level=level, lower=lower, lookups=lookups)
return partial(
orth_variants_augmenter, orth_variants=orth_variants, level=level, lower=lower
)
def dont_augment(nlp: "Language", example: Example) -> Iterator[Example]:
@ -31,20 +47,11 @@ def dont_augment(nlp: "Language", example: Example) -> Iterator[Example]:
def orth_variants_augmenter(
nlp: "Language",
example: Example,
orth_variants: dict,
*,
level: float = 0.0,
lower: float = 0.0,
lookups: Optional[Lookups] = None,
) -> Iterator[Example]:
table_name = "orth_variants"
if lookups is not None:
orth_variants = lookups.get_table(table_name, {})
logger.debug("Using data augmentation orth variants from provided lookups")
else:
orth_variants = nlp.vocab.lookups.get_table(table_name, {})
logger.debug("Using data augmentation orth variants from default vocab lookups")
if not orth_variants:
raise ValueError(Errors.E912.format(lang=nlp.lang))
if random.random() >= level:
yield example
else:
@ -74,13 +81,14 @@ def make_orth_variants(
nlp: "Language",
raw: str,
token_dict: Dict[str, List[str]],
orth_variants: Dict[str, list],
orth_variants: Dict[str, List[Dict[str, List[str]]]],
*,
lower: bool = False,
) -> Tuple[str, Dict[str, List[str]]]:
orig_token_dict = copy.deepcopy(token_dict)
ndsv = orth_variants.get("single", [])
ndpv = orth_variants.get("paired", [])
logger.debug(f"Data augmentation: {len(ndsv)} single / {len(ndpv)} paired variants")
words = token_dict.get("words", [])
tags = token_dict.get("tags", [])
# keep unmodified if words or tags are not defined
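
The augmenter now receives its orth variants directly instead of reading them from a `Lookups` table. A hedged sketch with illustrative variants; the resulting callable can be passed to `Corpus(path, augmenter=...)` as in the updated test above:

```python
from spacy.training.augment import create_orth_variants_augmenter

orth_variants = {"single": [{"tags": ["NFP"], "variants": ["…", "..."]}]}
augmenter = create_orth_variants_augmenter(
    level=0.2, lower=0.5, orth_variants=orth_variants
)
```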

View File

@ -38,11 +38,11 @@ def create_docbin_reader(
)
@util.registry.readers("spacy.JsonlReader.v1")
@util.registry.readers("spacy.JsonlCorpus.v1")
def create_jsonl_reader(
path: Path, min_length: int = 0, max_length: int = 0, limit: int = 0
) -> Callable[["Language"], Iterable[Doc]]:
return JsonlTexts(path, min_length=min_length, max_length=max_length, limit=limit)
return JsonlCorpus(path, min_length=min_length, max_length=max_length, limit=limit)
@util.registry.readers("spacy.read_labels.v1")
@ -193,7 +193,7 @@ class Corpus:
break
class JsonlTexts:
class JsonlCorpus:
"""Iterate Doc objects from a file or directory of jsonl
formatted raw text files.
@ -206,7 +206,7 @@ class JsonlTexts:
limit (int): Limit corpus to a subset of examples, e.g. for debugging.
Defaults to 0, which indicates no limit.
DOCS: https://nightly.spacy.io/api/corpus#jsonltexts
DOCS: https://nightly.spacy.io/api/corpus#jsonlcorpus
"""
file_type = "jsonl"
@ -230,7 +230,7 @@ class JsonlTexts:
nlp (Language): The current nlp object.
YIELDS (Example): The example objects.
DOCS: https://nightly.spacy.io/api/corpus#jsonltexts-call
DOCS: https://nightly.spacy.io/api/corpus#jsonlcorpus-call
"""
for loc in walk_corpus(self.path, ".jsonl"):
records = srsly.read_jsonl(loc)

View File

@ -103,10 +103,6 @@ class registry(thinc.registry):
cli = catalogue.create("spacy", "cli", entry_points=True)
# We want json loading in the registry, so manually register srsly.read_json.
registry.readers("srsly.read_json.v0", srsly.read_json)
class SimpleFrozenDict(dict):
"""Simplified implementation of a frozen dict, mainly used as default
function or method argument (for arguments that should default to empty

View File

@ -100,7 +100,7 @@ Yield examples from the data.
| `nlp` | The current `nlp` object. ~~Language~~ |
| **YIELDS** | The examples. ~~Example~~ |
## JsonlTexts {#jsonltexts tag="class"}
## JsonlCorpus {#jsonlcorpus tag="class"}
Iterate Doc objects from a file or directory of JSONL (newline-delimited JSON)
formatted raw text files. Can be used to read the raw text corpus for language
@ -126,22 +126,22 @@ file.
{"text": "My cynical view on this is that it will never be free to the public. Reason: what would be the draw of joining the military? Right now their selling point is free Healthcare and Education. Ironically both are run horribly and most, that I've talked to, come out wishing they never went in."}
```
### JsonlTexts.\_\_init\_\_ {#jsonltexts-init tag="method"}
### JsonlCorpus.\_\_init\_\_ {#jsonlcorpus tag="method"}
Initialize the reader.
> #### Example
>
> ```python
> from spacy.training import JsonlTexts
> from spacy.training import JsonlCorpus
>
> corpus = JsonlTexts("./data/texts.jsonl")
> corpus = JsonlCorpus("./data/texts.jsonl")
> ```
>
> ```ini
> ### Example config
> [corpora.pretrain]
> @readers = "spacy.JsonlReader.v1"
> @readers = "spacy.JsonlCorpus.v1"
> path = "corpus/raw_text.jsonl"
> min_length = 0
> max_length = 0
@ -156,17 +156,17 @@ Initialize the reader.
| `max_length` | Maximum document length (in tokens). Longer documents will be skipped. Defaults to `0`, which indicates no limit. ~~int~~ |
| `limit` | Limit corpus to a subset of examples, e.g. for debugging. Defaults to `0` for no limit. ~~int~~ |
### JsonlTexts.\_\_call\_\_ {#jsonltexts-call tag="method"}
### JsonlCorpus.\_\_call\_\_ {#jsonlcorpus-call tag="method"}
Yield examples from the data.
> #### Example
>
> ```python
> from spacy.training import JsonlTexts
> from spacy.training import JsonlCorpus
> import spacy
>
> corpus = JsonlTexts("./texts.jsonl")
> corpus = JsonlCorpus("./texts.jsonl")
> nlp = spacy.blank("en")
> data = corpus(nlp)
> ```

View File

@ -135,7 +135,7 @@ $ python -m spacy train config.cfg --paths.train ./corpus/train.spacy
> path = ${paths:dev}
>
> [corpora.pretrain]
> @readers = "spacy.JsonlReader.v1"
> @readers = "spacy.JsonlCorpus.v1"
> path = ${paths.raw}
>
> [corpora.my_custom_data]
@ -146,7 +146,7 @@ This section defines a **dictionary** mapping of string keys to functions. Each
function takes an `nlp` object and yields [`Example`](/api/example) objects. By
default, the two keys `train` and `dev` are specified and each refer to a
[`Corpus`](/api/top-level#Corpus). When pretraining, an additional `pretrain`
section is added that defaults to a [`JsonlReader`](/api/top-level#JsonlReader).
section is added that defaults to a [`JsonlCorpus`](/api/top-level#JsonlCorpus).
You can also register custom functions that return a callable.
| Name | Description |

View File

@ -47,7 +47,7 @@ Create a `DocBin` object to hold serialized annotations.
| Argument | Description |
| ----------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `attrs` | List of attributes to serialize. `ORTH` (hash of token text) and `SPACY` (whether the token is followed by whitespace) are always serialized, so they're not required. Defaults to `("ORTH", "TAG", "HEAD", "DEP", "ENT_IOB", "ENT_TYPE", "ENT_KB_ID", "LEMMA", "MORPH", "POS")`. ~~Iterable[str]~~ |
| `store_user_data` | Whether to include the `Doc.user_data` and the values of custom extension attributes. Defaults to `False`. ~~bool~~ |
| `store_user_data` | Whether to write the `Doc.user_data` and the values of custom extension attributes to file/bytes. Defaults to `False`. ~~bool~~ |
| `docs` | `Doc` objects to add on initialization. ~~Iterable[Doc]~~ |
## DocBin.\_\_len\_\_ {#len tag="method"}

View File

@ -8,8 +8,8 @@ source: spacy/language.py
Usually you'll load this once per process as `nlp` and pass the instance around
your application. The `Language` class is created when you call
[`spacy.load`](/api/top-level#spacy.load) and contains the shared vocabulary and
[language data](/usage/adding-languages), optional binary weights, e.g. provided
by a [trained pipeline](/models), and the
[language data](/usage/linguistic-features#language-data), optional binary
weights, e.g. provided by a [trained pipeline](/models), and the
[processing pipeline](/usage/processing-pipelines) containing components like
the tagger or parser that are called on a document in order. You can also add
your own processing pipeline components that take a `Doc` object, modify it and
@ -210,7 +210,9 @@ settings defined in the [`[initialize]`](/api/data-formats#config-initialize)
config block to set up the vocabulary, load in vectors and tok2vec weights and
pass optional arguments to the `initialize` methods implemented by pipeline
components or the tokenizer. This method is typically called automatically when
you run [`spacy train`](/api/cli#train).
you run [`spacy train`](/api/cli#train). See the usage guide on the
[config lifecycle](/usage/training#config-lifecycle) and
[initialization](/usage/training#initialization) for details.
`get_examples` should be a function that returns an iterable of
[`Example`](/api/example) objects. The data examples can either be the full
@ -928,7 +930,7 @@ Serialize the current state to a binary string.
Load state from a binary string. Note that this method is commonly used via the
subclasses like `English` or `German` to make language-specific functionality
like the [lexical attribute getters](/usage/adding-languages#lex-attrs)
like the [lexical attribute getters](/usage/linguistic-features#language-data)
available to the loaded object.
> #### Example

View File

@ -49,9 +49,8 @@ data format used by the lookup and rule-based lemmatizers, see
> ```
| Setting | Description |
| ----------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| ----------- | --------------------------------------------------------------------------------- |
| `mode` | The lemmatizer mode, e.g. `"lookup"` or `"rule"`. Defaults to `"lookup"`. ~~str~~ |
| `lookups` | The lookups object containing the tables such as `"lemma_rules"`, `"lemma_index"`, `"lemma_exc"` and `"lemma_lookup"`. If `None`, default tables are loaded from [`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data). Defaults to `None`. ~~Optional[Lookups]~~ |
| `overwrite` | Whether to overwrite existing lemmas. Defaults to `False`. ~~bool~~ |
| `model` | **Not yet implemented:** the model to use. ~~Model~~ |
@ -77,13 +76,12 @@ shortcut for this and instantiate the component using its string name and
[`nlp.add_pipe`](/api/language#add_pipe).
| Name | Description |
| -------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| -------------- | --------------------------------------------------------------------------------------------------- |
| `vocab` | The shared vocabulary. ~~Vocab~~ |
| `model` | **Not yet implemented:** The model to use. ~~Model~~ |
| `name` | String name of the component instance. Used to add entries to the `losses` during training. ~~str~~ |
| _keyword-only_ | |
| mode | The lemmatizer mode, e.g. `"lookup"` or `"rule"`. Defaults to `"lookup"`. ~~str~~ |
| lookups | A lookups object containing the tables such as `"lemma_rules"`, `"lemma_index"`, `"lemma_exc"` and `"lemma_lookup"`. Defaults to `None`. ~~Optional[Lookups]~~ |
| overwrite | Whether to overwrite existing lemmas. ~~bool~~ |
## Lemmatizer.\_\_call\_\_ {#call tag="method"}
@ -127,11 +125,41 @@ applied to the `Doc` in order.
| `batch_size` | The number of documents to buffer. Defaults to `128`. ~~int~~ |
| **YIELDS** | The processed documents in order. ~~Doc~~ |
## Lemmatizer.initialize {#initialize tag="method"}
Initialize the lemmatizer and load any data resources. This method is typically
called by [`Language.initialize`](/api/language#initialize) and lets you
customize arguments it receives via the
[`[initialize.components]`](/api/data-formats#config-initialize) block in the
config. The loading only happens during initialization, typically before
training. At runtime, all data is loaded from disk.
> #### Example
>
> ```python
> lemmatizer = nlp.add_pipe("lemmatizer")
> lemmatizer.initialize(lookups=lookups)
> ```
>
> ```ini
> ### config.cfg
> [initialize.components.lemmatizer]
>
> [initialize.components.lemmatizer.lookups]
> @misc = "load_my_lookups.v1"
> ```
| Name | Description |
| -------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `get_examples` | Function that returns gold-standard annotations in the form of [`Example`](/api/example) objects. Defaults to `None`. ~~Optional[Callable[[], Iterable[Example]]]~~ |
| _keyword-only_ | |
| `nlp` | The current `nlp` object. Defaults to `None`. ~~Optional[Language]~~ |
| `lookups` | The lookups object containing the tables such as `"lemma_rules"`, `"lemma_index"`, `"lemma_exc"` and `"lemma_lookup"`. If `None`, default tables are loaded from [`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data). Defaults to `None`. ~~Optional[Lookups]~~ |
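The config example above references a registered function `load_my_lookups.v1`.
As a rough sketch (the table contents below are placeholders, not shipped data),
such a function can be registered via `@spacy.registry.misc` and return a
[`Lookups`](/api/lookups) object:

```python
import spacy
from spacy.lookups import Lookups

@spacy.registry.misc("load_my_lookups.v1")
def load_my_lookups():
    # Placeholder data: in practice, load your own lemma tables here
    lookups = Lookups()
    lookups.add_table("lemma_lookup", {"going": "go", "went": "go"})
    return lookups
```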
## Lemmatizer.lookup_lemmatize {#lookup_lemmatize tag="method"}
Lemmatize a token using a lookup-based approach. If no lemma is found, the
original string is returned. Languages can provide a
[lookup table](/usage/adding-languages#lemmatizer) via the `Lookups`.
original string is returned.
| Name | Description |
| ----------- | --------------------------------------------------- |

View File

@ -172,6 +172,25 @@ Get a neighboring token.
| `i` | The relative position of the token to get. Defaults to `1`. ~~int~~ |
| **RETURNS** | The token at position `self.doc[self.i+i]`. ~~Token~~ |
## Token.set_morph {#set_morph tag="method"}
Set the morphological analysis from a UD FEATS string, hash value of a UD FEATS
string, features dict or `MorphAnalysis`. The value `None` can be used to reset
the morph to an unset state.
> #### Example
>
> ```python
> doc = nlp("Give it back! He pleaded.")
> doc[0].set_morph("Mood=Imp|VerbForm=Fin")
> assert "Mood=Imp" in doc[0].morph
> assert doc[0].morph.get("Mood") == ["Imp"]
> ```
| Name | Description |
| -------- | --------------------------------------------------------------------------------- |
| features | The morphological features to set. ~~Union[int, dict, str, MorphAnalysis, None]~~ |
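Since `features` can also be a dict or `None`, here is a quick sketch of the
other input types described above:

```python
import spacy

nlp = spacy.blank("en")
doc = nlp("Give it back! He pleaded.")
doc[0].set_morph({"Mood": "Imp", "VerbForm": "Fin"})  # features dict
assert doc[0].morph.get("Mood") == ["Imp"]
doc[0].set_morph(None)                                # reset to an unset state
```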
## Token.is_ancestor {#is_ancestor tag="method" model="parser"}
Check whether this token is a parent, grandparent, etc. of another in the
@ -393,7 +412,7 @@ The L2 norm of the token's vector representation.
## Attributes {#attributes}
| Name | Description |
| -------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| -------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `doc` | The parent document. ~~Doc~~ |
| `lex` <Tag variant="new">3</Tag> | The underlying lexeme. ~~Lexeme~~ |
| `sent` <Tag variant="new">2.0.12</Tag> | The sentence span that this token is a part of. ~~Span~~ |
@ -418,8 +437,8 @@ The L2 norm of the token's vector representation.
| `ent_id_` | ID of the entity the token is an instance of, if any. Currently not used, but potentially for coreference resolution. ~~str~~ |
| `lemma` | Base form of the token, with no inflectional suffixes. ~~int~~ |
| `lemma_` | Base form of the token, with no inflectional suffixes. ~~str~~ |
| `norm` | The token's norm, i.e. a normalized form of the token text. Can be set in the language's [tokenizer exceptions](/usage/adding-languages#tokenizer-exceptions). ~~int~~ |
| `norm_` | The token's norm, i.e. a normalized form of the token text. Can be set in the language's [tokenizer exceptions](/usage/adding-languages#tokenizer-exceptions). ~~str~~ |
| `norm` | The token's norm, i.e. a normalized form of the token text. Can be set in the language's [tokenizer exceptions](/usage/linguistic-features#language-data). ~~int~~ |
| `norm_` | The token's norm, i.e. a normalized form of the token text. Can be set in the language's [tokenizer exceptions](/usage/linguistic-features#language-data). ~~str~~ |
| `lower` | Lowercase form of the token. ~~int~~ |
| `lower_` | Lowercase form of the token text. Equivalent to `Token.text.lower()`. ~~str~~ |
| `shape` | Transform of the tokens's string to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example,`"Xxxx"`or`"dd"`. ~~int~~ |
@ -451,7 +470,6 @@ The L2 norm of the token's vector representation.
| `tag` | Fine-grained part-of-speech. ~~int~~ |
| `tag_` | Fine-grained part-of-speech. ~~str~~ |
| `morph` <Tag variant="new">3</Tag> | Morphological analysis. ~~MorphAnalysis~~ |
| `morph_` <Tag variant="new">3</Tag> | Morphological analysis in the Universal Dependencies [FEATS](https://universaldependencies.org/format.html#morphological-annotation) format. ~~str~~ |
| `dep` | Syntactic dependency relation. ~~int~~ |
| `dep_` | Syntactic dependency relation. ~~str~~ |
| `lang` | Language of the parent document's vocabulary. ~~int~~ |

View File

@ -22,9 +22,8 @@ like punctuation and special case rules from the
## Tokenizer.\_\_init\_\_ {#init tag="method"}
Create a `Tokenizer` to create `Doc` objects given unicode text. For examples
of how to construct a custom tokenizer with different tokenization rules, see
the
Create a `Tokenizer` to create `Doc` objects given unicode text. For examples of
how to construct a custom tokenizer with different tokenization rules, see the
[usage documentation](https://spacy.io/usage/linguistic-features#native-tokenizers).
> #### Example
@ -121,10 +120,10 @@ if no suffix rules match.
## Tokenizer.add_special_case {#add_special_case tag="method"}
Add a special-case tokenization rule. This mechanism is also used to add custom
tokenizer exceptions to the language data. See the usage guide on
[adding languages](/usage/adding-languages#tokenizer-exceptions) and
[linguistic features](/usage/linguistic-features#special-cases) for more details
and examples.
tokenizer exceptions to the language data. See the usage guide on the
[language data](/usage/linguistic-features#language-data) and
[tokenizer special cases](/usage/linguistic-features#special-cases) for more
details and examples.
> #### Example
>

View File

@ -270,10 +270,10 @@ If a setting is not present in the options, the default value will be used.
> ```
| Name | Description |
| --------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| --------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `ents` | Entity types to highlight or `None` for all types (default). ~~Optional[List[str]]~~ |
| `colors` | Color overrides. Entity types should be mapped to color names or values. ~~Dict[str, str]~~ |
| `template` <Tag variant="new">2.2</Tag> | Optional template to overwrite the HTML used to render entity spans. Should be a format string and can use `{bg}`, `{text}` and `{label}`. See [`templates.py`](GITHUB_SPACY/spacy/displacy/templates.py) for examples. ~~Optional[str]~~ |
| `template` <Tag variant="new">2.2</Tag> | Optional template to overwrite the HTML used to render entity spans. Should be a format string and can use `{bg}`, `{text}` and `{label}`. See [`templates.py`](%%GITHUB_SPACY/spacy/displacy/templates.py) for examples. ~~Optional[str]~~ |
By default, displaCy comes with colors for all entity types used by
[spaCy's trained pipelines](/models). If you're using custom entity types, you
@ -327,7 +327,7 @@ factories.
| `losses` | Registry for functions that create [losses](https://thinc.ai/docs/api-loss). |
| `misc` | Registry for miscellaneous functions that return data assets, knowledge bases or anything else you may need. |
| `optimizers` | Registry for functions that create [optimizers](https://thinc.ai/docs/api-optimizers). |
| `readers` | Registry for training and evaluation data readers like [`Corpus`](/api/corpus). |
| `readers` | Registry for file and data readers, including training and evaluation data readers like [`Corpus`](/api/corpus). |
| `schedules` | Registry for functions that create [schedules](https://thinc.ai/docs/api-schedules). |
| `tokenizers` | Registry for tokenizer factories. Registered functions should return a callback that receives the `nlp` object and returns a [`Tokenizer`](/api/tokenizer) or a custom callable. |
@ -470,7 +470,65 @@ logging the results.
</Project>
## Readers {#readers source="spacy/training/corpus.py" new="3"}
## Readers {#readers}
### File readers {#file-readers source="github.com/explosion/srsly" new="3"}
The following file readers are provided by our serialization library
[`srsly`](https://github.com/explosion/srsly). All registered functions take one
argument `path`, pointing to the file path to load.
> #### Example config
>
> ```ini
> [corpora.train.augmenter.orth_variants]
> @readers = "srsly.read_json.v1"
> path = "corpus/en_orth_variants.json"
> ```
| Name | Description |
| ----------------------- | ----------------------------------------------------- |
| `srsly.read_json.v1` | Read data from a JSON file. |
| `srsly.read_jsonl.v1` | Read data from a JSONL (newline-delimited JSON) file. |
| `srsly.read_yaml.v1` | Read data from a YAML file. |
| `srsly.read_msgpack.v1` | Read data from a binary MessagePack file. |
<Infobox title="Important note" variant="warning">
Since the file readers expect a local path, you should only use them in config
blocks that are **not executed at runtime**, for example in `[training]` and
`[corpora]` (to load data or resources like data augmentation tables) or in
`[initialize]` (to pass data to pipeline components).
</Infobox>
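The registered names map onto `srsly`'s own reader functions, so the same files
can also be loaded directly in Python, e.g. when preparing assets ahead of
training (paths below are placeholders):

```python
import srsly

# Load the same file the config example above points to
orth_variants = srsly.read_json("corpus/en_orth_variants.json")
# JSONL files are read lazily as a generator of dicts
lines = srsly.read_jsonl("corpus/raw_text.jsonl")
```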
#### spacy.read_labels.v1 {#read_labels tag="registered function"}
Read a JSON-formatted labels file generated with
[`init labels`](/api/cli#init-labels). Typically used in the
[`[initialize]`](/api/data-formats#config-initialize) block of the training
config to speed up the model initialization process and provide pre-generated
label sets.
> #### Example config
>
> ```ini
> [initialize.components]
>
> [initialize.components.ner]
>
> [initialize.components.ner.labels]
> @readers = "spacy.read_labels.v1"
> path = "corpus/labels/ner.json"
> ```
| Name | Description |
| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `path` | The path to the labels file generated with [`init labels`](/api/cli#init-labels). ~~Path~~ |
| `require` | Whether to require the file to exist. If set to `False` and the labels file doesn't exist, the loader will return `None` and the `initialize` method will extract the labels from the data. Defaults to `False`. ~~bool~~ |
| **CREATES** | The labels loaded from the file, or `None` if the file doesn't exist and `require` is set to `False`. ~~Optional[List[str]]~~ |
### Corpus readers {#corpus-readers source="spacy/training/corpus.py" new="3"}
Corpus readers are registered functions that load data and return a function
that takes the current `nlp` object and yields [`Example`](/api/example) objects
@ -480,7 +538,7 @@ with your own registered function in the
[`@readers` registry](/api/top-level#registry) to customize the data loading and
streaming.
### spacy.Corpus.v1 {#corpus tag="registered function"}
#### spacy.Corpus.v1 {#corpus tag="registered function"}
The `Corpus` reader manages annotated corpora and can be used for training and
development datasets in the [DocBin](/api/docbin) (`.spacy`) format. Also see
@ -509,12 +567,12 @@ the [`Corpus`](/api/corpus) class.
| `augmenter` | Apply some simply data augmentation, where we replace tokens with variations. This is especially useful for punctuation and case replacement, to help generalize beyond corpora that don't have smart-quotes, or only have smart quotes, etc. Defaults to `None`. ~~Optional[Callable]~~ |
| **CREATES** | The corpus reader. ~~Corpus~~ |
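If you need the same behavior outside the config, e.g. in a custom script, a
minimal sketch using the [`Corpus`](/api/corpus) class directly (the path is a
placeholder):

```python
import spacy
from spacy.training import Corpus

nlp = spacy.blank("en")
corpus = Corpus("corpus/train.spacy", gold_preproc=False)
# Calling the corpus with the nlp object yields Example objects
train_examples = list(corpus(nlp))
```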
### spacy.JsonlReader.v1 {#jsonlreader tag="registered function"}
#### spacy.JsonlCorpus.v1 {#jsonlcorpus tag="registered function"}
Create [`Example`](/api/example) objects from a JSONL (newline-delimited JSON)
file of texts keyed by `"text"`. Can be used to read the raw text corpus for
language model [pretraining](/usage/embeddings-transformers#pretraining) from a
JSONL file. Also see the [`JsonlReader`](/api/corpus#jsonlreader) class.
JSONL file. Also see the [`JsonlCorpus`](/api/corpus#jsonlcorpus) class.
> #### Example config
>
@ -523,7 +581,7 @@ JSONL file. Also see the [`JsonlReader`](/api/corpus#jsonlreader) class.
> pretrain = "corpus/raw_text.jsonl"
>
> [corpora.pretrain]
> @readers = "spacy.JsonlReader.v1"
> @readers = "spacy.JsonlCorpus.v1"
> path = ${paths.pretrain}
> min_length = 0
> max_length = 0
@ -536,33 +594,7 @@ JSONL file. Also see the [`JsonlReader`](/api/corpus#jsonlreader) class.
| `min_length` | Minimum document length (in tokens). Shorter documents will be skipped. Defaults to `0`, which indicates no limit. ~~int~~ |
| `max_length` | Maximum document length (in tokens). Longer documents will be skipped. Defaults to `0`, which indicates no limit. ~~int~~ |
| `limit` | Limit corpus to a subset of examples, e.g. for debugging. Defaults to `0` for no limit. ~~int~~ |
| **CREATES** | The corpus reader. ~~JsonlTexts~~ |
### spacy.read_labels.v1 {#read_labels tag="registered function"}
Read a JSON-formatted labels file generated with
[`init labels`](/api/cli#init-labels). Typically used in the
[`[initialize]`](/api/data-formats#config-initialize) block of the training
config to speed up the model initialization process and provide pre-generated
label sets.
> #### Example config
>
> ```ini
> [initialize.components]
>
> [initialize.components.ner]
>
> [initialize.components.ner.labels]
> @readers = "spacy.read_labels.v1"
> path = "corpus/labels/ner.json"
> ```
| Name | Description |
| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `path` | The path to the labels file generated with [`init labels`](/api/cli#init-labels). ~~Path~~ |
| `require` | Whether to require the file to exist. If set to `False` and the labels file doesn't exist, the loader will return `None` and the `initialize` method will extract the labels from the data. Defaults to `False`. ~~bool~~ |
| **CREATES** | The labels loaded from the file, or `None` if the file doesn't exist and `require` is set to `False`. ~~Optional[List[str]]~~ |
| **CREATES** | The corpus reader. ~~JsonlCorpus~~ |
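A minimal sketch of using the [`JsonlCorpus`](/api/corpus#jsonlcorpus) class
directly in Python, with the same raw text file as in the config example above
(argument values are illustrative):

```python
import spacy
from spacy.training import JsonlCorpus

nlp = spacy.blank("en")
corpus = JsonlCorpus("corpus/raw_text.jsonl", min_length=5, max_length=500)
# Examples with unannotated reference docs, e.g. for pretraining
raw_examples = list(corpus(nlp))
```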
## Batchers {#batchers source="spacy/training/batchers.py" new="3"}
@ -653,7 +685,11 @@ sequences in the batch.
## Augmenters {#augmenters source="spacy/training/augment.py" new="3"}
<!-- TODO: intro, explain data augmentation concept -->
Data augmentation is the process of applying small modifications to the training
data. It can be especially useful for punctuation and case replacement, for
example if your corpus only uses smart quotes and you want to include
variations using regular quotes, or to make the model less sensitive to
capitalization by including a mix of capitalized and lowercase examples. See the [usage guide](/usage/training#data-augmentation) for details and examples.
### spacy.orth_variants.v1 {#orth_variants tag="registered function"}
@ -664,7 +700,10 @@ sequences in the batch.
> @augmenters = "spacy.orth_variants.v1"
> level = 0.1
> lower = 0.5
> lookups = null
>
> [corpora.train.augmenter.orth_variants]
> @readers = "srsly.read_json.v1"
> path = "corpus/en_orth_variants.json"
> ```
Create a data augmentation callback that uses orth-variant replacement. The
@ -673,10 +712,10 @@ is especially useful for punctuation and case replacement, to help generalize
beyond corpora that don't have smart quotes, or only have smart quotes, etc.
| Name | Description |
| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| --------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `level` | The percentage of texts that will be augmented. ~~float~~ |
| `lower` | The percentage of texts that will be lowercased. ~~float~~ |
| `lookups` | Lookups table containing the orth variants to use. See [`orth_variants.json`](https://github.com/explosion/spacy-lookups-data/blob/master/spacy_lookups_data/data/en_orth_variants.json) for an example. If not set, tables from [`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data) are used if available and added in the [`[initialize]`](/api/data-formats#config-initialize) block of the config. If no orth variants are found, spaCy will raise an error. Defaults to `None`. ~~Optional[Lookups]~~ |
| `orth_variants` | A dictionary containing the single and paired orth variants. Typically loaded from a JSON file. See [`en_orth_variants.json`](https://github.com/explosion/spacy-lookups-data/blob/master/spacy_lookups_data/data/en_orth_variants.json) for an example. ~~Dict[str, Dict[List[Union[str, List[str]]]]]~~ |
| **CREATES** | A function that takes the current `nlp` object and an [`Example`](/api/example) and yields augmented `Example` objects. ~~Callable[[Language, Example], Iterator[Example]]~~ |
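Outside the config, the augmenter can also be created via the registry and
passed to a [`Corpus`](/api/corpus). This is only a rough sketch: the paths are
placeholders and the keyword arguments mirror the table above:

```python
import srsly
import spacy
from spacy.training import Corpus

# Load the orth variant rules and create the registered augmenter
orth_variants = srsly.read_json("corpus/en_orth_variants.json")
create_augmenter = spacy.registry.augmenters.get("spacy.orth_variants.v1")
augmenter = create_augmenter(level=0.1, lower=0.5, orth_variants=orth_variants)
# Apply the augmenter to the training corpus only
corpus = Corpus("corpus/train.spacy", augmenter=augmenter)
```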
## Training data and alignment {#gold source="spacy/training"}
@ -788,7 +827,7 @@ utilities.
### util.get_lang_class {#util.get_lang_class tag="function"}
Import and load a `Language` class. Allows lazy-loading
[language data](/usage/adding-languages) and importing languages using the
[language data](/usage/linguistic-features#language-data) and importing languages using the
two-letter language code. To add a language code for a custom language class,
you can register it using the [`@registry.languages`](/api/top-level#registry)
decorator.
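A quick sketch of how this is typically used:

```python
from spacy.util import get_lang_class

lang_cls = get_lang_class("de")  # lazily imports spacy.lang.de
nlp = lang_cls()                 # roughly equivalent to spacy.blank("de")
```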

View File

@ -622,7 +622,7 @@ that are familiar from the training block: the `[pretraining.batcher]`,
`[pretraining.optimizer]` and `[pretraining.corpus]` all work the same way and
expect the same types of objects, although for pretraining your corpus does not
need to have any annotations, so you will often use a different reader, such as
the [`JsonlReader`](/api/top-level#jsonlreader).
the [`JsonlCorpus`](/api/top-level#jsonlcorpus).
> #### Raw text format
>

View File

@ -8,10 +8,7 @@ menu:
- ['Changelog', 'changelog']
---
spaCy is compatible with **64-bit CPython 3.6+** and runs on **Unix/Linux**,
**macOS/OS X** and **Windows**. The latest spaCy releases are available over
[pip](https://pypi.python.org/pypi/spacy) and
[conda](https://anaconda.org/conda-forge/spacy).
## Quickstart {hidden="true"}
> #### 📖 Looking for the old docs?
>
@ -19,21 +16,22 @@ spaCy is compatible with **64-bit CPython 3.6+** and runs on **Unix/Linux**,
> website to [**v2.spacy.io**](https://v2.spacy.io/docs). To see what's changed
> and how to migrate, see the guide on [v3.0 guide](/usage/v3).
## Quickstart {hidden="true"}
import QuickstartInstall from 'widgets/quickstart-install.js'
<QuickstartInstall title="Quickstart" id="quickstart" />
<QuickstartInstall id="quickstart" />
## Installation instructions {#installation}
spaCy is compatible with **64-bit CPython 3.6+** and runs on **Unix/Linux**,
**macOS/OS X** and **Windows**. The latest spaCy releases are available over
[pip](https://pypi.python.org/pypi/spacy) and
[conda](https://anaconda.org/conda-forge/spacy).
### pip {#pip}
Using pip, spaCy releases are available as source packages and binary wheels.
```bash
$ pip install -U spacy
```
Before you install spaCy and its dependencies, make sure that your `pip`,
`setuptools` and `wheel` are up to date.
> #### Download pipelines
>
@ -47,16 +45,10 @@ $ pip install -U spacy
> >>> nlp = spacy.load("en_core_web_sm")
> ```
<Infobox variant="warning">
To install additional data tables for lemmatization you can run
`pip install spacy[lookups]` or install
[`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data)
separately. The lookups package is needed to provide normalization and
lemmatization data for new models and to lemmatize in languages that don't yet
come with trained pipelines and aren't powered by third-party libraries.
</Infobox>
```bash
$ pip install -U pip setuptools wheel
$ pip install -U spacy
```
When using pip it is generally recommended to install packages in a virtual
environment to avoid modifying system state:
@ -64,9 +56,28 @@ environment to avoid modifying system state:
```bash
$ python -m venv .env
$ source .env/bin/activate
$ pip install -U pip setuptools wheel
$ pip install spacy
```
spaCy also lets you install extra dependencies by specifying the following
keywords in brackets, e.g. `spacy[ja]` or `spacy[lookups,transformers]` (with
multiple comma-separated extras). See the `[options.extras_require]` section in
spaCy's [`setup.cfg`](%%GITHUB_SPACY/setup.cfg) for details on what's included.
> #### Example
>
> ```bash
> $ pip install spacy[lookups,transformers]
> ```
| Name | Description |
| ---------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `lookups` | Install [`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data) for data tables for lemmatization and lexeme normalization. The data is serialized with trained pipelines, so you only need this package if you want to train your own models. |
| `transformers` | Install [`spacy-transformers`](https://github.com/explosion/spacy-transformers). The package will be installed automatically when you install a transformer-based pipeline. |
| `cuda`, ... | Install spaCy with GPU support provided by [CuPy](https://cupy.chainer.org) for your given CUDA version. See the GPU [installation instructions](#gpu) for details and options. |
| `ja`, `ko`, `th` | Install additional dependencies required for tokenization for the [languages](/usage/models#languages). |
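Once a `cuda` extra is installed, you can tell spaCy to use the GPU from
Python. A minimal sketch (call it before loading any pipelines):

```python
import spacy

spacy.prefer_gpu()  # returns False and falls back to CPU if no GPU is available
nlp = spacy.load("en_core_web_sm")
```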
### conda {#conda}
Thanks to our great community, we've been able to re-add conda support. You can
@ -112,10 +123,9 @@ $ python -m spacy validate
### Run spaCy with GPU {#gpu new="2.0.14"}
As of v2.0, spaCy comes with neural network models that are implemented in our
machine learning library, [Thinc](https://github.com/explosion/thinc). For GPU
support, we've been grateful to use the work of Chainer's
[CuPy](https://cupy.chainer.org) module, which provides a numpy-compatible
interface for GPU arrays.
machine learning library, [Thinc](https://thinc.ai). For GPU support, we've been
grateful to use the work of Chainer's [CuPy](https://cupy.chainer.org) module,
which provides a numpy-compatible interface for GPU arrays.
spaCy can be installed on GPU by specifying `spacy[cuda]`, `spacy[cuda90]`,
`spacy[cuda91]`, `spacy[cuda92]`, `spacy[cuda100]`, `spacy[cuda101]` or

View File

@ -56,16 +56,13 @@ create a surface form. Here are some examples:
Morphological features are stored in the [`MorphAnalysis`](/api/morphanalysis)
under `Token.morph`, which allows you to access individual morphological
features. The attribute `Token.morph_` provides the morphological analysis in
the Universal Dependencies
[FEATS](https://universaldependencies.org/format.html#morphological-annotation)
format.
features.
> #### 📝 Things to try
>
> 1. Change "I" to "She". You should see that the morphological features change
> and express that it's a pronoun in the third person.
> 2. Inspect `token.morph_` for the other tokens.
> 2. Inspect `token.morph` for the other tokens.
```python
### {executable="true"}
@ -75,7 +72,7 @@ nlp = spacy.load("en_core_web_sm")
print("Pipeline:", nlp.pipe_names)
doc = nlp("I was reading the paper.")
token = doc[0] # 'I'
print(token.morph_) # 'Case=Nom|Number=Sing|Person=1|PronType=Prs'
print(token.morph) # 'Case=Nom|Number=Sing|Person=1|PronType=Prs'
print(token.morph.get("PronType")) # ['Prs']
```
@ -91,7 +88,7 @@ import spacy
nlp = spacy.load("de_core_news_sm")
doc = nlp("Wo bist du?") # English: 'Where are you?'
print(doc[2].morph_) # 'Case=Nom|Number=Sing|Person=2|PronType=Prs'
print(doc[2].morph) # 'Case=Nom|Number=Sing|Person=2|PronType=Prs'
print(doc[2].pos_) # 'PRON'
```
@ -117,7 +114,7 @@ import spacy
nlp = spacy.load("en_core_web_sm")
doc = nlp("Where are you?")
print(doc[2].morph_) # 'Case=Nom|Person=2|PronType=Prs'
print(doc[2].morph) # 'Case=Nom|Person=2|PronType=Prs'
print(doc[2].pos_) # 'PRON'
```

View File

@ -30,7 +30,7 @@ import QuickstartModels from 'widgets/quickstart-models.js'
## Language support {#languages}
spaCy currently provides support for the following languages. You can help by
[improving the existing language data](/usage/adding-languages#language-data)
improving the existing [language data](/usage/linguistic-features#language-data)
and extending the tokenization patterns.
[See here](https://github.com/explosion/spaCy/issues/3056) for details on how to
contribute to development.
@ -83,74 +83,95 @@ To train a pipeline using the neutral multi-language class, you can set
import the `MultiLanguage` class directly, or call
[`spacy.blank("xx")`](/api/top-level#spacy.blank) for lazy-loading.
### Chinese language support {#chinese new=2.3}
### Chinese language support {#chinese new="2.3"}
The Chinese language class supports three word segmentation options:
The Chinese language class supports three word segmentation options: `char`,
`jieba` and `pkuseg`.
> #### Manual setup
>
> ```python
> from spacy.lang.zh import Chinese
>
> # Character segmentation (default)
> nlp = Chinese()
>
> # Jieba
> cfg = {"segmenter": "jieba"}
> nlp = Chinese(meta={"tokenizer": {"config": cfg}})
>
> nlp = Chinese.from_config({"nlp": {"tokenizer": cfg}})
> # PKUSeg with "default" model provided by pkuseg
> cfg = {"segmenter": "pkuseg", "pkuseg_model": "default"}
> nlp = Chinese(meta={"tokenizer": {"config": cfg}})
> cfg = {"segmenter": "pkuseg"}
> nlp = Chinese.from_config({"nlp": {"tokenizer": cfg}})
> nlp.tokenizer.initialize(pkuseg_model="default")
> ```
1. **Character segmentation:** Character segmentation is the default
segmentation option. It's enabled when you create a new `Chinese` language
class or call `spacy.blank("zh")`.
2. **Jieba:** `Chinese` uses [Jieba](https://github.com/fxsjy/jieba) for word
segmentation with the tokenizer option `{"segmenter": "jieba"}`.
3. **PKUSeg**: As of spaCy v2.3.0, support for
[PKUSeg](https://github.com/lancopku/PKUSeg-python) has been added to support
better segmentation for Chinese OntoNotes and the provided
[Chinese pipelines](/models/zh). Enable PKUSeg with the tokenizer option
`{"segmenter": "pkuseg"}`.
<Infobox variant="warning">
In spaCy v3.0, the default Chinese word segmenter has switched from Jieba to
character segmentation. Also note that
[`pkuseg`](https://github.com/lancopku/pkuseg-python) doesn't yet ship with
pre-compiled wheels for Python 3.8. If you're running Python 3.8, you can
install it from our fork and compile it locally:
```bash
$ pip install https://github.com/honnibal/pkuseg-python/archive/master.zip
```ini
### config.cfg
[nlp.tokenizer]
@tokenizers = "spacy.zh.ChineseTokenizer"
segmenter = "char"
```
| Segmenter | Description |
| --------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `char` | **Character segmentation:** Character segmentation is the default segmentation option. It's enabled when you create a new `Chinese` language class or call `spacy.blank("zh")`. |
| `jieba` | **Jieba:** to use [Jieba](https://github.com/fxsjy/jieba) for word segmentation, you can set the option `segmenter` to `"jieba"`. |
| `pkuseg` | **PKUSeg**: As of spaCy v2.3.0, support for [PKUSeg](https://github.com/lancopku/PKUSeg-python) has been added to support better segmentation for Chinese OntoNotes and the provided [Chinese pipelines](/models/zh). Enable PKUSeg by setting tokenizer option `segmenter` to `"pkuseg"`. |
<Infobox title="Changed in v3.0" variant="warning">
In v3.0, the default word segmenter has switched from Jieba to character
segmentation. Because the `pkuseg` segmenter depends on a model that can be
loaded from a file, the model is loaded on
[initialization](/usage/training#config-lifecycle) (typically before training).
This ensures that your packaged Chinese model doesn't depend on a local path at
runtime.
</Infobox>
<Accordion title="Details on spaCy's Chinese API">
The `meta` argument of the `Chinese` language class supports the following
tokenizer config settings:
The `initialize` method for the Chinese tokenizer class supports the following
config settings for loading `pkuseg` models:
| Name | Description |
| ------------------ | --------------------------------------------------------------------------------------------------------------- |
| `segmenter` | Word segmenter: `char`, `jieba` or `pkuseg`. Defaults to `char`. ~~str~~ |
| `pkuseg_model` | **Required for `pkuseg`:** Name of a model provided by `pkuseg` or the path to a local model directory. ~~str~~ |
| `pkuseg_user_dict` | Optional path to a file with one word per line which overrides the default `pkuseg` user dictionary. ~~str~~ |
| ------------------ | ------------------------------------------------------------------------------------------------------------------------------------- |
| `pkuseg_model` | Name of a model provided by `pkuseg` or the path to a local model directory. ~~str~~ |
| `pkuseg_user_dict` | Optional path to a file with one word per line which overrides the default `pkuseg` user dictionary. Defaults to `"default"`. ~~str~~ |
The initialization settings are typically provided in the
[training config](/usage/training#config) and the data is loaded in before
training and serialized with the model. This allows you to load the data from a
local path and save out your pipeline and config, without requiring the same
local path at runtime. See the usage guide on the
[config lifecycle](/usage/training#config-lifecycle) for more background on
this.
```ini
### config.cfg
[initialize]
[initialize.tokenizer]
pkuseg_model = "/path/to/model"
pkuseg_user_dict = "default"
```
You can also initialize the tokenizer for a blank language class by calling its
`initialize` method:
```python
### Examples
# Initialize the pkuseg tokenizer
cfg = {"segmenter": "pkuseg"}
nlp = Chinese.from_config({"nlp": {"tokenizer": cfg}})
# Load "default" model
cfg = {"segmenter": "pkuseg", "pkuseg_model": "default"}
nlp = Chinese(config={"tokenizer": {"config": cfg}})
nlp.tokenizer.initialize(pkuseg_model="default")
# Load local model
cfg = {"segmenter": "pkuseg", "pkuseg_model": "/path/to/pkuseg_model"}
nlp = Chinese(config={"tokenizer": {"config": cfg}})
nlp.tokenizer.initialize(pkuseg_model="/path/to/pkuseg_model")
# Override the user dictionary
cfg = {"segmenter": "pkuseg", "pkuseg_model": "default", "pkuseg_user_dict": "/path"}
nlp = Chinese(config={"tokenizer": {"config": cfg}})
nlp.tokenizer.initialize(pkuseg_model="default", pkuseg_user_dict="/path/to/user_dict")
```
You can also modify the user dictionary on-the-fly:
@ -185,36 +206,46 @@ from spacy.lang.zh import Chinese
# Train pkuseg model
pkuseg.train("train.utf8", "test.utf8", "/path/to/pkuseg_model")
# Load pkuseg model in spaCy Chinese tokenizer
nlp = Chinese(meta={"tokenizer": {"config": {"pkuseg_model": "/path/to/pkuseg_model", "require_pkuseg": True}}})
cfg = {"segmenter": "pkuseg"}
nlp = Chinese.from_config({"nlp": {"tokenizer": cfg}})
nlp.tokenizer.initialize(pkuseg_model="/path/to/pkuseg_model")
```
</Accordion>
### Japanese language support {#japanese new=2.3}
> #### Manual setup
>
> ```python
> from spacy.lang.ja import Japanese
>
> # Load SudachiPy with split mode A (default)
> nlp = Japanese()
>
> # Load SudachiPy with split mode B
> cfg = {"split_mode": "B"}
> nlp = Japanese(meta={"tokenizer": {"config": cfg}})
> nlp = Japanese.from_config({"nlp": {"tokenizer": cfg}})
> ```
The Japanese language class uses
[SudachiPy](https://github.com/WorksApplications/SudachiPy) for word
segmentation and part-of-speech tagging. The default Japanese language class and
the provided Japanese pipelines use SudachiPy split mode `A`. The `meta`
argument of the `Japanese` language class can be used to configure the split
mode to `A`, `B` or `C`.
the provided Japanese pipelines use SudachiPy split mode `A`. The tokenizer
config can be used to configure the split mode to `A`, `B` or `C`.
```ini
### config.cfg
[nlp.tokenizer]
@tokenizers = "spacy.ja.JapaneseTokenizer"
split_mode = "A"
```
<Infobox variant="warning">
If you run into errors related to `sudachipy`, which is currently under active
development, we suggest downgrading to `sudachipy==0.4.5`, which is the version
development, we suggest downgrading to `sudachipy==0.4.9`, which is the version
used for training the current [Japanese pipelines](/models/ja).
</Infobox>

View File

@ -895,6 +895,10 @@ the name. Registered functions can also take **arguments** by the way that can
be defined in the config as well; you can read more about this in the docs on
[training with custom code](/usage/training#custom-code).
### Initializing components with data {#initialization}
<!-- TODO: -->
### Python type hints and pydantic validation {#type-hints new="3"}
spaCy's configs are powered by our machine learning library Thinc's

View File

@ -291,7 +291,7 @@ installed in the same environment that's it.
| Entry point | Description |
| ------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| [`spacy_factories`](#entry-points-components) | Group of entry points for pipeline component factories, keyed by component name. Can be used to expose custom components defined by another package. |
| [`spacy_languages`](#entry-points-languages) | Group of entry points for custom [`Language` subclasses](/usage/adding-languages), keyed by language shortcut. |
| [`spacy_languages`](#entry-points-languages) | Group of entry points for custom [`Language` subclasses](/usage/linguistic-features#language-data), keyed by language shortcut. |
| `spacy_lookups` <Tag variant="new">2.2</Tag> | Group of entry points for custom [`Lookups`](/api/lookups), including lemmatizer data. Used by spaCy's [`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data) package. |
| [`spacy_displacy_colors`](#entry-points-displacy) <Tag variant="new">2.2</Tag> | Group of entry points of custom label colors for the [displaCy visualizer](/usage/visualizers#ent). The key name doesn't matter, but it should point to a dict of labels and color values. Useful for custom models that predict different entity types. |

View File

@ -200,7 +200,7 @@ import Tokenization101 from 'usage/101/\_tokenization.md'
To learn more about how spaCy's tokenization rules work in detail, how to
**customize and replace** the default tokenizer and how to **add
language-specific data**, see the usage guides on
[adding languages](/usage/adding-languages) and
[language data](/usage/linguistic-features#language-data) and
[customizing the tokenizer](/usage/linguistic-features#tokenization).
</Infobox>
@ -479,7 +479,7 @@ find a "Suggest edits" link at the bottom of each page that points you to the
source.
Another way of getting involved is to help us improve the
[language data](/usage/adding-languages#language-data) especially if you
[language data](/usage/linguistic-features#language-data), especially if you
happen to speak one of the languages currently in
[alpha support](/usage/models#languages). Even adding simple tokenizer
exceptions, stop words or lemmatizer data can make a big difference. It will

View File

@ -216,7 +216,9 @@ The initialization settings are only loaded and used when
[`nlp.initialize`](/api/language#initialize) is called (typically right before
training). This allows you to set up your pipeline using local data resources
and custom functions, and preserve the information in your config but without
requiring it to be available at runtime
requiring it to be available at runtime. You can also use this mechanism to
provide data paths to custom pipeline components and custom tokenizers. See the
section on [custom initialization](#initialization) for details.
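A minimal sketch of what happens when you call `initialize` yourself, e.g. in a
custom training script (the component and example data below are placeholders):

```python
import spacy
from spacy.training import Example

nlp = spacy.blank("en")
nlp.add_pipe("textcat")
doc = nlp.make_doc("This is really great")
examples = [Example.from_dict(doc, {"cats": {"POSITIVE": 1.0, "NEGATIVE": 0.0}})]
# Resolves and applies the [initialize] settings from nlp.config
optimizer = nlp.initialize(lambda: examples)
```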
### Overwriting config settings on the command line {#config-overrides}
@ -815,9 +817,9 @@ def MyModel(output_width: int) -> Model[List[Doc], List[Floats2d]]:
return create_model(output_width)
```
<!-- TODO:
### Customizing the initialization {#initialization}
-->
<!-- TODO: -->
## Data utilities {#data}
@ -1011,9 +1013,136 @@ def filter_batch(size: int) -> Callable[[Iterable[Example]], Iterator[List[Examp
<!-- TODO:
* Custom corpus class
* Minibatching
* Data augmentation
-->
### Data augmentation {#data-augmentation}
Data augmentation is the process of applying small **modifications** to the
training data. It can be especially useful for punctuation and case replacement,
for example if your corpus only uses smart quotes and you want to include
variations using regular quotes, or to make the model less sensitive to
capitalization by including a mix of capitalized and lowercase examples.
The easiest way to use data augmentation during training is to provide an
`augmenter` to the training corpus, e.g. in the `[corpora.train]` section of
your config. The built-in [`orth_variants`](/api/top-level#orth_variants)
augmenter creates a data augmentation callback that uses orth-variant
replacement.
```ini
### config.cfg (excerpt) {highlight="8,14"}
[corpora.train]
@readers = "spacy.Corpus.v1"
path = ${paths.train}
gold_preproc = false
max_length = 0
limit = 0
[corpora.train.augmenter]
@augmenters = "spacy.orth_variants.v1"
# Percentage of texts that will be augmented / lowercased
level = 0.1
lower = 0.5
[corpora.train.augmenter.orth_variants]
@readers = "srsly.read_json.v1"
path = "corpus/orth_variants.json"
```
The `orth_variants` argument lets you pass in a dictionary of replacement rules,
typically loaded from a JSON file. There are two types of orth variant rules:
`"single"` for single tokens that should be replaced (e.g. hyphens) and
`"paired"` for pairs of tokens (e.g. quotes).
<!-- prettier-ignore -->
```json
### orth_variants.json
{
"single": [{ "tags": ["NFP"], "variants": ["…", "..."] }],
"paired": [{ "tags": ["``", "''"], "variants": [["'", "'"], ["", ""]] }]
}
```
<Accordion title="Full examples for English and German" spaced>
```json
https://github.com/explosion/spacy-lookups-data/blob/master/spacy_lookups_data/data/en_orth_variants.json
```
```json
https://github.com/explosion/spacy-lookups-data/blob/master/spacy_lookups_data/data/de_orth_variants.json
```
</Accordion>
<Infobox title="Important note" variant="warning">
When adding data augmentation, keep in mind that it typically only makes sense
to apply it to the **training corpus**, not the development data.
</Infobox>
#### Writing custom data augmenters {#data-augmentation-custom}
Using the [`@spacy.augmenters`](/api/top-level#registry) registry, you can also
register your own data augmentation callbacks. The callback should be a function
that takes the current `nlp` object and a training [`Example`](/api/example) and
yields `Example` objects. Keep in mind that the augmenter should yield **all
examples** you want to use in your corpus, not only the augmented examples
(unless you want to augment all examples).
Here's an example of a custom augmentation callback that produces text variants
in ["SpOnGeBoB cAsE"](https://knowyourmeme.com/memes/mocking-spongebob). The
registered function takes one argument `randomize` that can be set via the
config and decides whether the uppercase/lowercase transformation is applied
randomly or not. The augmenter yields two `Example` objects: the original
example and the augmented example.
> #### config.cfg
>
> ```ini
> [corpora.train.augmenter]
> @augmenters = "spongebob_augmenter.v1"
> randomize = false
> ```
```python
import spacy
import random
@spacy.registry.augmenters("spongebob_augmenter.v1")
def create_augmenter(randomize: bool = False):
def augment(nlp, example):
text = example.text
if randomize:
# Randomly uppercase/lowercase characters
chars = [c.lower() if random.random() < 0.5 else c.upper() for c in text]
else:
# Uppercase followed by lowercase
chars = [c.lower() if i % 2 else c.upper() for i, c in enumerate(text)]
# Create augmented training example
example_dict = example.to_dict()
doc = nlp.make_doc("".join(chars))
example_dict["token_annotation"]["ORTH"] = [t.text for t in doc]
# Original example followed by augmented example
yield example
yield example.from_dict(doc, example_dict)
return augment
```
An easy way to create modified `Example` objects is to use the
[`Example.from_dict`](/api/example#from_dict) method with a new reference
[`Doc`](/api/doc) created from the modified text. In this case, only the
capitalization changes, so only the `ORTH` values of the tokens will be
different between the original and augmented examples.
Note that if your data augmentation strategy involves changing the tokenization
(for instance, removing or adding tokens) and your training examples include
token-based annotations like the dependency parse or entity labels, you'll need
to take care to adjust the `Example` object so its annotations match and remain
valid.
## Parallel & distributed training with Ray {#parallel-training}
> #### Installation
@ -1124,17 +1253,6 @@ a dictionary with keyword arguments specifying the annotations, like `tags` or
annotations, the model can be updated to learn a sentence of three words with
their assigned part-of-speech tags.
> #### About the tag map
>
> The tag map is part of the vocabulary and defines the annotation scheme. If
> you're training a new pipeline, this will let you map the tags present in the
> treebank you train on to spaCy's tag scheme:
>
> ```python
> tag_map = {"N": {"pos": "NOUN"}, "V": {"pos": "VERB"}}
> vocab = Vocab(tag_map=tag_map)
> ```
```python
words = ["I", "like", "stuff"]
tags = ["NOUN", "VERB", "NOUN"]

View File

@ -24,7 +24,7 @@
"TransformerData": "/api/transformer#transformerdata",
"FullTransformerBatch": "/api/transformer#fulltransformerbatch",
"Corpus": "/api/corpus",
"JsonlTexts": "/api/corpus#jsonltexts",
"JsonlCorpus": "/api/corpus#jsonlcorpus",
"LexemeC": "/api/cython-structs#lexemec",
"TokenC": "/api/cython-structs#tokenc",
"Config": "https://thinc.ai/docs/api-config#config",

View File

@ -24,6 +24,7 @@ const Quickstart = ({
rawContent = null,
id = 'quickstart',
setters = {},
showDropdown = {},
hidePrompts,
small,
codeLang,
@ -107,6 +108,8 @@ const Quickstart = ({
}) => {
// Optional function that's called with the value
const setterFunc = setters[id] || (() => {})
// Check if dropdown should be shown
const dropdownGetter = showDropdown[id] || (() => true)
return (
<div key={id} data-quickstart-group={id} className={classes.group}>
<style data-quickstart-style={id} scoped>
@ -123,37 +126,6 @@ const Quickstart = ({
)}
</div>
<div className={classes.fields}>
{!!dropdown.length && (
<select
defaultValue={defaultValue}
className={classes.select}
onChange={({ target }) => {
const value = target.value
if (value != other) {
setterFunc(value)
setOther(id, false)
} else {
setterFunc('')
setOther(id, true)
}
}}
>
{dropdown.map(({ id, title }) => (
<option key={id} value={id}>
{title}
</option>
))}
{other && <option value={other}>{other}</option>}
</select>
)}
{other && otherState[id] && (
<input
type="text"
className={classes.textInput}
placeholder="Type here..."
onChange={({ target }) => setterFunc(target.value)}
/>
)}
{options.map(option => {
const optionType = multiple ? 'checkbox' : 'radio'
const checkedForId = checked[id] || []
@ -179,7 +151,10 @@ const Quickstart = ({
type={optionType}
className={classNames(
classes.input,
classes[optionType]
classes[optionType],
{
[classes.long]: options.length >= 4,
}
)}
name={id}
id={`quickstart-${option.id}`}
@ -209,6 +184,41 @@ const Quickstart = ({
</Fragment>
)
})}
<span className={classes.fieldExtra}>
{!!dropdown.length && (
<select
defaultValue={defaultValue}
className={classNames(classes.select, {
[classes.selectHidden]: !dropdownGetter(),
})}
onChange={({ target }) => {
const value = target.value
if (value != other) {
setterFunc(value)
setOther(id, false)
} else {
setterFunc('')
setOther(id, true)
}
}}
>
{dropdown.map(({ id, title }) => (
<option key={id} value={id}>
{title}
</option>
))}
{other && <option value={other}>{other}</option>}
</select>
)}
{other && otherState[id] && (
<input
type="text"
className={classes.textInput}
placeholder="Type here..."
onChange={({ target }) => setterFunc(target.value)}
/>
)}
</span>
</div>
</div>
)

View File

@ -36,22 +36,37 @@
.label
cursor: pointer
border: 1px solid var(--color-subtle)
border-radius: var(--border-radius)
display: inline-block
padding: 0.65rem 1.25rem
margin: 0 0.5rem 0.75rem 0
padding: 0.35rem 0.5rem 0.25rem 0
margin: 0 1rem 0.75rem 0
font-size: var(--font-size-xs)
font-weight: bold
background: var(--color-back)
&:hover
background: var(--color-subtle-light)
.input:focus + &
.input:focus +
border: 1px solid var(--color-theme)
outline: none
.radio + &
margin: 0 0 0.75rem 0
border-radius: 0
border-width: 1px 0 1px 1px
border-style: solid
border-color: var(--color-subtle)
background: var(--color-back)
padding: 0.65rem 1.25rem
&:nth-child(2) // first child is checkbox
border-top-left-radius: var(--border-radius)
border-bottom-left-radius: var(--border-radius)
&:nth-last-child(2) // last child is additional container
border-top-right-radius: var(--border-radius)
border-bottom-right-radius: var(--border-radius)
border-right-width: 1px
.radio:checked + &
color: var(--color-back)
border-color: var(--color-theme)
@ -64,9 +79,10 @@
height: 20px
border: 1px solid var(--color-subtle)
vertical-align: middle
margin-right: 1rem
margin-right: 0.5rem
cursor: pointer
border-radius: var(--border-radius)
background: var(--color-back)
.checkbox:checked + &:before
// Embed "check" icon here for simplicity
@ -74,6 +90,9 @@
background-size: contain
border-color: var(--color-theme)
.field-extra:not(:empty):not(:first-child)
margin-left: 1rem
.legend
color: var(--color-dark)
padding: 0.75rem 0
@ -93,6 +112,9 @@
font-size: var(--font-size-sm)
background: var(--color-back)
.select-hidden
display: none
.text-input
border: 1px solid var(--color-subtle)
border-radius: var(--border-radius)

View File

@ -1,9 +1,20 @@
import React from 'react'
import React, { useState } from 'react'
import { StaticQuery, graphql } from 'gatsby'
import { Quickstart, QS } from '../components/quickstart'
import { repo } from '../components/util'
const DEFAULT_HARDWARE = 'cpu'
const DEFAULT_CUDA = 'cuda100'
const CUDA = {
'8.0': 'cuda80',
'9.0': 'cuda90',
'9.1': 'cuda91',
'9.2': 'cuda92',
'10.0': 'cuda100',
'10.1': 'cuda101',
'10.2': 'cuda102',
}
const DATA = [
{
id: 'os',
@ -23,6 +34,16 @@ const DATA = [
{ id: 'source', title: 'from source' },
],
},
{
id: 'hardware',
title: 'Hardware',
options: [
{ id: 'cpu', title: 'CPU', checked: DEFAULT_HARDWARE === 'cpu' },
{ id: 'gpu', title: 'GPU', checked: DEFAULT_HARDWARE == 'gpu' },
],
dropdown: Object.keys(CUDA).map(id => ({ id: CUDA[id], title: `CUDA ${id}` })),
defaultValue: DEFAULT_CUDA,
},
{
id: 'config',
title: 'Configuration',
@ -30,35 +51,39 @@ const DATA = [
options: [
{
id: 'venv',
title: 'virtualenv',
title: 'virtual env',
help: 'Use a virtual environment and install spaCy into a user directory',
},
],
},
{
id: 'addition',
title: 'Additions',
multiple: true,
options: [
{
id: 'transformers',
title: 'Transformers',
help: 'Use transformers like BERT to train your spaCy pipelines',
},
{
id: 'lookups',
title: 'Lemmatizer data',
help: 'Install additional lookup tables and rules for lemmatization',
id: 'train',
title: 'train models',
help:
'Check this if you plan to train your own models with spaCy to install extra dependencies and data resources',
},
],
},
]
const QuickstartInstall = ({ id, title }) => (
const QuickstartInstall = ({ id, title }) => {
const [train, setTrain] = useState(false)
const [hardware, setHardware] = useState(DEFAULT_HARDWARE)
const [cuda, setCuda] = useState(DEFAULT_CUDA)
const setters = {
hardware: v => (Array.isArray(v) ? setHardware(v[0]) : setCuda(v)),
config: v => setTrain(v.includes('train')),
}
const showDropdown = {
hardware: () => hardware === 'gpu',
}
const pipExtras = [hardware === 'gpu' && cuda, train && 'transformers', train && 'lookups']
.filter(e => e)
.join(',')
return (
<StaticQuery
query={query}
render={({ site }) => {
const { nightly, languages } = site.siteMetadata
const pkg = nightly ? 'spacy-nightly' : 'spacy'
const models = languages.filter(({ models }) => models !== null)
const data = [
...DATA,
@ -66,11 +91,19 @@ const QuickstartInstall = ({ id, title }) => (
id: 'models',
title: 'Trained Pipelines',
multiple: true,
options: models.map(({ code, name }) => ({ id: code, title: name })),
options: models
.sort((a, b) => a.name.localeCompare(b.name))
.map(({ code, name }) => ({ id: code, title: name })),
},
]
return (
<Quickstart data={data} title={title} id={id}>
<Quickstart
data={data}
title={title}
id={id}
setters={setters}
showDropdown={showDropdown}
>
<QS config="venv">python -m venv .env</QS>
<QS config="venv" os="mac">
source .env/bin/activate
@ -81,8 +114,17 @@ const QuickstartInstall = ({ id, title }) => (
<QS config="venv" os="windows">
.env\Scripts\activate
</QS>
<QS package="pip">pip install -U spacy</QS>
<QS package="pip">pip install -U pip setuptools wheel</QS>
<QS package="source">pip install -U pip setuptools wheel</QS>
<QS package="pip">
pip install -U {pkg}
{pipExtras && `[${pipExtras}]`}
{nightly ? ' --pre' : ''}
</QS>
<QS package="conda">conda install -c conda-forge spacy</QS>
<QS package="conda" hardware="gpu">
conda install -c conda-forge cupy
</QS>
<QS package="source">
git clone https://github.com/{repo}
{nightly ? ` --branch develop` : ''}
@ -95,25 +137,18 @@ const QuickstartInstall = ({ id, title }) => (
set PYTHONPATH=C:\path\to\spaCy
</QS>
<QS package="source">pip install -r requirements.txt</QS>
<QS addition="transformers" package="pip">
pip install -U spacy-transformers
<QS package="source">python setup.py build_ext --inplace</QS>
<QS package="source" config="train">
pip install -e '.[{pipExtras}]'
</QS>
<QS addition="transformers" package="source">
pip install -U spacy-transformers
</QS>
<QS addition="transformers" package="conda">
<QS config="train" package="conda">
conda install -c conda-forge spacy-transformers
</QS>
<QS addition="lookups" package="pip">
pip install -U spacy-lookups-data
</QS>
<QS addition="lookups" package="source">
pip install -U spacy-lookups-data
</QS>
<QS addition="lookups" package="conda">
<QS config="train" package="conda">
conda install -c conda-forge spacy-lookups-data
</QS>
<QS package="source">python setup.py build_ext --inplace</QS>
{models.map(({ code, models: modelOptions }) => (
<QS models={code} key={code}>
python -m spacy download {modelOptions[0]}
@ -123,7 +158,8 @@ const QuickstartInstall = ({ id, title }) => (
)
}}
/>
)
)
}
export default QuickstartInstall

View File

@ -1,12 +1,16 @@
import React, { Fragment } from 'react'
import React, { Fragment, useState } from 'react'
import { StaticQuery, graphql } from 'gatsby'
import { Quickstart, QS } from '../components/quickstart'
const DEFAULT_LANG = 'en'
const DEFAULT_OPT = 'efficiency'
const data = [
{
id: 'lang',
title: 'Language',
defaultValue: DEFAULT_LANG,
},
{
id: 'load',
@ -25,6 +29,16 @@ const data = [
},
],
},
{
id: 'optimize',
title: 'Optimize for',
help:
'Optimize for efficiency (faster & smaller model) or higher accuracy (larger & slower model)',
options: [
{ id: 'efficiency', title: 'efficiency', checked: DEFAULT_OPT === 'efficiency' },
{ id: 'accuracy', title: 'accuracy', checked: DEFAULT_OPT === 'accuracy' },
],
},
{
id: 'config',
title: 'Options',
@ -33,41 +47,56 @@ const data = [
},
]
const QuickstartInstall = ({ id, title, description, defaultLang = 'en', children }) => (
const QuickstartInstall = ({ id, title, description, children }) => {
const [lang, setLang] = useState(DEFAULT_LANG)
const [efficiency, setEfficiency] = useState(DEFAULT_OPT)
const setters = {
lang: setLang,
optimize: v => setEfficiency(v.includes('efficiency')),
}
return (
<StaticQuery
query={query}
render={({ site }) => {
const models = site.siteMetadata.languages.filter(({ models }) => models !== null)
data[0].options = models.map(({ code, name }) => ({
data[0].dropdown = models
.sort((a, b) => a.name.localeCompare(b.name))
.map(({ code, name }) => ({
id: code,
title: name,
checked: code === defaultLang,
}))
return (
<Quickstart data={data} title={title} id={id} description={description}>
<Quickstart
data={data}
title={title}
id={id}
description={description}
setters={setters}
copy={false}
>
{models.map(({ code, models, example }) => {
const pkg = models[0]
const pkg = efficiency ? models[0] : models[models.length - 1]
const exampleText = example || 'No text available yet'
return (
return lang !== code ? null : (
<Fragment key={code}>
<QS lang={code}>python -m spacy download {pkg}</QS>
<QS lang={code} divider />
<QS lang={code} load="spacy" prompt="python">
<QS>python -m spacy download {pkg}</QS>
<QS divider />
<QS load="spacy" prompt="python">
import spacy
</QS>
<QS lang={code} load="spacy" prompt="python">
<QS load="spacy" prompt="python">
nlp = spacy.load("{pkg}")
</QS>
<QS lang={code} load="module" prompt="python">
<QS load="module" prompt="python">
import {pkg}
</QS>
<QS lang={code} load="module" prompt="python">
<QS load="module" prompt="python">
nlp = {pkg}.load()
</QS>
<QS lang={code} config="example" prompt="python">
<QS config="example" prompt="python">
doc = nlp("{exampleText}")
</QS>
<QS lang={code} config="example" prompt="python">
<QS config="example" prompt="python">
print([
{code === 'xx'
? '(ent.text, ent.label) for ent in doc.ents'
@ -83,7 +112,8 @@ const QuickstartInstall = ({ id, title, description, defaultLang = 'en', childre
)
}}
/>
)
)
}
export default QuickstartInstall