Merge branch 'develop' into feature/language-data-config

commit 14d7d46f89
Ines Montani, 2020-07-22 22:18:53 +02:00
7 changed files with 342 additions and 19 deletions


```diff
@@ -25,7 +25,7 @@ def debug_model_cli(
     P1: bool = Opt(False, "--print-step1", "-P1", help="Print model after initialization"),
     P2: bool = Opt(False, "--print-step2", "-P2", help="Print model after training"),
     P3: bool = Opt(True, "--print-step3", "-P3", help="Print final predictions"),
-    use_gpu: int = Opt(-1, "--use-gpu", "-g", help="GPU ID or -1 for CPU")
+    use_gpu: int = Opt(-1, "--gpu-id", "-g", help="GPU ID or -1 for CPU")
     # fmt: on
 ):
     """
```


```diff
@@ -36,7 +36,7 @@ def pretrain_cli(
     code_path: Optional[Path] = Opt(None, "--code-path", "-c", help="Path to Python file with additional code (registered functions) to be imported"),
     resume_path: Optional[Path] = Opt(None, "--resume-path", "-r", help="Path to pretrained weights from which to resume pretraining"),
     epoch_resume: Optional[int] = Opt(None, "--epoch-resume", "-er", help="The epoch to resume counting from when using '--resume_path'. Prevents unintended overwriting of existing weight files."),
-    use_gpu: int = Opt(-1, "--use-gpu", "-g", help="GPU ID or -1 for CPU"),
+    use_gpu: int = Opt(-1, "--gpu-id", "-g", help="GPU ID or -1 for CPU"),
     # fmt: on
 ):
     """
```


```diff
@@ -36,7 +36,7 @@ def train_cli(
     output_path: Optional[Path] = Opt(None, "--output", "--output-path", "-o", help="Output directory to store model in"),
     code_path: Optional[Path] = Opt(None, "--code-path", "-c", help="Path to Python file with additional code (registered functions) to be imported"),
     verbose: bool = Opt(False, "--verbose", "-V", "-VV", help="Display more information for debugging purposes"),
-    use_gpu: int = Opt(-1, "--use-gpu", "-g", help="GPU ID or -1 for CPU"),
+    use_gpu: int = Opt(-1, "--gpu-id", "-g", help="GPU ID or -1 for CPU"),
     resume: bool = Opt(False, "--resume", "-R", help="Resume training"),
     # fmt: on
 ):
@@ -518,7 +518,7 @@ def verify_config(nlp: Language) -> None:
     # in config["nlp"]["pipeline"] instead?
     for pipe_config in nlp.config["components"].values():
         # We can't assume that the component name == the factory
-        factory = pipe_config["@factories"]
+        factory = pipe_config["factory"]
         if factory == "textcat":
             verify_textcat_config(nlp, pipe_config)
```
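The comment in `verify_config` is worth unpacking: a component can be added under any unique name, while the `factory` key records which registered function actually built it. A minimal sketch of that distinction, modeled on the serialization test later in this commit (assuming the default `ner` component config resolves on a blank pipeline):

```python
import spacy

nlp = spacy.blank("en")
# First argument is the factory name; `name` is the unique component
# name that becomes the key in config["components"].
nlp.add_pipe("ner", name="my_ner")
pipe_config = nlp.config["components"]["my_ner"]
assert pipe_config["factory"] == "ner"  # the factory, not "my_ner"
```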


```diff
@@ -561,9 +561,9 @@ class Errors:
             "into {values}, but found {value}.")
     E983 = ("Invalid key for '{dict}': {key}. Available keys: "
             "{keys}")
-    E984 = ("Invalid component config for '{name}': no @factories key "
+    E984 = ("Invalid component config for '{name}': no 'factory' key "
             "specifying the registered function used to initialize the "
-            "component. For example, @factories = \"ner\" will use the 'ner' "
+            "component. For example, factory = \"ner\" will use the 'ner' "
             "factory and all other settings in the block will be passed "
             "to it as arguments.\n\n{config}")
     E985 = ("Can't load model from config file: no 'nlp' section found.\n\n{config}")
```


```diff
@@ -171,7 +171,7 @@ class Language:
         for pipe_name in self.pipe_names:
             pipe_meta = self.get_pipe_meta(pipe_name)
             pipe_config = self.get_pipe_config(pipe_name)
-            pipeline[pipe_name] = {"@factories": pipe_meta.factory, **pipe_config}
+            pipeline[pipe_name] = {"factory": pipe_meta.factory, **pipe_config}
         self._config["nlp"]["pipeline"] = self.pipe_names
         self._config["components"] = pipeline
         if not srsly.is_json_serializable(self._config):
@@ -477,7 +477,7 @@ class Language:
         # pipeline component and why it failed, explain default config
         resolved, filled = registry.resolve(cfg, validate=validate, overrides=overrides)
         filled = filled[factory_name]
-        filled["@factories"] = factory_name
+        filled["factory"] = factory_name
         self._pipe_configs[name] = filled
         return resolved[factory_name]
@@ -1270,12 +1270,12 @@ class Language:
         if pipe_name not in pipeline:
             opts = ", ".join(pipeline.keys())
             raise ValueError(Errors.E956.format(name=pipe_name, opts=opts))
-        pipe_cfg = pipeline[pipe_name]
+        pipe_cfg = util.copy_config(pipeline[pipe_name])
         if pipe_name not in disable:
-            if "@factories" not in pipe_cfg:
+            if "factory" not in pipe_cfg:
                 err = Errors.E984.format(name=pipe_name, config=pipe_cfg)
                 raise ValueError(err)
-            factory = pipe_cfg["@factories"]
+            factory = pipe_cfg.pop("factory")
             # The pipe name (key in the config) here is the unique name of the
             # component, not necessarily the factory
             nlp.add_pipe(
```
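Two details of the last hunk are easy to miss: the config is copied before `"factory"` is popped, so the caller's config is never mutated, and popping leaves only the factory's arguments behind. A standalone illustration, with a plain `dict` standing in for the config and `dict(...)` for `util.copy_config`:

```python
config = {"components": {"bar": {"factory": "ner", "foo": 100}}}

pipe_cfg = dict(config["components"]["bar"])  # copy before mutating
factory = pipe_cfg.pop("factory")             # -> "ner"

# Only the component's own settings remain to be passed as arguments...
assert pipe_cfg == {"foo": 100}
# ...and the original config still records which factory was used.
assert config["components"]["bar"]["factory"] == "ner"
```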


```diff
@@ -20,7 +20,7 @@ pipeline = ["tok2vec", "tagger"]
 [components]
 
 [components.tok2vec]
-@factories = "tok2vec"
+factory = "tok2vec"
 
 [components.tok2vec.model]
 @architectures = "spacy.HashEmbedCNN.v1"
@@ -34,7 +34,7 @@ subword_features = true
 dropout = null
 
 [components.tagger]
-@factories = "tagger"
+factory = "tagger"
 
 [components.tagger.model]
 @architectures = "spacy.Tagger.v1"
@@ -245,7 +245,7 @@ def test_serialize_config_language_specific():
     nlp.add_pipe(name, config={"foo": 100}, name="bar")
     pipe_config = nlp.config["components"]["bar"]
     assert pipe_config["foo"] == 100
-    assert pipe_config["@factories"] == name
+    assert pipe_config["factory"] == name
     with make_tempdir() as d:
         nlp.to_disk(d)
@@ -255,7 +255,7 @@ def test_serialize_config_language_specific():
     assert nlp2.get_pipe_meta("bar").factory == name
     pipe_config = nlp2.config["components"]["bar"]
     assert pipe_config["foo"] == 100
-    assert pipe_config["@factories"] == name
+    assert pipe_config["factory"] == name
     config = Config().from_str(nlp2.config.to_str())
     config["nlp"]["lang"] = "de"
```


```diff
@@ -5,10 +5,14 @@ source: spacy/pipeline/morphologizer.pyx
 new: 3
 ---
 
-A trainable pipeline component to predict morphological features. This class is
-a subclass of `Pipe` and follows the same API. The component is also available
-via the string name `"morphologizer"`. After initialization, it is typically
-added to the processing pipeline using [`nlp.add_pipe`](/api/language#add_pipe).
+A trainable pipeline component to predict morphological features and
+coarse-grained POS tags following the Universal Dependencies
+[UPOS](https://universaldependencies.org/u/pos/index.html) and
+[FEATS](https://universaldependencies.org/format.html#morphological-annotation)
+annotation guidelines. This class is a subclass of `Pipe` and follows the same
+API. The component is also available via the string name `"morphologizer"`.
+After initialization, it is typically added to the processing pipeline using
+[`nlp.add_pipe`](/api/language#add_pipe).
 
 ## Default config {#config}
```

The second hunk appends the full API documentation for the new component after the existing default config block:

````diff
@@ -21,3 +25,322 @@ custom models, check out the [training config](/usage/training#config) docs.
 ```python
 https://github.com/explosion/spaCy/blob/develop/spacy/pipeline/defaults/morphologizer_defaults.cfg
 ```
````
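To make the new intro concrete, a hypothetical end-to-end use of a trained morphologizer might look like the sketch below (the model name is a placeholder; reading `token.pos_` and `token.morph` follows the UPOS/FEATS description above):

```python
import spacy

# Placeholder name: any pipeline that contains a trained morphologizer
nlp = spacy.load("my_model_with_morphologizer")
doc = nlp("She was reading.")
for token in doc:
    # Coarse-grained UPOS tag and FEATS-style morphological features
    print(token.text, token.pos_, token.morph)
```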
## Morphologizer.\_\_init\_\_ {#init tag="method"}

Initialize the morphologizer.

> #### Example
>
> ```python
> # Construction via create_pipe
> morphologizer = nlp.create_pipe("morphologizer")
>
> # Construction from class
> from spacy.pipeline import Morphologizer
> morphologizer = Morphologizer()
> ```

Create a new pipeline instance. In your application, you would normally use a
shortcut for this and instantiate the component using its string name and
[`nlp.create_pipe`](/api/language#create_pipe).

| Name        | Type            | Description                                                                      |
| ----------- | --------------- | -------------------------------------------------------------------------------- |
| `vocab`     | `Vocab`         | The shared vocabulary.                                                            |
| `model`     | `Model`         | The [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component.   |
| `**cfg`     | -               | Configuration parameters.                                                         |
| **RETURNS** | `Morphologizer` | The newly constructed object.                                                     |

## Morphologizer.\_\_call\_\_ {#call tag="method"}

Apply the pipe to one document. The document is modified in place, and returned.
This usually happens under the hood when the `nlp` object is called on a text
and all pipeline components are applied to the `Doc` in order. Both
[`__call__`](/api/morphologizer#call) and [`pipe`](/api/morphologizer#pipe)
delegate to the [`predict`](/api/morphologizer#predict) and
[`set_annotations`](/api/morphologizer#set_annotations) methods.

> #### Example
>
> ```python
> morphologizer = Morphologizer(nlp.vocab)
> doc = nlp("This is a sentence.")
> # This usually happens under the hood
> processed = morphologizer(doc)
> ```

| Name        | Type  | Description              |
| ----------- | ----- | ------------------------ |
| `doc`       | `Doc` | The document to process. |
| **RETURNS** | `Doc` | The processed document.  |
## Morphologizer.pipe {#pipe tag="method"}

Apply the pipe to a stream of documents. This usually happens under the hood
when the `nlp` object is called on a text and all pipeline components are
applied to the `Doc` in order. Both [`__call__`](/api/morphologizer#call) and
[`pipe`](/api/morphologizer#pipe) delegate to the
[`predict`](/api/morphologizer#predict) and
[`set_annotations`](/api/morphologizer#set_annotations) methods.

> #### Example
>
> ```python
> morphologizer = Morphologizer(nlp.vocab)
> for doc in morphologizer.pipe(docs, batch_size=50):
>     pass
> ```

| Name         | Type            | Description                                             |
| ------------ | --------------- | ------------------------------------------------------- |
| `stream`     | `Iterable[Doc]` | A stream of documents.                                  |
| `batch_size` | int             | The number of texts to buffer. Defaults to `128`.       |
| **YIELDS**   | `Doc`           | Processed documents in the order of the original text.  |

## Morphologizer.predict {#predict tag="method"}

Apply the pipeline's model to a batch of docs, without modifying them.

> #### Example
>
> ```python
> morphologizer = Morphologizer(nlp.vocab)
> scores = morphologizer.predict([doc1, doc2])
> ```

| Name        | Type            | Description                                |
| ----------- | --------------- | ------------------------------------------ |
| `docs`      | `Iterable[Doc]` | The documents to predict.                  |
| **RETURNS** | -               | The model's prediction for each document.  |

## Morphologizer.set_annotations {#set_annotations tag="method"}

Modify a batch of documents, using pre-computed scores.

> #### Example
>
> ```python
> morphologizer = Morphologizer(nlp.vocab)
> scores = morphologizer.predict([doc1, doc2])
> morphologizer.set_annotations([doc1, doc2], scores)
> ```

| Name     | Type            | Description                                              |
| -------- | --------------- | -------------------------------------------------------- |
| `docs`   | `Iterable[Doc]` | The documents to modify.                                  |
| `scores` | -               | The scores to set, produced by `Morphologizer.predict`.   |
## Morphologizer.update {#update tag="method"}

Learn from a batch of documents and gold-standard information, updating the
pipe's model. Delegates to [`predict`](/api/morphologizer#predict) and
[`get_loss`](/api/morphologizer#get_loss).

> #### Example
>
> ```python
> morphologizer = Morphologizer(nlp.vocab, morphologizer_model)
> optimizer = nlp.begin_training()
> losses = morphologizer.update(examples, sgd=optimizer)
> ```

| Name              | Type                | Description                                                                                                                                  |
| ----------------- | ------------------- | -------------------------------------------------------------------------------------------------------------------------------------------- |
| `examples`        | `Iterable[Example]` | A batch of [`Example`](/api/example) objects to learn from.                                                                                   |
| _keyword-only_    |                     |                                                                                                                                               |
| `drop`            | float               | The dropout rate.                                                                                                                             |
| `set_annotations` | bool                | Whether or not to update the `Example` objects with the predictions, delegating to [`set_annotations`](/api/morphologizer#set_annotations).  |
| `sgd`             | `Optimizer`         | The [`Optimizer`](https://thinc.ai/docs/api-optimizers) object.                                                                               |
| `losses`          | `Dict[str, float]`  | Optional record of the loss during training. The value keyed by the model's name is updated.                                                  |
| **RETURNS**       | `Dict[str, float]`  | The updated `losses` dictionary.                                                                                                              |
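Putting the arguments in the table together, a minimal training loop might look like the following sketch (`examples` is assumed to be an existing batch of `Example` objects, and the `"morphologizer"` losses key is an assumption based on the model-name convention described above):

```python
morphologizer = nlp.get_pipe("morphologizer")
optimizer = nlp.begin_training()
losses = {}
for epoch in range(10):
    losses = morphologizer.update(examples, sgd=optimizer, losses=losses)
    print(epoch, losses.get("morphologizer"))
```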
## Morphologizer.get_loss {#get_loss tag="method"}

Find the loss and gradient of loss for the batch of documents and their
predicted scores.

> #### Example
>
> ```python
> morphologizer = Morphologizer(nlp.vocab)
> scores = morphologizer.predict([eg.predicted for eg in examples])
> loss, d_loss = morphologizer.get_loss(examples, scores)
> ```

| Name        | Type                | Description                                          |
| ----------- | ------------------- | ---------------------------------------------------- |
| `examples`  | `Iterable[Example]` | The batch of examples.                                |
| `scores`    | -                   | Scores representing the model's predictions.          |
| **RETURNS** | tuple               | The loss and the gradient, i.e. `(loss, gradient)`.   |

## Morphologizer.begin_training {#begin_training tag="method"}

Initialize the pipe for training, using data examples if available. Return an
[`Optimizer`](https://thinc.ai/docs/api-optimizers) object.

> #### Example
>
> ```python
> morphologizer = Morphologizer(nlp.vocab)
> nlp.pipeline.append(morphologizer)
> optimizer = morphologizer.begin_training(pipeline=nlp.pipeline)
> ```

| Name           | Type                    | Description                                                                                                                                                   |
| -------------- | ----------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `get_examples` | `Iterable[Example]`     | Optional gold-standard annotations in the form of [`Example`](/api/example) objects.                                                                            |
| `pipeline`     | `List[(str, callable)]` | Optional list of pipeline components that this component is part of.                                                                                            |
| `sgd`          | `Optimizer`             | An optional [`Optimizer`](https://thinc.ai/docs/api-optimizers) object. Will be created via [`create_optimizer`](/api/morphologizer#create_optimizer) if not set. |
| **RETURNS**    | `Optimizer`             | An optimizer.                                                                                                                                                    |

## Morphologizer.create_optimizer {#create_optimizer tag="method"}

Create an optimizer for the pipeline component.

> #### Example
>
> ```python
> morphologizer = Morphologizer(nlp.vocab)
> optimizer = morphologizer.create_optimizer()
> ```

| Name        | Type        | Description                                                     |
| ----------- | ----------- | --------------------------------------------------------------- |
| **RETURNS** | `Optimizer` | The [`Optimizer`](https://thinc.ai/docs/api-optimizers) object. |
## Morphologizer.use_params {#use_params tag="method, contextmanager"}

Modify the pipe's model, to use the given parameter values.

> #### Example
>
> ```python
> morphologizer = Morphologizer(nlp.vocab)
> with morphologizer.use_params(optimizer.averages):
>     morphologizer.to_disk("/best_model")
> ```

| Name     | Type | Description                                                                                                  |
| -------- | ---- | ------------------------------------------------------------------------------------------------------------ |
| `params` | -    | The parameter values to use in the model. At the end of the context, the original parameters are restored.    |
## Morphologizer.add_label {#add_label tag="method"}

Add a new label to the pipe. If the `Morphologizer` should set annotations for
both `pos` and `morph`, the label should include the UPOS as the feature `POS`.

> #### Example
>
> ```python
> morphologizer = Morphologizer(nlp.vocab)
> morphologizer.add_label("Mood=Ind|POS=VERB|Tense=Past|VerbForm=Fin")
> ```

| Name    | Type | Description       |
| ------- | ---- | ----------------- |
| `label` | str  | The label to add. |

## Morphologizer.to_disk {#to_disk tag="method"}

Serialize the pipe to disk.

> #### Example
>
> ```python
> morphologizer = Morphologizer(nlp.vocab)
> morphologizer.to_disk("/path/to/morphologizer")
> ```

| Name      | Type         | Description                                                                                                            |
| --------- | ------------ | ------------------------------------------------------------------------------------------------------------------------ |
| `path`    | str / `Path` | A path to a directory, which will be created if it doesn't exist. Paths may be either strings or `Path`-like objects.     |
| `exclude` | list         | String names of [serialization fields](#serialization-fields) to exclude.                                                 |

## Morphologizer.from_disk {#from_disk tag="method"}

Load the pipe from disk. Modifies the object in place and returns it.

> #### Example
>
> ```python
> morphologizer = Morphologizer(nlp.vocab)
> morphologizer.from_disk("/path/to/morphologizer")
> ```

| Name        | Type            | Description                                                                 |
| ----------- | --------------- | ----------------------------------------------------------------------------- |
| `path`      | str / `Path`    | A path to a directory. Paths may be either strings or `Path`-like objects.    |
| `exclude`   | list            | String names of [serialization fields](#serialization-fields) to exclude.     |
| **RETURNS** | `Morphologizer` | The modified `Morphologizer` object.                                          |
## Morphologizer.to_bytes {#to_bytes tag="method"}

> #### Example
>
> ```python
> morphologizer = Morphologizer(nlp.vocab)
> morphologizer_bytes = morphologizer.to_bytes()
> ```

Serialize the pipe to a bytestring.

| Name        | Type  | Description                                                                |
| ----------- | ----- | ----------------------------------------------------------------------------- |
| `exclude`   | list  | String names of [serialization fields](#serialization-fields) to exclude.     |
| **RETURNS** | bytes | The serialized form of the `Morphologizer` object.                            |

## Morphologizer.from_bytes {#from_bytes tag="method"}

Load the pipe from a bytestring. Modifies the object in place and returns it.

> #### Example
>
> ```python
> morphologizer_bytes = morphologizer.to_bytes()
> morphologizer = Morphologizer(nlp.vocab)
> morphologizer.from_bytes(morphologizer_bytes)
> ```

| Name         | Type            | Description                                                               |
| ------------ | --------------- | ----------------------------------------------------------------------------- |
| `bytes_data` | bytes           | The data to load from.                                                         |
| `exclude`    | list            | String names of [serialization fields](#serialization-fields) to exclude.     |
| **RETURNS**  | `Morphologizer` | The `Morphologizer` object.                                                    |

## Morphologizer.labels {#labels tag="property"}

The labels currently added to the component in Universal Dependencies
[FEATS format](https://universaldependencies.org/format.html#morphological-annotation).
Note that even for a blank component, this will always include the internal
empty label `_`. If POS features are used, the labels will include the
coarse-grained POS as the feature `POS`.

> #### Example
>
> ```python
> morphologizer.add_label("Mood=Ind|POS=VERB|Tense=Past|VerbForm=Fin")
> assert "Mood=Ind|POS=VERB|Tense=Past|VerbForm=Fin" in morphologizer.labels
> ```

| Name        | Type  | Description                        |
| ----------- | ----- | ---------------------------------- |
| **RETURNS** | tuple | The labels added to the component. |

## Serialization fields {#serialization-fields}

During serialization, spaCy will export several data fields used to restore
different aspects of the object. If needed, you can exclude them from
serialization by passing in the string names via the `exclude` argument.

> #### Example
>
> ```python
> data = morphologizer.to_disk("/path", exclude=["vocab"])
> ```

| Name    | Description                                                     |
| ------- | --------------------------------------------------------------- |
| `vocab` | The shared [`Vocab`](/api/vocab).                                |
| `cfg`   | The config file. You usually don't want to exclude this.         |
| `model` | The binary model data. You usually don't want to exclude this.   |