2023-06-14 18:48:41 +03:00
|
|
|
from typing import List, Optional, cast
|
|
|
|
|
|
|
|
from thinc.api import Linear, Model, chain, list2array, use_ops, zero_init
|
2020-08-07 17:55:54 +03:00
|
|
|
from thinc.types import Floats2d
|
2020-02-27 20:42:27 +03:00
|
|
|
|
2020-09-23 18:32:14 +03:00
|
|
|
from ...compat import Literal
|
2023-06-14 18:48:41 +03:00
|
|
|
from ...errors import Errors
|
|
|
|
from ...tokens import Doc
|
2020-02-28 13:57:41 +03:00
|
|
|
from ...util import registry
|
2020-03-29 20:40:36 +03:00
|
|
|
from .._precomputable_affine import PrecomputableAffine
|
2020-05-18 23:23:33 +03:00
|
|
|
from ..tb_framework import TransitionModel
|
2020-02-28 13:57:41 +03:00
|
|
|
|
2020-02-27 20:42:27 +03:00
|
|
|
|
2021-03-02 19:56:28 +03:00
|
|
|
@registry.architectures("spacy.TransitionBasedParser.v2")
def build_tb_parser_model(
    tok2vec: Model[List[Doc], List[Floats2d]],
    state_type: Literal["parser", "ner"],
    extra_state_tokens: bool,
    hidden_width: int,
    maxout_pieces: int,
    use_upper: bool,
    nO: Optional[int] = None,
) -> Model:
    """Build a transition-based parser model, usable for NER or dependency
    parsing.

    Transition-based parsing maps structured prediction onto a sequence of
    state transitions. Background reading:
    https://explosion.ai/blog/parsing-english-in-python

    The state-prediction network is assembled from two or three subnetworks:

    * tok2vec: maps each token to a vector representation; run once per batch.
    * lower: builds a feature-specific vector per (token, feature) pair, also
      once per batch. A state representation is then just the sum of its
      component feature vectors followed by the non-linearity.
    * upper (optional): a feed-forward layer producing action scores from the
      state representation. When absent, the lower model's output is used as
      the action scores directly.

    tok2vec (Model[List[Doc], List[Floats2d]]):
        Subnetwork mapping tokens to vector representations.
    state_type (str):
        Kind of parser model, either "parser" or "ner".
    extra_state_tokens (bool): Whether to look at additional context tokens
        when building the state vector. `False` (the default) gives 8 feature
        sets for the parser and 3 for NER; `True` gives 13 and 6 respectively.
    hidden_width (int): Width of the hidden layer.
    maxout_pieces (int): Number of maxout pieces in the state prediction
        layer; 1, 2 or 3 are recommended. With 1 piece, the maxout becomes a
        ReLu non-linearity if use_upper=True and no non-linearity otherwise.
    use_upper (bool): Whether to add a hidden layer after the state vector to
        predict the action scores. Recommended False for large pretrained
        models (e.g. transformers) and True for smaller networks: the upper
        layer runs on CPU, which bottlenecks big GPU models where it is also
        less needed.
    nO (int or None): Number of actions the model predicts between. Usually
        inferred from data at the start of training, or loaded from disk.
    """
    # Feature-token counts per parser type: (default, with extra state tokens).
    feature_counts = {"parser": (8, 13), "ner": (3, 6)}
    if state_type not in feature_counts:
        raise ValueError(Errors.E917.format(value=state_type))
    base_feats, extra_feats = feature_counts[state_type]
    nr_feature_tokens = extra_feats if extra_state_tokens else base_feats

    t2v_width = tok2vec.get_dim("nO") if tok2vec.has_dim("nO") else None
    tok2vec = chain(
        tok2vec,
        list2array(),
        Linear(hidden_width, t2v_width),
    )
    tok2vec.set_dim("nO", hidden_width)

    lower = _define_lower(
        nO=hidden_width if use_upper else nO,
        nF=nr_feature_tokens,
        nI=tok2vec.get_dim("nO"),
        nP=maxout_pieces,
    )
    upper = None
    if use_upper:
        with use_ops("cpu"):
            # Initialize weights at zero, as it's a classification layer.
            upper = _define_upper(nO=nO, nI=None)
    return TransitionModel(tok2vec, lower, upper, resize_output)
|
|
|
|
|
|
|
|
|
|
|
|
def _define_upper(nO, nI):
    """Construct the upper scoring layer: a linear layer whose weights are
    initialized to zero, as it acts as a classification layer."""
    layer = Linear(nO=nO, nI=nI, init_W=zero_init)
    return layer
|
|
|
|
|
|
|
|
|
|
|
|
def _define_lower(nO, nF, nI, nP):
    """Construct the lower layer: a precomputable affine transform producing
    one feature-specific vector per (token, feature) pair."""
    layer = PrecomputableAffine(nO=nO, nF=nF, nI=nI, nP=nP)
    return layer
|
|
|
|
|
|
|
|
|
|
|
|
def resize_output(model, new_nO):
    """Resize the transition model's output to `new_nO` classes, delegating to
    the resizer for whichever layer produces the action scores (the upper
    layer when present, otherwise the lower layer)."""
    resizer = _resize_upper if model.attrs["has_upper"] else _resize_lower
    return resizer(model, new_nO)
|
|
|
|
|
|
|
|
|
|
|
|
def _resize_upper(model, new_nO):
    """Resize the model's upper (scoring) layer to `new_nO` output classes,
    preserving any already-learned weights. Newly added class indices are
    recorded in model.attrs["unseen_classes"]."""
    upper = model.get_ref("upper")
    if upper.has_dim("nO") is None:
        # Output dim was never set (has_dim returns None for an unset dim in
        # thinc), so there are no weights to copy — just set the dim.
        upper.set_dim("nO", new_nO)
        return model
    elif new_nO == upper.get_dim("nO"):
        # Already the requested size; nothing to do.
        return model

    smaller = upper
    nI = smaller.maybe_get_dim("nI")
    # The upper layer is computed on CPU (see build_tb_parser_model), so
    # allocate the replacement layer with CPU ops as well.
    with use_ops("cpu"):
        larger = _define_upper(nO=new_nO, nI=nI)
    # it could be that the model is not initialized yet, then skip this bit
    if smaller.has_param("W"):
        larger_W = larger.ops.alloc2f(new_nO, nI)
        larger_b = larger.ops.alloc1f(new_nO)
        smaller_W = smaller.get_param("W")
        smaller_b = smaller.get_param("b")
        # Weights are stored in (nr_out, nr_in) format, so we're basically
        # just adding rows here.
        if smaller.has_dim("nO"):
            old_nO = smaller.get_dim("nO")
            larger_W[:old_nO] = smaller_W
            larger_b[:old_nO] = smaller_b
            # The appended rows are zero-initialized: mark those classes as
            # unseen so training knows they carry no learned information yet.
            for i in range(old_nO, new_nO):
                model.attrs["unseen_classes"].add(i)

        larger.set_param("W", larger_W)
        larger.set_param("b", larger_b)
    # Splice the resized layer into the model graph and refresh the named ref.
    model._layers[-1] = larger
    model.set_ref("upper", larger)
    return model
|
|
|
|
|
|
|
|
|
|
|
|
def _resize_lower(model, new_nO):
    """Resize the model's lower (precomputable affine) layer to `new_nO`
    output classes, preserving learned weights and padding. Newly added class
    indices are recorded in model.attrs["unseen_classes"]."""
    lower = model.get_ref("lower")
    if lower.has_dim("nO") is None:
        # Output dim was never set (has_dim returns None for an unset dim in
        # thinc), so there are no weights to copy — just set the dim.
        lower.set_dim("nO", new_nO)
        return model
    # NOTE(review): unlike _resize_upper, there is no early return when
    # new_nO already equals the current nO — presumably resizing to the same
    # size is harmless here, but worth confirming.

    smaller = lower
    nI = smaller.maybe_get_dim("nI")
    nF = smaller.maybe_get_dim("nF")
    nP = smaller.maybe_get_dim("nP")
    larger = _define_lower(nO=new_nO, nI=nI, nF=nF, nP=nP)
    # it could be that the model is not initialized yet, then skip this bit
    if smaller.has_param("W"):
        # W is 4D (nF, nO, nP, nI); b is (nO, nP); pad is (1, nF, nO, nP).
        larger_W = larger.ops.alloc4f(nF, new_nO, nP, nI)
        larger_b = larger.ops.alloc2f(new_nO, nP)
        larger_pad = larger.ops.alloc4f(1, nF, new_nO, nP)
        smaller_W = smaller.get_param("W")
        smaller_b = smaller.get_param("b")
        smaller_pad = smaller.get_param("pad")
        # Copy the old weights and padding into the new layer
        if smaller.has_dim("nO"):
            old_nO = smaller.get_dim("nO")
            larger_W[:, 0:old_nO, :, :] = smaller_W
            larger_pad[:, :, 0:old_nO, :] = smaller_pad
            larger_b[0:old_nO, :] = smaller_b
            # The appended class slots are zero-initialized: mark them as
            # unseen so training knows they carry no learned information yet.
            for i in range(old_nO, new_nO):
                model.attrs["unseen_classes"].add(i)

        larger.set_param("W", larger_W)
        larger.set_param("b", larger_b)
        larger.set_param("pad", larger_pad)
    # Splice the resized layer into the model graph and refresh the named ref.
    model._layers[1] = larger
    model.set_ref("lower", larger)
    return model
|