Mirror of https://github.com/explosion/spaCy.git (synced 2024-11-10 19:57:17 +03:00)
TextCatParametricAttention.v1: set key transform dimensions (#13249)
* TextCatParametricAttention.v1: set key transform dimensions

  This is necessary for tok2vec implementations that initialize lazily
  (e.g. curated transformers).

* Add lazily-initialized tok2vec to simulate transformers

  Add a lazily-initialized tok2vec to the tests and test the current
  textcat models with it. Fix some additional issues found using this
  test.

* isort

* Add `test.` prefix to `LazyInitTok2Vec.v1`
This commit is contained in:
parent d84068e460
commit 2dbb332cea
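Before the diff, a minimal sketch of the scenario this change fixes. It is not part of the commit: it wires the test-only "test.LazyInitTok2Vec.v1" stand-in (added below in spacy/tests/tok2vec.py) into spacy.TextCatParametricAttention.v1, much like the new test does; a curated-transformers tok2vec would behave the same way, since its width is only known after initialization.

# Register "test.LazyInitTok2Vec.v1" (the test helper added by this commit).
from spacy.tests.tok2vec import build_lazy_init_tok2vec  # noqa: F401
from spacy.lang.en import English

# Hypothetical config: a parametric-attention textcat over a tok2vec whose
# output width is unset until the tok2vec itself is initialized.
textcat_config = {
    "@architectures": "spacy.TextCatParametricAttention.v1",
    "exclusive_classes": True,
    "tok2vec": {"@architectures": "test.LazyInitTok2Vec.v1", "width": 96},
}

nlp = English()
textcat = nlp.add_pipe("textcat", config={"model": textcat_config})
textcat.add_label("POSITIVE")
textcat.add_label("NEGATIVE")
# Without this commit, initialization can fail for such tok2vecs: the key
# transform's dimensions were never set from the tok2vec width, and the
# width could not be queried before the tok2vec was initialized.
nlp.initialize()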
spacy/ml/models/textcat.py

@@ -185,6 +185,11 @@ def build_text_classifier_v2(
 
 
 def init_ensemble_textcat(model, X, Y) -> Model:
+    # When tok2vec is lazily initialized, we need to initialize it before
+    # the rest of the chain to ensure that we can get its width.
+    tok2vec = model.get_ref("tok2vec")
+    tok2vec.initialize(X)
+
     tok2vec_width = get_tok2vec_width(model)
     model.get_ref("attention_layer").set_dim("nO", tok2vec_width)
     model.get_ref("maxout_layer").set_dim("nO", tok2vec_width)

@@ -264,6 +269,7 @@ def _build_parametric_attention_with_residual_nonlinear(
 
     parametric_attention.set_ref("tok2vec", tok2vec)
     parametric_attention.set_ref("attention_layer", attention_layer)
+    parametric_attention.set_ref("key_transform", key_transform)
     parametric_attention.set_ref("nonlinear_layer", nonlinear_layer)
     parametric_attention.set_ref("norm_layer", norm_layer)
 

@@ -271,10 +277,17 @@ def _build_parametric_attention_with_residual_nonlinear(
 
 
 def _init_parametric_attention_with_residual_nonlinear(model, X, Y) -> Model:
+    # When tok2vec is lazily initialized, we need to initialize it before
+    # the rest of the chain to ensure that we can get its width.
+    tok2vec = model.get_ref("tok2vec")
+    tok2vec.initialize(X)
+
     tok2vec_width = get_tok2vec_width(model)
     model.get_ref("attention_layer").set_dim("nO", tok2vec_width)
-    model.get_ref("nonlinear_layer").set_dim("nO", tok2vec_width)
+    model.get_ref("key_transform").set_dim("nI", tok2vec_width)
+    model.get_ref("key_transform").set_dim("nO", tok2vec_width)
     model.get_ref("nonlinear_layer").set_dim("nI", tok2vec_width)
+    model.get_ref("nonlinear_layer").set_dim("nO", tok2vec_width)
     model.get_ref("norm_layer").set_dim("nI", tok2vec_width)
     model.get_ref("norm_layer").set_dim("nO", tok2vec_width)
     init_chain(model, X, Y)
spacy/tests/pipeline/test_textcat.py

@@ -28,6 +28,8 @@ from spacy.tokens import Doc, DocBin
 from spacy.training import Example
 from spacy.training.initialize import init_nlp
 
+# Ensure that the architecture gets added to the registry.
+from ..tok2vec import build_lazy_init_tok2vec as _
 from ..util import make_tempdir
 
 TRAIN_DATA_SINGLE_LABEL = [

@@ -40,6 +42,13 @@ TRAIN_DATA_MULTI_LABEL = [
     ("I'm confused but happy", {"cats": {"ANGRY": 0.0, "CONFUSED": 1.0, "HAPPY": 1.0}}),
 ]
 
+lazy_init_model_config = """
+[model]
+@architectures = "test.LazyInitTok2Vec.v1"
+width = 96
+"""
+LAZY_INIT_TOK2VEC_MODEL = Config().from_str(lazy_init_model_config)["model"]
+
 
 def make_get_examples_single_label(nlp):
     train_examples = []

@@ -546,6 +555,34 @@ def test_error_with_multi_labels():
     nlp.initialize(get_examples=lambda: train_examples)
 
 
+# fmt: off
+@pytest.mark.parametrize(
+    "name,textcat_config",
+    [
+        # ENSEMBLE V2
+        ("textcat_multilabel", {"@architectures": "spacy.TextCatEnsemble.v2", "tok2vec": LAZY_INIT_TOK2VEC_MODEL, "linear_model": {"@architectures": "spacy.TextCatBOW.v3", "exclusive_classes": False, "ngram_size": 1, "no_output_layer": False}}),
+        ("textcat", {"@architectures": "spacy.TextCatEnsemble.v2", "tok2vec": LAZY_INIT_TOK2VEC_MODEL, "linear_model": {"@architectures": "spacy.TextCatBOW.v3", "exclusive_classes": True, "ngram_size": 5, "no_output_layer": False}}),
+        # PARAMETRIC ATTENTION V1
+        ("textcat", {"@architectures": "spacy.TextCatParametricAttention.v1", "tok2vec": LAZY_INIT_TOK2VEC_MODEL, "exclusive_classes": True}),
+        ("textcat_multilabel", {"@architectures": "spacy.TextCatParametricAttention.v1", "tok2vec": LAZY_INIT_TOK2VEC_MODEL, "exclusive_classes": False}),
+        # REDUCE
+        ("textcat", {"@architectures": "spacy.TextCatReduce.v1", "tok2vec": LAZY_INIT_TOK2VEC_MODEL, "exclusive_classes": True, "use_reduce_first": True, "use_reduce_last": True, "use_reduce_max": True, "use_reduce_mean": True}),
+        ("textcat_multilabel", {"@architectures": "spacy.TextCatReduce.v1", "tok2vec": LAZY_INIT_TOK2VEC_MODEL, "exclusive_classes": False, "use_reduce_first": True, "use_reduce_last": True, "use_reduce_max": True, "use_reduce_mean": True}),
+    ],
+)
+# fmt: on
+def test_tok2vec_lazy_init(name, textcat_config):
+    # Check that we can properly initialize and use a textcat model using
+    # a lazily-initialized tok2vec.
+    nlp = English()
+    pipe_config = {"model": textcat_config}
+    textcat = nlp.add_pipe(name, config=pipe_config)
+    textcat.add_label("POSITIVE")
+    textcat.add_label("NEGATIVE")
+    nlp.initialize()
+    nlp.pipe(["This is a test."])
+
+
 @pytest.mark.parametrize(
     "name,get_examples, train_data",
     [
spacy/tests/tok2vec.py (new file, 36 lines)

@@ -0,0 +1,36 @@
+from typing import List
+
+from thinc.api import Model
+from thinc.types import Floats2d
+
+from spacy.tokens import Doc
+from spacy.util import registry
+
+
+@registry.architectures("test.LazyInitTok2Vec.v1")
+def build_lazy_init_tok2vec(*, width: int) -> Model[List[Doc], List[Floats2d]]:
+    """tok2vec model of which the output size is only known after
+    initialization. This implementation does not output meaningful
+    embeddings, it is strictly for testing."""
+    return Model(
+        "lazy_init_tok2vec",
+        lazy_init_tok2vec_forward,
+        init=lazy_init_tok2vec_init,
+        dims={"nO": None},
+        attrs={"width": width},
+    )
+
+
+def lazy_init_tok2vec_init(model: Model, X=None, Y=None):
+    width = model.attrs["width"]
+    model.set_dim("nO", width)
+
+
+def lazy_init_tok2vec_forward(model: Model, X: List[Doc], is_train: bool):
+    width = model.get_dim("nO")
+    Y = [model.ops.alloc2f(len(doc), width) for doc in X]
+
+    def backprop(dY):
+        return []
+
+    return Y, backprop
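As a short addendum (my sketch, not part of the commit), this is the lazy-init contract the dummy tok2vec models, and which the textcat initializers above now honor by calling tok2vec.initialize(X) before querying the width:

from spacy.tests.tok2vec import build_lazy_init_tok2vec

tok2vec = build_lazy_init_tok2vec(width=96)
assert tok2vec.has_dim("nO") is None  # "nO" is registered but unset pre-init
tok2vec.initialize()                  # init reads attrs["width"] and sets "nO"
assert tok2vec.get_dim("nO") == 96    # downstream layers can now query it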