from __future__ import unicode_literals

from thinc.api import chain, layerize, clone, concatenate, with_flatten, uniqued
from thinc.api import noop, with_square_sequences
from thinc.v2v import Maxout, Model
from thinc.i2v import HashEmbed, StaticVectors
from thinc.t2t import ExtractWindow
from thinc.misc import Residual, LayerNorm, FeatureExtracter

from ..util import make_layer, registry
from ._wire import concatenate_lists


@registry.architectures.register("spacy.Tok2Vec.v1")
def Tok2Vec(config):
    doc2feats = make_layer(config["@doc2feats"])
    embed = make_layer(config["@embed"])
    encode = make_layer(config["@encode"])
    field_size = getattr(encode, "receptive_field", 0)
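    # A hedged worked example of the padding arithmetic, using the values from
    # _EXAMPLE_CONFIG at the bottom of this module: MaxoutWindowEncoder sets
    # receptive_field = window_size * depth, so window_size=1 and depth=4 give
    # field_size=4. That value is passed as `pad` to with_flatten below, the
    # assumption being that padding by the full receptive field keeps the
    # window encoder from mixing tokens of neighbouring documents.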
    tok2vec = chain(doc2feats, with_flatten(chain(embed, encode), pad=field_size))
    tok2vec.cfg = config
    tok2vec.nO = encode.nO
    tok2vec.embed = embed
    tok2vec.encode = encode
    return tok2vec


@registry.architectures.register("spacy.Doc2Feats.v1")
def Doc2Feats(config):
    columns = config["columns"]
    return FeatureExtracter(columns)


@registry.architectures.register("spacy.MultiHashEmbed.v1")
def MultiHashEmbed(config):
    # For backwards compatibility with models trained before the architecture
    # registry, we have to be careful to get exactly the same model structure.
    # One subtle point is that the concatenation operator is binary, so
    # (a | b | c) associates pairwise: we actually get
    # concatenate(concatenate(a, b), c) rather than a single flat
    # concatenate(a, b, c). That's why the implementation is a bit ugly here.
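    # To illustrate (a, b, c, d are placeholder layers, not names from this
    # module), the two ways of writing the embedding layer build different
    # trees:
    #
    #     (a | b | c | d)          -> concatenate(concatenate(concatenate(a, b), c), d)
    #     concatenate(a, b, c, d)  -> one flat concatenation node
    #
    # The layers therefore iterate in a different order, and weights saved
    # with one tree do not load correctly into the other.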
    cols = config["columns"]
    width = config["width"]
    rows = config["rows"]

    norm = HashEmbed(width, rows, column=cols.index("NORM"), name="embed_norm")
    if config["use_subwords"]:
        prefix = HashEmbed(
            width, rows // 2, column=cols.index("PREFIX"), name="embed_prefix"
        )
        suffix = HashEmbed(
            width, rows // 2, column=cols.index("SUFFIX"), name="embed_suffix"
        )
        shape = HashEmbed(
            width, rows // 2, column=cols.index("SHAPE"), name="embed_shape"
        )
    if config.get("@pretrained_vectors"):
        glove = make_layer(config["@pretrained_vectors"])
    mix = make_layer(config["@mix"])

    with Model.define_operators({">>": chain, "|": concatenate}):
        if config["use_subwords"] and config.get("@pretrained_vectors"):
            mix._layers[0].nI = width * 5
            layer = uniqued(
                (glove | norm | prefix | suffix | shape) >> mix,
                column=cols.index("ORTH"),
            )
        elif config["use_subwords"]:
            mix._layers[0].nI = width * 4
            layer = uniqued(
                (norm | prefix | suffix | shape) >> mix, column=cols.index("ORTH")
            )
        elif config.get("@pretrained_vectors"):
            mix._layers[0].nI = width * 2
            layer = uniqued((glove | norm) >> mix, column=cols.index("ORTH"))
        else:
            layer = norm
    layer.cfg = config
    return layer


@registry.architectures.register("spacy.CharacterEmbed.v1")
def CharacterEmbed(config):
    from .. import _ml

    width = config["width"]
    chars = config["chars"]

    chr_embed = _ml.CharacterEmbedModel(nM=width, nC=chars)
    other_tables = make_layer(config["@embed_features"])
    mix = make_layer(config["@mix"])

    model = chain(concatenate_lists(chr_embed, other_tables), mix)
    model.cfg = config
    return model


@registry.architectures.register("spacy.MaxoutWindowEncoder.v1")
def MaxoutWindowEncoder(config):
    nO = config["width"]
    nW = config["window_size"]
    nP = config["pieces"]
    depth = config["depth"]

    cnn = chain(
        ExtractWindow(nW=nW), LayerNorm(Maxout(nO, nO * ((nW * 2) + 1), pieces=nP))
    )
    model = clone(Residual(cnn), depth)
    model.nO = nO
    # Tell Tok2Vec how much padding the encoder needs when flattening input.
    model.receptive_field = nW * depth
    return model


@registry.architectures.register("spacy.MishWindowEncoder.v1")
def MishWindowEncoder(config):
    from thinc.v2v import Mish

    nO = config["width"]
    nW = config["window_size"]
    depth = config["depth"]

    cnn = chain(ExtractWindow(nW=nW), LayerNorm(Mish(nO, nO * ((nW * 2) + 1))))
    model = clone(Residual(cnn), depth)
    model.nO = nO
    return model


@registry.architectures.register("spacy.PretrainedVectors.v1")
def PretrainedVectors(config):
    return StaticVectors(config["vectors_name"], config["width"], config["column"])


@registry.architectures.register("spacy.TorchBiLSTMEncoder.v1")
def TorchBiLSTMEncoder(config):
    import torch.nn
    from thinc.extra.wrappers import PyTorchWrapperRNN

    width = config["width"]
    depth = config["depth"]
    if depth == 0:
        return layerize(noop())
    # Each direction gets width // 2 units, so the bidirectional output is `width`.
    return with_square_sequences(
        PyTorchWrapperRNN(torch.nn.LSTM(width, width // 2, depth, bidirectional=True))
    )


_EXAMPLE_CONFIG = {
    "@doc2feats": {
        "arch": "Doc2Feats",
        "config": {"columns": ["ID", "NORM", "PREFIX", "SUFFIX", "SHAPE", "ORTH"]},
    },
    "@embed": {
        "arch": "spacy.MultiHashEmbed.v1",
        "config": {
            "width": 96,
            "rows": 2000,
            "columns": ["ID", "NORM", "PREFIX", "SUFFIX", "SHAPE", "ORTH"],
            "use_subwords": True,
            "@pretrained_vectors": {
                "arch": "TransformedStaticVectors",
                "config": {
                    "vectors_name": "en_vectors_web_lg.vectors",
                    "width": 96,
                    "column": 0,
                },
            },
            "@mix": {
                "arch": "LayerNormalizedMaxout",
                "config": {"width": 96, "pieces": 3},
            },
        },
    },
    "@encode": {
        "arch": "MaxoutWindowEncode",
        "config": {"width": 96, "window_size": 1, "depth": 4, "pieces": 3},
    },
}
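
# A minimal usage sketch for a config shaped like _EXAMPLE_CONFIG (hedged: it
# assumes make_layer can resolve every nested "arch" value; names such as
# "MaxoutWindowEncode" and "LayerNormalizedMaxout" above are illustrative and
# do not all match the names registered in this module):
#
#     tok2vec = Tok2Vec(_EXAMPLE_CONFIG)
#     docs = [nlp.make_doc("This is a sentence.")]  # `nlp` is a loaded pipeline
#     vectors = tok2vec(docs)  # one array of shape (n_tokens, 96) per doc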