from typing import Optional, Iterable, Tuple, List, Callable, TYPE_CHECKING
from thinc.api import chain, Maxout, LayerNorm, Softmax, Linear, zero_init, Model
from thinc.api import MultiSoftmax, list2array
from thinc.api import to_categorical, CosineDistance, L2Distance
from ...util import registry, OOV_RANK
from ...errors import Errors
from ...attrs import ID
import numpy
from functools import partial

if TYPE_CHECKING:
    # This lets us add type hints for mypy etc. without causing circular imports
    from ...vocab import Vocab  # noqa: F401
    from ...tokens.doc import Doc  # noqa: F401


@registry.architectures("spacy.PretrainVectors.v1")
def create_pretrain_vectors(
    maxout_pieces: int, hidden_size: int, loss: str
) -> Callable[["Vocab", Model], Model]:
    def create_vectors_objective(vocab: "Vocab", tok2vec: Model) -> Model:
        if vocab.vectors.data.shape[1] == 0:
            raise ValueError(Errors.E875)
        model = build_cloze_multi_task_model(
            vocab, tok2vec, hidden_size=hidden_size, maxout_pieces=maxout_pieces
        )
        model.attrs["loss"] = create_vectors_loss()
        return model

    def create_vectors_loss() -> Callable:
        if loss == "cosine":
            distance = CosineDistance(normalize=True, ignore_zeros=True)
            return partial(get_vectors_loss, distance=distance)
        elif loss == "L2":
            distance = L2Distance(normalize=True)
            return partial(get_vectors_loss, distance=distance)
        else:
            raise ValueError(Errors.E906.format(found=loss, supported="'cosine', 'L2'"))

    return create_vectors_objective
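
# Illustrative config sketch (an assumption, not part of the original module):
# a pretraining block along these lines would select this architecture as the
# objective; the parameter values shown here are placeholders.
#
#     [pretraining.objective]
#     @architectures = "spacy.PretrainVectors.v1"
#     maxout_pieces = 3
#     hidden_size = 300
#     loss = "cosine"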


@registry.architectures("spacy.PretrainCharacters.v1")
def create_pretrain_characters(
    maxout_pieces: int, hidden_size: int, n_characters: int
) -> Callable[["Vocab", Model], Model]:
    def create_characters_objective(vocab: "Vocab", tok2vec: Model) -> Model:
        model = build_cloze_characters_multi_task_model(
            vocab,
            tok2vec,
            hidden_size=hidden_size,
            maxout_pieces=maxout_pieces,
            nr_char=n_characters,
        )
        model.attrs["loss"] = partial(get_characters_loss, nr_char=n_characters)
        return model

    return create_characters_objective
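
# Illustrative config sketch (an assumption, not part of the original module):
# the character objective is selected the same way, predicting a fixed number
# of UTF-8 bytes drawn from each word; the values shown are placeholders.
#
#     [pretraining.objective]
#     @architectures = "spacy.PretrainCharacters.v1"
#     maxout_pieces = 3
#     hidden_size = 300
#     n_characters = 4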


def get_vectors_loss(ops, docs, prediction, distance):
    """Compute a loss based on a distance between the documents' vectors and
    the prediction.
    """
    # The simplest way to implement this would be to vstack the
    # token.vector values, but that's a bit inefficient, especially on GPU.
    # Instead we fetch the index into the vectors table for each of our tokens,
    # and look them up all at once. This prevents data copying.
    ids = ops.flatten([doc.to_array(ID).ravel() for doc in docs])
    target = docs[0].vocab.vectors.data[ids]
    target[ids == OOV_RANK] = 0
    d_target, loss = distance(prediction, target)
    return loss, d_target
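
# Minimal sketch of the indexing trick used above (illustrative, plain numpy,
# hypothetical values): one fancy-indexing call pulls a row per token out of
# the vectors table instead of stacking token.vector one token at a time.
#
#     table = numpy.random.rand(1000, 300).astype("f")  # stand-in vectors table
#     ids = numpy.array([5, 42, 42, 7])                 # one row id per token
#     target = table[ids]                               # shape (4, 300), one copy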


def get_characters_loss(ops, docs, prediction, nr_char):
    """Compute a loss based on a number of characters predicted from the docs."""
    target_ids = numpy.vstack([doc.to_utf8_array(nr_char=nr_char) for doc in docs])
    target_ids = target_ids.reshape((-1,))
    target = ops.asarray(to_categorical(target_ids, n_classes=256), dtype="f")
    target = target.reshape((-1, 256 * nr_char))
    diff = prediction - target
    loss = (diff ** 2).sum()
    d_target = diff / float(prediction.shape[0])
    return loss, d_target
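
# Shape sketch (descriptive, added for clarity): with nr_char=4 and N tokens,
# target_ids holds N * 4 byte values, to_categorical expands them to
# (N * 4, 256) one-hot rows, and the final reshape concatenates them so both
# prediction and target are (N, 1024); the loss is a squared-error sum over
# that matrix, with the gradient scaled by 1 / N.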


def build_multi_task_model(
    tok2vec: Model,
    maxout_pieces: int,
    token_vector_width: int,
    nO: Optional[int] = None,
) -> Model:
    softmax = Softmax(nO=nO, nI=token_vector_width * 2)
    model = chain(
        tok2vec,
        Maxout(
            nO=token_vector_width * 2,
            nI=token_vector_width,
            nP=maxout_pieces,
            dropout=0.0,
        ),
        LayerNorm(token_vector_width * 2),
        softmax,
    )
    model.set_ref("tok2vec", tok2vec)
    model.set_ref("output_layer", softmax)
    return model
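
# Width sketch (descriptive, added for clarity): per token the chain above maps
# token_vector_width -> 2 * token_vector_width (Maxout) -> LayerNorm at the
# same width -> nO class probabilities (Softmax).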


def build_cloze_multi_task_model(
    vocab: "Vocab", tok2vec: Model, maxout_pieces: int, hidden_size: int
) -> Model:
    nO = vocab.vectors.data.shape[1]
    output_layer = chain(
        list2array(),
        Maxout(
            nO=hidden_size,
            nI=tok2vec.get_dim("nO"),
            nP=maxout_pieces,
            normalize=True,
            dropout=0.0,
        ),
        Linear(nO=nO, nI=hidden_size, init_W=zero_init),
    )
    model = chain(tok2vec, output_layer)
    model = build_masked_language_model(vocab, model)
    model.set_ref("tok2vec", tok2vec)
    model.set_ref("output_layer", output_layer)
    return model
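
# Width sketch (descriptive, added for clarity): tok2vec width -> hidden_size
# (Maxout) -> nO (Linear), where nO is the width of the static vectors table,
# so the objective is to reproduce each (possibly masked) token's vector.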


def build_cloze_characters_multi_task_model(
    vocab: "Vocab", tok2vec: Model, maxout_pieces: int, hidden_size: int, nr_char: int
) -> Model:
    output_layer = chain(
        list2array(),
        Maxout(nO=hidden_size, nP=maxout_pieces),
        LayerNorm(nI=hidden_size),
        MultiSoftmax([256] * nr_char, nI=hidden_size),
    )
    model = build_masked_language_model(vocab, chain(tok2vec, output_layer))
    model.set_ref("tok2vec", tok2vec)
    model.set_ref("output_layer", output_layer)
    return model
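
# Output sketch (descriptive, added for clarity): MultiSoftmax([256] * nr_char)
# is nr_char independent softmaxes, each over the 256 possible byte values, so
# the model predicts nr_char UTF-8 bytes per (possibly masked) token.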


def build_masked_language_model(
    vocab: "Vocab", wrapped_model: Model, mask_prob: float = 0.15
) -> Model:
    """Convert a model into a BERT-style masked language model"""
    random_words = _RandomWords(vocab)

    def mlm_forward(model, docs, is_train):
        mask, docs = _apply_mask(docs, random_words, mask_prob=mask_prob)
        mask = model.ops.asarray(mask).reshape((mask.shape[0], 1))
        output, backprop = model.layers[0](docs, is_train)

        def mlm_backward(d_output):
            d_output *= 1 - mask
            return backprop(d_output)

        return output, mlm_backward

    def mlm_initialize(model: Model, X=None, Y=None):
        wrapped = model.layers[0]
        wrapped.initialize(X=X, Y=Y)
        for dim in wrapped.dim_names:
            if wrapped.has_dim(dim):
                model.set_dim(dim, wrapped.get_dim(dim))

    mlm_model = Model(
        "masked-language-model",
        mlm_forward,
        layers=[wrapped_model],
        init=mlm_initialize,
        refs={"wrapped": wrapped_model},
        dims={dim: None for dim in wrapped_model.dim_names},
    )
    mlm_model.set_ref("wrapped", wrapped_model)
    return mlm_model
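
# Gradient note (descriptive, added for clarity): mask is True for tokens that
# _apply_mask left unchanged, so the `d_output *= 1 - mask` step in mlm_backward
# zeroes their gradient and only the corrupted positions update the wrapped
# model, as in BERT-style masked language modelling.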


class _RandomWords:
    def __init__(self, vocab: "Vocab") -> None:
        self.words = [lex.text for lex in vocab if lex.prob != 0.0]
        self.probs = [lex.prob for lex in vocab if lex.prob != 0.0]
        self.words = self.words[:10000]
        self.probs = self.probs[:10000]
        self.probs = numpy.exp(numpy.array(self.probs, dtype="f"))
        self.probs /= self.probs.sum()
        self._cache = []

    def next(self) -> str:
        if not self._cache:
            self._cache.extend(
                numpy.random.choice(len(self.words), 10000, p=self.probs)
            )
        index = self._cache.pop()
        return self.words[index]
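
# Sampling note (descriptive, added for clarity): lex.prob is a log probability,
# so numpy.exp(...) plus the division renormalizes over the 10,000 entries kept
# above, and next() draws replacement words from that distribution in batches
# of 10,000 to avoid calling numpy.random.choice per token.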


def _apply_mask(
    docs: Iterable["Doc"], random_words: _RandomWords, mask_prob: float = 0.15
) -> Tuple[numpy.ndarray, List["Doc"]]:
    # This needs to be here to avoid circular imports
    from ...tokens.doc import Doc  # noqa: F811

    N = sum(len(doc) for doc in docs)
    mask = numpy.random.uniform(0.0, 1.0, (N,))
    mask = mask >= mask_prob
    i = 0
    masked_docs = []
    for doc in docs:
        words = []
        for token in doc:
            if not mask[i]:
                word = _replace_word(token.text, random_words)
            else:
                word = token.text
            words.append(word)
            i += 1
        spaces = [bool(w.whitespace_) for w in doc]
        # NB: If you change this implementation to instead modify
        # the docs in place, take care that the IDs reflect the original
        # words. Currently we use the original docs to make the vectors
        # for the target, so we don't lose the original tokens. But if
        # you modified the docs in place here, you would.
        masked_docs.append(Doc(doc.vocab, words=words, spaces=spaces))
    return mask, masked_docs
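
# Mask convention (descriptive, added for clarity): mask[i] is True when token i
# keeps its original text and False when it is handed to _replace_word, so on
# average a fraction mask_prob of the tokens is corrupted per batch.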


def _replace_word(word: str, random_words: _RandomWords, mask: str = "[MASK]") -> str:
    roll = numpy.random.random()
    if roll < 0.8:
        return mask
    elif roll < 0.9:
        return random_words.next()
    else:
        return word
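
# Replacement scheme (descriptive, added for clarity): of the tokens selected
# for corruption, roughly 80% become the literal "[MASK]" string, 10% become a
# random frequent word, and 10% keep their original text, mirroring the BERT
# masking recipe.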