Mirror of https://github.com/explosion/spaCy.git, synced 2024-11-13 13:17:06 +03:00
569cc98982
* Add load_from_config function
* Add train_from_config script
* Merge configs and expose via spacy.config
* Fix script
* Suggest create_evaluation_callback
* Hard-code for NER
* Fix errors
* Register command
* Add TODO
* Update train-from-config todos
* Fix imports
* Allow delayed setting of parser model nr_class
* Get train-from-config working
* Tidy up and fix scores and printing
* Hide traceback if cancelled
* Fix weighted score formatting
* Fix score formatting
* Make output_path optional
* Add Tok2Vec component
* Tidy up and add tok2vec_tensors
* Add option to copy docs in nlp.update
* Copy docs in nlp.update
* Adjust nlp.update() for set_annotations
* Don't shuffle pipes in nlp.update, decruft
* Support set_annotations arg in component update
* Support set_annotations in parser update
* Add get_gradients method
* Add get_gradients to parser
* Update errors.py
* Fix problems caused by merge
* Add _link_components method in nlp
* Add concept of 'listeners' and ControlledModel
* Support optional attributes arg in ControlledModel
* Try having tok2vec component in pipeline
* Fix tok2vec component
* Fix config
* Fix tok2vec
* Update for Example
* Update for Example
* Update config
* Add eg2doc util
* Update and add schemas/types
* Update schemas
* Fix nlp.update
* Fix tagger
* Remove hacks from train-from-config
* Remove hard-coded config str
* Calculate loss in tok2vec component
* Tidy up and use function signatures instead of models
* Support union types for registry models
* Minor cleaning in Language.update
* Make ControlledModel specifically Tok2VecListener
* Fix train_from_config
* Fix tok2vec
* Tidy up
* Add function for bilstm tok2vec
* Fix type
* Fix syntax
* Fix pytorch optimizer
* Add example configs
* Update for thinc describe changes
* Update for Thinc changes
* Update for dropout/sgd changes
* Update for dropout/sgd changes
* Unhack gradient update
* Work on refactoring _ml
* Remove _ml.py module
* WIP upgrade cli scripts for thinc
* Move some _ml stuff to util
* Import link_vectors from util
* Update train_from_config
* Import from util
* Import from util
* Temporarily add ml.component_models module
* Move ml methods
* Move typedefs
* Update load vectors
* Update gitignore
* Move imports
* Add PrecomputableAffine
* Fix imports
* Fix imports
* Fix imports
* Fix missing imports
* Update CLI scripts
* Update spacy.language
* Add stubs for building the models
* Update model definition
* Update create_default_optimizer
* Fix import
* Fix comment
* Update imports in tests
* Update imports in spacy.cli
* Fix import
* fix obsolete thinc imports
* update srsly pin
* from thinc to ml_datasets for example data such as imdb
* update ml_datasets pin
* using STATE.vectors
* small fix
* fix Sentencizer.pipe
* black formatting
* rename Affine to Linear as in thinc
* set validate explicitly to True
* rename with_square_sequences to with_list2padded
* rename with_flatten to with_list2array
* chaining layernorm
* small fixes
* revert Optimizer import
* build_nel_encoder with new thinc style
* fixes using model's get and set methods
* Tok2Vec in component models, various fixes
* fix up legacy tok2vec code
* add model initialize calls
* add in build_tagger_model
* small fixes
* setting model dims
* fixes for ParserModel
* various small fixes
* initialize thinc Models
* fixes
* consistent naming of window_size
* fixes, removing set_dropout
* work around Iterable issue
* remove legacy tok2vec
* util fix
* fix forward function of tok2vec listener
* more fixes
* trying to fix PrecomputableAffine (not successful yet)
* alloc instead of allocate
* add morphologizer
* rename residual
* rename fixes
* Fix predict function
* Update parser and parser model
* fixing a few more tests
* Fix precomputable affine
* Update component model
* Update parser model
* Move backprop padding to own function, for test
* Update test
* Fix p. affine
* Update NEL
* build_bow_text_classifier and extract_ngrams
* Fix parser init
* Fix test add label
* add build_simple_cnn_text_classifier
* Fix parser init
* Set gpu off by default in example
* Fix tok2vec listener
* Fix parser model
* Small fixes
* small fix for PyTorchLSTM parameters
* revert my_compounding hack (iterable fixed now)
* fix biLSTM
* Fix uniqued
* PyTorchRNNWrapper fix
* small fixes
* use helper function to calculate cosine loss
* small fixes for build_simple_cnn_text_classifier
* putting dropout default at 0.0 to ensure the layer gets built
* using thinc util's set_dropout_rate
* moving layer normalization inside of maxout definition to optimize dropout
* temp debugging in NEL
* fixed NEL model by using init defaults!
* fixing after set_dropout_rate refactor
* proper fix
* fix test_update_doc after refactoring optimizers in thinc
* Add CharacterEmbed layer
* Construct tagger Model
* Add missing import
* Remove unused stuff
* Work on textcat
* fix test (again :)) after optimizer refactor
* fixes to allow reading Tagger from_disk without overwriting dimensions
* don't build the tok2vec prematurely
* fix CharacterEmbed init
* CharacterEmbed fixes
* Fix CharacterEmbed architecture
* fix imports
* renames from latest thinc update
* one more rename
* add initialize calls where appropriate
* fix parser initialization
* Update Thinc version
* Fix errors, auto-format and tidy up imports
* Fix validation
* fix if bias is cupy array
* revert for now
* ensure it's a numpy array before running bp in ParserStepModel
* no reason to call require_gpu twice
* use CupyOps.to_numpy instead of cupy directly
* fix initialize of ParserModel
* remove unnecessary import
* fixes for CosineDistance
* fix device renaming
* use refactored loss functions (Thinc PR 251)
* overfitting test for tagger
* experimental settings for the tagger: avoid zero-init and subword normalization
* clean up tagger overfitting test
* use previous default value for nP
* remove toy config
* bringing layernorm back (had a bug - fixed in thinc)
* revert setting nP explicitly
* remove setting default in constructor
* restore values as they used to be
* add overfitting test for NER
* add overfitting test for dep parser
* add overfitting test for textcat
* fixing init for linear (previously affine)
* larger eps window for textcat
* ensure doc is not None
* Require newer thinc
* Make float check vaguer
* Slop the textcat overfit test more
* Fix textcat test
* Fix exclusive classes for textcat
* fix after renaming of alloc methods
* fixing renames and mandatory arguments (staticvectors WIP)
* upgrade to thinc==8.0.0.dev3
* refer to vocab.vectors directly instead of its name
* rename alpha to learn_rate
* adding hashembed and staticvectors dropout
* upgrade to thinc 8.0.0.dev4
* add name back to avoid warning W020
* thinc dev4
* update srsly
* using thinc 8.0.0a0!

Co-authored-by: Matthew Honnibal <honnibal+gh@gmail.com>
Co-authored-by: Ines Montani <ines@ines.io>
166 lines
5.4 KiB
Python
from thinc.api import Model, normal_init


def PrecomputableAffine(nO, nI, nF, nP):
    # Affine layer used by the transition-based parser: for every token it
    # precomputes one (nO, nP) maxout block per feature slot, so that
    # building a state vector later only requires summing nF rows.
    model = Model(
        "precomputable_affine",
        forward,
        init=init,
        dims={"nO": nO, "nI": nI, "nF": nF, "nP": nP},
        params={"W": None, "b": None, "pad": None},
    )
    model.initialize()
    return model
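
# Usage sketch (not part of the original file; hypothetical shapes, assuming
# Thinc v8's Model.__call__ API, which returns the output and a backprop
# callback):
#
#     import numpy
#     model = PrecomputableAffine(nO=8, nI=96, nF=6, nP=2)
#     X = numpy.random.normal(size=(10, 96)).astype("f")  # (nW, nI) token vectors
#     Yf, backprop = model(X, is_train=True)
#     assert Yf.shape == (11, 6, 8, 2)  # (nW + 1, nF, nO, nP); row 0 is the padding
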
def forward(model, X, is_train):
    nF = model.get_dim("nF")
    nO = model.get_dim("nO")
    nP = model.get_dim("nP")
    nI = model.get_dim("nI")
    W = model.get_param("W")  # (nF, nO, nP, nI)
    # Precompute the hidden features for all tokens in one big matmul:
    # (nW, nI) @ (nI, nF * nO * nP) -> (nW, nF * nO * nP)
    Yf = model.ops.gemm(X, W.reshape((nF * nO * nP, nI)), trans2=True)
    Yf = Yf.reshape((Yf.shape[0], nF, nO, nP))
    # Prepend the padding vector, used as a filler for missing tokens.
    Yf = model.ops.xp.vstack((model.get_param("pad"), Yf))

    def backward(dY_ids):
        # This backprop is particularly tricky, because we get back a
        # different thing from what we put out. We put out an array of shape
        # (nW + 1, nF, nO, nP), and get back dY of shape (nB, nO, nP) and
        # ids of shape (nB, nF).
        # The ids tell us, for each state, which token each feature slot was
        # gathered from, so the full gradient would be:
        #
        #     dYf = zeros((nW + 1, nF, nO, nP))
        #     for b in range(nB):
        #         for f in range(nF):
        #             dYf[ids[b, f], f] += dY[b]
        #
        # However, we avoid building that array for efficiency -- and just
        # pass in the indices.
        dY, ids = dY_ids
        assert dY.ndim == 3
        assert dY.shape[1] == nO, dY.shape
        assert dY.shape[2] == nP, dY.shape
        nB = dY.shape[0]
        model.inc_grad("pad", _backprop_precomputable_affine_padding(model, dY, ids))
        # Gather the token vectors the states actually used.
        Xf = X[ids]
        Xf = Xf.reshape((Xf.shape[0], nF * nI))

        model.inc_grad("b", dY.sum(axis=0))
        dY = dY.reshape((dY.shape[0], nO * nP))

        Wopfi = W.transpose((1, 2, 0, 3))
        Wopfi = model.ops.xp.ascontiguousarray(Wopfi)
        Wopfi = Wopfi.reshape((nO * nP, nF * nI))
        dXf = model.ops.gemm(dY, Wopfi)

        # Reuse the buffer
        dWopfi = Wopfi
        dWopfi.fill(0.0)
        model.ops.gemm(dY, Xf, out=dWopfi, trans1=True)
        dWopfi = dWopfi.reshape((nO, nP, nF, nI))
        # (o, p, f, i) --> (f, o, p, i)
        model.inc_grad("W", dWopfi.transpose((2, 0, 1, 3)))
        return dXf.reshape((dXf.shape[0], nF, nI))

    return Yf, backward
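
# Continuing the usage sketch above (hypothetical values): the parser step
# model calls the callback with the per-state gradients plus the feature ids
# it gathered, instead of a dense gradient for the whole Yf array:
#
#     dY = numpy.random.normal(size=(4, 8, 2)).astype("f")  # (nB, nO, nP)
#     ids = numpy.zeros((4, 6), dtype="i")                  # (nB, nF); -1 marks padding
#     dXf = backprop((dY, ids))
#     assert dXf.shape == (4, 6, 96)                        # (nB, nF, nI)
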
def _backprop_precomputable_affine_padding(model, dY, ids):
    nB = dY.shape[0]
    nF = model.get_dim("nF")
    nP = model.get_dim("nP")
    nO = model.get_dim("nO")
    # Backprop the "padding", used as a filler for missing values.
    # Values that are missing are set to -1, and each state vector could
    # have multiple missing values. The padding has different values for
    # different missing features. The gradient of the padding vector is:
    #
    #     for b in range(nB):
    #         for f in range(nF):
    #             if ids[b, f] < 0:
    #                 d_pad[0, f] += dY[b]
    #
    # Which can be rewritten as:
    #
    #     for b in range(nB):
    #         d_pad[0, ids[b] < 0] += dY[b]
    #
    # I don't know how to avoid the loop without building a whole array :(.
    # Cursed numpy.
    d_pad = model.ops.alloc((1, nF, nO, nP))
    for b in range(nB):
        d_pad[0, ids[b] < 0] += dY[b]
    return d_pad
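
# A possible vectorization (a sketch, not part of the original file): the
# padding gradient is a sum of dY rows weighted by the boolean mask
# (ids < 0), which is exactly a matrix product over the batch dimension, so
# the body of _backprop_precomputable_affine_padding could instead be:
#
#     mask = model.ops.asarray(ids < 0, dtype="f")  # (nB, nF)
#     d_pad = model.ops.gemm(mask, dY.reshape((nB, nO * nP)), trans1=True)
#     return d_pad.reshape((1, nF, nO, nP))
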
def init(model, X=None, Y=None):
    """This is like the 'layer sequential unit variance' (LSUV) init, but
    instead of taking the actual inputs, we randomly generate whitened data.

    Why's this all so complicated? We have a huge number of inputs, and the
    maxout unit makes guessing the dynamics tricky. Instead we set the maxout
    weights to values that empirically result in whitened outputs given
    whitened inputs.
    """
    if model.has_param("W") and model.get_param("W").any():
        return

    nF = model.get_dim("nF")
    nO = model.get_dim("nO")
    nP = model.get_dim("nP")
    nI = model.get_dim("nI")
    W = model.ops.alloc4f(nF, nO, nP, nI)
    b = model.ops.alloc2f(nO, nP)
    pad = model.ops.alloc4f(1, nF, nO, nP)

    ops = model.ops
    W = normal_init(ops, W.shape, fan_in=nF * nI)
    model.set_param("W", W)
    model.set_param("b", b)
    model.set_param("pad", pad)

    # Simulate state vectors from random whitened token vectors, to estimate
    # the output statistics.
    ids = ops.alloc((5000, nF), dtype="f")
    ids += ops.xp.random.uniform(0, 1000, ids.shape)
    ids = ops.asarray(ids, dtype="i")
    tokvecs = ops.alloc((5000, nI), dtype="f")
    tokvecs += ops.xp.random.normal(loc=0.0, scale=1.0, size=tokvecs.size).reshape(
        tokvecs.shape
    )

    def predict(ids, tokvecs):
        # nS ids, nW tokvecs. Exclude the padding array: forward() prepends
        # the padding row, so passing nW - 1 vectors keeps the number of
        # hidden rows equal to the number of ids.
        hiddens = model.predict(tokvecs[:-1])  # (nW, f, o, p)
        vectors = model.ops.alloc((ids.shape[0], nO * nP), dtype="f")
        # need nS vectors
        hiddens = hiddens.reshape((hiddens.shape[0] * nF, nO * nP))
        model.ops.scatter_add(vectors, ids.flatten(), hiddens)
        vectors = vectors.reshape((vectors.shape[0], nO, nP))
        vectors += b
        vectors = model.ops.asarray(vectors)
        if nP >= 2:
            return model.ops.maxout(vectors)[0]
        else:
            return vectors * (vectors >= 0)

    tol_var = 0.01
    tol_mean = 0.01
    t_max = 10
    W = model.get_param("W").copy()
    b = model.get_param("b").copy()
    for t_i in range(t_max):
        acts1 = predict(ids, tokvecs)
        var = model.ops.xp.var(acts1)
        mean = model.ops.xp.mean(acts1)
        if abs(var - 1.0) >= tol_var:
            # Scale the weights towards unit output variance.
            W /= model.ops.xp.sqrt(var)
            model.set_param("W", W)
        elif abs(mean) >= tol_mean:
            # Once the variance is right, shift the bias towards zero mean.
            b -= mean
            model.set_param("b", b)
        else:
            break
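
# Sanity-check sketch (hypothetical, not part of the original file):
# reproduce the simulated-state statistics the init loop drives towards,
# and confirm the maxout activations come out roughly whitened.
#
#     import numpy
#     model = PrecomputableAffine(nO=8, nI=96, nF=6, nP=2)
#     tokvecs = numpy.random.normal(size=(5000, 96)).astype("f")
#     ids = numpy.random.randint(0, 1000, size=(5000, 6))
#     # forward() prepends the padding row, so 4999 inputs give 5000 rows.
#     hiddens = model.predict(tokvecs[:-1]).reshape((5000 * 6, 16))
#     vectors = numpy.zeros((5000, 16), dtype="f")
#     numpy.add.at(vectors, ids.flatten(), hiddens)
#     acts = (vectors.reshape((5000, 8, 2)) + model.get_param("b")).max(axis=-1)
#     print(acts.var(), acts.mean())  # expect roughly 1.0 and 0.0
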