mirror of
https://github.com/explosion/spaCy.git
synced 2025-01-24 08:14:15 +03:00
569cc98982
* Add load_from_config function * Add train_from_config script * Merge configs and expose via spacy.config * Fix script * Suggest create_evaluation_callback * Hard-code for NER * Fix errors * Register command * Add TODO * Update train-from-config todos * Fix imports * Allow delayed setting of parser model nr_class * Get train-from-config working * Tidy up and fix scores and printing * Hide traceback if cancelled * Fix weighted score formatting * Fix score formatting * Make output_path optional * Add Tok2Vec component * Tidy up and add tok2vec_tensors * Add option to copy docs in nlp.update * Copy docs in nlp.update * Adjust nlp.update() for set_annotations * Don't shuffle pipes in nlp.update, decruft * Support set_annotations arg in component update * Support set_annotations in parser update * Add get_gradients method * Add get_gradients to parser * Update errors.py * Fix problems caused by merge * Add _link_components method in nlp * Add concept of 'listeners' and ControlledModel * Support optional attributes arg in ControlledModel * Try having tok2vec component in pipeline * Fix tok2vec component * Fix config * Fix tok2vec * Update for Example * Update for Example * Update config * Add eg2doc util * Update and add schemas/types * Update schemas * Fix nlp.update * Fix tagger * Remove hacks from train-from-config * Remove hard-coded config str * Calculate loss in tok2vec component * Tidy up and use function signatures instead of models * Support union types for registry models * Minor cleaning in Language.update * Make ControlledModel specifically Tok2VecListener * Fix train_from_config * Fix tok2vec * Tidy up * Add function for bilstm tok2vec * Fix type * Fix syntax * Fix pytorch optimizer * Add example configs * Update for thinc describe changes * Update for Thinc changes * Update for dropout/sgd changes * Update for dropout/sgd changes * Unhack gradient update * Work on refactoring _ml * Remove _ml.py module * WIP upgrade cli scripts for thinc * Move some _ml 
stuff to util * Import link_vectors from util * Update train_from_config * Import from util * Import from util * Temporarily add ml.component_models module * Move ml methods * Move typedefs * Update load vectors * Update gitignore * Move imports * Add PrecomputableAffine * Fix imports * Fix imports * Fix imports * Fix missing imports * Update CLI scripts * Update spacy.language * Add stubs for building the models * Update model definition * Update create_default_optimizer * Fix import * Fix comment * Update imports in tests * Update imports in spacy.cli * Fix import * fix obsolete thinc imports * update srsly pin * from thinc to ml_datasets for example data such as imdb * update ml_datasets pin * using STATE.vectors * small fix * fix Sentencizer.pipe * black formatting * rename Affine to Linear as in thinc * set validate explicitely to True * rename with_square_sequences to with_list2padded * rename with_flatten to with_list2array * chaining layernorm * small fixes * revert Optimizer import * build_nel_encoder with new thinc style * fixes using model's get and set methods * Tok2Vec in component models, various fixes * fix up legacy tok2vec code * add model initialize calls * add in build_tagger_model * small fixes * setting model dims * fixes for ParserModel * various small fixes * initialize thinc Models * fixes * consistent naming of window_size * fixes, removing set_dropout * work around Iterable issue * remove legacy tok2vec * util fix * fix forward function of tok2vec listener * more fixes * trying to fix PrecomputableAffine (not succesful yet) * alloc instead of allocate * add morphologizer * rename residual * rename fixes * Fix predict function * Update parser and parser model * fixing few more tests * Fix precomputable affine * Update component model * Update parser model * Move backprop padding to own function, for test * Update test * Fix p. 
affine * Update NEL * build_bow_text_classifier and extract_ngrams * Fix parser init * Fix test add label * add build_simple_cnn_text_classifier * Fix parser init * Set gpu off by default in example * Fix tok2vec listener * Fix parser model * Small fixes * small fix for PyTorchLSTM parameters * revert my_compounding hack (iterable fixed now) * fix biLSTM * Fix uniqued * PyTorchRNNWrapper fix * small fixes * use helper function to calculate cosine loss * small fixes for build_simple_cnn_text_classifier * putting dropout default at 0.0 to ensure the layer gets built * using thinc util's set_dropout_rate * moving layer normalization inside of maxout definition to optimize dropout * temp debugging in NEL * fixed NEL model by using init defaults ! * fixing after set_dropout_rate refactor * proper fix * fix test_update_doc after refactoring optimizers in thinc * Add CharacterEmbed layer * Construct tagger Model * Add missing import * Remove unused stuff * Work on textcat * fix test (again :)) after optimizer refactor * fixes to allow reading Tagger from_disk without overwriting dimensions * don't build the tok2vec prematuraly * fix CharachterEmbed init * CharacterEmbed fixes * Fix CharacterEmbed architecture * fix imports * renames from latest thinc update * one more rename * add initialize calls where appropriate * fix parser initialization * Update Thinc version * Fix errors, auto-format and tidy up imports * Fix validation * fix if bias is cupy array * revert for now * ensure it's a numpy array before running bp in ParserStepModel * no reason to call require_gpu twice * use CupyOps.to_numpy instead of cupy directly * fix initialize of ParserModel * remove unnecessary import * fixes for CosineDistance * fix device renaming * use refactored loss functions (Thinc PR 251) * overfitting test for tagger * experimental settings for the tagger: avoid zero-init and subword normalization * clean up tagger overfitting test * use previous default value for nP * remove toy config 
* bringing layernorm back (had a bug - fixed in thinc) * revert setting nP explicitly * remove setting default in constructor * restore values as they used to be * add overfitting test for NER * add overfitting test for dep parser * add overfitting test for textcat * fixing init for linear (previously affine) * larger eps window for textcat * ensure doc is not None * Require newer thinc * Make float check vaguer * Slop the textcat overfit test more * Fix textcat test * Fix exclusive classes for textcat * fix after renaming of alloc methods * fixing renames and mandatory arguments (staticvectors WIP) * upgrade to thinc==8.0.0.dev3 * refer to vocab.vectors directly instead of its name * rename alpha to learn_rate * adding hashembed and staticvectors dropout * upgrade to thinc 8.0.0.dev4 * add name back to avoid warning W020 * thinc dev4 * update srsly * using thinc 8.0.0a0 ! Co-authored-by: Matthew Honnibal <honnibal+gh@gmail.com> Co-authored-by: Ines Montani <ines@ines.io>
169 lines
6.7 KiB
Cython
169 lines
6.7 KiB
Cython
from collections import defaultdict
|
|
|
|
import numpy
|
|
cimport numpy as np
|
|
|
|
from thinc.layers import chain, list2array
|
|
from thinc.util import to_categorical, copy_array, get_array_module
|
|
|
|
from .. import util
|
|
from .pipes import Pipe
|
|
from ..language import component
|
|
from ..util import link_vectors_to_models, create_default_optimizer
|
|
from ..errors import Errors, TempErrors
|
|
from ..tokens.doc cimport Doc
|
|
from ..vocab cimport Vocab
|
|
from ..morphology cimport Morphology
|
|
|
|
from ..ml.component_models import build_morphologizer_model
|
|
|
|
|
|
@component("morphologizer", assigns=["token.morph", "token.pos"])
|
|
class Morphologizer(Pipe):
|
|
|
|
@classmethod
|
|
def Model(cls, **cfg):
|
|
if cfg.get('pretrained_dims') and not cfg.get('pretrained_vectors'):
|
|
raise ValueError(TempErrors.T008)
|
|
class_map = Morphology.create_class_map()
|
|
return build_morphologizer_model(class_map.field_sizes, **cfg)
|
|
|
|
def __init__(self, vocab, model=True, **cfg):
|
|
self.vocab = vocab
|
|
self.model = model
|
|
self.cfg = dict(sorted(cfg.items()))
|
|
self.cfg.setdefault('cnn_maxout_pieces', 2)
|
|
self._class_map = self.vocab.morphology.create_class_map()
|
|
|
|
@property
|
|
def labels(self):
|
|
return self.vocab.morphology.tag_names
|
|
|
|
@property
|
|
def tok2vec(self):
|
|
if self.model in (None, True, False):
|
|
return None
|
|
else:
|
|
return chain(self.model.get_ref("tok2vec"), list2array())
|
|
|
|
def __call__(self, doc):
|
|
features, tokvecs = self.predict([doc])
|
|
self.set_annotations([doc], features, tensors=tokvecs)
|
|
return doc
|
|
|
|
def pipe(self, stream, batch_size=128, n_threads=-1):
|
|
for docs in util.minibatch(stream, size=batch_size):
|
|
docs = list(docs)
|
|
features, tokvecs = self.predict(docs)
|
|
self.set_annotations(docs, features, tensors=tokvecs)
|
|
yield from docs
|
|
|
|
def predict(self, docs):
|
|
if not any(len(doc) for doc in docs):
|
|
# Handle case where there are no tokens in any docs.
|
|
n_labels = self.model.get_dim("nO")
|
|
guesses = [self.model.ops.alloc((0, n_labels)) for doc in docs]
|
|
tokvecs = self.model.ops.alloc((0, self.model.get_ref("tok2vec").get_dim("nO")))
|
|
return guesses, tokvecs
|
|
tokvecs = self.model.tok2vec(docs)
|
|
scores = self.model.softmax(tokvecs)
|
|
return scores, tokvecs
|
|
|
|
def set_annotations(self, docs, batch_scores, tensors=None):
|
|
if isinstance(docs, Doc):
|
|
docs = [docs]
|
|
cdef Doc doc
|
|
cdef Vocab vocab = self.vocab
|
|
offsets = [self._class_map.get_field_offset(field)
|
|
for field in self._class_map.fields]
|
|
for i, doc in enumerate(docs):
|
|
doc_scores = batch_scores[i]
|
|
doc_guesses = scores_to_guesses(doc_scores, self.model.get_ref("softmax").attrs["nOs"])
|
|
# Convert the neuron indices into feature IDs.
|
|
doc_feat_ids = numpy.zeros((len(doc), len(self._class_map.fields)), dtype='i')
|
|
for j in range(len(doc)):
|
|
for k, offset in enumerate(offsets):
|
|
if doc_guesses[j, k] == 0:
|
|
doc_feat_ids[j, k] = 0
|
|
else:
|
|
doc_feat_ids[j, k] = offset + doc_guesses[j, k]
|
|
# Get the set of feature names.
|
|
feats = {self._class_map.col2info[f][2] for f in doc_feat_ids[j]}
|
|
if "NIL" in feats:
|
|
feats.remove("NIL")
|
|
# Now add the analysis, and set the hash.
|
|
doc.c[j].morph = self.vocab.morphology.add(feats)
|
|
if doc[j].morph.pos != 0:
|
|
doc.c[j].pos = doc[j].morph.pos
|
|
|
|
def update(self, examples, drop=0., sgd=None, losses=None):
|
|
if losses is not None and self.name not in losses:
|
|
losses[self.name] = 0.
|
|
|
|
docs = [self._get_doc(ex) for ex in examples]
|
|
tag_scores, bp_tag_scores = self.model.begin_update(docs, drop=drop)
|
|
loss, d_tag_scores = self.get_loss(examples, tag_scores)
|
|
bp_tag_scores(d_tag_scores, sgd=sgd)
|
|
|
|
if losses is not None:
|
|
losses[self.name] += loss
|
|
|
|
def get_loss(self, examples, scores):
|
|
guesses = []
|
|
for doc_scores in scores:
|
|
guesses.append(scores_to_guesses(doc_scores, self.model.get_ref("softmax").attrs["nOs"]))
|
|
guesses = self.model.ops.xp.vstack(guesses)
|
|
scores = self.model.ops.xp.vstack(scores)
|
|
if not isinstance(scores, numpy.ndarray):
|
|
scores = scores.get()
|
|
if not isinstance(guesses, numpy.ndarray):
|
|
guesses = guesses.get()
|
|
cdef int idx = 0
|
|
# Do this on CPU, as we can't vectorize easily.
|
|
target = numpy.zeros(scores.shape, dtype='f')
|
|
field_sizes = self.model.get_ref("softmax").attrs["nOs"]
|
|
for example in examples:
|
|
doc = example.doc
|
|
gold = example.gold
|
|
for t, features in enumerate(gold.morphology):
|
|
if features is None:
|
|
target[idx] = scores[idx]
|
|
else:
|
|
gold_fields = {}
|
|
for feature in features:
|
|
field = self._class_map.feat2field[feature]
|
|
gold_fields[field] = self._class_map.feat2offset[feature]
|
|
for field in self._class_map.fields:
|
|
field_id = self._class_map.field2id[field]
|
|
col_offset = self._class_map.field2col[field]
|
|
if field_id in gold_fields:
|
|
target[idx, col_offset + gold_fields[field_id]] = 1.
|
|
else:
|
|
target[idx, col_offset] = 1.
|
|
#print(doc[t])
|
|
#for col, info in enumerate(self._class_map.col2info):
|
|
# print(col, info, scores[idx, col], target[idx, col])
|
|
idx += 1
|
|
target = self.model.ops.asarray(target, dtype='f')
|
|
scores = self.model.ops.asarray(scores, dtype='f')
|
|
d_scores = scores - target
|
|
loss = (d_scores**2).sum()
|
|
docs = [self._get_doc(ex) for ex in examples]
|
|
d_scores = self.model.ops.unflatten(d_scores, [len(d) for d in docs])
|
|
return float(loss), d_scores
|
|
|
|
def use_params(self, params):
|
|
with self.model.use_params(params):
|
|
yield
|
|
|
|
def scores_to_guesses(scores, out_sizes):
    """Pick the winning class in each field's column block.

    scores: 2d array of shape (n_rows, sum(out_sizes)) holding the
        concatenated per-field outputs.
    out_sizes: sequence of ints giving the number of classes per field.
    RETURNS: 2d int array of shape (n_rows, len(out_sizes)); column i is the
        argmax within field i's block, relative to that block's start.
    """
    xp = get_array_module(scores)
    guesses = xp.zeros((scores.shape[0], len(out_sizes)), dtype='i')
    start = 0
    for field, width in enumerate(out_sizes):
        end = start + width
        guesses[:, field] = scores[:, start:end].argmax(axis=1)
        start = end
    return guesses