From b439e04f8d631d799342b8ae68f04ae442d7fac2 Mon Sep 17 00:00:00 2001
From: Matthew Honnibal
Date: Sat, 6 May 2017 20:38:12 +0200
Subject: [PATCH] Learning smoothly

---
 bin/parser/train_ud.py      |  2 +-
 spacy/_ml.py                | 42 ++++++++++++++++++++++++++++++++-----
 spacy/syntax/parser.pyx     | 17 ++++++++++-----
 spacy/syntax/stateclass.pyx |  3 ++-
 4 files changed, 52 insertions(+), 12 deletions(-)

diff --git a/bin/parser/train_ud.py b/bin/parser/train_ud.py
index df8099d15..f0aff22b2 100644
--- a/bin/parser/train_ud.py
+++ b/bin/parser/train_ud.py
@@ -150,7 +150,7 @@ def main(lang_name, train_loc, dev_loc, model_dir, clusters_loc=None):
             print('%d:\t%.3f\t%.3f\t%.3f' % (itn, nn_loss[-1], scorer.uas, scorer.tags_acc))
         nn_loss.append(0.)
     trainer.each_epoch.append(track_progress)
-    trainer.batch_size = 2
+    trainer.batch_size = 6
     trainer.nb_epoch = 10000
     for docs, golds in trainer.iterate(Xs, ys, progress_bar=False):
         docs = [Doc(vocab, words=[w.text for w in doc]) for doc in docs]
diff --git a/spacy/_ml.py b/spacy/_ml.py
index d13d014bb..87549369f 100644
--- a/spacy/_ml.py
+++ b/spacy/_ml.py
@@ -1,4 +1,4 @@
-from thinc.api import layerize, chain, clone, concatenate, with_flatten
+from thinc.api import add, layerize, chain, clone, concatenate, with_flatten
 from thinc.neural import Model, Maxout, Softmax, Affine
 from thinc.neural._classes.hash_embed import HashEmbed
 
@@ -11,8 +11,13 @@ from .attrs import ID, LOWER, PREFIX, SUFFIX, SHAPE, TAG, DEP
 
 def get_col(idx):
     def forward(X, drop=0.):
+        assert len(X.shape) <= 3
         output = Model.ops.xp.ascontiguousarray(X[:, idx])
-        return output, None
+        def backward(y, sgd=None):
+            dX = Model.ops.allocate(X.shape)
+            dX[:, idx] += y
+            return dX
+        return output, backward
     return layerize(forward)
 
 
@@ -37,12 +42,11 @@ def build_debug_model(state2vec, width, depth, nr_class):
     return model
 
 
-
 def build_debug_state2vec(width, nr_vector=1000, nF=1, nB=0, nS=1, nL=2, nR=2):
     ops = Model.ops
     def forward(tokens_attrs_vectors, drop=0.):
         tokens, attr_vals, tokvecs = tokens_attrs_vectors
-
+        orig_tokvecs_shape = tokvecs.shape
         tokvecs = tokvecs.reshape((tokvecs.shape[0], tokvecs.shape[1] *
                                    tokvecs.shape[2]))
@@ -57,6 +61,34 @@ def build_debug_state2vec(width, nr_vector=1000, nF=1, nB=0, nS=1, nL=2, nR=2):
     return model
 
 
+def build_state2vec(nr_context_tokens, width, nr_vector=1000):
+    ops = Model.ops
+    with Model.define_operators({'|': concatenate, '+': add, '>>': chain}):
+        hiddens = [get_col(i) >> Affine(width) for i in range(nr_context_tokens)]
+        model = (
+            get_token_vectors
+            >> add(*hiddens)
+            >> Maxout(width)
+        )
+    return model
+
+
+def print_shape(prefix):
+    def forward(X, drop=0.):
+        return X, lambda dX, **kwargs: dX
+    return layerize(forward)
+
+
+@layerize
+def get_token_vectors(tokens_attrs_vectors, drop=0.):
+    ops = Model.ops
+    tokens, attrs, vectors = tokens_attrs_vectors
+    def backward(d_output, sgd=None):
+        return (tokens, d_output)
+    return vectors, backward
+
+
 def build_parser_state2vec(width, nr_vector=1000, nF=1, nB=0, nS=1, nL=2, nR=2):
     embed_tags = _reshape(chain(get_col(0), HashEmbed(16, nr_vector)))
     embed_deps = _reshape(chain(get_col(1), HashEmbed(16, nr_vector)))
@@ -161,7 +193,7 @@ def build_tok2vec(lang, width, depth=2, embed_size=1000):
             >> with_flatten(
                 #(static | prefix | suffix | shape)
                 (lower | prefix | suffix | shape | tag)
-                >> BatchNorm(Maxout(width, width*5), nO=width)
+                >> Maxout(width, width*5)
                 #>> (ExtractWindow(nW=1) >> Maxout(width, width*3))
                 #>> (ExtractWindow(nW=1) >> Maxout(width, width*3))
             )
diff --git a/spacy/syntax/parser.pyx b/spacy/syntax/parser.pyx
index 3b8a9f849..b7d33e1c9 100644
--- a/spacy/syntax/parser.pyx
+++ b/spacy/syntax/parser.pyx
@@ -45,6 +45,7 @@ from ..gold cimport GoldParse
 from ..attrs cimport TAG, DEP
 
 from .._ml import build_parser_state2vec, build_model
+from .._ml import build_state2vec, build_model
 from .._ml import build_debug_state2vec, build_debug_model
 
 
@@ -114,8 +115,10 @@ cdef class Parser:
         return (Parser, (self.vocab, self.moves, self.model), None, None)
 
     def build_model(self, width=64, nr_vector=1000, nF=1, nB=1, nS=1, nL=1, nR=1, **_):
-        state2vec = build_debug_state2vec(width, nr_vector, nF, nB, nL, nR)
-        model = build_debug_model(state2vec, width, 2, self.moves.n_moves)
+        nr_context_tokens = StateClass.nr_context_tokens(nF, nB, nS, nL, nR)
+        state2vec = build_state2vec(nr_context_tokens, width, nr_vector)
+        #state2vec = build_debug_state2vec(width, nr_vector)
+        model = build_debug_model(state2vec, width*2, 2, self.moves.n_moves)
         return model
 
     def __call__(self, Doc tokens):
@@ -220,8 +223,10 @@ cdef class Parser:
             scores, finish_update = self._begin_update(states, tokvecs)
             token_ids, batch_token_grads = finish_update(golds, sgd=sgd, losses=losses,
                                                          force_gold=False)
-            for i, tok_i in enumerate(token_ids):
-                d_tokens[i][tok_i] += batch_token_grads[i]
+            for i, tok_ids in enumerate(token_ids):
+                for j, tok_i in enumerate(tok_ids):
+                    if tok_i >= 0:
+                        d_tokens[i][tok_i] += batch_token_grads[i, j]
 
             self._transition_batch(states, scores)
 
@@ -237,6 +242,8 @@ cdef class Parser:
         features = self._get_features(states, tokvecs, attr_names)
         scores, finish_update = self.model.begin_update(features, drop=drop)
 
+        assert scores.shape[0] == len(states), (len(states), scores.shape)
+        assert len(scores.shape) == 2
         is_valid = self.model.ops.allocate((len(states), nr_class), dtype='i')
         self._validate_batch(is_valid, states)
         softmaxed = self.model.ops.softmax(scores)
@@ -283,7 +290,7 @@ cdef class Parser:
         cdef int i
         for i, state in enumerate(states):
             self.moves.set_valid(&is_valid[i, 0], state.c)
-
+
     def _cost_batch(self, weight_t[:, ::1] costs, int[:, ::1] is_valid,
                     states, golds):
         cdef int i
diff --git a/spacy/syntax/stateclass.pyx b/spacy/syntax/stateclass.pyx
index e6e67eaeb..22d8134aa 100644
--- a/spacy/syntax/stateclass.pyx
+++ b/spacy/syntax/stateclass.pyx
@@ -46,7 +46,8 @@ cdef class StateClass:
             n1 = words[self.B(1)]
         return ' '.join((third, second, top, '|', n0, n1))
 
-    def nr_context_tokens(self, int nF, int nB, int nS, int nL, int nR):
+    @classmethod
+    def nr_context_tokens(cls, int nF, int nB, int nS, int nL, int nR):
         return 4
 
     def set_context_tokens(self, int[:] output, nF=1, nB=0, nS=2,
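
Note on the get_col change: the layer previously returned None as its
callback, which silently dropped the gradient; the patch adds a backward
pass that scatters the incoming gradient into the sliced column of a
zeroed array shaped like the input. A minimal numpy sketch of the same
idea (numpy stands in for Model.ops here; this is an illustration, not
the thinc API):

    import numpy

    def get_col(idx):
        def forward(X, drop=0.):
            # Forward: slice out column `idx`.
            output = numpy.ascontiguousarray(X[:, idx])
            def backward(dY, sgd=None):
                # Backward: route the gradient back into that column only.
                dX = numpy.zeros(X.shape, dtype=X.dtype)
                dX[:, idx] += dY
                return dX
            return output, backward
        return forward

    X = numpy.arange(12., dtype='f').reshape((3, 4))
    output, backward = get_col(1)(X)
    dX = backward(numpy.ones_like(output))
    assert dX.sum() == 3.0 and (dX[:, 1] == 1.0).all()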
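
Note on build_state2vec: the new model gives each context-token column
its own Affine layer and sums the results with thinc's `add` combinator
before a Maxout, i.e. roughly Maxout(sum_i Affine_i(vectors[:, i])). A
plain numpy sketch of the summed-affines part (random weights and
assumed shapes, for illustration only; the Maxout and the hashed
embeddings are left out):

    import numpy

    rng = numpy.random.RandomState(0)
    batch, nr_context_tokens, width = 2, 4, 8

    # One (W, b) pair per context-token column, standing in for the
    # get_col(i) >> Affine(width) layers in the patch.
    layers = [(rng.randn(width, width), numpy.zeros(width))
              for _ in range(nr_context_tokens)]

    def state2vec(vectors):
        # vectors: (batch, nr_context_tokens, width). Each column gets
        # its own affine map; the outputs are summed, as add(*hiddens)
        # does for the wrapped layers.
        summed = numpy.zeros((vectors.shape[0], width))
        for i, (W, b) in enumerate(layers):
            summed += vectors[:, i] @ W.T + b
        return summed

    vectors = rng.randn(batch, nr_context_tokens, width)
    assert state2vec(vectors).shape == (batch, width)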
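
Note on the Parser.update change: token_ids is now a 2-D array (one row
of context-token indices per state), so the gradient scatter loops over
each row and guards on `tok_i >= 0` before accumulating into the
document's d_tokens. A numpy sketch of that scatter-add (the shapes and
the -1 padding convention for unset slots are assumptions for the demo):

    import numpy

    rng = numpy.random.RandomState(0)
    batch, nr_feat, width, doc_len = 2, 4, 8, 10

    # token_ids[i, j]: index into document i of its j-th context token;
    # -1 marks an unset slot (e.g. an empty stack position).
    token_ids = numpy.array([[0, 3, -1, 2],
                             [1, -1, -1, 4]])
    batch_token_grads = rng.randn(batch, nr_feat, width)
    d_tokens = [numpy.zeros((doc_len, width)) for _ in range(batch)]

    for i, tok_ids in enumerate(token_ids):
        for j, tok_i in enumerate(tok_ids):
            if tok_i >= 0:  # skip padding ids, as in the patched loop
                d_tokens[i][tok_i] += batch_token_grads[i, j]

    # Tokens never used as context features keep a zero gradient.
    assert d_tokens[0][1].sum() == 0.0 and d_tokens[1][0].sum() == 0.0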