From 60d811124548d4f1d9f0cc8d5abb64afc7827aa8 Mon Sep 17 00:00:00 2001
From: Matthew Honnibal
Date: Tue, 15 Aug 2017 03:12:26 -0500
Subject: [PATCH 01/12] Require thinc 6.8.1

---
 requirements.txt | 2 +-
 setup.py         | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/requirements.txt b/requirements.txt
index aae0f9388..fa1a3e6d3 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,7 +3,7 @@ pathlib
 numpy>=1.7
 cymem>=1.30,<1.32
 preshed>=1.0.0,<2.0.0
-thinc>=6.8.0,<6.9.0
+thinc>=6.8.1,<6.9.0
 murmurhash>=0.28,<0.29
 plac<1.0.0,>=0.9.6
 six
diff --git a/setup.py b/setup.py
index 02d4fe0d9..ca8e41cb4 100755
--- a/setup.py
+++ b/setup.py
@@ -194,7 +194,7 @@ def setup_package():
         'murmurhash>=0.28,<0.29',
         'cymem>=1.30,<1.32',
         'preshed>=1.0.0,<2.0.0',
-        'thinc>=6.8.0,<6.9.0',
+        'thinc>=6.8.1,<6.9.0',
         'plac<1.0.0,>=0.9.6',
         'pip>=9.0.0,<10.0.0',
         'six',

From e420e0366cba712ab678679fe9fbb212f2eb6466 Mon Sep 17 00:00:00 2001
From: Matthew Honnibal
Date: Tue, 15 Aug 2017 03:13:57 -0500
Subject: [PATCH 02/12] Remove use of hash function in beam parser

---
 spacy/syntax/_beam_utils.pyx | 6 +-----
 spacy/syntax/nn_parser.pyx   | 2 +-
 2 files changed, 2 insertions(+), 6 deletions(-)

diff --git a/spacy/syntax/_beam_utils.pyx b/spacy/syntax/_beam_utils.pyx
index e77036e55..15f1ce59b 100644
--- a/spacy/syntax/_beam_utils.pyx
+++ b/spacy/syntax/_beam_utils.pyx
@@ -86,11 +86,7 @@ cdef class ParserBeam(object):
             self._set_scores(beam, scores[i])
             if self.golds is not None:
                 self._set_costs(beam, self.golds[i], follow_gold=follow_gold)
-            if follow_gold:
-                assert self.golds is not None
-                beam.advance(_transition_state, NULL, self.moves.c)
-            else:
-                beam.advance(_transition_state, _hash_state, self.moves.c)
+            beam.advance(_transition_state, NULL, self.moves.c)
             beam.check_done(_check_final_state, NULL)
             if beam.is_done:
                 for j in range(beam.size):
diff --git a/spacy/syntax/nn_parser.pyx b/spacy/syntax/nn_parser.pyx
index a193c96a3..fb7099022 100644
--- a/spacy/syntax/nn_parser.pyx
+++ b/spacy/syntax/nn_parser.pyx
@@ -471,7 +471,7 @@ cdef class Parser:
                 for k in range(nr_class):
                     beam.scores[i][k] = c_scores[j * scores.shape[1] + k]
                 j += 1
-            beam.advance(_transition_state, _hash_state, self.moves.c)
+            beam.advance(_transition_state, NULL, self.moves.c)
             beam.check_done(_check_final_state, NULL)
             beams.append(beam)
         return beams

From a8e4064dd81dcebea53a2bdbee4a2d6904b23ff5 Mon Sep 17 00:00:00 2001
From: Matthew Honnibal
Date: Tue, 15 Aug 2017 03:14:36 -0500
Subject: [PATCH 03/12] Fix tensor gradient in parser

---
 spacy/syntax/nn_parser.pyx | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/spacy/syntax/nn_parser.pyx b/spacy/syntax/nn_parser.pyx
index fb7099022..5d6f51538 100644
--- a/spacy/syntax/nn_parser.pyx
+++ b/spacy/syntax/nn_parser.pyx
@@ -633,10 +633,9 @@ cdef class Parser:
         xp = get_array_module(d_tokvecs)
         for ids, d_vector, bp_vector in backprops:
            d_state_features = bp_vector(d_vector, sgd=sgd)
-            mask = ids >= 0
-            indices = xp.nonzero(mask)
-            self.model[0].ops.scatter_add(d_tokvecs, ids[indices],
-                d_state_features[indices])
+            mask = (ids >= 0).reshape((ids.shape[0], ids.shape[1], 1))
+            self.model[0].ops.scatter_add(d_tokvecs, ids,
+                d_state_features * mask)
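A note on PATCH 03: rather than gathering only the valid rows, the fix broadcasts a (states, features, 1) validity mask over the gradient, so padded feature slots (id == -1) contribute nothing when accumulated. A minimal NumPy sketch of the idea; the shapes and variable names here are illustrative assumptions, not code from the patch:

    import numpy

    nr_state, nr_feat, width = 4, 13, 64          # hypothetical sizes
    ids = numpy.random.randint(-1, 50, size=(nr_state, nr_feat))
    d_state_features = numpy.random.randn(nr_state, nr_feat, width).astype('f')
    d_tokvecs = numpy.zeros((50, width), dtype='f')

    # Broadcast the validity mask over the vector dimension, zeroing the
    # gradient wherever the feature refers to padding (id == -1).
    mask = (ids >= 0).reshape((ids.shape[0], ids.shape[1], 1))
    masked = d_state_features * mask

    # scatter_add: accumulate each feature's gradient into its token's row.
    # The id == -1 entries add only zeros (to the last row), so they are harmless.
    numpy.add.at(d_tokvecs, ids.ravel(), masked.reshape(-1, width))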
From 500e92553d477c29cd306590e078b8b3256d41d0 Mon Sep 17 00:00:00 2001
From: Matthew Honnibal
Date: Tue, 15 Aug 2017 03:15:04 -0500
Subject: [PATCH 04/12] Fix memory error when copying scores in beam

---
 spacy/syntax/_beam_utils.pyx | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/spacy/syntax/_beam_utils.pyx b/spacy/syntax/_beam_utils.pyx
index 15f1ce59b..09738b584 100644
--- a/spacy/syntax/_beam_utils.pyx
+++ b/spacy/syntax/_beam_utils.pyx
@@ -97,12 +97,19 @@ cdef class ParserBeam(object):

     def _set_scores(self, Beam beam, float[:, ::1] scores):
         cdef float* c_scores = &scores[0, 0]
-        for i in range(beam.size):
+        cdef int nr_state = min(scores.shape[0], beam.size)
+        cdef int nr_class = scores.shape[1]
+        for i in range(nr_state):
             state = beam.at(i)
             if not state.is_final():
-                for j in range(beam.nr_class):
-                    beam.scores[i][j] = c_scores[i * beam.nr_class + j]
+                for j in range(nr_class):
+                    beam.scores[i][j] = c_scores[i * nr_class + j]
                 self.moves.set_valid(beam.is_valid[i], state.c)
+            else:
+                for j in range(beam.nr_class):
+                    beam.scores[i][j] = 0
+                    beam.costs[i][j] = 0
+
     def _set_costs(self, Beam beam, GoldParse gold, int follow_gold=False):
         for i in range(beam.size):
@@ -196,8 +203,7 @@ def update_beam(TransitionSystem moves, int nr_feature, int max_steps,
     losses = [((v.p_probs + v.g_probs) if v.p_probs else []) for v in violns]
     states_d_scores = get_gradient(moves.n_moves, beam_maps,
                                    histories, losses)
-    assert len(states_d_scores) == len(backprops), (len(states_d_scores), len(backprops))
-    return states_d_scores, backprops
+    return states_d_scores, backprops[:len(states_d_scores)]

From 23537a011d7f27bc93579d7aae5579062823f67f Mon Sep 17 00:00:00 2001
From: Matthew Honnibal
Date: Tue, 15 Aug 2017 03:15:28 -0500
Subject: [PATCH 05/12] Tweaks to beam parser

---
 spacy/syntax/_beam_utils.pyx | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

diff --git a/spacy/syntax/_beam_utils.pyx b/spacy/syntax/_beam_utils.pyx
index 09738b584..48030b72a 100644
--- a/spacy/syntax/_beam_utils.pyx
+++ b/spacy/syntax/_beam_utils.pyx
@@ -216,12 +216,13 @@ def get_states(pbeams, gbeams, beam_map, nr_update):
     for eg_id, (pbeam, gbeam) in enumerate(zip(pbeams, gbeams)):
         p_indices.append([])
         g_indices.append([])
-        if pbeam.loss > 0 and pbeam.min_score > gbeam.score:
+        if pbeam.loss > 0 and pbeam.min_score > (gbeam.score + nr_update):
             continue
         for i in range(pbeam.size):
             state = pbeam.at(i)
             if not state.is_final():
                 key = tuple([eg_id] + pbeam.histories[i])
+                assert key not in seen, (key, seen)
                 seen[key] = len(states)
                 p_indices[-1].append(len(states))
                 states.append(state)
@@ -257,12 +258,18 @@ def get_gradient(nr_class, beam_maps, histories, losses):
     """
     nr_step = len(beam_maps)
     grads = []
-    for beam_map in beam_maps:
-        if beam_map:
-            grads.append(numpy.zeros((max(beam_map.values())+1, nr_class), dtype='f'))
+    nr_step = 0
+    for eg_id, hists in enumerate(histories):
+        for loss, hist in zip(losses[eg_id], hists):
+            if abs(loss) >= 0.0001 and not numpy.isnan(loss):
+                nr_step = max(nr_step, len(hist))
+    for i in range(nr_step):
+        grads.append(numpy.zeros((max(beam_maps[i].values())+1, nr_class), dtype='f'))
     assert len(histories) == len(losses)
     for eg_id, hists in enumerate(histories):
         for loss, hist in zip(losses[eg_id], hists):
+            if abs(loss) < 0.0001 or numpy.isnan(loss):
+                continue
             key = tuple([eg_id])
             for j, clas in enumerate(hist):
                 i = beam_maps[j][key]
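The get_gradient change in PATCH 05 sizes the per-step gradient list by the longest history that carries a usable loss, instead of allocating one matrix per beam map, and it skips near-zero or NaN losses entirely. A condensed sketch of just that sizing logic, simplified from the patch:

    import numpy

    def size_gradients(nr_class, beam_maps, histories, losses):
        # Only histories whose loss is meaningful (not ~0, not NaN) decide
        # how many steps actually need a gradient matrix.
        nr_step = 0
        for eg_id, hists in enumerate(histories):
            for loss, hist in zip(losses[eg_id], hists):
                if abs(loss) >= 0.0001 and not numpy.isnan(loss):
                    nr_step = max(nr_step, len(hist))
        return [numpy.zeros((max(beam_maps[i].values()) + 1, nr_class), dtype='f')
                for i in range(nr_step)]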
From 210f6d5175f867d985d2d20c05b063ca259867ee Mon Sep 17 00:00:00 2001
From: Matthew Honnibal
Date: Tue, 15 Aug 2017 03:19:03 -0500
Subject: [PATCH 06/12] Fix efficiency error in batch parse

---
 spacy/syntax/nn_parser.pyx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/spacy/syntax/nn_parser.pyx b/spacy/syntax/nn_parser.pyx
index 5d6f51538..9a35c69d7 100644
--- a/spacy/syntax/nn_parser.pyx
+++ b/spacy/syntax/nn_parser.pyx
@@ -411,7 +411,7 @@ cdef class Parser:
                 st = next_step[i]
                 st.set_context_tokens(&c_token_ids[i*nr_feat], nr_feat)
                 self.moves.set_valid(&c_is_valid[i*nr_class], st)
-                vectors = state2vec(token_ids[:next_step.size()])
+            vectors = state2vec(token_ids[:next_step.size()])
             scores = vec2scores(vectors)
             c_scores = <float*>scores.data
             for i in range(next_step.size()):

From 1cb2f15d654ed475eb0e0699fa97059bb0a4769c Mon Sep 17 00:00:00 2001
From: Matthew Honnibal
Date: Wed, 16 Aug 2017 18:22:26 -0500
Subject: [PATCH 07/12] Clean up unused predict_confidences function

---
 spacy/pipeline.pyx | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/spacy/pipeline.pyx b/spacy/pipeline.pyx
index 634d3e4b5..d1a2a2964 100644
--- a/spacy/pipeline.pyx
+++ b/spacy/pipeline.pyx
@@ -653,14 +653,6 @@ cdef class NeuralEntityRecognizer(NeuralParser):

     nr_feature = 6

-    def predict_confidences(self, docs):
-        tensors = [d.tensor for d in docs]
-        samples = []
-        for i in range(10):
-            states = self.parse_batch(docs, tensors, drop=0.3)
-            for state in states:
-                samples.append(self._get_entities(state))
-
     def __reduce__(self):
         return (NeuralEntityRecognizer, (self.vocab, self.moves, self.model),
                 None, None)

From 3533bb61cbf4a12ce4d7efb7d86a362becb0afe1 Mon Sep 17 00:00:00 2001
From: Matthew Honnibal
Date: Wed, 16 Aug 2017 18:23:27 -0500
Subject: [PATCH 08/12] Add option of 8 feature parse state

---
 spacy/syntax/_state.pxd | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/spacy/syntax/_state.pxd b/spacy/syntax/_state.pxd
index 9aeeba441..3da9e5d4c 100644
--- a/spacy/syntax/_state.pxd
+++ b/spacy/syntax/_state.pxd
@@ -74,7 +74,16 @@ cdef cppclass StateC:
         free(this.shifted - PADDING)

     void set_context_tokens(int* ids, int n) nogil:
-        if n == 13:
+        if n == 8:
+            ids[0] = this.B(0)
+            ids[1] = this.B(1)
+            ids[2] = this.S(0)
+            ids[3] = this.S(1)
+            ids[4] = this.H(this.S(0))
+            ids[5] = this.L(this.B(0), 1)
+            ids[6] = this.L(this.S(0), 2)
+            ids[7] = this.R(this.S(0), 1)
+        elif n == 13:
             ids[0] = this.B(0)
             ids[1] = this.B(1)
             ids[2] = this.S(0)
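For reference, the 8-token template added in PATCH 08 is a subset of the 13-token one: the first two buffer words, the top two stack words, the head of the stack top, and three left/right children. A plain-Python paraphrase, assuming an object exposing the same B/S/H/L/R accessors as StateC:

    def context_tokens_8(state):
        # Mirrors StateC.set_context_tokens for n == 8.
        return [
            state.B(0), state.B(1),      # first two words of the buffer
            state.S(0), state.S(1),      # top two words of the stack
            state.H(state.S(0)),         # head of the stack top
            state.L(state.B(0), 1),      # leftmost child of the buffer front
            state.L(state.S(0), 2),      # second leftmost child of the stack top
            state.R(state.S(0), 1),      # rightmost child of the stack top
        ]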
From a6d8d7c82e269c2f91ca25ac73cc70dcc9cf890d Mon Sep 17 00:00:00 2001
From: Matthew Honnibal
Date: Wed, 16 Aug 2017 18:24:09 -0500
Subject: [PATCH 09/12] Add is_gold_parse method to transition system

---
 spacy/syntax/arc_eager.pyx         | 14 ++++++++++++++
 spacy/syntax/transition_system.pyx |  3 +++
 2 files changed, 17 insertions(+)

diff --git a/spacy/syntax/arc_eager.pyx b/spacy/syntax/arc_eager.pyx
index 9477449a5..aab350d76 100644
--- a/spacy/syntax/arc_eager.pyx
+++ b/spacy/syntax/arc_eager.pyx
@@ -351,6 +351,20 @@ cdef class ArcEager(TransitionSystem):
         def __get__(self):
             return (SHIFT, REDUCE, LEFT, RIGHT, BREAK)

+    def is_gold_parse(self, StateClass state, GoldParse gold):
+        predicted = set()
+        truth = set()
+        for i in range(gold.length):
+            if gold.cand_to_gold[i] is None:
+                continue
+            if state.safe_get(i).dep:
+                predicted.add((i, state.H(i), self.strings[state.safe_get(i).dep]))
+            else:
+                predicted.add((i, state.H(i), 'ROOT'))
+            id_, word, tag, head, dep, ner = gold.orig_annot[gold.cand_to_gold[i]]
+            truth.add((id_, head, dep))
+        return truth == predicted
+
     def has_gold(self, GoldParse gold, start=0, end=None):
         end = end or len(gold.heads)
         if all([tag is None for tag in gold.heads[start:end]]):
diff --git a/spacy/syntax/transition_system.pyx b/spacy/syntax/transition_system.pyx
index d3f64f827..9cf82e0c7 100644
--- a/spacy/syntax/transition_system.pyx
+++ b/spacy/syntax/transition_system.pyx
@@ -99,6 +99,9 @@ cdef class TransitionSystem:
     def preprocess_gold(self, GoldParse gold):
         raise NotImplementedError

+    def is_gold_parse(self, StateClass state, GoldParse gold):
+        raise NotImplementedError
+
     cdef Transition lookup_transition(self, object name) except *:
         raise NotImplementedError
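The is_gold_parse method added in PATCH 09 reduces both the predicted state and the gold annotation to sets of (child, head, label) arcs and tests them for equality. A toy illustration of that comparison; the sentence and arc triples are invented for the example:

    def arcs_match(predicted_arcs, gold_arcs):
        # A parse counts as gold only if every arc matches exactly.
        return set(predicted_arcs) == set(gold_arcs)

    # "She bought apples": token 1 is the root.
    predicted = {(0, 1, 'nsubj'), (1, 1, 'ROOT'), (2, 1, 'dobj')}
    gold = {(0, 1, 'nsubj'), (1, 1, 'ROOT'), (2, 1, 'dobj')}
    assert arcs_match(predicted, gold)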
From 4b1e7bd6d8c677b6bb96e7ec69bb7e2ce436c648 Mon Sep 17 00:00:00 2001
From: Matthew Honnibal
Date: Wed, 16 Aug 2017 18:25:20 -0500
Subject: [PATCH 10/12] Improve tensorizer model

---
 spacy/_ml.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/spacy/_ml.py b/spacy/_ml.py
index 91b530fad..c49bad6d4 100644
--- a/spacy/_ml.py
+++ b/spacy/_ml.py
@@ -9,7 +9,7 @@ import cytoolz

 from thinc.neural._classes.convolution import ExtractWindow
 from thinc.neural._classes.static_vectors import StaticVectors
-from thinc.neural._classes.batchnorm import BatchNorm
+from thinc.neural._classes.batchnorm import BatchNorm as BN
 from thinc.neural._classes.layernorm import LayerNorm as LN
 from thinc.neural._classes.resnet import Residual
 from thinc.neural import ReLu
@@ -22,6 +22,7 @@ from thinc.neural.pooling import Pooling, max_pool, mean_pool, sum_pool
 from thinc.neural._classes.attention import ParametricAttention
 from thinc.linear.linear import LinearModel
 from thinc.api import uniqued, wrap, flatten_add_lengths
+from thinc.neural._classes.rnn import BiLSTM

 from .attrs import ID, ORTH, LOWER, NORM, PREFIX, SUFFIX, SHAPE, TAG, DEP
@@ -229,14 +230,14 @@ def Tok2Vec(width, embed_size, preprocess=None):
     suffix = get_col(cols.index(SUFFIX)) >> HashEmbed(width, embed_size//2, name='embed_suffix')
     shape = get_col(cols.index(SHAPE)) >> HashEmbed(width, embed_size//2, name='embed_shape')

-    embed = (norm | prefix | suffix | shape ) >> Maxout(width, width*4, pieces=3)
+    embed = (norm | prefix | suffix | shape ) >> LN(Maxout(width, width*4, pieces=3))
     tok2vec = (
         with_flatten(
             asarray(Model.ops, dtype='uint64')
             >> uniqued(embed, column=5)
             >> drop_layer(
                 Residual(
-                    (ExtractWindow(nW=1) >> ReLu(width, width*3))
+                    (ExtractWindow(nW=1) >> BN(Maxout(width, width*3)))
                 )
             ) ** 4, pad=4
         )
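PATCH 10 wraps the embedding Maxout in layer normalization and swaps the convolutional ReLu layers for batch-normalized Maxout. As a rough sketch of what those two building blocks compute; the shapes and the epsilon are assumptions, not thinc internals:

    import numpy

    def maxout(X, W, b):
        # W: (pieces, n_out, n_in). Each piece proposes an output; the layer
        # keeps the element-wise maximum across pieces.
        candidates = numpy.tensordot(X, W, axes=[[1], [2]]) + b  # (batch, pieces, n_out)
        return candidates.max(axis=1)

    def layer_norm(X, eps=1e-6):
        # Normalize each row to zero mean / unit variance, as the
        # LN(Maxout(...)) wrapper does around the embedding layer.
        mu = X.mean(axis=1, keepdims=True)
        sd = X.std(axis=1, keepdims=True)
        return (X - mu) / (sd + eps)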
From 0209a06b4e2e572d8dbdab8724c05cd884a85319 Mon Sep 17 00:00:00 2001
From: Matthew Honnibal
Date: Wed, 16 Aug 2017 18:25:49 -0500
Subject: [PATCH 11/12] Update beam parser

---
 spacy/syntax/_beam_utils.pyx | 71 ++++++++++++++++++------------------
 spacy/syntax/beam_parser.pyx |  7 ++--
 spacy/syntax/nn_parser.pyx   | 24 +++++++-----
 3 files changed, 53 insertions(+), 49 deletions(-)

diff --git a/spacy/syntax/_beam_utils.pyx b/spacy/syntax/_beam_utils.pyx
index 48030b72a..7afe51d4f 100644
--- a/spacy/syntax/_beam_utils.pyx
+++ b/spacy/syntax/_beam_utils.pyx
@@ -6,6 +6,7 @@ from cpython.ref cimport PyObject, Py_INCREF, Py_XDECREF
 from thinc.extra.search cimport Beam
 from thinc.extra.search import MaxViolation
 from thinc.typedefs cimport hash_t, class_t
+from thinc.extra.search cimport MaxViolation

 from .transition_system cimport TransitionSystem, Transition
 from .stateclass cimport StateClass
@@ -45,6 +46,7 @@ cdef class ParserBeam(object):
     cdef public object states
     cdef public object golds
     cdef public object beams
+    cdef public object dones

     def __init__(self, TransitionSystem moves, states, golds,
                  int width=4, float density=0.001):
@@ -61,6 +63,7 @@ cdef class ParserBeam(object):
             st = beam.at(i)
             st.c.offset = state.c.offset
             self.beams.append(beam)
+        self.dones = [False] * len(self.beams)

     def __dealloc__(self):
         if self.beams is not None:
@@ -70,7 +73,7 @@ cdef class ParserBeam(object):

     @property
     def is_done(self):
-        return all(b.is_done for b in self.beams)
+        return all(b.is_done or self.dones[i] for i, b in enumerate(self.beams))

     def __getitem__(self, i):
         return self.beams[i]
@@ -81,19 +84,24 @@ cdef class ParserBeam(object):
     def advance(self, scores, follow_gold=False):
         cdef Beam beam
         for i, beam in enumerate(self.beams):
-            if beam.is_done or not scores[i].size:
+            if beam.is_done or not scores[i].size or self.dones[i]:
                 continue
             self._set_scores(beam, scores[i])
             if self.golds is not None:
                 self._set_costs(beam, self.golds[i], follow_gold=follow_gold)
-            beam.advance(_transition_state, NULL, self.moves.c)
+            beam.advance(_transition_state, _hash_state, self.moves.c)
             beam.check_done(_check_final_state, NULL)
-            if beam.is_done:
+            if beam.is_done and self.golds is not None:
                 for j in range(beam.size):
-                    if is_gold(beam.at(j), self.golds[i], self.moves.strings):
-                        beam._states[j].loss = 0.0
-                    elif beam._states[j].loss == 0.0:
-                        beam._states[j].loss = 1.0
+                    state = beam.at(j)
+                    if state.is_final():
+                        try:
+                            if self.moves.is_gold_parse(state, self.golds[i]):
+                                beam._states[j].loss = 0.0
+                            elif beam._states[j].loss == 0.0:
+                                beam._states[j].loss = 1.0
+                        except NotImplementedError:
+                            break

     def _set_scores(self, Beam beam, float[:, ::1] scores):
         cdef float* c_scores = &scores[0, 0]
@@ -110,7 +118,6 @@ cdef class ParserBeam(object):
                     beam.scores[i][j] = 0
                     beam.costs[i][j] = 0
-
     def _set_costs(self, Beam beam, GoldParse gold, int follow_gold=False):
         for i in range(beam.size):
             state = beam.at(i)
@@ -122,21 +129,6 @@ cdef class ParserBeam(object):
                     beam.is_valid[i][j] = 0

-def is_gold(StateClass state, GoldParse gold, strings):
-    predicted = set()
-    truth = set()
-    for i in range(gold.length):
-        if gold.cand_to_gold[i] is None:
-            continue
-        if state.safe_get(i).dep:
-            predicted.add((i, state.H(i), strings[state.safe_get(i).dep]))
-        else:
-            predicted.add((i, state.H(i), 'ROOT'))
-        id_, word, tag, head, dep, ner = gold.orig_annot[gold.cand_to_gold[i]]
-        truth.add((id_, head, dep))
-    return truth == predicted
-
 def get_token_ids(states, int n_tokens):
     cdef StateClass state
     cdef np.ndarray ids = numpy.zeros((len(states), n_tokens),
@@ -156,16 +148,19 @@ def update_beam(TransitionSystem moves, int nr_feature, int max_steps,
                 states, tokvecs, golds,
                 state2vec, vec2scores, drop=0., sgd=None,
                 losses=None, int width=4, float density=0.001):
     global nr_update
+    cdef MaxViolation violn
     nr_update += 1
     pbeam = ParserBeam(moves, states, golds,
                        width=width, density=density)
     gbeam = ParserBeam(moves, states, golds,
-                       width=width, density=0.0)
+                       width=width, density=density)
     cdef StateClass state
     beam_maps = []
     backprops = []
     violns = [MaxViolation() for _ in range(len(states))]
     for t in range(max_steps):
+        if pbeam.is_done and gbeam.is_done:
+            break
         # The beam maps let us find the right row in the flattened scores
         # arrays for each state. States are identified by (example id, history).
         # We keep a different beam map for each step (since we'll have a flat
@@ -192,12 +188,16 @@ def update_beam(TransitionSystem moves, int nr_feature, int max_steps,
         # Track the "maximum violation", to use in the update.
         for i, violn in enumerate(violns):
             violn.check_crf(pbeam[i], gbeam[i])
-
-    # Only make updates if we have non-gold states
-    histories = [((v.p_hist + v.g_hist) if v.p_hist else []) for v in violns]
-    losses = [((v.p_probs + v.g_probs) if v.p_probs else []) for v in violns]
-    states_d_scores = get_gradient(moves.n_moves, beam_maps,
-                                   histories, losses)
+    histories = []
+    losses = []
+    for i, violn in enumerate(violns):
+        if violn.cost < 1:
+            histories.append([])
+            losses.append([])
+        else:
+            histories.append(violn.p_hist + violn.g_hist)
+            losses.append(violn.p_probs + violn.g_probs)
+    states_d_scores = get_gradient(moves.n_moves, beam_maps, histories, losses)
     return states_d_scores, backprops[:len(states_d_scores)]
@@ -216,7 +215,9 @@ def get_states(pbeams, gbeams, beam_map, nr_update):
     for eg_id, (pbeam, gbeam) in enumerate(zip(pbeams, gbeams)):
         p_indices.append([])
         g_indices.append([])
-        if pbeam.loss > 0 and pbeam.min_score > (gbeam.score + nr_update):
+        if pbeam.loss > 0 and pbeam.min_score > (gbeam.score + numpy.sqrt(nr_update)):
+            pbeams.dones[eg_id] = True
+            gbeams.dones[eg_id] = True
             continue
         for i in range(pbeam.size):
             state = pbeam.at(i)
@@ -261,21 +262,21 @@ def get_gradient(nr_class, beam_maps, histories, losses):
     nr_step = 0
     for eg_id, hists in enumerate(histories):
         for loss, hist in zip(losses[eg_id], hists):
-            if abs(loss) >= 0.0001 and not numpy.isnan(loss):
+            if loss != 0.0 and not numpy.isnan(loss):
                 nr_step = max(nr_step, len(hist))
     for i in range(nr_step):
         grads.append(numpy.zeros((max(beam_maps[i].values())+1, nr_class), dtype='f'))
     assert len(histories) == len(losses)
     for eg_id, hists in enumerate(histories):
         for loss, hist in zip(losses[eg_id], hists):
-            if abs(loss) < 0.0001 or numpy.isnan(loss):
+            if abs(loss) == 0.0 or numpy.isnan(loss):
                 continue
             key = tuple([eg_id])
             for j, clas in enumerate(hist):
                 i = beam_maps[j][key]
                 # In step j, at state i action clas
                 # resulted in loss
-                grads[j][i, clas] += loss / len(histories)
+                grads[j][i, clas] += loss
                 key = key + tuple([clas])
     return grads
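The check_crf calls above feed thinc's MaxViolation, which tracks the step at which the predicted beam's score most exceeds the gold beam's, the classic max-violation update for beam search. A schematic of the selection criterion, not thinc's actual implementation:

    def max_violation_step(pred_scores, gold_scores):
        # Scores are per-step totals for the two beams; pick the step where
        # the prediction beats the gold by the widest margin.
        best_step, best_delta = -1, 0.0
        for t, (p, g) in enumerate(zip(pred_scores, gold_scores)):
            if p - g > best_delta:
                best_step, best_delta = t, p - g
        return best_step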
diff --git a/spacy/syntax/beam_parser.pyx b/spacy/syntax/beam_parser.pyx
index f4f66f9fb..68e9f27af 100644
--- a/spacy/syntax/beam_parser.pyx
+++ b/spacy/syntax/beam_parser.pyx
@@ -34,7 +34,6 @@ from ._parse_features cimport CONTEXT_SIZE
 from ._parse_features cimport fill_context
 from .stateclass cimport StateClass
 from .parser cimport Parser
-from ._beam_utils import is_gold

 DEBUG = False
@@ -108,7 +107,7 @@ cdef class BeamParser(Parser):
         # The non-monotonic oracle makes it difficult to ensure final costs are
         # correct. Therefore do final correction
         for i in range(pred.size):
-            if is_gold(pred.at(i), gold_parse, self.moves.strings):
+            if self.moves.is_gold_parse(pred.at(i), gold_parse):
                 pred._states[i].loss = 0.0
             elif pred._states[i].loss == 0.0:
                 pred._states[i].loss = 1.0
@@ -214,7 +213,7 @@ def _check_train_integrity(Beam pred, Beam gold, GoldParse gold_parse, Transitio
         if not pred._states[i].is_done or pred._states[i].loss == 0:
             continue
         state = pred.at(i)
-        if is_gold(state, gold_parse, moves.strings) == True:
+        if moves.is_gold_parse(state, gold_parse) == True:
             for dep in gold_parse.orig_annot:
                 print(dep[1], dep[3], dep[4])
             print("Cost", pred._states[i].loss)
@@ -228,7 +227,7 @@ def _check_train_integrity(Beam pred, Beam gold, GoldParse gold_parse, Transitio
         if not gold._states[i].is_done:
             continue
         state = gold.at(i)
-        if is_gold(state, gold_parse, moves.strings) == False:
+        if moves.is_gold(state, gold_parse) == False:
             print("Truth")
             for dep in gold_parse.orig_annot:
                 print(dep[1], dep[3], dep[4])
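As the comment in the hunk above notes, the non-monotonic oracle makes incrementally accumulated costs unreliable, so the costs are corrected once parsing has finished. The correction rule, extracted into a standalone sketch (the gold-parse predicate is passed in rather than taken from a transition system):

    def correct_final_costs(final_states, losses, gold, is_gold_parse):
        # States that recover the gold parse get loss 0; any other finished
        # state gets at least loss 1.
        for i, state in enumerate(final_states):
            if is_gold_parse(state, gold):
                losses[i] = 0.0
            elif losses[i] == 0.0:
                losses[i] = 1.0
        return losses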
""" if BEAM_PARSE: - beam_width = 8 + beam_width = 16 cdef Doc doc cdef Beam beam for docs in cytoolz.partition_all(batch_size, docs): @@ -427,7 +430,7 @@ cdef class Parser: next_step.push_back(st) return states - def beam_parse(self, docs, tokvecses, int beam_width=8, float beam_density=0.001): + def beam_parse(self, docs, tokvecses, int beam_width=16, float beam_density=0.001): cdef Beam beam cdef np.ndarray scores cdef Doc doc @@ -471,13 +474,13 @@ cdef class Parser: for k in range(nr_class): beam.scores[i][k] = c_scores[j * scores.shape[1] + k] j += 1 - beam.advance(_transition_state, NULL, self.moves.c) + beam.advance(_transition_state, _hash_state, self.moves.c) beam.check_done(_check_final_state, NULL) beams.append(beam) return beams def update(self, docs_tokvecs, golds, drop=0., sgd=None, losses=None): - if BEAM_PARSE: + if BEAM_PARSE and numpy.random.random() >= 0.5: return self.update_beam(docs_tokvecs, golds, drop=drop, sgd=sgd, losses=losses) if losses is not None and self.name not in losses: @@ -568,7 +571,7 @@ cdef class Parser: states, tokvecs, golds, state2vec, vec2scores, drop, sgd, losses, - width=8) + width=16) backprop_lower = [] for i, d_scores in enumerate(states_d_scores): if losses is not None: @@ -633,9 +636,10 @@ cdef class Parser: xp = get_array_module(d_tokvecs) for ids, d_vector, bp_vector in backprops: d_state_features = bp_vector(d_vector, sgd=sgd) - mask = (ids >= 0).reshape((ids.shape[0], ids.shape[1], 1)) - self.model[0].ops.scatter_add(d_tokvecs, ids, - d_state_features * mask) + mask = ids >= 0 + d_state_features *= mask.reshape(ids.shape + (1,)) + self.model[0].ops.scatter_add(d_tokvecs, ids * mask, + d_state_features) @property def move_names(self): @@ -651,7 +655,7 @@ cdef class Parser: lower, stream, drop=dropout) return state2vec, upper - nr_feature = 13 + nr_feature = 8 def get_token_ids(self, states): cdef StateClass state From f75420ae79f67da5091bd9c02622aa3c756d36d9 Mon Sep 17 00:00:00 2001 From: Matthew Honnibal Date: Fri, 18 Aug 2017 13:31:15 -0500 Subject: [PATCH 12/12] Unhack beam parsing, moving it under options instead of global flags --- spacy/syntax/_beam_utils.pyx | 33 +++++++++++++++++--------------- spacy/syntax/nn_parser.pyx | 37 ++++++++++++++++++++++-------------- 2 files changed, 41 insertions(+), 29 deletions(-) diff --git a/spacy/syntax/_beam_utils.pyx b/spacy/syntax/_beam_utils.pyx index 7afe51d4f..4d90fe23b 100644 --- a/spacy/syntax/_beam_utils.pyx +++ b/spacy/syntax/_beam_utils.pyx @@ -49,7 +49,7 @@ cdef class ParserBeam(object): cdef public object dones def __init__(self, TransitionSystem moves, states, golds, - int width=4, float density=0.001): + int width, float density): self.moves = moves self.states = states self.golds = golds @@ -89,7 +89,10 @@ cdef class ParserBeam(object): self._set_scores(beam, scores[i]) if self.golds is not None: self._set_costs(beam, self.golds[i], follow_gold=follow_gold) - beam.advance(_transition_state, _hash_state, self.moves.c) + if follow_gold: + beam.advance(_transition_state, NULL, self.moves.c) + else: + beam.advance(_transition_state, _hash_state, self.moves.c) beam.check_done(_check_final_state, NULL) if beam.is_done and self.golds is not None: for j in range(beam.size): @@ -145,15 +148,16 @@ def get_token_ids(states, int n_tokens): nr_update = 0 def update_beam(TransitionSystem moves, int nr_feature, int max_steps, states, tokvecs, golds, - state2vec, vec2scores, drop=0., sgd=None, - losses=None, int width=4, float density=0.001): + state2vec, vec2scores, + int width, float density, 
From f75420ae79f67da5091bd9c02622aa3c756d36d9 Mon Sep 17 00:00:00 2001
From: Matthew Honnibal
Date: Fri, 18 Aug 2017 13:31:15 -0500
Subject: [PATCH 12/12] Unhack beam parsing, moving it under options instead of
 global flags

---
 spacy/syntax/_beam_utils.pyx | 33 +++++++++++++++++--------------
 spacy/syntax/nn_parser.pyx   | 37 ++++++++++++++++++++++--------------
 2 files changed, 41 insertions(+), 29 deletions(-)

diff --git a/spacy/syntax/_beam_utils.pyx b/spacy/syntax/_beam_utils.pyx
index 7afe51d4f..4d90fe23b 100644
--- a/spacy/syntax/_beam_utils.pyx
+++ b/spacy/syntax/_beam_utils.pyx
@@ -49,7 +49,7 @@ cdef class ParserBeam(object):
     cdef public object dones

     def __init__(self, TransitionSystem moves, states, golds,
-                 int width=4, float density=0.001):
+                 int width, float density):
         self.moves = moves
         self.states = states
         self.golds = golds
@@ -89,7 +89,10 @@ cdef class ParserBeam(object):
             self._set_scores(beam, scores[i])
             if self.golds is not None:
                 self._set_costs(beam, self.golds[i], follow_gold=follow_gold)
-            beam.advance(_transition_state, _hash_state, self.moves.c)
+            if follow_gold:
+                beam.advance(_transition_state, NULL, self.moves.c)
+            else:
+                beam.advance(_transition_state, _hash_state, self.moves.c)
             beam.check_done(_check_final_state, NULL)
             if beam.is_done and self.golds is not None:
                 for j in range(beam.size):
@@ -145,15 +148,16 @@ def get_token_ids(states, int n_tokens):
 nr_update = 0
 def update_beam(TransitionSystem moves, int nr_feature, int max_steps,
                 states, tokvecs, golds,
-                state2vec, vec2scores, drop=0., sgd=None,
-                losses=None, int width=4, float density=0.001):
+                state2vec, vec2scores,
+                int width, float density,
+                sgd=None, losses=None, drop=0.):
     global nr_update
     cdef MaxViolation violn
     nr_update += 1
     pbeam = ParserBeam(moves, states, golds,
                        width=width, density=density)
     gbeam = ParserBeam(moves, states, golds,
-                       width=width, density=density)
+                       width=width, density=0.0)
     cdef StateClass state
     beam_maps = []
     backprops = []
     violns = [MaxViolation() for _ in range(len(states))]
     for t in range(max_steps):
@@ -194,13 +198,13 @@ def update_beam(TransitionSystem moves, int nr_feature, int max_steps,
         violn.check_crf(pbeam[i], gbeam[i])
     histories = []
     losses = []
-    for i, violn in enumerate(violns):
-        if violn.cost < 1:
-            histories.append([])
-            losses.append([])
-        else:
-            histories.append(violn.p_hist + violn.g_hist)
-            losses.append(violn.p_probs + violn.g_probs)
+    for violn in violns:
+        if violn.p_hist:
+            histories.append(violn.p_hist + violn.g_hist)
+            losses.append(violn.p_probs + violn.g_probs)
+        else:
+            histories.append([])
+            losses.append([])
     states_d_scores = get_gradient(moves.n_moves, beam_maps, histories, losses)
     return states_d_scores, backprops[:len(states_d_scores)]
@@ -215,10 +219,6 @@ def get_states(pbeams, gbeams, beam_map, nr_update):
     for eg_id, (pbeam, gbeam) in enumerate(zip(pbeams, gbeams)):
         p_indices.append([])
         g_indices.append([])
-        if pbeam.loss > 0 and pbeam.min_score > (gbeam.score + numpy.sqrt(nr_update)):
-            pbeams.dones[eg_id] = True
-            gbeams.dones[eg_id] = True
-            continue
         for i in range(pbeam.size):
             state = pbeam.at(i)
             if not state.is_final():
@@ -269,9 +269,12 @@ def get_gradient(nr_class, beam_maps, histories, losses):
     assert len(histories) == len(losses)
     for eg_id, hists in enumerate(histories):
         for loss, hist in zip(losses[eg_id], hists):
-            if abs(loss) == 0.0 or numpy.isnan(loss):
+            if loss == 0.0 or numpy.isnan(loss):
                 continue
             key = tuple([eg_id])
+            # Adjust loss for length
+            avg_loss = loss / len(hist)
+            loss += avg_loss * (nr_step - len(hist))
             for j, clas in enumerate(hist):
                 i = beam_maps[j][key]
                 # In step j, at state i action clas
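The length adjustment added to get_gradient above is equivalent to rescaling each history's loss by nr_step / len(hist), so short histories are not under-weighted relative to full-length ones. A quick check with invented numbers:

    loss, hist_len, nr_step = -1.2, 3, 5
    avg_loss = loss / hist_len
    adjusted = loss + avg_loss * (nr_step - hist_len)
    assert abs(adjusted - loss * nr_step / hist_len) < 1e-9  # both give -2.0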
""" - if BEAM_PARSE: - beam_width = 16 + if beam_width is None: + beam_width = self.cfg.get('beam_width', 1) + if beam_density is None: + beam_density = self.cfg.get('beam_density', 0.0) cdef Doc doc cdef Beam beam for docs in cytoolz.partition_all(batch_size, docs): @@ -430,7 +433,7 @@ cdef class Parser: next_step.push_back(st) return states - def beam_parse(self, docs, tokvecses, int beam_width=16, float beam_density=0.001): + def beam_parse(self, docs, tokvecses, int beam_width=3, float beam_density=0.001): cdef Beam beam cdef np.ndarray scores cdef Doc doc @@ -480,9 +483,10 @@ cdef class Parser: return beams def update(self, docs_tokvecs, golds, drop=0., sgd=None, losses=None): - if BEAM_PARSE and numpy.random.random() >= 0.5: - return self.update_beam(docs_tokvecs, golds, drop=drop, sgd=sgd, - losses=losses) + if self.cfg.get('beam_width', 1) >= 2 and numpy.random.random() >= 0.5: + return self.update_beam(docs_tokvecs, golds, + self.cfg['beam_width'], self.cfg['beam_density'], + drop=drop, sgd=sgd, losses=losses) if losses is not None and self.name not in losses: losses[self.name] = 0. docs, tokvec_lists = docs_tokvecs @@ -548,7 +552,12 @@ cdef class Parser: bp_my_tokvecs(d_tokvecs, sgd=sgd) return d_tokvecs - def update_beam(self, docs_tokvecs, golds, drop=0., sgd=None, losses=None): + def update_beam(self, docs_tokvecs, golds, width=None, density=None, + drop=0., sgd=None, losses=None): + if width is None: + width = self.cfg.get('beam_width', 2) + if density is None: + density = self.cfg.get('beam_density', 0.0) if losses is not None and self.name not in losses: losses[self.name] = 0. docs, tokvecs = docs_tokvecs @@ -570,8 +579,8 @@ cdef class Parser: states_d_scores, backprops = _beam_utils.update_beam(self.moves, self.nr_feature, 500, states, tokvecs, golds, state2vec, vec2scores, - drop, sgd, losses, - width=16) + width, density, + sgd=sgd, drop=drop, losses=losses) backprop_lower = [] for i, d_scores in enumerate(states_d_scores): if losses is not None: