# cython: infer_types=True
cimport numpy as np
import numpy
from cpython.ref cimport PyObject, Py_INCREF, Py_XDECREF
from thinc.extra.search cimport Beam
from thinc.extra.search import MaxViolation
from thinc.typedefs cimport hash_t, class_t

from .transition_system cimport TransitionSystem, Transition
from .stateclass cimport StateClass
from ..gold cimport GoldParse
from ..tokens.doc cimport Doc


# These are passed as callbacks to thinc.extra.search.Beam.
cdef int _transition_state(void* _dest, void* _src, class_t clas, void* _moves) except -1:
    dest = <StateClass>_dest
    src = <StateClass>_src
    moves = <const Transition*>_moves
    dest.clone(src)
    moves[clas].do(dest.c, moves[clas].label)


cdef int _check_final_state(void* _state, void* extra_args) except -1:
    return (<StateClass>_state).is_final()


def _cleanup(Beam beam):
    for i in range(beam.width):
        Py_XDECREF(<PyObject*>beam._states[i].content)
        Py_XDECREF(<PyObject*>beam._parents[i].content)


cdef hash_t _hash_state(void* _state, void* _) except 0:
    state = <StateClass>_state
    if state.c.is_final():
        return 1
    else:
        return state.c.hash()


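# ParserBeam keeps one Beam per input state and steps them all forward in
# lock-step. When gold parses are provided, _set_costs can additionally
# restrict each beam to zero-cost (gold-consistent) transitions.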
cdef class ParserBeam(object):
    cdef public TransitionSystem moves
    cdef public object states
    cdef public object golds
    cdef public object beams

    def __init__(self, TransitionSystem moves, states, golds,
                 int width=4, float density=0.001):
        self.moves = moves
        self.states = states
        self.golds = golds
        self.beams = []
        cdef Beam beam
        cdef StateClass state, st
        for state in states:
            beam = Beam(self.moves.n_moves, width, density)
            beam.initialize(self.moves.init_beam_state, state.c.length, state.c._sent)
            for i in range(beam.size):
                st = <StateClass>beam.at(i)
                st.c.offset = state.c.offset
            self.beams.append(beam)

    @property
    def is_done(self):
        return all(beam.is_done for beam in self.beams)

    def __getitem__(self, i):
        return self.beams[i]

    def __len__(self):
        return len(self.beams)

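    # Advance every beam by one transition. `scores` is indexed by beam; when
    # follow_gold is True, only zero-cost (gold-consistent) moves stay valid,
    # and no hash callback is passed to Beam.advance (NULL), presumably so
    # equivalent states are not merged by hash.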
    def advance(self, scores, follow_gold=False):
        cdef Beam beam
        for i, beam in enumerate(self.beams):
            self._set_scores(beam, scores[i])
            if self.golds is not None:
                self._set_costs(beam, self.golds[i], follow_gold=follow_gold)
            if follow_gold:
                assert self.golds is not None
                beam.advance(_transition_state, NULL, <void*>self.moves.c)
            else:
                beam.advance(_transition_state, _hash_state, <void*>self.moves.c)
            beam.check_done(_check_final_state, NULL)

    def _set_scores(self, Beam beam, scores):
        for i in range(beam.size):
            state = <StateClass>beam.at(i)
            if not state.is_final():
                for j in range(beam.nr_class):
                    beam.scores[i][j] = scores[i, j]
                self.moves.set_valid(beam.is_valid[i], state.c)

    def _set_costs(self, Beam beam, GoldParse gold, int follow_gold=False):
        for i in range(beam.size):
            state = <StateClass>beam.at(i)
            self.moves.set_costs(beam.is_valid[i], beam.costs[i], state, gold)
            if follow_gold:
                for j in range(beam.nr_class):
                    beam.is_valid[i][j] *= beam.costs[i][j] <= 0


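# Collect the context-token feature ids for every state into one contiguous
# int32 array of shape (len(states), n_tokens); rows for finished states are
# filled with -1.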
def get_token_ids(states, int n_tokens):
    cdef StateClass state
    cdef np.ndarray ids = numpy.zeros((len(states), n_tokens),
                                      dtype='int32', order='C')
    c_ids = <int*>ids.data
    for i, state in enumerate(states):
        if not state.is_final():
            state.c.set_context_tokens(c_ids, n_tokens)
        else:
            ids[i] = -1
        c_ids += ids.shape[1]
    return ids


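# Run a predicted beam and a gold-constrained beam side by side for up to
# max_steps transitions, keeping the backprop callbacks from each scoring
# step. MaxViolation (from thinc) compares the two beams per example and
# collects candidate histories with their losses (p_hist/g_hist,
# p_probs/g_probs), which get_gradient turns into per-step gradients over
# the class scores.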
def update_beam(TransitionSystem moves, int nr_feature, int max_steps,
                states, tokvecs, golds,
                state2vec, vec2scores, drop=0., sgd=None,
                losses=None, int width=4, float density=0.001):
    pbeam = ParserBeam(moves, states, golds,
                       width=width, density=density)
    gbeam = ParserBeam(moves, states, golds,
                       width=width, density=density)
    beam_maps = []
    backprops = []
    violns = [MaxViolation() for _ in range(len(states))]
    for t in range(max_steps):
        if pbeam.is_done and gbeam.is_done:
            break
        beam_maps.append({})
        states, p_indices, g_indices = get_states(pbeam, gbeam, beam_maps[-1])

        token_ids = get_token_ids(states, nr_feature)
        vectors, bp_vectors = state2vec.begin_update(token_ids, drop=drop)
        scores, bp_scores = vec2scores.begin_update(vectors, drop=drop)

        backprops.append((token_ids, bp_vectors, bp_scores))

        p_scores = [numpy.ascontiguousarray(scores[indices], dtype='f')
                    for indices in p_indices]
        g_scores = [numpy.ascontiguousarray(scores[indices], dtype='f')
                    for indices in g_indices]
        pbeam.advance(p_scores)
        gbeam.advance(g_scores, follow_gold=True)

        for i, violn in enumerate(violns):
            violn.check_crf(pbeam[i], gbeam[i])

    histories = [(v.p_hist + v.g_hist) for v in violns]
    losses = [(v.p_probs + v.g_probs) for v in violns]
    states_d_scores = get_gradient(moves.n_moves, beam_maps,
                                   histories, losses)
    return states_d_scores, backprops


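# Flatten the states from the predicted and gold beams into a single list so
# they can be scored in one batch. States are keyed by (eg_id, *history), so
# a gold candidate that also survives in the predicted beam reuses the same
# row; beam_map records the key -> row index mapping that get_gradient needs.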
def get_states(pbeams, gbeams, beam_map):
    seen = {}
    states = []
    p_indices = []
    g_indices = []
    cdef Beam pbeam, gbeam
    for eg_id, (pbeam, gbeam) in enumerate(zip(pbeams, gbeams)):
        p_indices.append([])
        for j in range(pbeam.size):
            key = tuple([eg_id] + pbeam.histories[j])
            seen[key] = len(states)
            p_indices[-1].append(len(states))
            states.append(<StateClass>pbeam.at(j))
        beam_map.update(seen)
        g_indices.append([])
        for i in range(gbeam.size):
            key = tuple([eg_id] + gbeam.histories[i])
            if key in seen:
                g_indices[-1].append(seen[key])
            else:
                g_indices[-1].append(len(states))
                beam_map[key] = len(states)
                states.append(<StateClass>gbeam.at(i))

    p_indices = [numpy.asarray(idx, dtype='i') for idx in p_indices]
    g_indices = [numpy.asarray(idx, dtype='i') for idx in g_indices]
    return states, p_indices, g_indices


def get_gradient(nr_class, beam_maps, histories, losses):
    """
    The global model assigns a loss to each parse. The beam scores
    are additive, so the same gradient is applied to each action
    in the history. This gives the gradient of a single *action*
    for a beam state -- so we have "the gradient of the loss for taking
    action i given history H."

    Histories: Each history is a list of actions.
        Each candidate has a history.
        Each beam has multiple candidates.
        Each batch has multiple beams.
    So histories is a list of lists of lists of ints.
    """
    nr_step = len(beam_maps)
    grads = [numpy.zeros((max(beam_map.values())+1, nr_class), dtype='f')
             for beam_map in beam_maps]
    for eg_id, hists in enumerate(histories):
        for loss, hist in zip(losses[eg_id], hists):
            key = tuple([eg_id])
            for j, clas in enumerate(hist):
                i = beam_maps[j][key]
                # In step j, at state i, action clas resulted in this loss.
                grads[j][i, clas] += loss
                key = key + tuple([clas])
    return grads
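# A rough sketch (an assumption about the caller, not part of this module) of
# how update_beam's outputs might be consumed, given that state2vec and
# vec2scores follow thinc's begin_update convention of returning
# (output, backprop) pairs:
#
#     states_d_scores, backprops = update_beam(moves, nr_feature, max_steps,
#                                              states, tokvecs, golds,
#                                              state2vec, vec2scores, sgd=sgd)
#     for (ids, bp_vectors, bp_scores), d_scores in zip(backprops,
#                                                       states_d_scores):
#         d_vectors = bp_scores(d_scores, sgd=sgd)
#         bp_vectors(d_vectors, sgd=sgd)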