Revert "Move to contiguous buffer for token_ids and d_vectors"

This reverts commit 3ff8c35a79.
This commit is contained in:
Matthew Honnibal 2017-05-20 11:26:23 -05:00
parent b272890a8c
commit d52b65aec2
2 changed files with 30 additions and 38 deletions

View File

@@ -237,9 +237,10 @@ cdef class NeuralEntityRecognizer(NeuralParser):
     nr_feature = 6

-    def set_token_ids(self, ids, states):
+    def get_token_ids(self, states):
         cdef StateClass state
         cdef int n_tokens = 6
+        ids = numpy.zeros((len(states), n_tokens), dtype='i', order='c')
         for i, state in enumerate(states):
             ids[i, 0] = state.c.B(0)-1
             ids[i, 1] = state.c.B(0)
@@ -252,7 +253,7 @@ cdef class NeuralEntityRecognizer(NeuralParser):
                     ids[i, j] = -1
                 if ids[i, j] != -1:
                     ids[i, j] += state.c.offset
-        ids[i+1:ids.shape[0]] = -1
+        return ids


 cdef class BeamDependencyParser(BeamParser):

View File

@@ -303,9 +303,7 @@ cdef class Parser:
         todo = [st for st in states if not st.is_final()]
         while todo:
-            token_ids = numpy.zeros((len(todo), self.nr_feature),
-                                    dtype='i', order='C')
-            self.set_token_ids(token_ids, todo)
+            token_ids = self.get_token_ids(todo)
             vectors = state2vec(token_ids)
             scores = vec2scores(vectors)
             self.transition_batch(todo, scores)
@@ -329,53 +327,44 @@ cdef class Parser:
         todo = [(s, g) for s, g in zip(states, golds) if not s.is_final()]
         backprops = []
-        cdef int max_steps = max(len(doc)*3 for doc in docs)
-        # Allocate one buffer for the token_ids and d_vectors
-        # This will make it quicker to copy back to GPU
-        token_ids = numpy.zeros((max_steps, len(todo), self.nr_feature),
-                                dtype='i', order='C')
-        d_vectors = numpy.zeros((max_steps, len(todo), self.model[0].nO),
-                                dtype='f', order='C')
         cdef float loss = 0.
-        cdef int nr_step = 0
-        while len(todo) >= 4 and nr_step < max_steps:
+        while todo:
             states, golds = zip(*todo)
-            self.set_token_ids(token_ids[nr_step], states)
-            length = len(todo)
-            vector, bp_vector = state2vec.begin_update(token_ids[nr_step, :length],
-                                                       drop=drop)
+            token_ids = self.get_token_ids(states)
+            vector, bp_vector = state2vec.begin_update(token_ids, drop=drop)
             scores, bp_scores = vec2scores.begin_update(vector, drop=drop)
             d_scores = self.get_batch_loss(states, golds, scores)
-            d_vectors[nr_step, :length] = bp_scores(d_scores, sgd=sgd)
-            backprops.append((length, bp_vector))
+            d_vector = bp_scores(d_scores, sgd=sgd)
+            if isinstance(self.model[0].ops, CupyOps) \
+            and not isinstance(token_ids, state2vec.ops.xp.ndarray):
+                # Move token_ids and d_vector to CPU, asynchronously
+                backprops.append((
+                    get_async(cuda_stream, token_ids),
+                    get_async(cuda_stream, d_vector),
+                    bp_vector
+                ))
+            else:
+                backprops.append((token_ids, d_vector, bp_vector))
             self.transition_batch(states, scores)
             todo = [st for st in todo if not st[0].is_final()]
-            nr_step += 1
-        d_tokvecs = state2vec.ops.allocate(tokvecs.shape)
-        if type(token_ids) != type(d_tokvecs):
-            token_ids = get_async(cuda_stream, token_ids)
-            d_vectors = get_async(cuda_stream, d_vectors)
-        if cuda_stream is not None:
-            # Tells CUDA to block, so our async copies complete.
+        # Tells CUDA to block, so our async copies complete.
+        if cuda_stream is not None:
             cuda_stream.synchronize()
+        d_tokvecs = state2vec.ops.allocate(tokvecs.shape)
         xp = state2vec.ops.xp # Handle for numpy/cupy
-        for i, (length, bp_vector) in enumerate(backprops):
-            d_vector = d_vectors[i, :length]
+        for token_ids, d_vector, bp_vector in backprops:
             d_state_features = bp_vector(d_vector, sgd=sgd)
-            step_token_ids = token_ids[i, :length]
-            active_feats = step_token_ids * (step_token_ids >= 0)
-            active_feats = active_feats.reshape((active_feats.shape[0],
-                                                 active_feats.shape[1], 1))
+            active_feats = token_ids * (token_ids >= 0)
+            active_feats = active_feats.reshape((token_ids.shape[0], token_ids.shape[1], 1))
             if hasattr(xp, 'scatter_add'):
                 xp.scatter_add(d_tokvecs,
-                    step_token_ids, d_state_features)
+                    token_ids, d_state_features * active_feats)
             else:
                 xp.add.at(d_tokvecs,
-                    step_token_ids, d_state_features * active_feats)
+                    token_ids, d_state_features * active_feats)
         return d_tokvecs

     def get_batch_model(self, batch_size, tokvecs, stream, dropout):
@@ -386,11 +375,13 @@ cdef class Parser:
     nr_feature = 13

-    def set_token_ids(self, token_ids, states):
+    def get_token_ids(self, states):
         cdef StateClass state
+        cdef int n_tokens = self.nr_feature
+        ids = numpy.zeros((len(states), n_tokens), dtype='i', order='C')
         for i, state in enumerate(states):
-            state.set_context_tokens(token_ids[i])
-        token_ids[i+1:token_ids.shape[0]] = -1
+            state.set_context_tokens(ids[i])
+        return ids

     def transition_batch(self, states, float[:, ::1] scores):
         cdef StateClass state