Don't share CNN, to reduce complexities

Matthew Honnibal 2017-09-21 14:59:48 +02:00
parent 1d73dec8b1
commit 20193371f5
8 changed files with 69 additions and 150 deletions

View File

@@ -226,8 +226,8 @@ def drop_layer(layer, factor=2.):
return model
def Tok2Vec(width, embed_size, pretrained_dims=0, **kwargs):
assert pretrained_dims is not None
def Tok2Vec(width, embed_size, **kwargs):
pretrained_dims = kwargs.get('pretrained_dims', 0)
cnn_maxout_pieces = kwargs.get('cnn_maxout_pieces', 3)
cols = [ID, NORM, PREFIX, SUFFIX, SHAPE, ORTH]
with Model.define_operators({'>>': chain, '|': concatenate, '**': clone, '+': add}):
@@ -474,20 +474,18 @@ def getitem(i):
return X[i], None
return layerize(getitem_fwd)
def build_tagger_model(nr_class, token_vector_width, pretrained_dims=0, **cfg):
embed_size = util.env_opt('embed_size', 4000)
with Model.define_operators({'>>': chain, '+': add}):
# Input: (doc, tensor) tuples
private_tok2vec = Tok2Vec(token_vector_width, embed_size,
pretrained_dims=pretrained_dims)
model = (
fine_tune(private_tok2vec)
>> with_flatten(
Maxout(token_vector_width, token_vector_width)
>> Softmax(nr_class, token_vector_width)
)
tok2vec = Tok2Vec(token_vector_width, embed_size,
pretrained_dims=pretrained_dims)
model = with_flatten(
tok2vec
>> Softmax(nr_class, token_vector_width)
)
model.nI = None
model.tok2vec = tok2vec
return model
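
The tagger model is the clearest example of the change: instead of wrapping a privately built Tok2Vec in fine_tune(), build_tagger_model now constructs and owns its CNN and exposes it as model.tok2vec. Reassembled from the hunk above with indentation restored; the import locations are assumptions based on the thinc 6.x conventions spacy._ml used at the time, not part of the diff:

# Sketch of the new build_tagger_model; imports are assumed, not shown in the hunk.
from thinc.neural import Model, Softmax
from thinc.api import chain, add, with_flatten

from spacy import util
from spacy._ml import Tok2Vec


def build_tagger_model(nr_class, token_vector_width, pretrained_dims=0, **cfg):
    embed_size = util.env_opt('embed_size', 4000)
    with Model.define_operators({'>>': chain, '+': add}):
        # The tagger builds and owns its own CNN; no fine_tune() wrapper,
        # no dependency on a shared token-vector component.
        tok2vec = Tok2Vec(token_vector_width, embed_size,
                          pretrained_dims=pretrained_dims)
        model = with_flatten(tok2vec >> Softmax(nr_class, token_vector_width))
    model.nI = None
    model.tok2vec = tok2vec  # exposed so callers can reach the embedding layer
    return model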

View File

@@ -3,12 +3,13 @@
# https://github.com/pypa/warehouse/blob/master/warehouse/__about__.py
__title__ = 'spacy-nightly'
__version__ = '2.0.0a14'
__version__ = '2.0.0a15'
__summary__ = 'Industrial-strength Natural Language Processing (NLP) with Python and Cython'
__uri__ = 'https://spacy.io'
__author__ = 'Explosion AI'
__email__ = 'contact@explosion.ai'
__license__ = 'MIT'
__release__ = False
__docs_models__ = 'https://spacy.io/docs/usage/models'
__download_url__ = 'https://github.com/explosion/spacy-models/releases/download'

View File

@@ -55,7 +55,7 @@ def train(cmd, lang, output_dir, train_data, dev_data, n_iter=20, n_sents=0,
prints(dev_path, title="Development data not found", exits=1)
pipeline = ['token_vectors', 'tags', 'dependencies', 'entities']
pipeline = ['tags', 'dependencies', 'entities']
if no_tagger and 'tags' in pipeline: pipeline.remove('tags')
if no_parser and 'dependencies' in pipeline: pipeline.remove('dependencies')
if no_entities and 'entities' in pipeline: pipeline.remove('entities')

View File

@@ -303,31 +303,17 @@ class Language(object):
if self._optimizer is None:
self._optimizer = Adam(Model.ops, 0.001)
sgd = self._optimizer
tok2vec = self.pipeline[0]
grads = {}
def get_grads(W, dW, key=None):
grads[key] = (W, dW)
pipes = list(self.pipeline[1:])
pipes = list(self.pipeline)
random.shuffle(pipes)
tokvecses, bp_tokvecses = tok2vec.model.begin_update(docs, drop=drop)
all_d_tokvecses = [tok2vec.model.ops.allocate(tv.shape) for tv in tokvecses]
for proc in pipes:
if not hasattr(proc, 'update'):
continue
d_tokvecses = proc.update((docs, tokvecses), golds,
drop=drop, sgd=get_grads, losses=losses)
if update_shared and d_tokvecses is not None:
for i, d_tv in enumerate(d_tokvecses):
all_d_tokvecses[i] += d_tv
if update_shared and bp_tokvecses is not None:
bp_tokvecses(all_d_tokvecses, sgd=sgd)
proc.update(docs, golds, drop=drop, sgd=get_grads, losses=losses)
for key, (W, dW) in grads.items():
sgd(W, dW, key=key)
# Clear the tensor variable, to free GPU memory.
# If we don't do this, the memory leak gets pretty
# bad, because we may be holding part of a batch.
for doc in docs:
doc.tensor = None
def preprocess_gold(self, docs_golds):
"""Can be called before training to pre-process gold data. By default,
@@ -371,8 +357,6 @@ class Language(object):
**cfg: Config parameters.
returns: An optimizer
"""
if self.parser:
self.pipeline.append(NeuralLabeller(self.vocab))
# Populate vocab
if get_gold_tuples is not None:
for _, annots_brackets in get_gold_tuples():
@@ -418,7 +402,6 @@ class Language(object):
assert len(docs) == len(golds)
for doc, gold in zip(docs, golds):
scorer.score(doc, gold)
doc.tensor = None
return scorer
@contextmanager
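
The Language.update loop shows the other side of the same change: there is no longer a shared tok2vec component at the head of the pipeline, so no token vectors or tokvec gradients are passed between components. A sketch of the new loop, reassembled from the hunk above with indentation restored; only the method body is shown, and the unchanged optimizer-setup context lines are omitted:

import random

# Sketch of Language.update after the change; `self` is a Language instance
# and each pipe's update() now takes the raw Docs.
def update(self, docs, golds, drop=0., sgd=None, losses=None):
    grads = {}
    def get_grads(W, dW, key=None):
        grads[key] = (W, dW)
    pipes = list(self.pipeline)   # the whole pipeline, no shared-CNN head to skip
    random.shuffle(pipes)
    for proc in pipes:
        if not hasattr(proc, 'update'):
            continue
        # Each component runs its own CNN over the Docs; nothing is returned
        # for a shared backward pass, and doc.tensor no longer needs clearing.
        proc.update(docs, golds, drop=drop, sgd=get_grads, losses=losses)
    for key, (W, dW) in grads.items():
        sgd(W, dW, key=key)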

View File

@@ -299,27 +299,25 @@ class NeuralTagger(BaseThincComponent):
self.cfg.setdefault('cnn_maxout_pieces', 2)
def __call__(self, doc):
tags = self.predict(([doc], [doc.tensor]))
tags = self.predict([doc])
self.set_annotations([doc], tags)
return doc
def pipe(self, stream, batch_size=128, n_threads=-1):
for docs in cytoolz.partition_all(batch_size, stream):
docs = list(docs)
tokvecs = [d.tensor for d in docs]
tag_ids = self.predict((docs, tokvecs))
tag_ids = self.predict(docs)
self.set_annotations(docs, tag_ids)
yield from docs
def predict(self, docs_tokvecs):
scores = self.model(docs_tokvecs)
def predict(self, docs):
scores = self.model(docs)
scores = self.model.ops.flatten(scores)
guesses = scores.argmax(axis=1)
if not isinstance(guesses, numpy.ndarray):
guesses = guesses.get()
tokvecs = docs_tokvecs[1]
guesses = self.model.ops.unflatten(guesses,
[tv.shape[0] for tv in tokvecs])
[len(d) for d in docs])
return guesses
def set_annotations(self, docs, batch_tag_ids):
@@ -339,20 +337,15 @@ class NeuralTagger(BaseThincComponent):
idx += 1
doc.is_tagged = True
def update(self, docs_tokvecs, golds, drop=0., sgd=None, losses=None):
def update(self, docs, golds, drop=0., sgd=None, losses=None):
if losses is not None and self.name not in losses:
losses[self.name] = 0.
docs, tokvecs = docs_tokvecs
if self.model.nI is None:
self.model.nI = tokvecs[0].shape[1]
tag_scores, bp_tag_scores = self.model.begin_update(docs_tokvecs, drop=drop)
tag_scores, bp_tag_scores = self.model.begin_update(docs, drop=drop)
loss, d_tag_scores = self.get_loss(docs, golds, tag_scores)
d_tokvecs = bp_tag_scores(d_tag_scores, sgd=sgd)
if losses is not None:
losses[self.name] += loss
return d_tokvecs
def get_loss(self, docs, golds, scores):
scores = self.model.ops.flatten(scores)
@@ -399,9 +392,9 @@ class NeuralTagger(BaseThincComponent):
pretrained_dims=self.vocab.vectors_length)
@classmethod
def Model(cls, n_tags, token_vector_width, pretrained_dims=0):
def Model(cls, n_tags, token_vector_width, pretrained_dims=0, **cfg):
return build_tagger_model(n_tags, token_vector_width,
pretrained_dims)
pretrained_dims, **cfg)
def use_params(self, params):
with self.model.use_params(params):
@@ -573,15 +566,10 @@ class SimilarityHook(BaseThincComponent):
yield self(doc)
def predict(self, doc1, doc2):
return self.model.predict([(doc1.tensor, doc2.tensor)])
return self.model.predict([(doc1, doc2)])
def update(self, doc1_tensor1_doc2_tensor2, golds, sgd=None, drop=0.):
doc1s, tensor1s, doc2s, tensor2s = doc1_tensor1_doc2_tensor2
sims, bp_sims = self.model.begin_update(zip(tensor1s, tensor2s),
drop=drop)
d_tensor1s, d_tensor2s = bp_sims(golds, sgd=sgd)
return d_tensor1s, d_tensor2s
def update(self, doc1_doc2, golds, sgd=None, drop=0.):
sims, bp_sims = self.model.begin_update(doc1_doc2, drop=drop)
def begin_training(self, _=tuple(), pipeline=None):
"""
@@ -636,15 +624,13 @@ class TextCategorizer(BaseThincComponent):
for j, label in enumerate(self.labels):
doc.cats[label] = float(scores[i, j])
def update(self, docs_tensors, golds, state=None, drop=0., sgd=None, losses=None):
docs, tensors = docs_tensors
def update(self, docs, golds, state=None, drop=0., sgd=None, losses=None):
scores, bp_scores = self.model.begin_update(docs, drop=drop)
loss, d_scores = self.get_loss(docs, golds, scores)
d_tensors = bp_scores(d_scores, sgd=sgd)
bp_scores(d_scores, sgd=sgd)
if losses is not None:
losses.setdefault(self.name, 0.0)
losses[self.name] += loss
return d_tensors
def get_loss(self, docs, golds, scores):
truths = numpy.zeros((len(golds), len(self.labels)), dtype='f')
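
The per-component pattern repeats across NeuralTagger, SimilarityHook and TextCategorizer: predict() and update() take Docs instead of (docs, tokvecs) tuples, and update() no longer returns a tokvec gradient. A sketch of the tagger's two methods, reassembled from the hunks above with indentation restored; the bare bp_tag_scores call is inferred from context, since the added line is not visible in the extract:

import numpy

# Sketch of NeuralTagger.predict/update after the change; `self.model` is
# the model produced by build_tagger_model.
def predict(self, docs):
    scores = self.model(docs)                 # the model runs its own tok2vec
    scores = self.model.ops.flatten(scores)
    guesses = scores.argmax(axis=1)
    if not isinstance(guesses, numpy.ndarray):
        guesses = guesses.get()               # copy back from GPU if necessary
    guesses = self.model.ops.unflatten(guesses, [len(d) for d in docs])
    return guesses

def update(self, docs, golds, drop=0., sgd=None, losses=None):
    if losses is not None and self.name not in losses:
        losses[self.name] = 0.
    tag_scores, bp_tag_scores = self.model.begin_update(docs, drop=drop)
    loss, d_tag_scores = self.get_loss(docs, golds, tag_scores)
    bp_tag_scores(d_tag_scores, sgd=sgd)   # nothing to return: no shared tokvecs
    if losses is not None:
        losses[self.name] += loss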

View File

@@ -147,10 +147,10 @@ def get_token_ids(states, int n_tokens):
nr_update = 0
def update_beam(TransitionSystem moves, int nr_feature, int max_steps,
states, tokvecs, golds,
states, golds,
state2vec, vec2scores,
int width, float density,
sgd=None, losses=None, drop=0.):
losses=None, drop=0.):
global nr_update
cdef MaxViolation violn
nr_update += 1

View File

@@ -48,7 +48,7 @@ from .. import util
from ..util import get_async, get_cuda_stream
from .._ml import zero_init, PrecomputableAffine, PrecomputableMaxouts
from .._ml import Tok2Vec, doc2feats, rebatch, fine_tune
from .._ml import Residual, drop_layer
from .._ml import Residual, drop_layer, flatten
from ..compat import json_dumps
from . import _parse_features
@@ -244,8 +244,9 @@ cdef class Parser:
hidden_width = util.env_opt('hidden_width', hidden_width)
parser_maxout_pieces = util.env_opt('parser_maxout_pieces', 2)
embed_size = util.env_opt('embed_size', 4000)
tensors = fine_tune(Tok2Vec(token_vector_width, embed_size,
pretrained_dims=cfg.get('pretrained_dims')))
tok2vec = Tok2Vec(token_vector_width, embed_size,
pretrained_dims=cfg.get('pretrained_dims', 0))
tok2vec = chain(tok2vec, flatten)
if parser_maxout_pieces == 1:
lower = PrecomputableAffine(hidden_width if depth >= 1 else nr_class,
nF=cls.nr_feature,
@@ -277,7 +278,7 @@ cdef class Parser:
'hidden_width': hidden_width,
'maxout_pieces': parser_maxout_pieces
}
return (tensors, lower, upper), cfg
return (tok2vec, lower, upper), cfg
def __init__(self, Vocab vocab, moves=True, model=True, **cfg):
"""
@@ -309,7 +310,6 @@ cdef class Parser:
cfg['beam_density'] = util.env_opt('beam_density', 0.0)
if 'pretrained_dims' not in cfg:
cfg['pretrained_dims'] = self.vocab.vectors.data.shape[1]
cfg.setdefault('cnn_maxout_pieces', 2)
self.cfg = cfg
if 'actions' in self.cfg:
for action, labels in self.cfg.get('actions', {}).items():
@@ -335,11 +335,11 @@ cdef class Parser:
beam_density = self.cfg.get('beam_density', 0.0)
cdef Beam beam
if beam_width == 1:
states = self.parse_batch([doc], [doc.tensor])
states = self.parse_batch([doc])
self.set_annotations([doc], states)
return doc
else:
beam = self.beam_parse([doc], [doc.tensor],
beam = self.beam_parse([doc],
beam_width=beam_width, beam_density=beam_density)[0]
output = self.moves.get_beam_annot(beam)
state = <StateClass>beam.at(0)
@@ -368,11 +368,10 @@ cdef class Parser:
cdef Beam beam
for docs in cytoolz.partition_all(batch_size, docs):
docs = list(docs)
tokvecs = [doc.tensor for doc in docs]
if beam_width == 1:
parse_states = self.parse_batch(docs, tokvecs)
parse_states = self.parse_batch(docs)
else:
beams = self.beam_parse(docs, tokvecs,
beams = self.beam_parse(docs,
beam_width=beam_width, beam_density=beam_density)
parse_states = []
for beam in beams:
@@ -380,7 +379,7 @@ cdef class Parser:
self.set_annotations(docs, parse_states)
yield from docs
def parse_batch(self, docs, tokvecses):
def parse_batch(self, docs):
cdef:
precompute_hiddens state2vec
StateClass state
@@ -391,21 +390,15 @@ cdef class Parser:
int nr_class, nr_feat, nr_piece, nr_dim, nr_state
if isinstance(docs, Doc):
docs = [docs]
if isinstance(tokvecses, np.ndarray):
tokvecses = [tokvecses]
if USE_FINE_TUNE:
tokvecs = self.model[0].ops.flatten(self.model[0]((docs, tokvecses)))
else:
tokvecs = self.model[0].ops.flatten(tokvecses)
cuda_stream = get_cuda_stream()
(tokvecs, bp_tokvecs), state2vec, vec2scores = self.get_batch_model(docs, cuda_stream,
0.0)
nr_state = len(docs)
nr_class = self.moves.n_moves
nr_dim = tokvecs.shape[1]
nr_feat = self.nr_feature
cuda_stream = get_cuda_stream()
state2vec, vec2scores = self.get_batch_model(nr_state, tokvecs,
cuda_stream, 0.0)
nr_piece = state2vec.nP
states = self.moves.init_batch(docs)
@@ -448,19 +441,15 @@ cdef class Parser:
next_step.push_back(st)
return states
def beam_parse(self, docs, tokvecses, int beam_width=3, float beam_density=0.001):
def beam_parse(self, docs, int beam_width=3, float beam_density=0.001):
cdef Beam beam
cdef np.ndarray scores
cdef Doc doc
cdef int nr_class = self.moves.n_moves
cdef StateClass stcls, output
if USE_FINE_TUNE:
tokvecs = self.model[0].ops.flatten(self.model[0]((docs, tokvecses)))
else:
tokvecs = self.model[0].ops.flatten(tokvecses)
cuda_stream = get_cuda_stream()
state2vec, vec2scores = self.get_batch_model(len(docs), tokvecs,
cuda_stream, 0.0)
(tokvecs, bp_tokvecs), state2vec, vec2scores = self.get_batch_model(docs, cuda_stream,
0.0)
beams = []
cdef int offset = 0
cdef int j = 0
@@ -520,30 +509,24 @@ cdef class Parser:
free(scores)
free(token_ids)
def update(self, docs_tokvecs, golds, drop=0., sgd=None, losses=None):
def update(self, docs, golds, drop=0., sgd=None, losses=None):
if not any(self.moves.has_gold(gold) for gold in golds):
return None
if self.cfg.get('beam_width', 1) >= 2 and numpy.random.random() >= 0.5:
return self.update_beam(docs_tokvecs, golds,
return self.update_beam(docs, golds,
self.cfg['beam_width'], self.cfg['beam_density'],
drop=drop, sgd=sgd, losses=losses)
if losses is not None and self.name not in losses:
losses[self.name] = 0.
docs, tokvec_lists = docs_tokvecs
if isinstance(docs, Doc) and isinstance(golds, GoldParse):
docs = [docs]
golds = [golds]
if USE_FINE_TUNE:
my_tokvecs, bp_my_tokvecs = self.model[0].begin_update(docs_tokvecs, drop=drop)
tokvecs = self.model[0].ops.flatten(my_tokvecs)
else:
tokvecs = self.model[0].ops.flatten(docs_tokvecs[1])
cuda_stream = get_cuda_stream()
states, golds, max_steps = self._init_gold_batch(docs, golds)
state2vec, vec2scores = self.get_batch_model(len(states), tokvecs, cuda_stream,
0.0)
(tokvecs, bp_tokvecs), state2vec, vec2scores = self.get_batch_model(docs, cuda_stream,
0.0)
todo = [(s, g) for (s, g) in zip(states, golds)
if not s.is_final() and g is not None]
if not todo:
@@ -587,13 +570,9 @@ cdef class Parser:
if n_steps >= max_steps:
break
self._make_updates(d_tokvecs,
backprops, sgd, cuda_stream)
d_tokvecs = self.model[0].ops.unflatten(d_tokvecs, [len(d) for d in docs])
if USE_FINE_TUNE:
d_tokvecs = bp_my_tokvecs(d_tokvecs, sgd=sgd)
return d_tokvecs
bp_tokvecs, backprops, sgd, cuda_stream)
def update_beam(self, docs_tokvecs, golds, width=None, density=None,
def update_beam(self, docs, golds, width=None, density=None,
drop=0., sgd=None, losses=None):
if not any(self.moves.has_gold(gold) for gold in golds):
return None
@@ -605,26 +584,20 @@ cdef class Parser:
density = self.cfg.get('beam_density', 0.0)
if losses is not None and self.name not in losses:
losses[self.name] = 0.
docs, tokvecs = docs_tokvecs
lengths = [len(d) for d in docs]
assert min(lengths) >= 1
if USE_FINE_TUNE:
my_tokvecs, bp_my_tokvecs = self.model[0].begin_update(docs_tokvecs, drop=drop)
tokvecs = self.model[0].ops.flatten(my_tokvecs)
else:
tokvecs = self.model[0].ops.flatten(tokvecs)
states = self.moves.init_batch(docs)
for gold in golds:
self.moves.preprocess_gold(gold)
cuda_stream = get_cuda_stream()
state2vec, vec2scores = self.get_batch_model(len(states), tokvecs, cuda_stream, 0.0)
(tokvecs, bp_tokvecs), state2vec, vec2scores = self.get_batch_model(docs, cuda_stream, 0.0)
states_d_scores, backprops = _beam_utils.update_beam(self.moves, self.nr_feature, 500,
states, tokvecs, golds,
states, golds,
state2vec, vec2scores,
width, density,
sgd=sgd, drop=drop, losses=losses)
drop=drop, losses=losses)
backprop_lower = []
cdef float batch_size = len(docs)
for i, d_scores in enumerate(states_d_scores):
@@ -642,20 +615,7 @@ cdef class Parser:
else:
backprop_lower.append((ids, d_vector, bp_vectors))
d_tokvecs = self.model[0].ops.allocate(tokvecs.shape)
self._make_updates(d_tokvecs, backprop_lower, sgd, cuda_stream)
d_tokvecs = self.model[0].ops.unflatten(d_tokvecs, lengths)
if USE_FINE_TUNE:
d_tokvecs = bp_my_tokvecs(d_tokvecs, sgd=sgd)
return d_tokvecs
def _pad_tokvecs(self, tokvecs):
# Add a vector for missing values at the start of tokvecs
xp = get_array_module(tokvecs)
pad = xp.zeros((1, tokvecs.shape[1]), dtype=tokvecs.dtype)
return xp.vstack((pad, tokvecs))
def _unpad_tokvecs(self, d_tokvecs):
return d_tokvecs[1:]
self._make_updates(d_tokvecs, bp_tokvecs, backprop_lower, sgd, cuda_stream)
def _init_gold_batch(self, whole_docs, whole_golds):
"""Make a square batch, of length equal to the shortest doc. A long
@@ -693,7 +653,7 @@ cdef class Parser:
max_moves = max(max_moves, len(oracle_actions))
return states, golds, max_moves
def _make_updates(self, d_tokvecs, backprops, sgd, cuda_stream=None):
def _make_updates(self, d_tokvecs, bp_tokvecs, backprops, sgd, cuda_stream=None):
# Tells CUDA to block, so our async copies complete.
if cuda_stream is not None:
cuda_stream.synchronize()
@@ -704,6 +664,7 @@ cdef class Parser:
d_state_features *= mask.reshape(ids.shape + (1,))
self.model[0].ops.scatter_add(d_tokvecs, ids * mask,
d_state_features)
bp_tokvecs(d_tokvecs, sgd=sgd)
@property
def move_names(self):
@@ -713,11 +674,12 @@ cdef class Parser:
names.append(name)
return names
def get_batch_model(self, batch_size, tokvecs, stream, dropout):
_, lower, upper = self.model
state2vec = precompute_hiddens(batch_size, tokvecs,
def get_batch_model(self, docs, stream, dropout):
tok2vec, lower, upper = self.model
tokvecs, bp_tokvecs = tok2vec.begin_update(docs, drop=dropout)
state2vec = precompute_hiddens(len(docs), tokvecs,
lower, stream, drop=dropout)
return state2vec, upper
return (tokvecs, bp_tokvecs), state2vec, upper
nr_feature = 8
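
The thread running through the parser changes: the tok2vec layer is now the first element of self.model, it is applied inside get_batch_model, and its backprop callback is handed to _make_updates instead of a gradient being returned to a shared CNN. A sketch reassembled from the hunks above with indentation restored; the loop header and the first lines of _make_updates are filled in from surrounding context and are assumptions, since the hunk only shows its tail:

# Sketch of the new plumbing; `self.model` is the parser's (tok2vec, lower,
# upper) triple produced by Parser.Model.
def get_batch_model(self, docs, stream, dropout):
    tok2vec, lower, upper = self.model
    # Run the parser's own CNN over the Docs and keep its backprop callback.
    tokvecs, bp_tokvecs = tok2vec.begin_update(docs, drop=dropout)
    state2vec = precompute_hiddens(len(docs), tokvecs, lower, stream,
                                   drop=dropout)
    return (tokvecs, bp_tokvecs), state2vec, upper

def _make_updates(self, d_tokvecs, bp_tokvecs, backprops, sgd, cuda_stream=None):
    # Tells CUDA to block, so our async copies complete.
    if cuda_stream is not None:
        cuda_stream.synchronize()
    for ids, d_vector, bp_vector in backprops:        # assumed loop header
        d_state_features = bp_vector(d_vector, sgd=sgd)
        mask = ids >= 0
        d_state_features *= mask.reshape(ids.shape + (1,))
        self.model[0].ops.scatter_add(d_tokvecs, ids * mask,
                                      d_state_features)
    # New: backprop straight through the parser-owned tok2vec layer.
    bp_tokvecs(d_tokvecs, sgd=sgd)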

View File

@@ -61,33 +61,22 @@ def test_predict_doc(parser, tok2vec, model, doc):
parser(doc)
def test_update_doc(parser, tok2vec, model, doc, gold):
def test_update_doc(parser, model, doc, gold):
parser.model = model
tokvecs, bp_tokvecs = tok2vec.begin_update([doc])
d_tokvecs = parser.update(([doc], tokvecs), [gold])
assert d_tokvecs[0].shape == tokvecs[0].shape
def optimize(weights, gradient, key=None):
weights -= 0.001 * gradient
bp_tokvecs(d_tokvecs, sgd=optimize)
assert d_tokvecs[0].sum() == 0.
parser.update([doc], [gold], sgd=optimize)
def test_predict_doc_beam(parser, tok2vec, model, doc):
doc.tensor = tok2vec([doc])[0]
def test_predict_doc_beam(parser, model, doc):
parser.model = model
parser(doc, beam_width=32, beam_density=0.001)
for word in doc:
print(word.text, word.head, word.dep_)
def test_update_doc_beam(parser, tok2vec, model, doc, gold):
def test_update_doc_beam(parser, model, doc, gold):
parser.model = model
tokvecs, bp_tokvecs = tok2vec.begin_update([doc])
d_tokvecs = parser.update_beam(([doc], tokvecs), [gold])
assert d_tokvecs[0].shape == tokvecs[0].shape
def optimize(weights, gradient, key=None):
weights -= 0.001 * gradient
bp_tokvecs(d_tokvecs, sgd=optimize)
assert d_tokvecs[0].sum() == 0.
parser.update_beam([doc], [gold], sgd=optimize)
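
Reassembled, the simplified tests make the new calling convention concrete: no tok2vec fixture and no manual tokvec or gradient plumbing, just Docs and golds. The parser, model, doc and gold pytest fixtures are defined earlier in the test module.

# Sketch of the updated tests, indentation restored from the hunk above.
def test_update_doc(parser, model, doc, gold):
    parser.model = model
    def optimize(weights, gradient, key=None):
        weights -= 0.001 * gradient
    parser.update([doc], [gold], sgd=optimize)


def test_predict_doc_beam(parser, model, doc):
    parser.model = model
    parser(doc, beam_width=32, beam_density=0.001)
    for word in doc:
        print(word.text, word.head, word.dep_)


def test_update_doc_beam(parser, model, doc, gold):
    parser.model = model
    def optimize(weights, gradient, key=None):
        weights -= 0.001 * gradient
    parser.update_beam([doc], [gold], sgd=optimize)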