mirror of https://github.com/explosion/spaCy.git
synced 2024-12-25 09:26:27 +03:00
Don't share CNN, to reduce complexities
This commit is contained in:
parent 1d73dec8b1
commit 20193371f5
20  spacy/_ml.py
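The gist of the change: pipeline components stop consuming (docs, tokvecs) tuples produced by a shared token_vectors step and instead build and update their own Tok2Vec CNN from plain docs. A minimal sketch of the old versus new update() calling convention, using toy stand-in classes rather than spaCy's real components:

    class SharedCNNComponent:
        """Old convention: the trainer pre-computes token vectors with a shared
        CNN and hands every component (docs, tokvecs) tuples; the component
        returns d_tokvecs so the shared CNN can be updated once at the end."""
        def update(self, docs_tokvecs, golds, drop=0., sgd=None, losses=None):
            docs, tokvecses = docs_tokvecs
            d_tokvecses = [tv * 0. for tv in tokvecses]   # placeholder gradient
            return d_tokvecses


    class PrivateCNNComponent:
        """New convention: the component owns its CNN, takes plain docs, and
        backprops into its own weights internally; nothing is handed back."""
        def update(self, docs, golds, drop=0., sgd=None, losses=None):
            return None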
@@ -226,8 +226,8 @@ def drop_layer(layer, factor=2.):
     return model


-def Tok2Vec(width, embed_size, pretrained_dims=0, **kwargs):
-    assert pretrained_dims is not None
+def Tok2Vec(width, embed_size, **kwargs):
+    pretrained_dims = kwargs.get('pretrained_dims', 0)
     cnn_maxout_pieces = kwargs.get('cnn_maxout_pieces', 3)
     cols = [ID, NORM, PREFIX, SUFFIX, SHAPE, ORTH]
     with Model.define_operators({'>>': chain, '|': concatenate, '**': clone, '+': add}):
@@ -474,20 +474,18 @@ def getitem(i):
         return X[i], None
     return layerize(getitem_fwd)


 def build_tagger_model(nr_class, token_vector_width, pretrained_dims=0, **cfg):
     embed_size = util.env_opt('embed_size', 4000)
     with Model.define_operators({'>>': chain, '+': add}):
-        # Input: (doc, tensor) tuples
-        private_tok2vec = Tok2Vec(token_vector_width, embed_size,
-                                  pretrained_dims=pretrained_dims)
-        model = (
-            fine_tune(private_tok2vec)
-            >> with_flatten(
-                Maxout(token_vector_width, token_vector_width)
-                >> Softmax(nr_class, token_vector_width)
-            )
+        tok2vec = Tok2Vec(token_vector_width, embed_size,
+                          pretrained_dims=pretrained_dims)
+        model = with_flatten(
+            tok2vec
+            >> Softmax(nr_class, token_vector_width)
         )
     model.nI = None
+    model.tok2vec = tok2vec
     return model
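With this change the tagger model is just the component's private CNN chained into a Softmax and wrapped in with_flatten, and the CNN is exposed as model.tok2vec. A rough numpy illustration of the flatten-apply-split pattern that with_flatten provides (illustrative only, not Thinc's implementation; the 128/17 widths are made up):

    import numpy

    def with_flatten_sketch(layer, docs_arrays):
        # Concatenate per-doc (n_tokens, width) arrays, run the layer once over
        # all tokens, then split the output back into per-doc pieces.
        lengths = [arr.shape[0] for arr in docs_arrays]
        flat = numpy.concatenate(docs_arrays, axis=0)
        flat_out = layer(flat)
        starts = numpy.cumsum(lengths)[:-1]
        return numpy.split(flat_out, starts, axis=0)

    # Toy "layer": project 128-dim token vectors to 17 tag scores.
    weights = numpy.random.randn(128, 17).astype('f')
    docs_arrays = [numpy.random.randn(n, 128).astype('f') for n in (5, 9)]
    scores = with_flatten_sketch(lambda X: X @ weights, docs_arrays)
    assert [s.shape for s in scores] == [(5, 17), (9, 17)]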
@@ -3,12 +3,13 @@
 # https://github.com/pypa/warehouse/blob/master/warehouse/__about__.py

 __title__ = 'spacy-nightly'
-__version__ = '2.0.0a14'
+__version__ = '2.0.0a15'
 __summary__ = 'Industrial-strength Natural Language Processing (NLP) with Python and Cython'
 __uri__ = 'https://spacy.io'
 __author__ = 'Explosion AI'
 __email__ = 'contact@explosion.ai'
 __license__ = 'MIT'
+__release__ = False

 __docs_models__ = 'https://spacy.io/docs/usage/models'
 __download_url__ = 'https://github.com/explosion/spacy-models/releases/download'
@@ -55,7 +55,7 @@ def train(cmd, lang, output_dir, train_data, dev_data, n_iter=20, n_sents=0,
         prints(dev_path, title="Development data not found", exits=1)


-    pipeline = ['token_vectors', 'tags', 'dependencies', 'entities']
+    pipeline = ['tags', 'dependencies', 'entities']
     if no_tagger and 'tags' in pipeline: pipeline.remove('tags')
     if no_parser and 'dependencies' in pipeline: pipeline.remove('dependencies')
     if no_entities and 'entities' in pipeline: pipeline.remove('entities')
@@ -303,31 +303,17 @@ class Language(object):
         if self._optimizer is None:
             self._optimizer = Adam(Model.ops, 0.001)
         sgd = self._optimizer
-        tok2vec = self.pipeline[0]
         grads = {}
         def get_grads(W, dW, key=None):
             grads[key] = (W, dW)
-        pipes = list(self.pipeline[1:])
+        pipes = list(self.pipeline)
         random.shuffle(pipes)
-        tokvecses, bp_tokvecses = tok2vec.model.begin_update(docs, drop=drop)
-        all_d_tokvecses = [tok2vec.model.ops.allocate(tv.shape) for tv in tokvecses]
         for proc in pipes:
             if not hasattr(proc, 'update'):
                 continue
-            d_tokvecses = proc.update((docs, tokvecses), golds,
-                                      drop=drop, sgd=get_grads, losses=losses)
-            if update_shared and d_tokvecses is not None:
-                for i, d_tv in enumerate(d_tokvecses):
-                    all_d_tokvecses[i] += d_tv
-        if update_shared and bp_tokvecses is not None:
-            bp_tokvecses(all_d_tokvecses, sgd=sgd)
+            proc.update(docs, golds, drop=drop, sgd=get_grads, losses=losses)
         for key, (W, dW) in grads.items():
             sgd(W, dW, key=key)
-        # Clear the tensor variable, to free GPU memory.
-        # If we don't do this, the memory leak gets pretty
-        # bad, because we may be holding part of a batch.
-        for doc in docs:
-            doc.tensor = None

     def preprocess_gold(self, docs_golds):
         """Can be called before training to pre-process gold data. By default,
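Language.update no longer runs a shared tok2vec forward pass or gathers d_tokvecses: it shuffles the components and lets each one update itself on the raw docs, collecting weight gradients in a dict and applying the optimizer once per parameter. A stripped-down sketch of that loop shape (toy code, not the actual Language class):

    import random

    def update_sketch(pipeline, docs, golds, sgd, drop=0.):
        # Collect (weights, gradient) pairs from every component, keyed by
        # parameter id, then apply the optimizer once per parameter.
        grads = {}
        def get_grads(W, dW, key=None):
            grads[key] = (W, dW)
        pipes = list(pipeline)        # every component, no special tok2vec step
        random.shuffle(pipes)
        for proc in pipes:
            if not hasattr(proc, 'update'):
                continue
            proc.update(docs, golds, drop=drop, sgd=get_grads)
        for key, (W, dW) in grads.items():
            sgd(W, dW, key=key)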
@@ -371,8 +357,6 @@ class Language(object):
         **cfg: Config parameters.
         returns: An optimizer
         """
-        if self.parser:
-            self.pipeline.append(NeuralLabeller(self.vocab))
         # Populate vocab
         if get_gold_tuples is not None:
             for _, annots_brackets in get_gold_tuples():
@@ -418,7 +402,6 @@ class Language(object):
         assert len(docs) == len(golds)
         for doc, gold in zip(docs, golds):
             scorer.score(doc, gold)
-            doc.tensor = None
         return scorer

     @contextmanager
@@ -299,27 +299,25 @@ class NeuralTagger(BaseThincComponent):
         self.cfg.setdefault('cnn_maxout_pieces', 2)

     def __call__(self, doc):
-        tags = self.predict(([doc], [doc.tensor]))
+        tags = self.predict([doc])
         self.set_annotations([doc], tags)
         return doc

     def pipe(self, stream, batch_size=128, n_threads=-1):
         for docs in cytoolz.partition_all(batch_size, stream):
             docs = list(docs)
-            tokvecs = [d.tensor for d in docs]
-            tag_ids = self.predict((docs, tokvecs))
+            tag_ids = self.predict(docs)
             self.set_annotations(docs, tag_ids)
             yield from docs

-    def predict(self, docs_tokvecs):
-        scores = self.model(docs_tokvecs)
+    def predict(self, docs):
+        scores = self.model(docs)
         scores = self.model.ops.flatten(scores)
         guesses = scores.argmax(axis=1)
         if not isinstance(guesses, numpy.ndarray):
             guesses = guesses.get()
-        tokvecs = docs_tokvecs[1]
         guesses = self.model.ops.unflatten(guesses,
-                                           [tv.shape[0] for tv in tokvecs])
+                                           [len(d) for d in docs])
         return guesses

     def set_annotations(self, docs, batch_tag_ids):
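NeuralTagger.pipe no longer gathers doc.tensor arrays before predicting; batches of plain docs go straight into predict(), and the flat predictions are split back by doc length. A small sketch of that batching shape, with itertools.islice standing in for cytoolz.partition_all (illustrative only, not spaCy code):

    from itertools import islice

    def pipe_sketch(tagger, stream, batch_size=128):
        # Chunk the incoming doc stream and tag each chunk from the docs alone;
        # no pre-computed tensors are threaded through.
        stream = iter(stream)
        while True:
            docs = list(islice(stream, batch_size))
            if not docs:
                break
            tag_ids = tagger.predict(docs)
            tagger.set_annotations(docs, tag_ids)
            yield from docs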
@@ -339,20 +337,15 @@ class NeuralTagger(BaseThincComponent):
             idx += 1
         doc.is_tagged = True

-    def update(self, docs_tokvecs, golds, drop=0., sgd=None, losses=None):
+    def update(self, docs, golds, drop=0., sgd=None, losses=None):
         if losses is not None and self.name not in losses:
             losses[self.name] = 0.
-        docs, tokvecs = docs_tokvecs

-        if self.model.nI is None:
-            self.model.nI = tokvecs[0].shape[1]
-        tag_scores, bp_tag_scores = self.model.begin_update(docs_tokvecs, drop=drop)
+        tag_scores, bp_tag_scores = self.model.begin_update(docs, drop=drop)
         loss, d_tag_scores = self.get_loss(docs, golds, tag_scores)

-        d_tokvecs = bp_tag_scores(d_tag_scores, sgd=sgd)
         if losses is not None:
             losses[self.name] += loss
-        return d_tokvecs

     def get_loss(self, docs, golds, scores):
         scores = self.model.ops.flatten(scores)
@@ -399,9 +392,9 @@ class NeuralTagger(BaseThincComponent):
                                   pretrained_dims=self.vocab.vectors_length)

     @classmethod
-    def Model(cls, n_tags, token_vector_width, pretrained_dims=0):
+    def Model(cls, n_tags, token_vector_width, pretrained_dims=0, **cfg):
         return build_tagger_model(n_tags, token_vector_width,
-                                  pretrained_dims)
+                                  pretrained_dims, **cfg)

     def use_params(self, params):
         with self.model.use_params(params):
@@ -573,15 +566,10 @@ class SimilarityHook(BaseThincComponent):
             yield self(doc)

     def predict(self, doc1, doc2):
-        return self.model.predict([(doc1.tensor, doc2.tensor)])
+        return self.model.predict([(doc1, doc2)])

-    def update(self, doc1_tensor1_doc2_tensor2, golds, sgd=None, drop=0.):
-        doc1s, tensor1s, doc2s, tensor2s = doc1_tensor1_doc2_tensor2
-        sims, bp_sims = self.model.begin_update(zip(tensor1s, tensor2s),
-                                                drop=drop)
-        d_tensor1s, d_tensor2s = bp_sims(golds, sgd=sgd)
-
-        return d_tensor1s, d_tensor2s
+    def update(self, doc1_doc2, golds, sgd=None, drop=0.):
+        sims, bp_sims = self.model.begin_update(doc1_doc2, drop=drop)

     def begin_training(self, _=tuple(), pipeline=None):
         """
@@ -636,15 +624,13 @@ class TextCategorizer(BaseThincComponent):
             for j, label in enumerate(self.labels):
                 doc.cats[label] = float(scores[i, j])

-    def update(self, docs_tensors, golds, state=None, drop=0., sgd=None, losses=None):
-        docs, tensors = docs_tensors
+    def update(self, docs, golds, state=None, drop=0., sgd=None, losses=None):
         scores, bp_scores = self.model.begin_update(docs, drop=drop)
         loss, d_scores = self.get_loss(docs, golds, scores)
-        d_tensors = bp_scores(d_scores, sgd=sgd)
+        bp_scores(d_scores, sgd=sgd)
         if losses is not None:
             losses.setdefault(self.name, 0.0)
             losses[self.name] += loss
-        return d_tensors

     def get_loss(self, docs, golds, scores):
         truths = numpy.zeros((len(golds), len(self.labels)), dtype='f')
@@ -147,10 +147,10 @@ def get_token_ids(states, int n_tokens):

 nr_update = 0
 def update_beam(TransitionSystem moves, int nr_feature, int max_steps,
-                states, tokvecs, golds,
+                states, golds,
                 state2vec, vec2scores,
                 int width, float density,
-                sgd=None, losses=None, drop=0.):
+                losses=None, drop=0.):
     global nr_update
     cdef MaxViolation violn
     nr_update += 1
@@ -48,7 +48,7 @@ from .. import util
 from ..util import get_async, get_cuda_stream
 from .._ml import zero_init, PrecomputableAffine, PrecomputableMaxouts
 from .._ml import Tok2Vec, doc2feats, rebatch, fine_tune
-from .._ml import Residual, drop_layer
+from .._ml import Residual, drop_layer, flatten
 from ..compat import json_dumps

 from . import _parse_features
@@ -244,8 +244,9 @@ cdef class Parser:
         hidden_width = util.env_opt('hidden_width', hidden_width)
         parser_maxout_pieces = util.env_opt('parser_maxout_pieces', 2)
         embed_size = util.env_opt('embed_size', 4000)
-        tensors = fine_tune(Tok2Vec(token_vector_width, embed_size,
-                                    pretrained_dims=cfg.get('pretrained_dims')))
+        tok2vec = Tok2Vec(token_vector_width, embed_size,
+                          pretrained_dims=cfg.get('pretrained_dims', 0))
+        tok2vec = chain(tok2vec, flatten)
         if parser_maxout_pieces == 1:
             lower = PrecomputableAffine(hidden_width if depth >= 1 else nr_class,
                                         nF=cls.nr_feature,
@@ -277,7 +278,7 @@ cdef class Parser:
             'hidden_width': hidden_width,
             'maxout_pieces': parser_maxout_pieces
         }
-        return (tensors, lower, upper), cfg
+        return (tok2vec, lower, upper), cfg

     def __init__(self, Vocab vocab, moves=True, model=True, **cfg):
         """
@@ -309,7 +310,6 @@ cdef class Parser:
         cfg['beam_density'] = util.env_opt('beam_density', 0.0)
         if 'pretrained_dims' not in cfg:
             cfg['pretrained_dims'] = self.vocab.vectors.data.shape[1]
-        cfg.setdefault('cnn_maxout_pieces', 2)
         self.cfg = cfg
         if 'actions' in self.cfg:
             for action, labels in self.cfg.get('actions', {}).items():
@@ -335,11 +335,11 @@ cdef class Parser:
         beam_density = self.cfg.get('beam_density', 0.0)
         cdef Beam beam
         if beam_width == 1:
-            states = self.parse_batch([doc], [doc.tensor])
+            states = self.parse_batch([doc])
             self.set_annotations([doc], states)
             return doc
         else:
-            beam = self.beam_parse([doc], [doc.tensor],
+            beam = self.beam_parse([doc],
                                    beam_width=beam_width, beam_density=beam_density)[0]
             output = self.moves.get_beam_annot(beam)
             state = <StateClass>beam.at(0)
@@ -368,11 +368,10 @@ cdef class Parser:
         cdef Beam beam
         for docs in cytoolz.partition_all(batch_size, docs):
             docs = list(docs)
-            tokvecs = [doc.tensor for doc in docs]
             if beam_width == 1:
-                parse_states = self.parse_batch(docs, tokvecs)
+                parse_states = self.parse_batch(docs)
             else:
-                beams = self.beam_parse(docs, tokvecs,
+                beams = self.beam_parse(docs,
                                         beam_width=beam_width, beam_density=beam_density)
                 parse_states = []
                 for beam in beams:
@@ -380,7 +379,7 @@ cdef class Parser:
             self.set_annotations(docs, parse_states)
             yield from docs

-    def parse_batch(self, docs, tokvecses):
+    def parse_batch(self, docs):
         cdef:
             precompute_hiddens state2vec
             StateClass state
@@ -391,21 +390,15 @@ cdef class Parser:
             int nr_class, nr_feat, nr_piece, nr_dim, nr_state
         if isinstance(docs, Doc):
             docs = [docs]
-        if isinstance(tokvecses, np.ndarray):
-            tokvecses = [tokvecses]

-        if USE_FINE_TUNE:
-            tokvecs = self.model[0].ops.flatten(self.model[0]((docs, tokvecses)))
-        else:
-            tokvecs = self.model[0].ops.flatten(tokvecses)
+        cuda_stream = get_cuda_stream()
+        (tokvecs, bp_tokvecs), state2vec, vec2scores = self.get_batch_model(docs, cuda_stream,
+                                                                            0.0)
         nr_state = len(docs)
         nr_class = self.moves.n_moves
         nr_dim = tokvecs.shape[1]
         nr_feat = self.nr_feature

-        cuda_stream = get_cuda_stream()
-        state2vec, vec2scores = self.get_batch_model(nr_state, tokvecs,
-                                                     cuda_stream, 0.0)
         nr_piece = state2vec.nP

         states = self.moves.init_batch(docs)
@@ -448,19 +441,15 @@ cdef class Parser:
                 next_step.push_back(st)
         return states

-    def beam_parse(self, docs, tokvecses, int beam_width=3, float beam_density=0.001):
+    def beam_parse(self, docs, int beam_width=3, float beam_density=0.001):
         cdef Beam beam
         cdef np.ndarray scores
         cdef Doc doc
         cdef int nr_class = self.moves.n_moves
         cdef StateClass stcls, output
-        if USE_FINE_TUNE:
-            tokvecs = self.model[0].ops.flatten(self.model[0]((docs, tokvecses)))
-        else:
-            tokvecs = self.model[0].ops.flatten(tokvecses)
         cuda_stream = get_cuda_stream()
-        state2vec, vec2scores = self.get_batch_model(len(docs), tokvecs,
-                                                     cuda_stream, 0.0)
+        (tokvecs, bp_tokvecs), state2vec, vec2scores = self.get_batch_model(docs, cuda_stream,
+                                                                            0.0)
         beams = []
         cdef int offset = 0
         cdef int j = 0
@@ -520,30 +509,24 @@ cdef class Parser:
             free(scores)
             free(token_ids)

-    def update(self, docs_tokvecs, golds, drop=0., sgd=None, losses=None):
+    def update(self, docs, golds, drop=0., sgd=None, losses=None):
         if not any(self.moves.has_gold(gold) for gold in golds):
             return None
         if self.cfg.get('beam_width', 1) >= 2 and numpy.random.random() >= 0.5:
-            return self.update_beam(docs_tokvecs, golds,
+            return self.update_beam(docs, golds,
                                     self.cfg['beam_width'], self.cfg['beam_density'],
                                     drop=drop, sgd=sgd, losses=losses)
         if losses is not None and self.name not in losses:
             losses[self.name] = 0.
-        docs, tokvec_lists = docs_tokvecs
         if isinstance(docs, Doc) and isinstance(golds, GoldParse):
             docs = [docs]
             golds = [golds]
-        if USE_FINE_TUNE:
-            my_tokvecs, bp_my_tokvecs = self.model[0].begin_update(docs_tokvecs, drop=drop)
-            tokvecs = self.model[0].ops.flatten(my_tokvecs)
-        else:
-            tokvecs = self.model[0].ops.flatten(docs_tokvecs[1])

         cuda_stream = get_cuda_stream()

         states, golds, max_steps = self._init_gold_batch(docs, golds)
-        state2vec, vec2scores = self.get_batch_model(len(states), tokvecs, cuda_stream,
-                                                     0.0)
+        (tokvecs, bp_tokvecs), state2vec, vec2scores = self.get_batch_model(docs, cuda_stream,
+                                                                            0.0)
         todo = [(s, g) for (s, g) in zip(states, golds)
                 if not s.is_final() and g is not None]
         if not todo:
@@ -587,13 +570,9 @@ cdef class Parser:
             if n_steps >= max_steps:
                 break
         self._make_updates(d_tokvecs,
-                           backprops, sgd, cuda_stream)
-        d_tokvecs = self.model[0].ops.unflatten(d_tokvecs, [len(d) for d in docs])
-        if USE_FINE_TUNE:
-            d_tokvecs = bp_my_tokvecs(d_tokvecs, sgd=sgd)
-        return d_tokvecs
+                           bp_tokvecs, backprops, sgd, cuda_stream)

-    def update_beam(self, docs_tokvecs, golds, width=None, density=None,
+    def update_beam(self, docs, golds, width=None, density=None,
                     drop=0., sgd=None, losses=None):
         if not any(self.moves.has_gold(gold) for gold in golds):
             return None
@@ -605,26 +584,20 @@ cdef class Parser:
             density = self.cfg.get('beam_density', 0.0)
         if losses is not None and self.name not in losses:
             losses[self.name] = 0.
-        docs, tokvecs = docs_tokvecs
         lengths = [len(d) for d in docs]
         assert min(lengths) >= 1
-        if USE_FINE_TUNE:
-            my_tokvecs, bp_my_tokvecs = self.model[0].begin_update(docs_tokvecs, drop=drop)
-            tokvecs = self.model[0].ops.flatten(my_tokvecs)
-        else:
-            tokvecs = self.model[0].ops.flatten(tokvecs)
         states = self.moves.init_batch(docs)
         for gold in golds:
             self.moves.preprocess_gold(gold)

         cuda_stream = get_cuda_stream()
-        state2vec, vec2scores = self.get_batch_model(len(states), tokvecs, cuda_stream, 0.0)
+        (tokvecs, bp_tokvecs), state2vec, vec2scores = self.get_batch_model(docs, cuda_stream, 0.0)

         states_d_scores, backprops = _beam_utils.update_beam(self.moves, self.nr_feature, 500,
-                                                             states, tokvecs, golds,
+                                                             states, golds,
                                                              state2vec, vec2scores,
                                                              width, density,
-                                                             sgd=sgd, drop=drop, losses=losses)
+                                                             drop=drop, losses=losses)
         backprop_lower = []
         cdef float batch_size = len(docs)
         for i, d_scores in enumerate(states_d_scores):
@@ -642,20 +615,7 @@ cdef class Parser:
             else:
                 backprop_lower.append((ids, d_vector, bp_vectors))
         d_tokvecs = self.model[0].ops.allocate(tokvecs.shape)
-        self._make_updates(d_tokvecs, backprop_lower, sgd, cuda_stream)
-        d_tokvecs = self.model[0].ops.unflatten(d_tokvecs, lengths)
-        if USE_FINE_TUNE:
-            d_tokvecs = bp_my_tokvecs(d_tokvecs, sgd=sgd)
-        return d_tokvecs
-
-    def _pad_tokvecs(self, tokvecs):
-        # Add a vector for missing values at the start of tokvecs
-        xp = get_array_module(tokvecs)
-        pad = xp.zeros((1, tokvecs.shape[1]), dtype=tokvecs.dtype)
-        return xp.vstack((pad, tokvecs))
-
-    def _unpad_tokvecs(self, d_tokvecs):
-        return d_tokvecs[1:]
+        self._make_updates(d_tokvecs, bp_tokvecs, backprop_lower, sgd, cuda_stream)

     def _init_gold_batch(self, whole_docs, whole_golds):
         """Make a square batch, of length equal to the shortest doc. A long
@@ -693,7 +653,7 @@ cdef class Parser:
                 max_moves = max(max_moves, len(oracle_actions))
         return states, golds, max_moves

-    def _make_updates(self, d_tokvecs, backprops, sgd, cuda_stream=None):
+    def _make_updates(self, d_tokvecs, bp_tokvecs, backprops, sgd, cuda_stream=None):
         # Tells CUDA to block, so our async copies complete.
         if cuda_stream is not None:
             cuda_stream.synchronize()
@@ -704,6 +664,7 @@ cdef class Parser:
             d_state_features *= mask.reshape(ids.shape + (1,))
             self.model[0].ops.scatter_add(d_tokvecs, ids * mask,
                                           d_state_features)
+        bp_tokvecs(d_tokvecs, sgd=sgd)

     @property
     def move_names(self):
|
@ -713,11 +674,12 @@ cdef class Parser:
|
||||||
names.append(name)
|
names.append(name)
|
||||||
return names
|
return names
|
||||||
|
|
||||||
def get_batch_model(self, batch_size, tokvecs, stream, dropout):
|
def get_batch_model(self, docs, stream, dropout):
|
||||||
_, lower, upper = self.model
|
tok2vec, lower, upper = self.model
|
||||||
state2vec = precompute_hiddens(batch_size, tokvecs,
|
tokvecs, bp_tokvecs = tok2vec.begin_update(docs, drop=dropout)
|
||||||
|
state2vec = precompute_hiddens(len(docs), tokvecs,
|
||||||
lower, stream, drop=dropout)
|
lower, stream, drop=dropout)
|
||||||
return state2vec, upper
|
return (tokvecs, bp_tokvecs), state2vec, upper
|
||||||
|
|
||||||
nr_feature = 8
|
nr_feature = 8
|
||||||
|
|
||||||
|
|
|
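get_batch_model now runs the parser's own Tok2Vec forward pass and returns the token vectors together with their backprop callback, which _make_updates later calls as bp_tokvecs(d_tokvecs, sgd=sgd). A toy numpy sketch of that forward-plus-callback pattern (illustrative only, a single linear layer standing in for the CNN):

    import numpy

    def begin_update_sketch(weights, X):
        # Forward pass that also returns a callback which, given the gradient
        # of the loss with respect to the output, updates `weights` and returns
        # the gradient with respect to the input.
        Y = X @ weights
        def backprop(dY, sgd=None):
            d_weights = X.T @ dY
            dX = dY @ weights.T
            if sgd is not None:
                sgd(weights, d_weights)
            return dX
        return Y, backprop

    def sgd(W, dW, key=None):
        W -= 0.001 * dW

    weights = numpy.zeros((128, 64), dtype='f')
    X = numpy.random.randn(10, 128).astype('f')
    tokvecs, bp_tokvecs = begin_update_sketch(weights, X)
    d_tokvecs = numpy.zeros_like(tokvecs)   # gathered by scatter_add in the real parser
    bp_tokvecs(d_tokvecs, sgd=sgd)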
@@ -61,33 +61,22 @@ def test_predict_doc(parser, tok2vec, model, doc):
     parser(doc)


-def test_update_doc(parser, tok2vec, model, doc, gold):
+def test_update_doc(parser, model, doc, gold):
     parser.model = model
-    tokvecs, bp_tokvecs = tok2vec.begin_update([doc])
-    d_tokvecs = parser.update(([doc], tokvecs), [gold])
-    assert d_tokvecs[0].shape == tokvecs[0].shape
     def optimize(weights, gradient, key=None):
         weights -= 0.001 * gradient
-    bp_tokvecs(d_tokvecs, sgd=optimize)
-    assert d_tokvecs[0].sum() == 0.
+    parser.update([doc], [gold], sgd=optimize)


-def test_predict_doc_beam(parser, tok2vec, model, doc):
-    doc.tensor = tok2vec([doc])[0]
+def test_predict_doc_beam(parser, model, doc):
     parser.model = model
     parser(doc, beam_width=32, beam_density=0.001)
-    for word in doc:
-        print(word.text, word.head, word.dep_)


-def test_update_doc_beam(parser, tok2vec, model, doc, gold):
+def test_update_doc_beam(parser, model, doc, gold):
     parser.model = model
-    tokvecs, bp_tokvecs = tok2vec.begin_update([doc])
-    d_tokvecs = parser.update_beam(([doc], tokvecs), [gold])
-    assert d_tokvecs[0].shape == tokvecs[0].shape
     def optimize(weights, gradient, key=None):
         weights -= 0.001 * gradient
-    bp_tokvecs(d_tokvecs, sgd=optimize)
-    assert d_tokvecs[0].sum() == 0.
+    parser.update_beam([doc], [gold], sgd=optimize)