# cython: infer_types=True
# cython: cdivision=True
# cython: boundscheck=False
# coding: utf-8
from __future__ import unicode_literals, print_function

from collections import OrderedDict
import ujson
import json
import numpy
cimport cython.parallel
import cytoolz
import numpy.random
cimport numpy as np
from cpython.ref cimport PyObject, Py_XDECREF
from cpython.exc cimport PyErr_CheckSignals, PyErr_SetFromErrno
from libc.math cimport exp
from libcpp.vector cimport vector
from libc.string cimport memset, memcpy
from libc.stdlib cimport calloc, free
from cymem.cymem cimport Pool
from thinc.typedefs cimport weight_t, class_t, hash_t
from thinc.extra.search cimport Beam
from thinc.api import chain, clone
from thinc.v2v import Model, Maxout, Affine
from thinc.misc import LayerNorm
from thinc.neural.ops import CupyOps
from thinc.neural.util import get_array_module
from thinc.linalg cimport Vec, VecVec
from thinc cimport openblas

from .._ml import zero_init, PrecomputableAffine, Tok2Vec, flatten
from .._ml import link_vectors_to_models, create_default_optimizer
from ..compat import json_dumps, copy_array
from ..tokens.doc cimport Doc
from ..gold cimport GoldParse
from .. import util
from .stateclass cimport StateClass
from ._state cimport StateC
from .transition_system cimport Transition
from . import _beam_utils, nonproj


def get_templates(*args, **kwargs):
    return []


DEBUG = False


def set_debug(val):
    global DEBUG
    DEBUG = val


cdef class precompute_hiddens:
    """Allow a model to be "primed" by pre-computing input features in bulk.

    This is used for the parser, where we want to take a batch of documents,
    and compute vectors for each (token, position) pair. These vectors can then
    be reused, especially for beam-search.

    Let's say we're using 12 features for each state, e.g. word at start of
    buffer, three words on stack, their children, etc. In the normal arc-eager
    system, a document of length N is processed in 2*N states. This means we'll
    create 2*N*12 feature vectors, but if we pre-compute, we only need N*12
    vector computations. The saving for beam-search is much better: with a beam
    of k, we'd normally make 2*N*12*k computations, so pre-computing saves a
    factor of k. This also gives a nice CPU/GPU division: we can do all our
    hard maths up front, packed into large multiplications, and do the
    hard-to-program parsing on the CPU.
    """
    cdef int nF, nO, nP
    cdef bint _is_synchronized
    cdef public object ops
    cdef np.ndarray _features
    cdef np.ndarray _cached
    cdef np.ndarray bias
    cdef object _cuda_stream
    cdef object _bp_hiddens

    def __init__(self, batch_size, tokvecs, lower_model, cuda_stream=None,
                 drop=0.):
        gpu_cached, bp_features = lower_model.begin_update(tokvecs, drop=drop)
        cdef np.ndarray cached
        if not isinstance(gpu_cached, numpy.ndarray):
            # Note the passing of cuda_stream here: it lets
            # cupy make the copy asynchronously.
            # We then have to block before first use.
            cached = gpu_cached.get(stream=cuda_stream)
        else:
            cached = gpu_cached
        if not isinstance(lower_model.b, numpy.ndarray):
            self.bias = lower_model.b.get()
        else:
            self.bias = lower_model.b
        self.nF = cached.shape[1]
        self.nP = getattr(lower_model, 'nP', 1)
        self.nO = cached.shape[2]
        self.ops = lower_model.ops
        self._is_synchronized = False
        self._cuda_stream = cuda_stream
        self._cached = cached
        self._bp_hiddens = bp_features

    cdef const float* get_feat_weights(self) except NULL:
        if not self._is_synchronized and self._cuda_stream is not None:
            self._cuda_stream.synchronize()
            self._is_synchronized = True
        return <float*>self._cached.data

    def __call__(self, X):
        return self.begin_update(X)[0]

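    # begin_update() reuses the feature rows pre-computed in __init__: for each
    # state it gathers self.nF rows from the cached table, sums them, adds the
    # lower model's bias and applies the nonlinearity. No per-state matrix
    # multiplication happens here; that work was done once, up front.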
    def begin_update(self, token_ids, drop=0.):
        cdef np.ndarray state_vector = numpy.zeros(
            (token_ids.shape[0], self.nO, self.nP), dtype='f')
        # This is tricky, but (assuming GPU available):
        # - Input to forward on CPU
        # - Output from forward on CPU
        # - Input to backward on GPU!
        # - Output from backward on GPU
        bp_hiddens = self._bp_hiddens

        feat_weights = self.get_feat_weights()
        cdef int[:, ::1] ids = token_ids
        sum_state_features(<float*>state_vector.data,
            feat_weights, &ids[0,0],
            token_ids.shape[0], self.nF, self.nO*self.nP)
        state_vector += self.bias
        state_vector, bp_nonlinearity = self._nonlinearity(state_vector)

        def backward(d_state_vector_ids, sgd=None):
            d_state_vector, token_ids = d_state_vector_ids
            d_state_vector = bp_nonlinearity(d_state_vector, sgd)
            # This will usually be on GPU
            if not isinstance(d_state_vector, self.ops.xp.ndarray):
                d_state_vector = self.ops.xp.array(d_state_vector)
            d_tokens = bp_hiddens((d_state_vector, token_ids), sgd)
            return d_tokens
        return state_vector, backward

    def _nonlinearity(self, state_vector):
        if self.nP == 1:
            state_vector = state_vector.reshape(state_vector.shape[:-1])
            mask = state_vector >= 0.
            state_vector *= mask
        else:
            state_vector, mask = self.ops.maxout(state_vector)

        def backprop_nonlinearity(d_best, sgd=None):
            if self.nP == 1:
                d_best *= mask
                d_best = d_best.reshape((d_best.shape + (1,)))
                return d_best
            else:
                return self.ops.backprop_maxout(d_best, mask, self.nP)
        return state_vector, backprop_nonlinearity


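# sum_state_features() is the inner loop of the pre-computed model: for each of
# the B states it sums F cached feature rows (each of width O) into the output.
# The first F*O floats of `cached` are padding rows, used whenever a context
# token is missing (token id < 0). Roughly the NumPy equivalent, as a sketch
# (assuming `cached_arr` is the same data reshaped to (n_tokens + 1, F, O),
# with block 0 holding the padding rows):
#
#     for b in range(B):
#         for f in range(F):
#             row = 0 if token_ids[b, f] < 0 else token_ids[b, f] + 1
#             output[b] += cached_arr[row, f]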
cdef void sum_state_features(float* output,
        const float* cached, const int* token_ids, int B, int F, int O) nogil:
    cdef int idx, b, f, i
    cdef const float* feature
    padding = cached
    cached += F * O
    cdef int id_stride = F*O
    cdef float one = 1.
    for b in range(B):
        for f in range(F):
            if token_ids[f] < 0:
                feature = &padding[f*O]
            else:
                idx = token_ids[f] * id_stride + f*O
                feature = &cached[idx]
            openblas.simple_axpy(&output[b*O], O,
                feature, one)
        token_ids += F


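# cpu_log_loss() computes the gradient of a cost-sensitive log loss. In effect
# the target distribution is the model's own softmax renormalised over the
# zero-cost ("gold") actions, so for a valid action i:
#
#     d_scores[i] = P(i | valid actions) - P(i | gold actions)
#
# where the second term only applies if costs[i] is no worse than the cost of
# the best gold action. Invalid actions get a zero gradient.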
cdef void cpu_log_loss(float* d_scores,
        const float* costs, const int* is_valid, const float* scores,
        int O) nogil:
    """Do multi-label log loss"""
    cdef double max_, gmax, Z, gZ
    best = arg_max_if_gold(scores, costs, is_valid, O)
    guess = arg_max_if_valid(scores, is_valid, O)
    Z = 1e-10
    gZ = 1e-10
    max_ = scores[guess]
    gmax = scores[best]
    for i in range(O):
        if is_valid[i]:
            Z += exp(scores[i] - max_)
            if costs[i] <= costs[best]:
                gZ += exp(scores[i] - gmax)
    for i in range(O):
        if not is_valid[i]:
            d_scores[i] = 0.
        elif costs[i] <= costs[best]:
            d_scores[i] = (exp(scores[i]-max_) / Z) - (exp(scores[i]-gmax)/gZ)
        else:
            d_scores[i] = exp(scores[i]-max_) / Z


cdef void cpu_regression_loss(float* d_scores,
        const float* costs, const int* is_valid, const float* scores,
        int O) nogil:
    cdef float eps = 2.
    best = arg_max_if_gold(scores, costs, is_valid, O)
    for i in range(O):
        if not is_valid[i]:
            d_scores[i] = 0.
        elif scores[i] < scores[best]:
            d_scores[i] = 0.
        else:
            # I doubt this is correct?
            # Looking for something like Huber loss
            diff = scores[i] - -costs[i]
            if diff > eps:
                d_scores[i] = eps
            elif diff < -eps:
                d_scores[i] = -eps
            else:
                d_scores[i] = diff


def _collect_states(beams):
    cdef StateClass state
    cdef Beam beam
    states = []
    for beam in beams:
        state = StateClass.borrow(<StateC*>beam.at(0))
        states.append(state)
    return states


cdef class Parser:
    """
    Base class of the DependencyParser and EntityRecognizer.
    """
    @classmethod
    def Model(cls, nr_class, **cfg):
        depth = util.env_opt('parser_hidden_depth', cfg.get('hidden_depth', 1))
        if depth != 1:
            raise ValueError("Currently parser depth is hard-coded to 1.")
        parser_maxout_pieces = util.env_opt('parser_maxout_pieces',
                                            cfg.get('maxout_pieces', 2))
        token_vector_width = util.env_opt('token_vector_width',
                                          cfg.get('token_vector_width', 128))
        hidden_width = util.env_opt('hidden_width', cfg.get('hidden_width', 128))
        embed_size = util.env_opt('embed_size', cfg.get('embed_size', 5000))
        hist_size = util.env_opt('history_feats', cfg.get('hist_size', 0))
        hist_width = util.env_opt('history_width', cfg.get('hist_width', 0))
        if hist_size != 0:
            raise ValueError("Currently history size is hard-coded to 0")
        if hist_width != 0:
            raise ValueError("Currently history width is hard-coded to 0")
        tok2vec = Tok2Vec(token_vector_width, embed_size,
                          pretrained_dims=cfg.get('pretrained_dims', 0))
        tok2vec = chain(tok2vec, flatten)
        lower = PrecomputableAffine(hidden_width,
                    nF=cls.nr_feature, nI=token_vector_width,
                    nP=parser_maxout_pieces)
        lower.nP = parser_maxout_pieces

        with Model.use_device('cpu'):
            upper = chain(
                clone(Maxout(hidden_width, hidden_width), depth-1),
                zero_init(Affine(nr_class, hidden_width, drop_factor=0.0))
            )

        cfg = {
            'nr_class': nr_class,
            'hidden_depth': depth,
            'token_vector_width': token_vector_width,
            'hidden_width': hidden_width,
            'maxout_pieces': parser_maxout_pieces,
            'hist_size': hist_size,
            'hist_width': hist_width
        }
        return (tok2vec, lower, upper), cfg

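    # Model() returns the network as three pieces, indexed elsewhere in this
    # class as self.model[0..2]:
    #   * tok2vec: embeds and encodes each Doc into one vector per token.
    #   * lower:   a PrecomputableAffine that turns token vectors into the
    #              per-feature rows consumed by precompute_hiddens.
    #   * upper:   the hidden-to-output layers mapping a state vector to one
    #              score per transition class.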
    def create_optimizer(self):
        return create_default_optimizer(self.model[0].ops,
                                        **self.cfg.get('optimizer', {}))

    def __init__(self, Vocab vocab, moves=True, model=True, **cfg):
        """Create a Parser.

        vocab (Vocab): The vocabulary object. Must be shared with documents
            to be processed. The value is set to the `.vocab` attribute.
        moves (TransitionSystem): Defines how the parse-state is created,
            updated and evaluated. The value is set to the `.moves` attribute
            unless True (default), in which case a new instance is created
            with `Parser.TransitionSystem()`.
        model (object): The statistical model. The value is set to the
            `.model` attribute unless True (default), in which case a new
            instance is created with `Parser.Model()`.
        **cfg: Arbitrary configuration parameters. Set to the `.cfg` attribute.
        """
        self.vocab = vocab
        if moves is True:
            self.moves = self.TransitionSystem(self.vocab.strings)
        else:
            self.moves = moves
        if 'beam_width' not in cfg:
            cfg['beam_width'] = util.env_opt('beam_width', 1)
        if 'beam_density' not in cfg:
            cfg['beam_density'] = util.env_opt('beam_density', 0.0)
        if 'pretrained_dims' not in cfg:
            cfg['pretrained_dims'] = self.vocab.vectors.data.shape[1]
        self.cfg = cfg
        self.model = model
        self._multitasks = []

    def __reduce__(self):
        return (Parser, (self.vocab, self.moves, self.model), None, None)

    def __call__(self, Doc doc, beam_width=None, beam_density=None):
        """Apply the parser or entity recognizer, setting the annotations onto
        the `Doc` object.

        doc (Doc): The document to be processed.
        """
        if beam_width is None:
            beam_width = self.cfg.get('beam_width', 1)
        if beam_density is None:
            beam_density = self.cfg.get('beam_density', 0.0)
        cdef Beam beam
        if beam_width == 1:
            states, tokvecs = self.parse_batch([doc])
            self.set_annotations([doc], states, tensors=tokvecs)
            return doc
        else:
            beams, tokvecs = self.beam_parse([doc],
                                             beam_width=beam_width,
                                             beam_density=beam_density)
            beam = beams[0]
            output = self.moves.get_beam_annot(beam)
            state = StateClass.borrow(<StateC*>beam.at(0))
            self.set_annotations([doc], [state], tensors=tokvecs)
            _cleanup(beam)
            return output

    def pipe(self, docs, int batch_size=256, int n_threads=2,
             beam_width=None, beam_density=None):
        """Process a stream of documents.

        docs: The sequence of documents to process.
        batch_size (int): Number of documents to accumulate into a working set.
        n_threads (int): The number of threads with which to work on the buffer
            in parallel.
        YIELDS (Doc): Documents, in order.
        """
        if beam_width is None:
            beam_width = self.cfg.get('beam_width', 1)
        if beam_density is None:
            beam_density = self.cfg.get('beam_density', 0.0)
        cdef Doc doc
        for batch in cytoolz.partition_all(batch_size, docs):
            batch_in_order = list(batch)
            by_length = sorted(batch_in_order, key=lambda doc: len(doc))
            batch_beams = []
            for subbatch in cytoolz.partition_all(8, by_length):
                subbatch = list(subbatch)
                if beam_width == 1:
                    parse_states, tokvecs = self.parse_batch(subbatch)
                    beams = []
                else:
                    beams, tokvecs = self.beam_parse(subbatch,
                                                     beam_width=beam_width,
                                                     beam_density=beam_density)
                    parse_states = _collect_states(beams)
                self.set_annotations(subbatch, parse_states, tensors=None)
                for beam in beams:
                    _cleanup(beam)
            for doc in batch_in_order:
                yield doc

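    # parse_batch() is the greedy inference path: the whole batch is embedded
    # once, the lower model's feature rows are pre-computed, and the transition
    # loop itself runs without the GIL in _parseC().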
    def parse_batch(self, docs):
        cdef:
            precompute_hiddens state2vec
            Pool mem
            const float* feat_weights
            StateC* st
            StateClass stcls
            vector[StateC*] states
            int guess, nr_class, nr_feat, nr_piece, nr_dim, nr_state, nr_step
            int j
        if isinstance(docs, Doc):
            docs = [docs]

        cuda_stream = util.get_cuda_stream()
        (tokvecs, bp_tokvecs), state2vec, vec2scores = self.get_batch_model(
            docs, cuda_stream, 0.0)
        nr_state = len(docs)
        nr_class = self.moves.n_moves
        nr_dim = tokvecs.shape[1]
        nr_feat = self.nr_feature
        nr_piece = state2vec.nP

        state_objs = self.moves.init_batch(docs)
        for stcls in state_objs:
            if not stcls.c.is_final():
                states.push_back(stcls.c)

        feat_weights = state2vec.get_feat_weights()
        cdef int i
        cdef np.ndarray hidden_weights = numpy.ascontiguousarray(
            vec2scores._layers[-1].W.T)
        cdef np.ndarray hidden_bias = vec2scores._layers[-1].b

        hW = <float*>hidden_weights.data
        hb = <float*>hidden_bias.data
        bias = <float*>state2vec.bias.data
        cdef int nr_hidden = hidden_weights.shape[0]
        cdef int nr_task = states.size()
        with nogil:
            self._parseC(&states[0], nr_task, feat_weights, bias, hW, hb,
                         nr_class, nr_hidden, nr_feat, nr_piece)
        PyErr_CheckSignals()
        tokvecs = self.model[0].ops.unflatten(tokvecs,
                                              [len(doc) for doc in docs])
        return state_objs, tokvecs

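    # _parseC() runs the transition loop over raw C state pointers. At each
    # step, every unfinished state sums its pre-computed feature rows into
    # `unmaxed`, takes the max over the nr_piece candidates per hidden unit
    # (the maxout), and the resulting state vectors are multiplied by the
    # output weights in one GEMM call. The best valid action is then applied
    # to each state, and finished states are dropped from the work list.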
    cdef void _parseC(self, StateC** states, int nr_task,
            const float* feat_weights, const float* bias,
            const float* hW, const float* hb,
            int nr_class, int nr_hidden, int nr_feat, int nr_piece) nogil:
        token_ids = <int*>calloc(nr_feat, sizeof(int))
        is_valid = <int*>calloc(nr_class, sizeof(int))
        vectors = <float*>calloc(nr_hidden * nr_task, sizeof(float))
        unmaxed = <float*>calloc(nr_hidden * nr_piece, sizeof(float))
        scores = <float*>calloc(nr_class*nr_task, sizeof(float))
        if not (token_ids and is_valid and vectors and unmaxed and scores):
            with gil:
                PyErr_SetFromErrno(MemoryError)
                PyErr_CheckSignals()
        cdef int nr_todo = nr_task
        cdef int i, j
        cdef vector[StateC*] unfinished
        while nr_todo >= 1:
            memset(vectors, 0, nr_todo * nr_hidden * sizeof(float))
            memset(scores, 0, nr_todo * nr_class * sizeof(float))
            for i in range(nr_todo):
                state = states[i]
                state.set_context_tokens(token_ids, nr_feat)
                memset(unmaxed, 0, nr_hidden * nr_piece * sizeof(float))
                sum_state_features(unmaxed,
                    feat_weights, token_ids, 1, nr_feat, nr_hidden * nr_piece)
                VecVec.add_i(unmaxed,
                    bias, 1., nr_hidden*nr_piece)
                state_vector = &vectors[i*nr_hidden]
                for j in range(nr_hidden):
                    index = j * nr_piece
                    which = Vec.arg_max(&unmaxed[index], nr_piece)
                    state_vector[j] = unmaxed[index + which]
            # Compute hidden-to-output
            openblas.simple_gemm(scores, nr_todo, nr_class,
                vectors, nr_todo, nr_hidden, hW, nr_hidden, nr_class, 0, 0)
            # Add bias
            for i in range(nr_todo):
                VecVec.add_i(&scores[i*nr_class],
                    hb, 1., nr_class)
            # Validate actions, argmax, take action.
            for i in range(nr_todo):
                state = states[i]
                self.moves.set_valid(is_valid, state)
                guess = arg_max_if_valid(&scores[i*nr_class], is_valid, nr_class)
                action = self.moves.c[guess]
                action.do(state, action.label)
                state.push_hist(guess)
                if not state.is_final():
                    unfinished.push_back(state)
            for i in range(unfinished.size()):
                states[i] = unfinished[i]
            nr_todo = unfinished.size()
            unfinished.clear()
        free(token_ids)
        free(is_valid)
        free(vectors)
        free(unmaxed)
        free(scores)

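    # beam_parse() keeps one Beam per Doc and advances all beams in lockstep:
    # every unfinished candidate state contributes one row of context token
    # ids, the rows are scored in a single batch through state2vec and
    # vec2scores, and the scores are copied back into each beam before it
    # advances.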
    def beam_parse(self, docs, int beam_width=3, float beam_density=0.001,
                   float drop=0.):
        cdef Beam beam
        cdef np.ndarray scores
        cdef Doc doc
        cdef int nr_class = self.moves.n_moves
        cuda_stream = util.get_cuda_stream()
        (tokvecs, bp_tokvecs), state2vec, vec2scores = self.get_batch_model(
            docs, cuda_stream, drop)
        cdef int offset = 0
        cdef int j = 0
        cdef int k

        beams = []
        for doc in docs:
            beam = Beam(nr_class, beam_width, min_density=beam_density)
            beam.initialize(self.moves.init_beam_state, doc.length, doc.c)
            for i in range(beam.width):
                state = <StateC*>beam.at(i)
                state.offset = offset
            offset += len(doc)
            beam.check_done(_check_final_state, NULL)
            beams.append(beam)
        cdef np.ndarray token_ids
        token_ids = numpy.zeros((len(docs) * beam_width, self.nr_feature),
                                dtype='i', order='C')
        todo = [beam for beam in beams if not beam.is_done]

        cdef int* c_ids
        cdef int nr_feature = self.nr_feature
        cdef int n_states
        while todo:
            todo = [beam for beam in beams if not beam.is_done]
            token_ids.fill(-1)
            c_ids = <int*>token_ids.data
            n_states = 0
            for beam in todo:
                for i in range(beam.size):
                    state = <StateC*>beam.at(i)
                    # This way we avoid having to score finalized states
                    # We do have to take care to keep indexes aligned, though
                    if not state.is_final():
                        state.set_context_tokens(c_ids, nr_feature)
                        c_ids += nr_feature
                        n_states += 1
            if n_states == 0:
                break
            vectors, _ = state2vec.begin_update(token_ids[:n_states], drop)
            scores, _ = vec2scores.begin_update(vectors, drop=drop)
            c_scores = <float*>scores.data
            for beam in todo:
                for i in range(beam.size):
                    state = <StateC*>beam.at(i)
                    if not state.is_final():
                        self.moves.set_valid(beam.is_valid[i], state)
                        memcpy(beam.scores[i], c_scores, nr_class * sizeof(float))
                        c_scores += nr_class
                beam.advance(_transition_state, NULL, <void*>self.moves.c)
                beam.check_done(_check_final_state, NULL)
        tokvecs = self.model[0].ops.unflatten(tokvecs,
                                              [len(doc) for doc in docs])
        return beams, tokvecs

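    # update() performs one training step for the greedy parser: the gold
    # sequences are chopped into roughly uniform segments, the transition loop
    # is run while gradients are collected from get_batch_loss(), and the
    # accumulated gradient is finally pushed back through the lower model and
    # tok2vec in _make_updates().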
    def update(self, docs, golds, drop=0., sgd=None, losses=None):
        if not any(self.moves.has_gold(gold) for gold in golds):
            return None
        assert len(docs) == len(golds)
        if self.cfg.get('beam_width', 1) >= 2 and numpy.random.random() >= 0.0:
            return self.update_beam(docs, golds,
                    self.cfg['beam_width'], self.cfg['beam_density'],
                    drop=drop, sgd=sgd, losses=losses)
        if losses is not None and self.name not in losses:
            losses[self.name] = 0.
        if isinstance(docs, Doc) and isinstance(golds, GoldParse):
            docs = [docs]
            golds = [golds]
        for multitask in self._multitasks:
            multitask.update(docs, golds, drop=drop, sgd=sgd)
        cuda_stream = util.get_cuda_stream()
        # Chop sequences into lengths of this many transitions, to make the
        # batch uniform length.
        cut_gold = numpy.random.choice(range(20, 100))
        states, golds, max_steps = self._init_gold_batch(docs, golds, max_length=cut_gold)
        (tokvecs, bp_tokvecs), state2vec, vec2scores = self.get_batch_model(docs, cuda_stream,
                                                                            drop)
        todo = [(s, g) for (s, g) in zip(states, golds)
                if not s.is_final() and g is not None]
        if not todo:
            return None

        backprops = []
        # Add a padding vector to the d_tokvecs gradient, so that missing
        # values don't affect the real gradient.
        d_tokvecs = state2vec.ops.allocate((tokvecs.shape[0]+1, tokvecs.shape[1]))
        cdef float loss = 0.
        n_steps = 0
        while todo:
            states, golds = zip(*todo)
            token_ids = self.get_token_ids(states)
            vector, bp_vector = state2vec.begin_update(token_ids, drop=0.0)
            if drop != 0:
                mask = vec2scores.ops.get_dropout_mask(vector.shape, drop)
                vector *= mask
            hists = numpy.asarray([st.history for st in states], dtype='i')
            if self.cfg.get('hist_size', 0):
                scores, bp_scores = vec2scores.begin_update((vector, hists), drop=drop)
            else:
                scores, bp_scores = vec2scores.begin_update(vector, drop=drop)

            d_scores = self.get_batch_loss(states, golds, scores)
            d_scores /= len(docs)
            d_vector = bp_scores(d_scores, sgd=sgd)
            if drop != 0:
                d_vector *= mask

            if isinstance(self.model[0].ops, CupyOps) \
            and not isinstance(token_ids, state2vec.ops.xp.ndarray):
                # Move token_ids and d_vector to GPU, asynchronously
                backprops.append((
                    util.get_async(cuda_stream, token_ids),
                    util.get_async(cuda_stream, d_vector),
                    bp_vector
                ))
            else:
                backprops.append((token_ids, d_vector, bp_vector))
            self.transition_batch(states, scores)
            todo = [(st, gold) for (st, gold) in todo
                    if not st.is_final()]
            if losses is not None:
                losses[self.name] += (d_scores**2).sum()
            n_steps += 1
            if n_steps >= max_steps:
                break
        self._make_updates(d_tokvecs,
            bp_tokvecs, backprops, sgd, cuda_stream)

    def update_beam(self, docs, golds, width=None, density=None,
                    drop=0., sgd=None, losses=None):
        if not any(self.moves.has_gold(gold) for gold in golds):
            return None
        if not golds:
            return None
        if width is None:
            width = self.cfg.get('beam_width', 2)
        if density is None:
            density = self.cfg.get('beam_density', 0.0)
        if losses is not None and self.name not in losses:
            losses[self.name] = 0.
        lengths = [len(d) for d in docs]
        assert min(lengths) >= 1
        states = self.moves.init_batch(docs)
        for gold in golds:
            self.moves.preprocess_gold(gold)
        cuda_stream = util.get_cuda_stream()
        (tokvecs, bp_tokvecs), state2vec, vec2scores = self.get_batch_model(
            docs, cuda_stream, drop)
        states_d_scores, backprops, beams = _beam_utils.update_beam(
            self.moves, self.nr_feature, 500, states, golds, state2vec,
            vec2scores, width, density, self.cfg.get('hist_size', 0),
            drop=drop, losses=losses)
        backprop_lower = []
        cdef float batch_size = len(docs)
        for i, d_scores in enumerate(states_d_scores):
            d_scores /= batch_size
            if losses is not None:
                losses[self.name] += (d_scores**2).sum()
            ids, bp_vectors, bp_scores = backprops[i]
            d_vector = bp_scores(d_scores, sgd=sgd)
            if isinstance(self.model[0].ops, CupyOps) \
            and not isinstance(ids, state2vec.ops.xp.ndarray):
                backprop_lower.append((
                    util.get_async(cuda_stream, ids),
                    util.get_async(cuda_stream, d_vector),
                    bp_vectors))
            else:
                backprop_lower.append((ids, d_vector, bp_vectors))
        # Add a padding vector to the d_tokvecs gradient, so that missing
        # values don't affect the real gradient.
        d_tokvecs = state2vec.ops.allocate((tokvecs.shape[0]+1, tokvecs.shape[1]))
        self._make_updates(d_tokvecs, bp_tokvecs, backprop_lower, sgd,
                           cuda_stream)
        cdef Beam beam
        for beam in beams:
            _cleanup(beam)

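    # A small worked example of the chopping below (illustrative only): with
    # docs of length 10, 20 and 40 and a resulting max_length of 10, the
    # length-40 doc contributes four states, each starting 10 tokens further
    # in, so every state in the batch covers a comparable number of
    # transitions.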
    def _init_gold_batch(self, whole_docs, whole_golds, min_length=5, max_length=500):
        """Make a square batch, of length equal to the shortest doc. A long
        doc will get multiple states. Let's say we have a doc of length 2*N,
        where N is the shortest doc. We'll make two states, one representing
        long_doc[:N], and another representing long_doc[N:]."""
        cdef:
            StateClass state
            Transition action
        whole_states = self.moves.init_batch(whole_docs)
        max_length = max(min_length, min(max_length, min([len(doc) for doc in whole_docs])))
        max_moves = 0
        states = []
        golds = []
        for doc, state, gold in zip(whole_docs, whole_states, whole_golds):
            gold = self.moves.preprocess_gold(gold)
            if gold is None:
                continue
            oracle_actions = self.moves.get_oracle_sequence(doc, gold)
            start = 0
            while start < len(doc):
                state = state.copy()
                n_moves = 0
                while state.B(0) < start and not state.is_final():
                    action = self.moves.c[oracle_actions.pop(0)]
                    action.do(state.c, action.label)
                    state.c.push_hist(action.clas)
                    n_moves += 1
                has_gold = self.moves.has_gold(gold, start=start,
                                               end=start+max_length)
                if not state.is_final() and has_gold:
                    states.append(state)
                    golds.append(gold)
                    max_moves = max(max_moves, n_moves)
                start += min(max_length, len(doc)-start)
            max_moves = max(max_moves, len(oracle_actions))
        return states, golds, max_moves

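    # _make_updates() scatters the collected per-step gradients back into the
    # token-vector gradient, keyed by the token ids each state looked at, and
    # then backpropagates through tok2vec. The extra final row of d_tokvecs is
    # the padding slot that missing tokens (id -1) end up in, and it is
    # dropped before calling bp_tokvecs.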
    def _make_updates(self, d_tokvecs, bp_tokvecs, backprops, sgd, cuda_stream=None):
        # Tells CUDA to block, so our async copies complete.
        if cuda_stream is not None:
            cuda_stream.synchronize()
        xp = get_array_module(d_tokvecs)
        for ids, d_vector, bp_vector in backprops:
            d_state_features = bp_vector((d_vector, ids), sgd=sgd)
            ids = ids.flatten()
            d_state_features = d_state_features.reshape(
                (ids.size, d_state_features.shape[2]))
            self.model[0].ops.scatter_add(d_tokvecs, ids,
                                          d_state_features)
        # Padded -- see update()
        bp_tokvecs(d_tokvecs[:-1], sgd=sgd)

    @property
    def move_names(self):
        names = []
        for i in range(self.moves.n_moves):
            name = self.moves.move_name(self.moves.c[i].move, self.moves.c[i].label)
            names.append(name)
        return names

    def get_batch_model(self, docs, stream, dropout):
        tok2vec, lower, upper = self.model
        tokvecs, bp_tokvecs = tok2vec.begin_update(docs, drop=dropout)
        state2vec = precompute_hiddens(len(docs), tokvecs,
                                       lower, stream, drop=0.0)
        return (tokvecs, bp_tokvecs), state2vec, upper

    nr_feature = 8

    def get_token_ids(self, states):
        cdef StateClass state
        cdef int n_tokens = self.nr_feature
        cdef np.ndarray ids = numpy.zeros((len(states), n_tokens),
                                          dtype='i', order='C')
        c_ids = <int*>ids.data
        for i, state in enumerate(states):
            if not state.is_final():
                state.c.set_context_tokens(c_ids, n_tokens)
            c_ids += ids.shape[1]
        return ids

    def transition_batch(self, states, float[:, ::1] scores):
        cdef StateClass state
        cdef int[500] is_valid  # TODO: Unhack
        cdef float* c_scores = &scores[0, 0]
        for state in states:
            self.moves.set_valid(is_valid, state.c)
            guess = arg_max_if_valid(c_scores, is_valid, scores.shape[1])
            action = self.moves.c[guess]
            action.do(state.c, action.label)
            c_scores += scores.shape[1]
            state.c.push_hist(guess)

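    # get_batch_loss() asks the transition system for per-action costs and
    # validity for each (state, gold) pair, then calls cpu_log_loss() to turn
    # the model's scores into one gradient row per state.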
    def get_batch_loss(self, states, golds, float[:, ::1] scores):
        cdef StateClass state
        cdef GoldParse gold
        cdef Pool mem = Pool()
        cdef int i
        is_valid = <int*>mem.alloc(self.moves.n_moves, sizeof(int))
        costs = <float*>mem.alloc(self.moves.n_moves, sizeof(float))
        cdef np.ndarray d_scores = numpy.zeros((len(states), self.moves.n_moves),
                                               dtype='f', order='C')
        c_d_scores = <float*>d_scores.data
        for i, (state, gold) in enumerate(zip(states, golds)):
            memset(is_valid, 0, self.moves.n_moves * sizeof(int))
            memset(costs, 0, self.moves.n_moves * sizeof(float))
            self.moves.set_costs(is_valid, costs, state, gold)
            cpu_log_loss(c_d_scores,
                costs, is_valid, &scores[i, 0], d_scores.shape[1])
            c_d_scores += d_scores.shape[1]
        return d_scores

    def set_annotations(self, docs, states, tensors=None):
        cdef StateClass state
        cdef Doc doc
        for i, (state, doc) in enumerate(zip(states, docs)):
            self.moves.finalize_state(state.c)
            for j in range(doc.length):
                doc.c[j] = state.c._sent[j]
            if tensors is not None:
                if isinstance(doc.tensor, numpy.ndarray) \
                and not isinstance(tensors[i], numpy.ndarray):
                    doc.extend_tensor(tensors[i].get())
                else:
                    doc.extend_tensor(tensors[i])
            self.moves.finalize_doc(doc)

        for hook in self.postprocesses:
            for doc in docs:
                hook(doc)

    @property
    def labels(self):
        class_names = [self.moves.get_class_name(i) for i in range(self.moves.n_moves)]
        return class_names

    @property
    def tok2vec(self):
        '''Return the embedding and convolutional layer of the model.'''
        if self.model in (None, True, False):
            return None
        else:
            return self.model[0]

    @property
    def postprocesses(self):
        # Available for subclasses, e.g. to deprojectivize
        return []

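    # Adding a label can grow the number of transition classes, so the final
    # Affine layer is rebuilt with more output rows and the existing weights
    # are copied into the top of the new, larger layer.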
    def add_label(self, label):
        resized = False
        for action in self.moves.action_types:
            added = self.moves.add_action(action, label)
            if added:
                resized = True
        if self.model not in (True, False, None) and resized:
            # Weights are stored in (nr_out, nr_in) format, so we're basically
            # just adding rows here.
            smaller = self.model[-1]._layers[-1]
            larger = Affine(self.moves.n_moves, smaller.nI)
            copy_array(larger.W[:smaller.nO], smaller.W)
            copy_array(larger.b[:smaller.nO], smaller.b)
            self.model[-1]._layers[-1] = larger

    def begin_training(self, get_gold_tuples, pipeline=None, sgd=None, **cfg):
        if 'model' in cfg:
            self.model = cfg['model']
        if not hasattr(get_gold_tuples, '__call__'):
            gold_tuples = get_gold_tuples
            get_gold_tuples = lambda: gold_tuples
        cfg.setdefault('min_action_freq', 30)
        actions = self.moves.get_actions(gold_parses=get_gold_tuples(),
                                         min_freq=cfg.get('min_action_freq', 30))
        self.moves.initialize_actions(actions)
        cfg.setdefault('token_vector_width', 128)
        if self.model is True:
            cfg['pretrained_dims'] = self.vocab.vectors_length
            self.model, cfg = self.Model(self.moves.n_moves, **cfg)
            if sgd is None:
                sgd = self.create_optimizer()
            self.model[1].begin_training(
                self.model[1].ops.allocate((5, cfg['token_vector_width'])))
            if pipeline is not None:
                self.init_multitask_objectives(get_gold_tuples, pipeline, sgd=sgd, **cfg)
            link_vectors_to_models(self.vocab)
        else:
            if sgd is None:
                sgd = self.create_optimizer()
            self.model[1].begin_training(
                self.model[1].ops.allocate((5, cfg['token_vector_width'])))
        self.cfg.update(cfg)
        return sgd

def add_multitask_objective(self, target):
|
|
|
|
# Defined in subclasses, to avoid circular import
|
|
|
|
raise NotImplementedError
|
|
|
|
|

    def init_multitask_objectives(self, get_gold_tuples, pipeline, **cfg):
        '''Set up models for secondary objectives, to benefit from multi-task
        learning. This method is intended to be overridden by subclasses.

        For instance, the dependency parser can benefit from sharing
        an input representation with a label prediction model. These auxiliary
        models are discarded after training.
        '''
        pass
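
    # For illustration only: a subclass might override the hook above roughly
    # as follows, training an auxiliary pipe that shares the parser's
    # token-to-vector layer. The MultitaskObjective name and its arguments are
    # assumptions for this sketch, not the exact spaCy API:
    #
    #     def init_multitask_objectives(self, get_gold_tuples, pipeline,
    #                                   sgd=None, **cfg):
    #         for target in cfg.get('multitasks', []):
    #             labeller = MultitaskObjective(self.vocab, target=target)
    #             labeller.begin_training(get_gold_tuples, pipeline=pipeline,
    #                                     sgd=sgd)
    #             pipeline.append(labeller)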

    def preprocess_gold(self, docs_golds):
        for doc, gold in docs_golds:
            yield doc, gold

    def use_params(self, params):
        # Can't decorate cdef class :(. Workaround.
        with self.model[0].use_params(params):
            with self.model[1].use_params(params):
                yield

    def to_disk(self, path, **exclude):
        serializers = {
            'tok2vec_model': lambda p: p.open('wb').write(
                self.model[0].to_bytes()),
            'lower_model': lambda p: p.open('wb').write(
                self.model[1].to_bytes()),
            'upper_model': lambda p: p.open('wb').write(
                self.model[2].to_bytes()),
            'vocab': lambda p: self.vocab.to_disk(p),
            'moves': lambda p: self.moves.to_disk(p, strings=False),
            'cfg': lambda p: p.open('w').write(json_dumps(self.cfg))
        }
        util.to_disk(path, serializers, exclude)

    def from_disk(self, path, **exclude):
        deserializers = {
            'vocab': lambda p: self.vocab.from_disk(p),
            'moves': lambda p: self.moves.from_disk(p, strings=False),
            'cfg': lambda p: self.cfg.update(util.read_json(p)),
            'model': lambda p: None
        }
        util.from_disk(path, deserializers, exclude)
        if 'model' not in exclude:
            path = util.ensure_path(path)
            if self.model is True:
                self.cfg.setdefault('pretrained_dims', self.vocab.vectors_length)
                self.model, cfg = self.Model(**self.cfg)
            else:
                cfg = {}
            with (path / 'tok2vec_model').open('rb') as file_:
                bytes_data = file_.read()
            self.model[0].from_bytes(bytes_data)
            with (path / 'lower_model').open('rb') as file_:
                bytes_data = file_.read()
            self.model[1].from_bytes(bytes_data)
            with (path / 'upper_model').open('rb') as file_:
                bytes_data = file_.read()
            self.model[2].from_bytes(bytes_data)
            self.cfg.update(cfg)
        return self
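
    # Usage sketch: parser.to_disk(path) writes tok2vec_model, lower_model,
    # upper_model, vocab, moves and cfg under the given directory, and
    # parser.from_disk(path) restores them, building the model from cfg first
    # if it has not been constructed yet.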

    def to_bytes(self, **exclude):
        serializers = OrderedDict((
            ('tok2vec_model', lambda: self.model[0].to_bytes()),
            ('lower_model', lambda: self.model[1].to_bytes()),
            ('upper_model', lambda: self.model[2].to_bytes()),
            ('vocab', lambda: self.vocab.to_bytes()),
            ('moves', lambda: self.moves.to_bytes(strings=False)),
            ('cfg', lambda: json.dumps(self.cfg, indent=2, sort_keys=True))
        ))
        if 'model' in exclude:
            exclude['tok2vec_model'] = True
            exclude['lower_model'] = True
            exclude['upper_model'] = True
            exclude.pop('model')
        return util.to_bytes(serializers, exclude)

    def from_bytes(self, bytes_data, **exclude):
        deserializers = OrderedDict((
            ('vocab', lambda b: self.vocab.from_bytes(b)),
            ('moves', lambda b: self.moves.from_bytes(b, strings=False)),
            ('cfg', lambda b: self.cfg.update(json.loads(b))),
            ('tok2vec_model', lambda b: None),
            ('lower_model', lambda b: None),
            ('upper_model', lambda b: None)
        ))
        msg = util.from_bytes(bytes_data, deserializers, exclude)
        if 'model' not in exclude:
            if self.model is True:
                self.model, cfg = self.Model(**self.cfg)
                cfg['pretrained_dims'] = self.vocab.vectors_length
            else:
                cfg = {}
                cfg['pretrained_dims'] = self.vocab.vectors_length
            if 'tok2vec_model' in msg:
                self.model[0].from_bytes(msg['tok2vec_model'])
            if 'lower_model' in msg:
                self.model[1].from_bytes(msg['lower_model'])
            if 'upper_model' in msg:
                self.model[2].from_bytes(msg['upper_model'])
            self.cfg.update(cfg)
        return self
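
    # Usage sketch: the byte-level API mirrors the disk API, so a parser can
    # be round-tripped in memory with parser.from_bytes(parser.to_bytes());
    # excluding 'model' skips serializing or restoring the three model
    # components.
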

class ParserStateError(ValueError):
    def __init__(self, doc):
        ValueError.__init__(self,
            "Error analysing doc -- no valid actions available. This should "
            "never happen, so please report the error on the issue tracker. "
            "Here's the thread to do so --- reopen it if it's closed:\n"
            "https://github.com/spacy-io/spaCy/issues/429\n"
            "Please include the text that the parser failed on, which is:\n"
            "%s" % repr(doc.text))


cdef int arg_max_if_gold(const weight_t* scores, const weight_t* costs,
                         const int* is_valid, int n) nogil:
    # Find minimum cost
    cdef float cost = 1
    for i in range(n):
        if is_valid[i] and costs[i] < cost:
            cost = costs[i]
    # Now find best-scoring with that cost
    cdef int best = -1
    for i in range(n):
        if costs[i] <= cost and is_valid[i]:
            if best == -1 or scores[i] > scores[best]:
                best = i
    return best
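
# Worked example with illustrative values: given is_valid = [1, 1, 1],
# costs = [0, 0, 2] and scores = [1.5, 3.0, 9.0], the minimum cost over valid
# actions is 0, so only the first two actions remain eligible and the function
# returns 1, the higher-scoring of the two.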


cdef int arg_max_if_valid(const weight_t* scores, const int* is_valid, int n) nogil:
    cdef int best = -1
    for i in range(n):
        if is_valid[i] >= 1:
            if best == -1 or scores[i] > scores[best]:
                best = i
    return best


# These are passed as callbacks to thinc.search.Beam
cdef int _transition_state(void* _dest, void* _src, class_t clas,
                           void* _moves) except -1:
    dest = <StateC*>_dest
    src = <StateC*>_src
    moves = <const Transition*>_moves
    dest.clone(src)
    moves[clas].do(dest, moves[clas].label)
    dest.push_hist(clas)


cdef int _check_final_state(void* _state, void* extra_args) except -1:
    state = <StateC*>_state
    return state.is_final()


def _cleanup(Beam beam):
    cdef StateC* state
    # Once parsing has finished, states in beam may not be unique. Is this
    # correct?
    seen = set()
    for i in range(beam.width):
        addr = <size_t>beam._parents[i].content
        if addr not in seen:
            state = <StateC*>addr
            del state
            seen.add(addr)
        else:
            print(i, addr)
            print(seen)
            raise Exception
        addr = <size_t>beam._states[i].content
        if addr not in seen:
            state = <StateC*>addr
            del state
            seen.add(addr)
        else:
            print(i, addr)
            print(seen)
            raise Exception