# cython: infer_types=True
# coding: utf-8
from __future__ import unicode_literals

from cpython.ref cimport Py_INCREF
from cymem.cymem cimport Pool
from thinc.typedefs cimport weight_t
from thinc.extra.search cimport Beam
from collections import OrderedDict, Counter
import srsly

from . cimport _beam_utils
from ..tokens.doc cimport Doc
from ..structs cimport TokenC
from .stateclass cimport StateClass
from ..typedefs cimport attr_t
from ..errors import Errors
from .. import util

cdef weight_t MIN_SCORE = -90000


class OracleError(Exception):
    pass


cdef void* _init_state(Pool mem, int length, void* tokens) except NULL:
    cdef StateC* st = new StateC(<const TokenC*>tokens, length)
    return <void*>st


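# Base class for transition systems. It owns the mapping between integer
# action IDs, (move, label) pairs and C-level Transition structs; concrete
# systems (e.g. the arc-eager dependency parser or the BILUO NER moves)
# subclass it and fill in the transitions and oracle costs.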
cdef class TransitionSystem:
    def __init__(self, StringStore string_table, labels_by_action=None, min_freq=None):
        self.mem = Pool()
        self.strings = string_table
        self.n_moves = 0
        self._size = 100

        self.c = <Transition*>self.mem.alloc(self._size, sizeof(Transition))

        self.labels = {}
        if labels_by_action:
            self.initialize_actions(labels_by_action, min_freq=min_freq)
        self.root_label = self.strings.add('ROOT')
        self.init_beam_state = _init_state

    def __reduce__(self):
        return (self.__class__, (self.strings, self.labels), None, None)

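    # Create one StateClass per Doc. The running token offset is stored on
    # each state, presumably so states can be mapped back to positions in a
    # batch-level array.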
    def init_batch(self, docs):
        cdef StateClass state
        states = []
        offset = 0
        for doc in docs:
            state = StateClass(doc, offset=offset)
            self.initialize_state(state.c)
            states.append(state)
            offset += len(doc)
        return states

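    # Beam-search counterpart of init_batch: each Doc gets a thinc Beam whose
    # candidate states are created through the init_beam_state callback.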
    def init_beams(self, docs, beam_width, beam_density=0.):
        cdef Doc doc
        beams = []
        cdef int offset = 0
        for doc in docs:
            beam = Beam(self.n_moves, beam_width, min_density=beam_density)
            beam.initialize(self.init_beam_state, doc.length, doc.c)
            for i in range(beam.width):
                state = <StateC*>beam.at(i)
                state.offset = offset
            offset += len(doc)
            beam.check_done(_beam_utils.check_final_state, NULL)
            beams.append(beam)
        return beams

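    # Replay the gold parse greedily: at each step take the first valid,
    # zero-cost action. If no such action exists, E024 is raised, which
    # typically means the gold annotation is unreachable for this system.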
    def get_oracle_sequence(self, doc, GoldParse gold):
        cdef Pool mem = Pool()
        costs = <float*>mem.alloc(self.n_moves, sizeof(float))
        is_valid = <int*>mem.alloc(self.n_moves, sizeof(int))

        cdef StateClass state = StateClass(doc, offset=0)
        self.initialize_state(state.c)
        history = []
        while not state.is_final():
            self.set_costs(is_valid, costs, state, gold)
            for i in range(self.n_moves):
                if is_valid[i] and costs[i] <= 0:
                    action = self.c[i]
                    history.append(i)
                    action.do(state.c, action.label)
                    break
            else:
                raise ValueError(Errors.E024)
        return history

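    # Hooks below are no-ops or abstract here and are overridden by the
    # concrete transition systems.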
    cdef int initialize_state(self, StateC* state) nogil:
        pass

    cdef int finalize_state(self, StateC* state) nogil:
        pass

    def finalize_doc(self, doc):
        pass

    def preprocess_gold(self, GoldParse gold):
        raise NotImplementedError

    def is_gold_parse(self, StateClass state, GoldParse gold):
        raise NotImplementedError

    cdef Transition lookup_transition(self, object name) except *:
        raise NotImplementedError

    cdef Transition init_transition(self, int clas, int move, attr_t label) except *:
        raise NotImplementedError

    def is_valid(self, StateClass stcls, move_name):
        action = self.lookup_transition(move_name)
        if action.move == 0:
            return False
        return action.is_valid(stcls.c, action.label)

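    # Write a 0/1 validity flag for every transition in the current state
    # into the caller-provided is_valid array.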
    cdef int set_valid(self, int* is_valid, const StateC* st) nogil:
        cdef int i
        for i in range(self.n_moves):
            is_valid[i] = self.c[i].is_valid(st, self.c[i].label)

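    # Fill `costs` with the oracle cost of each action in the current state.
    # Invalid actions get a large dummy cost (9000) so they are never picked
    # as gold; if no action has cost <= 0, the gold parse cannot be followed
    # and E024 is raised.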
    cdef int set_costs(self, int* is_valid, weight_t* costs,
                       StateClass stcls, GoldParse gold) except -1:
        cdef int i
        self.set_valid(is_valid, stcls.c)
        cdef int n_gold = 0
        for i in range(self.n_moves):
            if is_valid[i]:
                costs[i] = self.c[i].get_cost(stcls, &gold.c, self.c[i].label)
                n_gold += costs[i] <= 0
            else:
                costs[i] = 9000
        if n_gold <= 0:
            raise ValueError(Errors.E024)

    def get_class_name(self, int clas):
        act = self.c[clas]
        return self.move_name(act.move, act.label)

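    # Rebuild the action inventory from a mapping of move IDs to label
    # frequency counts. Labels are added in decreasing frequency order so
    # that class IDs stay stable across save/load; labels with negative
    # (made-up) frequencies are appended last, mirroring add_action.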
    def initialize_actions(self, labels_by_action, min_freq=None):
        self.labels = {}
        self.n_moves = 0
        added_labels = []
        added_actions = {}
        for action, label_freqs in sorted(labels_by_action.items()):
            action = int(action)
            # Make sure we take a copy here, and that we get a Counter
            self.labels[action] = Counter()
            # Have to be careful here: Sorting must be stable, or our model
            # won't be read back in correctly.
            sorted_labels = [(f, L) for L, f in label_freqs.items()]
            sorted_labels.sort()
            sorted_labels.reverse()
            for freq, label_str in sorted_labels:
                if freq < 0:
                    added_labels.append((freq, label_str))
                    added_actions.setdefault(label_str, []).append(action)
                else:
                    self.add_action(int(action), label_str)
                    self.labels[action][label_str] = freq
        added_labels.sort(reverse=True)
        for freq, label_str in added_labels:
            for action in added_actions[label_str]:
                self.add_action(int(action), label_str)
                self.labels[action][label_str] = freq

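    # Register a (move, label) pair if it is not already present. Returns 1
    # if a new transition was added, 0 if it already existed. New labels get
    # a strictly decreasing synthetic frequency so initialize_actions can
    # reproduce the same ordering later.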
    def add_action(self, int action, label_name):
        cdef attr_t label_id
        if not isinstance(label_name, int) and \
           not isinstance(label_name, long):
            label_id = self.strings.add(label_name)
        else:
            label_id = label_name
        # Check we're not creating a move we already have, so that this is
        # idempotent
        for trans in self.c[:self.n_moves]:
            if trans.move == action and trans.label == label_id:
                return 0
        if self.n_moves >= self._size:
            self._size *= 2
            self.c = <Transition*>self.mem.realloc(self.c, self._size * sizeof(self.c[0]))
        self.c[self.n_moves] = self.init_transition(self.n_moves, action, label_id)
        self.n_moves += 1
        # Add the new (action, label) pair, making up a frequency for it if
        # necessary. To preserve sort order, the frequency needs to be lower
        # than previous frequencies.
        if self.labels.get(action, []):
            new_freq = min(self.labels[action].values())
        else:
            self.labels[action] = Counter()
            new_freq = -1
        if new_freq > 0:
            new_freq = 0
        self.labels[action][label_name] = new_freq - 1
        return 1

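    # Serialization stores only the label frequencies and the string store;
    # the transition inventory itself is rebuilt via initialize_actions on
    # load.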
    def to_disk(self, path, **exclude):
        with path.open('wb') as file_:
            file_.write(self.to_bytes(**exclude))

    def from_disk(self, path, **exclude):
        with path.open('rb') as file_:
            byte_data = file_.read()
        self.from_bytes(byte_data, **exclude)
        return self

    def to_bytes(self, **exclude):
        transitions = []
        serializers = {
            'moves': lambda: srsly.json_dumps(self.labels),
            'strings': lambda: self.strings.to_bytes()
        }
        return util.to_bytes(serializers, exclude)

    def from_bytes(self, bytes_data, **exclude):
        labels = {}
        deserializers = {
            'moves': lambda b: labels.update(srsly.json_loads(b)),
            'strings': lambda b: self.strings.from_bytes(b)
        }
        msg = util.from_bytes(bytes_data, deserializers, exclude)
        self.initialize_actions(labels)
        return self