"""
MALT-style dependency parser
"""
from __future__ import unicode_literals
cimport cython

from libc.stdint cimport uint32_t, uint64_t

import random
import os.path
from os import path
import shutil
import json

from cymem.cymem cimport Pool, Address
from murmurhash.mrmr cimport hash64
from thinc.typedefs cimport weight_t, class_t, feat_t, atom_t

from util import Config

from thinc.features cimport Extractor
from thinc.features cimport Feature
from thinc.features cimport count_feats
from thinc.learner cimport LinearModel
from thinc.search cimport Beam
from thinc.search cimport MaxViolation

from ..tokens cimport Tokens, TokenC
from ..strings cimport StringStore

from .arc_eager cimport TransitionSystem, Transition
from .transition_system import OracleError

from ._state cimport State, new_state, copy_state, is_final, push_stack
from ..gold cimport GoldParse

from . import _parse_features
from ._parse_features cimport fill_context, CONTEXT_SIZE


DEBUG = False
def set_debug(val):
    global DEBUG
    DEBUG = val


cdef unicode print_state(State* s, list words):
    words = list(words) + ['EOL']
    top = words[s.stack[0]] + '_%d' % s.sent[s.stack[0]].head
    second = words[s.stack[-1]] + '_%d' % s.sent[s.stack[-1]].head
    third = words[s.stack[-2]] + '_%d' % s.sent[s.stack[-2]].head
    n0 = words[s.i] if s.i < len(words) else 'EOL'
    n1 = words[s.i + 1] if s.i + 1 < len(words) else 'EOL'
    if s.ents_len:
        ent = '%s %d-%d' % (s.ent.label, s.ent.start, s.ent.end)
    else:
        ent = '-'
    return ' '.join((ent, str(s.stack_len), third, second, top, '|', n0, n1))


def get_templates(name):
    pf = _parse_features
    if name == 'ner':
        return pf.ner
    elif name == 'debug':
        return pf.unigrams
    else:
        return (pf.unigrams + pf.s0_n0 + pf.s1_n0 + pf.s0_n1 + pf.n0_n1 +
                pf.tree_shape + pf.trigrams)


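# The Parser drives a MALT-style, transition-based parse: a linear model
# scores the possible transitions for the current state, and the parser
# either follows the single best-scoring valid move (greedy decoding) or
# keeps the top self.cfg.beam_width candidate states at each step (beam search).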
cdef class Parser:
    def __init__(self, StringStore strings, model_dir, transition_system):
        assert os.path.exists(model_dir) and os.path.isdir(model_dir)
        self.cfg = Config.read(model_dir, 'config')
        self.moves = transition_system(strings, self.cfg.labels)
        templates = get_templates(self.cfg.features)
        self.model = Model(self.moves.n_moves, templates, model_dir)

    def __call__(self, Tokens tokens):
        if tokens.length == 0:
            return 0
        cdef State* state
        if self.cfg.beam_width == 1:
            state = self._greedy_parse(tokens)
        else:
            state = self._beam_parse(tokens)
        self.moves.finalize_state(state)
        tokens.set_parse(state.sent)

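    # Greedy decoding: repeatedly extract the feature context, score all
    # transitions, and apply the best-scoring valid one until the state is final.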
    cdef State* _greedy_parse(self, Tokens tokens) except NULL:
        cdef atom_t[CONTEXT_SIZE] context
        cdef int n_feats
        cdef Pool mem = Pool()
        cdef State* state = new_state(mem, tokens.data, tokens.length)
        self.moves.initialize_state(state)
        cdef Transition guess
        while not is_final(state):
            fill_context(context, state)
            scores = self.model.score(context)
            guess = self.moves.best_valid(scores, state)
            guess.do(&guess, state)
        return state

    cdef State* _beam_parse(self, Tokens tokens) except NULL:
        cdef Beam beam = Beam(self.model.n_classes, self.cfg.beam_width)
        beam.initialize(_init_state, tokens.length, tokens.data)
        while not beam.is_done:
            self._advance_beam(beam, None, False)
        return <State*>beam.at(0)

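    # Training mirrors decoding: greedy training updates the model once per
    # transition, while beam training collects feature counts from the
    # maximum-violation prediction/gold beam histories.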
    def train(self, Tokens tokens, GoldParse gold):
        self.moves.preprocess_gold(gold)
        if self.cfg.beam_width == 1:
            return self._greedy_train(tokens, gold)
        else:
            return self._beam_train(tokens, gold)

    def _greedy_train(self, Tokens tokens, GoldParse gold):
        cdef Pool mem = Pool()
        cdef State* state = new_state(mem, tokens.data, tokens.length)
        self.moves.initialize_state(state)

        cdef int cost
        cdef const Feature* feats
        cdef const weight_t* scores
        cdef Transition guess
        cdef Transition best
        cdef atom_t[CONTEXT_SIZE] context
        loss = 0
        while not is_final(state):
            fill_context(context, state)
            scores = self.model.score(context)
            guess = self.moves.best_valid(scores, state)
            best = self.moves.best_gold(scores, state, gold)
            cost = guess.get_cost(&guess, state, gold)
            self.model.update(context, guess.clas, best.clas, cost)
            guess.do(&guess, state)
            loss += cost
        return loss

    def _beam_train(self, Tokens tokens, GoldParse gold_parse):
        cdef Beam pred = Beam(self.model.n_classes, self.cfg.beam_width)
        pred.initialize(_init_state, tokens.length, tokens.data)
        cdef Beam gold = Beam(self.model.n_classes, self.cfg.beam_width)
        gold.initialize(_init_state, tokens.length, tokens.data)

        violn = MaxViolation()
        while not pred.is_done and not gold.is_done:
            self._advance_beam(pred, gold_parse, False)
            self._advance_beam(gold, gold_parse, True)
            violn.check(pred, gold)
        counts = {}
        if pred._states[0].loss >= 1:
            self._count_feats(counts, tokens, violn.g_hist, 1)
            self._count_feats(counts, tokens, violn.p_hist, -1)
        self.model._model.update(counts)
        return pred._states[0].loss

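    # Advance every state in the beam by one transition: score all moves for
    # each state, attach gold costs when a GoldParse is available (or restrict
    # to zero-cost moves when following the gold beam), then let thinc's Beam
    # select the next top-k states.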
    def _advance_beam(self, Beam beam, GoldParse gold, bint follow_gold):
        cdef atom_t[CONTEXT_SIZE] context
        cdef State* state
        cdef int i, j, cost
        cdef bint is_valid
        cdef const Transition* move
        for i in range(beam.size):
            state = <State*>beam.at(i)
            fill_context(context, state)
            scores = self.model.score(context)
            validities = self.moves.get_valid(state)
            if gold is None:
                for j in range(self.model.n_classes):
                    beam.set_cell(i, j, scores[j], 0, validities[j])
            elif not follow_gold:
                for j in range(self.model.n_classes):
                    move = &self.moves.c[j]
                    cost = move.get_cost(move, state, gold)
                    beam.set_cell(i, j, scores[j], cost, validities[j])
            else:
                for j in range(self.model.n_classes):
                    move = &self.moves.c[j]
                    cost = move.get_cost(move, state, gold)
                    beam.set_cell(i, j, scores[j], cost, cost == 0)
        beam.advance(_transition_state, <void*>self.moves.c)
        beam.check_done(_check_final_state, NULL)

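    # Replay a beam's transition history against a fresh state and accumulate
    # feature counts per class, weighted by `inc`, for the perceptron update.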
    def _count_feats(self, dict counts, Tokens tokens, list hist, int inc):
        cdef atom_t[CONTEXT_SIZE] context
        cdef Pool mem = Pool()
        cdef State* state = new_state(mem, tokens.data, tokens.length)
        self.moves.initialize_state(state)

        cdef class_t clas
        cdef int n_feats
        for clas in hist:
            if is_final(state):
                break
            fill_context(context, state)
            feats = self.model._extractor.get_feats(context, &n_feats)
            count_feats(counts.setdefault(clas, {}), feats, n_feats, inc)
            self.moves.c[clas].do(&self.moves.c[clas], state)


# These are passed as callbacks to thinc.search.Beam

cdef int _transition_state(void* _dest, void* _src, class_t clas, void* _moves) except -1:
    dest = <State*>_dest
    src = <const State*>_src
    moves = <const Transition*>_moves
    copy_state(dest, src)
    moves[clas].do(&moves[clas], dest)


cdef void* _init_state(Pool mem, int length, void* tokens) except NULL:
    state = new_state(mem, <const TokenC*>tokens, length)
    push_stack(state)
    return state


cdef int _check_final_state(void* state, void* extra_args) except -1:
    return is_final(<State*>state)