2017-04-15 14:05:15 +03:00
|
|
|
# coding: utf-8
|
2016-11-26 02:29:17 +03:00
|
|
|
from __future__ import unicode_literals
|
2016-02-24 13:26:25 +03:00
|
|
|
from copy import copy
|
2016-02-22 16:40:40 +03:00
|
|
|
|
2016-03-01 12:09:08 +03:00
|
|
|
from ..tokens.doc cimport Doc
|
2017-04-15 14:05:15 +03:00
|
|
|
from ..attrs import DEP, HEAD
|
2016-03-01 12:09:08 +03:00
|
|
|
|
|
|
|
|
2016-02-24 13:26:25 +03:00
|
|
|
def ancestors(tokenid, heads):
    """Yield the token ids on the path from `tokenid` up towards the root.

    `heads` is a list where heads[i] is the head index of token i; a root
    token is its own head, and unattached tokens may have head None.

    The path to the root cannot be longer than the number of words in the
    sentence, so the walk is capped at len(heads) steps — this guarantees
    termination even if the head relation contains a cycle.
    """
    head = tokenid
    cnt = 0
    while heads[head] != head and cnt < len(heads):
        head = heads[head]
        cnt += 1
        yield head
        # unattached token/subtree: nothing further to walk
        # (checked after the yield so heads[None] is never evaluated)
        if head is None:
            break
|
|
|
|
|
|
|
|
|
|
|
|
def contains_cycle(heads):
    """Return the set of token ids on a cycle in `heads`, or None if acyclic.

    In an acyclic tree, the path from each word following the head relation
    upwards always ends at the root node, so no token id is ever seen twice
    along one path.
    """
    for tokenid in range(len(heads)):
        seen = {tokenid}
        for ancestor in ancestors(tokenid, heads):
            if ancestor in seen:
                return seen
            seen.add(ancestor)
    return None
|
|
|
|
|
|
|
|
|
2016-02-24 13:26:25 +03:00
|
|
|
def is_nonproj_arc(tokenid, heads):
    """Return True if the arc from heads[tokenid] to tokenid is non-projective.

    Definition (e.g. Havelka 2007): an arc h -> d, h < d, is non-projective
    if there is a token k, h < k < d, such that h is not an ancestor of k.
    Same for h -> d with h > d.
    """
    head = heads[tokenid]
    if head == tokenid:  # root arcs cannot be non-projective
        return False
    elif head is None:  # unattached tokens cannot be non-projective
        return False

    # scan every token strictly between head and dependent
    start, end = (head + 1, tokenid) if head < tokenid else (tokenid + 1, head)
    for k in range(start, end):
        for ancestor in ancestors(k, heads):
            if ancestor is None:  # for unattached tokens/subtrees
                break
            elif ancestor == head:  # normal case: k dominated by h
                break
        else:  # head not in ancestors: d -> h is non-projective
            return True
    return False
|
|
|
|
|
|
|
|
|
2016-02-24 13:26:25 +03:00
|
|
|
def is_nonproj_tree(heads):
    """A tree is non-projective if at least one of its arcs is non-projective."""
    for word in range(len(heads)):
        if is_nonproj_arc(word, heads):
            return True
    return False
|
|
|
|
|
|
|
|
|
2016-03-03 21:05:08 +03:00
|
|
|
class PseudoProjectivity:
    """Projectivize/deprojectivize mechanism from Nivre & Nilsson 2005
    for pseudo-projective parsing.

    Uses the HEAD decoration scheme: when a non-projective arc is lifted,
    its label is decorated as 'label||headlabel' so the transformation can
    be undone after parsing.
    """

    # separator between the original label and the original head's label
    # in decorated arc labels
    delimiter = '||'

    @classmethod
    def decompose(cls, label):
        """Split a (possibly decorated) label into (label, headlabel)."""
        return label.partition(cls.delimiter)[::2]

    @classmethod
    def is_decorated(cls, label):
        """Return True if `label` carries a '||headlabel' decoration."""
        return cls.delimiter in label

    @classmethod
    def preprocess_training_data(cls, gold_tuples, label_freq_cutoff=30):
        """Projectivize every training sentence and decorate lifted arcs.

        Decorated labels occurring fewer than `label_freq_cutoff` times are
        afterwards stripped back to their plain form (they can't be learned
        reliably anyway, and this keeps the label set smaller). A cutoff of
        0 (or less) disables the filtering.
        """
        preprocessed = []
        freqs = {}
        for raw_text, sents in gold_tuples:
            prepro_sents = []
            for (ids, words, tags, heads, labels, iob), ctnts in sents:
                proj_heads, deco_labels = cls.projectivize(heads, labels)
                # set the label to ROOT for each root dependent
                deco_labels = ['ROOT' if head == i else deco_labels[i]
                               for i, head in enumerate(proj_heads)]
                # count decorated-label frequencies for later filtering
                if label_freq_cutoff > 0:
                    for label in deco_labels:
                        if cls.is_decorated(label):
                            freqs[label] = freqs.get(label, 0) + 1
                prepro_sents.append(
                    ((ids, words, tags, proj_heads, deco_labels, iob), ctnts))
            preprocessed.append((raw_text, prepro_sents))

        if label_freq_cutoff > 0:
            return cls._filter_labels(preprocessed, label_freq_cutoff, freqs)
        return preprocessed

    @classmethod
    def projectivize(cls, heads, labels):
        """Return a new (heads, labels) pair encoding a projective tree.

        Uses the lifting algorithm by Nivre & Nilsson 2005; assumes `heads`
        to be a proper tree, i.e. connected and cycle-free. The input is
        not modified; lifted arcs get decorated labels.
        """
        proj_heads = copy(heads)
        smallest_np_arc = cls._get_smallest_nonproj_arc(proj_heads)
        if smallest_np_arc is None:  # this sentence is already projective
            return proj_heads, copy(labels)
        while smallest_np_arc is not None:
            cls._lift(smallest_np_arc, proj_heads)
            smallest_np_arc = cls._get_smallest_nonproj_arc(proj_heads)
        deco_labels = cls._decorate(heads, proj_heads, labels)
        return proj_heads, deco_labels

    @classmethod
    def deprojectivize(cls, tokens):
        """Reattach arcs with decorated labels (inverse of projectivize).

        For each decorated arc X||Y, search top-down, left-to-right,
        breadth-first from the current head until hitting a token with
        dependency label Y, then make that token the new head.
        """
        for token in tokens:
            if cls.is_decorated(token.dep_):
                newlabel, headlabel = cls.decompose(token.dep_)
                newhead = cls._find_new_head(token, headlabel)
                token.head = newhead
                token.dep_ = newlabel

    @classmethod
    def _decorate(cls, heads, proj_heads, labels):
        """Decorate labels of lifted arcs with the original head's label
        (HEAD scheme from Nivre & Nilsson 2005)."""
        assert len(heads) == len(proj_heads) == len(labels)
        deco_labels = []
        for tokenid, head in enumerate(heads):
            if head != proj_heads[tokenid]:
                deco_labels.append(
                    '%s%s%s' % (labels[tokenid], cls.delimiter, labels[head]))
            else:
                deco_labels.append(labels[tokenid])
        return deco_labels

    @classmethod
    def _get_smallest_nonproj_arc(cls, heads):
        """Return the dependent id of the smallest non-projective arc, or None.

        Size is defined as the distance between dependent and head;
        ties are broken left to right.
        """
        smallest_size = float('inf')
        smallest_np_arc = None
        for tokenid, head in enumerate(heads):
            size = abs(tokenid - head)
            if size < smallest_size and is_nonproj_arc(tokenid, heads):
                smallest_size = size
                smallest_np_arc = tokenid
        return smallest_np_arc

    @classmethod
    def _lift(cls, tokenid, heads):
        """Reattach `tokenid` to its grandparent, modifying `heads` in place."""
        head = heads[tokenid]
        ghead = heads[head]
        # attach to ghead if head isn't attached to root else attach to root
        heads[tokenid] = ghead if head != ghead else tokenid

    @classmethod
    def _find_new_head(cls, token, headlabel):
        """Breadth-first search, starting from the head of `token`, for the
        first descendant with dependency label `headlabel`; if there is
        none, return the current head (no change)."""
        queue = [token.head]
        while queue:
            next_queue = []
            for qtoken in queue:
                for child in qtoken.children:
                    if child.is_space:
                        continue
                    if child == token:
                        continue
                    if child.dep_ == headlabel:
                        return child
                    next_queue.append(child)
            queue = next_queue
        return token.head

    @classmethod
    def _filter_labels(cls, gold_tuples, cutoff, freqs):
        """Strip decorated labels seen fewer than `cutoff` times back to
        their plain form (can't learn them reliably anyway and this keeps
        the label set smaller)."""
        filtered = []
        for raw_text, sents in gold_tuples:
            filtered_sents = []
            for (ids, words, tags, heads, labels, iob), ctnts in sents:
                filtered_labels = [cls.decompose(label)[0]
                                   if freqs.get(label, cutoff) < cutoff
                                   else label
                                   for label in labels]
                filtered_sents.append(
                    ((ids, words, tags, heads, filtered_labels, iob), ctnts))
            filtered.append((raw_text, filtered_sents))
        return filtered
|