From b8c4f5ea768126e46b138cc8d3e0c930fb6a5aba Mon Sep 17 00:00:00 2001
From: Matthew Honnibal
Date: Thu, 24 Nov 2016 23:30:15 +1100
Subject: [PATCH] Allow German noun chunks to work on Span

Update the German noun chunks iterator, so that it also works on Span
objects.
---
 spacy/syntax/iterators.pyx | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/spacy/syntax/iterators.pyx b/spacy/syntax/iterators.pyx
index f8951d039..ee5e818c1 100644
--- a/spacy/syntax/iterators.pyx
+++ b/spacy/syntax/iterators.pyx
@@ -2,9 +2,11 @@ from spacy.parts_of_speech cimport NOUN, PROPN, PRON
 
 
 def english_noun_chunks(obj):
+    '''Detect base noun phrases from a dependency parse.
+    Works on both Doc and Span.'''
     labels = ['nsubj', 'dobj', 'nsubjpass', 'pcomp', 'pobj', 'attr', 'ROOT', 'root']
-    doc = obj.doc
+    doc = obj.doc # Ensure works on both Doc and Span.
     np_deps = [doc.vocab.strings[label] for label in labels]
     conj = doc.vocab.strings['conj']
     np_label = doc.vocab.strings['NP']
@@ -26,14 +28,15 @@ def english_noun_chunks(obj):
 # extended to the right of the NOUN
 # example: "eine Tasse Tee" (a cup (of) tea) returns "eine Tasse Tee" and not
 # just "eine Tasse", same for "das Thema Familie"
-def german_noun_chunks(doc):
+def german_noun_chunks(obj):
     labels = ['sb', 'oa', 'da', 'nk', 'mo', 'ag', 'ROOT', 'root', 'cj', 'pd', 'og', 'app']
+    doc = obj.doc # Ensure works on both Doc and Span.
     np_label = doc.vocab.strings['NP']
     np_deps = set(doc.vocab.strings[label] for label in labels)
     close_app = doc.vocab.strings['nk']
 
     rbracket = 0
-    for i, word in enumerate(doc):
+    for i, word in enumerate(obj):
         if i < rbracket:
             continue
         if word.pos in (NOUN, PROPN, PRON) and word.dep in np_deps:
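
For context, a minimal usage sketch (not part of the patch): it assumes spaCy 1.x
with a German model installed, that spacy.syntax.iterators is importable from the
installed package, and that the iterator yields (start, end, label) index triples
as the English one does; the nlp handle and the sample sentence are illustrative.

# Illustrative sketch only; names and the yielded-triple format are assumptions.
import spacy
from spacy.syntax.iterators import german_noun_chunks

nlp = spacy.load('de')                                  # assumes a German model is installed
doc = nlp(u'Ich habe eine Tasse Tee getrunken.')

# The iterator already worked on a full Doc ...
for start, end, label in german_noun_chunks(doc):
    print(doc[start:end].text)

# ... and with this patch it also accepts a Span, because obj.doc
# resolves to the parent Doc for the vocab/label lookups.
span = doc[2:6]
for start, end, label in german_noun_chunks(span):
    print(doc[start:end].text)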