From 884ba168a88699bedecf55888b670cbf2040a539 Mon Sep 17 00:00:00 2001
From: Jeffrey Gerard
Date: Wed, 23 Aug 2017 21:18:53 -0700
Subject: [PATCH] Capture more noun chunks

---
 spacy/syntax/iterators.pyx             |  2 +-
 spacy/tests/parser/test_noun_chunks.py | 30 ++++++++++++++++++++++++++
 2 files changed, 31 insertions(+), 1 deletion(-)

diff --git a/spacy/syntax/iterators.pyx b/spacy/syntax/iterators.pyx
index 0fe724622..14dba5f9b 100644
--- a/spacy/syntax/iterators.pyx
+++ b/spacy/syntax/iterators.pyx
@@ -9,7 +9,7 @@ def english_noun_chunks(obj):
     Detect base noun phrases from a dependency parse.
     Works on both Doc and Span.
     """
-    labels = ['nsubj', 'dobj', 'nsubjpass', 'pcomp', 'pobj',
+    labels = ['nsubj', 'dobj', 'nsubjpass', 'pcomp', 'pobj', 'dative', 'appos',
               'attr', 'ROOT']
     doc = obj.doc # Ensure works on both Doc and Span.
     np_deps = [doc.vocab.strings[label] for label in labels]
diff --git a/spacy/tests/parser/test_noun_chunks.py b/spacy/tests/parser/test_noun_chunks.py
index 5e8c7659a..ddebca8b8 100644
--- a/spacy/tests/parser/test_noun_chunks.py
+++ b/spacy/tests/parser/test_noun_chunks.py
@@ -47,6 +47,36 @@ def test_parser_noun_chunks_pp_chunks(en_tokenizer):
     assert chunks[1].text_with_ws == "another phrase "
 
 
+def test_parser_noun_chunks_appositional_modifiers(en_tokenizer):
+    text = "Sam, my brother, arrived to the house."
+    heads = [5, -1, 1, -3, -4, 0, -1, 1, -2, -4]
+    tags = ['NNP', ',', 'PRP$', 'NN', ',', 'VBD', 'IN', 'DT', 'NN', '.']
+    deps = ['nsubj', 'punct', 'poss', 'appos', 'punct', 'ROOT', 'prep', 'det', 'pobj', 'punct']
+
+    tokens = en_tokenizer(text)
+    doc = get_doc(tokens.vocab, [t.text for t in tokens], tags=tags, deps=deps, heads=heads)
+    chunks = list(doc.noun_chunks)
+    assert len(chunks) == 3
+    assert chunks[0].text_with_ws == "Sam "
+    assert chunks[1].text_with_ws == "my brother "
+    assert chunks[2].text_with_ws == "the house "
+
+
+def test_parser_noun_chunks_dative(en_tokenizer):
+    text = "She gave Bob a raise."
+    heads = [1, 0, -1, 1, -3, -4]
+    tags = ['PRP', 'VBD', 'NNP', 'DT', 'NN', '.']
+    deps = ['nsubj', 'ROOT', 'dative', 'det', 'dobj', 'punct']
+
+    tokens = en_tokenizer(text)
+    doc = get_doc(tokens.vocab, [t.text for t in tokens], tags=tags, deps=deps, heads=heads)
+    chunks = list(doc.noun_chunks)
+    assert len(chunks) == 3
+    assert chunks[0].text_with_ws == "She "
+    assert chunks[1].text_with_ws == "Bob "
+    assert chunks[2].text_with_ws == "a raise "
+
+
 def test_parser_noun_chunks_standard_de(de_tokenizer):
     text = "Eine Tasse steht auf dem Tisch."
     heads = [1, 1, 0, -1, 1, -2, -4]
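
The tests above drive the chunker through hand-annotated heads and deps. As a
quick sketch of what the change means for end users, the snippet below shows
the dative case through the public API. It assumes a trained English pipeline
is installed (the model name en_core_web_sm is an assumption, not part of this
patch), and the printed chunks depend on the statistical parse, so they may
differ from the hand-annotated expectations in the tests:

    import spacy

    # Hypothetical model choice; any trained English pipeline with a
    # dependency parser should do.
    nlp = spacy.load('en_core_web_sm')

    # "Bob" attaches via a 'dative' arc and was previously skipped by
    # english_noun_chunks; with 'dative' (and 'appos') in the accepted
    # labels it is now yielded as its own base noun phrase.
    doc = nlp(u'She gave Bob a raise.')
    print([chunk.text for chunk in doc.noun_chunks])
    # With a parse matching the test annotations: ['She', 'Bob', 'a raise']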