Mirror of https://github.com/explosion/spaCy.git
Capture more noun chunks

commit 884ba168a8
parent dcff10abe9
@@ -9,7 +9,7 @@ def english_noun_chunks(obj):
     Detect base noun phrases from a dependency parse.
     Works on both Doc and Span.
     """
-    labels = ['nsubj', 'dobj', 'nsubjpass', 'pcomp', 'pobj',
+    labels = ['nsubj', 'dobj', 'nsubjpass', 'pcomp', 'pobj', 'dative', 'appos',
               'attr', 'ROOT']
     doc = obj.doc # Ensure works on both Doc and Span.
     np_deps = [doc.vocab.strings[label] for label in labels]
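For context on what the new labels do: the function interns each label to a hash through doc.vocab.strings, and tokens whose dependency relation matches one of those hashes seed a noun chunk. Below is a minimal sketch of that filter, assuming spaCy's public Doc/Token API; the loop body is a paraphrase for illustration, not the literal remainder of english_noun_chunks.

    from spacy.symbols import NOUN, PRON, PROPN

    def noun_chunk_sketch(doc):
        # Same label set the diff above introduces, including the two
        # newly added relations 'dative' and 'appos'.
        labels = ['nsubj', 'dobj', 'nsubjpass', 'pcomp', 'pobj', 'dative',
                  'appos', 'attr', 'ROOT']
        np_deps = [doc.vocab.strings[label] for label in labels]
        for word in doc:
            # A nominal token with a matching dependency label yields a chunk
            # running from its leftmost syntactic descendant to itself.
            if word.pos in (NOUN, PROPN, PRON) and word.dep in np_deps:
                yield doc[word.left_edge.i : word.i + 1]

Adding 'dative' and 'appos' widens this filter, so indirect objects ("Bob" in "She gave Bob a raise") and appositives ("my brother" in "Sam, my brother, ...") now pass through where they previously fell out.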
@@ -47,6 +47,36 @@ def test_parser_noun_chunks_pp_chunks(en_tokenizer):
     assert chunks[1].text_with_ws == "another phrase "
 
 
+def test_parser_noun_chunks_appositional_modifiers(en_tokenizer):
+    text = "Sam, my brother, arrived to the house."
+    heads = [5, -1, 1, -3, -4, 0, -1, 1, -2, -4]
+    tags = ['NNP', ',', 'PRP$', 'NN', ',', 'VBD', 'IN', 'DT', 'NN', '.']
+    deps = ['nsubj', 'punct', 'poss', 'appos', 'punct', 'ROOT', 'prep', 'det', 'pobj', 'punct']
+
+    tokens = en_tokenizer(text)
+    doc = get_doc(tokens.vocab, [t.text for t in tokens], tags=tags, deps=deps, heads=heads)
+    chunks = list(doc.noun_chunks)
+    assert len(chunks) == 3
+    assert chunks[0].text_with_ws == "Sam "
+    assert chunks[1].text_with_ws == "my brother "
+    assert chunks[2].text_with_ws == "the house "
+
+
+def test_parser_noun_chunks_dative(en_tokenizer):
+    text = "She gave Bob a raise."
+    heads = [1, 0, -1, 1, -3, -4]
+    tags = ['PRP', 'VBD', 'NNP', 'DT', 'NN', '.']
+    deps = ['nsubj', 'ROOT', 'dative', 'det', 'dobj', 'punct']
+
+    tokens = en_tokenizer(text)
+    doc = get_doc(tokens.vocab, [t.text for t in tokens], tags=tags, deps=deps, heads=heads)
+    chunks = list(doc.noun_chunks)
+    assert len(chunks) == 3
+    assert chunks[0].text_with_ws == "She "
+    assert chunks[1].text_with_ws == "Bob "
+    assert chunks[2].text_with_ws == "a raise "
+
+
 def test_parser_noun_chunks_standard_de(de_tokenizer):
     text = "Eine Tasse steht auf dem Tisch."
     heads = [1, 1, 0, -1, 1, -2, -4]
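The tests above construct parses by hand, so they pin down the behaviour independently of any model. The same effect can be checked end to end against a trained English pipeline; the model name below is an assumption (any English model with a dependency parser will do), and the actual chunks depend on how that model parses the sentences.

    import spacy

    # Assumes an English model with a parser is installed, e.g.:
    #   python -m spacy download en_core_web_sm
    nlp = spacy.load('en_core_web_sm')

    for text in ("She gave Bob a raise.", "Sam, my brother, arrived to the house."):
        doc = nlp(text)
        # With 'dative' and 'appos' in the label set, the indirect object
        # and the appositive each surface as their own noun chunk.
        print([chunk.text for chunk in doc.noun_chunks])

    # Expected, per the assertions above (subject to the model's parse):
    #   ['She', 'Bob', 'a raise']
    #   ['Sam', 'my brother', 'the house']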