mirror of https://github.com/explosion/spaCy.git, synced 2025-01-14 03:26:24 +03:00
7e4cd7575c
* Refactor `Doc.is_` flags
* Add derived `Doc.has_annotation` method:
  * `Doc.has_annotation(attr)` returns `True` for partial annotation
  * `Doc.has_annotation(attr, require_complete=True)` returns `True` for complete annotation
* Add deprecation warnings to `is_tagged`, `is_parsed`, `is_sentenced` and `is_nered`
* Add `Doc._get_array_attrs()`, which returns the full list of `Doc` attrs for use with `Doc.to_array`, `Doc.to_bytes` and `Doc.from_docs`. The list is the `DocBin` attributes list plus `SPACY` and `LENGTH`.

Notes on `Doc.has_annotation`:

* `HEAD` is converted to `DEP` because heads don't have an unset state
* `IS_SENT_START` is accepted as a synonym of `SENT_START`

Additional changes:

* Add `NORM`, `ENT_ID` and `SENT_START` to the default attributes for `DocBin`
* In `Doc.from_array()`, the presence of `DEP` causes `HEAD` to override `SENT_START`
* In `Doc.from_array()`, using an `attrs` list other than `Doc._get_array_attrs()` (i.e., a user's custom list rather than the default internal list) with both `HEAD` and `SENT_START` shows a warning that `HEAD` will override `SENT_START`
* `set_children_from_heads` does not require dependency labels to set sentence boundaries, and sets `sent_start` to `-1` for all tokens that are not sentence starts
* Fix call to `set_children_from_heads`

Co-authored-by: Matthew Honnibal <honnibal+gh@gmail.com>
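A minimal sketch of the new API described above, assuming a spaCy v3 install and the `en_core_web_sm` pipeline (both assumptions, not part of this commit):

```python
import spacy

nlp = spacy.load("en_core_web_sm")  # assumed pipeline; any trained pipeline works
doc = nlp("A short example sentence.")

# True if at least one token carries the annotation (partial annotation)
print(doc.has_annotation("DEP"))

# True only if every token carries the annotation
print(doc.has_annotation("DEP", require_complete=True))

# Per the notes above: HEAD is checked via DEP internally, since heads
# have no unset state, and IS_SENT_START is accepted as a synonym of
# SENT_START
print(doc.has_annotation("HEAD"))
print(doc.has_annotation("IS_SENT_START"))
```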
54 lines
1.8 KiB
Python
import pytest

from ...util import get_doc


def test_noun_chunks_is_parsed_sv(sv_tokenizer):
    """Test that noun_chunks raises a ValueError for the 'sv' language if
    the Doc is not parsed."""
    doc = sv_tokenizer("Studenten läste den bästa boken")
    with pytest.raises(ValueError):
        list(doc.noun_chunks)


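# Each example below is a tuple of: the raw text, coarse-grained POS tags,
# dependency labels, head offsets relative to each token (0 marks the root),
# and the expected noun chunk texts.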
SV_NP_TEST_EXAMPLES = [
    (
        "En student läste en bok",  # A student read a book
        ["DET", "NOUN", "VERB", "DET", "NOUN"],
        ["det", "nsubj", "ROOT", "det", "dobj"],
        [1, 1, 0, 1, -2],
        ["En student", "en bok"],
    ),
    (
        "Studenten läste den bästa boken.",  # The student read the best book
        ["NOUN", "VERB", "DET", "ADJ", "NOUN", "PUNCT"],
        ["nsubj", "ROOT", "det", "amod", "dobj", "punct"],
        [1, 0, 2, 1, -3, -4],
        ["Studenten", "den bästa boken"],
    ),
    (
        "De samvetslösa skurkarna hade stulit de största juvelerna på söndagen",  # The remorseless crooks had stolen the largest jewels that Sunday
        ["DET", "ADJ", "NOUN", "VERB", "VERB", "DET", "ADJ", "NOUN", "ADP", "NOUN"],
        ["det", "amod", "nsubj", "aux", "root", "det", "amod", "dobj", "case", "nmod"],
        [2, 1, 2, 1, 0, 2, 1, -3, 1, -5],
        ["De samvetslösa skurkarna", "de största juvelerna", "på söndagen"],
    ),
]


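# Build a parsed Doc from each example with the get_doc test helper and check
# that doc.noun_chunks yields exactly the expected spans, in order.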
@pytest.mark.parametrize(
    "text,pos,deps,heads,expected_noun_chunks", SV_NP_TEST_EXAMPLES
)
def test_sv_noun_chunks(sv_tokenizer, text, pos, deps, heads, expected_noun_chunks):
    tokens = sv_tokenizer(text)

    assert len(heads) == len(pos)
    doc = get_doc(
        tokens.vocab, words=[t.text for t in tokens], heads=heads, deps=deps, pos=pos
    )

    noun_chunks = list(doc.noun_chunks)
    assert len(noun_chunks) == len(expected_noun_chunks)
    for i, np in enumerate(noun_chunks):
        assert np.text == expected_noun_chunks[i]