mirror of https://github.com/explosion/spaCy.git
synced 2024-11-11 12:18:04 +03:00
9ce059dd06
* Limiting noun_chunks for specific langauges
* Limiting noun_chunks for specific languages Contributor Agreement
* Addressing review comments
* Removed unused fixtures and imports
* Add fa_tokenizer in test suite
* Use fa_tokenizer in test
* Undo extraneous reformatting

Co-authored-by: adrianeboyd <adrianeboyd@gmail.com>
17 lines
498 B
Python
# coding: utf-8
from __future__ import unicode_literals

import pytest


def test_noun_chunks_is_parsed_id(id_tokenizer):
    """Test that noun_chunks raises ValueError for 'id' language if Doc is not parsed.
    To check this test, we're constructing a Doc
    with a new Vocab here and forcing is_parsed to 'False'
    to make sure the noun chunks don't run.
    """
    doc = id_tokenizer("sebelas")
    doc.is_parsed = False
    with pytest.raises(ValueError):
        list(doc.noun_chunks)
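For context, the test above relies on an id_tokenizer pytest fixture that is not defined in this file. The sketch below shows one plausible way such a fixture could be provided; it assumes the spaCy v2 API (spacy.util.get_lang_class and Language.Defaults.create_tokenizer) and session-scoped fixtures, and is not the project's actual conftest.py.

# Hypothetical sketch of an `id_tokenizer` fixture (assumes spaCy v2 API;
# the real fixture lives in the test suite's shared conftest.py).
import pytest
from spacy.util import get_lang_class


@pytest.fixture(scope="session")
def id_tokenizer():
    # Build a bare Indonesian tokenizer without loading a full pipeline,
    # so Docs it produces are tokenized but never dependency-parsed.
    return get_lang_class("id").Defaults.create_tokenizer()

Because the tokenizer alone never sets dependency parses, forcing doc.is_parsed to False and then iterating doc.noun_chunks exercises the ValueError path that this commit limits to languages with noun_chunks support.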