Mirror of https://github.com/explosion/spaCy.git (synced 2024-12-27 10:26:35 +03:00)
1448ad100c
* Improved stop words list
* Removed some wrong stop words from the list
* Improved Polish tokenizer (#38)
* Add tests for Polish tokenizer
* Add Polish tokenizer exceptions (see the sketch after this list)
* Don't split any words containing hyphens
* Fix test case with wrong model answer
* Remove commented-out line of code until a better solution is found
* Add source srx's license
* Rename exception_list.py to match spaCy conventions
* Add a brief explanation of where the exception list comes from
* Add newline after each exception
* Rename COPYING.txt to LICENSE
* Delete old files
* Add header to the license
* Agreements signed
* Stanisław Giziński agreement
* Krzysztof Kowalczyk - signed agreement
* Mateusz Olko agreement
* Add DoomCoder's contributor agreement
* Improve like-number checking in Polish lang
* like_num tests added
* All number words from the SI system added
* Final license and removed splitting exceptions
* Added Polish stop words to LEX_ATTRS
* Add encoding info to pl tokenizer exceptions
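The "Add Polish tokenizer exceptions" item refers to spaCy's per-language exception tables, which keep strings such as abbreviations from being split at punctuation. Below is a minimal sketch of what such a table looks like in a spaCy lang module; the abbreviations are illustrative stand-ins, not the actual SRX-derived list this PR imported:

# A hedged sketch of a spaCy tokenizer-exceptions table, in the style of
# spacy/lang/<code>/tokenizer_exceptions.py. The entries are illustrative
# Polish abbreviations, not the SRX-derived list from this PR.
from spacy.symbols import ORTH

_exc = {}
# Each exception maps the raw string to the token(s) it should become, so
# the tokenizer keeps e.g. "np." as one token instead of splitting the ".".
for orth in ["np.", "itd.", "itp.", "tzn."]:
    _exc[orth] = [{ORTH: orth}]

TOKENIZER_EXCEPTIONS = _exc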
18 lines
496 B
Python
# coding: utf-8
"""Words like numbers are recognized correctly."""

from __future__ import unicode_literals

import pytest


@pytest.mark.parametrize('text,match', [
    # Digits and digit groupings; '10,00' uses the Polish decimal comma
    ('10', True), ('1', True), ('10,000', True), ('10,00', True),
    # Polish number words: jeden "one", dwa "two", milion "million"
    ('jeden', True), ('dwa', True), ('milion', True),
    # Non-numbers: pies "dog" and bare punctuation; fractions like '1/2' count
    ('pies', False), (',', False), ('1/2', True)])
def test_lex_attrs_like_number(pl_tokenizer, text, match):
    tokens = pl_tokenizer(text)
    assert len(tokens) == 1
    assert tokens[0].like_num == match
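The pl_tokenizer fixture is not defined in this file; it comes from the test suite's conftest.py. A minimal sketch of such a fixture, assuming the tokenizer of a blank Polish pipeline is sufficient for lexical-attribute tests:

# conftest.py — a hedged sketch of the fixture this test assumes
import pytest
from spacy.lang.pl import Polish


@pytest.fixture
def pl_tokenizer():
    # Tokenizer of a blank Polish pipeline; like_num is a lexical
    # attribute, so no trained components are needed
    return Polish().tokenizer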
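What the test exercises is the LIKE_NUM lexical attribute registered by the Polish lang module via a LEX_ATTRS table. The following is a hedged sketch of how such an attribute is typically wired up in spaCy; the word list and parsing rules are illustrative, not the actual spacy/lang/pl implementation:

# lex_attrs.py — a sketch of a LIKE_NUM attribute consistent with the test cases
from spacy.attrs import LIKE_NUM

# Illustrative subset; the real Polish list covers many more number words
_num_words = ['zero', 'jeden', 'dwa', 'trzy', 'cztery', 'milion', 'miliard']


def like_num(text):
    # Drop digit-group separators and Polish decimal commas ('10,000', '10,00')
    stripped = text.replace(',', '').replace('.', '')
    if stripped.isdigit():
        return True
    # Simple fractions such as '1/2'
    if stripped.count('/') == 1:
        num, denom = stripped.split('/')
        if num.isdigit() and denom.isdigit():
            return True
    return text.lower() in _num_words


LEX_ATTRS = {LIKE_NUM: like_num}

spaCy picks up a language's LEX_ATTRS through its Defaults, so tokens produced by the Polish tokenizer report token.like_num according to this function.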