Mirror of https://github.com/explosion/spaCy.git
Commit 9745b0d523
## Description

1. Added the same infix rule as in French (`d'une`, `j'ai`) for Italian (`c'è`, `l'ha`), bringing the F-score on `it_isdt-ud-train.txt` from 96% to 99%. Added a unit test to check this behaviour. (An illustrative sketch of the rule follows the checklist below.)
2. Added a specific Urdu punctuation character as a suffix, improving the F-score on `ur_udtb-ud-train.txt` from 94% to 100%. Added a unit test to check this behaviour.

### Types of change

Enhancement of Italian & Urdu tokenization.

## Checklist

- [x] I have submitted the spaCy Contributor Agreement.
- [x] I ran the tests, and all new and existing tests passed.
- [x] My changes don't require a change to the documentation, or if they do, I've added all required information.
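The punctuation rules themselves live in per-language `punctuation.py` modules, which this view doesn't show. As a rough sketch only, an elision infix in the style of spaCy v2's French rules might look like the following; the `ELISION` character set is an assumption, not the verbatim patch:

```python
# coding: utf8
from __future__ import unicode_literals

from ..punctuation import TOKENIZER_INFIXES
from ..char_classes import ALPHA

# Apostrophe variants involved in elision (assumed set).
ELISION = "'’"

# Split after letter+apostrophe and before the next letter, so that
# "c'è" tokenizes as ["c'", "è"] and "l'ha" as ["l'", "ha"].
_infixes = TOKENIZER_INFIXES + [
    r"(?<=[{a}][{el}])(?=[{a}])".format(a=ALPHA, el=ELISION)
]

TOKENIZER_INFIXES = _infixes
```

The Urdu change is the analogous move on the suffix list; the description doesn't name the character, so the Urdu full stop `۔` (U+06D4) below is a guess:

```python
from ..punctuation import TOKENIZER_SUFFIXES

# Assumed: append the Urdu full stop so it is split off as a suffix.
_suffixes = TOKENIZER_SUFFIXES + ["۔"]

TOKENIZER_SUFFIXES = _suffixes
```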
`spacy/lang/ur/__init__.py` · 32 lines · 718 B · Python
# coding: utf8
from __future__ import unicode_literals

from .stop_words import STOP_WORDS
from .lex_attrs import LEX_ATTRS
from ..tag_map import TAG_MAP

from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ...language import Language
from ...attrs import LANG

from .punctuation import TOKENIZER_SUFFIXES


class UrduDefaults(Language.Defaults):
    # Copy the shared lexical attribute getters, overlay the
    # Urdu-specific ones, and mark the language as "ur".
    lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
    lex_attr_getters.update(LEX_ATTRS)
    lex_attr_getters[LANG] = lambda text: "ur"

    tokenizer_exceptions = BASE_EXCEPTIONS
    tag_map = TAG_MAP
    stop_words = STOP_WORDS
    # Language-specific suffix rules added in this PR, so the Urdu
    # punctuation character is split off as its own token.
    suffixes = TOKENIZER_SUFFIXES


class Urdu(Language):
    lang = "ur"
    Defaults = UrduDefaults


__all__ = ["Urdu"]
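A quick smoke test of the class above (the sentence and the expected split are illustrative, assuming the suffix change covers the Urdu full stop):

```python
from spacy.lang.ur import Urdu

nlp = Urdu()
doc = nlp("یہ ایک جملہ ہے۔")  # "This is a sentence."
# With "۔" registered as a suffix, it becomes its own token:
print([t.text for t in doc])  # e.g. ["یہ", "ایک", "جملہ", "ہے", "۔"]
```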