# Mirror of https://github.com/explosion/spaCy.git (synced 2024-11-14 21:57:15 +03:00)
# Commit 86c43e55fa:
#   * Improve Lithuanian tokenization: modify Lithuanian tokenization to
#     improve performance for UD_Lithuanian-ALKSNIS.
#   * Update Lithuanian tokenizer tests.
# (30 lines, 771 B, Python)
# coding: utf8
from __future__ import unicode_literals

from ..char_classes import LIST_ICONS, LIST_ELLIPSES
from ..char_classes import CONCAT_QUOTES, ALPHA_LOWER, ALPHA_UPPER, ALPHA
from ..char_classes import HYPHENS
from ..punctuation import TOKENIZER_SUFFIXES

# Language-specific infix patterns: token-internal positions where the
# tokenizer is allowed to split.  Each entry is a regex with lookbehind /
# lookahead anchors so the match itself consumes no characters.
_infix_patterns = [
    # arithmetic operators between digits (e.g. "2+3", "4*5", "2^10")
    r"(?<=[0-9])[+\*^](?=[0-9-])",
    # a period between a lowercase/quote and an uppercase/quote
    # (sentence boundary glued to the next word)
    r"(?<=[{al}{q}])\.(?=[{au}{q}])".format(
        al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES
    ),
    # comma directly between two letters, no space
    r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
    # hyphen-like characters between letters
    r"(?<=[{a}])(?:{h})(?=[{a}])".format(a=ALPHA, h=HYPHENS),
    # punctuation/operator glued between alphanumerics and a letter
    r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA),
]

# Combine the shared ellipsis/icon patterns with the rules above.
_infixes = LIST_ELLIPSES + LIST_ICONS + _infix_patterns
# Suffix rules: prepend a literal-dot pattern to the shared defaults.
# NOTE: the original used "\." — an invalid escape sequence that Python only
# tolerates with a DeprecationWarning (a SyntaxWarning/error in newer
# versions).  The intent is a regex-escaped dot, so use a raw string.
_suffixes = [r"\."] + list(TOKENIZER_SUFFIXES)
# Public tokenizer resources consumed by the Lithuanian Language defaults.
TOKENIZER_INFIXES = _infixes
TOKENIZER_SUFFIXES = _suffixes