spaCy/spacy/lang/lt/punctuation.py
adrianeboyd 86c43e55fa
Improve Lithuanian tokenization (#5205)
* Improve Lithuanian tokenization

Modify Lithuanian tokenization to improve performance for
UD_Lithuanian-ALKSNIS.

* Update Lithuanian tokenizer tests
2020-03-25 11:28:12 +01:00

# coding: utf8
from __future__ import unicode_literals
from ..char_classes import LIST_ICONS, LIST_ELLIPSES
from ..char_classes import CONCAT_QUOTES, ALPHA_LOWER, ALPHA_UPPER, ALPHA
from ..char_classes import HYPHENS
from ..punctuation import TOKENIZER_SUFFIXES

_infixes = (
    LIST_ELLIPSES
    + LIST_ICONS
    + [
        # split +, * or ^ between a digit and a digit or hyphen, e.g. "2+3"
        r"(?<=[0-9])[+\*^](?=[0-9-])",
        # split a period between a lowercase letter/quote and an uppercase letter/quote
        r"(?<=[{al}{q}])\.(?=[{au}{q}])".format(
            al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES
        ),
        # split a comma between two alphabetic characters
        r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
        # split hyphens between alphabetic characters
        r"(?<=[{a}])(?:{h})(?=[{a}])".format(a=ALPHA, h=HYPHENS),
        # split :, <, >, = or / between a letter/digit and a letter
        r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA),
    ]
)
_suffixes = ["\."] + list(TOKENIZER_SUFFIXES)
TOKENIZER_INFIXES = _infixes
TOKENIZER_SUFFIXES = _suffixes
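
For context, a minimal sketch of how these exported lists are typically consumed: spaCy compiles them into the infix and suffix regexes used by the language's tokenizer. The sample text below is an illustrative assumption, not taken from the tokenizer tests.

# Usage sketch, assuming spaCy v2.x with the Lithuanian language class available.
from spacy.lang.lt import Lithuanian
from spacy.lang.lt.punctuation import TOKENIZER_INFIXES, TOKENIZER_SUFFIXES
from spacy.util import compile_infix_regex, compile_suffix_regex

# The language defaults compile these lists into the tokenizer's regexes.
infix_re = compile_infix_regex(TOKENIZER_INFIXES)
suffix_re = compile_suffix_regex(TOKENIZER_SUFFIXES)

nlp = Lithuanian()            # blank pipeline; its tokenizer already uses these rules
doc = nlp("Jis sakė: rytoj, poryt.")
print([t.text for t in doc])  # inspect how infix and suffix rules split the text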