# coding: utf8
from __future__ import unicode_literals
import regex as re
from ._tokenizer_exceptions_list import ID_BASE_EXCEPTIONS
from ..tokenizer_exceptions import URL_PATTERN
from ..char_classes import ALPHA
from ...symbols import ORTH
_exc = {}
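# Add every base exception in its original casing plus title-cased,
# upper-cased and lower-cased variants, so each form is kept as a single token.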
for orth in ID_BASE_EXCEPTIONS:
    _exc[orth] = [{ORTH: orth}]

    orth_title = orth.title()
    _exc[orth_title] = [{ORTH: orth_title}]

    orth_caps = orth.upper()
    _exc[orth_caps] = [{ORTH: orth_caps}]

    orth_lower = orth.lower()
    _exc[orth_lower] = [{ORTH: orth_lower}]
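    # Hyphenated exceptions also get variants with each hyphen-separated part
    # title-cased or upper-cased.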
    if '-' in orth:
        orth_title = '-'.join([part.title() for part in orth.split('-')])
        _exc[orth_title] = [{ORTH: orth_title}]

        orth_caps = '-'.join([part.upper() for part in orth.split('-')])
        _exc[orth_caps] = [{ORTH: orth_caps}]
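# Prefixes and name particles that commonly combine with the following word
# via a hyphen (e.g. "anti-korupsi", "non-blok"); such tokens stay unsplit.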
_hyphen_prefix = """Abdur Abdus Abou Aboul Abror Abshar Abu Abubakar Abul
|
|
|
|
Aero Agri Agro Ahmadi Ahmed Air abd abdel abdul ad adz afro al ala ali all
|
|
|
|
amir an antar anti ar as ash asy at ath az bekas ber best bi co di double
|
|
|
|
dual duo e eco eks el era ex full hi high i in inter intra ke kontra korona
|
|
|
|
kuartal lintas m macro makro me mem meng micro mid mikro mini multi neo nge
|
|
|
|
no non on pan pasca pe pem poli poly post pra pre pro re se self serba seri
|
|
|
|
sub super t trans ultra un x """.split()
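# Prefix/suffix pairs that wrap a hyphenated stem (e.g. the "di-...-kan" or
# "me-...-kan" patterns, typically written with hyphens around acronyms or
# foreign words).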
_hyphen_infix = """me-kan me-kannya men-kan men-kannya meng-kannya ke-an
|
|
|
|
ke-annya di-kan di-kannya de-isasi ber-an berke-an""".split()
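# One anchored, case-insensitive pattern per prefix and per prefix/suffix pair;
# the part joined by the hyphen(s) must be one or more alphabetic characters
# from the ALPHA character class.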
_regular_exp = ['^{p}-[{a}]+$'.format(p=prefix, a=ALPHA)
                for prefix in _hyphen_prefix]
_regular_exp += ['^{0}-[{a}]+-{1}$'.format(*infix.split('-'), a=ALPHA)
                 for infix in _hyphen_infix]
_regular_exp.append(URL_PATTERN)
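# TOKENIZER_EXCEPTIONS feeds the tokenizer's exception table; TOKEN_MATCH keeps
# any string that fully matches one of the patterns above (or URL_PATTERN) as a
# single token.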
TOKENIZER_EXCEPTIONS = dict(_exc)
TOKEN_MATCH = re.compile('|'.join('(?:{})'.format(m) for m in _regular_exp), re.IGNORECASE).match
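# Illustrative wiring sketch (an assumption about how these tables are consumed
# by the language defaults outside this module, roughly as in a spaCy v2-style
# lang/id/__init__.py; the names below are not defined here):
#
#     from ...language import Language
#     from ...util import update_exc
#     from ..tokenizer_exceptions import BASE_EXCEPTIONS
#     from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS, TOKEN_MATCH
#
#     class Indonesian(Language):
#         lang = 'id'
#
#         class Defaults(Language.Defaults):
#             tokenizer_exceptions = update_exc(BASE_EXCEPTIONS,
#                                               TOKENIZER_EXCEPTIONS)
#             token_match = TOKEN_MATCH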