From f77443ab68d79103ab69fe29d22eaf38b2c3858c Mon Sep 17 00:00:00 2001
From: Jim Geovedi
Date: Sun, 20 Aug 2017 13:43:21 +0700
Subject: [PATCH] reworked

---
 spacy/lang/id/__init__.py             |  3 +--
 spacy/lang/id/tokenizer_exceptions.py | 22 +---------------------
 2 files changed, 2 insertions(+), 23 deletions(-)

diff --git a/spacy/lang/id/__init__.py b/spacy/lang/id/__init__.py
index a65a5b24f..e0cfa941d 100644
--- a/spacy/lang/id/__init__.py
+++ b/spacy/lang/id/__init__.py
@@ -3,7 +3,7 @@ from __future__ import unicode_literals
 
 from .stop_words import STOP_WORDS
 from .punctuation import TOKENIZER_SUFFIXES, TOKENIZER_PREFIXES, TOKENIZER_INFIXES
-from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS, TOKEN_MATCH
+from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
 from .norm_exceptions import NORM_EXCEPTIONS
 from .lemmatizer import LOOKUP
 from .lex_attrs import LEX_ATTRS
@@ -24,7 +24,6 @@ class IndonesianDefaults(Language.Defaults):
 
     tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
     stop_words = set(STOP_WORDS)
-    token_match = TOKEN_MATCH
     prefixes = tuple(TOKENIZER_PREFIXES)
     suffixes = tuple(TOKENIZER_SUFFIXES)
     infixes = tuple(TOKENIZER_INFIXES)
diff --git a/spacy/lang/id/tokenizer_exceptions.py b/spacy/lang/id/tokenizer_exceptions.py
index 3bbb4b385..9978606b0 100644
--- a/spacy/lang/id/tokenizer_exceptions.py
+++ b/spacy/lang/id/tokenizer_exceptions.py
@@ -46,25 +46,5 @@ for orth in [
 ]:
     _exc[orth] = [{ORTH: orth}]
 
-
-_hyphen_prefix = """abdur abdus abou aboul abror abshar abu abubakar abul
-aero agri agro ahmadi ahmed air abd abdel abdul ad adz afro al ala ali all
-amir an antar anti ar as ash asy at ath az bekas ber best bi co di double
-dual duo e eco eks el era ex full hi high i in inter intra ke kontra korona
-kuartal lintas m macro makro me mem meng micro mid mikro mini multi neo nge
-no non on pan pasca pe pem poli poly post pra pre pro re se self serba seri
-sub super t trans u ultra un x""".split()
-
-_hyphen_infix = """ber-an berke-an de-isasi di-kan di-kannya di-nya ke-an
-ke-annya me-kan me-kannya men-kan men-kannya meng-kannya pe-an pen-an
-per-an per-i se-an se-nya ter-i ter-kan ter-kannya""".split()
-
-_hyphen_suffix = """el"""
-
-_regular_exp = ['^{p}-[A-Za-z0-9]+$'.format(p=prefix) for prefix in _hyphen_prefix]
-_regular_exp += ['^{0}-[A-Za-z0-9]+-{1}$'.format(*infix.split('-')) for infix in _hyphen_infix]
-_regular_exp += ['^[A-Za-z0-9]+-{s}$'.format(s=suffix) for suffix in _hyphen_suffix]
-_regular_exp.append(URL_PATTERN)
-
 TOKENIZER_EXCEPTIONS = dict(_exc)
-TOKEN_MATCH = re.compile('|'.join('(?:{})'.format(m) for m in _regular_exp), re.IGNORECASE).match
+
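
Editorial note (illustrative, not part of the patch): the removed TOKEN_MATCH or'ed the hyphenation patterns and spaCy's URL_PATTERN into one case-insensitive matcher that kept whole hyphenated forms as single tokens. Below is a minimal sketch of that behavior, with the affix lists shortened and the example words chosen here purely for illustration; the removed code also declared _hyphen_suffix as a bare string rather than a list, which this sketch normalizes with .split().

import re

# Shortened stand-ins for the deleted affix lists (assumption: same
# pattern shapes as the removed code). URL_PATTERN is omitted here.
_hyphen_prefix = """anti non se""".split()
_hyphen_infix = """ke-an di-nya""".split()
_hyphen_suffix = """el""".split()

# Same construction as the removed code: anchored patterns per affix...
_regular_exp = ['^{p}-[A-Za-z0-9]+$'.format(p=prefix) for prefix in _hyphen_prefix]
_regular_exp += ['^{0}-[A-Za-z0-9]+-{1}$'.format(*infix.split('-')) for infix in _hyphen_infix]
_regular_exp += ['^[A-Za-z0-9]+-{s}$'.format(s=suffix) for suffix in _hyphen_suffix]

# ...joined into one alternation and matched case-insensitively against
# the whole candidate token.
TOKEN_MATCH = re.compile('|'.join('(?:{})'.format(m) for m in _regular_exp),
                         re.IGNORECASE).match

assert TOKEN_MATCH('anti-korupsi')        # prefix rule: anti-
assert TOKEN_MATCH('ke-merdeka-an')       # infix rule: ke-...-an
assert TOKEN_MATCH('biasa-saja') is None  # no listed affix, so no match

A tokenizer's token_match hook fires on whole candidate strings, so removing it means these hyphenated forms now fall through to the regular prefix/suffix/infix splitting rules instead of being preserved intact.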