spaCy/spacy/lang/id/lex_attrs.py
Andrew Ongko 81564cc4e8 Update Indonesian model (#2752)
* adding e-KTP in tokenizer exceptions list

* add exception token

* removing lines containing spaces, as it won't matter since we use the .split() method in the end; added new tokens in exceptions

* add tokenizer exceptions list

* combining base_norms with norm_exceptions

* adding norm_exception

* fix double key in lemmatizer

* remove unused import on punctuation.py

* reformat stop_words to reduce the number of lines and improve readability

* updating tokenizer exception

* implement is_currency for lang/id

* adding orth_first_upper in tokenizer_exceptions

* update the norm_exception list

* remove a bunch of abbreviations

* adding contributors file
2018-09-14 12:30:32 +02:00

# coding: utf8
from __future__ import unicode_literals
import unicodedata
from .punctuation import LIST_CURRENCY
from ...attrs import IS_CURRENCY, LIKE_NUM

# Indonesian number words, from "nol" (zero) up to large-scale numerals.
_num_words = ['nol', 'satu', 'dua', 'tiga', 'empat', 'lima', 'enam', 'tujuh',
              'delapan', 'sembilan', 'sepuluh', 'sebelas', 'belas', 'puluh',
              'ratus', 'ribu', 'juta', 'miliar', 'biliun', 'triliun', 'kuadriliun',
              'kuintiliun', 'sekstiliun', 'septiliun', 'oktiliun', 'noniliun', 'desiliun']


def like_num(text):
    # Strip thousand/decimal separators before checking for digits.
    text = text.replace(',', '').replace('.', '')
    if text.isdigit():
        return True
    # Simple fractions such as "3/4".
    if text.count('/') == 1:
        num, denom = text.split('/')
        if num.isdigit() and denom.isdigit():
            return True
    # Spelled-out Indonesian number words.
    if text.lower() in _num_words:
        return True
    # Hyphenated forms whose second part is a digit or number word, e.g. ordinals.
    if text.count('-') == 1:
        _, num = text.split('-')
        if num.isdigit() or num in _num_words:
            return True
    return False


def is_currency(text):
    # Match known currency strings from this language's punctuation module,
    # or any token made up entirely of Unicode currency symbols (category "Sc").
    if text in LIST_CURRENCY:
        return True
    for char in text:
        if unicodedata.category(char) != 'Sc':
            return False
    return True


LEX_ATTRS = {
    IS_CURRENCY: is_currency,
    LIKE_NUM: like_num
}
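
For reference, a minimal sketch of how these getters behave when called directly. It assumes spaCy is installed so the module imports under the path shown above (spacy.lang.id.lex_attrs); the sample strings are illustrative choices for this sketch, and whether a given string such as "Rp" or "$" is in LIST_CURRENCY depends on lang/id/punctuation.py.

from spacy.lang.id.lex_attrs import like_num, is_currency

print(like_num('10.000'))    # True  - separators stripped, remainder is all digits
print(like_num('3/4'))       # True  - simple fraction
print(like_num('sepuluh'))   # True  - Indonesian number word
print(like_num('ke-5'))      # True  - hyphenated form ending in a digit
print(like_num('kata'))      # False - ordinary word

print(is_currency('$'))      # True  - every character has Unicode category "Sc"
print(is_currency('Rp5000')) # False - mixes letters and digits, no exact match in LIST_CURRENCY

The LEX_ATTRS dict at the bottom of the module is what spaCy merges into the Indonesian language defaults, so these two functions back the like_num and is_currency lexeme attributes for Indonesian tokens.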