Fixed emoji handling for Hungarian

Gyorgy Orosz 2017-05-30 21:34:46 +02:00
parent f86289566a
commit 8c0b4b850e
2 changed files with 6 additions and 11 deletions

View File

@@ -1,18 +1,17 @@
 # coding: utf8
 from __future__ import unicode_literals
-from ..punctuation import TOKENIZER_INFIXES
-from ..char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES, CURRENCY
+from ..char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES
 from ..char_classes import QUOTES, UNITS, ALPHA, ALPHA_LOWER, ALPHA_UPPER
+LIST_ICONS = [r'[\p{So}--[°]]']
 _currency = r'\$|¢|£|€|¥|฿'
 _quotes = QUOTES.replace("'", '')
-_prefixes = ([r'\+'] + LIST_PUNCT + LIST_ELLIPSES + LIST_QUOTES)
+_prefixes = ([r'\+'] + LIST_PUNCT + LIST_ELLIPSES + LIST_QUOTES + LIST_ICONS)
-_suffixes = (LIST_PUNCT + LIST_ELLIPSES + LIST_QUOTES +
+_suffixes = (LIST_PUNCT + LIST_ELLIPSES + LIST_QUOTES + LIST_ICONS +
              [r'(?<=[0-9])\+',
               r'(?<=°[FfCcKk])\.',
               r'(?<=[0-9])(?:{})'.format(_currency),
@@ -20,8 +19,7 @@ _suffixes = (LIST_PUNCT + LIST_ELLIPSES + LIST_QUOTES +
               r'(?<=[{}{}{}(?:{})])\.'.format(ALPHA_LOWER, r'%²\-\)\]\+', QUOTES, _currency),
               r'(?<=[{})])-e'.format(ALPHA_LOWER)])
-_infixes = (LIST_ELLIPSES +
+_infixes = (LIST_ELLIPSES + LIST_ICONS +
             [r'(?<=[{}])\.(?=[{}])'.format(ALPHA_LOWER, ALPHA_UPPER),
              r'(?<=[{a}]),(?=[{a}])'.format(a=ALPHA),
              r'(?<=[{a}"])[:<>=](?=[{a}])'.format(a=ALPHA),
@@ -29,7 +27,6 @@ _infixes = (LIST_ELLIPSES +
              r'(?<=[{a}]),(?=[{a}])'.format(a=ALPHA),
              r'(?<=[{a}])([{q}\)\]\(\[])(?=[\-{a}])'.format(a=ALPHA, q=_quotes)])
 TOKENIZER_PREFIXES = _prefixes
 TOKENIZER_SUFFIXES = _suffixes
 TOKENIZER_INFIXES = _infixes
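
The new LIST_ICONS class relies on the third-party regex package rather than the stdlib re module: \p{So} matches the Unicode "Symbol, other" category that emoji belong to, and the `--[°]` set subtraction keeps the degree sign out so suffix rules like `(?<=°[FfCcKk])\.` still apply. A minimal standalone sketch of that behaviour (not part of the commit; the explicit regex.V1 flag is an assumption here, chosen because set operations need VERSION1 semantics):

    import regex

    # Hypothetical standalone check of the LIST_ICONS pattern above.
    # \p{So} and the `--` set subtraction need the regex package with
    # VERSION1 semantics; the stdlib re module supports neither.
    ICONS = regex.compile(r'[\p{So}--[°]]', regex.V1)

    assert ICONS.search('🤘🤘yay!')          # emoji fall in the So category
    assert ICONS.search('25°C') is None      # ° is So too, but subtracted out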

View File

@@ -41,7 +41,5 @@ def test_tokenizer_excludes_false_pos_emoticons(tokenizer, text, length):
 @pytest.mark.parametrize('text,length', [('can you still dunk?🍕🍔😵LOL', 8),
                                          ('i💙you', 3), ('🤘🤘yay!', 4)])
 def test_tokenizer_handles_emoji(tokenizer, text, length):
-    exceptions = ["hu"]
     tokens = tokenizer(text)
-    if tokens[0].lang_ not in exceptions:
-        assert len(tokens) == length
+    assert len(tokens) == length
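
With the Hungarian-specific escape hatch removed, the emoji test now asserts the same token counts for every language fixture, including Hungarian. A quick manual check of the Hungarian tokenizer, sketched against the spaCy 2.x-style spacy.lang.hu entry point (the exact import path depends on the installed spaCy version):

    from spacy.lang.hu import Hungarian

    # Blank Hungarian pipeline; only the tokenizer is needed for this check.
    nlp = Hungarian()
    tokens = nlp.tokenizer('i💙you')
    print([t.text for t in tokens])   # expected after this fix: ['i', '💙', 'you']
    assert len(tokens) == 3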