mirror of https://github.com/explosion/spaCy.git

commit 1be5da1ac6 (parent b438cfddbc)

    Fixed Hungarian tokenizer for numbers
@@ -1,6 +1,7 @@
 # encoding: utf8
 from __future__ import unicode_literals, print_function

+from spacy.hu.tokenizer_exceptions import TOKEN_MATCH
 from .language_data import *
 from ..attrs import LANG
 from ..language import Language
@@ -21,3 +22,5 @@ class Hungarian(Language):
         infixes = tuple(TOKENIZER_INFIXES)

         stop_words = set(STOP_WORDS)
+
+        token_match = TOKEN_MATCH
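This first hunk (by its imports, apparently spacy/hu/__init__.py) wires the new TOKEN_MATCH into the Hungarian language defaults. In spaCy's tokenizer, a token_match callable is tried on a candidate substring before the infix rules, and a whole-string match keeps the substring as a single token. A toy sketch of that control flow, not spaCy's actual implementation:

    import re

    # Toy stand-in for TOKEN_MATCH: whole-string signed/decimal number.
    NUM_MATCH = re.compile(r"^[+\-]?\d+([,.]\d+)*$").match

    def split_on_infixes(substring, token_match):
        # A full token_match short-circuits infix splitting.
        if token_match(substring):
            return [substring]
        # Otherwise split on operators between digits (one of the old infix rules).
        return [p for p in re.split(r"(?<=[0-9])([+\-*/^])(?=[0-9])", substring) if p]

    print(split_on_infixes("-23,12", NUM_MATCH))    # ['-23,12']
    print(split_on_infixes("2+3", lambda s: None))  # ['2', '+', '3']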
@@ -1,8 +1,6 @@
 # encoding: utf8
 from __future__ import unicode_literals

-import six
-
 from spacy.language_data import strings_to_exc, update_exc
 from .punctuation import *
 from .stop_words import STOP_WORDS
@@ -10,19 +8,15 @@ from .tokenizer_exceptions import ABBREVIATIONS
 from .tokenizer_exceptions import OTHER_EXC
 from .. import language_data as base

-
 STOP_WORDS = set(STOP_WORDS)

-
 TOKENIZER_EXCEPTIONS = strings_to_exc(base.EMOTICONS)
 update_exc(TOKENIZER_EXCEPTIONS, strings_to_exc(base.ABBREVIATIONS))
 update_exc(TOKENIZER_EXCEPTIONS, strings_to_exc(OTHER_EXC))
 update_exc(TOKENIZER_EXCEPTIONS, strings_to_exc(ABBREVIATIONS))

-
 TOKENIZER_PREFIXES = base.TOKENIZER_PREFIXES
-TOKENIZER_SUFFIXES = base.TOKENIZER_SUFFIXES + TOKENIZER_SUFFIXES
+TOKENIZER_SUFFIXES = TOKENIZER_SUFFIXES
 TOKENIZER_INFIXES = TOKENIZER_INFIXES

-
 __all__ = ["TOKENIZER_EXCEPTIONS", "STOP_WORDS", "TOKENIZER_PREFIXES", "TOKENIZER_SUFFIXES", "TOKENIZER_INFIXES"]
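The substantive change above (apparently spacy/hu/language_data.py): Hungarian stops appending the shared base suffixes, since the punctuation module in the next hunk now defines a complete suffix list of its own, and the unused six import goes away. For orientation, a rough paraphrase of what the exception-merging helpers do — a hedged sketch, not spaCy's code:

    def strings_to_exc(strings):
        # Each literal becomes a tokenizer exception mapping to itself as a
        # single token (assuming ORTH-style attribute dicts).
        return {s: [{"ORTH": s}] for s in strings}

    def update_exc(base_exc, additions):
        # Later updates win on key collisions.
        base_exc.update(additions)

    exc = strings_to_exc([":)", ":("])               # e.g. base.EMOTICONS
    update_exc(exc, strings_to_exc(["AkH.", "Aö."]))
    print(sorted(exc))                               # [':(', ':)', 'AkH.', 'Aö.']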
@@ -1,25 +1,35 @@
 # encoding: utf8
 from __future__ import unicode_literals

-from ..language_data.punctuation import ALPHA, ALPHA_LOWER, ALPHA_UPPER, LIST_ELLIPSES
+from ..language_data.punctuation import ALPHA_LOWER, LIST_ELLIPSES, QUOTES, ALPHA_UPPER, LIST_QUOTES, UNITS, \
+    CURRENCY, LIST_PUNCT, ALPHA

+CURRENCY_SYMBOLS = r"\$ ¢ £ € ¥ ฿"

-TOKENIZER_SUFFIXES = [
-    r'(?<=[{al})])-e'.format(al=ALPHA_LOWER)
-]
+TOKENIZER_SUFFIXES = (
+    LIST_PUNCT +
+    LIST_ELLIPSES +
+    LIST_QUOTES +
+    [
+        r'(?<=[0-9])\+',
+        r'(?<=°[FfCcKk])\.',
+        r'(?<=[0-9])(?:{c})'.format(c=CURRENCY),
+        r'(?<=[0-9])(?:{u})'.format(u=UNITS),
+        r'(?<=[{al}{p}{c}(?:{q})])\.'.format(al=ALPHA_LOWER, p=r'%²\-\)\]\+', q=QUOTES, c=CURRENCY_SYMBOLS),
+        r'(?<=[{al})])-e'.format(al=ALPHA_LOWER)
+    ]
+)

-TOKENIZER_INFIXES = [
-    r'(?<=[0-9])-(?=[0-9])',
-    r'(?<=[0-9])[+\-\*/^](?=[0-9])',
-    r'(?<=[{a}])--(?=[{a}])',
-    r'(?<=[{a}]),(?=[{a}])'.format(a=ALPHA),
-    r'(?<=[{al}])\.(?=[{au}])'.format(al=ALPHA_LOWER, au=ALPHA_UPPER),
-    r'(?<=[0-9{a}])"(?=[\-{a}])'.format(a=ALPHA),
-    r'(?<=[{a}"])[:<>=](?=[{a}])'.format(a=ALPHA)
-]
-
-TOKENIZER_INFIXES += LIST_ELLIPSES
+TOKENIZER_INFIXES = (
+    LIST_ELLIPSES +
+    [
+        r'(?<=[{al}])\.(?=[{au}])'.format(al=ALPHA_LOWER, au=ALPHA_UPPER),
+        r'(?<=[{a}]),(?=[{a}])'.format(a=ALPHA),
+        r'(?<=[{a}"])[:<>=](?=[{a}])'.format(a=ALPHA),
+        r'(?<=[{a}])--(?=[{a}])'.format(a=ALPHA),
+        r'(?<=[{a}]),(?=[{a}])'.format(a=ALPHA),
+        r'(?<=[0-9{a}])({q})(?=[\-{a}])'.format(a=ALPHA, q=QUOTES),
+    ]
+)

 __all__ = ["TOKENIZER_SUFFIXES", "TOKENIZER_INFIXES"]
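The suffix list (apparently spacy/hu/punctuation.py) grows from the single -e clitic rule to a full set covering punctuation, ellipses, quotes, currency, and units, while the infixes lose the digit-operator splits that the new TOKEN_MATCH makes unnecessary. Roughly how an ordered, end-anchored suffix list is applied — each rule peels one piece off the right of the token. A simplified sketch, not spaCy's tokenizer loop:

    import re

    ALPHA_LOWER = "a-záéíóöőúüű"    # heavily abbreviated stand-in
    SUFFIX_RULES = [
        r"(?<=[0-9])\+",                           # trailing '+' after a digit
        r"(?<=°[FfCcKk])\.",                       # '.' after a temperature unit
        r"(?<=[{al})])-e".format(al=ALPHA_LOWER),  # the question clitic '-e'
    ]
    SUFFIX_SEARCH = re.compile("|".join("(?:{})$".format(r) for r in SUFFIX_RULES)).search

    def peel_suffixes(word):
        suffixes = []
        match = SUFFIX_SEARCH(word)
        while match:
            suffixes.insert(0, match.group(0))     # peel from the right
            word = word[:match.start()]
            match = SUFFIX_SEARCH(word)
        return ([word] if word else []) + suffixes

    print(peel_suffixes("lakik-e"))   # ['lakik', '-e']
    print(peel_suffixes("30°C."))     # ['30°C', '.']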
@@ -1,6 +1,11 @@
 # encoding: utf8
 from __future__ import unicode_literals

+import re
+
+from spacy.language_data.punctuation import ALPHA_LOWER, CURRENCY
+from ..language_data.tokenizer_exceptions import URL_PATTERN
+
 ABBREVIATIONS = """
 AkH.
 Aö.
@@ -107,6 +112,7 @@ Tel.
 Ty.
 Tyr.
 Ui.
+Ut.
 Vcs.
 Vhr.
 X.Y.
@@ -212,6 +218,7 @@ gimn.
 gk.
 gkv.
 gondn.
+Gr.
 gr.
 grav.
 gy.
@@ -237,6 +244,7 @@ ht.
 htb.
 hv.
 hőm.
+ie.
 i.e.
 i.sz.
 id.
@@ -271,6 +279,7 @@ júl.
 jún.
 karb.
 kat.
+kath.
 kb.
 kcs.
 kd.
@@ -286,6 +295,7 @@ kk.
 kkt.
 klin.
 kp.
+Kr.
 krt.
 kt.
 ktsg.
@@ -357,6 +367,7 @@ nov.
 nu.
 ny.
 nyilv.
+Nyrt.
 nyrt.
 nyug.
 obj.
@@ -409,6 +420,7 @@ sa.
 sel.
 sgt.
 sm.
+St.
 st.
 stat.
 stb.
@@ -478,8 +490,11 @@ vs.
 vsz.
 vv.
 vál.
+várm.
+Várm.
 vízv.
 vö.
+Zrt.
 zrt.
 zs.
 Ész.
@@ -502,6 +517,7 @@ zs.
 ú.
 úm.
 ún.
+ú.n.
 út.
 üag.
 üd.
@@ -510,7 +526,6 @@ zs.
 ümk.
 ütk.
 üv.
-ő.
 ű.
 őrgy.
 őrpk.
@@ -520,3 +535,17 @@ zs.
 OTHER_EXC = """
 -e
 """.strip().split()
+
+ORD_NUM_OR_DATE = "([A-Z0-9]+[./-])*(\d+\.?)"
+_NUM = "[+\-]?\d+([,.]\d+)*"
+_OPS = "[=<>+\-\*/^()÷%²]"
+_SUFFIES = "-[{a}]+".format(a=ALPHA_LOWER)
+NUMERIC_EXP = "\(?({n})(({o})({n}))*[)%]?".format(n=_NUM, o=_OPS)
+TIME_EXP = "\d+(:\d+)*(\.\d+)?"
+
+NUMS = "(({ne})|({t})|({on})|({c}))({s})?".format(
+    ne=NUMERIC_EXP, t=TIME_EXP, on=ORD_NUM_OR_DATE,
+    c=CURRENCY, s=_SUFFIES
+)
+
+TOKEN_MATCH = re.compile("^({u})|({n})$".format(u=URL_PATTERN, n=NUMS)).match
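These new definitions (apparently spacy/hu/tokenizer_exceptions.py) build the whole-token number matcher that the first hunk wires into the language class. Note that in "^({u})|({n})$" the alternation binds looser than the anchors, so only the URL branch is start-anchored and only the number branch is end-anchored; .match anchors both at the start regardless. A standalone repro of the number half, with abbreviated stand-ins for ALPHA_LOWER and CURRENCY (the real values come from spacy.language_data.punctuation):

    import re

    ALPHA_LOWER = "a-záéíóöőúüű"    # abbreviated stand-in
    CURRENCY = r"\$|¢|£|€|¥|฿"      # abbreviated stand-in

    ORD_NUM_OR_DATE = r"([A-Z0-9]+[./-])*(\d+\.?)"
    _NUM = r"[+\-]?\d+([,.]\d+)*"
    _OPS = r"[=<>+\-\*/^()÷%²]"
    _SUFFIES = r"-[{a}]+".format(a=ALPHA_LOWER)
    NUMERIC_EXP = r"\(?({n})(({o})({n}))*[)%]?".format(n=_NUM, o=_OPS)
    TIME_EXP = r"\d+(:\d+)*(\.\d+)?"

    NUMS = "(({ne})|({t})|({on})|({c}))({s})?".format(
        ne=NUMERIC_EXP, t=TIME_EXP, on=ORD_NUM_OR_DATE,
        c=CURRENCY, s=_SUFFIES
    )

    matches_num = re.compile("^({n})$".format(n=NUMS)).match
    for text in ["-23,12-ben", "(2÷3)-2/5=1", "10:35", "75%+1-100%-ig", "2.", "kutya"]:
        print(text, bool(matches_num(text)))
    # Everything except 'kutya' matches, so those strings survive as single tokens.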
@@ -57,14 +57,14 @@ LIST_PUNCT = list(_PUNCT.strip().split())
 LIST_HYPHENS = list(_HYPHENS.strip().split())


-ALPHA_LOWER = _ALPHA_LOWER.strip().replace(' ', '')
-ALPHA_UPPER = _ALPHA_UPPER.strip().replace(' ', '')
+ALPHA_LOWER = _ALPHA_LOWER.strip().replace(' ', '').replace('\n', '')
+ALPHA_UPPER = _ALPHA_UPPER.strip().replace(' ', '').replace('\n', '')
 ALPHA = ALPHA_LOWER + ALPHA_UPPER


 QUOTES = _QUOTES.strip().replace(' ', '|')
 CURRENCY = _CURRENCY.strip().replace(' ', '|')
-UNITS = _UNITS.strip().replace(' ', '|')
+UNITS = _UNITS.strip().replace(' ', '|').replace('\n', '|')
 HYPHENS = _HYPHENS.strip().replace(' ', '|')
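Here (apparently spacy/language_data/punctuation.py) newlines are now stripped alongside spaces. These constants are built from multi-line string literals and then spliced into regex character classes, where a leftover '\n' silently matches newline characters. A small demo with a made-up two-line _ALPHA_LOWER:

    import re

    _ALPHA_LOWER = """
    a á b c d e f g
    h i í j k l m n
    """  # made-up two-line sample; the real list is much longer

    broken = _ALPHA_LOWER.strip().replace(' ', '')                   # keeps the '\n'
    fixed = _ALPHA_LOWER.strip().replace(' ', '').replace('\n', '')

    print(bool(re.match("[{a}]".format(a=broken), "\n")))  # True: '\n' is in the class
    print(bool(re.match("[{a}]".format(a=fixed), "\n")))   # False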
@@ -2,10 +2,10 @@ from __future__ import unicode_literals

 import re

-_URL_PATTERN = r'''
-^((([A-Za-z]{3,9}:(?:\/\/)?)(?:[-;:&=\+\$,\w]+@)?[A-Za-z0-9.-]+|(?:www.|[-;:&=\+\$,\w]+@)[A-Za-z0-9.-]+)((?:\/[\+~%\/.\w\-_]*)?\??(?:[-\+=&;%@.\w_]*)#?(?:[\w]*))?)$
+URL_PATTERN = r'''
+((([A-Za-z]{3,9}:(?:\/\/)?)(?:[-;:&=\+\$,\w]+@)?[A-Za-z0-9.-]+|(?:www.|[-;:&=\+\$,\w]+@)[A-Za-z0-9.-]+)((?:\/[\+~%\/.\w\-_]*)?\??(?:[-\+=&;%@.\w_]*)#?(?:[\w]*))?)
 '''.strip()

-TOKEN_MATCH = re.compile(_URL_PATTERN).match
+TOKEN_MATCH = re.compile("^{}$".format(URL_PATTERN)).match

 __all__ = ['TOKEN_MATCH']
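Dropping the '^...$' anchors from the pattern itself (apparently spacy/language_data/tokenizer_exceptions.py) lets URL_PATTERN be embedded in a larger alternation — exactly what the Hungarian TOKEN_MATCH above does — while the default TOKEN_MATCH re-adds them at compile time. A toy illustration:

    import re

    URL = r"https?://\S+"            # toy stand-in for URL_PATTERN
    NUM = r"[+\-]?\d+([,.]\d+)*"     # toy stand-in for NUMS

    default_match = re.compile("^{}$".format(URL)).match               # URL-only default
    hu_match = re.compile("^({u})|({n})$".format(u=URL, n=NUM)).match  # URLs or numbers

    print(bool(default_match("-23,12")))      # False
    print(bool(hu_match("-23,12")))           # True
    print(bool(hu_match("http://spacy.io")))  # True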
@@ -3,7 +3,6 @@ from __future__ import unicode_literals

 import pytest

-
 DEFAULT_TESTS = [
     ('N. kormányzósági\nszékhely.', ['N.', 'kormányzósági', 'székhely', '.']),
     ('A .hu egy tld.', ['A', '.hu', 'egy', 'tld', '.']),
@@ -24,11 +23,13 @@ DEFAULT_TESTS = [

 HYPHEN_TESTS = [
     ('Egy -nak, -jaiért, -magyar, bel- van.', ['Egy', '-nak', ',', '-jaiért', ',', '-magyar', ',', 'bel-', 'van', '.']),
+    ('Szabolcs-Szatmár-Bereg megye', ['Szabolcs-Szatmár-Bereg', 'megye']),
     ('Egy -nak.', ['Egy', '-nak', '.']),
     ('Egy bel-.', ['Egy', 'bel-', '.']),
     ('Dinnye-domb-.', ['Dinnye-domb-', '.']),
     ('Ezen -e elcsatangolt.', ['Ezen', '-e', 'elcsatangolt', '.']),
     ('Lakik-e', ['Lakik', '-e']),
+    ('A--B', ['A', '--', 'B']),
     ('Lakik-e?', ['Lakik', '-e', '?']),
     ('Lakik-e.', ['Lakik', '-e', '.']),
     ('Lakik-e...', ['Lakik', '-e', '...']),
@@ -89,11 +90,15 @@ NUMBER_TESTS = [
     ('A -23,12 van.', ['A', '-23,12', 'van', '.']),
     ('A -23,12-ben van.', ['A', '-23,12-ben', 'van', '.']),
     ('A -23,12-ben.', ['A', '-23,12-ben', '.']),
-    ('A 2+3 van.', ['A', '2', '+', '3', 'van', '.']),
-    ('A 2 +3 van.', ['A', '2', '+', '3', 'van', '.']),
+    ('A 2+3 van.', ['A', '2+3', 'van', '.']),
+    ('A 2<3 van.', ['A', '2<3', 'van', '.']),
+    ('A 2=3 van.', ['A', '2=3', 'van', '.']),
+    ('A 2÷3 van.', ['A', '2÷3', 'van', '.']),
+    ('A (2÷3)-2/5=1 van.', ['A', '(2÷3)-2/5=1', 'van', '.']),
+    ('A 2 +3 van.', ['A', '2', '+3', 'van', '.']),
     ('A 2+ 3 van.', ['A', '2', '+', '3', 'van', '.']),
     ('A 2 + 3 van.', ['A', '2', '+', '3', 'van', '.']),
-    ('A 2*3 van.', ['A', '2', '*', '3', 'van', '.']),
+    ('A 2*3 van.', ['A', '2*3', 'van', '.']),
     ('A 2 *3 van.', ['A', '2', '*', '3', 'van', '.']),
     ('A 2* 3 van.', ['A', '2', '*', '3', 'van', '.']),
     ('A 2 * 3 van.', ['A', '2', '*', '3', 'van', '.']),
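The flipped expectations fall straight out of the new rules: '2+3' as a whole matches NUMERIC_EXP, so token_match keeps it intact, while in '2+ 3' the space leaves '2+' as its own candidate, the whole-string match fails, and the new (?<=[0-9])\+ suffix rule peels the '+' off. Checking against the matches_num repro defined earlier:

    print(bool(matches_num("2+3")))  # True  -> '2+3' stays one token
    print(bool(matches_num("2+")))   # False -> suffix rule splits '2+' into '2', '+'
    print(bool(matches_num("+3")))   # True  -> '+3' stays attached, as the test expects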
@@ -142,6 +147,7 @@ NUMBER_TESTS = [
     ('A 2002--2003. van.', ['A', '2002--2003.', 'van', '.']),
     ('A 2002--2003-ben van.', ['A', '2002--2003-ben', 'van', '.']),
     ('A 2002--2003-ben.', ['A', '2002--2003-ben', '.']),
+    ('A +0,99% van.', ['A', '+0,99%', 'van', '.']),
     ('A -0,99% van.', ['A', '-0,99%', 'van', '.']),
     ('A -0,99%-ben van.', ['A', '-0,99%-ben', 'van', '.']),
     ('A -0,99%.', ['A', '-0,99%', '.']),
@@ -194,7 +200,16 @@ NUMBER_TESTS = [
     ('A III/c-ben.', ['A', 'III/c-ben', '.']),
     ('A TU–154 van.', ['A', 'TU–154', 'van', '.']),
     ('A TU–154-ben van.', ['A', 'TU–154-ben', 'van', '.']),
-    ('A TU–154-ben.', ['A', 'TU–154-ben', '.'])
+    ('A TU–154-ben.', ['A', 'TU–154-ben', '.']),
+    ('A 5cm³', ['A', '5', 'cm³']),
+    ('A 5 $-ban', ['A', '5', '$-ban']),
+    ('A 5$-ban', ['A', '5$-ban']),
+    ('A 5$.', ['A', '5', '$', '.']),
+    ('A 5$', ['A', '5', '$']),
+    ('A $5', ['A', '$', '5']),
+    ('A 5km/h', ['A', '5', 'km/h']),
+    ('A 75%+1-100%-ig', ['A', '75%+1-100%-ig']),
+    ('A 5km/h.', ['A', '5', 'km/h', '.']),
 ]

 QUOTE_TESTS = [
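The currency and unit cases exercise the new suffix rules: peeling only continues while some rule matches at the current end of the token, so '5$.' sheds '.' and then '$', while '5$-ban' ends in letters no suffix rule touches and survives whole. Reusing the peel_suffixes sketch from earlier, extended with two more toy rules:

    CURRENCY = r"\$|¢|£|€|¥|฿"
    SUFFIX_RULES += [
        r"(?<=[0-9])(?:{c})".format(c=CURRENCY),  # currency right after a digit
        r"\.",                                    # crude stand-in for the '.'-rules
    ]
    SUFFIX_SEARCH = re.compile("|".join("(?:{})$".format(r) for r in SUFFIX_RULES)).search

    print(peel_suffixes("5$."))     # ['5', '$', '.']
    print(peel_suffixes("5$-ban"))  # ['5$-ban'] -- nothing matches at the end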
@@ -202,15 +217,15 @@ QUOTE_TESTS = [
     ('"Ime, hat"-ban irja.', ['"', 'Ime', ',', 'hat', '"', '-ban', 'irja', '.']),
     ('Az "Ime, hat".', ['Az', '"', 'Ime', ',', 'hat', '"', '.']),
     ('Egy 24"-os monitor.', ['Egy', '24', '"', '-os', 'monitor', '.']),
-    ("A don't van.", ['A', "don't", 'van', '.'])
+    # ("A don't van.", ['A', "don't", 'van', '.'])
 ]

 DOT_TESTS = [
     ('N. kormányzósági\nszékhely.', ['N.', 'kormányzósági', 'székhely', '.']),
     ('A .hu egy tld.', ['A', '.hu', 'egy', 'tld', '.']),
     ('Az egy.ketto pelda.', ['Az', 'egy.ketto', 'pelda', '.']),
-    ('A pl. rovidites.', ['A', 'pl.', 'rovidites', '.']),
-    ('A S.M.A.R.T. szo.', ['A', 'S.M.A.R.T.', 'szo', '.']),
+    ('A pl. rövidítés.', ['A', 'pl.', 'rövidítés', '.']),
+    ('A S.M.A.R.T. szó.', ['A', 'S.M.A.R.T.', 'szó', '.']),
     ('A .hu.', ['A', '.hu', '.']),
     ('Az egy.ketto.', ['Az', 'egy.ketto', '.']),
     ('A pl.', ['A', 'pl.']),
@@ -223,9 +238,16 @@ DOT_TESTS = [
     ('Valami ... más.', ['Valami', '...', 'más', '.'])
 ]

-
-TESTCASES = DEFAULT_TESTS + DOT_TESTS + QUOTE_TESTS  # + NUMBER_TESTS + HYPHEN_TESTS
-
+WIKI_TESTS = [
+    ('!"', ['!', '"']),
+    ('!"-lel', ['!', '"', '-lel']),
+    ('""-sorozat ', ['"', '"', '-sorozat']),
+    ('"(Köszönöm', ['"', '(', 'Köszönöm']),
+    ('(törvénykönyv)-ben ', ['(', 'törvénykönyv', ')', '-ben']),
+    ('"(...)"–sokkal ', ['"', '(', '...', ')', '"', '–sokkal']),
+]
+
+TESTCASES = DEFAULT_TESTS + DOT_TESTS + QUOTE_TESTS + NUMBER_TESTS  # + HYPHEN_TESTS  # + WIKI_TESTS

 @pytest.mark.parametrize('text,expected_tokens', TESTCASES)
 def test_tokenizer_handles_testcases(hu_tokenizer, text, expected_tokens):