from __future__ import unicode_literals

import pytest
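
# The tests below expect an `en_tokenizer` pytest fixture, normally supplied
# by the suite's conftest.py rather than defined here. A minimal sketch of
# such a fixture (assuming a spaCy-style English pipeline that exposes its
# tokenizer as `nlp.tokenizer`) might look like:
#
#     @pytest.fixture(scope='session')
#     def en_tokenizer():
#         from spacy.en import English  # assumed import path
#         return English().tokenizer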


def test_hyphen(en_tokenizer):
    # A hyphenated compound like 'best-known' is split at the hyphen.
    tokens = en_tokenizer('best-known')
    assert len(tokens) == 3


def test_numeric_range(en_tokenizer):
    # A numeric range is split at the hyphen rather than kept as one token.
    tokens = en_tokenizer('0.1-13.5')
    assert len(tokens) == 3


def test_period(en_tokenizer):
    # A period between words is split off, but a domain name stays whole.
    tokens = en_tokenizer('best.Known')
    assert len(tokens) == 3
    tokens = en_tokenizer('zombo.com')
    assert len(tokens) == 1


def test_ellipsis(en_tokenizer):
    # An ellipsis is split off regardless of the case of the following word.
    tokens = en_tokenizer('best...Known')
    assert len(tokens) == 3
    tokens = en_tokenizer('best...known')
    assert len(tokens) == 3


def test_email(en_tokenizer):
    # Email addresses are kept as single tokens.
    tokens = en_tokenizer('hello@example.com')
    assert len(tokens) == 1
    tokens = en_tokenizer('hi+there@gmail.it')
    assert len(tokens) == 1


def test_double_hyphen(en_tokenizer):
    tokens = en_tokenizer(u'No decent--let alone well-bred--people.')
    assert tokens[0].text == u'No'
    assert tokens[1].text == u'decent'
    assert tokens[2].text == u'--'
    assert tokens[3].text == u'let'
    assert tokens[4].text == u'alone'
    assert tokens[5].text == u'well'
    assert tokens[6].text == u'-'
    # TODO: This points to a deeper issue with the tokenizer: it doesn't
    # re-enter on infixes.
    assert tokens[7].text == u'bred'
    assert tokens[8].text == u'--'
    assert tokens[9].text == u'people'


def test_infix_comma(en_tokenizer):
    # Re issue #326: a comma between two words with no space should still be
    # split off as its own token.
    tokens = en_tokenizer(u'Hello,world')
    assert tokens[0].text == u'Hello'
    assert tokens[1].text == u','
    assert tokens[2].text == u'world'