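# Tests for the English tokenizer's handling of apostrophes and contractions.
# The en_tokenizer fixture is assumed to be supplied by the test suite's conftest.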
from __future__ import unicode_literals

import pytest


def test_possess(en_tokenizer):
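    # Possessive "'s" is split off: "Mike's" -> ["Mike", "'s"]; the orth IDs
    # round-trip through the vocab's string store.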
    tokens = en_tokenizer("Mike's")
    assert en_tokenizer.vocab.strings[tokens[0].orth] == "Mike"
    assert en_tokenizer.vocab.strings[tokens[1].orth] == "'s"
    assert len(tokens) == 2


def test_apostrophe(en_tokenizer):
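    # A bare trailing apostrophe (as in the plural possessive) becomes its own token.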
    tokens = en_tokenizer("schools'")
    assert len(tokens) == 2
    assert tokens[1].orth_ == "'"
    assert tokens[0].orth_ == "schools"


def test_LL(en_tokenizer):
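    # The "'ll" contraction splits off and carries the lemma "will".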
    tokens = en_tokenizer("we'll")
    assert len(tokens) == 2
    assert tokens[1].orth_ == "'ll"
    assert tokens[1].lemma_ == "will"
    assert tokens[0].orth_ == "we"


def test_aint(en_tokenizer):
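    # "ain't" splits into "ai" / "n't" with the lemmas "be" / "not".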
    tokens = en_tokenizer("ain't")
    assert len(tokens) == 2
    assert tokens[0].orth_ == "ai"
    assert tokens[0].lemma_ == "be"
    assert tokens[1].orth_ == "n't"
    assert tokens[1].lemma_ == "not"


def test_capitalized(en_tokenizer):
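    # Splitting should work the same regardless of capitalization.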
    tokens = en_tokenizer("can't")
    assert len(tokens) == 2
    tokens = en_tokenizer("Can't")
    assert len(tokens) == 2
    tokens = en_tokenizer("Ain't")
    assert len(tokens) == 2
    assert tokens[0].orth_ == "Ai"
    assert tokens[0].lemma_ == "be"


def test_punct(en_tokenizer):
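    # Leading quote punctuation is split off in addition to the contraction:
    # "``" contributes one extra token, so "``We've" yields three in total.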
    tokens = en_tokenizer("We've")
    assert len(tokens) == 2
    tokens = en_tokenizer("``We've")
    assert len(tokens) == 3