from __future__ import unicode_literals

from spacy.en import tokenize, lookup, unhash


def test_possess():
    """Possessive "'s" is split off its noun as a separate token."""
    tokens = tokenize("Mike's")
    assert unhash(tokens[0].lex) == "Mike"
    assert unhash(tokens[1].lex) == "'s"
    # Exactly two tokens: the noun and the clitic.
    assert len(tokens) == 2


def test_apostrophe():
    """A bare trailing apostrophe (plural possessive) becomes its own token."""
    tokens = tokenize("schools'")
    assert len(tokens) == 2
    assert unhash(tokens[1].lex) == "'"
    assert unhash(tokens[0].lex) == "schools"


def test_LL():
    """The "'ll" contraction is split and expanded to "will"."""
    tokens = tokenize("we'll")
    assert len(tokens) == 2
    # The tokenizer normalizes the contraction to its full form.
    assert unhash(tokens[1].lex) == "will"
    assert unhash(tokens[0].lex) == "we"


def test_aint():
    """"ain't" is split and normalized to "are" + "not"."""
    tokens = tokenize("ain't")
    assert len(tokens) == 2
    # Both halves are expanded to canonical forms.
    assert unhash(tokens[0].lex) == "are"
    assert unhash(tokens[1].lex) == "not"


def test_capitalized():
    """Contraction splitting also applies to capitalized forms.

    Capitalization is preserved in the expanded first token
    ("Ain't" -> "Are" + "not").
    """
    tokens = tokenize("can't")
    assert len(tokens) == 2
    tokens = tokenize("Can't")
    assert len(tokens) == 2
    tokens = tokenize("Ain't")
    assert len(tokens) == 2
    # The expansion keeps the original casing on the first token.
    assert unhash(tokens[0].lex) == "Are"