spaCy/tests/test_tokenizer.py

from __future__ import unicode_literals

from spacy.en import EN


def test_single_word():
    # One word in, one token out; its string should match the lexicon entry.
    lex_ids = EN.tokenize(u'hello')
    assert lex_ids[0].string == EN.lexicon.lookup(u'hello').string


def test_two_words():
    # Two space-separated words should yield two distinct tokens.
    words = EN.tokenize('hello possums')
    assert len(words) == 2
    assert words[0].string == EN.lexicon.lookup('hello').string
    assert words[0].string != words[1].string


def test_punct():
    # Punctuation should be split off into its own tokens:
    # [hello] [,] [possums] [.]
    tokens = EN.tokenize('hello, possums.')
    assert len(tokens) == 4
    assert tokens[0].string == EN.lexicon.lookup('hello').string
    assert tokens[1].string == EN.lexicon.lookup(',').string
    assert tokens[2].string == EN.lexicon.lookup('possums').string
    assert tokens[1].string != EN.lexicon.lookup('hello').string


def test_digits():
    # Digit runs are tokenized like words; the final period is split off:
    # [The] [year] [:] [1984] [.]
    lex_ids = EN.tokenize('The year: 1984.')
    assert len(lex_ids) == 5
    assert lex_ids[0].string == EN.lexicon.lookup('The').string
    assert lex_ids[3].string == EN.lexicon.lookup('1984').string
    assert lex_ids[4].string == EN.lexicon.lookup('.').string


def test_contraction():
    # "don't" should split into two tokens, with the second normalized
    # to the lexicon entry for "not".
    lex_ids = EN.tokenize("don't giggle")
    assert len(lex_ids) == 3
    assert lex_ids[1].string == EN.lexicon.lookup("not").string
    # The same split should apply mid-sentence, with "!" as its own token.
    lex_ids = EN.tokenize("i said don't!")
    assert len(lex_ids) == 4
    assert lex_ids[3].string == EN.lexicon.lookup('!').string
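

# Usage sketch: judging from the assertions above, EN.tokenize appears to
# return a sequence of lexeme-like objects exposing a .string attribute,
# and EN.lexicon.lookup(...) resolves a single string to its lexicon entry.
# Those return types are assumptions inferred from this module, not from
# documentation.
if __name__ == '__main__':
    tokens = EN.tokenize(u"don't giggle")
    # Per test_contraction, this should print three strings, with the
    # second normalized to 'not'.
    print([t.string for t in tokens])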