* Update tokenization tests for new tokenizer rules

Matthew Honnibal 2014-09-15 01:32:51 +02:00
parent df24e3708c
commit 5dcc1a426a


@@ -17,21 +17,19 @@ def test_two_words():
 def test_punct():
     tokens = EN.tokenize('hello, possums.')
-    assert len(tokens) == 4
+    assert len(tokens) == 3
     assert tokens[0].string == EN.lexicon.lookup('hello').string
     assert tokens[1].string == EN.lexicon.lookup(',').string
-    assert tokens[2].string == EN.lexicon.lookup('possums').string
+    assert tokens[2].string == EN.lexicon.lookup('possums.').string
     assert tokens[1].string != EN.lexicon.lookup('hello').string
 
 
 def test_digits():
     lex_ids = EN.tokenize('The year: 1984.')
-    assert lex_ids.string(4) == "."
-    assert lex_ids.string(3) == "1984"
-    assert len(lex_ids) == 5
+    assert lex_ids.string(3) == "1984."
+    assert len(lex_ids) == 4
     assert lex_ids[0].string == EN.lexicon.lookup('The').string
-    assert lex_ids[3].string == EN.lexicon.lookup('1984').string
-    assert lex_ids[4].string == EN.lexicon.lookup('.').string
+    assert lex_ids[3].string == EN.lexicon.lookup('1984.').string
 
 
 def test_contraction():
@@ -53,3 +51,17 @@ def test_contraction_punct():
     tokens = EN.tokenize("can't!")
     assert len(tokens) == 3
+
+
+def test_sample():
+    text = """Tributes pour in for late British Labour Party leader
+
+Tributes poured in from around the world Thursday
+to the late Labour Party leader John Smith, who died earlier from a massive
+heart attack aged 55.
+
+In Washington, the US State Department issued a statement regretting "the
+untimely death" of the rapier-tongued Scottish barrister and parliamentarian.
+"Mr. Smith, throughout his distinguished"""
+    tokens = EN.tokenize(text)
+    assert len(tokens) > 5
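
For readers skimming the diff: the updated tests encode a rule change where commas, colons, and exclamation marks are still split off as their own tokens, but a trailing full stop now stays attached to the preceding word ('possums.', '1984.'). Below is a minimal, self-contained sketch of that splitting behaviour; it is an illustration of what the tests assert, not spaCy's actual tokenizer, and it ignores the contraction handling exercised by test_contraction_punct.

import re

def tokenize(text):
    # Illustrative only: split trailing punctuation other than '.' into
    # its own token; a sentence-final full stop stays on the word, as the
    # updated tests assert ('possums.', '1984.').
    tokens = []
    for chunk in text.split():
        match = re.match(r"(.+?)([,!?;:]+)$", chunk)
        if match:
            tokens.extend([match.group(1), match.group(2)])
        else:
            tokens.append(chunk)
    return tokens

assert tokenize('hello, possums.') == ['hello', ',', 'possums.']
assert tokenize('The year: 1984.') == ['The', 'year', ':', '1984.']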