From dd587b7477e70a592c101b0adf34aa792b76db58 Mon Sep 17 00:00:00 2001
From: Matthew Honnibal
Date: Sun, 7 Jun 2015 18:07:32 +0200
Subject: [PATCH] * Fix tests

---
 tests/parser/test_conjuncts.py      | 2 --
 tests/parser/test_parse_navigate.py | 2 +-
 tests/{tokenizer => }/sun.txt       | 0
 tests/tokenizer/test_wiki_sun.py    | 6 +++---
 4 files changed, 4 insertions(+), 6 deletions(-)
 rename tests/{tokenizer => }/sun.txt (100%)

diff --git a/tests/parser/test_conjuncts.py b/tests/parser/test_conjuncts.py
index 965fd1146..b407e887d 100644
--- a/tests/parser/test_conjuncts.py
+++ b/tests/parser/test_conjuncts.py
@@ -11,8 +11,6 @@ def orths(tokens):
 def test_simple_two(EN):
     tokens = EN('I lost money and pride.', tag=True, parse=True)
     pride = tokens[4]
-    for t in tokens:
-        print t.orth_, t.tag_, t.head.orth_
     assert orths(pride.conjuncts) == ['money', 'pride']
     money = tokens[2]
     assert orths(money.conjuncts) == ['money', 'pride']
diff --git a/tests/parser/test_parse_navigate.py b/tests/parser/test_parse_navigate.py
index bfeee54a3..ebd550fa2 100644
--- a/tests/parser/test_parse_navigate.py
+++ b/tests/parser/test_parse_navigate.py
@@ -7,7 +7,7 @@ import pytest
 
 
 @pytest.fixture
 def sun_text():
-    with codecs.open(path.join(path.dirname(__file__), 'sun.txt'), 'r', 'utf8') as file_:
+    with codecs.open(path.join(path.dirname(__file__), '..', 'sun.txt'), 'r', 'utf8') as file_:
         text = file_.read()
     return text
diff --git a/tests/tokenizer/sun.txt b/tests/sun.txt
similarity index 100%
rename from tests/tokenizer/sun.txt
rename to tests/sun.txt
diff --git a/tests/tokenizer/test_wiki_sun.py b/tests/tokenizer/test_wiki_sun.py
index f42c14445..8d2a6682e 100644
--- a/tests/tokenizer/test_wiki_sun.py
+++ b/tests/tokenizer/test_wiki_sun.py
@@ -11,11 +11,11 @@ HERE = path.dirname(__file__)
 
 @pytest.fixture
 def sun_txt():
-    loc = path.join(HERE, 'sun.txt')
+    loc = path.join(HERE, '..', 'sun.txt')
     return utf8open(loc).read()
 
 
-def test_tokenize(sun_txt, EN):
+def test_tokenize(sun_txt, en_tokenizer):
     assert len(sun_txt) != 0
-    tokens = nlp(sun_txt)
+    tokens = en_tokenizer(sun_txt)
     assert len(tokens) > 100