diff --git a/spacy/tests/tokenizer/conftest.py b/spacy/tests/tokenizer/conftest.py
index 8d842cd6d..3a3516c41 100644
--- a/spacy/tests/tokenizer/conftest.py
+++ b/spacy/tests/tokenizer/conftest.py
@@ -2,6 +2,7 @@ from __future__ import unicode_literals
 
 import pytest
+
 from ...en import English
 
 
 
diff --git a/spacy/tests/tokenizer/test_indices.py b/spacy/tests/tokenizer/test_indices.py
index 5a0dea627..0ed6ca4dc 100644
--- a/spacy/tests/tokenizer/test_indices.py
+++ b/spacy/tests/tokenizer/test_indices.py
@@ -1,13 +1,14 @@
 # coding: utf-8
 """Test that token.idx correctly computes index into the original string."""
+
 from __future__ import unicode_literals
 
 
 import pytest
 
 
 def test_simple_punct(en_tokenizer):
-    text = 'to walk, do foo'
+    text = "to walk, do foo"
     tokens = en_tokenizer(text)
     assert tokens[0].idx == 0
     assert tokens[1].idx == 3
@@ -17,7 +18,7 @@ def test_simple_punct(en_tokenizer):
 
 
 def test_complex_punct(en_tokenizer):
-    text = 'Tom (D., Ill.)!'
+    text = "Tom (D., Ill.)!"
     tokens = en_tokenizer(text)
     assert tokens[0].idx == 0
     assert len(tokens[0]) == 3