2017-01-12 17:27:46 +03:00
|
|
|
# coding: utf-8
|
2014-07-05 22:51:42 +04:00
|
|
|
from __future__ import unicode_literals
|
|
|
|
|
2017-01-12 17:27:46 +03:00
|
|
|
from ...attrs import LEMMA, ORTH, PROB, IS_ALPHA
|
|
|
|
from ...parts_of_speech import NOUN, VERB
|
2014-07-05 22:51:42 +04:00
|
|
|
|
2017-01-12 17:27:46 +03:00
|
|
|
import pytest
|
2015-10-26 04:31:05 +03:00
|
|
|
|
|
|
|
|
2017-01-12 17:27:46 +03:00
|
|
|
@pytest.mark.parametrize('text1,text2', [
    ("Hello", "bye"), ("Hello", "hello"), ("Hello", "Hello,")])
def test_vocab_api_neq(en_vocab, text1, text2):
    """Distinct surface strings must map to distinct orth IDs in the vocab."""
    lexeme_a = en_vocab[text1]
    lexeme_b = en_vocab[text2]
    assert lexeme_a.orth != lexeme_b.orth
|
2015-10-26 04:31:05 +03:00
|
|
|
|
|
|
|
|
2017-01-12 17:27:46 +03:00
|
|
|
# NOTE: the param list must be a list of strings — a bare string here would
# make pytest parametrize over individual characters ('H', 'e', 'l', 'l', 'o').
@pytest.mark.parametrize('text', ["Hello"])
def test_vocab_api_eq(en_vocab, text):
    """Looking up the same string twice yields lexemes with equal orth IDs."""
    lex = en_vocab[text]
    assert en_vocab[text].orth == lex.orth
|
2015-10-26 04:31:05 +03:00
|
|
|
|
|
|
|
|
2017-01-12 17:27:46 +03:00
|
|
|
@pytest.mark.parametrize('text', ["example"])
def test_vocab_api_shape_attr(en_vocab, text):
    """A lexeme's orth ID and shape ID are different attributes."""
    lexeme = en_vocab[text]
    assert lexeme.shape != lexeme.orth
|
2015-10-26 04:31:05 +03:00
|
|
|
|
|
|
|
|
2017-01-12 17:27:46 +03:00
|
|
|
@pytest.mark.parametrize('string,symbol', [
    ('IS_ALPHA', IS_ALPHA),
    ('NOUN', NOUN),
    ('VERB', VERB),
    ('LEMMA', LEMMA),
    ('ORTH', ORTH),
    ('PROB', PROB),
])
def test_vocab_api_symbols(en_vocab, string, symbol):
    """The string store resolves symbol names to their integer symbol IDs."""
    resolved = en_vocab.strings[string]
    assert symbol == resolved
|
2015-10-26 04:31:05 +03:00
|
|
|
|
|
|
|
|
2017-01-12 17:27:46 +03:00
|
|
|
# NOTE: the param list must be a list of strings — a bare string here would
# make pytest parametrize over individual characters ('H', 'e', 'l', 'l', 'o').
@pytest.mark.parametrize('text', ["Hello"])
def test_vocab_api_contains(en_vocab, text):
    """`in` reports membership: a looked-up string is present, junk is not."""
    # Looking the string up adds it to the vocab; the result is unused.
    _ = en_vocab[text]
    assert text in en_vocab
    assert "LKsdjvlsakdvlaksdvlkasjdvljasdlkfvm" not in en_vocab
|