Mirror of https://github.com/explosion/spaCy.git, synced 2025-01-13 10:46:29 +03:00
* Update tests for tighter interface
This commit is contained in:
parent ea8f1e7053
commit 63114820cf
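The change itself is mechanical: every lexicon lookup moves from the lookup() method to dictionary-style indexing. A minimal before/after, taken directly from the hunks below:

    # Old interface: explicit method call
    the = EN.lexicon.lookup('the')

    # New, tighter interface: mapping-style __getitem__ access
    the = EN.lexicon['the']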
@@ -7,18 +7,18 @@ from spacy.lexeme import *


 def test_is_alpha():
-    the = EN.lexicon.lookup('the')
+    the = EN.lexicon['the']
     assert the['flags'] & (1 << IS_ALPHA)
-    year = EN.lexicon.lookup('1999')
+    year = EN.lexicon['1999']
     assert not year['flags'] & (1 << IS_ALPHA)
-    mixed = EN.lexicon.lookup('hello1')
+    mixed = EN.lexicon['hello1']
     assert not mixed['flags'] & (1 << IS_ALPHA)


 def test_is_digit():
-    the = EN.lexicon.lookup('the')
+    the = EN.lexicon['the']
     assert not the['flags'] & (1 << IS_DIGIT)
-    year = EN.lexicon.lookup('1999')
+    year = EN.lexicon['1999']
     assert year['flags'] & (1 << IS_DIGIT)
-    mixed = EN.lexicon.lookup('hello1')
+    mixed = EN.lexicon['hello1']
     assert not mixed['flags'] & (1 << IS_DIGIT)
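The assertions above test single bits of each lexeme's packed 'flags' integer, with the flag constants coming from the spacy.lexeme star-import shown in the hunk header. A minimal sketch of that bit-flag scheme, using made-up flag positions rather than spaCy's actual constant values:

    # Illustrative flag positions only; the real IS_ALPHA / IS_DIGIT
    # values live in spacy.lexeme and may differ.
    IS_ALPHA = 0
    IS_DIGIT = 1

    def set_flag(flags, bit):
        # Switch one boolean attribute on in the packed integer.
        return flags | (1 << bit)

    flags = set_flag(0, IS_ALPHA)
    assert flags & (1 << IS_ALPHA)       # the IS_ALPHA bit is set
    assert not flags & (1 << IS_DIGIT)   # unrelated bits stay clear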
@@ -27,17 +27,17 @@ def test_punct():
 def test_digits():
     tokens = EN.tokenize('The year: 1984.')
     assert len(tokens) == 5
-    assert tokens[0].sic == EN.lexicon.lookup('The')['sic']
-    assert tokens[3].sic == EN.lexicon.lookup('1984')['sic']
+    assert tokens[0].sic == EN.lexicon['The']['sic']
+    assert tokens[3].sic == EN.lexicon['1984']['sic']


 def test_contraction():
     tokens = EN.tokenize("don't giggle")
     assert len(tokens) == 3
-    assert tokens[1].sic == EN.lexicon.lookup("not")['sic']
+    assert tokens[1].sic == EN.lexicon["not"]['sic']
     tokens = EN.tokenize("i said don't!")
     assert len(tokens) == 5
-    assert tokens[4].sic == EN.lexicon.lookup('!')['sic']
+    assert tokens[4].sic == EN.lexicon['!']['sic']


 def test_contraction_punct():
@@ -4,20 +4,20 @@ from spacy.en import EN


 def test_neq():
-    addr = EN.lexicon.lookup('Hello')
-    assert EN.lexicon.lookup('bye')['sic'] != addr['sic']
+    addr = EN.lexicon['Hello']
+    assert EN.lexicon['bye']['sic'] != addr['sic']


 def test_eq():
-    addr = EN.lexicon.lookup('Hello')
-    assert EN.lexicon.lookup('Hello')['sic'] == addr['sic']
+    addr = EN.lexicon['Hello']
+    assert EN.lexicon['Hello']['sic'] == addr['sic']


 def test_case_neq():
-    addr = EN.lexicon.lookup('Hello')
-    assert EN.lexicon.lookup('hello')['sic'] != addr['sic']
+    addr = EN.lexicon['Hello']
+    assert EN.lexicon['hello']['sic'] != addr['sic']


 def test_punct_neq():
-    addr = EN.lexicon.lookup('Hello')
-    assert EN.lexicon.lookup('Hello,')['sic'] != addr['sic']
+    addr = EN.lexicon['Hello']
+    assert EN.lexicon['Hello,']['sic'] != addr['sic']
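Taken together, these tests pin down the tighter interface's contract: indexing the lexicon with a string returns an entry whose 'sic' value identifies that exact string, so the same string always yields the same entry while any variation (case, attached punctuation) yields a different one. A hypothetical stand-in illustrating that contract, not spaCy's actual implementation:

    class Lexicon:
        # Hypothetical sketch: __getitem__ replaces the old lookup()
        # method and interns each distinct string on first access.
        def __init__(self):
            self._entries = {}

        def __getitem__(self, string):
            if string not in self._entries:
                # Use a fresh integer id as the 'sic' value; the tests
                # only compare these ids for equality/inequality.
                self._entries[string] = {'sic': len(self._entries), 'flags': 0}
            return self._entries[string]

    lexicon = Lexicon()
    assert lexicon['Hello']['sic'] == lexicon['Hello']['sic']   # test_eq
    assert lexicon['hello']['sic'] != lexicon['Hello']['sic']   # test_case_neq
    assert lexicon['Hello,']['sic'] != lexicon['Hello']['sic']  # test_punct_neq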