# Mirror of https://github.com/explosion/spaCy.git
# Synced 2024-11-16 14:47:16 +03:00 — commit 7d50804644
# Commit message: Migrate regressions 1-1000 * Move serialize test to correct
# file * Remove tests that won't work in v3 * Migrate regressions 1000-1500
# (removed regression test 1250 because v3 doesn't support the old LEX scheme
# anymore) * Add missing imports in serializer tests * Migrate tests 1500-2000
# * Migrate regressions from 2000-2500, 2501-3000, 3000-3501, 3501-4000,
# 4001-4500, 4501-5000, 5001-5501, 5501-7000, 7001-8000 * Migrate remaining
# regression tests * Fix missing imports * Update docs with new system
# [ci skip] * Update CONTRIBUTING.md (fix formatting, update wording)
# * Remove lemmatizer tests in el lang * Move a few tests into the general
# tokenizer * Separate Doc and DocBin tests
# File stats: 62 lines, 1.6 KiB, Python.
import pytest

from spacy.attrs import IS_ALPHA, LEMMA, ORTH
from spacy.parts_of_speech import NOUN, VERB
from spacy.vocab import Vocab

@pytest.mark.issue(1868)
def test_issue1868():
    """Vocab.__contains__ accepts integer (hash) keys as well as strings."""
    vocab = Vocab()
    lexeme = vocab["hello"]
    # Both the hash and the text of a created lexeme are reported as present.
    assert lexeme.orth in vocab
    assert lexeme.orth_ in vocab
    # A string with no lexeme is absent ...
    assert "some string" not in vocab
    # ... and merely interning it in the StringStore does not create one.
    hash_id = vocab.strings.add("some string")
    assert hash_id not in vocab


@pytest.mark.parametrize(
    "text1,text2", [("Hello", "bye"), ("Hello", "hello"), ("Hello", "Hello,")]
)
def test_vocab_api_neq(en_vocab, text1, text2):
    """Distinct surface strings map to distinct orth IDs."""
    lex_a = en_vocab[text1]
    lex_b = en_vocab[text2]
    assert lex_a.orth != lex_b.orth


# NOTE(review): parametrizing over the bare string "Hello" made pytest iterate
# it character by character ("H", "e", "l", "l", "o"); wrapped in a list so the
# whole word is tested, matching the ["example"] form used by the sibling test.
@pytest.mark.parametrize("text", ["Hello"])
def test_vocab_api_eq(en_vocab, text):
    """Looking up the same string twice yields the same orth ID."""
    lex = en_vocab[text]
    assert en_vocab[text].orth == lex.orth


@pytest.mark.parametrize("text", ["example"])
def test_vocab_api_shape_attr(en_vocab, text):
    """A lexeme's shape ID is distinct from its orth ID."""
    lexeme = en_vocab[text]
    assert lexeme.orth != lexeme.shape


@pytest.mark.parametrize(
    "string,symbol",
    [
        ("IS_ALPHA", IS_ALPHA),
        ("NOUN", NOUN),
        ("VERB", VERB),
        ("LEMMA", LEMMA),
        ("ORTH", ORTH),
    ],
)
def test_vocab_api_symbols(en_vocab, string, symbol):
    """The StringStore resolves symbol names to their integer constants."""
    resolved = en_vocab.strings[string]
    assert resolved == symbol


# NOTE(review): parametrizing over the bare string "Hello" made pytest iterate
# it character by character ("H", "e", "l", "l", "o"); wrapped in a list so the
# whole word is tested, matching the ["example"] form used elsewhere in this file.
@pytest.mark.parametrize("text", ["Hello"])
def test_vocab_api_contains(en_vocab, text):
    """`in` reports whether a lexeme has been created for a string."""
    _ = en_vocab[text]  # noqa: F841  # force creation of the lexeme
    assert text in en_vocab
    assert "LKsdjvlsakdvlaksdvlkasjdvljasdlkfvm" not in en_vocab


def test_vocab_writing_system(en_vocab):
    """English writing-system metadata: left-to-right script with casing."""
    writing = en_vocab.writing_system
    assert writing["direction"] == "ltr"
    assert writing["has_case"] is True
|