mirror of https://github.com/explosion/spaCy.git (synced 2024-11-11 04:08:09 +03:00)
a5cd203284
* Reduce stored lexemes data, move feats to lookups

* Move non-derivable lexemes features (`norm / cluster / prob`) to `spacy-lookups-data` as lookups
  * Get/set `norm` in both lookups and `LexemeC`, serialize in lookups
  * Remove `cluster` and `prob` from `LexemeC`, get/set/serialize in lookups only
* Remove serialization of lexemes data as `vocab/lexemes.bin`
  * Remove `SerializedLexemeC`
  * Remove `Lexeme.to_bytes/from_bytes`
* Modify normalization exception loading:
  * Always create the `Vocab.lookups` table `lexeme_norm` for normalization exceptions
  * Load base exceptions from `lang.norm_exceptions`, but load language-specific exceptions from lookups
  * Set `lex_attr_getters[NORM]` including the new lookups table in `BaseDefaults.create_vocab()` and when deserializing `Vocab`
* Remove all cached lexemes when deserializing the vocab so that the new normalizations override existing ones (replacing the previous step, which overwrote all lexemes data with the deserialized data)

* Skip English normalization test

  Skip the English normalization test because the data is now in `spacy-lookups-data`.

* Remove norm exceptions

  Moved to `spacy-lookups-data`.

* Move norm exceptions test to `spacy-lookups-data`

* Load extra lookups from `spacy-lookups-data` lazily

  Load extra lookups (currently for `cluster` and `prob`) lazily from the entry point `lg_extra` as `Vocab.lookups_extra`.

* Skip creating lexeme cache on load

  To improve model loading times, do not create the full lexeme cache when loading. Lexemes are created on demand during processing.

* Identify numeric values in `Lexeme.set_attrs()`

  With the removal of the special case for `PROB`, also identify `float` values to avoid trying to convert them with the `StringStore`.

* Skip lexeme cache init in `from_bytes`

* Unskip and update lookups tests for Python 3.6+

* Update vocab pickle to include `lookups_extra`

* Update vocab serialization tests

  Check strings rather than lexemes, since lexemes aren't initialized automatically, and account for the addition of `"_SP"`.

* Re-skip lookups test because of Python 3.5

* Skip `PROB`/float values in `Lexeme.set_attrs`

* Convert `is_oov` from a lexeme flag to vector presence

  Instead of storing `is_oov` as a lexeme flag, `is_oov` now reports whether the lexeme has a vector.

Co-authored-by: Matthew Honnibal <honnibal+gh@gmail.com>
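In user code, the change looks roughly like the sketch below. This is a minimal illustration assuming a spaCy v2.3-style blank English pipeline, not code from the commit itself; the `lexeme_norm` table, `lg_extra` entry point, and `Vocab.lookups_extra` names come from the commit message above, and the token `"cos"` is just an illustrative exception.

```python
# Minimal sketch of the new data flow, assuming a spaCy v2.3-style
# blank pipeline; names follow the commit message above.
from spacy.lang.en import English

vocab = English().vocab

# Normalization exceptions now live in the "lexeme_norm" lookups table
# (always created per this commit) instead of in lang.norm_exceptions.
if not vocab.lookups.has_table("lexeme_norm"):
    vocab.lookups.add_table("lexeme_norm")
norm_table = vocab.lookups.get_table("lexeme_norm")
norm_table["cos"] = "because"  # register an exception before the lexeme is cached
print(vocab["cos"].norm_)      # expected: "because"

# cluster and prob were removed from LexemeC; they are loaded lazily into
# vocab.lookups_extra from the "lg_extra" entry point of spacy-lookups-data.
lex = vocab["opera"]
print(lex.prob)    # default value unless a "lexeme_prob" table is available

# is_oov is no longer a stored flag: it reports whether the lexeme has a
# vector, so every lexeme is OOV in a pipeline without vectors.
print(lex.is_oov)  # expected: True
```

Because `lookups_extra` is only consulted on first access, pipelines that never read `cluster` or `prob` avoid the load cost entirely, in line with the commit's goal of faster model loading.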
68 lines · 2.3 KiB · Python
# coding: utf-8
from __future__ import unicode_literals

import pytest
import numpy
from spacy.attrs import IS_ALPHA, IS_DIGIT
from spacy.util import OOV_RANK


@pytest.mark.parametrize("text1,prob1,text2,prob2", [("NOUN", -1, "opera", -2)])
def test_vocab_lexeme_lt(en_vocab, text1, text2, prob1, prob2):
    """A more frequent lexeme (higher prob) compares as less than a less
    frequent one."""
    lex1 = en_vocab[text1]
    lex1.prob = prob1
    lex2 = en_vocab[text2]
    lex2.prob = prob2

    assert lex1 < lex2
    assert lex2 > lex1


@pytest.mark.parametrize("text1,text2", [("phantom", "opera")])
def test_vocab_lexeme_hash(en_vocab, text1, text2):
    """Test that lexemes are hashable."""
    lex1 = en_vocab[text1]
    lex2 = en_vocab[text2]
    lexes = {lex1: lex1, lex2: lex2}
    assert lexes[lex1].orth_ == text1
    assert lexes[lex2].orth_ == text2


def test_vocab_lexeme_is_alpha(en_vocab):
    # flags is a bit field, so test the IS_ALPHA bit directly
    assert en_vocab["the"].flags & (1 << IS_ALPHA)
    assert not en_vocab["1999"].flags & (1 << IS_ALPHA)
    assert not en_vocab["hello1"].flags & (1 << IS_ALPHA)


def test_vocab_lexeme_is_digit(en_vocab):
    assert not en_vocab["the"].flags & (1 << IS_DIGIT)
    assert en_vocab["1999"].flags & (1 << IS_DIGIT)
    assert not en_vocab["hello1"].flags & (1 << IS_DIGIT)


def test_vocab_lexeme_add_flag_auto_id(en_vocab):
    # Without flag_id, add_flag assigns the next available flag ID
    is_len4 = en_vocab.add_flag(lambda string: len(string) == 4)
    assert en_vocab["1999"].check_flag(is_len4) is True
    assert en_vocab["1999"].check_flag(IS_DIGIT) is True
    assert en_vocab["199"].check_flag(is_len4) is False
    assert en_vocab["199"].check_flag(IS_DIGIT) is True
    assert en_vocab["the"].check_flag(is_len4) is False
    assert en_vocab["dogs"].check_flag(is_len4) is True


def test_vocab_lexeme_add_flag_provided_id(en_vocab):
    # Passing flag_id=IS_DIGIT overwrites the built-in IS_DIGIT predicate
    is_len4 = en_vocab.add_flag(lambda string: len(string) == 4, flag_id=IS_DIGIT)
    assert en_vocab["1999"].check_flag(is_len4) is True
    assert en_vocab["199"].check_flag(is_len4) is False
    assert en_vocab["199"].check_flag(IS_DIGIT) is False
    assert en_vocab["the"].check_flag(is_len4) is False
    assert en_vocab["dogs"].check_flag(is_len4) is True


def test_vocab_lexeme_oov_rank(en_vocab):
    """Test that default rank is OOV_RANK."""
    lex = en_vocab["word"]
    assert OOV_RANK == numpy.iinfo(numpy.uint64).max
    assert lex.rank == OOV_RANK