mirror of https://github.com/explosion/spaCy.git
Add Lemmatizer and simplify related components

* Add `Lemmatizer` pipe with `lookup` and `rule` modes using the `Lookups` tables (see the usage sketch below).
* Reduce `Tagger` to a simple tagger that sets `Token.tag` (no pos or lemma)
* Reduce `Morphology` to only keep track of morph tags (no tag map, lemmatizer, or morph rules)
* Remove lemmatizer from `Vocab`
* Adjust many tests

Differences:

* No default lookup lemmas
* No special treatment of TAG in `from_array` and similar required
* Easier to modify labels in a `Tagger`
* No extra strings added from morphology / tag map

Follow-up commits:

* Fix test
* Initial fix for Lemmatizer config/serialization
* Adjust init test to be more generic
* Adjust init test to force empty Lookups
* Add simple cache to rule-based lemmatizer
* Convert language-specific lemmatizers to component lemmatizers; remove the previous lemmatizer class
* Fix French and Polish lemmatizers
* Remove outdated UPOS conversions
* Update Russian lemmatizer init in tests
* Add minimal init/run tests for custom lemmatizers
* Add option to overwrite existing lemmas
* Update mode setting, lookup loading, and caching:
  * Make `mode` an immutable property
  * Only enforce strict `load_lookups` for known supported modes
  * Move caching into individual `_lemmatize` methods
  * Implement strict checks when lang is not found in lookups
* Fix tables/lookups in `make_lemmatizer`
* Reallow provided lookups and allow for stricter checks
* Add lookups asset to all Lemmatizer pipe tests
* Rename lookups in lemmatizer init test
* Clean up merge
* Refactor lookup table loading:
  * Add a helper, `load_lemmatizer_lookups`, that loads required and optional lookups tables based on settings provided by a config
  * Add `Lookups.set_table` to set a table from a provided `Table`
  * Reorder class definitions to be able to specify type as `Table`
* Move registry assets into test methods
* Refactor lookups tables config: use class methods within `Lemmatizer` to provide the config for particular modes and to load the lookups from a config
* Add pipe and score to lemmatizer
* Simplify `Tagger.score`
* Add missing import
* Clean up imports and auto-format
* Remove unused kwarg
* Tidy up and auto-format
* Update docstrings for `Lemmatizer`; additionally modify the `is_base_form` API to take `Token` instead of individual features
* Remove tag map values from `Tagger.add_label`
* Update API docs
* Fix relative link in Lemmatizer API docs
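A minimal usage sketch of the resulting component (an illustration, not part of this commit; it assumes a spaCy v3-style pipeline and that `spacy-lookups-data` is installed so the `lemma_lookup` table can be loaded):

```python
import spacy

# Build a blank pipeline and add the new component lemmatizer in "lookup"
# mode; the tables are loaded through the Lookups API at initialization.
nlp = spacy.blank("en")
nlp.add_pipe("lemmatizer", config={"mode": "lookup"})
nlp.initialize()

doc = nlp("The cats were chasing mice")
print([token.lemma_ for token in doc])
```

In `rule` mode the component instead consults the `lemma_rules`, `lemma_index`, and `lemma_exc` tables together with the updated `is_base_form(token)` check, so it expects POS tags from an earlier component such as a tagger or morphologizer.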
282 lines · 6.1 KiB · Python
import pytest

from spacy.util import get_lang_class


def pytest_addoption(parser):
    parser.addoption("--slow", action="store_true", help="include slow tests")


def pytest_runtest_setup(item):
    def getopt(opt):
        # When using 'pytest --pyargs spacy' to test an installed copy of
        # spacy, pytest skips running our pytest_addoption() hook. Later, when
        # we call getoption(), pytest raises an error, because it doesn't
        # recognize the option we're asking about. To avoid this, we need to
        # pass a default value. We default to False, i.e., we act like all the
        # options weren't given.
        return item.config.getoption(f"--{opt}", False)

    for opt in ["slow"]:
        if opt in item.keywords and not getopt(opt):
            pytest.skip(f"need --{opt} option to run")
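

# Usage sketch (an assumption, not part of this file): tests opt in to the
# --slow gate above via the matching pytest marker, which is what shows up
# in item.keywords, e.g.:
#
#     @pytest.mark.slow
#     def test_expensive_pipeline():
#         ...
#
# Such tests are skipped unless pytest is invoked with --slow.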


# Fixtures for language tokenizers (languages sorted alphabetically)


@pytest.fixture(scope="module")
def tokenizer():
    return get_lang_class("xx")().tokenizer


@pytest.fixture(scope="session")
def ar_tokenizer():
    return get_lang_class("ar")().tokenizer


@pytest.fixture(scope="session")
def bn_tokenizer():
    return get_lang_class("bn")().tokenizer


@pytest.fixture(scope="session")
def ca_tokenizer():
    return get_lang_class("ca")().tokenizer


@pytest.fixture(scope="session")
def da_tokenizer():
    return get_lang_class("da")().tokenizer


@pytest.fixture(scope="session")
def de_tokenizer():
    return get_lang_class("de")().tokenizer


@pytest.fixture(scope="session")
def el_tokenizer():
    return get_lang_class("el")().tokenizer


@pytest.fixture(scope="session")
def en_tokenizer():
    return get_lang_class("en")().tokenizer


@pytest.fixture(scope="session")
def en_vocab():
    return get_lang_class("en")().vocab


@pytest.fixture(scope="session")
def en_parser(en_vocab):
    nlp = get_lang_class("en")(en_vocab)
    return nlp.create_pipe("parser")


@pytest.fixture(scope="session")
def es_tokenizer():
    return get_lang_class("es")().tokenizer


@pytest.fixture(scope="session")
def eu_tokenizer():
    return get_lang_class("eu")().tokenizer


@pytest.fixture(scope="session")
def fa_tokenizer():
    return get_lang_class("fa")().tokenizer


@pytest.fixture(scope="session")
def fi_tokenizer():
    return get_lang_class("fi")().tokenizer


@pytest.fixture(scope="session")
def fr_tokenizer():
    return get_lang_class("fr")().tokenizer


@pytest.fixture(scope="session")
def ga_tokenizer():
    return get_lang_class("ga")().tokenizer


@pytest.fixture(scope="session")
def gu_tokenizer():
    return get_lang_class("gu")().tokenizer


@pytest.fixture(scope="session")
def he_tokenizer():
    return get_lang_class("he")().tokenizer


@pytest.fixture(scope="session")
def hr_tokenizer():
    return get_lang_class("hr")().tokenizer


@pytest.fixture
def hu_tokenizer():
    return get_lang_class("hu")().tokenizer


@pytest.fixture(scope="session")
def hy_tokenizer():
    return get_lang_class("hy")().tokenizer


@pytest.fixture(scope="session")
def id_tokenizer():
    return get_lang_class("id")().tokenizer


@pytest.fixture(scope="session")
def it_tokenizer():
    return get_lang_class("it")().tokenizer


@pytest.fixture(scope="session")
def ja_tokenizer():
    pytest.importorskip("sudachipy")
    return get_lang_class("ja")().tokenizer


@pytest.fixture(scope="session")
def ko_tokenizer():
    pytest.importorskip("natto")
    return get_lang_class("ko")().tokenizer


@pytest.fixture(scope="session")
def lb_tokenizer():
    return get_lang_class("lb")().tokenizer


@pytest.fixture(scope="session")
def lt_tokenizer():
    return get_lang_class("lt")().tokenizer


@pytest.fixture(scope="session")
def ml_tokenizer():
    return get_lang_class("ml")().tokenizer


@pytest.fixture(scope="session")
def nb_tokenizer():
    return get_lang_class("nb")().tokenizer


@pytest.fixture(scope="session")
def ne_tokenizer():
    return get_lang_class("ne")().tokenizer


@pytest.fixture(scope="session")
def nl_tokenizer():
    return get_lang_class("nl")().tokenizer


@pytest.fixture(scope="session")
def pl_tokenizer():
    return get_lang_class("pl")().tokenizer


@pytest.fixture(scope="session")
def pt_tokenizer():
    return get_lang_class("pt")().tokenizer


@pytest.fixture(scope="session")
def ro_tokenizer():
    return get_lang_class("ro")().tokenizer


@pytest.fixture(scope="session")
def ru_tokenizer():
    pytest.importorskip("pymorphy2")
    return get_lang_class("ru")().tokenizer


@pytest.fixture
def ru_lemmatizer():
    pytest.importorskip("pymorphy2")
    return get_lang_class("ru")().add_pipe("lemmatizer")


@pytest.fixture(scope="session")
def sr_tokenizer():
    return get_lang_class("sr")().tokenizer


@pytest.fixture(scope="session")
def sv_tokenizer():
    return get_lang_class("sv")().tokenizer


@pytest.fixture(scope="session")
def th_tokenizer():
    pytest.importorskip("pythainlp")
    return get_lang_class("th")().tokenizer


@pytest.fixture(scope="session")
def tr_tokenizer():
    return get_lang_class("tr")().tokenizer


@pytest.fixture(scope="session")
def tt_tokenizer():
    return get_lang_class("tt")().tokenizer


@pytest.fixture(scope="session")
def uk_tokenizer():
    pytest.importorskip("pymorphy2")
    pytest.importorskip("pymorphy2.lang")
    return get_lang_class("uk")().tokenizer


@pytest.fixture(scope="session")
def ur_tokenizer():
    return get_lang_class("ur")().tokenizer


@pytest.fixture(scope="session")
def yo_tokenizer():
    return get_lang_class("yo")().tokenizer


@pytest.fixture(scope="session")
def zh_tokenizer_char():
    nlp = get_lang_class("zh")()
    return nlp.tokenizer


@pytest.fixture(scope="session")
def zh_tokenizer_jieba():
    pytest.importorskip("jieba")
    config = {
        "@tokenizers": "spacy.zh.ChineseTokenizer",
        "segmenter": "jieba",
    }
    nlp = get_lang_class("zh").from_config({"nlp": {"tokenizer": config}})
    return nlp.tokenizer


@pytest.fixture(scope="session")
def zh_tokenizer_pkuseg():
    pytest.importorskip("pkuseg")
    config = {
        "@tokenizers": "spacy.zh.ChineseTokenizer",
        "segmenter": "pkuseg",
        "pkuseg_model": "default",
    }
    nlp = get_lang_class("zh").from_config({"nlp": {"tokenizer": config}})
    return nlp.tokenizer
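

# Note (an illustrative assumption, not part of this file): the same
# config-driven pattern accepts any registered segmenter; the plain
# zh_tokenizer_char fixture above relies on the default "char" segmenter,
# which could also be requested explicitly:
#
#     config = {"@tokenizers": "spacy.zh.ChineseTokenizer", "segmenter": "char"}
#     nlp = get_lang_class("zh").from_config({"nlp": {"tokenizer": config}})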