## Description

Related issues: #2379 (should be fixed by separating model tests)

* **total execution time down from > 300 seconds to under 60 seconds** 🎉
* removed all model-specific tests that could only really be run manually anyway – those will now live in a separate test suite in the [`spacy-models`](https://github.com/explosion/spacy-models) repository and are already integrated into our new model training infrastructure
* changed all relative imports to absolute imports to prepare for moving the test suite from `/spacy/tests` to `/tests`, so it'll always test against the installed version (see the sketch after the checklist)
* merged old regression tests into collections, e.g. `test_issue1001-1500.py` (about 90% of the regression tests are very short anyway)
* tidied up and rewrote existing tests wherever possible

### Todo

- [ ] move tests to `/tests` and adjust CI commands accordingly
- [x] move model test suite from internal repo to `spacy-models`
- [x] ~~investigate why `pipeline/test_textcat.py` is flaky~~
- [x] review old regression tests (leftover files) and see if they can be merged, simplified or deleted
- [ ] update documentation on how to run tests

### Types of change

enhancement, tests

## Checklist

<!--- Before you submit the PR, go over this checklist and make sure you can tick off all the boxes. [] -> [x] -->

- [x] I have submitted the spaCy Contributor Agreement.
- [x] I ran the tests, and all new and existing tests passed.
- [ ] My changes don't require a change to the documentation, or if they do, I've added all required information.
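A hypothetical before/after sketch of the relative-to-absolute import change described above (the exact modules touched vary across the diff; these two imports are taken from the test file below):

```python
# Before: relative imports, which only resolve when the tests run from
# inside the source checkout
# from ...tokenizer import Tokenizer
# from ...util import get_lang_class

# After: absolute imports, so the suite can move to /tests and run
# against the installed spaCy package
from spacy.tokenizer import Tokenizer
from spacy.util import get_lang_class
```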
		
			
				
	
	
		
45 lines · 1.6 KiB · Python
# coding: utf-8
from __future__ import unicode_literals

import pytest
from spacy.util import get_lang_class
from spacy.tokenizer import Tokenizer

from ..util import make_tempdir, assert_packed_msg_equal


def load_tokenizer(b):
    tok = get_lang_class('en').Defaults.create_tokenizer()
    tok.from_bytes(b)
    return tok


def test_serialize_custom_tokenizer(en_vocab, en_tokenizer):
    """Test that a custom tokenizer with not all functions defined can be
    serialized and deserialized correctly (see #2494)."""
    tokenizer = Tokenizer(en_vocab, suffix_search=en_tokenizer.suffix_search)
    tokenizer_bytes = tokenizer.to_bytes()
    # deserializing should not raise, even though only suffix_search is set
    new_tokenizer = Tokenizer(en_vocab).from_bytes(tokenizer_bytes)


@pytest.mark.skip(reason="Currently unreliable across platforms")
@pytest.mark.parametrize('text', ["I💜you", "they’re", "“hello”"])
def test_serialize_tokenizer_roundtrip_bytes(en_tokenizer, text):
    tokenizer = en_tokenizer
    new_tokenizer = load_tokenizer(tokenizer.to_bytes())
    assert_packed_msg_equal(new_tokenizer.to_bytes(), tokenizer.to_bytes())
    assert new_tokenizer.to_bytes() == tokenizer.to_bytes()
    doc1 = tokenizer(text)
    doc2 = new_tokenizer(text)
    assert [token.text for token in doc1] == [token.text for token in doc2]


@pytest.mark.skip(reason="Currently unreliable across platforms")
def test_serialize_tokenizer_roundtrip_disk(en_tokenizer):
    tokenizer = en_tokenizer
    with make_tempdir() as d:
        file_path = d / 'tokenizer'
        tokenizer.to_disk(file_path)
        tokenizer_d = en_tokenizer.from_disk(file_path)
        assert tokenizer.to_bytes() == tokenizer_d.to_bytes()
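The last Todo item mentions updating the documentation on how to run the tests. A minimal sketch of invoking the suite programmatically with pytest, assuming the current `/spacy/tests` layout (from the shell, `python -m pytest spacy/tests` is equivalent):

```python
import pytest

# run the whole suite; the path assumes tests still live under /spacy/tests
exit_code = pytest.main(["spacy/tests"])

# or run just the serialization module shown above (file path is an assumption)
pytest.main(["spacy/tests/serialize/test_serialize_tokenizer.py"])
```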