## Description

Related issues: #2379 (should be fixed by separating model tests)

* **total execution time down from > 300 seconds to under 60 seconds** 🎉
* removed all model-specific tests that could only really be run manually anyway – those will now live in a separate test suite in the [`spacy-models`](https://github.com/explosion/spacy-models) repository and are already integrated into our new model training infrastructure
* changed all relative imports to absolute imports to prepare for moving the test suite from `/spacy/tests` to `/tests` (it'll now always test against the installed version) – see the sketch after the checklist below
* merged old regression tests into collections, e.g. `test_issue1001-1500.py` (about 90% of the regression tests are very short anyway)
* tidied up and rewrote existing tests wherever possible

### Todo

- [ ] move tests to `/tests` and adjust CI commands accordingly
- [x] move model test suite from internal repo to `spacy-models`
- [x] ~~investigate why `pipeline/test_textcat.py` is flaky~~
- [x] review old regression tests (leftover files) and see if they can be merged, simplified or deleted
- [ ] update documentation on how to run tests

### Types of change

enhancement, tests

## Checklist

<!--- Before you submit the PR, go over this checklist and make sure you can tick off all the boxes. [] -> [x] -->

- [x] I have submitted the spaCy Contributor Agreement.
- [x] I ran the tests, and all new and existing tests passed.
- [ ] My changes don't require a change to the documentation, or if they do, I've added all required information.
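A minimal before/after sketch of the import change, assuming a test module a few levels deep under `spacy/tests/` (the exact paths vary per file and are illustrative here; suite-internal helpers such as `make_tempdir` keep their relative imports, since they move with the suite):

```python
# before: relative imports into the package under test – these break
# once the test suite moves out of /spacy/tests
# from ...language import Language
# from ...tokenizer import Tokenizer

# after: absolute imports, resolved against the installed spacy package
from spacy.language import Language
from spacy.tokenizer import Tokenizer
```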
One of the rewritten test files, shown below (53 lines · 1.5 KiB · Python):
```python
# coding: utf-8
from __future__ import unicode_literals

import pytest
import re
from spacy.language import Language
from spacy.tokenizer import Tokenizer

from ..util import make_tempdir


@pytest.fixture
def meta_data():
    return {
        'name': 'name-in-fixture',
        'version': 'version-in-fixture',
        'description': 'description-in-fixture',
        'author': 'author-in-fixture',
        'email': 'email-in-fixture',
        'url': 'url-in-fixture',
        'license': 'license-in-fixture',
        'vectors': {'width': 0, 'vectors': 0, 'keys': 0, 'name': None}
    }


def test_serialize_language_meta_disk(meta_data):
    language = Language(meta=meta_data)
    with make_tempdir() as d:
        language.to_disk(d)
        new_language = Language().from_disk(d)
    assert new_language.meta == language.meta


def test_serialize_with_custom_tokenizer():
    """Test that serialization with custom tokenizer works without token_match.
    See: https://support.prodi.gy/t/how-to-save-a-custom-tokenizer/661/2
    """
    prefix_re = re.compile(r'''1/|2/|:[0-9][0-9][A-K]:|:[0-9][0-9]:''')
    suffix_re = re.compile(r'''''')
    infix_re = re.compile(r'''[~]''')

    def custom_tokenizer(nlp):
        return Tokenizer(nlp.vocab,
                         {},
                         prefix_search=prefix_re.search,
                         suffix_search=suffix_re.search,
                         infix_finditer=infix_re.finditer)

    nlp = Language()
    nlp.tokenizer = custom_tokenizer(nlp)
    with make_tempdir() as d:
        nlp.to_disk(d)
```
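Both tests rely on the suite-internal `make_tempdir` helper imported from `..util`. A minimal sketch of such a helper, assuming a context manager that yields a temporary directory and deletes it on exit (an illustration, not necessarily the exact implementation in the suite's `util` module):

```python
import contextlib
import shutil
import tempfile
from pathlib import Path

@contextlib.contextmanager
def make_tempdir():
    # create a fresh temporary directory and hand its path to the test
    d = Path(tempfile.mkdtemp())
    yield d
    # clean up the directory and everything the test wrote into it
    shutil.rmtree(str(d))
```

Writing it as a context manager keeps cleanup out of the individual tests: `to_disk`/`from_disk` round-trips like the ones above just write into `d`, and no leftover files can leak between test runs.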