Mirror of https://github.com/explosion/spaCy.git (synced 2024-12-27 10:26:35 +03:00), commit 75f3234404
## Description

Related issues: #2379 (should be fixed by separating the model tests)

* **Total execution time down from over 300 seconds to under 60 seconds** 🎉
* Removed all model-specific tests that could only really be run manually anyway – those now live in a separate test suite in the [`spacy-models`](https://github.com/explosion/spacy-models) repository and are already integrated into our new model training infrastructure.
* Changed all relative imports to absolute imports to prepare for moving the test suite from `/spacy/tests` to `/tests` (it'll then always test against the installed version).
* Merged old regression tests into collections, e.g. `test_issue1001-1500.py` (about 90% of the regression tests are very short anyway) – a sketch follows the checklist below.
* Tidied up and rewrote existing tests wherever possible.

### Todo

- [ ] Move tests to `/tests` and adjust CI commands accordingly.
- [x] Move the model test suite from the internal repo to `spacy-models`.
- [x] ~~Investigate why `pipeline/test_textcat.py` is flaky.~~
- [x] Review old regression tests (leftover files) and see if they can be merged, simplified or deleted.
- [ ] Update the documentation on how to run the tests.

### Types of change

enhancement, tests

## Checklist

- [x] I have submitted the spaCy Contributor Agreement.
- [x] I ran the tests, and all new and existing tests passed.
- [ ] My changes don't require a change to the documentation, or if they do, I've added all required information.
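As a rough, hypothetical sketch (not part of this diff), a merged regression collection might group several very short tests into one module and use absolute imports from the installed `spacy` package. The issue numbers and test bodies below are placeholders for illustration, not actual regression cases from the repository:

```python
# coding: utf-8
# Hypothetical example of a merged regression collection, e.g. test_issue1001-1500.py.
# Issue numbers and assertions are placeholders, not real regression cases.
from __future__ import unicode_literals

import pytest

# absolute imports, so the suite can later move out of /spacy/tests
from spacy.language import Language
from spacy.tokens import Doc
from spacy.vocab import Vocab


def test_issue1234():
    # placeholder: tokenizing an empty string should produce an empty Doc
    nlp = Language()
    assert len(nlp('')) == 0


@pytest.mark.parametrize('text', ['hello world', 'a b c'])
def test_issue1456(text):
    # placeholder: constructing a Doc directly from words keeps the word count
    doc = Doc(Vocab(), words=text.split())
    assert len(doc) == len(text.split())
```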
# coding: utf-8
from __future__ import unicode_literals

from spacy.language import Language
from spacy.compat import pickle, unicode_


def test_pickle_single_doc():
    # a single Doc should survive a pickle roundtrip unchanged
    nlp = Language()
    doc = nlp('pickle roundtrip')
    data = pickle.dumps(doc, 1)
    doc2 = pickle.loads(data)
    assert doc2.text == 'pickle roundtrip'


def test_list_of_docs_pickles_efficiently():
    # pickling many docs together should share the vocab, so the payload for
    # 100 docs stays within twice the size of a single pickled doc
    nlp = Language()
    for i in range(10000):
        _ = nlp.vocab[unicode_(i)]
    one_pickled = pickle.dumps(nlp('0'), -1)
    docs = list(nlp.pipe(unicode_(i) for i in range(100)))
    many_pickled = pickle.dumps(docs, -1)
    assert len(many_pickled) < (len(one_pickled) * 2)
    many_unpickled = pickle.loads(many_pickled)
    assert many_unpickled[0].text == '0'
    assert many_unpickled[-1].text == '99'
    assert len(many_unpickled) == 100


def test_user_data_from_disk():
    # user_data should survive serialization to and from bytes
    nlp = Language()
    doc = nlp('Hello')
    doc.user_data[(0, 1)] = False
    b = doc.to_bytes()
    doc2 = doc.__class__(doc.vocab).from_bytes(b)
    assert doc2.user_data[(0, 1)] is False


def test_user_data_unpickles():
    # user_data should also survive a pickle roundtrip
    nlp = Language()
    doc = nlp('Hello')
    doc.user_data[(0, 1)] = False
    b = pickle.dumps(doc)
    doc2 = pickle.loads(b)
    assert doc2.user_data[(0, 1)] is False


def test_hooks_unpickle():
    # user hooks (here: a custom similarity hook) should survive pickling
    def inner_func(d1, d2):
        return 'hello!'

    nlp = Language()
    doc = nlp('Hello')
    doc.user_hooks['similarity'] = inner_func
    b = pickle.dumps(doc)
    doc2 = pickle.loads(b)
    assert doc2.similarity(None) == 'hello!'