# coding: utf-8
from __future__ import unicode_literals

import pytest
import re

from spacy.language import Language
from spacy.tokenizer import Tokenizer

from ..util import make_tempdir


@pytest.fixture
def meta_data():
    return {
        'name': 'name-in-fixture',
        'version': 'version-in-fixture',
        'description': 'description-in-fixture',
        'author': 'author-in-fixture',
        'email': 'email-in-fixture',
        'url': 'url-in-fixture',
        'license': 'license-in-fixture',
        'vectors': {'width': 0, 'vectors': 0, 'keys': 0, 'name': None}
    }
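

# Hedged addition, not part of the original file: Language.to_disk() also
# writes this meta dict out as meta.json in the pipeline directory, so the
# fixture values should be inspectable on disk. The meta.json filename is an
# assumption about the on-disk layout.
def test_meta_json_written_to_disk(meta_data):
    language = Language(meta=meta_data)
    with make_tempdir() as d:
        language.to_disk(d)
        assert (d / 'meta.json').exists()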


def test_serialize_language_meta_disk(meta_data):
    language = Language(meta=meta_data)
    with make_tempdir() as d:
        language.to_disk(d)
        new_language = Language().from_disk(d)
    assert new_language.meta == language.meta
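

# Complementary sketch, not part of the original suite: the same meta round
# trip via byte serialization, assuming Language.to_bytes()/from_bytes()
# handle meta the same way to_disk()/from_disk() do.
def test_serialize_language_meta_bytes(meta_data):
    language = Language(meta=meta_data)
    new_language = Language().from_bytes(language.to_bytes())
    assert new_language.meta == language.meta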


def test_serialize_with_custom_tokenizer():
    """Test that serialization with a custom tokenizer works without token_match.
    See: https://support.prodi.gy/t/how-to-save-a-custom-tokenizer/661/2
    """
    # Prefixes like "1/", "2/", ":<2 digits>:" and ":<2 digits><A-K>:"; an
    # empty suffix pattern; "~" as the only infix.
    prefix_re = re.compile(r'''1/|2/|:[0-9][0-9][A-K]:|:[0-9][0-9]:''')
    suffix_re = re.compile(r'''''')
    infix_re = re.compile(r'''[~]''')

    def custom_tokenizer(nlp):
        return Tokenizer(nlp.vocab,
                         {},
                         prefix_search=prefix_re.search,
                         suffix_search=suffix_re.search,
                         infix_finditer=infix_re.finditer)

    nlp = Language()
    nlp.tokenizer = custom_tokenizer(nlp)
    with make_tempdir() as d:
        nlp.to_disk(d)
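

# Hedged follow-up sketch, not part of the original file: load the serialized
# pipeline back and check that a custom prefix rule survives. This assumes
# Tokenizer serialization persists the compiled regex patterns so that no
# custom tokenizer factory is needed at load time.
def test_deserialize_with_custom_tokenizer():
    prefix_re = re.compile(r'''1/|2/''')
    nlp = Language()
    nlp.tokenizer = Tokenizer(nlp.vocab, {}, prefix_search=prefix_re.search)
    with make_tempdir() as d:
        nlp.to_disk(d)
        new_nlp = Language().from_disk(d)
        # the restored search callable should still match the "1/" prefix
        assert new_nlp.tokenizer.prefix_search('1/2') is not None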