spaCy/spacy/tests/serialize/test_packer.py
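"""Tests for spacy.serialize.packer.Packer: round-trip plain and annotated Docs
through the bit-packed serialization, including non-ASCII text and
Doc.to_bytes() / Doc.from_bytes()."""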

from __future__ import unicode_literals
import re
import pytest
import numpy
from spacy.language import Language
from spacy.en import English
from spacy.vocab import Vocab
from spacy.tokens.doc import Doc
from spacy.tokenizer import Tokenizer
from os import path
import os
from spacy import util
from spacy.attrs import ORTH, SPACY, TAG, DEP, HEAD
from spacy.serialize.packer import Packer
from spacy.serialize.bits import BitArray


@pytest.fixture
def vocab():
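    # Locate the installed 'en' data ($SPACY_DATA overrides the default search
    # path), then build the default English vocab and touch a few lexemes so
    # they are interned before the tests run.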
    path = os.environ.get('SPACY_DATA')
    if path is None:
        path = util.match_best_version('en', None, util.get_data_path())
    else:
        path = util.match_best_version('en', None, path)

    vocab = English.Defaults.create_vocab()
    lex = vocab['dog']
    assert vocab[vocab.strings['dog']].orth_ == 'dog'
    lex = vocab['the']
    lex = vocab['quick']
    lex = vocab['jumped']
    return vocab


@pytest.fixture
def tokenizer(vocab):
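    # The prefix/suffix/infix patterns ('!!!!!!!!!') never match real text,
    # so this tokenizer splits on whitespace only.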
    null_re = re.compile(r'!!!!!!!!!')
    tokenizer = Tokenizer(vocab, {}, null_re.search, null_re.search, null_re.finditer)
    return tokenizer


def test_char_packer(vocab):
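    # Encode a raw byte string with the packer's character codec and decode it back.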
    packer = Packer(vocab, [])
    bits = BitArray()
    bits.seek(0)

    byte_str = bytearray(b'the dog jumped')
    packer.char_codec.encode(byte_str, bits)
    bits.seek(0)
    result = [b''] * len(byte_str)
    packer.char_codec.decode(bits, result)
    assert bytearray(result) == byte_str


def test_packer_unannotated(tokenizer):
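    # With no attribute frequencies supplied, only the text is encoded; the
    # decoded Doc must reproduce the original string.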
    packer = Packer(tokenizer.vocab, [])

    msg = tokenizer(u'the dog jumped')
    assert msg.string == 'the dog jumped'

    bits = packer.pack(msg)
    result = packer.unpack(bits)
    assert result.string == 'the dog jumped'


@pytest.mark.models
def test_packer_annotated(tokenizer):
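    # Pack a Doc annotated with tags, dependency labels and heads, using
    # hand-specified attribute frequencies to build the codecs, and check that
    # every annotation survives the round trip.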
    vocab = tokenizer.vocab
    nn = vocab.strings['NN']
    dt = vocab.strings['DT']
    vbd = vocab.strings['VBD']
    jj = vocab.strings['JJ']
    det = vocab.strings['det']
    nsubj = vocab.strings['nsubj']
    adj = vocab.strings['adj']
    root = vocab.strings['ROOT']

    attr_freqs = [
        (TAG, [(nn, 0.1), (dt, 0.2), (jj, 0.01), (vbd, 0.05)]),
        (DEP, {det: 0.2, nsubj: 0.1, adj: 0.05, root: 0.1}.items()),
        (HEAD, {0: 0.05, 1: 0.2, -1: 0.2, -2: 0.1, 2: 0.1}.items())
    ]

    packer = Packer(vocab, attr_freqs)

    msg = tokenizer(u'the dog jumped')
    msg.from_array(
        [TAG, DEP, HEAD],
        numpy.array([
            [dt, det, 1],
            [nn, nsubj, 1],
            [vbd, root, 0]
        ], dtype=numpy.int32))

    assert msg.string == 'the dog jumped'
    assert [t.tag_ for t in msg] == ['DT', 'NN', 'VBD']
    assert [t.dep_ for t in msg] == ['det', 'nsubj', 'ROOT']
    assert [(t.head.i - t.i) for t in msg] == [1, 1, 0]

    bits = packer.pack(msg)
    result = packer.unpack(bits)

    assert result.string == 'the dog jumped'
    assert [t.tag_ for t in result] == ['DT', 'NN', 'VBD']
    assert [t.dep_ for t in result] == ['det', 'nsubj', 'ROOT']
    assert [(t.head.i - t.i) for t in result] == [1, 1, 0]


def test_packer_bad_chars(tokenizer):
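    # Non-ASCII input must survive the pack/unpack round trip unchanged.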
    string = u'naja gut, is eher bl\xf6d und nicht mit reddit.com/digg.com vergleichbar; vielleicht auf dem weg dahin'
    packer = Packer(tokenizer.vocab, [])
    doc = tokenizer(string)
    bits = packer.pack(doc)
    result = packer.unpack(bits)
    assert result.string == doc.string


@pytest.mark.models
def test_packer_bad_chars_to_bytes(EN):
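    # Same non-ASCII text, but serialized with Doc.to_bytes() / Doc.from_bytes()
    # using the loaded English pipeline; the tags must match after deserialization.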
    string = u'naja gut, is eher bl\xf6d und nicht mit reddit.com/digg.com vergleichbar; vielleicht auf dem weg dahin'
    doc = EN(string)
    byte_string = doc.to_bytes()
    result = Doc(EN.vocab).from_bytes(byte_string)
    assert [t.tag_ for t in result] == [t.tag_ for t in doc]