Mirror of https://github.com/explosion/spaCy.git (synced 2025-01-26 17:24:41 +03:00)
Simplify tests and avoid tokenizing
commit 6bbf4ea309 (parent a5b1f6dcec)
@@ -1,46 +1,43 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
+from spacy.tokens import Doc
 from spacy.attrs import ORTH, SHAPE, POS, DEP
 
 from ..util import get_doc
 
 
-def test_doc_array_attr_of_token(en_tokenizer, en_vocab):
-    text = "An example sentence"
-    tokens = en_tokenizer(text)
-    example = tokens.vocab["example"]
+def test_doc_array_attr_of_token(en_vocab):
+    doc = Doc(en_vocab, words=["An", "example", "sentence"])
+    example = doc.vocab["example"]
     assert example.orth != example.shape
-    feats_array = tokens.to_array((ORTH, SHAPE))
-    assert feats_array[0][0] != feats_array[0][1]
+    feats_array = doc.to_array((ORTH, SHAPE))
+    assert feats_array[0][0] != feats_array[0][1]
 
 
-def test_doc_stringy_array_attr_of_token(en_tokenizer, en_vocab):
-    text = "An example sentence"
-    tokens = en_tokenizer(text)
-    example = tokens.vocab["example"]
+def test_doc_stringy_array_attr_of_token(en_vocab):
+    doc = Doc(en_vocab, words=["An", "example", "sentence"])
+    example = doc.vocab["example"]
     assert example.orth != example.shape
-    feats_array = tokens.to_array((ORTH, SHAPE))
-    feats_array_stringy = tokens.to_array(("ORTH", "SHAPE"))
+    feats_array = doc.to_array((ORTH, SHAPE))
+    feats_array_stringy = doc.to_array(("ORTH", "SHAPE"))
     assert feats_array_stringy[0][0] == feats_array[0][0]
     assert feats_array_stringy[0][1] == feats_array[0][1]
 
 
-def test_doc_scalar_attr_of_token(en_tokenizer, en_vocab):
-    text = "An example sentence"
-    tokens = en_tokenizer(text)
-    example = tokens.vocab["example"]
+def test_doc_scalar_attr_of_token(en_vocab):
+    doc = Doc(en_vocab, words=["An", "example", "sentence"])
+    example = doc.vocab["example"]
     assert example.orth != example.shape
-    feats_array = tokens.to_array(ORTH)
+    feats_array = doc.to_array(ORTH)
     assert feats_array.shape == (3,)
 
 
-def test_doc_array_tag(en_tokenizer):
-    text = "A nice sentence."
+def test_doc_array_tag(en_vocab):
+    words = ["A", "nice", "sentence", "."]
     pos = ["DET", "ADJ", "NOUN", "PUNCT"]
-    tokens = en_tokenizer(text)
-    doc = get_doc(tokens.vocab, words=[t.text for t in tokens], pos=pos)
+    doc = get_doc(en_vocab, words=words, pos=pos)
     assert doc[0].pos != doc[1].pos != doc[2].pos != doc[3].pos
     feats_array = doc.to_array((ORTH, POS))
     assert feats_array[0][1] == doc[0].pos
@@ -49,11 +46,10 @@ def test_doc_array_tag(en_tokenizer):
     assert feats_array[3][1] == doc[3].pos
 
 
-def test_doc_array_dep(en_tokenizer):
-    text = "A nice sentence."
+def test_doc_array_dep(en_vocab):
+    words = ["A", "nice", "sentence", "."]
     deps = ["det", "amod", "ROOT", "punct"]
-    tokens = en_tokenizer(text)
-    doc = get_doc(tokens.vocab, words=[t.text for t in tokens], deps=deps)
+    doc = get_doc(en_vocab, words=words, deps=deps)
     feats_array = doc.to_array((ORTH, DEP))
     assert feats_array[0][1] == doc[0].dep
     assert feats_array[1][1] == doc[1].dep