mirror of
				https://github.com/explosion/spaCy.git
				synced 2025-11-04 18:07:26 +03:00 
			
		
		
		
	
		
			
				
	
	
		
			69 lines
		
	
	
		
			2.4 KiB
		
	
	
	
		
			Python
		
	
	
	
	
	
			
		
		
	
	
			69 lines
		
	
	
		
			2.4 KiB
		
	
	
	
		
			Python
		
	
	
	
	
	
# coding: utf-8
 | 
						|
from __future__ import unicode_literals
 | 
						|
 | 
						|
import pytest
 | 
						|
from spacy.tokens import Doc
 | 
						|
from spacy.attrs import ORTH, SHAPE, POS, DEP
 | 
						|
 | 
						|
from ..util import get_doc
 | 
						|
 | 
						|
 | 
						|
def test_doc_array_attr_of_token(en_vocab):
    """Doc.to_array exports per-token attribute IDs: ORTH and SHAPE columns
    differ per token and the ORTH column matches the vocab lexeme's orth ID.
    """
    doc = Doc(en_vocab, words=["An", "example", "sentence"])
    example = doc.vocab["example"]
    # Sanity check: ORTH and SHAPE IDs differ, so the column comparison
    # below is not vacuous.
    assert example.orth != example.shape
    feats_array = doc.to_array((ORTH, SHAPE))
    # Column 0 is ORTH, column 1 is SHAPE.
    assert feats_array[0][0] != feats_array[0][1]
    # Row 1 holds "example"; its ORTH cell equals the lexeme's orth ID.
    # (The original repeated the previous assertion verbatim here.)
    assert feats_array[1][0] == example.orth
def test_doc_stringy_array_attr_of_token(en_vocab):
    """Passing string attribute names to Doc.to_array yields the same
    values as passing the corresponding integer attribute IDs.
    """
    doc = Doc(en_vocab, words=["An", "example", "sentence"])
    lexeme = doc.vocab["example"]
    assert lexeme.orth != lexeme.shape
    by_id = doc.to_array((ORTH, SHAPE))
    by_name = doc.to_array(("ORTH", "SHAPE"))
    # Both columns of the first row must agree between the two exports.
    for col in (0, 1):
        assert by_name[0][col] == by_id[0][col]
def test_doc_scalar_attr_of_token(en_vocab):
    """A single (scalar) attribute produces a 1-D array of length n_tokens."""
    words = ["An", "example", "sentence"]
    doc = Doc(en_vocab, words=words)
    lexeme = doc.vocab["example"]
    assert lexeme.orth != lexeme.shape
    orth_array = doc.to_array(ORTH)
    assert orth_array.shape == (len(words),)
def test_doc_array_tag(en_vocab):
    """The POS column of Doc.to_array matches each token's .pos attribute."""
    words = ["A", "nice", "sentence", "."]
    pos_tags = ["DET", "ADJ", "NOUN", "PUNCT"]
    doc = get_doc(en_vocab, words=words, pos=pos_tags)
    # All four tags are distinct, so a column/row mix-up would be caught.
    assert doc[0].pos != doc[1].pos != doc[2].pos != doc[3].pos
    feats_array = doc.to_array((ORTH, POS))
    for idx, token in enumerate(doc):
        assert feats_array[idx][1] == token.pos
def test_doc_array_dep(en_vocab):
    """The DEP column of Doc.to_array matches each token's .dep attribute."""
    words = ["A", "nice", "sentence", "."]
    dep_labels = ["det", "amod", "ROOT", "punct"]
    doc = get_doc(en_vocab, words=words, deps=dep_labels)
    feats_array = doc.to_array((ORTH, DEP))
    for idx, token in enumerate(doc):
        assert feats_array[idx][1] == token.dep
@pytest.mark.parametrize("attrs", [["ORTH", "SHAPE"], "IS_ALPHA"])
def test_doc_array_to_from_string_attrs(en_vocab, attrs):
    """Doc.to_array and Doc.from_array both accept string attribute names,
    given either as a single attr or as a sequence of attrs.
    """
    words = ["An", "example", "sentence"]
    source = Doc(en_vocab, words=words)
    target = Doc(en_vocab, words=words)
    # Round-trip: export from one doc, load into a fresh doc with the
    # same words — should not raise for either parametrized attr form.
    target.from_array(attrs, source.to_array(attrs))