mirror of
https://github.com/explosion/spaCy.git
synced 2024-11-10 19:57:17 +03:00
Remove old tests
This commit is contained in:
parent
12eb8edf26
commit
a308703f47
1
setup.py
1
setup.py
|
@ -37,7 +37,6 @@ PACKAGES = [
|
|||
'spacy.munge',
|
||||
'spacy.tests',
|
||||
'spacy.tests.matcher',
|
||||
'spacy.tests.munge',
|
||||
'spacy.tests.parser',
|
||||
'spacy.tests.serialize',
|
||||
'spacy.tests.spans',
|
||||
|
|
|
@ -1,32 +0,0 @@
|
|||
from spacy.deprecated import align_tokens
|
||||
|
||||
|
||||
def test_perfect_align():
    """Tokens identical to the reference align one-to-one.

    Builds character-offset indices that exactly tile the reference
    tokens, so each aligned entry maps a token to its single span.
    """
    ref = ['I', 'align', 'perfectly']
    indices = []
    i = 0
    for token in ref:
        indices.append((i, i + len(token)))
        i += len(token)
    aligned = list(align_tokens(ref, indices))
    assert aligned[0] == ('I', [(0, 1)])
    assert aligned[1] == ('align', [(1, 6)])
    assert aligned[2] == ('perfectly', [(6, 15)])
|
||||
|
||||
|
||||
def test_hyphen_align():
    """A hyphenated reference token absorbs multiple candidate spans.

    're-align' covers three index spans (re / - / align), so its
    alignment entry lists all three.
    """
    ref = ['I', 'must', 're-align']
    indices = [(0, 1), (1, 5), (5, 7), (7, 8), (8, 13)]
    aligned = list(align_tokens(ref, indices))
    assert aligned[0] == ('I', [(0, 1)])
    assert aligned[1] == ('must', [(1, 5)])
    assert aligned[2] == ('re-align', [(5, 7), (7, 8), (8, 13)])
|
||||
|
||||
|
||||
def test_align_continue():
    """Alignment resumes correctly after a multi-span token.

    After 're-align' consumes three spans, the following tokens
    ('and', 'continue') must still pick up the right offsets.
    """
    ref = ['I', 'must', 're-align', 'and', 'continue']
    indices = [(0, 1), (1, 5), (5, 7), (7, 8), (8, 13), (13, 16), (16, 24)]
    aligned = list(align_tokens(ref, indices))
    assert aligned[2] == ('re-align', [(5, 7), (7, 8), (8, 13)])
    assert aligned[3] == ('and', [(13, 16)])
    assert aligned[4] == ('continue', [(16, 24)])
|
|
@ -1,59 +0,0 @@
|
|||
import spacy.munge.read_conll
|
||||
|
||||
# CoNLL-style dependency example: one token per line with columns
# id, form, lemma, POS tag, feats, head id, dep label, plus three
# placeholder columns. Token 1 ('2.') with POS 'LS' is a stray list
# marker that test_hongbin expects parse(strip_bad_periods=True) to drop.
# NOTE(review): separators transcribed as single spaces; the original
# file may have used tabs -- confirm against spacy.munge.read_conll.
hongbin_example = """
1 2. 0. LS _ 24 meta _ _ _
2 . . . _ 1 punct _ _ _
3 Wang wang NNP _ 4 compound _ _ _
4 Hongbin hongbin NNP _ 16 nsubj _ _ _
5 , , , _ 4 punct _ _ _
6 the the DT _ 11 det _ _ _
7 " " `` _ 11 punct _ _ _
8 communist communist JJ _ 11 amod _ _ _
9 trail trail NN _ 11 compound _ _ _
10 - - HYPH _ 11 punct _ _ _
11 blazer blazer NN _ 4 appos _ _ _
12 , , , _ 16 punct _ _ _
13 " " '' _ 16 punct _ _ _
14 has have VBZ _ 16 aux _ _ _
15 not not RB _ 16 neg _ _ _
16 turned turn VBN _ 24 ccomp _ _ _
17 into into IN syn=CLR 16 prep _ _ _
18 a a DT _ 19 det _ _ _
19 capitalist capitalist NN _ 17 pobj _ _ _
20 ( ( -LRB- _ 24 punct _ _ _
21 he he PRP _ 24 nsubj _ _ _
22 does do VBZ _ 24 aux _ _ _
23 n't not RB _ 24 neg _ _ _
24 have have VB _ 0 root _ _ _
25 any any DT _ 26 det _ _ _
26 shares share NNS _ 24 dobj _ _ _
27 , , , _ 24 punct _ _ _
28 does do VBZ _ 30 aux _ _ _
29 n't not RB _ 30 neg _ _ _
30 have have VB _ 24 conj _ _ _
31 any any DT _ 32 det _ _ _
32 savings saving NNS _ 30 dobj _ _ _
33 , , , _ 30 punct _ _ _
34 does do VBZ _ 36 aux _ _ _
35 n't not RB _ 36 neg _ _ _
36 have have VB _ 30 conj _ _ _
37 his his PRP$ _ 39 poss _ _ _
38 own own JJ _ 39 amod _ _ _
39 car car NN _ 36 dobj _ _ _
40 , , , _ 36 punct _ _ _
41 and and CC _ 36 cc _ _ _
42 does do VBZ _ 44 aux _ _ _
43 n't not RB _ 44 neg _ _ _
44 have have VB _ 36 conj _ _ _
45 a a DT _ 46 det _ _ _
46 mansion mansion NN _ 44 dobj _ _ _
47 ; ; . _ 24 punct _ _ _
""".strip()
|
||||
|
||||
|
||||
def test_hongbin():
    """Heads stay consistent after bad-period tokens are stripped.

    With strip_bad_periods=True the parse must re-index heads so that
    the first two surviving annotations still point at 'have' and
    'Hongbin' respectively.
    """
    words, annot = spacy.munge.read_conll.parse(
        hongbin_example, strip_bad_periods=True)
    assert words[annot[0]['head']] == 'have'
    assert words[annot[1]['head']] == 'Hongbin'
|
||||
|
||||
|
|
@ -1,21 +0,0 @@
|
|||
from spacy.deprecated import detokenize
|
||||
|
||||
def test_punct():
    """Punctuation marked '<SEP>' re-attaches to the preceding token.

    ',' joins 'Vinken' and '.' joins 'old', producing grouped index
    tuples over the original token positions.
    """
    tokens = 'Pierre Vinken , 61 years old .'.split()
    detoks = [(0,), (1, 2), (3,), (4,), (5, 6)]
    token_rules = ('<SEP>,', '<SEP>.')
    assert detokenize(token_rules, tokens) == detoks
|
||||
|
||||
|
||||
def test_contractions():
    """A contraction rule merges its two halves into one group.

    "ca<SEP>n't" re-joins 'ca' and "n't" (positions 1 and 2).
    """
    tokens = "I ca n't even".split()
    detoks = [(0,), (1, 2), (3,)]
    token_rules = ("ca<SEP>n't",)
    assert detokenize(token_rules, tokens) == detoks
|
||||
|
||||
|
||||
def test_contractions_punct():
    """Contraction and punctuation rules compose into one group.

    'ca' + "n't" merge via the contraction rule, then '!' attaches to
    that merged group via '<SEP>!', yielding a single (1, 2, 3) tuple.
    """
    tokens = "I ca n't !".split()
    detoks = [(0,), (1, 2, 3)]
    token_rules = ("ca<SEP>n't", '<SEP>!')
    assert detokenize(token_rules, tokens) == detoks
|
|
@ -1,16 +0,0 @@
|
|||
from spacy.munge.read_ner import _get_text, _get_tag
|
||||
|
||||
|
||||
def test_get_text():
    """_get_text strips ENAMEX markup, including unbalanced tags.

    Covers: plain word, fully wrapped entity, open-tag-only prefix,
    and close-tag-only suffix.
    """
    assert _get_text('asbestos') == 'asbestos'
    assert _get_text('<ENAMEX TYPE="ORG">Lorillard</ENAMEX>') == 'Lorillard'
    assert _get_text('<ENAMEX TYPE="DATE">more') == 'more'
    assert _get_text('ago</ENAMEX>') == 'ago'
|
||||
|
||||
|
||||
def test_get_tag():
    """_get_tag emits BILUO tags and threads the open-entity state.

    Returns (tag, next_state): O outside an entity, I inside one,
    U for a single-token entity, B when a tag opens, L when it closes
    (clearing the state back to None).
    """
    assert _get_tag('asbestos', None) == ('O', None)
    assert _get_tag('asbestos', 'PER') == ('I-PER', 'PER')
    assert _get_tag('<ENAMEX TYPE="ORG">Lorillard</ENAMEX>', None) == ('U-ORG', None)
    assert _get_tag('<ENAMEX TYPE="DATE">more', None) == ('B-DATE', 'DATE')
    assert _get_tag('ago</ENAMEX>', 'DATE') == ('L-DATE', None)
|
Loading…
Reference in New Issue
Block a user