Tidy up regression tests

parent 869963c3c4
commit 3e6e1f0251
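Every file in this diff gets the same treatment: the standard "# coding: utf-8" and "from __future__ import unicode_literals" header is added or separated from the imports, the pytest import moves below the spaCy imports, and u'' string prefixes made redundant by unicode_literals are dropped. A minimal sketch of why dropping the prefixes is safe (the u prefix only matters on Python 2):

    # coding: utf-8
    from __future__ import unicode_literals

    # With unicode_literals in effect, bare literals are unicode on
    # Python 2 as well, so u'feed' and 'feed' are interchangeable.
    assert 'feed' == u'feed'
    assert type('feed') is type(u'feed')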
@@ -1,4 +1,6 @@
 # coding: utf-8
 from __future__ import unicode_literals
+
 from ...en import English
+
 import pytest

@@ -1,4 +1,6 @@
 # coding: utf-8
 from __future__ import unicode_literals
+
 from ...en import English
+
 import pytest

@@ -10,5 +12,5 @@ def en_tokenizer():
 
 
 def test_big_ellipsis(en_tokenizer):
-    tokens = en_tokenizer(u'$45...............Asking')
+    tokens = en_tokenizer('$45...............Asking')
     assert len(tokens) > 2

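For reference, the en_tokenizer fixture named in the hunk header is defined earlier in this file; a sketch of its conventional shape in the 1.x test suite (assuming the English defaults used elsewhere in this diff):

    import pytest

    from spacy.en import English


    @pytest.fixture
    def en_tokenizer():
        # A bare tokenizer: no statistical models are needed here.
        return English.Defaults.create_tokenizer()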
@@ -1,9 +1,11 @@
 # coding: utf-8
 from __future__ import unicode_literals
-import pytest
 
 import spacy
 from spacy.attrs import ORTH
 
+import pytest
+
+
 @pytest.mark.models
 def test_issue429():

@@ -23,7 +25,7 @@ def test_issue429():
     doc = nlp.tokenizer('a b c')
     nlp.tagger(doc)
     nlp.matcher(doc)
-
+
     for word in doc:
         print(word.text, word.ent_iob_, word.ent_type_)
     nlp.entity(doc)

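The @pytest.mark.models marker above flags tests that need installed model data, so they can be skipped with pytest's standard marker expression, e.g. pytest spacy/tests -m "not models". A sketch of how such a marker is registered in a conftest.py (an assumption; spaCy's actual conftest may wire this up differently):

    # conftest.py
    def pytest_configure(config):
        # Declare the custom marker so -m "not models" can filter out
        # tests that depend on downloaded statistical models.
        config.addinivalue_line(
            'markers', 'models: test requires installed model data')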
@@ -1,14 +1,18 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
 import spacy
 import spacy.matcher
 from spacy.attrs import IS_PUNCT, ORTH
 
 import pytest
 
+
 @pytest.mark.models
 def test_matcher_segfault():
     nlp = spacy.load('en', parser=False, entity=False)
     matcher = spacy.matcher.Matcher(nlp.vocab)
-    content = u'''a b; c'''
+    content = '''a b; c'''
     matcher.add(entity_key='1', label='TEST', attrs={}, specs=[[{ORTH: 'a'}, {ORTH: 'b'}]])
     matcher(nlp(content))
     matcher.add(entity_key='2', label='TEST', attrs={}, specs=[[{ORTH: 'a'}, {ORTH: 'b'}, {IS_PUNCT: True}, {ORTH: 'c'}]])

@@ -1,8 +1,12 @@
-import pytest
+# coding: utf-8
+from __future__ import unicode_literals
+
 from ...vocab import Vocab
 from ...tokens import Doc
 from ...matcher import Matcher
+
+import pytest
 
 
 def test_issue588():
     matcher = Matcher(Vocab())

@@ -1,10 +1,13 @@
-import pytest
+# coding: utf-8
+from __future__ import unicode_literals
 
 from ...vocab import Vocab
 from ...tokens import Doc
+
+import pytest
 
 
 def test_issue589():
     vocab = Vocab()
     vocab.strings.set_frozen(True)
-    doc = Doc(vocab, words=[u'whata'])
+    doc = Doc(vocab, words=['whata'])

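Like several tests in this diff, test_issue589 builds a Doc straight from a word list instead of loading a model, which keeps the regression suite fast and model-free. The pattern in isolation:

    from spacy.tokens import Doc
    from spacy.vocab import Vocab

    # A Doc constructed directly from pre-tokenized words: no tokenizer,
    # tagger or model data involved.
    doc = Doc(Vocab(), words=['whata'])
    assert len(doc) == 1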
@@ -1,9 +1,12 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
 from ...attrs import *
 from ...matcher import Matcher
 from ...tokens import Doc
 from ...en import English
 
 
 def test_overlapping_matches():
     vocab = English.Defaults.create_vocab()
     doc = Doc(vocab, words=['n', '=', '1', ';', 'a', ':', '5', '%'])

@@ -29,6 +32,6 @@ def test_overlapping_matches():
         {ORTH: '='},
         {LIKE_NUM: True},
     ], label='b')
-
+
     matches = matcher(doc)
     assert len(matches) == 2

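The assertion above pins down that the 1.x Matcher reports both candidates when two patterns cover overlapping tokens rather than keeping only one. A sketch of the add_pattern API as it appears in this diff (names reused from the hunks; the exact patterns in the test are longer):

    from spacy.attrs import LIKE_NUM, ORTH
    from spacy.en import English
    from spacy.matcher import Matcher
    from spacy.tokens import Doc

    vocab = English.Defaults.create_vocab()
    doc = Doc(vocab, words=['n', '=', '1'])

    matcher = Matcher(vocab)
    # Two patterns whose spans overlap on '= 1'.
    matcher.add_pattern('A', [{ORTH: '='}, {LIKE_NUM: True}], label='a')
    matcher.add_pattern('B', [{ORTH: 'n'}, {ORTH: '='}, {LIKE_NUM: True}],
                        label='b')
    matches = matcher(doc)  # the test above expects both overlapping matches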
@@ -1,11 +1,13 @@
 # coding: utf-8
 from __future__ import unicode_literals
-import pytest
 
 from ...symbols import POS, VERB, VerbForm_inf
 from ...tokens import Doc
 from ...vocab import Vocab
 from ...lemmatizer import Lemmatizer
 
+import pytest
+
+
 @pytest.fixture
 def index():

@@ -37,6 +39,6 @@ def vocab(lemmatizer, tag_map):
 def test_not_lemmatize_base_forms(vocab):
     doc = Doc(vocab, words=["Do", "n't", "feed", "the", "dog"])
     feed = doc[2]
-    feed.tag_ = u'VB'
-    assert feed.text == u'feed'
-    assert feed.lemma_ == u'feed'
+    feed.tag_ = 'VB'
+    assert feed.text == 'feed'
+    assert feed.lemma_ == 'feed'

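The check above guards the base-form rule: once feed is tagged VB, an infinitive, the lemmatizer must return the form unchanged. A sketch of the kind of tag_map entry the vocab fixture builds so the tag carries that morphology (assumed shape; the actual fixture may differ):

    from spacy.symbols import POS, VERB, VerbForm_inf

    # 'VB' maps to a verb in the infinitive, so lemmatizing a base form
    # tagged this way is a no-op.
    tag_map = {'VB': {POS: VERB, 'morph': VerbForm_inf}}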
@@ -1,6 +1,10 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
 from ...tokens import Doc
 from ...vocab import Vocab
 
+
 def test_issue599():
     doc = Doc(Vocab())
     doc.is_tagged = True

@@ -1,4 +1,6 @@
+# coding: utf-8
+from __future__ import unicode_literals
 
 from ...tokens import Doc
 from ...vocab import Vocab
 from ...attrs import POS

@@ -6,4 +8,4 @@ from ...attrs import POS
 
 def test_issue600():
     doc = Doc(Vocab(tag_map={'NN': {'pos': 'NOUN'}}), words=['hello'])
-    doc[0].tag_ = u'NN'
+    doc[0].tag_ = 'NN'

@@ -1,3 +1,6 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
 from ...attrs import LOWER, ORTH
 from ...tokens import Doc
 from ...vocab import Vocab

@@ -9,16 +12,16 @@ def return_false(doc, ent_id, label, start, end):
 
 
 def test_matcher_accept():
-    doc = Doc(Vocab(), words=[u'The', u'golf', u'club', u'is', u'broken'])
+    doc = Doc(Vocab(), words=['The', 'golf', 'club', 'is', 'broken'])
 
-    golf_pattern = [
+    golf_pattern = [
         { ORTH: "golf"},
         { ORTH: "club"}
     ]
     matcher = Matcher(doc.vocab)
 
-    matcher.add_entity(u'Sport_Equipment', acceptor=return_false)
-    matcher.add_pattern(u"Sport_Equipment", golf_pattern)
+    matcher.add_entity('Sport_Equipment', acceptor=return_false)
+    matcher.add_pattern("Sport_Equipment", golf_pattern)
     match = matcher(doc)
 
     assert match == []

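The return_false helper whose signature shows in the hunk header is this test's acceptor: the 1.x Matcher calls it for each candidate match, and a falsy return value rejects the candidate, which is why the test expects an empty match list. Its shape, as given in this file:

    def return_false(doc, ent_id, label, start, end):
        # Reject every candidate match the Matcher proposes.
        return False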
@@ -1,5 +1,7 @@
 # coding: utf-8
 from __future__ import unicode_literals
-import spacy
+
+import spacy
 from spacy.attrs import ORTH
 
+

@@ -17,10 +19,10 @@ def merge_phrases(matcher, doc, i, matches):
 
 def test_entity_ID_assignment():
     nlp = spacy.en.English()
-    text = u"""The golf club is broken"""
+    text = """The golf club is broken"""
     doc = nlp(text)
 
-    golf_pattern = [
+    golf_pattern = [
         { ORTH: "golf"},
         { ORTH: "club"}
     ]

@@ -1,3 +1,6 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
 from ...vocab import Vocab
 
 