Merge branch 'master' of ssh://github.com/explosion/spaCy

Author: Matthew Honnibal, 2017-01-11 13:03:51 +01:00
Commit: e12c90e03f
15 changed files with 103 additions and 49 deletions

View File

@@ -1,4 +1,6 @@
# coding: utf-8
from __future__ import unicode_literals
from ...en import English
import pytest

View File

@@ -1,4 +1,6 @@
# coding: utf-8
from __future__ import unicode_literals
from ...en import English
import pytest
@@ -10,5 +12,5 @@ def en_tokenizer():
def test_big_ellipsis(en_tokenizer):
tokens = en_tokenizer(u'$45...............Asking')
tokens = en_tokenizer('$45...............Asking')
assert len(tokens) > 2

View File

@@ -1,9 +1,11 @@
# coding: utf-8
from __future__ import unicode_literals
import pytest
import spacy
from spacy.attrs import ORTH
import pytest
@pytest.mark.models
def test_issue429():
@@ -23,7 +25,7 @@ def test_issue429():
doc = nlp.tokenizer('a b c')
nlp.tagger(doc)
nlp.matcher(doc)
for word in doc:
print(word.text, word.ent_iob_, word.ent_type_)
nlp.entity(doc)
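
A note on the loop above: Token.ent_iob_ follows the standard IOB scheme, where "B" marks the first token of an entity, "I" a continuation token, "O" a token outside any entity, and an empty string means no entity annotation has been set. A minimal standalone sketch of the same manual pipeline calls (not part of the diff; assumes the 'en' model is installed and uses a hypothetical example sentence):

import spacy

nlp = spacy.load('en', parser=False)      # assumption: 'en' model available locally
doc = nlp.tokenizer('London is big')      # hypothetical input text
nlp.tagger(doc)
nlp.entity(doc)
for word in doc:
    # e.g. "London B GPE", "is O", "big O" (exact labels depend on the model)
    print(word.text, word.ent_iob_, word.ent_type_)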

View File

@@ -1,14 +1,18 @@
# coding: utf-8
from __future__ import unicode_literals
import spacy
import spacy.matcher
from spacy.attrs import IS_PUNCT, ORTH
import pytest
@pytest.mark.models
def test_matcher_segfault():
nlp = spacy.load('en', parser=False, entity=False)
matcher = spacy.matcher.Matcher(nlp.vocab)
content = u'''a b; c'''
content = '''a b; c'''
matcher.add(entity_key='1', label='TEST', attrs={}, specs=[[{ORTH: 'a'}, {ORTH: 'b'}]])
matcher(nlp(content))
matcher.add(entity_key='2', label='TEST', attrs={}, specs=[[{ORTH: 'a'}, {ORTH: 'b'}, {IS_PUNCT: True}, {ORTH: 'c'}]])

View File

@@ -1,8 +1,12 @@
import pytest
# coding: utf-8
from __future__ import unicode_literals
from ...vocab import Vocab
from ...tokens import Doc
from ...matcher import Matcher
import pytest
def test_issue588():
matcher = Matcher(Vocab())

View File

@@ -1,10 +1,13 @@
import pytest
# coding: utf-8
from __future__ import unicode_literals
from ...vocab import Vocab
from ...tokens import Doc
import pytest
def test_issue589():
vocab = Vocab()
vocab.strings.set_frozen(True)
doc = Doc(vocab, words=[u'whata'])
doc = Doc(vocab, words=['whata'])

View File

@@ -1,9 +1,12 @@
# coding: utf-8
from __future__ import unicode_literals
from ...attrs import *
from ...matcher import Matcher
from ...tokens import Doc
from ...en import English
def test_overlapping_matches():
vocab = English.Defaults.create_vocab()
doc = Doc(vocab, words=['n', '=', '1', ';', 'a', ':', '5', '%'])
@@ -29,6 +32,6 @@ def test_overlapping_matches():
{ORTH: '='},
{LIKE_NUM: True},
], label='b')
matches = matcher(doc)
assert len(matches) == 2

View File

@@ -1,11 +1,13 @@
# coding: utf-8
from __future__ import unicode_literals
import pytest
from ...symbols import POS, VERB, VerbForm_inf
from ...tokens import Doc
from ...vocab import Vocab
from ...lemmatizer import Lemmatizer
import pytest
@pytest.fixture
def index():
@@ -37,6 +39,6 @@ def vocab(lemmatizer, tag_map):
def test_not_lemmatize_base_forms(vocab):
doc = Doc(vocab, words=["Do", "n't", "feed", "the", "dog"])
feed = doc[2]
feed.tag_ = u'VB'
assert feed.text == u'feed'
assert feed.lemma_ == u'feed'
feed.tag_ = 'VB'
assert feed.text == 'feed'
assert feed.lemma_ == 'feed'

View File

@@ -1,6 +1,10 @@
# coding: utf-8
from __future__ import unicode_literals
from ...tokens import Doc
from ...vocab import Vocab
def test_issue599():
doc = Doc(Vocab())
doc.is_tagged = True

View File

@@ -1,4 +1,6 @@
# coding: utf-8
from __future__ import unicode_literals
from ...tokens import Doc
from ...vocab import Vocab
from ...attrs import POS
@@ -6,4 +8,4 @@ from ...attrs import POS
def test_issue600():
doc = Doc(Vocab(tag_map={'NN': {'pos': 'NOUN'}}), words=['hello'])
doc[0].tag_ = u'NN'
doc[0].tag_ = 'NN'

View File

@@ -1,3 +1,6 @@
# coding: utf-8
from __future__ import unicode_literals
from ...attrs import LOWER, ORTH
from ...tokens import Doc
from ...vocab import Vocab
@@ -9,16 +12,16 @@ def return_false(doc, ent_id, label, start, end):
def test_matcher_accept():
doc = Doc(Vocab(), words=[u'The', u'golf', u'club', u'is', u'broken'])
doc = Doc(Vocab(), words=['The', 'golf', 'club', 'is', 'broken'])
golf_pattern = [
golf_pattern = [
{ ORTH: "golf"},
{ ORTH: "club"}
]
matcher = Matcher(doc.vocab)
matcher.add_entity(u'Sport_Equipment', acceptor=return_false)
matcher.add_pattern(u"Sport_Equipment", golf_pattern)
matcher.add_entity('Sport_Equipment', acceptor=return_false)
matcher.add_pattern("Sport_Equipment", golf_pattern)
match = matcher(doc)
assert match == []
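
For context on the acceptor hook exercised above: return_false uses the signature shown earlier in this file (doc, ent_id, label, start, end), and returning a falsy value drops the candidate match, which is why the assertion expects an empty result. A hypothetical acceptor under the same assumption, rejecting only single-token spans, might look like the sketch below; the return value used to accept a match is an assumption, not confirmed by the diff.

def reject_single_tokens(doc, ent_id, label, start, end):
    # Hypothetical acceptor: drop any candidate span shorter than two tokens.
    if end - start < 2:
        return False                    # falsy return rejects the match, as return_false does above
    return (ent_id, label, start, end)  # assumption: accepted matches are passed through unchanged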

View File

@@ -1,5 +1,7 @@
# coding: utf-8
from __future__ import unicode_literals
import spacy
import spacy
from spacy.attrs import ORTH
@@ -17,10 +19,10 @@ def merge_phrases(matcher, doc, i, matches):
def test_entity_ID_assignment():
nlp = spacy.en.English()
text = u"""The golf club is broken"""
text = """The golf club is broken"""
doc = nlp(text)
golf_pattern = [
golf_pattern = [
{ ORTH: "golf"},
{ ORTH: "club"}
]

View File

@@ -1,3 +1,6 @@
# coding: utf-8
from __future__ import unicode_literals
from ...vocab import Vocab

View File

@@ -1,77 +1,95 @@
# coding: utf-8
from __future__ import unicode_literals
import pytest
URLS = [
u"http://www.nytimes.com/2016/04/20/us/politics/new-york-primary-preview.html?hp&action=click&pgtype=Homepage&clickSource=story-heading&module=a-lede-package-region&region=top-news&WT.nav=top-news&_r=0",
u"www.google.com?q=google",
u"google.com",
u"www.red-stars.com",
pytest.mark.xfail(u"red-stars.com"),
u"http://foo.com/blah_(wikipedia)#cite-1",
u"http://www.example.com/wpstyle/?bar=baz&inga=42&quux",
u"mailto:foo.bar@baz.com",
u"mailto:foo-bar@baz-co.com"
"http://www.nytimes.com/2016/04/20/us/politics/new-york-primary-preview.html?hp&action=click&pgtype=Homepage&clickSource=story-heading&module=a-lede-package-region&region=top-news&WT.nav=top-news&_r=0",
"www.google.com?q=google",
"www.red-stars.com",
"http://foo.com/blah_(wikipedia)#cite-1",
"mailto:foo.bar@baz.com",
"mailto:foo-bar@baz-co.com"
]
# Punctuation we want to check is split away before the URL
PREFIXES = [
"(", '"', "...", ">"
"(", '"', ">"
]
# Punctuation we want to check is split away after the URL
SUFFIXES = [
'"', ":", ">"]
@pytest.mark.parametrize("text", URLS)
def test_simple_url(tokenizer, text):
tokens = tokenizer(text)
assert tokens[0].orth_ == text
@pytest.mark.parametrize("url", URLS)
def test_tokenizer_handles_simple_url(tokenizer, url):
tokens = tokenizer(url)
assert len(tokens) == 1
assert tokens[0].text == url
@pytest.mark.parametrize("prefix", PREFIXES)
@pytest.mark.parametrize("url", URLS)
def test_prefixed_url(tokenizer, prefix, url):
def test_tokenizer_handles_prefixed_url(tokenizer, prefix, url):
tokens = tokenizer(prefix + url)
assert len(tokens) == 2
assert tokens[0].text == prefix
assert tokens[1].text == url
assert len(tokens) == 2
@pytest.mark.parametrize("suffix", SUFFIXES)
@pytest.mark.parametrize("url", URLS)
def test_suffixed_url(tokenizer, url, suffix):
def test_tokenizer_handles_suffixed_url(tokenizer, url, suffix):
tokens = tokenizer(url + suffix)
assert len(tokens) == 2
assert tokens[0].text == url
assert tokens[1].text == suffix
assert len(tokens) == 2
@pytest.mark.parametrize("url", URLS)
def test_tokenizer_handles_simple_surround_url(tokenizer, url):
tokens = tokenizer("(" + url + ")")
assert len(tokens) == 3
assert tokens[0].text == "("
assert tokens[1].text == url
assert tokens[2].text == ")"
@pytest.mark.slow
@pytest.mark.parametrize("prefix", PREFIXES)
@pytest.mark.parametrize("suffix", SUFFIXES)
@pytest.mark.parametrize("url", URLS)
def test_surround_url(tokenizer, prefix, suffix, url):
def test_tokenizer_handles_surround_url(tokenizer, prefix, suffix, url):
tokens = tokenizer(prefix + url + suffix)
assert len(tokens) == 3
assert tokens[0].text == prefix
assert tokens[1].text == url
assert tokens[2].text == suffix
assert len(tokens) == 3
@pytest.mark.slow
@pytest.mark.parametrize("prefix1", PREFIXES)
@pytest.mark.parametrize("prefix2", PREFIXES)
@pytest.mark.parametrize("url", URLS)
def test_two_prefix_url(tokenizer, prefix1, prefix2, url):
def test_tokenizer_handles_two_prefix_url(tokenizer, prefix1, prefix2, url):
tokens = tokenizer(prefix1 + prefix2 + url)
assert len(tokens) == 3
assert tokens[0].text == prefix1
assert tokens[1].text == prefix2
assert tokens[2].text == url
assert len(tokens) == 3
@pytest.mark.slow
@pytest.mark.parametrize("suffix1", SUFFIXES)
@pytest.mark.parametrize("suffix2", SUFFIXES)
@pytest.mark.parametrize("url", URLS)
def test_two_prefix_url(tokenizer, suffix1, suffix2, url):
def test_tokenizer_handles_two_prefix_url(tokenizer, suffix1, suffix2, url):
tokens = tokenizer(url + suffix1 + suffix2)
assert len(tokens) == 3
assert tokens[0].text == url
assert tokens[1].text == suffix1
assert tokens[2].text == suffix2
assert len(tokens) == 3
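
One detail worth noting in the tests above: stacking @pytest.mark.parametrize decorators runs the test once per combination of the decorated argument lists, so the two- and three-way combinations grow multiplicatively, which is presumably why they are gated behind @pytest.mark.slow. A self-contained sketch of the mechanism (not part of the diff):

import pytest

PREFIXES = ["(", '"', ">"]
SUFFIXES = ['"', ":", ">"]

@pytest.mark.parametrize("suffix", SUFFIXES)
@pytest.mark.parametrize("prefix", PREFIXES)
def test_combinations(prefix, suffix):
    # 3 prefixes x 3 suffixes = 9 generated test cases
    assert len(prefix + suffix) == 2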

View File

@@ -305,7 +305,7 @@ cdef class Vocab:
'''
key = hash_string(string)
lex = self._by_hash.get(key)
return True if lex is not NULL else False
return lex is not NULL
def __iter__(self):
'''Iterate over the lexemes in the vocabulary.
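
The change to __contains__ above only simplifies the return expression: the comparison already evaluates to a boolean, so the conditional wrapper was redundant. A pure-Python analogue of the same simplification (hypothetical names, not the Cython source):

def contains(by_hash, key):
    lex = by_hash.get(key)      # None when the key is absent
    # Equivalent to: return True if lex is not None else False
    return lex is not None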