Mirror of https://github.com/explosion/spaCy.git (synced 2025-04-25 11:23:40 +03:00)

Merge branch 'master' of ssh://github.com/explosion/spaCy

Commit e12c90e03f
@@ -1,4 +1,6 @@
+# coding: utf-8
 from __future__ import unicode_literals
+
 from ...en import English
 
 import pytest
@@ -1,4 +1,6 @@
+# coding: utf-8
 from __future__ import unicode_literals
+
 from ...en import English
 
 import pytest
@@ -10,5 +12,5 @@ def en_tokenizer():
 
 
 def test_big_ellipsis(en_tokenizer):
-    tokens = en_tokenizer(u'$45...............Asking')
+    tokens = en_tokenizer('$45...............Asking')
     assert len(tokens) > 2
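For context (not part of this commit): the en_tokenizer fixture used by test_big_ellipsis is defined in the test suite's shared fixtures, as hinted by the "def en_tokenizer():" hunk header above. A minimal sketch of such a fixture under the spaCy 1.x API; the English.Defaults.create_tokenizer() factory call is an assumption here, not taken from this diff:

import pytest
from spacy.en import English  # spaCy 1.x-style import, as used elsewhere in this commit


@pytest.fixture
def en_tokenizer():
    # Assumed factory call: builds a bare tokenizer without loading statistical models.
    return English.Defaults.create_tokenizer()


def test_big_ellipsis(en_tokenizer):
    # Mirrors the assertion in the hunk above: the run of periods is split off.
    tokens = en_tokenizer('$45...............Asking')
    assert len(tokens) > 2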
@@ -1,9 +1,11 @@
+# coding: utf-8
 from __future__ import unicode_literals
-import pytest
 
 import spacy
 from spacy.attrs import ORTH
 
+import pytest
+
 
 @pytest.mark.models
 def test_issue429():
@@ -1,14 +1,18 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
 import spacy
 import spacy.matcher
 from spacy.attrs import IS_PUNCT, ORTH
 
 import pytest
 
+
 @pytest.mark.models
 def test_matcher_segfault():
     nlp = spacy.load('en', parser=False, entity=False)
     matcher = spacy.matcher.Matcher(nlp.vocab)
-    content = u'''a b; c'''
+    content = '''a b; c'''
     matcher.add(entity_key='1', label='TEST', attrs={}, specs=[[{ORTH: 'a'}, {ORTH: 'b'}]])
     matcher(nlp(content))
     matcher.add(entity_key='2', label='TEST', attrs={}, specs=[[{ORTH: 'a'}, {ORTH: 'b'}, {IS_PUNCT: True}, {ORTH: 'c'}]])
@@ -1,8 +1,12 @@
-import pytest
+# coding: utf-8
+from __future__ import unicode_literals
 
 from ...vocab import Vocab
 from ...tokens import Doc
 from ...matcher import Matcher
 
+import pytest
+
+
 def test_issue588():
     matcher = Matcher(Vocab())
@@ -1,10 +1,13 @@
-import pytest
+# coding: utf-8
+from __future__ import unicode_literals
 
 from ...vocab import Vocab
 from ...tokens import Doc
 
+import pytest
+
 
 def test_issue589():
     vocab = Vocab()
     vocab.strings.set_frozen(True)
-    doc = Doc(vocab, words=[u'whata'])
+    doc = Doc(vocab, words=['whata'])
@@ -1,9 +1,12 @@
+# coding: utf-8
 from __future__ import unicode_literals
+
 from ...attrs import *
 from ...matcher import Matcher
 from ...tokens import Doc
 from ...en import English
 
+
 def test_overlapping_matches():
     vocab = English.Defaults.create_vocab()
     doc = Doc(vocab, words=['n', '=', '1', ';', 'a', ':', '5', '%'])
@@ -1,11 +1,13 @@
+# coding: utf-8
 from __future__ import unicode_literals
-import pytest
 
 from ...symbols import POS, VERB, VerbForm_inf
 from ...tokens import Doc
 from ...vocab import Vocab
 from ...lemmatizer import Lemmatizer
 
+import pytest
+
 
 @pytest.fixture
 def index():
@@ -37,6 +39,6 @@ def vocab(lemmatizer, tag_map):
 def test_not_lemmatize_base_forms(vocab):
     doc = Doc(vocab, words=["Do", "n't", "feed", "the", "dog"])
     feed = doc[2]
-    feed.tag_ = u'VB'
-    assert feed.text == u'feed'
-    assert feed.lemma_ == u'feed'
+    feed.tag_ = 'VB'
+    assert feed.text == 'feed'
+    assert feed.lemma_ == 'feed'
@@ -1,6 +1,10 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
 from ...tokens import Doc
 from ...vocab import Vocab
 
+
 def test_issue599():
     doc = Doc(Vocab())
     doc.is_tagged = True
@@ -1,4 +1,6 @@
+# coding: utf-8
 from __future__ import unicode_literals
+
 from ...tokens import Doc
 from ...vocab import Vocab
 from ...attrs import POS
@@ -6,4 +8,4 @@ from ...attrs import POS
 
 def test_issue600():
     doc = Doc(Vocab(tag_map={'NN': {'pos': 'NOUN'}}), words=['hello'])
-    doc[0].tag_ = u'NN'
+    doc[0].tag_ = 'NN'
@@ -1,3 +1,6 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
 from ...attrs import LOWER, ORTH
 from ...tokens import Doc
 from ...vocab import Vocab
@@ -9,7 +12,7 @@ def return_false(doc, ent_id, label, start, end):
 
 
 def test_matcher_accept():
-    doc = Doc(Vocab(), words=[u'The', u'golf', u'club', u'is', u'broken'])
+    doc = Doc(Vocab(), words=['The', 'golf', 'club', 'is', 'broken'])
 
     golf_pattern = [
         { ORTH: "golf"},
@@ -17,8 +20,8 @@ def test_matcher_accept():
     ]
     matcher = Matcher(doc.vocab)
 
-    matcher.add_entity(u'Sport_Equipment', acceptor=return_false)
-    matcher.add_pattern(u"Sport_Equipment", golf_pattern)
+    matcher.add_entity('Sport_Equipment', acceptor=return_false)
+    matcher.add_pattern("Sport_Equipment", golf_pattern)
     match = matcher(doc)
 
     assert match == []
@@ -1,4 +1,6 @@
+# coding: utf-8
 from __future__ import unicode_literals
+
 import spacy
 from spacy.attrs import ORTH
 
@@ -17,7 +19,7 @@ def merge_phrases(matcher, doc, i, matches):
 
 def test_entity_ID_assignment():
     nlp = spacy.en.English()
-    text = u"""The golf club is broken"""
+    text = """The golf club is broken"""
     doc = nlp(text)
 
     golf_pattern = [
@@ -1,3 +1,6 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
 from ...vocab import Vocab
 
 
@@ -1,77 +1,95 @@
+# coding: utf-8
 from __future__ import unicode_literals
 
 import pytest
 
 
 URLS = [
-    u"http://www.nytimes.com/2016/04/20/us/politics/new-york-primary-preview.html?hp&action=click&pgtype=Homepage&clickSource=story-heading&module=a-lede-package-region&region=top-news&WT.nav=top-news&_r=0",
-    u"www.google.com?q=google",
-    u"google.com",
-    u"www.red-stars.com",
-    pytest.mark.xfail(u"red-stars.com"),
-    u"http://foo.com/blah_(wikipedia)#cite-1",
-    u"http://www.example.com/wpstyle/?bar=baz&inga=42&quux",
-    u"mailto:foo.bar@baz.com",
-    u"mailto:foo-bar@baz-co.com"
+    "http://www.nytimes.com/2016/04/20/us/politics/new-york-primary-preview.html?hp&action=click&pgtype=Homepage&clickSource=story-heading&module=a-lede-package-region&region=top-news&WT.nav=top-news&_r=0",
+    "www.google.com?q=google",
+    "www.red-stars.com",
+    "http://foo.com/blah_(wikipedia)#cite-1",
+    "mailto:foo.bar@baz.com",
+    "mailto:foo-bar@baz-co.com"
 ]
 
 
 # Punctuation we want to check is split away before the URL
 PREFIXES = [
-    "(", '"', "...", ">"
+    "(", '"', ">"
 ]
 
 
 # Punctuation we want to check is split away after the URL
 SUFFIXES = [
     '"', ":", ">"]
 
-@pytest.mark.parametrize("text", URLS)
-def test_simple_url(tokenizer, text):
-    tokens = tokenizer(text)
-    assert tokens[0].orth_ == text
+
+@pytest.mark.parametrize("url", URLS)
+def test_tokenizer_handles_simple_url(tokenizer, url):
+    tokens = tokenizer(url)
     assert len(tokens) == 1
+    assert tokens[0].text == url
 
 
 @pytest.mark.parametrize("prefix", PREFIXES)
 @pytest.mark.parametrize("url", URLS)
-def test_prefixed_url(tokenizer, prefix, url):
+def test_tokenizer_handles_prefixed_url(tokenizer, prefix, url):
     tokens = tokenizer(prefix + url)
+    assert len(tokens) == 2
     assert tokens[0].text == prefix
     assert tokens[1].text == url
-    assert len(tokens) == 2
 
 
 @pytest.mark.parametrize("suffix", SUFFIXES)
 @pytest.mark.parametrize("url", URLS)
-def test_suffixed_url(tokenizer, url, suffix):
+def test_tokenizer_handles_suffixed_url(tokenizer, url, suffix):
    tokens = tokenizer(url + suffix)
+    assert len(tokens) == 2
     assert tokens[0].text == url
     assert tokens[1].text == suffix
-    assert len(tokens) == 2
 
 
+@pytest.mark.parametrize("url", URLS)
+def test_tokenizer_handles_simple_surround_url(tokenizer, url):
+    tokens = tokenizer("(" + url + ")")
+    assert len(tokens) == 3
+    assert tokens[0].text == "("
+    assert tokens[1].text == url
+    assert tokens[2].text == ")"
+
+
+@pytest.mark.slow
 @pytest.mark.parametrize("prefix", PREFIXES)
 @pytest.mark.parametrize("suffix", SUFFIXES)
 @pytest.mark.parametrize("url", URLS)
-def test_surround_url(tokenizer, prefix, suffix, url):
+def test_tokenizer_handles_surround_url(tokenizer, prefix, suffix, url):
     tokens = tokenizer(prefix + url + suffix)
+    assert len(tokens) == 3
     assert tokens[0].text == prefix
     assert tokens[1].text == url
     assert tokens[2].text == suffix
-    assert len(tokens) == 3
 
 
+@pytest.mark.slow
 @pytest.mark.parametrize("prefix1", PREFIXES)
 @pytest.mark.parametrize("prefix2", PREFIXES)
 @pytest.mark.parametrize("url", URLS)
-def test_two_prefix_url(tokenizer, prefix1, prefix2, url):
+def test_tokenizer_handles_two_prefix_url(tokenizer, prefix1, prefix2, url):
     tokens = tokenizer(prefix1 + prefix2 + url)
+    assert len(tokens) == 3
     assert tokens[0].text == prefix1
     assert tokens[1].text == prefix2
     assert tokens[2].text == url
-    assert len(tokens) == 3
 
 
+@pytest.mark.slow
 @pytest.mark.parametrize("suffix1", SUFFIXES)
 @pytest.mark.parametrize("suffix2", SUFFIXES)
 @pytest.mark.parametrize("url", URLS)
-def test_two_prefix_url(tokenizer, suffix1, suffix2, url):
+def test_tokenizer_handles_two_prefix_url(tokenizer, suffix1, suffix2, url):
     tokens = tokenizer(url + suffix1 + suffix2)
+    assert len(tokens) == 3
     assert tokens[0].text == url
     assert tokens[1].text == suffix1
     assert tokens[2].text == suffix2
-    assert len(tokens) == 3
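A side note on the parametrization above (not part of the commit): stacked @pytest.mark.parametrize decorators make pytest collect the full cross-product of their parameter sets, which is why the heavier prefix/suffix combinations are additionally gated behind @pytest.mark.slow. A minimal, self-contained sketch of that expansion, reusing a trimmed subset of the values from the lists above:

import pytest

PREFIXES = ["(", '"', ">"]
SUFFIXES = ['"', ":", ">"]
URLS = ["www.google.com?q=google", "http://foo.com/blah_(wikipedia)#cite-1"]


@pytest.mark.parametrize("prefix", PREFIXES)
@pytest.mark.parametrize("suffix", SUFFIXES)
@pytest.mark.parametrize("url", URLS)
def test_cross_product(prefix, suffix, url):
    # pytest generates 3 * 3 * 2 = 18 separate test cases from this one function.
    text = prefix + url + suffix
    assert text.startswith(prefix) and text.endswith(suffix)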
@@ -305,7 +305,7 @@ cdef class Vocab:
         '''
         key = hash_string(string)
         lex = self._by_hash.get(key)
-        return True if lex is not NULL else False
+        return lex is not NULL
 
     def __iter__(self):
         '''Iterate over the lexemes in the vocabulary.
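The vocab.pyx change above is a pure simplification: the comparison lex is not NULL already evaluates to a boolean, so the conditional expression around it was redundant. The same pattern in plain Python, for illustration only (this is not spaCy code):

# Illustrative only: the comparison already yields True or False,
# so wrapping it in a conditional expression changes nothing.
def contains_verbose(mapping, key):
    value = mapping.get(key)
    return True if value is not None else False


def contains_simple(mapping, key):
    return mapping.get(key) is not None


assert contains_verbose({"a": 1}, "a") == contains_simple({"a": 1}, "a")
assert contains_verbose({}, "a") == contains_simple({}, "a")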