Fix and merge attrs and lex_attrs tests

ines 2017-05-09 11:06:25 +02:00
parent 02d0ac5cab
commit 22375eafb0
3 changed files with 52 additions and 104 deletions


@@ -0,0 +1,52 @@
# coding: utf-8
from __future__ import unicode_literals

from ...attrs import intify_attrs, ORTH, NORM, LEMMA, IS_ALPHA
from ...lang.lex_attrs import is_punct, is_ascii, like_url, word_shape

import pytest


@pytest.mark.parametrize('text', ["dog"])
def test_attrs_key(text):
    assert intify_attrs({"ORTH": text}) == {ORTH: text}
    assert intify_attrs({"NORM": text}) == {NORM: text}
    assert intify_attrs({"lemma": text}, strings_map={text: 10}) == {LEMMA: 10}


@pytest.mark.parametrize('text', ["dog"])
def test_attrs_idempotence(text):
    int_attrs = intify_attrs({"lemma": text, 'is_alpha': True}, strings_map={text: 10})
    assert intify_attrs(int_attrs) == {LEMMA: 10, IS_ALPHA: True}


@pytest.mark.parametrize('text', ["dog"])
def test_attrs_do_deprecated(text):
    int_attrs = intify_attrs({"F": text, 'is_alpha': True}, strings_map={text: 10},
                             _do_deprecated=True)
    assert int_attrs == {ORTH: 10, IS_ALPHA: True}


@pytest.mark.parametrize('text,match', [(',', True), (' ', False), ('a', False)])
def test_lex_attrs_is_punct(text, match):
    assert is_punct(text) == match


@pytest.mark.parametrize('text,match', [(',', True), ('£', False), ('', False)])
def test_lex_attrs_is_ascii(text, match):
    assert is_ascii(text) == match


@pytest.mark.parametrize('text,match', [
    ('www.google.com', True), ('google.com', True), ('sydney.com', True),
    ('2girls1cup.org', True), ('http://stupid', True), ('www.hi', True),
    ('dog', False), ('1.2', False), ('1.a', False), ('hello.There', False)])
def test_lex_attrs_like_url(text, match):
    assert like_url(text) == match


@pytest.mark.parametrize('text,shape', [
    ('Nasa', 'Xxxx'), ('capitalized', 'xxxx'), ('999999999', 'dddd'),
    ('C3P0', 'XdXd'), (',', ','), ('\n', '\n'), ('``,-', '``,-')])
def test_lex_attrs_word_shape(text, shape):
    assert word_shape(text) == shape
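
For orientation, here is a minimal usage sketch of the functions the merged file above exercises. It uses the public import paths that the relative imports resolve to (spacy.attrs and spacy.lang.lex_attrs), assumes an installed spacy package with that layout, and simply mirrors a few of the parametrized cases.

# Minimal usage sketch; values mirror the parametrized cases above.
from spacy.attrs import intify_attrs, ORTH, NORM
from spacy.lang.lex_attrs import is_punct, like_url, word_shape

assert intify_attrs({"ORTH": "dog"}) == {ORTH: "dog"}      # string keys -> attribute IDs
assert intify_attrs({"NORM": "dog"}) == {NORM: "dog"}
assert is_punct(",") and not is_punct("a")                 # boolean lexical attributes
assert like_url("www.google.com") and not like_url("dog")
assert word_shape("C3P0") == "XdXd"                        # letters -> X/x, digits -> d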


@@ -1,27 +0,0 @@
# coding: utf-8
from __future__ import unicode_literals

from ..attrs import intify_attrs, ORTH, NORM, LEMMA, IS_ALPHA

import pytest


@pytest.mark.parametrize('text', ["dog"])
def test_attrs_key(text):
    assert intify_attrs({"ORTH": text}) == {ORTH: text}
    assert intify_attrs({"NORM": text}) == {NORM: text}
    assert intify_attrs({"lemma": text}, strings_map={text: 10}) == {LEMMA: 10}


@pytest.mark.parametrize('text', ["dog"])
def test_attrs_idempotence(text):
    int_attrs = intify_attrs({"lemma": text, 'is_alpha': True}, strings_map={text: 10})
    assert intify_attrs(int_attrs) == {LEMMA: 10, IS_ALPHA: True}


@pytest.mark.parametrize('text', ["dog"])
def test_attrs_do_deprecated(text):
    int_attrs = intify_attrs({"F": text, 'is_alpha': True},
                             strings_map={text: 10},
                             _do_deprecated=True)
    assert int_attrs == {ORTH: 10, IS_ALPHA: True}


@@ -1,77 +0,0 @@
# coding: utf-8
from __future__ import unicode_literals

from ..orth import is_alpha, is_digit, is_punct, is_space, is_ascii, is_upper
from ..orth import is_lower, is_title, like_url, like_number, word_shape

import pytest


# TODO: brackets, is_ascii, is_upper, is_lower, is_title


@pytest.mark.parametrize('text,match', [
    ('1997', False), ('19.97', False), ('hello9', False), ('Hello', True),
    ('HELLO', True), ('Hello9', False), ('\n', False), ('!', False),
    ('!d', False), ('\nd', False)])
def test_orth_is_alpha(text, match):
    if match:
        assert is_alpha(text)
    else:
        assert not is_alpha(text)


@pytest.mark.parametrize('text,match', [
    ('1997', True), ('0000000', True), ('19.97', False), ('hello9', False),
    ('Hello', False), ('\n', False), ('!', False), ('!0', False),
    ('\n5', False)])
def test_orth_is_digit(text, match):
    if match:
        assert is_digit(text)
    else:
        assert not is_digit(text)


@pytest.mark.parametrize('text,match', [(',', True), (' ', False), ('a', False)])
def test_orth_is_punct(text, match):
    if match:
        assert is_punct(text)
    else:
        assert not is_punct(text)


@pytest.mark.parametrize('text,match', [(',', False), (' ', True), ('a', False)])
def test_orth_is_space(text, match):
    if match:
        assert is_space(text)
    else:
        assert not is_space(text)


@pytest.mark.parametrize('text,match', [
    ('www.google.com', True), ('google.com', True), ('sydney.com', True),
    ('2girls1cup.org', True), ('http://stupid', True), ('www.hi', True),
    ('dog', False), ('1.2', False), ('1.a', False), ('hello.There', False)])
def test_orth_like_url(text, match):
    if match:
        assert like_url(text)
    else:
        assert not like_url(text)


@pytest.mark.parametrize('text,match', [
    ('10', True), ('1', True), ('10,000', True), ('10,00', True),
    (',10', True), ('999.0', True), ('one', True), ('two', True),
    ('billion', True), ('dog', False), (',', False), ('1/2', True),
    ('1/2/3', False)])
def test_orth_like_number(text, match):
    if match:
        assert like_number(text)
    else:
        assert not like_number(text)


@pytest.mark.parametrize('text,shape', [
    ('Nasa', 'Xxxx'), ('capitalized', 'xxxx'), ('999999999', 'dddd'),
    ('C3P0', 'XdXd'), (',', ','), ('\n', '\n'), ('``,-', '``,-')])
def test_orth_word_shape(text, shape):
    assert word_shape(text) == shape
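
The word shape cases above (and in the new test_lex_attrs_word_shape) pin down the same contract: uppercase letters map to 'X', lowercase letters to 'x', digits to 'd', other characters pass through unchanged, and runs of the same shape symbol are capped at four. Below is a minimal sketch of a function satisfying those cases, as an illustration rather than spaCy's actual implementation.

def toy_word_shape(text):
    # Illustrative only: map each character to a shape symbol and cap runs
    # of identical symbols at four, matching the parametrized cases above.
    shape = []
    last = ""
    run = 0
    for char in text:
        if char.isalpha():
            symbol = "X" if char.isupper() else "x"
        elif char.isdigit():
            symbol = "d"
        else:
            symbol = char
        run = run + 1 if symbol == last else 1
        last = symbol
        if run <= 4:
            shape.append(symbol)
    return "".join(shape)


assert toy_word_shape("Nasa") == "Xxxx"
assert toy_word_shape("999999999") == "dddd"
assert toy_word_shape("``,-") == "``,-"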