import pytest
from spacy.tokens.doc import Doc
from ..util import get_doc, apply_transition_sequence


def test_parser_space_attachment(en_tokenizer):
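    # Whitespace tokens should attach to neighbouring real tokens, so no
    # sentence produced by the parse may consist of a lone space token.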
    text = "This is a test.\nTo ensure  spaces are attached well."
    heads = [1, 0, 1, -2, -3, -1, 1, 4, -1, 2, 1, 0, -1, -2]
    tokens = en_tokenizer(text)
    doc = get_doc(tokens.vocab, words=[t.text for t in tokens], heads=heads)
    for sent in doc.sents:
        if len(sent) == 1:
            assert not sent[-1].is_space


def test_parser_sentence_space(en_tokenizer):
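    # A space token between two sentences (note the double space after the
    # first sentence's ".") must not throw off sentence segmentation:
    # exactly two sentences are expected.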
    # fmt: off
    text = "I look forward to using Thingamajig.  I've been told it will make my life easier..."
    heads = [1, 0, -1, -2, -1, -1, -5, -1, 3, 2, 1, 0, 2, 1, -3, 1, 1, -3, -7]
    deps = ["nsubj", "ROOT", "advmod", "prep", "pcomp", "dobj", "punct", "",
            "nsubjpass", "aux", "auxpass", "ROOT", "nsubj", "aux", "ccomp",
            "poss", "nsubj", "ccomp", "punct"]
    # fmt: on
    tokens = en_tokenizer(text)
    doc = get_doc(tokens.vocab, words=[t.text for t in tokens], heads=heads, deps=deps)
    assert len(list(doc.sents)) == 2


@pytest.mark.skip(
    reason="The step_through API was removed (but should be brought back)"
)
def test_parser_space_attachment_leading(en_tokenizer, en_parser):
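    # Leading whitespace has no preceding real token, so it should attach
    # forward to the first word of the sentence instead.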
    text = "\t \n This is a sentence ."
    heads = [1, 1, 0, 1, -2, -3]
    tokens = en_tokenizer(text)
    doc = get_doc(tokens.vocab, words=text.split(" "), heads=heads)
    assert doc[0].is_space
    assert doc[1].is_space
    assert doc[2].text == "This"
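    # The (since removed) step_through context manager is expected to finalize
    # the parse on exit; the leading space tokens should then head onto "This"
    # at index 2.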
    with en_parser.step_through(doc) as stepwise:
        pass
    assert doc[0].head.i == 2
    assert doc[1].head.i == 2
    assert stepwise.stack == set([2])


@pytest.mark.skip(
    reason="The step_through API was removed (but should be brought back)"
)
def test_parser_space_attachment_intermediate_trailing(en_tokenizer, en_parser):
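    # Whitespace in the middle and at the end of a sentence should attach
    # backwards to the nearest preceding real token.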
    text = "This is \t a \t\n \n sentence . \n\n \n"
    heads = [1, 0, -1, 2, -1, -4, -5, -1]
    transition = ["L-nsubj", "S", "L-det", "R-attr", "D", "R-punct"]
    tokens = en_tokenizer(text)
    doc = get_doc(tokens.vocab, words=text.split(" "), heads=heads)
    assert doc[2].is_space
    assert doc[4].is_space
    assert doc[5].is_space
    assert doc[8].is_space
    assert doc[9].is_space
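
    # After applying the gold transition sequence, every real token should have
    # a head assigned, and each whitespace token should hang off the nearest
    # real token to its left.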
    apply_transition_sequence(en_parser, doc, transition)
    for token in doc:
        assert token.dep != 0 or token.is_space
    assert [token.head.i for token in doc] == [1, 1, 1, 6, 3, 3, 1, 1, 7, 7]


@pytest.mark.parametrize("text,length", [(["\n"], 1), (["\n", "\t", "\n\n", "\t"], 4)])
@pytest.mark.skip(
    reason="The step_through API was removed (but should be brought back)"
)
def test_parser_space_attachment_space(en_tokenizer, en_parser, text, length):
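    # A doc consisting only of whitespace should still parse without errors,
    # with every token attached to the last one.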
    doc = Doc(en_parser.vocab, words=text)
    assert len(doc) == length
    with en_parser.step_through(doc) as _:  # noqa: F841
        pass
    assert doc[0].is_space
    for token in doc:
        assert token.head.i == length - 1