spaCy/spacy/tests/tokenizer/test_exceptions.py
Adriane Boyd 2ea9b58006
Ignore prefix in suffix matches (#9155)
* Ignore prefix in suffix matches

Ignore the currently matched prefix when looking for suffix matches in
the tokenizer. Otherwise a lookbehind in the suffix pattern may match
incorrectly due to the presence of the prefix in the token string (a
short sketch of the resulting tokenization follows the commit notes
below).

* Move °[cfkCFK]. to a tokenizer exception

* Adjust exceptions for same tokenization as v3.1

* Also update test accordingly

* Continue to split . after °CFK if ° is not a prefix

* Exclude new ° exceptions for pl

* Switch back to default tokenization of "° C ."

* Revert "Exclude new ° exceptions for pl"

This reverts commit 952013a5b4.

* Add exceptions for °C for hu
2021-10-27 13:02:25 +02:00
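
A minimal sketch of the tokenization this commit settles on, assuming a blank
English pipeline behaves like the `tokenizer` fixture used by the tests below
(the fixture is defined in the suite's conftest and is not shown here):

    import spacy

    # "°" is split off as a prefix and "." as a suffix; with the fix, the
    # suffix lookbehind no longer sees the already-matched "°" prefix.
    tokenizer = spacy.blank("en").tokenizer
    print([t.text for t in tokenizer("°C.")])  # expected: ['°', 'C', '.']
    print(tokenizer.explain("°C."))  # pairs like ('PREFIX', '°'), ('SUFFIX', '.')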


import sys

import pytest


def test_tokenizer_handles_emoticons(tokenizer):
    # Tweebo challenge (CMU)
    text = (
        """:o :/ :'( >:o (: :) >.< XD -__- o.O ;D :-) @_@ :P 8D :1 >:( :D =| :> ...."""
    )
    tokens = tokenizer(text)
    assert tokens[0].text == ":o"
    assert tokens[1].text == ":/"
    assert tokens[2].text == ":'("
    assert tokens[3].text == ">:o"
    assert tokens[4].text == "(:"
    assert tokens[5].text == ":)"
    assert tokens[6].text == ">.<"
    assert tokens[7].text == "XD"
    assert tokens[8].text == "-__-"
    assert tokens[9].text == "o.O"
    assert tokens[10].text == ";D"
    assert tokens[11].text == ":-)"
    assert tokens[12].text == "@_@"
    assert tokens[13].text == ":P"
    assert tokens[14].text == "8D"
    assert tokens[15].text == ":1"
    assert tokens[16].text == ">:("
    assert tokens[17].text == ":D"
    assert tokens[18].text == "=|"
    assert tokens[19].text == ":>"
    assert tokens[20].text == "...."


@pytest.mark.parametrize("text,length", [("108)", 2), ("XDN", 1)])
def test_tokenizer_excludes_false_pos_emoticons(tokenizer, text, length):
    tokens = tokenizer(text)
    assert len(tokens) == length


@pytest.mark.parametrize(
    "text,length", [("can you still dunk?🍕🍔😵LOL", 8), ("i💙you", 3), ("🤘🤘yay!", 4)]
)
def test_tokenizer_handles_emoji(tokenizer, text, length):
    # These break on narrow unicode builds (sys.maxunicode == 65535), e.g.
    # on Windows
    if sys.maxunicode >= 1114111:
        tokens = tokenizer(text)
        assert len(tokens) == length


def test_tokenizer_degree(tokenizer):
    for u in "cfkCFK":
        assert [t.text for t in tokenizer(f"°{u}.")] == ["°", u, "."]
        assert [t[1] for t in tokenizer.explain(f"°{u}.")] == ["°", u, "."]
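
A note on the exception mechanism this file exercises: special cases like the
Hungarian °C entries mentioned in the commit message can also be registered at
runtime with Tokenizer.add_special_case. A minimal sketch (the "°C." entry is
illustrative, not spaCy's shipped exception table):

    import spacy
    from spacy.symbols import ORTH

    nlp = spacy.blank("en")
    # Each dict gives the exact text (ORTH) of one subtoken; the pieces must
    # concatenate back to the original string "°C.".
    nlp.tokenizer.add_special_case("°C.", [{ORTH: "°"}, {ORTH: "C"}, {ORTH: "."}])
    print([t.text for t in nlp("It was 20 °C.")])  # ['It', 'was', '20', '°', 'C', '.']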