mirror of
				https://github.com/explosion/spaCy.git
				synced 2025-11-04 09:57:26 +03:00 
			
		
		
		
	* Use isort with Black profile * isort all the things * Fix import cycles as a result of import sorting * Add DOCBIN_ALL_ATTRS type definition * Add isort to requirements * Remove isort from build dependencies check * Typo
		
			
				
	
	
		
			155 lines
		
	
	
		
			3.7 KiB
		
	
	
	
		
			Python
		
	
	
	
	
	
			
		
		
	
	
			155 lines
		
	
	
		
			3.7 KiB
		
	
	
	
		
			Python
		
	
	
	
	
	
import re
 | 
						|
 | 
						|
import pytest
 | 
						|
 | 
						|
from spacy.lang.en import English
 | 
						|
from spacy.tokenizer import Tokenizer
 | 
						|
from spacy.util import compile_infix_regex, compile_prefix_regex, compile_suffix_regex
 | 
						|
 | 
						|
 | 
						|
@pytest.fixture
def custom_en_tokenizer(en_vocab):
    """Build an English tokenizer with custom infix rules and a token_match
    pattern layered on top of the default English prefixes, suffixes, and
    tokenizer exceptions."""
    infix_patterns = [
        r"\.\.\.+",
        r"(?<=[0-9])-(?=[0-9])",
        r"[0-9]+(,[0-9]+)+",
        r"[\[\]!&:,()\*—–\/-]",
    ]
    return Tokenizer(
        en_vocab,
        English.Defaults.tokenizer_exceptions,
        compile_prefix_regex(English.Defaults.prefixes).search,
        compile_suffix_regex(English.Defaults.suffixes).search,
        compile_infix_regex(infix_patterns).finditer,
        # "a-b" must survive as a single token despite the '-' infix rule.
        token_match=re.compile("a-b").match,
    )
 | 
						|
 | 
						|
 | 
						|
def test_en_customized_tokenizer_handles_infixes(custom_en_tokenizer):
    """Custom infix rules should split the hyphen in number-word compounds."""
    expected = [
        "The", "8", "and", "10", "-", "county", "definitions", "are",
        "not", "used", "for", "the", "greater", "Southern", "California",
        "Megaregion", ".",
    ]
    doc = custom_en_tokenizer(
        "The 8 and 10-county definitions are not used for the greater Southern California Megaregion."
    )
    assert [token.text for token in doc] == expected
    # the trailing '-' may cause Assertion Error: same sentence with a
    # dangling hyphen after "8" should yield one extra "-" token.
    doc = custom_en_tokenizer(
        "The 8- and 10-county definitions are not used for the greater Southern California Megaregion."
    )
    assert [token.text for token in doc] == expected[:2] + ["-"] + expected[2:]
 | 
						|
 | 
						|
 | 
						|
def test_en_customized_tokenizer_handles_token_match(custom_en_tokenizer):
    """token_match should keep "a-b" whole even though '-' is an infix."""
    doc = custom_en_tokenizer(
        "The 8 and 10-county definitions a-b not used for the greater Southern California Megaregion."
    )
    assert [token.text for token in doc] == [
        "The", "8", "and", "10", "-", "county", "definitions", "a-b",
        "not", "used", "for", "the", "greater", "Southern", "California",
        "Megaregion", ".",
    ]
 | 
						|
 | 
						|
 | 
						|
def test_en_customized_tokenizer_handles_rules(custom_en_tokenizer):
    """The default exception rules should keep the ":)" emoticon intact."""
    doc = custom_en_tokenizer(
        "The 8 and 10-county definitions are not used for the greater Southern California Megaregion. :)"
    )
    assert [token.text for token in doc] == [
        "The", "8", "and", "10", "-", "county", "definitions", "are",
        "not", "used", "for", "the", "greater", "Southern", "California",
        "Megaregion", ".", ":)",
    ]
 | 
						|
 | 
						|
 | 
						|
def test_en_customized_tokenizer_handles_rules_property(custom_en_tokenizer):
    """Reassigning the rules property without ":)" should make the
    emoticon fall back to ordinary infix splitting (":" and ")")."""
    updated_rules = custom_en_tokenizer.rules
    del updated_rules[":)"]
    custom_en_tokenizer.rules = updated_rules
    doc = custom_en_tokenizer(
        "The 8 and 10-county definitions are not used for the greater Southern California Megaregion. :)"
    )
    assert [token.text for token in doc] == [
        "The", "8", "and", "10", "-", "county", "definitions", "are",
        "not", "used", "for", "the", "greater", "Southern", "California",
        "Megaregion", ".", ":", ")",
    ]
 |