Mirror of https://github.com/explosion/spaCy.git (synced 2025-11-04 01:48:04 +03:00)
* Added Slovak tests
* Added Slovenian tests
* Added Estonian tests
* Added Croatian tests
* Added Latvian tests
* Added Icelandic tests
* Added Afrikaans tests
* Added language-independent tests
* Added Kannada tests
* Tidied up
* Added Albanian tests
* Formatted with black
* Added failing tests for anomalies
* Update spacy/tests/lang/af/test_text.py
* Added context to failing Estonian tokenizer test
* Added context to failing Croatian tokenizer test
* Added context to failing Icelandic tokenizer test
* Added context to failing Latvian tokenizer test
* Added context to failing Slovak tokenizer test
* Added context to failing Slovenian tokenizer test

Co-authored-by: Sofie Van Landeghem <svlandeg@users.noreply.github.com>
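The failing anomaly tests mentioned above stay in the suite rather than being deleted: spaCy's tests commonly record known tokenizer bugs as expected failures with pytest.mark.xfail. A minimal sketch of that pattern, under the assumption that xfail is used here as well; the input string, expected count, and reason below are hypothetical and not taken from the PR:

import pytest


# Hypothetical anomaly case for illustration only; the real failing tests
# live in the per-language test files. xfail keeps the bug documented and
# the suite green until the tokenizer is fixed.
@pytest.mark.xfail(reason="known tokenizer anomaly, see PR discussion")
@pytest.mark.parametrize("text,n_tokens", [("u.c.", 1)])
def test_lv_tokenizer_anomaly(lv_tokenizer, text, n_tokens):
    tokens = lv_tokenizer(text)
    assert len([t for t in tokens if not t.is_space]) == n_tokens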
31 lines · 778 B · Python
import pytest

LV_BASIC_TOKENIZATION_TESTS = [
    (
        "Nevienu nedrīkst spīdzināt vai cietsirdīgi vai pazemojoši ar viņu "
        "apieties vai sodīt.",
        [
            "Nevienu",
            "nedrīkst",
            "spīdzināt",
            "vai",
            "cietsirdīgi",
            "vai",
            "pazemojoši",
            "ar",
            "viņu",
            "apieties",
            "vai",
            "sodīt",
            ".",
        ],
    ),
]


@pytest.mark.parametrize("text,expected_tokens", LV_BASIC_TOKENIZATION_TESTS)
def test_lv_tokenizer_basic(lv_tokenizer, text, expected_tokens):
    tokens = lv_tokenizer(text)
    token_list = [token.text for token in tokens if not token.is_space]
    assert expected_tokens == token_list
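The lv_tokenizer argument is not defined in this file; it is injected by a shared pytest fixture. A minimal sketch of how spaCy's spacy/tests/conftest.py defines such per-language fixtures; the session scope and get_lang_class pattern match the rest of the suite, but check the actual conftest for the exact form:

import pytest

from spacy.util import get_lang_class


@pytest.fixture(scope="session")
def lv_tokenizer():
    # Build the Latvian Language object once per test session and
    # reuse its rule-based tokenizer across all lv tests.
    return get_lang_class("lv")().tokenizer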