Mirror of https://github.com/explosion/spaCy.git, synced 2025-11-04 18:07:26 +03:00
This reverts commit 6f314f99c4.

We are reverting this until we can support this normalization more
consistently across vectors, training corpora, and lemmatizer data.
17 lines | 510 B | Python
import pytest


# Each abbreviation should remain a single token whose norm expands to the
# full Serbian form. The lemmas column is kept for reference; only the norms
# are asserted here.
@pytest.mark.parametrize(
    "text,norms,lemmas",
    [
        ("о.г.", ["ове године"], ["ова година"]),
        ("чет.", ["четвртак"], ["четвртак"]),
        ("гђа", ["госпођа"], ["госпођа"]),
        ("ил'", ["или"], ["или"]),
    ],
)
def test_sr_tokenizer_abbrev_exceptions(sr_tokenizer, text, norms, lemmas):
    tokens = sr_tokenizer(text)
    assert len(tokens) == 1
    assert [token.norm_ for token in tokens] == norms