mirror of
				https://github.com/explosion/spaCy.git
				synced 2025-11-01 00:17:44 +03:00 
			
		
		
		
	
		
			
				
	
	
		
			20 lines
		
	
	
		
			567 B
		
	
	
	
		
			Python
		
	
	
	
	
	
			
		
		
	
	
			20 lines
		
	
	
		
			567 B
		
	
	
	
		
			Python
		
	
	
	
	
	
| # coding: utf-8
 | ||
| from __future__ import unicode_literals
 | ||
| 
 | ||
| import pytest
 | ||
| 
 | ||
| 
 | ||
@pytest.mark.parametrize(
    "text,norms,lemmas",
    [
        ("о.г.", ["ове године"], ["ова година"]),
        ("чет.", ["четвртак"], ["четвртак"]),
        ("гђа", ["госпођа"], ["госпођа"]),
        ("ил'", ["или"], ["или"]),
    ],
)
def test_sr_tokenizer_abbrev_exceptions(sr_tokenizer, text, norms, lemmas):
    """Serbian abbreviations listed as tokenizer exceptions stay one token.

    Each abbreviated form (e.g. "чет." for "четвртак") must not be split,
    and its NORM attribute must carry the expanded form.

    NOTE(review): the `lemmas` parameter is accepted to match the
    parametrize signature but is never asserted — presumably a lemma
    check was removed at some point; confirm before relying on it.
    """
    doc = sr_tokenizer(text)
    # An exception entry must keep the whole abbreviation as a single token.
    assert len(doc) == 1
    observed_norms = [tok.norm_ for tok in doc]
    assert observed_norms == norms