Mirror of https://github.com/explosion/spaCy.git, synced 2025-11-04 09:57:26 +03:00.
			
		
		
		
	- added some tests for tokenization issues - fixed some issues with tokenization of words with hyphen infix - rewrote the "tokenizer_exceptions.py" file (stemming from the German version)
		
			
				
	
	
		
			24 lines
		
	
	
		
			704 B
		
	
	
	
		
			Python
		
	
	
	
	
	
			
		
		
	
	
			24 lines
		
	
	
		
			704 B
		
	
	
	
		
			Python
		
	
	
	
	
	
# coding: utf8
"""Infix punctuation rules for the tokenizer.

Defines ``TOKENIZER_INFIXES``: regex patterns marking positions *inside*
a token at which the tokenizer may split (elision apostrophes, sentence
punctuation between letters, hyphens between digits, etc.).
"""
from __future__ import unicode_literals

from ..char_classes import LIST_ELLIPSES, LIST_ICONS
from ..char_classes import CONCAT_QUOTES, ALPHA, ALPHA_LOWER, ALPHA_UPPER
# NOTE(review): CONCAT_QUOTES is imported but unused in this module —
# possibly kept for re-export or a planned rule; confirm before removing.

# Elision apostrophes (straight ' and typographic ’), built by stripping
# the spaces out of the template string — result is "'’".
ELISION = " ' ’ ".strip().replace(" ", "")

_infixes = (
    LIST_ELLIPSES
    + LIST_ICONS
    + [
        # Split after letter + elision apostrophe when a letter follows
        # (e.g. d'Kand -> d' | Kand). Zero-width: lookbehind/lookahead only.
        r"(?<=[{a}][{el}])(?=[{a}])".format(a=ALPHA, el=ELISION),
        # Period between a lowercase and an uppercase letter
        # (sentence boundary glued together, e.g. "end.Start").
        r"(?<=[{al}])\.(?=[{au}])".format(al=ALPHA_LOWER, au=ALPHA_UPPER),
        # Comma / exclamation / question mark sandwiched between letters.
        r"(?<=[{a}])[,!?](?=[{a}])".format(a=ALPHA),
        # Comparison-style symbols between letters.
        r"(?<=[{a}])[:<>=](?=[{a}])".format(a=ALPHA),
        # NOTE(review): redundant — a comma between letters is already
        # matched by the [,!?] pattern above; kept to preserve behavior.
        r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
        # Double hyphen between letters (em-dash substitute).
        r"(?<=[{a}])--(?=[{a}])".format(a=ALPHA),
        # Single hyphen between digits (e.g. ranges like 1-2).
        r"(?<=[0-9])-(?=[0-9])",
    ]
)

TOKENIZER_INFIXES = _infixes