# coding: utf8
from __future__ import unicode_literals

from .stop_words import STOP_WORDS
from .punctuation import TOKENIZER_SUFFIXES, TOKENIZER_PREFIXES, TOKENIZER_INFIXES
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .norm_exceptions import NORM_EXCEPTIONS
from .lemmatizer import LOOKUP
from .lex_attrs import LEX_ATTRS
from .syntax_iterators import SYNTAX_ITERATORS

from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ..norm_exceptions import BASE_NORMS
from ...language import Language
from ...attrs import LANG, NORM
from ...util import update_exc, add_lookups


class IndonesianDefaults(Language.Defaults):
    lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
    lex_attr_getters[LANG] = lambda text: 'id'
    # Apply the Indonesian norm exceptions on top of the shared base norms.
    lex_attr_getters[NORM] = add_lookups(Language.Defaults.lex_attr_getters[NORM],
                                         BASE_NORMS, NORM_EXCEPTIONS)
    lex_attr_getters.update(LEX_ATTRS)
    tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
    stop_words = STOP_WORDS
    prefixes = TOKENIZER_PREFIXES
    suffixes = TOKENIZER_SUFFIXES
    infixes = TOKENIZER_INFIXES
    syntax_iterators = SYNTAX_ITERATORS
    lemma_lookup = LOOKUP


class Indonesian(Language):
    lang = 'id'
    Defaults = IndonesianDefaults


__all__ = ['Indonesian']
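

# Usage sketch (a minimal example, assuming the spaCy 2.x API, where a
# language class can be instantiated directly; the sample sentence is
# illustrative only):
#
#     from spacy.lang.id import Indonesian
#     nlp = Indonesian()
#     doc = nlp(u'Ini adalah sebuah contoh kalimat.')
#     print([token.text for token in doc])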