Work on draft Italian tokenizer

This commit is contained in:
Matthew Honnibal 2016-11-02 19:56:32 +01:00
parent 7555aa5e63
commit 19c1e83d3d

View File

@@ -3,7 +3,25 @@ from __future__ import unicode_literals, print_function
from os import path

from ..language import Language
from ..attrs import LANG
from . import language_data


class Italian(Language):
    """Draft Italian language pipeline.

    Wires the Italian tokenizer resources from ``language_data``
    (tokenizer exceptions, affix rules, tag map, stop words) into
    spaCy's :class:`Language` base class.
    """
    # NOTE(review): the committed diff names this class ``German`` —
    # almost certainly a copy-paste slip from the German module. The
    # module is the Italian tokenizer and ``lang`` is 'it', so the
    # class is reconstructed as ``Italian`` (matching the pre-commit
    # name and keeping the importable interface intact).
    lang = 'it'

    class Defaults(Language.Defaults):
        # Tokenizer special cases (e.g. contractions) for Italian.
        tokenizer_exceptions = dict(language_data.TOKENIZER_EXCEPTIONS)

        # Copy the shared getters so the Italian-specific LANG entry
        # does not mutate Language.Defaults for every other language.
        lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
        lex_attr_getters[LANG] = lambda text: 'it'

        # Affix rules consumed by the tokenizer.
        prefixes = tuple(language_data.TOKENIZER_PREFIXES)
        suffixes = tuple(language_data.TOKENIZER_SUFFIXES)
        infixes = tuple(language_data.TOKENIZER_INFIXES)

        tag_map = dict(language_data.TAG_MAP)
        stop_words = set(language_data.STOP_WORDS)