# spaCy/spacy/lang/zh/__init__.py
# coding: utf8
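"""Chinese language support. Word segmentation is delegated to the jieba
library (https://github.com/fxsjy/jieba); if `use_jieba` is disabled on the
defaults, text is segmented into individual characters instead.
"""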
from __future__ import unicode_literals
from ...attrs import LANG
from ...language import Language
from ...tokens import Doc
from .tag_map import TAG_MAP
from .stop_words import STOP_WORDS
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS


class ChineseDefaults(Language.Defaults):
    lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
    lex_attr_getters[LANG] = lambda text: 'zh'  # for pickling
    use_jieba = True  # set to False to fall back to character segmentation
    tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
    tag_map = TAG_MAP
    stop_words = STOP_WORDS


class Chinese(Language):
    lang = 'zh'
    Defaults = ChineseDefaults  # override defaults
    def make_doc(self, text):
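        """Create a Doc from raw text. Uses the jieba word segmenter when
        `Defaults.use_jieba` is True, otherwise falls back to splitting the
        default tokenizer's output into single characters.
        """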
        if self.Defaults.use_jieba:
            try:
                import jieba
            except ImportError:
                msg = ("Jieba not installed. Either set Chinese.use_jieba = False, "
                       "or install it from https://github.com/fxsjy/jieba")
                raise ImportError(msg)
            # Word-level segmentation; jieba emits no whitespace tokens, so
            # every token is marked as having no trailing space.
            words = list(jieba.cut(text, cut_all=False))
            words = [x for x in words if x]
            return Doc(self.vocab, words=words, spaces=[False] * len(words))
        else:
            # Character-level fallback: split each token from the default
            # tokenizer into single characters, preserving any trailing
            # whitespace on the last character of each token.
            words = []
            spaces = []
            for token in self.tokenizer(text):
                words.extend(list(token.text))
                spaces.extend([False] * len(token.text))
                spaces[-1] = bool(token.whitespace_)
            return Doc(self.vocab, words=words, spaces=spaces)


__all__ = ['Chinese']
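

# A minimal usage sketch (illustrative, not part of the original module):
# with jieba installed the pipeline produces word-level tokens; setting
# `Chinese.Defaults.use_jieba = False` yields one token per character.
#
#     >>> from spacy.lang.zh import Chinese
#     >>> nlp = Chinese()
#     >>> doc = nlp.make_doc(u'我爱自然语言处理')
#     >>> words = [t.text for t in doc]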