Mirror of https://github.com/explosion/spaCy.git, commit eddeb36c96
## Description

- [x] Use [`black`](https://github.com/ambv/black) to auto-format all `.py` files.
- [x] Update flake8 config to exclude very large files (lemmatization tables etc.)
- [x] Update code to be compatible with flake8 rules.
- [x] Fix various small bugs, inconsistencies and messy stuff in the language data.
- [x] Update docs to explain the new code style (`black`, `flake8`, when to use `# fmt: off` and `# fmt: on`, and what `# noqa` means).

Once #2932 is merged, which auto-formats and tidies up the CLI, we'll be able to run `flake8 spacy` and actually get meaningful results. At the moment, the code style and linting aren't applied automatically, but I'm hoping that the new [GitHub Actions](https://github.com/features/actions) will let us auto-format pull requests and post comments with relevant linting information.

### Types of change

enhancement, code style

## Checklist

- [x] I have submitted the spaCy Contributor Agreement.
- [x] I ran the tests, and all new and existing tests passed.
- [x] My changes don't require a change to the documentation, or if they do, I've added all required information.
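For context, here is a minimal, invented illustration of the style directives mentioned above; `TAG_MAP` is a hypothetical table, not code from this commit:

```python
import os  # noqa: F401  - flake8 skips the unused-import check (F401) on this line

# black leaves everything between these markers exactly as written,
# which keeps manually aligned tables in the language data readable:
# fmt: off
TAG_MAP = {
    "NN":  {"pos": "NOUN"},
    "VBZ": {"pos": "VERB"},
}
# fmt: on
```

The file shown on this page follows.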
```python
# coding: utf8
from __future__ import unicode_literals

from ...attrs import LANG
from ...language import Language
from ...tokens import Doc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from .stop_words import STOP_WORDS


class ChineseDefaults(Language.Defaults):
    lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
    lex_attr_getters[LANG] = lambda text: "zh"
    use_jieba = True
    tokenizer_exceptions = BASE_EXCEPTIONS
    stop_words = STOP_WORDS


class Chinese(Language):
    lang = "zh"
    Defaults = ChineseDefaults  # override defaults

    def make_doc(self, text):
        if self.Defaults.use_jieba:
            try:
                import jieba
            except ImportError:
                msg = (
                    "Jieba not installed. Either set Chinese.use_jieba = False, "
                    "or install it https://github.com/fxsjy/jieba"
                )
                raise ImportError(msg)
            # Segment the text with jieba and drop any empty strings
            words = list(jieba.cut(text, cut_all=False))
            words = [x for x in words if x]
            return Doc(self.vocab, words=words, spaces=[False] * len(words))
        else:
            # Fall back to character-level segmentation: every character
            # becomes its own token, and only the final character of each
            # whitespace-delimited token keeps its trailing space.
            words = []
            spaces = []
            for token in self.tokenizer(text):
                words.extend(list(token.text))
                spaces.extend([False] * len(token.text))
                spaces[-1] = bool(token.whitespace_)
            return Doc(self.vocab, words=words, spaces=spaces)


__all__ = ["Chinese"]
```
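A short usage sketch of the class above, assuming spaCy v2.x and that `jieba` is installed for the default path; the sample sentence is arbitrary:

```python
from spacy.lang.zh import Chinese

# Default path: jieba performs word-level segmentation
nlp = Chinese()
doc = nlp.make_doc("我喜欢自然语言处理")
print([t.text for t in doc])

# Character-level fallback: no external dependency needed
Chinese.Defaults.use_jieba = False
nlp = Chinese()
doc = nlp.make_doc("我喜欢自然语言处理")
print([t.text for t in doc])  # one token per character
```

Note that `use_jieba` is read from `self.Defaults` each time `make_doc` is called, so flipping `Chinese.Defaults.use_jieba` affects existing instances as well as ones created afterwards.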