from typing import Optional, List, Dict, Any, Callable, Iterable
from enum import Enum
import tempfile
import srsly
import warnings
from pathlib import Path

from ...errors import Warnings, Errors
from ...language import Language
from ...scorer import Scorer
from ...tokens import Doc
from ...training import validate_examples, Example
from ...util import DummyTokenizer, registry, load_config_from_str
from .lex_attrs import LEX_ATTRS
from .stop_words import STOP_WORDS
from ... import util


# fmt: off
_PKUSEG_INSTALL_MSG = "install spacy-pkuseg with `pip install \"spacy-pkuseg>=0.0.27,<0.1.0\"` or `conda install -c conda-forge \"spacy-pkuseg>=0.0.27,<0.1.0\"`"
# fmt: on

DEFAULT_CONFIG = """
[nlp]

[nlp.tokenizer]
@tokenizers = "spacy.zh.ChineseTokenizer"
segmenter = "char"

[initialize]

[initialize.tokenizer]
pkuseg_model = null
pkuseg_user_dict = "default"
"""


class Segmenter(str, Enum):
    char = "char"
    jieba = "jieba"
    pkuseg = "pkuseg"

    @classmethod
    def values(cls):
        return list(cls.__members__.keys())
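
# For reference, a quick sketch of what `values()` yields: the member
# names, which for this str-backed Enum match the member values.
#
#     Segmenter.values()        # -> ["char", "jieba", "pkuseg"]
#     Segmenter.char == "char"  # True, because Segmenter subclasses str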


@registry.tokenizers("spacy.zh.ChineseTokenizer")
def create_chinese_tokenizer(segmenter: Segmenter = Segmenter.char):
    def chinese_tokenizer_factory(nlp):
        return ChineseTokenizer(nlp, segmenter=segmenter)

    return chinese_tokenizer_factory
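
# A sketch of how the registered factory is used; the config system
# resolves "spacy.zh.ChineseTokenizer" from the [nlp.tokenizer] block,
# calls `create_chinese_tokenizer` with the resolved settings, and passes
# the `nlp` object to the returned factory. Illustrative only:
#
#     factory = create_chinese_tokenizer(segmenter=Segmenter.char)
#     # tokenizer = factory(nlp)  # once the Language object exists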


class ChineseTokenizer(DummyTokenizer):
    def __init__(self, nlp: Language, segmenter: Segmenter = Segmenter.char):
        self.vocab = nlp.vocab
        if isinstance(segmenter, Segmenter):
            segmenter = segmenter.value
        self.segmenter = segmenter
        self.pkuseg_seg = None
        self.jieba_seg = None
        if segmenter not in Segmenter.values():
            warn_msg = Warnings.W103.format(
                lang="Chinese",
                segmenter=segmenter,
                supported=", ".join(Segmenter.values()),
                default="'char' (character segmentation)",
            )
            warnings.warn(warn_msg)
            self.segmenter = Segmenter.char
        if segmenter == Segmenter.jieba:
            self.jieba_seg = try_jieba_import()

    def initialize(
        self,
        get_examples: Optional[Callable[[], Iterable[Example]]] = None,
        *,
        nlp: Optional[Language] = None,
        pkuseg_model: Optional[str] = None,
        pkuseg_user_dict: Optional[str] = "default",
    ):
        if self.segmenter == Segmenter.pkuseg:
            if pkuseg_user_dict is None:
                pkuseg_user_dict = pkuseg_model
            self.pkuseg_seg = try_pkuseg_import(
                pkuseg_model=pkuseg_model, pkuseg_user_dict=pkuseg_user_dict
            )
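
    # A hedged sketch of driving `initialize` from the training config
    # instead of code; the block mirrors [initialize.tokenizer] in
    # DEFAULT_CONFIG above, with a placeholder model path:
    #
    #     [initialize.tokenizer]
    #     pkuseg_model = "/path/to/pkuseg_model"
    #     pkuseg_user_dict = "default"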

    def __call__(self, text: str) -> Doc:
        if self.segmenter == Segmenter.jieba:
            words = [x for x in self.jieba_seg.cut(text, cut_all=False) if x]
            (words, spaces) = util.get_words_and_spaces(words, text)
            return Doc(self.vocab, words=words, spaces=spaces)
        elif self.segmenter == Segmenter.pkuseg:
            if self.pkuseg_seg is None:
                raise ValueError(Errors.E1000)
            words = self.pkuseg_seg.cut(text)
            (words, spaces) = util.get_words_and_spaces(words, text)
            return Doc(self.vocab, words=words, spaces=spaces)

        # warn if an unsupported segmenter value slipped through; at this
        # point only the default "char" segmentation should remain
        if self.segmenter != Segmenter.char:
            warn_msg = Warnings.W103.format(
                lang="Chinese",
                segmenter=self.segmenter,
                supported=", ".join(Segmenter.values()),
                default="'char' (character segmentation)",
            )
            warnings.warn(warn_msg)

        # split into individual characters
        words = list(text)
        (words, spaces) = util.get_words_and_spaces(words, text)
        return Doc(self.vocab, words=words, spaces=spaces)
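
    # Behavior sketch for the default "char" path: every character becomes
    # its own token, with whitespace recovered by `util.get_words_and_spaces`.
    # Hypothetical REPL session:
    #
    #     # [t.text for t in tokenizer("英文")]  # -> ["英", "文"]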

    def pkuseg_update_user_dict(self, words: List[str], reset: bool = False):
        if self.segmenter == Segmenter.pkuseg:
            if reset:
                try:
                    import spacy_pkuseg

                    self.pkuseg_seg.preprocesser = spacy_pkuseg.Preprocesser(None)
                except ImportError:
                    msg = (
                        "spacy_pkuseg not installed: unable to reset pkuseg "
                        "user dict. Please " + _PKUSEG_INSTALL_MSG
                    )
                    raise ImportError(msg) from None
            for word in words:
                self.pkuseg_seg.preprocesser.insert(word.strip(), "")
        else:
            warn_msg = Warnings.W104.format(target="pkuseg", current=self.segmenter)
            warnings.warn(warn_msg)
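
    # Usage sketch (pkuseg segmenter only); assumes `nlp.tokenizer` is this
    # class with an initialized pkuseg model:
    #
    #     # nlp.tokenizer.pkuseg_update_user_dict(["中文分词"])
    #     # nlp.tokenizer.pkuseg_update_user_dict([], reset=True)  # clear it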

    def score(self, examples):
        validate_examples(examples, "ChineseTokenizer.score")
        return Scorer.score_tokenization(examples)

    def _get_config(self) -> Dict[str, Any]:
        return {
            "segmenter": self.segmenter,
        }

    def _set_config(self, config: Optional[Dict[str, Any]] = None) -> None:
        # avoid a mutable default argument; an empty config falls back to "char"
        config = config if config is not None else {}
        self.segmenter = config.get("segmenter", Segmenter.char)

    def to_bytes(self, **kwargs):
        pkuseg_features_b = b""
        pkuseg_weights_b = b""
        pkuseg_processors_data = None
        if self.pkuseg_seg:
            with tempfile.TemporaryDirectory() as tempdir:
                self.pkuseg_seg.feature_extractor.save(tempdir)
                self.pkuseg_seg.model.save(tempdir)
                tempdir = Path(tempdir)
                with open(tempdir / "features.msgpack", "rb") as fileh:
                    pkuseg_features_b = fileh.read()
                with open(tempdir / "weights.npz", "rb") as fileh:
                    pkuseg_weights_b = fileh.read()
            pkuseg_processors_data = (
                _get_pkuseg_trie_data(self.pkuseg_seg.preprocesser.trie),
                self.pkuseg_seg.postprocesser.do_process,
                sorted(self.pkuseg_seg.postprocesser.common_words),
                sorted(self.pkuseg_seg.postprocesser.other_words),
            )
        serializers = {
            "cfg": lambda: srsly.json_dumps(self._get_config()),
            "pkuseg_features": lambda: pkuseg_features_b,
            "pkuseg_weights": lambda: pkuseg_weights_b,
            "pkuseg_processors": lambda: srsly.msgpack_dumps(pkuseg_processors_data),
        }
        return util.to_bytes(serializers, [])

    def from_bytes(self, data, **kwargs):
        pkuseg_data = {"features_b": b"", "weights_b": b"", "processors_data": None}

        def deserialize_pkuseg_features(b):
            pkuseg_data["features_b"] = b

        def deserialize_pkuseg_weights(b):
            pkuseg_data["weights_b"] = b

        def deserialize_pkuseg_processors(b):
            pkuseg_data["processors_data"] = srsly.msgpack_loads(b)

        deserializers = {
            "cfg": lambda b: self._set_config(srsly.json_loads(b)),
            "pkuseg_features": deserialize_pkuseg_features,
            "pkuseg_weights": deserialize_pkuseg_weights,
            "pkuseg_processors": deserialize_pkuseg_processors,
        }
        util.from_bytes(data, deserializers, [])

        if pkuseg_data["features_b"] and pkuseg_data["weights_b"]:
            with tempfile.TemporaryDirectory() as tempdir:
                tempdir = Path(tempdir)
                with open(tempdir / "features.msgpack", "wb") as fileh:
                    fileh.write(pkuseg_data["features_b"])
                with open(tempdir / "weights.npz", "wb") as fileh:
                    fileh.write(pkuseg_data["weights_b"])
                try:
                    import spacy_pkuseg
                except ImportError:
                    raise ImportError(
                        "spacy-pkuseg not installed. To use this model, "
                        + _PKUSEG_INSTALL_MSG
                    ) from None
                self.pkuseg_seg = spacy_pkuseg.pkuseg(str(tempdir))
            if pkuseg_data["processors_data"]:
                processors_data = pkuseg_data["processors_data"]
                (user_dict, do_process, common_words, other_words) = processors_data
                self.pkuseg_seg.preprocesser = spacy_pkuseg.Preprocesser(user_dict)
                self.pkuseg_seg.postprocesser.do_process = do_process
                self.pkuseg_seg.postprocesser.common_words = set(common_words)
                self.pkuseg_seg.postprocesser.other_words = set(other_words)

        return self
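
    # Round-trip sketch: `to_bytes` and `from_bytes` are symmetric, so a
    # tokenizer's state can be restored into a fresh instance:
    #
    #     # data = nlp.tokenizer.to_bytes()
    #     # other_nlp.tokenizer.from_bytes(data)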

    def to_disk(self, path, **kwargs):
        path = util.ensure_path(path)

        def save_pkuseg_model(path):
            if self.pkuseg_seg:
                if not path.exists():
                    path.mkdir(parents=True)
                self.pkuseg_seg.model.save(path)
                self.pkuseg_seg.feature_extractor.save(path)

        def save_pkuseg_processors(path):
            if self.pkuseg_seg:
                data = (
                    _get_pkuseg_trie_data(self.pkuseg_seg.preprocesser.trie),
                    self.pkuseg_seg.postprocesser.do_process,
                    sorted(self.pkuseg_seg.postprocesser.common_words),
                    sorted(self.pkuseg_seg.postprocesser.other_words),
                )
                srsly.write_msgpack(path, data)

        serializers = {
            "cfg": lambda p: srsly.write_json(p, self._get_config()),
            "pkuseg_model": lambda p: save_pkuseg_model(p),
            "pkuseg_processors": lambda p: save_pkuseg_processors(p),
        }
        return util.to_disk(path, serializers, [])

    def from_disk(self, path, **kwargs):
        path = util.ensure_path(path)

        def load_pkuseg_model(path):
            try:
                import spacy_pkuseg
            except ImportError:
                if self.segmenter == Segmenter.pkuseg:
                    raise ImportError(
                        "spacy-pkuseg not installed. To use this model, "
                        + _PKUSEG_INSTALL_MSG
                    ) from None
            if path.exists():
                self.pkuseg_seg = spacy_pkuseg.pkuseg(path)

        def load_pkuseg_processors(path):
            try:
                import spacy_pkuseg
            except ImportError:
                if self.segmenter == Segmenter.pkuseg:
                    raise ImportError(
                        "spacy-pkuseg not installed. To use this model, "
                        + _PKUSEG_INSTALL_MSG
                    ) from None
            if self.segmenter == Segmenter.pkuseg:
                data = srsly.read_msgpack(path)
                (user_dict, do_process, common_words, other_words) = data
                self.pkuseg_seg.preprocesser = spacy_pkuseg.Preprocesser(user_dict)
                self.pkuseg_seg.postprocesser.do_process = do_process
                self.pkuseg_seg.postprocesser.common_words = set(common_words)
                self.pkuseg_seg.postprocesser.other_words = set(other_words)

        serializers = {
            "cfg": lambda p: self._set_config(srsly.read_json(p)),
            "pkuseg_model": lambda p: load_pkuseg_model(p),
            "pkuseg_processors": lambda p: load_pkuseg_processors(p),
        }
        util.from_disk(path, serializers, [])
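
    # Disk round-trip sketch, mirroring to_bytes/from_bytes; the target
    # directory holds "cfg" plus the pkuseg model and processors when one
    # is loaded. The path is a placeholder:
    #
    #     # nlp.tokenizer.to_disk("/tmp/zh_tokenizer")
    #     # nlp.tokenizer.from_disk("/tmp/zh_tokenizer")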


class ChineseDefaults(Language.Defaults):
    config = load_config_from_str(DEFAULT_CONFIG)
    lex_attr_getters = LEX_ATTRS
    stop_words = STOP_WORDS
    writing_system = {"direction": "ltr", "has_case": False, "has_letters": False}


class Chinese(Language):
lang = "zh"
|
2020-07-22 14:42:59 +03:00
|
|
|
Defaults = ChineseDefaults


def try_jieba_import():
    try:
        import jieba

        # segment a short text to have jieba initialize its cache in advance
        list(jieba.cut("作为", cut_all=False))

        return jieba
    except ImportError:
        msg = (
            "Jieba not installed. To use jieba, install it with `pip "
            "install jieba` or from https://github.com/fxsjy/jieba"
        )
        raise ImportError(msg) from None


def try_pkuseg_import(pkuseg_model: Optional[str], pkuseg_user_dict: Optional[str]):
    try:
        import spacy_pkuseg
    except ImportError:
        msg = "spacy-pkuseg not installed. To use pkuseg, " + _PKUSEG_INSTALL_MSG
        raise ImportError(msg) from None
    try:
        return spacy_pkuseg.pkuseg(pkuseg_model, pkuseg_user_dict)
    except FileNotFoundError:
        msg = "Unable to load pkuseg model from: " + str(pkuseg_model)
        raise FileNotFoundError(msg) from None


def _get_pkuseg_trie_data(node, path=""):
|
|
|
|
data = []
|
|
|
|
for c, child_node in sorted(node.children.items()):
|
|
|
|
data.extend(_get_pkuseg_trie_data(child_node, path + c))
|
|
|
|
if node.isword:
|
|
|
|
data.append((path, node.usertag))
|
|
|
|
return data
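
# Sketch of the structure the trie walk above produces, assuming a pkuseg
# preprocesser trie holding user-dict entries (hypothetical data):
#
#     _get_pkuseg_trie_data(trie)  # -> [("北京", ""), ("中文分词", "")]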


__all__ = ["Chinese"]