2020-07-22 14:42:59 +03:00
|
|
|
from typing import Optional, List, Set, Dict, Callable, Any
|
|
|
|
from enum import Enum
|
2020-04-18 18:01:53 +03:00
|
|
|
import tempfile
|
|
|
|
import srsly
|
2020-07-19 14:34:37 +03:00
|
|
|
import warnings
|
2020-04-18 18:01:53 +03:00
|
|
|
from pathlib import Path
|
2020-07-22 14:42:59 +03:00
|
|
|
from thinc.api import Config
|
|
|
|
|
2020-07-19 14:34:37 +03:00
|
|
|
from ...errors import Warnings, Errors
|
2017-05-08 23:29:04 +03:00
|
|
|
from ...language import Language
|
|
|
|
from ...tokens import Doc
|
2020-07-22 14:42:59 +03:00
|
|
|
from ...util import DummyTokenizer, registry
|
2018-08-07 12:26:31 +03:00
|
|
|
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
2019-11-11 16:23:21 +03:00
|
|
|
from .lex_attrs import LEX_ATTRS
|
💫 Tidy up and auto-format .py files (#2983)
<!--- Provide a general summary of your changes in the title. -->
## Description
- [x] Use [`black`](https://github.com/ambv/black) to auto-format all `.py` files.
- [x] Update flake8 config to exclude very large files (lemmatization tables etc.)
- [x] Update code to be compatible with flake8 rules
- [x] Fix various small bugs, inconsistencies and messy stuff in the language data
- [x] Update docs to explain new code style (`black`, `flake8`, when to use `# fmt: off` and `# fmt: on` and what `# noqa` means)
Once #2932 is merged, which auto-formats and tidies up the CLI, we'll be able to run `flake8 spacy` actually get meaningful results.
At the moment, the code style and linting isn't applied automatically, but I'm hoping that the new [GitHub Actions](https://github.com/features/actions) will let us auto-format pull requests and post comments with relevant linting information.
### Types of change
enhancement, code style
## Checklist
<!--- Before you submit the PR, go over this checklist and make sure you can
tick off all the boxes. [] -> [x] -->
- [x] I have submitted the spaCy Contributor Agreement.
- [x] I ran the tests, and all new and existing tests passed.
- [x] My changes don't require a change to the documentation, or if they do, I've added all required information.
2018-11-30 19:03:03 +03:00
|
|
|
from .stop_words import STOP_WORDS
|
2020-04-18 18:01:53 +03:00
|
|
|
from ... import util
|
|
|
|
|
|
|
|
|
2020-07-19 12:09:49 +03:00
|
|
|
# Install hint appended to pkuseg-related error messages. NOTE(review): the
# exact version pin presumably matches the serialized model format — confirm
# before bumping.
_PKUSEG_INSTALL_MSG = "install it with `pip install pkuseg==0.0.25` or from https://github.com/lancopku/pkuseg-python"
|
2016-04-24 19:44:24 +03:00
|
|
|
|
2020-07-22 14:42:59 +03:00
|
|
|
DEFAULT_CONFIG = """
|
|
|
|
[nlp]
|
|
|
|
lang = "zh"
|
|
|
|
stop_words = {"@language_data": "spacy.zh.stop_words"}
|
|
|
|
lex_attr_getters = {"@language_data": "spacy.zh.lex_attr_getters"}
|
2019-08-18 16:09:16 +03:00
|
|
|
|
2020-07-22 14:42:59 +03:00
|
|
|
[nlp.tokenizer]
|
|
|
|
@tokenizers = "spacy.ChineseTokenizer.v1"
|
|
|
|
segmenter = "char"
|
|
|
|
pkuseg_model = null
|
|
|
|
pkuseg_user_dict = "default"
|
2019-11-20 15:15:24 +03:00
|
|
|
|
2020-07-22 14:42:59 +03:00
|
|
|
[nlp.writing_system]
|
|
|
|
direction = "ltr"
|
|
|
|
has_case = false
|
|
|
|
has_letters = false
|
|
|
|
"""
|
2020-04-18 18:01:53 +03:00
|
|
|
|
2019-11-11 16:23:21 +03:00
|
|
|
|
2020-07-22 14:42:59 +03:00
|
|
|
class Segmenter(str, Enum):
    """Closed set of supported Chinese word-segmentation strategies.

    Mixes in ``str`` so members compare equal to their plain string values
    (e.g. ``Segmenter.char == "char"``).
    """

    char = "char"
    jieba = "jieba"
    pkuseg = "pkuseg"

    @classmethod
    def values(cls):
        """Return all member names as a list of strings, in definition order."""
        return [member.name for member in cls]
|
2020-04-18 18:01:53 +03:00
|
|
|
|
2020-07-22 14:42:59 +03:00
|
|
|
|
|
|
|
@registry.language_data("spacy.zh.stop_words")
def stop_words() -> Set[str]:
    """Registry entry point returning the Chinese stop-word set."""
    return STOP_WORDS
|
|
|
|
|
|
|
|
|
|
|
|
@registry.language_data("spacy.zh.lex_attr_getters")
def lex_attr_getters() -> Dict[int, Callable[[str], Any]]:
    """Registry entry point returning the Chinese lexical attribute getters."""
    return LEX_ATTRS
|
|
|
|
|
|
|
|
|
|
|
|
@registry.tokenizers("spacy.ChineseTokenizer.v1")
def create_chinese_tokenizer(
    segmenter: Segmenter = Segmenter.char,
    pkuseg_model: Optional[str] = None,
    pkuseg_user_dict: Optional[str] = "default",
):
    """Registered factory for the Chinese tokenizer.

    Returns a callable that, given the ``nlp`` object, constructs a
    ``ChineseTokenizer`` with the configured segmentation settings.
    """

    def tokenizer_factory(nlp):
        # Construction is deferred until the Language object is available.
        return ChineseTokenizer(
            nlp,
            segmenter=segmenter,
            pkuseg_model=pkuseg_model,
            pkuseg_user_dict=pkuseg_user_dict,
        )

    return tokenizer_factory
|
2020-04-18 18:01:53 +03:00
|
|
|
|
|
|
|
|
2019-11-11 16:23:21 +03:00
|
|
|
class ChineseTokenizer(DummyTokenizer):
    """Tokenizer for Chinese with three segmentation modes.

    ``char`` (default) emits one token per character, ``jieba`` delegates to
    the jieba library, and ``pkuseg`` delegates to a pretrained pkuseg model.
    The pkuseg model and its user dictionary are (de)serialized via
    to_bytes/from_bytes and to_disk/from_disk.
    """

    def __init__(
        self,
        nlp: Language,
        segmenter: Segmenter = Segmenter.char,
        pkuseg_model: Optional[str] = None,
        pkuseg_user_dict: Optional[str] = None,
    ):
        self.vocab = nlp.vocab
        if isinstance(segmenter, Segmenter):  # we might have the Enum here
            segmenter = segmenter.value
        self.segmenter = segmenter
        self.pkuseg_model = pkuseg_model
        self.pkuseg_user_dict = pkuseg_user_dict
        self.pkuseg_seg = None
        self.jieba_seg = None
        self.configure_segmenter(segmenter)

    def configure_segmenter(self, segmenter: str):
        """Validate the segmenter name and load the segmentation backends.

        Unknown names warn (W103) and fall back to "char". The jieba/pkuseg
        loaders are no-ops (return None) unless the respective segmenter is
        selected or a pkuseg model is configured.
        """
        if segmenter not in Segmenter.values():
            warn_msg = Warnings.W103.format(
                lang="Chinese",
                segmenter=segmenter,
                supported=", ".join(Segmenter.values()),
                default="'char' (character segmentation)",
            )
            warnings.warn(warn_msg)
            # Segmenter is a str Enum, so this compares equal to "char" below.
            self.segmenter = Segmenter.char
        self.jieba_seg = try_jieba_import(self.segmenter)
        self.pkuseg_seg = try_pkuseg_import(
            self.segmenter,
            pkuseg_model=self.pkuseg_model,
            pkuseg_user_dict=self.pkuseg_user_dict,
        )

    def __call__(self, text: str) -> Doc:
        """Segment `text` and return a Doc with word/space alignment."""
        if self.segmenter == Segmenter.jieba:
            # drop empty segments; was list([...]), a redundant list() wrapper
            words = [x for x in self.jieba_seg.cut(text, cut_all=False) if x]
            (words, spaces) = util.get_words_and_spaces(words, text)
            return Doc(self.vocab, words=words, spaces=spaces)
        elif self.segmenter == Segmenter.pkuseg:
            if self.pkuseg_seg is None:
                # pkuseg selected but no model was loaded
                raise ValueError(Errors.E1000)
            words = self.pkuseg_seg.cut(text)
            (words, spaces) = util.get_words_and_spaces(words, text)
            return Doc(self.vocab, words=words, spaces=spaces)

        # warn if segmenter setting is not the only remaining option "char"
        if self.segmenter != Segmenter.char:
            warn_msg = Warnings.W103.format(
                lang="Chinese",
                segmenter=self.segmenter,
                supported=", ".join(Segmenter.values()),
                default="'char' (character segmentation)",
            )
            warnings.warn(warn_msg)

        # split into individual characters
        words = list(text)
        (words, spaces) = util.get_words_and_spaces(words, text)
        return Doc(self.vocab, words=words, spaces=spaces)

    def pkuseg_update_user_dict(self, words: List[str], reset: bool = False):
        """Insert `words` into the pkuseg user dict; `reset` clears it first.

        Warns (W104) and does nothing if the segmenter is not pkuseg.
        """
        if self.segmenter == Segmenter.pkuseg:
            if reset:
                try:
                    import pkuseg

                    self.pkuseg_seg.preprocesser = pkuseg.Preprocesser(None)
                except ImportError:
                    msg = (
                        "pkuseg not installed: unable to reset pkuseg "
                        "user dict. Please " + _PKUSEG_INSTALL_MSG
                    )
                    raise ImportError(msg)
            for word in words:
                self.pkuseg_seg.preprocesser.insert(word.strip(), "")
        else:
            warn_msg = Warnings.W104.format(target="pkuseg", current=self.segmenter)
            warnings.warn(warn_msg)

    def to_bytes(self, **kwargs):
        """Serialize the pkuseg model state (if loaded) to bytes."""
        pkuseg_features_b = b""
        pkuseg_weights_b = b""
        pkuseg_processors_data = None
        if self.pkuseg_seg:
            # pkuseg can only save to disk, so round-trip through a temp dir
            with tempfile.TemporaryDirectory() as tempdir:
                self.pkuseg_seg.feature_extractor.save(tempdir)
                self.pkuseg_seg.model.save(tempdir)
                tempdir = Path(tempdir)
                with open(tempdir / "features.pkl", "rb") as fileh:
                    pkuseg_features_b = fileh.read()
                with open(tempdir / "weights.npz", "rb") as fileh:
                    pkuseg_weights_b = fileh.read()
            # user dict trie + postprocessor state (sorted for determinism)
            pkuseg_processors_data = (
                _get_pkuseg_trie_data(self.pkuseg_seg.preprocesser.trie),
                self.pkuseg_seg.postprocesser.do_process,
                sorted(list(self.pkuseg_seg.postprocesser.common_words)),
                sorted(list(self.pkuseg_seg.postprocesser.other_words)),
            )
        serializers = {
            "pkuseg_features": lambda: pkuseg_features_b,
            "pkuseg_weights": lambda: pkuseg_weights_b,
            "pkuseg_processors": lambda: srsly.msgpack_dumps(pkuseg_processors_data),
        }
        return util.to_bytes(serializers, [])

    def from_bytes(self, data, **kwargs):
        """Restore pkuseg model state from bytes produced by `to_bytes`."""
        pkuseg_data = {"features_b": b"", "weights_b": b"", "processors_data": None}

        def deserialize_pkuseg_features(b):
            pkuseg_data["features_b"] = b

        def deserialize_pkuseg_weights(b):
            pkuseg_data["weights_b"] = b

        def deserialize_pkuseg_processors(b):
            pkuseg_data["processors_data"] = srsly.msgpack_loads(b)

        deserializers = {
            "pkuseg_features": deserialize_pkuseg_features,
            "pkuseg_weights": deserialize_pkuseg_weights,
            "pkuseg_processors": deserialize_pkuseg_processors,
        }
        util.from_bytes(data, deserializers, [])

        if pkuseg_data["features_b"] and pkuseg_data["weights_b"]:
            # pkuseg can only load from disk, so round-trip through a temp dir
            with tempfile.TemporaryDirectory() as tempdir:
                tempdir = Path(tempdir)
                with open(tempdir / "features.pkl", "wb") as fileh:
                    fileh.write(pkuseg_data["features_b"])
                with open(tempdir / "weights.npz", "wb") as fileh:
                    fileh.write(pkuseg_data["weights_b"])
                try:
                    import pkuseg
                except ImportError:
                    raise ImportError(
                        "pkuseg not installed. To use this model, "
                        + _PKUSEG_INSTALL_MSG
                    )
                self.pkuseg_seg = pkuseg.pkuseg(str(tempdir))
            if pkuseg_data["processors_data"]:
                processors_data = pkuseg_data["processors_data"]
                (user_dict, do_process, common_words, other_words) = processors_data
                self.pkuseg_seg.preprocesser = pkuseg.Preprocesser(user_dict)
                self.pkuseg_seg.postprocesser.do_process = do_process
                self.pkuseg_seg.postprocesser.common_words = set(common_words)
                self.pkuseg_seg.postprocesser.other_words = set(other_words)

        return self

    def to_disk(self, path, **kwargs):
        """Save the pkuseg model and processor state under `path`."""
        path = util.ensure_path(path)

        def save_pkuseg_model(path):
            # write model weights + feature extractor into the directory
            if self.pkuseg_seg:
                if not path.exists():
                    path.mkdir(parents=True)
                self.pkuseg_seg.model.save(path)
                self.pkuseg_seg.feature_extractor.save(path)

        def save_pkuseg_processors(path):
            # write user dict trie + postprocessor state as msgpack
            if self.pkuseg_seg:
                data = (
                    _get_pkuseg_trie_data(self.pkuseg_seg.preprocesser.trie),
                    self.pkuseg_seg.postprocesser.do_process,
                    sorted(list(self.pkuseg_seg.postprocesser.common_words)),
                    sorted(list(self.pkuseg_seg.postprocesser.other_words)),
                )
                srsly.write_msgpack(path, data)

        serializers = {
            "pkuseg_model": lambda p: save_pkuseg_model(p),
            "pkuseg_processors": lambda p: save_pkuseg_processors(p),
        }
        return util.to_disk(path, serializers, [])

    def from_disk(self, path, **kwargs):
        """Load the pkuseg model and processor state from `path`."""
        path = util.ensure_path(path)

        def load_pkuseg_model(path):
            try:
                import pkuseg
            except ImportError:
                if self.segmenter == Segmenter.pkuseg:
                    raise ImportError(
                        "pkuseg not installed. To use this model, "
                        + _PKUSEG_INSTALL_MSG
                    )
            if path.exists():
                self.pkuseg_seg = pkuseg.pkuseg(path)

        def load_pkuseg_processors(path):
            try:
                import pkuseg
            except ImportError:
                if self.segmenter == Segmenter.pkuseg:
                    # BUG FIX: previously raised
                    # ImportError(self._pkuseg_install_msg), but no such
                    # attribute exists, so a missing pkuseg surfaced as an
                    # AttributeError. Build the same message used by the
                    # sibling load/reset branches instead.
                    raise ImportError(
                        "pkuseg not installed. To use this model, "
                        + _PKUSEG_INSTALL_MSG
                    )
            if self.segmenter == Segmenter.pkuseg:
                data = srsly.read_msgpack(path)
                (user_dict, do_process, common_words, other_words) = data
                self.pkuseg_seg.preprocesser = pkuseg.Preprocesser(user_dict)
                self.pkuseg_seg.postprocesser.do_process = do_process
                self.pkuseg_seg.postprocesser.common_words = set(common_words)
                self.pkuseg_seg.postprocesser.other_words = set(other_words)

        serializers = {
            "pkuseg_model": lambda p: load_pkuseg_model(p),
            "pkuseg_processors": lambda p: load_pkuseg_processors(p),
        }
        util.from_disk(path, serializers, [])
|
2019-11-11 16:23:21 +03:00
|
|
|
|
|
|
|
|
2017-12-28 12:13:58 +03:00
|
|
|
class ChineseDefaults(Language.Defaults):
    """Language defaults for Chinese."""

    # Chinese defines no tokenizer exceptions of its own; reuse the base set.
    tokenizer_exceptions = BASE_EXCEPTIONS
|
2017-12-28 12:13:58 +03:00
|
|
|
|
2019-03-11 19:10:50 +03:00
|
|
|
|
2016-04-24 19:44:24 +03:00
|
|
|
class Chinese(Language):
    """Language subclass for Chinese (code "zh")."""

    lang = "zh"
    Defaults = ChineseDefaults
    # Parsed once at class-definition time from the module-level config string.
    default_config = Config().from_str(DEFAULT_CONFIG)
|
2016-05-05 12:39:12 +03:00
|
|
|
|
2020-07-22 14:42:59 +03:00
|
|
|
|
|
|
|
def try_jieba_import(segmenter: str):
    """Try to import and warm up jieba.

    Returns the `jieba` module on success. If jieba is not installed, raises
    ImportError when the configured segmenter actually requires it
    ("jieba"); otherwise returns None implicitly.

    NOTE: the original annotated the return type as ``-> None`` even though
    the module object is returned; the annotation has been removed as it was
    simply wrong.
    """
    try:
        import jieba

        if segmenter == Segmenter.jieba:
            # segment a short text to have jieba initialize its cache in advance
            list(jieba.cut("作为", cut_all=False))

        return jieba
    except ImportError:
        if segmenter == Segmenter.jieba:
            # BUG FIX: message previously rendered "pip  install" (double
            # space) because both concatenated fragments carried a space.
            msg = (
                "Jieba not installed. To use jieba, install it with "
                "`pip install jieba` or from https://github.com/fxsjy/jieba"
            )
            raise ImportError(msg)
|
|
|
|
|
|
|
|
|
|
|
|
def try_pkuseg_import(segmenter: str, pkuseg_model: str, pkuseg_user_dict: str):
    """Try to import pkuseg and load a model.

    Returns a ``pkuseg.pkuseg`` instance when `pkuseg_model` is set.
    Raises ValueError if the segmenter is "pkuseg" but no model is
    configured, ImportError if pkuseg is required but not installed, and
    FileNotFoundError if the configured model path cannot be loaded.
    Otherwise returns None implicitly.

    NOTE: the original annotated the return type as ``-> None`` even though
    a segmenter instance is returned; the annotation has been removed as it
    was simply wrong.
    """
    try:
        import pkuseg

        if pkuseg_model:
            return pkuseg.pkuseg(pkuseg_model, pkuseg_user_dict)
        elif segmenter == Segmenter.pkuseg:
            msg = (
                "The Chinese word segmenter is 'pkuseg' but no pkuseg model "
                "was specified. Please provide the name of a pretrained model "
                "or the path to a model with:\n"
                'cfg = {"nlp": {"tokenizer": {"segmenter": "pkuseg", "pkuseg_model": name_or_path }}\n'
                "nlp = Chinese.from_config(cfg)"
            )
            raise ValueError(msg)
    except ImportError:
        if segmenter == Segmenter.pkuseg:
            msg = "pkuseg not installed. To use pkuseg, " + _PKUSEG_INSTALL_MSG
            raise ImportError(msg)
    except FileNotFoundError:
        if segmenter == Segmenter.pkuseg:
            msg = "Unable to load pkuseg model from: " + pkuseg_model
            raise FileNotFoundError(msg)
|
2017-05-03 12:01:42 +03:00
|
|
|
|
|
|
|
|
2020-04-18 18:01:53 +03:00
|
|
|
def _get_pkuseg_trie_data(node, path=""):
|
|
|
|
data = []
|
|
|
|
for c, child_node in sorted(node.children.items()):
|
|
|
|
data.extend(_get_pkuseg_trie_data(child_node, path + c))
|
|
|
|
if node.isword:
|
|
|
|
data.append((path, node.usertag))
|
|
|
|
return data
|
|
|
|
|
|
|
|
|
2019-08-18 16:09:16 +03:00
|
|
|
# Public API of this module.
__all__ = ["Chinese"]
|