spaCy/spacy/lang/vi/__init__.py

from ...attrs import LANG, NORM
from ..norm_exceptions import BASE_NORMS
from ...language import Language
from ...tokens import Doc
from .stop_words import STOP_WORDS
from ...util import add_lookups
from .lex_attrs import LEX_ATTRS


class VietnameseDefaults(Language.Defaults):
    lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
    lex_attr_getters[LANG] = lambda text: "vi"  # for pickling
    lex_attr_getters[NORM] = add_lookups(
        Language.Defaults.lex_attr_getters[NORM], BASE_NORMS
    )
    lex_attr_getters.update(LEX_ATTRS)
    stop_words = STOP_WORDS
    use_pyvi = True


class Vietnamese(Language):
    lang = "vi"
    Defaults = VietnameseDefaults  # override defaults

    def make_doc(self, text):
        if self.Defaults.use_pyvi:
            try:
                from pyvi import ViTokenizer
            except ImportError:
                msg = (
                    "pyvi not installed. Either set Vietnamese.use_pyvi = False, "
                    "or install it from https://pypi.python.org/pypi/pyvi"
                )
                raise ImportError(msg)
            words, spaces = ViTokenizer.spacy_tokenize(text)
            return Doc(self.vocab, words=words, spaces=spaces)
        else:
            # Without pyvi, fall back to splitting each token into characters
            words = []
            spaces = []
            for token in self.tokenizer(text):
                words.extend(list(token.text))
                spaces.extend([False] * len(token.text))
                spaces[-1] = bool(token.whitespace_)
            return Doc(self.vocab, words=words, spaces=spaces)


__all__ = ["Vietnamese"]