diff --git a/.github/contributors/phiedulxp.md b/.github/contributors/phiedulxp.md
new file mode 100644
index 000000000..fa2666e78
--- /dev/null
+++ b/.github/contributors/phiedulxp.md
@@ -0,0 +1,106 @@
+# spaCy contributor agreement
+
+This spaCy Contributor Agreement (**"SCA"**) is based on the
+[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf).
+The SCA applies to any contribution that you make to any product or project
+managed by us (the **"project"**), and sets out the intellectual property rights
+you grant to us in the contributed materials. The term **"us"** shall mean
+[ExplosionAI GmbH](https://explosion.ai/legal). The term
+**"you"** shall mean the person or entity identified below.
+
+If you agree to be bound by these terms, fill in the information requested
+below and include the filled-in version with your first pull request, under the
+folder [`.github/contributors/`](/.github/contributors/). The name of the file
+should be your GitHub username, with the extension `.md`. For example, the user
+example_user would create the file `.github/contributors/example_user.md`.
+
+Read this agreement carefully before signing. These terms and conditions
+constitute a binding legal agreement.
+
+## Contributor Agreement
+
+1. The term "contribution" or "contributed materials" means any source code,
+object code, patch, tool, sample, graphic, specification, manual,
+documentation, or any other material posted or submitted by you to the project.
+
+2. With respect to any worldwide copyrights, or copyright applications and
+registrations, in your contribution:
+
+    * you hereby assign to us joint ownership, and to the extent that such
+    assignment is or becomes invalid, ineffective or unenforceable, you hereby
+    grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge,
+    royalty-free, unrestricted license to exercise all rights under those
+    copyrights. This includes, at our option, the right to sublicense these same
+    rights to third parties through multiple levels of sublicensees or other
+    licensing arrangements;
+
+    * you agree that each of us can do all things in relation to your
+    contribution as if each of us were the sole owners, and if one of us makes
+    a derivative work of your contribution, the one who makes the derivative
+    work (or has it made) will be the sole owner of that derivative work;
+
+    * you agree that you will not assert any moral rights in your contribution
+    against us, our licensees or transferees;
+
+    * you agree that we may register a copyright in your contribution and
+    exercise all ownership rights associated with it; and
+
+    * you agree that neither of us has any duty to consult with, obtain the
+    consent of, pay or render an accounting to the other for any use or
+    distribution of your contribution.
+
+3. With respect to any patents you own, or that you can license without payment
+to any third party, you hereby grant to us a perpetual, irrevocable,
+non-exclusive, worldwide, no-charge, royalty-free license to:
+
+    * make, have made, use, sell, offer to sell, import, and otherwise transfer
+    your contribution in whole or in part, alone or in combination with or
+    included in any product, work or materials arising out of the project to
+    which your contribution was submitted, and
+
+    * at our option, to sublicense these same rights to third parties through
+    multiple levels of sublicensees or other licensing arrangements.
+
+4. Except as set out above, you keep all right, title, and interest in your
+contribution. The rights that you grant to us under these terms are effective
+on the date you first submitted a contribution to us, even if your submission
+took place before the date you sign these terms.
+
+5. You covenant, represent, warrant and agree that:
+
+    * Each contribution that you submit is and shall be an original work of
+    authorship and you can legally grant the rights set out in this SCA;
+
+    * to the best of your knowledge, each contribution will not violate any
+    third party's copyrights, trademarks, patents, or other intellectual
+    property rights; and
+
+    * each contribution shall be in compliance with U.S. export control laws and
+    other applicable export and import laws. You agree to notify us if you
+    become aware of any circumstance which would make any of the foregoing
+    representations inaccurate in any respect. We may publicly disclose your
+    participation in the project, including the fact that you have signed the SCA.
+
+6. This SCA is governed by the laws of the State of California and applicable
+U.S. Federal law. Any choice of law rules will not apply.
+
+7. Please place an “x” in one of the applicable statements below. Please do NOT
+mark both statements:
+
+    * [x] I am signing on behalf of myself as an individual and no other person
+    or entity, including my employer, has or will have rights with respect to my
+    contributions.
+
+    * [ ] I am signing on behalf of my employer or a legal entity and I have the
+    actual authority to contractually bind that entity.
+
+## Contributor Details
+
+| Field                          | Entry                |
+|------------------------------- | -------------------- |
+| Name                           | Xiepeng Li           |
+| Company name (if applicable)   |                      |
+| Title or role (if applicable)  |                      |
+| Date                           | 20190810             |
+| GitHub username                | phiedulxp            |
+| Website (optional)             |                      |
diff --git a/spacy/lang/zh/__init__.py b/spacy/lang/zh/__init__.py
index 773bbcf38..b1ee5105c 100644
--- a/spacy/lang/zh/__init__.py
+++ b/spacy/lang/zh/__init__.py
@@ -6,7 +6,7 @@ from ...language import Language
 from ...tokens import Doc
 from ..tokenizer_exceptions import BASE_EXCEPTIONS
 from .stop_words import STOP_WORDS
-
+from .tag_map import TAG_MAP
 
 class ChineseDefaults(Language.Defaults):
     lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
@@ -14,6 +14,7 @@ class ChineseDefaults(Language.Defaults):
     use_jieba = True
     tokenizer_exceptions = BASE_EXCEPTIONS
     stop_words = STOP_WORDS
+    tag_map = TAG_MAP
     writing_system = {"direction": "ltr", "has_case": False, "has_letters": False}
 
 
@@ -44,4 +45,4 @@ class Chinese(Language):
         return Doc(self.vocab, words=words, spaces=spaces)
 
 
-__all__ = ["Chinese"]
+__all__ = ["Chinese"]
\ No newline at end of file
diff --git a/spacy/lang/zh/examples.py b/spacy/lang/zh/examples.py
index 3c2e45e80..1553768a8 100644
--- a/spacy/lang/zh/examples.py
+++ b/spacy/lang/zh/examples.py
@@ -9,10 +9,12 @@ Example sentences to test spaCy and its language models.
 >>> docs = nlp.pipe(sentences)
 """
 
-
+# from https://zh.wikipedia.org/wiki/汉语
 sentences = [
-    "蘋果公司正考量用一億元買下英國的新創公司",
-    "自駕車將保險責任歸屬轉移至製造商",
-    "舊金山考慮禁止送貨機器人在人行道上行駛",
-    "倫敦是英國的大城市",
+    "作为语言而言,为世界使用人数最多的语言,目前世界有五分之一人口做为母语。",
+    "汉语有多种分支,当中官话最为流行,为中华人民共和国的国家通用语言(又称为普通话)、以及中华民国的国语。",
+    "此外,中文还是联合国正式语文,并被上海合作组织等国际组织采用为官方语言。",
+    "在中国大陆,汉语通称为“汉语”。",
+    "在联合国、台湾、香港及澳门,通称为“中文”。",
+    "在新加坡及马来西亚,通称为“华语”。"
 ]
diff --git a/spacy/lang/zh/lex_attrs.py b/spacy/lang/zh/lex_attrs.py
new file mode 100644
index 000000000..0a42883f7
--- /dev/null
+++ b/spacy/lang/zh/lex_attrs.py
@@ -0,0 +1,107 @@
+# coding: utf8
+from __future__ import unicode_literals
+import re
+from ...attrs import LIKE_NUM
+
+_single_num_words = [
+    "〇",
+    "一",
+    "二",
+    "三",
+    "四",
+    "五",
+    "六",
+    "七",
+    "八",
+    "九",
+    "十",
+    "十一",
+    "十二",
+    "十三",
+    "十四",
+    "十五",
+    "十六",
+    "十七",
+    "十八",
+    "十九",
+    "廿",
+    "卅",
+    "卌",
+    "皕",
+    "零",
+    "壹",
+    "贰",
+    "叁",
+    "肆",
+    "伍",
+    "陆",
+    "柒",
+    "捌",
+    "玖",
+    "拾",
+    "拾壹",
+    "拾贰",
+    "拾叁",
+    "拾肆",
+    "拾伍",
+    "拾陆",
+    "拾柒",
+    "拾捌",
+    "拾玖"
+]
+
+_count_num_words = [
+    "一",
+    "二",
+    "三",
+    "四",
+    "五",
+    "六",
+    "七",
+    "八",
+    "九",
+    "壹",
+    "贰",
+    "叁",
+    "肆",
+    "伍",
+    "陆",
+    "柒",
+    "捌",
+    "玖"
+]
+
+_base_num_words = [
+    "十",
+    "百",
+    "千",
+    "万",
+    "亿",
+    "兆",
+    "拾",
+    "佰",
+    "仟"
+]
+
+
+def like_num(text):
+    if text.startswith(("+", "-", "±", "~")):
+        text = text[1:]
+    text = text.replace(",", "").replace(
+        ".", "").replace(",", "").replace("。", "")
+    if text.isdigit():
+        return True
+    if text.count("/") == 1:
+        num, denom = text.split("/")
+        if num.isdigit() and denom.isdigit():
+            return True
+    if text in _single_num_words:
+        return True
+    if re.match('^((' + '|'.join(_count_num_words) + '){1}'
+                + '(' + '|'.join(_base_num_words) + '){1})+'
+                + '(' + '|'.join(_count_num_words) + ')?$', text):
+        return True
+    return False
+
+
+LEX_ATTRS = {LIKE_NUM: like_num}
diff --git a/spacy/lang/zh/tag_map.py b/spacy/lang/zh/tag_map.py
new file mode 100644
index 000000000..6aa988a98
--- /dev/null
+++ b/spacy/lang/zh/tag_map.py
@@ -0,0 +1,47 @@
+# coding: utf8
+from __future__ import unicode_literals
+
+from ...symbols import POS, PUNCT, SYM, ADJ, CONJ, CCONJ, NUM, DET, ADV, ADP, X, VERB
+from ...symbols import NOUN, PROPN, PART, INTJ, SPACE, PRON, AUX
+
+# The Chinese part-of-speech tagger uses the OntoNotes 5 version of the Penn Treebank tag set.
+# We also map the tags to the simpler Google Universal POS tag set.
+
+TAG_MAP = {
+    "AS": {POS: PART},
+    "DEC": {POS: PART},
+    "DEG": {POS: PART},
+    "DER": {POS: PART},
+    "DEV": {POS: PART},
+    "ETC": {POS: PART},
+    "LC": {POS: PART},
+    "MSP": {POS: PART},
+    "SP": {POS: PART},
+    "BA": {POS: X},
+    "FW": {POS: X},
+    "IJ": {POS: INTJ},
+    "LB": {POS: X},
+    "ON": {POS: X},
+    "SB": {POS: X},
+    "X": {POS: X},
+    "URL": {POS: X},
+    "INF": {POS: X},
+    "NN": {POS: NOUN},
+    "NR": {POS: NOUN},
+    "NT": {POS: NOUN},
+    "VA": {POS: VERB},
+    "VC": {POS: VERB},
+    "VE": {POS: VERB},
+    "VV": {POS: VERB},
+    "CD": {POS: NUM},
+    "M": {POS: NUM},
+    "OD": {POS: NUM},
+    "DT": {POS: DET},
+    "CC": {POS: CCONJ},
+    "CS": {POS: CONJ},
+    "AD": {POS: ADV},
+    "JJ": {POS: ADJ},
+    "P": {POS: ADP},
+    "PN": {POS: PRON},
+    "PU": {POS: PUNCT}
+}
\ No newline at end of file
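The new example sentences in `spacy/lang/zh/examples.py` come from the Chinese Wikipedia article on 汉语 and all describe the Chinese language itself. For reviewers who don't read Chinese, the snippet below runs them through a blank `Chinese` pipeline and includes rough English glosses as comments. This is only an illustrative sketch against this branch, not part of the patch, and it needs the `jieba` package installed because `ChineseDefaults.use_jieba` is `True`:

```python
# coding: utf8
from __future__ import unicode_literals

# Illustrative only: requires this branch plus `jieba` for the default
# Chinese word segmentation.
from spacy.lang.zh import Chinese
from spacy.lang.zh.examples import sentences

# Rough English glosses of the new examples (added here for review only):
# 1. As a language, it is the most widely spoken in the world; about one fifth
#    of the world's population currently speaks it as a mother tongue.
# 2. Chinese has many branches, of which Mandarin is the most widespread; it is
#    the national common language of the PRC (also called Putonghua) and the
#    national language of the Republic of China.
# 3. Chinese is also an official language of the United Nations and has been
#    adopted as an official language by international organisations such as
#    the Shanghai Cooperation Organisation.
# 4. In mainland China it is commonly called "Hanyu".
# 5. At the United Nations and in Taiwan, Hong Kong and Macau it is commonly
#    called "Zhongwen".
# 6. In Singapore and Malaysia it is commonly called "Huayu".

nlp = Chinese()  # blank pipeline: jieba-based segmentation, no tagger/parser
for doc in nlp.pipe(sentences):
    print([token.text for token in doc])
```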
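The new `like_num` in `spacy/lang/zh/lex_attrs.py` chains several checks: plain digits (after stripping a leading sign and the separators , . , 。), simple fractions, a lookup in `_single_num_words`, and a regular expression that accepts repeated count-word plus base-word pairs (e.g. 三百五十 = 350) with an optional trailing count word. A minimal sanity check against this branch might look like the following; the function is called directly here because this diff does not yet hook `LEX_ATTRS` into `ChineseDefaults.lex_attr_getters`:

```python
# coding: utf8
from __future__ import unicode_literals

# Illustrative only: exercises the new helper directly from this branch.
from spacy.lang.zh.lex_attrs import like_num

# Arabic digits, thousand separators and fractions.
assert like_num("100")
assert like_num("1,000,000")
assert like_num("1/3")

# Words listed in _single_num_words.
assert like_num("十二")
assert like_num("廿")

# Compound numerals matched by the regular expression:
# 三 (3) + 百 (hundred) + 五 (5) + 十 (ten) -> 350.
assert like_num("三百五十")
assert like_num("一亿")

# Ordinary words should not be number-like.
assert not like_num("汉语")

print("like_num checks passed")
```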
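`spacy/lang/zh/tag_map.py` maps the OntoNotes 5 / Penn Treebank style tags to coarse Universal POS tags via spaCy's symbol IDs, and the `tag_map = TAG_MAP` line in `ChineseDefaults` exposes that mapping to the rest of the pipeline. The sketch below groups the fine-grained tags by their coarse tag so the mapping can be reviewed at a glance; the local `UPOS_NAMES` lookup is purely a convenience for this snippet (spaCy itself works with integer symbol IDs), and it assumes this branch is importable:

```python
# coding: utf8
from __future__ import unicode_literals

from collections import defaultdict

# Illustrative only: assumes this branch is importable.
from spacy.lang.zh.tag_map import TAG_MAP
from spacy.symbols import (POS, ADJ, ADP, ADV, CCONJ, CONJ, DET, INTJ, NOUN,
                           NUM, PART, PRON, PUNCT, VERB, X)

# Readable names for the coarse tags used in TAG_MAP; this lookup exists only
# so the grouping below can be printed.
UPOS_NAMES = {
    ADJ: "ADJ", ADP: "ADP", ADV: "ADV", CCONJ: "CCONJ", CONJ: "CONJ",
    DET: "DET", INTJ: "INTJ", NOUN: "NOUN", NUM: "NUM", PART: "PART",
    PRON: "PRON", PUNCT: "PUNCT", VERB: "VERB", X: "X",
}

# Group the fine-grained tags by the Universal POS tag they map to.
by_coarse = defaultdict(list)
for tag, analysis in TAG_MAP.items():
    by_coarse[UPOS_NAMES[analysis[POS]]].append(tag)

for coarse, tags in sorted(by_coarse.items()):
    print(coarse, "<-", ", ".join(sorted(tags)))
```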