From 46841958225c75035a13d1b03bef02a8f7a922d6 Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Thu, 7 Feb 2019 20:55:53 +0100 Subject: [PATCH 1/2] Rename contributer_agreement.md to .github/contributors/lauraBaakman.md --- contributer_agreement.md => .github/contributors/lauraBaakman.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename contributer_agreement.md => .github/contributors/lauraBaakman.md (100%) diff --git a/contributer_agreement.md b/.github/contributors/lauraBaakman.md similarity index 100% rename from contributer_agreement.md rename to .github/contributors/lauraBaakman.md From b41d64825af40c0bff5e9daff14d94f4d4618ec9 Mon Sep 17 00:00:00 2001 From: Julia Makogon Date: Thu, 7 Feb 2019 22:05:11 +0200 Subject: [PATCH 2/2] Ukrainian language added. Small fixes in Russian (#3241) * Classes for Ukrainian; small fix in Russian. * Contributor agreement --- .github/contributors/juliamakogon.md | 106 ++++++ spacy/lang/char_classes.py | 8 +- spacy/lang/ru/lemmatizer.py | 4 +- spacy/lang/ru/tag_map.py | 14 + spacy/lang/uk/__init__.py | 75 ++++ spacy/lang/uk/examples.py | 23 ++ spacy/lang/uk/lemmatizer.py | 12 + spacy/lang/uk/lex_attrs.py | 42 +++ spacy/lang/uk/stop_words.py | 404 ++++++++++++++++++++++ spacy/lang/uk/tag_map.py | 36 ++ spacy/lang/uk/tokenizer_exceptions.py | 38 ++ spacy/tests/conftest.py | 12 +- spacy/tests/lang/uk/__init__.py | 0 spacy/tests/lang/uk/test_tokenizer.py | 128 +++++++ spacy/tests/lang/uk/test_tokenizer_exc.py | 18 + 15 files changed, 914 insertions(+), 6 deletions(-) create mode 100644 .github/contributors/juliamakogon.md create mode 100644 spacy/lang/uk/__init__.py create mode 100644 spacy/lang/uk/examples.py create mode 100644 spacy/lang/uk/lemmatizer.py create mode 100644 spacy/lang/uk/lex_attrs.py create mode 100644 spacy/lang/uk/stop_words.py create mode 100644 spacy/lang/uk/tag_map.py create mode 100644 spacy/lang/uk/tokenizer_exceptions.py create mode 100644 spacy/tests/lang/uk/__init__.py create mode 100644 spacy/tests/lang/uk/test_tokenizer.py create mode 100644 spacy/tests/lang/uk/test_tokenizer_exc.py diff --git a/.github/contributors/juliamakogon.md b/.github/contributors/juliamakogon.md new file mode 100644 index 000000000..90133b55a --- /dev/null +++ b/.github/contributors/juliamakogon.md @@ -0,0 +1,106 @@ +# spaCy contributor agreement + +This spaCy Contributor Agreement (**"SCA"**) is based on the +[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf). +The SCA applies to any contribution that you make to any product or project +managed by us (the **"project"**), and sets out the intellectual property rights +you grant to us in the contributed materials. The term **"us"** shall mean +[ExplosionAI UG (haftungsbeschränkt)](https://explosion.ai/legal). The term +**"you"** shall mean the person or entity identified below. + +If you agree to be bound by these terms, fill in the information requested +below and include the filled-in version with your first pull request, under the +folder [`.github/contributors/`](/.github/contributors/). The name of the file +should be your GitHub username, with the extension `.md`. For example, the user +example_user would create the file `.github/contributors/example_user.md`. + +Read this agreement carefully before signing. These terms and conditions +constitute a binding legal agreement. + +## Contributor Agreement + +1. 
The term "contribution" or "contributed materials" means any source code, +object code, patch, tool, sample, graphic, specification, manual, +documentation, or any other material posted or submitted by you to the project. + +2. With respect to any worldwide copyrights, or copyright applications and +registrations, in your contribution: + + * you hereby assign to us joint ownership, and to the extent that such + assignment is or becomes invalid, ineffective or unenforceable, you hereby + grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge, + royalty-free, unrestricted license to exercise all rights under those + copyrights. This includes, at our option, the right to sublicense these same + rights to third parties through multiple levels of sublicensees or other + licensing arrangements; + + * you agree that each of us can do all things in relation to your + contribution as if each of us were the sole owners, and if one of us makes + a derivative work of your contribution, the one who makes the derivative + work (or has it made will be the sole owner of that derivative work; + + * you agree that you will not assert any moral rights in your contribution + against us, our licensees or transferees; + + * you agree that we may register a copyright in your contribution and + exercise all ownership rights associated with it; and + + * you agree that neither of us has any duty to consult with, obtain the + consent of, pay or render an accounting to the other for any use or + distribution of your contribution. + +3. With respect to any patents you own, or that you can license without payment +to any third party, you hereby grant to us a perpetual, irrevocable, +non-exclusive, worldwide, no-charge, royalty-free license to: + + * make, have made, use, sell, offer to sell, import, and otherwise transfer + your contribution in whole or in part, alone or in combination with or + included in any product, work or materials arising out of the project to + which your contribution was submitted, and + + * at our option, to sublicense these same rights to third parties through + multiple levels of sublicensees or other licensing arrangements. + +4. Except as set out above, you keep all right, title, and interest in your +contribution. The rights that you grant to us under these terms are effective +on the date you first submitted a contribution to us, even if your submission +took place before the date you sign these terms. + +5. You covenant, represent, warrant and agree that: + + * Each contribution that you submit is and shall be an original work of + authorship and you can legally grant the rights set out in this SCA; + + * to the best of your knowledge, each contribution will not violate any + third party's copyrights, trademarks, patents, or other intellectual + property rights; and + + * each contribution shall be in compliance with U.S. export control laws and + other applicable export and import laws. You agree to notify us if you + become aware of any circumstance which would make any of the foregoing + representations inaccurate in any respect. We may publicly disclose your + participation in the project, including the fact that you have signed the SCA. + +6. This SCA is governed by the laws of the State of California and applicable +U.S. Federal law. Any choice of law rules will not apply. + +7. Please place an “x” on one of the applicable statement below. 
Please do NOT +mark both statements: + + * [ ] I am signing on behalf of myself as an individual and no other person + or entity, including my employer, has or will have rights with respect to my + contributions. + + * [ ] I am signing on behalf of my employer or a legal entity and I have the + actual authority to contractually bind that entity. + +## Contributor Details + +| Field | Entry | +|------------------------------- | -------------------- | +| Name | Julia Makogon | +| Company name (if applicable) | Semantrum | +| Title or role (if applicable) | | +| Date | 07.02.2019 | +| GitHub username | juliamakogon | +| Website (optional) | | diff --git a/spacy/lang/char_classes.py b/spacy/lang/char_classes.py index db4624909..237ca33fa 100644 --- a/spacy/lang/char_classes.py +++ b/spacy/lang/char_classes.py @@ -21,9 +21,11 @@ _tatar_lower = r'[әөүҗңһ]' _tatar_upper = r'[ӘӨҮҖҢҺ]' _greek_lower = r'[α-ωάέίόώήύ]' _greek_upper = r'[Α-ΩΆΈΊΌΏΉΎ]' +_ukrainian_lower = r'[а-щюяіїєґ]' +_ukrainian_upper = r'[А-ЩЮЯІЇЄҐ]' -_upper = [_latin_upper, _russian_upper, _tatar_upper, _greek_upper] -_lower = [_latin_lower, _russian_lower, _tatar_lower, _greek_lower] +_upper = [_latin_upper, _russian_upper, _tatar_upper, _greek_upper, _ukrainian_upper] +_lower = [_latin_lower, _russian_lower, _tatar_lower, _greek_lower, _ukrainian_lower] _uncased = [_bengali, _hebrew, _persian, _sinhala] ALPHA = merge_char_classes(_upper + _lower + _uncased) @@ -35,7 +37,7 @@ _units = ('km km² km³ m m² m³ dm dm² dm³ cm cm² cm³ mm mm² mm³ ha µm 'TB T G M K % км км² км³ м м² м³ дм дм² дм³ см см² см³ мм мм² мм³ нм ' 'кг г мг м/с км/ч кПа Па мбар Кб КБ кб Мб МБ мб Гб ГБ гб Тб ТБ тб' 'كم كم² كم³ م م² م³ سم سم² سم³ مم مم² مم³ كم غرام جرام جم كغ ملغ كوب اكواب') -_currency = r'\$ £ € ¥ ฿ US\$ C\$ A\$ ₽ ﷼' +_currency = r'\$ £ € ¥ ฿ US\$ C\$ A\$ ₽ ﷼ ₴' # These expressions contain various unicode variations, including characters # used in Chinese (see #1333, #1340, #1351) – unless there are cross-language diff --git a/spacy/lang/ru/lemmatizer.py b/spacy/lang/ru/lemmatizer.py index 8ea6255d7..2cdf08e2e 100644 --- a/spacy/lang/ru/lemmatizer.py +++ b/spacy/lang/ru/lemmatizer.py @@ -8,7 +8,7 @@ from ...lemmatizer import Lemmatizer class RussianLemmatizer(Lemmatizer): _morph = None - def __init__(self): + def __init__(self, pymorphy2_lang='ru'): super(RussianLemmatizer, self).__init__() try: from pymorphy2 import MorphAnalyzer @@ -18,7 +18,7 @@ class RussianLemmatizer(Lemmatizer): 'try to fix it with "pip install pymorphy2==0.8"') if RussianLemmatizer._morph is None: - RussianLemmatizer._morph = MorphAnalyzer() + RussianLemmatizer._morph = MorphAnalyzer(lang=pymorphy2_lang) def __call__(self, string, univ_pos, morphology=None): univ_pos = self.normalize_univ_pos(univ_pos) diff --git a/spacy/lang/ru/tag_map.py b/spacy/lang/ru/tag_map.py index 1369a9dbf..a62f472d5 100644 --- a/spacy/lang/ru/tag_map.py +++ b/spacy/lang/ru/tag_map.py @@ -69,7 +69,9 @@ TAG_MAP = { 'ADJ__Degree=Pos|Number=Plur|Variant=Short': {POS: ADJ, 'Degree': 'Pos', 'Number': 'Plur', 'Variant': 'Short'}, 'ADJ__Foreign=Yes': {POS: ADJ, 'Foreign': 'Yes'}, 'ADJ___': {POS: ADJ}, + 'ADJ': {POS: ADJ}, 'ADP___': {POS: ADP}, + 'ADP': {POS: ADP}, 'ADV__Degree=Cmp': {POS: ADV, 'Degree': 'Cmp'}, 'ADV__Degree=Pos': {POS: ADV, 'Degree': 'Pos'}, 'ADV__Polarity=Neg': {POS: ADV, 'Polarity': 'Neg'}, @@ -91,6 +93,7 @@ TAG_MAP = { 'AUX__Aspect=Imp|Tense=Pres|VerbForm=Conv|Voice=Act': {POS: AUX, 'Aspect': 'Imp', 'Tense': 'Pres', 'VerbForm': 'Conv', 'Voice': 'Act'}, 
'AUX__Aspect=Imp|VerbForm=Inf|Voice=Act': {POS: AUX, 'Aspect': 'Imp', 'VerbForm': 'Inf', 'Voice': 'Act'}, 'CCONJ___': {POS: CCONJ}, + 'CCONJ': {POS: CCONJ}, 'DET__Animacy=Inan|Case=Acc|Gender=Masc|Number=Sing': {POS: DET, 'Animacy': 'Inan', 'Case': 'Acc', 'Gender': 'Masc', 'Number': 'Sing'}, 'DET__Animacy=Inan|Case=Acc|Gender=Neut|Number=Sing': {POS: DET, 'Animacy': 'Inan', 'Case': 'Acc', 'Gender': 'Neut', 'Number': 'Sing'}, 'DET__Animacy=Inan|Case=Gen|Gender=Fem|Number=Sing': {POS: DET, 'Animacy': 'Inan', 'Case': 'Gen', 'Gender': 'Fem', 'Number': 'Sing'}, @@ -124,6 +127,7 @@ TAG_MAP = { 'DET__Case=Nom|Number=Plur': {POS: DET, 'Case': 'Nom', 'Number': 'Plur'}, 'DET__Gender=Masc|Number=Sing': {POS: DET, 'Gender': 'Masc', 'Number': 'Sing'}, 'INTJ___': {POS: INTJ}, + 'INTJ': {POS: INTJ}, 'NOUN__Animacy=Anim|Case=Acc|Gender=Fem|Number=Plur': {POS: NOUN, 'Animacy': 'Anim', 'Case': 'Acc', 'Gender': 'Fem', 'Number': 'Plur'}, 'NOUN__Animacy=Anim|Case=Acc|Gender=Fem|Number=Sing': {POS: NOUN, 'Animacy': 'Anim', 'Case': 'Acc', 'Gender': 'Fem', 'Number': 'Sing'}, 'NOUN__Animacy=Anim|Case=Acc|Gender=Masc|Number=Plur': {POS: NOUN, 'Animacy': 'Anim', 'Case': 'Acc', 'Gender': 'Masc', 'Number': 'Plur'}, @@ -216,6 +220,7 @@ TAG_MAP = { 'NOUN__Case=Gen|Degree=Pos|Gender=Fem|Number=Sing': {POS: NOUN, 'Case': 'Gen', 'Degree': 'Pos', 'Gender': 'Fem', 'Number': 'Sing'}, 'NOUN__Foreign=Yes': {POS: NOUN, 'Foreign': 'Yes'}, 'NOUN___': {POS: NOUN}, + 'NOUN': {POS: NOUN}, 'NUM__Animacy=Anim|Case=Acc': {POS: NUM, 'Animacy': 'Anim', 'Case': 'Acc'}, 'NUM__Animacy=Anim|Case=Acc|Gender=Fem': {POS: NUM, 'Animacy': 'Anim', 'Case': 'Acc', 'Gender': 'Fem'}, 'NUM__Animacy=Anim|Case=Acc|Gender=Masc': {POS: NUM, 'Animacy': 'Anim', 'Case': 'Acc', 'Gender': 'Masc'}, @@ -247,9 +252,11 @@ TAG_MAP = { 'NUM__Case=Nom|Gender=Masc': {POS: NUM, 'Case': 'Nom', 'Gender': 'Masc'}, 'NUM__Case=Nom|Gender=Neut': {POS: NUM, 'Case': 'Nom', 'Gender': 'Neut'}, 'NUM___': {POS: NUM}, + 'NUM': {POS: NUM}, 'PART__Mood=Cnd': {POS: PART, 'Mood': 'Cnd'}, 'PART__Polarity=Neg': {POS: PART, 'Polarity': 'Neg'}, 'PART___': {POS: PART}, + 'PART': {POS: PART}, 'PRON__Animacy=Anim|Case=Acc|Gender=Masc|Number=Plur': {POS: PRON, 'Animacy': 'Anim', 'Case': 'Acc', 'Gender': 'Masc', 'Number': 'Plur'}, 'PRON__Animacy=Anim|Case=Acc|Number=Plur': {POS: PRON, 'Animacy': 'Anim', 'Case': 'Acc', 'Number': 'Plur'}, 'PRON__Animacy=Anim|Case=Dat|Gender=Masc|Number=Sing': {POS: PRON, 'Animacy': 'Anim', 'Case': 'Dat', 'Gender': 'Masc', 'Number': 'Sing'}, @@ -327,6 +334,7 @@ TAG_MAP = { 'PRON__Case=Nom|Number=Sing|Person=2': {POS: PRON, 'Case': 'Nom', 'Number': 'Sing', 'Person': '2'}, 'PRON__Number=Sing|Person=1': {POS: PRON, 'Number': 'Sing', 'Person': '1'}, 'PRON___': {POS: PRON}, + 'PRON': {POS: PRON}, 'PROPN__Animacy=Anim|Case=Acc|Gender=Fem|Number=Plur': {POS: PROPN, 'Animacy': 'Anim', 'Case': 'Acc', 'Gender': 'Fem', 'Number': 'Plur'}, 'PROPN__Animacy=Anim|Case=Acc|Gender=Fem|Number=Sing': {POS: PROPN, 'Animacy': 'Anim', 'Case': 'Acc', 'Gender': 'Fem', 'Number': 'Sing'}, 'PROPN__Animacy=Anim|Case=Acc|Gender=Masc|Number=Plur': {POS: PROPN, 'Animacy': 'Anim', 'Case': 'Acc', 'Gender': 'Masc', 'Number': 'Plur'}, @@ -422,10 +430,14 @@ TAG_MAP = { 'PROPN__Foreign=Yes': {POS: PROPN, 'Foreign': 'Yes'}, 'PROPN__Number=Sing': {POS: PROPN, 'Number': 'Sing'}, 'PROPN___': {POS: PROPN}, + 'PROPN': {POS: PROPN}, 'PUNCT___': {POS: PUNCT}, + 'PUNCT': {POS: PUNCT}, 'SCONJ__Mood=Cnd': {POS: SCONJ, 'Mood': 'Cnd'}, 'SCONJ___': {POS: SCONJ}, + 'SCONJ': {POS: SCONJ}, 'SYM___': {POS: SYM}, + 'SYM': 
{POS: SYM}, 'VERB__Animacy=Anim|Aspect=Imp|Case=Acc|Gender=Masc|Number=Sing|Tense=Past|VerbForm=Part|Voice=Act': {POS: VERB, 'Animacy': 'Anim', 'Aspect': 'Imp', 'Case': 'Acc', 'Gender': 'Masc', 'Number': 'Sing', 'Tense': 'Past', 'VerbForm': 'Part', 'Voice': 'Act'}, 'VERB__Animacy=Anim|Aspect=Imp|Case=Acc|Gender=Masc|Number=Sing|Tense=Past|VerbForm=Part|Voice=Mid': {POS: VERB, 'Animacy': 'Anim', 'Aspect': 'Imp', 'Case': 'Acc', 'Gender': 'Masc', 'Number': 'Sing', 'Tense': 'Past', 'VerbForm': 'Part', 'Voice': 'Mid'}, 'VERB__Animacy=Anim|Aspect=Imp|Case=Acc|Gender=Masc|Number=Sing|Tense=Pres|VerbForm=Part|Voice=Act': {POS: VERB, 'Animacy': 'Anim', 'Aspect': 'Imp', 'Case': 'Acc', 'Gender': 'Masc', 'Number': 'Sing', 'Tense': 'Pres', 'VerbForm': 'Part', 'Voice': 'Act'}, @@ -726,6 +738,8 @@ TAG_MAP = { 'VERB__Aspect=Perf|VerbForm=Inf|Voice=Mid': {POS: VERB, 'Aspect': 'Perf', 'VerbForm': 'Inf', 'Voice': 'Mid'}, 'VERB__Voice=Act': {POS: VERB, 'Voice': 'Act'}, 'VERB___': {POS: VERB}, + 'VERB': {POS: VERB}, 'X__Foreign=Yes': {POS: X, 'Foreign': 'Yes'}, 'X___': {POS: X}, + 'X': {POS: X}, } diff --git a/spacy/lang/uk/__init__.py b/spacy/lang/uk/__init__.py new file mode 100644 index 000000000..8ee9dab52 --- /dev/null +++ b/spacy/lang/uk/__init__.py @@ -0,0 +1,75 @@ +# coding: utf8 +from __future__ import unicode_literals + +from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS +from .stop_words import STOP_WORDS +from .lex_attrs import LEX_ATTRS + +# uncomment if files are available +# from .norm_exceptions import NORM_EXCEPTIONS +# from .tag_map import TAG_MAP +# from .morph_rules import MORPH_RULES + +# uncomment if lookup-based lemmatizer is available +# from .lemmatizer import LOOKUP +# from ...lemmatizerlookup import Lemmatizer + +from ..tokenizer_exceptions import BASE_EXCEPTIONS +from ..norm_exceptions import BASE_NORMS +from ...util import update_exc, add_lookups +from ...language import Language +from ...attrs import LANG, LIKE_NUM, NORM +# from .tag_map import TAG_MAP +from .lemmatizer import UkrainianLemmatizer + + +# Create a Language subclass +# Documentation: https://spacy.io/docs/usage/adding-languages + +# This file should be placed in spacy/lang/xx (ISO code of language). +# Before submitting a pull request, make sure the remove all comments from the +# language data files, and run at least the basic tokenizer tests. Simply add the +# language ID to the list of languages in spacy/tests/conftest.py to include it +# in the basic tokenizer sanity tests. You can optionally add a fixture for the +# language's tokenizer and add more specific tests. 
For more info, see the +# tests documentation: https://github.com/explosion/spaCy/tree/master/spacy/tests + + +class UkrainianDefaults(Language.Defaults): + lex_attr_getters = dict(Language.Defaults.lex_attr_getters) + lex_attr_getters[LANG] = lambda text: 'uk' # ISO code + # add more norm exception dictionaries here + lex_attr_getters[NORM] = add_lookups(Language.Defaults.lex_attr_getters[NORM], BASE_NORMS) + + # overwrite functions for lexical attributes + lex_attr_getters.update(LEX_ATTRS) + + # add custom tokenizer exceptions to base exceptions + tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS) + + # add stop words + stop_words = STOP_WORDS + + # if available: add tag map + # tag_map = dict(TAG_MAP) + + # if available: add morph rules + # morph_rules = dict(MORPH_RULES) + + # if available: add lookup lemmatizer + # @classmethod + # def create_lemmatizer(cls, nlp=None): + # return Lemmatizer(LOOKUP) + + @classmethod + def create_lemmatizer(cls, nlp=None): + return UkrainianLemmatizer() + + +class Ukrainian(Language): + lang = 'uk' # ISO code + Defaults = UkrainianDefaults # set Defaults to custom language defaults + + +# set default export – this allows the language class to be lazy-loaded +__all__ = ['Ukrainian'] diff --git a/spacy/lang/uk/examples.py b/spacy/lang/uk/examples.py new file mode 100644 index 000000000..22df2b81c --- /dev/null +++ b/spacy/lang/uk/examples.py @@ -0,0 +1,23 @@ +# coding: utf8 +from __future__ import unicode_literals + + +""" +Example sentences to test spaCy and its language models. + +>>> from spacy.lang.uk.examples import sentences +>>> docs = nlp.pipe(sentences) +""" + + +sentences = [ + "Ніч на середу буде морозною.", + "Чим кращі книги ти читав, тим гірше спиш.", # Serhiy Zhadan + "Найстаріші ґудзики, відомі людству, археологи знайшли в долині ріки Інд.", + "Слов'янське слово «Україна» вперше згадується у Київському літописному зводі за Іпатіївським списком під 1187 роком.", # wikipedia + "Де у Києві найсмачніша кава?", + "Від Нижнього озера довгими дерев’яними сходами, над якими синьо й біло горіли маленькі коробочки-ліхтарики, підіймалися до нього двоє стовусів: найкращий друг Вертутій і його дванадцятилітній онук Чублик.", # blyznets_viktor_semenovych/zemlia_svitliachkiv + "Китайський космічний зонд \"Чан'е-4\" вперше в історії здійснив м'яку посадку на зворотному боці Місяця.", + "Коли до губ твоїх лишається півподиху, коли до губ твоїх лишається півкроку – зіниці твої виткані із подиву, в очах у тебе синьо і широко.", # Hryhorij Czubaj + "Дорогу сестру збираю у дорогу, а брати вирішили не брати машину." 
# homographs +] diff --git a/spacy/lang/uk/lemmatizer.py b/spacy/lang/uk/lemmatizer.py new file mode 100644 index 000000000..8db294507 --- /dev/null +++ b/spacy/lang/uk/lemmatizer.py @@ -0,0 +1,12 @@ +from ..ru.lemmatizer import RussianLemmatizer + + +class UkrainianLemmatizer(RussianLemmatizer): + + def __init__(self, pymorphy2_lang='ru'): + try: + super(UkrainianLemmatizer, self).__init__(pymorphy2_lang='uk') + except ImportError: + raise ImportError( + 'The Ukrainian lemmatizer requires the pymorphy2 library and dictionaries: ' + 'try to fix it with "pip install git+https://github.com/kmike/pymorphy2.git pymorphy2-dicts-uk"') diff --git a/spacy/lang/uk/lex_attrs.py b/spacy/lang/uk/lex_attrs.py new file mode 100644 index 000000000..98ded5fb8 --- /dev/null +++ b/spacy/lang/uk/lex_attrs.py @@ -0,0 +1,42 @@ +# coding: utf8 +from __future__ import unicode_literals + +# import the symbols for the attrs you want to overwrite +from ...attrs import LIKE_NUM + + +# Overwriting functions for lexical attributes +# Documentation: https://localhost:1234/docs/usage/adding-languages#lex-attrs +# Most of these functions, like is_lower or like_url should be language- +# independent. Others, like like_num (which includes both digits and number +# words), requires customisation. + + +# Example: check if token resembles a number +_num_words = ["більйон", "вісім", "вісімдесят", "вісімнадцять", "вісімсот", "восьмий", "два", "двадцять", "дванадцять", + "двісті", "дев'яносто", "дев'ятнадцять", "дев'ятсот", "дев'ять", "десять", "децильйон", "квадрильйон", + "квінтильйон", "мільйон", "мільярд", "нонильйон", "один", "одинадцять", "октильйон", "п'ятий", + "п'ятисотий", "п'ятнадцять", "п'ятсот", "п'ять", "секстильйон", "септильйон", "сім", "сімдесят", + "сімнадцять", "сімсот", "сорок", "сто", "тисяча", "три", "тридцять", "трильйон", "тринадцять", "триста", + "чотири", "чотириста", "чотирнадцять", "шістдесят", "шістнадцять", "шістсот", "шість"] + + +def like_num(text): + text = text.replace(',', '').replace('.', '') + if text.isdigit(): + return True + if text.count('/') == 1: + num, denom = text.split('/') + if num.isdigit() and denom.isdigit(): + return True + if text in _num_words: + return True + return False + + +# Create dictionary of functions to overwrite. The default lex_attr_getters are +# updated with this one, so only the functions defined here are overwritten. + +LEX_ATTRS = { + LIKE_NUM: like_num +} diff --git a/spacy/lang/uk/stop_words.py b/spacy/lang/uk/stop_words.py new file mode 100644 index 000000000..f5b85312f --- /dev/null +++ b/spacy/lang/uk/stop_words.py @@ -0,0 +1,404 @@ +# encoding: utf8 +from __future__ import unicode_literals + + +# Add stop words +# Documentation: https://spacy.io/docs/usage/adding-languages#stop-words +# To improve readability, words should be ordered alphabetically and separated +# by spaces and newlines. When adding stop words from an online source, always +# include the link in a comment. Make sure to proofread and double-check the +# words – lists available online are often known to contain mistakes. 
+ + +STOP_WORDS = set("""а +або +адже +але +алло +багато +без +безперервно +би +більш +більше +біля +близько +був +буває +буде +будемо +будете +будеш +буду +будуть +будь +була +були +було +бути +бывь +в +важлива +важливе +важливий +важливі +вам +вами +вас +ваш +ваша +ваше +ваші +вгорі +вгору +вдалині +вже +ви +від +відсотків +він +вісім +вісімнадцятий +вісімнадцять +вниз +внизу +вона +вони +воно +восьмий +всього +втім +г +геть +говорив +говорить +давно +далеко +далі +дарма +два +двадцятий +двадцять +дванадцятий +дванадцять +дві +двох +де +дев'ятий +дев'ятнадцятий +дев'ятнадцять +дев'ять +декілька +день +десятий +десять +дійсно +для +дня +до +добре +довго +доки +досить +другий +дуже +же +життя +з +за +завжди +зазвичай +зайнята +зайнятий +зайняті +зайнято +занадто +зараз +зате +звичайно +звідси +звідусіль +здається +значить +знову +зовсім +ім'я +іноді +інша +інше +інший +інших +інші +її +їй +їх +його +йому +ким +кого +кожен +кожна +кожне +кожні +коли +кому +краще +крейдуючи +кругом +куди +ласка +лише +люди +людина +майже +мало +мати +мене +мені +менш +менше +ми +мимо +міг +між +мій +мільйонів +мною +могти +моє +мож +може +можна +можно +можуть +можхо +мої +мор +моя +на +навіть +навіщо +навкруги +нагорі +над +назад +найбільш +нам +нами +нарешті +нас +наш +наша +наше +наші +не +небагато +недалеко +немає +нерідко +нещодавно +нею +нибудь +нижче +низько +ним +ними +них +ні +ніби +ніколи +нікуди +нічого +ну +нх +нього +о +обоє +один +одинадцятий +одинадцять +однієї +одній +одного +означає +окрім +он +особливо +ось +перед +перший +під +пізніше +пір +по +повинно +подів +поки +пора +поруч +посеред +потім +потрібно +почала +прекрасне +прекрасно +при +про +просто +проте +проти +п'ятий +п'ятнадцятий +п'ятнадцять +п'ять +раз +раніше +рано +раптом +рік +роки +років +року +сам +сама +саме +самим +самими +самих +самі +самій +само +самого +самому +саму +світу +свого +своє +свої +своїй +своїх +свою +сеаой +себе +сім +сімнадцятий +сімнадцять +сказав +сказала +сказати +скільки +скрізь +собі +собою +спасибі +спочатку +справ +став +суть +сьогодні +сьомий +т +та +так +така +таке +такий +такі +також +там +твій +твоє +твоя +те +тебе +теж +тепер +ти +тим +тими +тисяч +тих +ті +тією +тільки +тобі +тобою +того +тоді +той +том +тому +треба +третій +три +тринадцятий +тринадцять +трохи +ту +туди +тут +у +увесь +уміти +усе +усі +усім +усіма +усіх +усію +усього +усьому +усю +усюди +уся +хіба +хотіти +хоч +хоча +хочеш +хто +це +цей +цим +цими +цих +ці +цій +цього +цьому +цю +ця +час +частіше +часто +часу +через +четвертий +чи +чим +численна +численне +численний +численні +чого +чому +чотири +чотирнадцятий +чотирнадцять +шістнадцятий +шістнадцять +шість +шостий +ще +що +щоб +я +як +яка +який +яких +які +якій +якого +якщо +""".split()) diff --git a/spacy/lang/uk/tag_map.py b/spacy/lang/uk/tag_map.py new file mode 100644 index 000000000..f5ae22f43 --- /dev/null +++ b/spacy/lang/uk/tag_map.py @@ -0,0 +1,36 @@ +# coding: utf8 +from __future__ import unicode_literals + +from ..symbols import POS, ADV, NOUN, ADP, PRON, SCONJ, PROPN, DET, SYM, INTJ +from ..symbols import PUNCT, NUM, AUX, X, CONJ, ADJ, VERB, PART, SPACE, CCONJ + + +# Add a tag map +# Documentation: https://spacy.io/docs/usage/adding-languages#tag-map +# Universal Dependencies: http://universaldependencies.org/u/pos/all.html +# The keys of the tag map should be strings in your tag set. The dictionary must +# have an entry POS whose value is one of the Universal Dependencies tags. +# Optionally, you can also include morphological features or other attributes. 
+ + +TAG_MAP = { + "ADV": {POS: ADV}, + "NOUN": {POS: NOUN}, + "ADP": {POS: ADP}, + "PRON": {POS: PRON}, + "SCONJ": {POS: SCONJ}, + "PROPN": {POS: PROPN}, + "DET": {POS: DET}, + "SYM": {POS: SYM}, + "INTJ": {POS: INTJ}, + "PUNCT": {POS: PUNCT}, + "NUM": {POS: NUM}, + "AUX": {POS: AUX}, + "X": {POS: X}, + "CONJ": {POS: CONJ}, + "CCONJ": {POS: CCONJ}, + "ADJ": {POS: ADJ}, + "VERB": {POS: VERB}, + "PART": {POS: PART}, + "SP": {POS: SPACE} +} diff --git a/spacy/lang/uk/tokenizer_exceptions.py b/spacy/lang/uk/tokenizer_exceptions.py new file mode 100644 index 000000000..c5e1595eb --- /dev/null +++ b/spacy/lang/uk/tokenizer_exceptions.py @@ -0,0 +1,38 @@ +# coding: utf8 +from __future__ import unicode_literals + +# import symbols – if you need to use more, add them here +from ...symbols import ORTH, LEMMA, POS, NORM, NOUN + + +# Add tokenizer exceptions +# Documentation: https://spacy.io/docs/usage/adding-languages#tokenizer-exceptions +# Feel free to use custom logic to generate repetitive exceptions more efficiently. +# If an exception is split into more than one token, the ORTH values combined always +# need to match the original string. + +# Exceptions should be added in the following format: + +_exc = {} + +for exc_data in [ + {ORTH: "вул.", LEMMA: "вулиця", NORM: "вулиця", POS: NOUN}, + {ORTH: "ім.", LEMMA: "ім'я", NORM: "імені", POS: NOUN}, + {ORTH: "просп.", LEMMA: "проспект", NORM: "проспект", POS: NOUN}, + {ORTH: "бул.", LEMMA: "бульвар", NORM: "бульвар", POS: NOUN}, + {ORTH: "пров.", LEMMA: "провулок", NORM: "провулок", POS: NOUN}, + {ORTH: "пл.", LEMMA: "площа", NORM: "площа", POS: NOUN}, + {ORTH: "г.", LEMMA: "гора", NORM: "гора", POS: NOUN}, + {ORTH: "п.", LEMMA: "пан", NORM: "пан", POS: NOUN}, + {ORTH: "м.", LEMMA: "місто", NORM: "місто", POS: NOUN}, + {ORTH: "проф.", LEMMA: "професор", NORM: "професор", POS: NOUN}, + {ORTH: "акад.", LEMMA: "академік", NORM: "академік", POS: NOUN}, + {ORTH: "доц.", LEMMA: "доцент", NORM: "доцент", POS: NOUN}, + {ORTH: "оз.", LEMMA: "озеро", NORM: "озеро", POS: NOUN}]: + _exc[exc_data[ORTH]] = [exc_data] + + +# To keep things clean and readable, it's recommended to only declare the +# TOKENIZER_EXCEPTIONS at the bottom: + +TOKENIZER_EXCEPTIONS = _exc diff --git a/spacy/tests/conftest.py b/spacy/tests/conftest.py index 129d1d625..a0c6f4540 100644 --- a/spacy/tests/conftest.py +++ b/spacy/tests/conftest.py @@ -15,7 +15,7 @@ from .. 
import util # here if it's using spaCy's tokenizer (not a different library) # TODO: re-implement generic tokenizer tests _languages = ['bn', 'ca', 'da', 'de', 'el', 'en', 'es', 'fi', 'fr', 'ga', 'he', 'hu', 'id', - 'it', 'nb', 'nl', 'pl', 'pt', 'ro', 'ru', 'sv', 'tr', 'ar', 'ur', 'tt', + 'it', 'nb', 'nl', 'pl', 'pt', 'ro', 'ru', 'sv', 'tr', 'ar', 'ur', 'tt', 'uk', 'xx'] _models = {'en': ['en_core_web_sm'], @@ -49,6 +49,11 @@ def RU(request): pymorphy = pytest.importorskip('pymorphy2') return util.get_lang_class('ru')() +@pytest.fixture() +def UK(request): + pymorphy = pytest.importorskip('pymorphy2') + return util.get_lang_class('uk')() + @pytest.fixture() def JA(request): mecab = pytest.importorskip("MeCab") @@ -175,6 +180,11 @@ def ru_tokenizer(): pymorphy = pytest.importorskip('pymorphy2') return util.get_lang_class('ru').Defaults.create_tokenizer() +@pytest.fixture(scope='session') +def uk_tokenizer(): + pymorphy = pytest.importorskip('pymorphy2') + return util.get_lang_class('uk').Defaults.create_tokenizer() + @pytest.fixture(scope='session') def ca_tokenizer(): return util.get_lang_class('ca').Defaults.create_tokenizer() diff --git a/spacy/tests/lang/uk/__init__.py b/spacy/tests/lang/uk/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/spacy/tests/lang/uk/test_tokenizer.py b/spacy/tests/lang/uk/test_tokenizer.py new file mode 100644 index 000000000..ded8e9300 --- /dev/null +++ b/spacy/tests/lang/uk/test_tokenizer.py @@ -0,0 +1,128 @@ +# coding: utf-8 +"""Test that open, closed and paired punctuation is split off correctly.""" + + +from __future__ import unicode_literals + +import pytest + + +PUNCT_OPEN = ['(', '[', '{', '*'] +PUNCT_CLOSE = [')', ']', '}', '*'] +PUNCT_PAIRED = [('(', ')'), ('[', ']'), ('{', '}'), ('*', '*')] + + +@pytest.mark.parametrize('text', ["(", "((", "<"]) +def test_uk_tokenizer_handles_only_punct(uk_tokenizer, text): + tokens = uk_tokenizer(text) + assert len(tokens) == len(text) + + +@pytest.mark.parametrize('punct', PUNCT_OPEN) +@pytest.mark.parametrize('text', ["Привет", "Привіт", "Ґелґотати", "З'єднання", "Єдність", "їхні"]) +def test_uk_tokenizer_splits_open_punct(uk_tokenizer, punct, text): + tokens = uk_tokenizer(punct + text) + assert len(tokens) == 2 + assert tokens[0].text == punct + assert tokens[1].text == text + + +@pytest.mark.parametrize('punct', PUNCT_CLOSE) +@pytest.mark.parametrize('text', ["Привет", "Привіт", "Ґелґотати", "З'єднання", "Єдність", "їхні"]) +def test_uk_tokenizer_splits_close_punct(uk_tokenizer, punct, text): + tokens = uk_tokenizer(text + punct) + assert len(tokens) == 2 + assert tokens[0].text == text + assert tokens[1].text == punct + + +@pytest.mark.parametrize('punct', PUNCT_OPEN) +@pytest.mark.parametrize('punct_add', ["`"]) +@pytest.mark.parametrize('text', ["Привет", "Привіт", "Ґелґотати", "З'єднання", "Єдність", "їхні"]) +def test_uk_tokenizer_splits_two_diff_open_punct(uk_tokenizer, punct, punct_add, text): + tokens = uk_tokenizer(punct + punct_add + text) + assert len(tokens) == 3 + assert tokens[0].text == punct + assert tokens[1].text == punct_add + assert tokens[2].text == text + + +@pytest.mark.parametrize('punct', PUNCT_CLOSE) +@pytest.mark.parametrize('punct_add', ["'"]) +@pytest.mark.parametrize('text', ["Привет", "Привіт", "Ґелґотати", "З'єднання", "Єдність", "їхні"]) +def test_uk_tokenizer_splits_two_diff_close_punct(uk_tokenizer, punct, punct_add, text): + tokens = uk_tokenizer(text + punct + punct_add) + assert len(tokens) == 3 + assert tokens[0].text == text + assert tokens[1].text 
== punct + assert tokens[2].text == punct_add + + +@pytest.mark.parametrize('punct', PUNCT_OPEN) +@pytest.mark.parametrize('text', ["Привет", "Привіт", "Ґелґотати", "З'єднання", "Єдність", "їхні"]) +def test_uk_tokenizer_splits_same_open_punct(uk_tokenizer, punct, text): + tokens = uk_tokenizer(punct + punct + punct + text) + assert len(tokens) == 4 + assert tokens[0].text == punct + assert tokens[3].text == text + + +@pytest.mark.parametrize('punct', PUNCT_CLOSE) +@pytest.mark.parametrize('text', ["Привет", "Привіт", "Ґелґотати", "З'єднання", "Єдність", "їхні"]) +def test_uk_tokenizer_splits_same_close_punct(uk_tokenizer, punct, text): + tokens = uk_tokenizer(text + punct + punct + punct) + assert len(tokens) == 4 + assert tokens[0].text == text + assert tokens[1].text == punct + + +@pytest.mark.parametrize('text', ["'Тест"]) +def test_uk_tokenizer_splits_open_appostrophe(uk_tokenizer, text): + tokens = uk_tokenizer(text) + assert len(tokens) == 2 + assert tokens[0].text == "'" + + +@pytest.mark.parametrize('text', ["Тест''"]) +def test_uk_tokenizer_splits_double_end_quote(uk_tokenizer, text): + tokens = uk_tokenizer(text) + assert len(tokens) == 2 + tokens_punct = uk_tokenizer("''") + assert len(tokens_punct) == 1 + + +@pytest.mark.parametrize('punct_open,punct_close', PUNCT_PAIRED) +@pytest.mark.parametrize('text', ["Привет", "Привіт", "Ґелґотати", "З'єднання", "Єдність", "їхні"]) +def test_uk_tokenizer_splits_open_close_punct(uk_tokenizer, punct_open, + punct_close, text): + tokens = uk_tokenizer(punct_open + text + punct_close) + assert len(tokens) == 3 + assert tokens[0].text == punct_open + assert tokens[1].text == text + assert tokens[2].text == punct_close + + +@pytest.mark.parametrize('punct_open,punct_close', PUNCT_PAIRED) +@pytest.mark.parametrize('punct_open2,punct_close2', [("`", "'")]) +@pytest.mark.parametrize('text', ["Привет", "Привіт", "Ґелґотати", "З'єднання", "Єдність", "їхні"]) +def test_uk_tokenizer_two_diff_punct(uk_tokenizer, punct_open, punct_close, + punct_open2, punct_close2, text): + tokens = uk_tokenizer(punct_open2 + punct_open + text + punct_close + punct_close2) + assert len(tokens) == 5 + assert tokens[0].text == punct_open2 + assert tokens[1].text == punct_open + assert tokens[2].text == text + assert tokens[3].text == punct_close + assert tokens[4].text == punct_close2 + + +@pytest.mark.parametrize('text', ["Привет.", "Привіт.", "Ґелґотати.", "З'єднання.", "Єдність.", "їхні."]) +def test_uk_tokenizer_splits_trailing_dot(uk_tokenizer, text): + tokens = uk_tokenizer(text) + assert tokens[1].text == "." + + +def test_uk_tokenizer_splits_bracket_period(uk_tokenizer): + text = "(Раз, два, три, проверка)." + tokens = uk_tokenizer(text) + assert tokens[len(tokens) - 1].text == "." diff --git a/spacy/tests/lang/uk/test_tokenizer_exc.py b/spacy/tests/lang/uk/test_tokenizer_exc.py new file mode 100644 index 000000000..88def72e7 --- /dev/null +++ b/spacy/tests/lang/uk/test_tokenizer_exc.py @@ -0,0 +1,18 @@ +# coding: utf-8 +"""Test that tokenizer exceptions are parsed correctly.""" + + +from __future__ import unicode_literals + +import pytest + + +@pytest.mark.parametrize('text,norms,lemmas', [("ім.", ["імені"], ["ім'я"]), + ("проф.", ["професор"], ["професор"])]) +def test_uk_tokenizer_abbrev_exceptions(uk_tokenizer, text, norms, lemmas): + tokens = uk_tokenizer(text) + assert len(tokens) == 1 + assert [token.norm_ for token in tokens] == norms + + +
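
A minimal usage sketch of the new language class, for context only; it is not part of the patch above. It assumes spaCy with this change applied and pymorphy2 with the Ukrainian dictionaries installed, as the lemmatizer's own error message suggests: pip install git+https://github.com/kmike/pymorphy2.git pymorphy2-dicts-uk. The tokenizer-only line mirrors the uk_tokenizer fixture added to spacy/tests/conftest.py.

    # coding: utf8
    from __future__ import unicode_literals

    from spacy.lang.uk import Ukrainian

    # Tokenizer only - this is what the uk_tokenizer fixture in conftest.py creates.
    uk_tokenizer = Ukrainian.Defaults.create_tokenizer()
    tokens = uk_tokenizer("Запрошуємо на каву, вул. Хрещатик!")
    print([t.text for t in tokens])  # "вул." stays one token via TOKENIZER_EXCEPTIONS

    # Full language class: UkrainianDefaults wires up the stop words, lexical
    # attributes and the pymorphy2-based UkrainianLemmatizer.
    nlp = Ukrainian()
    doc = nlp("Де у Києві найсмачніша кава?")
    print([(t.text, t.is_stop) for t in doc])  # stop-word flags from stop_words.py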