From dc5be7d2f35ca34fccbfb9e33ecf9dd7160899c5 Mon Sep 17 00:00:00 2001 From: mollerhoj Date: Mon, 3 Jul 2017 15:40:58 +0200 Subject: [PATCH 01/77] Cleanup list of Danish stopwords --- spacy/lang/da/stop_words.py | 43 ++++++++++++++++++------------------- 1 file changed, 21 insertions(+), 22 deletions(-) diff --git a/spacy/lang/da/stop_words.py b/spacy/lang/da/stop_words.py index ac2195f10..ba448f8f3 100644 --- a/spacy/lang/da/stop_words.py +++ b/spacy/lang/da/stop_words.py @@ -1,47 +1,46 @@ # encoding: utf8 from __future__ import unicode_literals - -# Source: https://github.com/stopwords-iso/stopwords-da +# Source: Handpicked by Jens Dahl Møllerhøj. STOP_WORDS = set(""" -ad af aldrig alle alt anden andet andre at +af aldrig alene alle allerede alligevel alt altid anden andet andre at -bare begge blev blive bliver +bag begge blandt blev blive bliver burde bør -da de dem den denne der deres det dette dig din dine disse dit dog du +da de dem den denne dens der derefter deres derfor derfra deri dermed derpå derved det dette dig din dine disse dog du -efter ej eller en end ene eneste enhver er et +efter egen eller ellers en end endnu ene eneste enhver ens enten er et -far fem fik fire flere fleste for fordi forrige fra få får før +flere flest fleste for foran fordi forrige fra få før først -god godt +gennem gjorde gjort god gør gøre gørende -ham han hans har havde have hej helt hende hendes her hos hun hvad hvem hver -hvilken hvis hvor hvordan hvorfor hvornår +ham han hans har havde have hel heller hen hende hendes henover her herefter heri hermed herpå hun hvad hvem hver hvilke hvilken hvilkes hvis hvor hvordan hvorefter hvorfor hvorfra hvorhen hvori hvorimod hvornår hvorved -i ikke ind ingen intet +i igen igennem ikke imellem imens imod ind indtil ingen intet -ja jeg jer jeres jo +jeg jer jeres jo -kan kom komme kommer kun kunne +kan kom kommer kun kunne -lad lav lidt lige lille +lad langs lav lave lavet lidt lige ligesom lille længere -man mand mange med meget men mens mere mig min mine mit mod må +man mange med meget mellem men mens mere mest mig min mindre mindst mine mit må måske -ned nej ni nogen noget nogle nu ny nyt når nær næste næsten +ned nemlig nogen nogensinde noget nogle nok nu ny nyt nær næste næsten -og også okay om op os otte over +og også om omkring op os over overalt på -se seks selv ser ses sig sige sin sine sit skal skulle som stor store syv så -sådan +samme sammen selv selvom senere ses siden sig sige skal skulle som stadig synes syntes så sådan således -tag tage thi ti til to tre +temmelig tidligere til tilbage tit -ud under +ud uden udover under undtagen -var ved vi vil ville vor vores være været +var ved vi via vil ville vore vores vær være været + +øvrigt """.split()) From 23025d3b05572a840ec91301092f8bee68cb1753 Mon Sep 17 00:00:00 2001 From: mollerhoj Date: Mon, 3 Jul 2017 15:41:59 +0200 Subject: [PATCH 02/77] Clean up a couple of strange English stopwords --- spacy/lang/en/stop_words.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/spacy/lang/en/stop_words.py b/spacy/lang/en/stop_words.py index 640940fea..394731ff1 100644 --- a/spacy/lang/en/stop_words.py +++ b/spacy/lang/en/stop_words.py @@ -16,7 +16,7 @@ call can cannot ca could did do does doing done down due during -each eight either eleven else elsewhere empty enough etc even ever every +each eight either eleven else elsewhere empty enough even ever every everyone everything everywhere except few fifteen fifty first five for former formerly forty four from front full @@ -27,7 +27,7 @@ 
get give go had has have he hence her here hereafter hereby herein hereupon hers herself him himself his how however hundred -i if in inc indeed into is it its itself +i if in indeed into is it its itself keep From e8400776012931e414599905b8d2923fe78ab458 Mon Sep 17 00:00:00 2001 From: mollerhoj Date: Mon, 3 Jul 2017 15:43:06 +0200 Subject: [PATCH 03/77] Add some basic tests for Danish --- spacy/tests/conftest.py | 3 +++ spacy/tests/lang/da/__init__.py | 0 spacy/tests/lang/da/test_exceptions.py | 15 ++++++++++++++ spacy/tests/lang/da/test_text.py | 27 ++++++++++++++++++++++++++ 4 files changed, 45 insertions(+) create mode 100644 spacy/tests/lang/da/__init__.py create mode 100644 spacy/tests/lang/da/test_exceptions.py create mode 100644 spacy/tests/lang/da/test_text.py diff --git a/spacy/tests/conftest.py b/spacy/tests/conftest.py index 200f9ff4f..b6232970a 100644 --- a/spacy/tests/conftest.py +++ b/spacy/tests/conftest.py @@ -105,6 +105,9 @@ def he_tokenizer(): def nb_tokenizer(): return util.get_lang_class('nb').Defaults.create_tokenizer() +@pytest.fixture +def da_tokenizer(): + return util.get_lang_class('da').Defaults.create_tokenizer() @pytest.fixture def stringstore(): diff --git a/spacy/tests/lang/da/__init__.py b/spacy/tests/lang/da/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/spacy/tests/lang/da/test_exceptions.py b/spacy/tests/lang/da/test_exceptions.py new file mode 100644 index 000000000..d89fafd2c --- /dev/null +++ b/spacy/tests/lang/da/test_exceptions.py @@ -0,0 +1,15 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import pytest + +@pytest.mark.parametrize('text', ["ca.", "m.a.o.", "Jan.", "Dec."]) +def test_da_tokenizer_handles_abbr(da_tokenizer, text): + tokens = da_tokenizer(text) + assert len(tokens) == 1 + +def test_da_tokenizer_handles_exc_in_text(da_tokenizer): + text = "Det er bl.a. ikke meningen" + tokens = da_tokenizer(text) + assert len(tokens) == 5 + assert tokens[2].text == "bl.a." diff --git a/spacy/tests/lang/da/test_text.py b/spacy/tests/lang/da/test_text.py new file mode 100644 index 000000000..fa6a935f6 --- /dev/null +++ b/spacy/tests/lang/da/test_text.py @@ -0,0 +1,27 @@ +# coding: utf-8 +"""Test that longer and mixed texts are tokenized correctly.""" + + +from __future__ import unicode_literals + +import pytest + +def test_da_tokenizer_handles_long_text(da_tokenizer): + text = """Der var så dejligt ude på landet. Det var sommer, kornet stod gult, havren grøn, +høet var rejst i stakke nede i de grønne enge, og der gik storken på sine lange, +røde ben og snakkede ægyptisk, for det sprog havde han lært af sin moder. 
+ +Rundt om ager og eng var der store skove, og midt i skovene dybe søer; jo, der var rigtignok dejligt derude på landet!""" + tokens = da_tokenizer(text) + assert len(tokens) == 84 + +@pytest.mark.parametrize('text,match', [ + ('10', True), ('1', True), ('10.000', True), ('10.00', True), + ('999,0', True), ('en', True), ('treoghalvfemsindstyvende', True), ('hundrede', True), + ('hund', False), (',', False), ('1/2', True)]) +def test_lex_attrs_like_number(da_tokenizer, text, match): + tokens = da_tokenizer(text) + assert len(tokens) == 1 + print(tokens[0]) + assert tokens[0].like_num == match + From e8f40ceed8d259df3102dc68bbb13cdb34d704f1 Mon Sep 17 00:00:00 2001 From: mollerhoj Date: Mon, 3 Jul 2017 15:44:17 +0200 Subject: [PATCH 04/77] Add short names of months to tokenizer_exceptions --- spacy/lang/da/tokenizer_exceptions.py | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/spacy/lang/da/tokenizer_exceptions.py b/spacy/lang/da/tokenizer_exceptions.py index fbfbbad86..6bf9ab669 100644 --- a/spacy/lang/da/tokenizer_exceptions.py +++ b/spacy/lang/da/tokenizer_exceptions.py @@ -1,11 +1,27 @@ # encoding: utf8 from __future__ import unicode_literals -from ...symbols import ORTH, LEMMA +from ...symbols import ORTH, LEMMA, NORM _exc = {} +for exc_data in [ + {ORTH: "Kbh.", LEMMA: "København", NORM: "København"}, + + {ORTH: "Jan.", LEMMA: "januar", NORM: "januar"}, + {ORTH: "Feb.", LEMMA: "februar", NORM: "februar"}, + {ORTH: "Mar.", LEMMA: "marts", NORM: "marts"}, + {ORTH: "Apr.", LEMMA: "april", NORM: "april"}, + {ORTH: "Maj.", LEMMA: "maj", NORM: "maj"}, + {ORTH: "Jun.", LEMMA: "juni", NORM: "juni"}, + {ORTH: "Jul.", LEMMA: "juli", NORM: "juli"}, + {ORTH: "Aug.", LEMMA: "august", NORM: "august"}, + {ORTH: "Sep.", LEMMA: "september", NORM: "september"}, + {ORTH: "Okt.", LEMMA: "oktober", NORM: "oktober"}, + {ORTH: "Nov.", LEMMA: "november", NORM: "november"}, + {ORTH: "Dec.", LEMMA: "december", NORM: "december"}]: + _exc[exc_data[ORTH]] = [dict(exc_data)] for orth in [ "A/S", "beg.", "bl.a.", "ca.", "d.s.s.", "dvs.", "f.eks.", "fr.", "hhv.", From 3b2cb107a37804b89792b1993088e59a78d26323 Mon Sep 17 00:00:00 2001 From: mollerhoj Date: Mon, 3 Jul 2017 15:45:31 +0200 Subject: [PATCH 05/77] Add like_num functionality to Danish --- spacy/lang/da/__init__.py | 2 ++ spacy/lang/da/lex_attrs.py | 52 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 54 insertions(+) create mode 100644 spacy/lang/da/lex_attrs.py diff --git a/spacy/lang/da/__init__.py b/spacy/lang/da/__init__.py index 99babdc2c..1dc4d4820 100644 --- a/spacy/lang/da/__init__.py +++ b/spacy/lang/da/__init__.py @@ -3,6 +3,7 @@ from __future__ import unicode_literals from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS from .stop_words import STOP_WORDS +from .lex_attrs import LEX_ATTRS from ..tokenizer_exceptions import BASE_EXCEPTIONS from ..norm_exceptions import BASE_NORMS @@ -13,6 +14,7 @@ from ...util import update_exc, add_lookups class DanishDefaults(Language.Defaults): lex_attr_getters = dict(Language.Defaults.lex_attr_getters) + lex_attr_getters.update(LEX_ATTRS) lex_attr_getters[LANG] = lambda text: 'da' lex_attr_getters[NORM] = add_lookups(Language.Defaults.lex_attr_getters[NORM], BASE_NORMS) diff --git a/spacy/lang/da/lex_attrs.py b/spacy/lang/da/lex_attrs.py new file mode 100644 index 000000000..8152ad259 --- /dev/null +++ b/spacy/lang/da/lex_attrs.py @@ -0,0 +1,52 @@ +# coding: utf8 +from __future__ import unicode_literals + +from ...attrs import LIKE_NUM + +# Source 
http://fjern-uv.dk/tal.php
+
+_num_words = """nul
+en et to tre fire fem seks syv otte ni ti
+elleve tolv tretten fjorten femten seksten sytten atten nitten tyve
+enogtyve toogtyve treogtyve fireogtyve femogtyve seksogtyve syvogtyve otteogtyve niogtyve tredive
+enogtredive toogtredive treogtredive fireogtredive femogtredive seksogtredive syvogtredive otteogtredive niogtredive fyrre
+enogfyrre toogfyrre treogfyrre fireogfyrre femogfyrre seksogfyrre syvogfyrre otteogfyrre niogfyrre halvtreds
+enoghalvtreds tooghalvtreds treoghalvtreds fireoghalvtreds femoghalvtreds seksoghalvtreds syvoghalvtreds otteoghalvtreds nioghalvtreds tres
+enogtres toogtres treogtres fireogtres femogtres seksogtres syvogtres otteogtres niogtres halvfjerds
+enoghalvfjerds tooghalvfjerds treoghalvfjerds fireoghalvfjerds femoghalvfjerds seksoghalvfjerds syvoghalvfjerds otteoghalvfjerds nioghalvfjerds firs
+enogfirs toogfirs treogfirs fireogfirs femogfirs seksogfirs syvogfirs otteogfirs niogfirs halvfems
+enoghalvfems tooghalvfems treoghalvfems fireoghalvfems femoghalvfems seksoghalvfems syvoghalvfems otteoghalvfems nioghalvfems hundrede
+million milliard billion billiard trillion trilliard
+""".split()
+
+# source http://www.duda.dk/video/dansk/grammatik/talord/talord.html
+
+_ordinal_words = """nulte
+første anden tredje fjerde femte sjette syvende ottende niende tiende
+elfte tolvte trettende fjortende femtende sekstende syttende attende nittende tyvende
+enogtyvende toogtyvende treogtyvende fireogtyvende femogtyvende seksogtyvende syvogtyvende otteogtyvende niogtyvende tredivte enogtredivte toogtredivte treogtredivte fireogtredivte femogtredivte seksogtredivte syvogtredivte otteogtredivte niogtredivte fyrretyvende
+enogfyrretyvende toogfyrretyvende treogfyrretyvende fireogfyrretyvende femogfyrretyvende seksogfyrretyvende syvogfyrretyvende otteogfyrretyvende niogfyrretyvende halvtredsindstyvende enoghalvtredsindstyvende
+tooghalvtredsindstyvende treoghalvtredsindstyvende fireoghalvtredsindstyvende femoghalvtredsindstyvende seksoghalvtredsindstyvende syvoghalvtredsindstyvende otteoghalvtredsindstyvende nioghalvtredsindstyvende
+tresindstyvende enogtresindstyvende toogtresindstyvende treogtresindstyvende fireogtresindstyvende femogtresindstyvende seksogtresindstyvende syvogtresindstyvende otteogtresindstyvende niogtresindstyvende halvfjerdsindstyvende
+enoghalvfjerdsindstyvende tooghalvfjerdsindstyvende treoghalvfjerdsindstyvende fireoghalvfjerdsindstyvende femoghalvfjerdsindstyvende seksoghalvfjerdsindstyvende syvoghalvfjerdsindstyvende otteoghalvfjerdsindstyvende nioghalvfjerdsindstyvende firsindstyvende
+enogfirsindstyvende toogfirsindstyvende treogfirsindstyvende fireogfirsindstyvende femogfirsindstyvende seksogfirsindstyvende syvogfirsindstyvende otteogfirsindstyvende niogfirsindstyvende halvfemsindstyvende
+enoghalvfemsindstyvende tooghalvfemsindstyvende treoghalvfemsindstyvende fireoghalvfemsindstyvende femoghalvfemsindstyvende seksoghalvfemsindstyvende syvoghalvfemsindstyvende otteoghalvfemsindstyvende nioghalvfemsindstyvende
+""".split()
+
+def like_num(text):
+    # Danish uses '.' to group thousands and ',' as the decimal separator
+    text = text.replace(',', '').replace('.', '')
+    if text.isdigit():
+        return True
+    if text.count('/') == 1:
+        num, denom = text.split('/')
+        if num.isdigit() and denom.isdigit():
+            return True
+    if text in _num_words:
+        return True
+    if text in _ordinal_words:
+        return True
+    return False
+
+LEX_ATTRS = {
+    LIKE_NUM: like_num
+}
From 64c732918a39907860d4107b9d25281152b32fe1 Mon Sep 17 00:00:00 2001
From: mollerhoj
Date: Mon, 3 Jul 2017 15:49:09 +0200
Subject: [PATCH 06/77] Add Morph_rules. (TODO: Not working?) --- spacy/lang/da/__init__.py | 2 ++ spacy/lang/da/morph_rules.py | 41 ++++++++++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+) create mode 100644 spacy/lang/da/morph_rules.py diff --git a/spacy/lang/da/__init__.py b/spacy/lang/da/__init__.py index 1dc4d4820..d83ad8048 100644 --- a/spacy/lang/da/__init__.py +++ b/spacy/lang/da/__init__.py @@ -4,6 +4,7 @@ from __future__ import unicode_literals from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS from .stop_words import STOP_WORDS from .lex_attrs import LEX_ATTRS +from .morph_rules import MORPH_RULES from ..tokenizer_exceptions import BASE_EXCEPTIONS from ..norm_exceptions import BASE_NORMS @@ -19,6 +20,7 @@ class DanishDefaults(Language.Defaults): lex_attr_getters[NORM] = add_lookups(Language.Defaults.lex_attr_getters[NORM], BASE_NORMS) tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS) + #morph_rules = dict(MORPH_RULES) stop_words = set(STOP_WORDS) diff --git a/spacy/lang/da/morph_rules.py b/spacy/lang/da/morph_rules.py new file mode 100644 index 000000000..b365bf871 --- /dev/null +++ b/spacy/lang/da/morph_rules.py @@ -0,0 +1,41 @@ +# coding: utf8 +from __future__ import unicode_literals + +from ...symbols import LEMMA +from ...deprecated import PRON_LEMMA + +MORPH_RULES = { + "PRON": { + "jeg": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Sing", "Case": "Nom"}, + "mig": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Sing", "Case": "Acc"}, + "du": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two"}, + "han": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Masc", "Case": "Nom"}, + "ham": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Masc", "Case": "Acc"}, + "hun": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Fem", "Case": "Nom"}, + "hende": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Fem", "Case": "Acc"}, + "den": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Neut"}, + "det": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Neut"}, + "vi": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Case": "Nom"}, + "os": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Case": "Acc"}, + "de": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Plur", "Case": "Nom"}, + "dem": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Plur", "Case": "Acc"}, + + "min": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Sing", "Poss": "Yes", "Reflex": "Yes"}, + "din": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Sing", "Poss": "Yes", "Reflex": "Yes"}, + "hans": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Masc", "Poss": "Yes", "Reflex": "Yes"}, + "hendes": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Fem", "Poss": "Yes", "Reflex": "Yes"}, + "dens": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Neut", "Poss": "Yes", "Reflex": "Yes"}, + "dets": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Neut", "Poss": "Yes", "Reflex": "Yes"}, + "vores": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", 
"Number": "Plur", "Poss": "Yes", "Reflex": "Yes"}, + "deres": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"}, + }, + + "VERB": { + "er": {LEMMA: "være", "VerbForm": "Fin", "Tense": "Pres"}, + "var": {LEMMA: "være", "VerbForm": "Fin", "Tense": "Past"} + } +} + +for tag, rules in MORPH_RULES.items(): + for key, attrs in dict(rules).items(): + rules[key.title()] = attrs From 85144835dab55336e07f5c806f3cd54911fea9e2 Mon Sep 17 00:00:00 2001 From: mollerhoj Date: Mon, 3 Jul 2017 15:51:58 +0200 Subject: [PATCH 07/77] Add Tag_map for Danish --- spacy/lang/da/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/spacy/lang/da/__init__.py b/spacy/lang/da/__init__.py index d83ad8048..5f6cb867b 100644 --- a/spacy/lang/da/__init__.py +++ b/spacy/lang/da/__init__.py @@ -5,6 +5,7 @@ from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS from .stop_words import STOP_WORDS from .lex_attrs import LEX_ATTRS from .morph_rules import MORPH_RULES +from ..tag_map import TAG_MAP from ..tokenizer_exceptions import BASE_EXCEPTIONS from ..norm_exceptions import BASE_NORMS @@ -21,6 +22,7 @@ class DanishDefaults(Language.Defaults): tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS) #morph_rules = dict(MORPH_RULES) + tag_map = dict(TAG_MAP) stop_words = set(STOP_WORDS) From 7b9b1be44cae4d13c4e1f0881372701982b61a33 Mon Sep 17 00:00:00 2001 From: Ramanan Balakrishnan Date: Thu, 19 Oct 2017 17:00:41 +0530 Subject: [PATCH 08/77] Support single value for attribute list in doc.to_array --- .github/contributors/ramananbalakrishnan.md | 106 ++++++++++++++++++++ spacy/tokens/doc.pyx | 6 ++ 2 files changed, 112 insertions(+) create mode 100644 .github/contributors/ramananbalakrishnan.md diff --git a/.github/contributors/ramananbalakrishnan.md b/.github/contributors/ramananbalakrishnan.md new file mode 100644 index 000000000..804c41f56 --- /dev/null +++ b/.github/contributors/ramananbalakrishnan.md @@ -0,0 +1,106 @@ +# spaCy contributor agreement + +This spaCy Contributor Agreement (**"SCA"**) is based on the +[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf). +The SCA applies to any contribution that you make to any product or project +managed by us (the **"project"**), and sets out the intellectual property rights +you grant to us in the contributed materials. The term **"us"** shall mean +[ExplosionAI UG (haftungsbeschränkt)](https://explosion.ai/legal). The term +**"you"** shall mean the person or entity identified below. + +If you agree to be bound by these terms, fill in the information requested +below and include the filled-in version with your first pull request, under the +folder [`.github/contributors/`](/.github/contributors/). The name of the file +should be your GitHub username, with the extension `.md`. For example, the user +example_user would create the file `.github/contributors/example_user.md`. + +Read this agreement carefully before signing. These terms and conditions +constitute a binding legal agreement. + +## Contributor Agreement + +1. The term "contribution" or "contributed materials" means any source code, +object code, patch, tool, sample, graphic, specification, manual, +documentation, or any other material posted or submitted by you to the project. + +2. 
With respect to any worldwide copyrights, or copyright applications and
+registrations, in your contribution:
+
+    * you hereby assign to us joint ownership, and to the extent that such
+    assignment is or becomes invalid, ineffective or unenforceable, you hereby
+    grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge,
+    royalty-free, unrestricted license to exercise all rights under those
+    copyrights. This includes, at our option, the right to sublicense these same
+    rights to third parties through multiple levels of sublicensees or other
+    licensing arrangements;
+
+    * you agree that each of us can do all things in relation to your
+    contribution as if each of us were the sole owners, and if one of us makes
+    a derivative work of your contribution, the one who makes the derivative
+    work (or has it made) will be the sole owner of that derivative work;
+
+    * you agree that you will not assert any moral rights in your contribution
+    against us, our licensees or transferees;
+
+    * you agree that we may register a copyright in your contribution and
+    exercise all ownership rights associated with it; and
+
+    * you agree that neither of us has any duty to consult with, obtain the
+    consent of, pay or render an accounting to the other for any use or
+    distribution of your contribution.
+
+3. With respect to any patents you own, or that you can license without payment
+to any third party, you hereby grant to us a perpetual, irrevocable,
+non-exclusive, worldwide, no-charge, royalty-free license to:
+
+    * make, have made, use, sell, offer to sell, import, and otherwise transfer
+    your contribution in whole or in part, alone or in combination with or
+    included in any product, work or materials arising out of the project to
+    which your contribution was submitted, and
+
+    * at our option, to sublicense these same rights to third parties through
+    multiple levels of sublicensees or other licensing arrangements.
+
+4. Except as set out above, you keep all right, title, and interest in your
+contribution. The rights that you grant to us under these terms are effective
+on the date you first submitted a contribution to us, even if your submission
+took place before the date you sign these terms.
+
+5. You covenant, represent, warrant and agree that:
+
+    * Each contribution that you submit is and shall be an original work of
+    authorship and you can legally grant the rights set out in this SCA;
+
+    * to the best of your knowledge, each contribution will not violate any
+    third party's copyrights, trademarks, patents, or other intellectual
+    property rights; and
+
+    * each contribution shall be in compliance with U.S. export control laws and
+    other applicable export and import laws. You agree to notify us if you
+    become aware of any circumstance which would make any of the foregoing
+    representations inaccurate in any respect. We may publicly disclose your
+    participation in the project, including the fact that you have signed the SCA.
+
+6. This SCA is governed by the laws of the State of California and applicable
+U.S. Federal law. Any choice of law rules will not apply.
+
+7. Please place an “x” on one of the applicable statements below. Please do NOT
+mark both statements:
+
+    * [x] I am signing on behalf of myself as an individual and no other person
+    or entity, including my employer, has or will have rights with respect to my
+    contributions.
+
+    * [ ] I am signing on behalf of my employer or a legal entity and I have the
+    actual authority to contractually bind that entity.
+ +## Contributor Details + +| Field | Entry | +|------------------------------- | -------------------- | +| Name | Ramanan Balakrishnan | +| Company name (if applicable) | | +| Title or role (if applicable) | | +| Date | 2017-10-19 | +| GitHub username | ramananbalakrishnan | +| Website (optional) | | diff --git a/spacy/tokens/doc.pyx b/spacy/tokens/doc.pyx index 809f178f8..ad5358d9a 100644 --- a/spacy/tokens/doc.pyx +++ b/spacy/tokens/doc.pyx @@ -554,13 +554,19 @@ cdef class Doc: cdef int i, j cdef attr_id_t feature cdef np.ndarray[attr_t, ndim=2] output + cdef np.ndarray[attr_t, ndim=1] output_1D # Make an array from the attributes --- otherwise our inner loop is Python # dict iteration. + if( type(py_attr_ids) is not list and type(py_attr_ids) is not tuple ): + py_attr_ids = [ py_attr_ids ] cdef np.ndarray[attr_t, ndim=1] attr_ids = numpy.asarray(py_attr_ids, dtype=numpy.uint64) output = numpy.ndarray(shape=(self.length, len(attr_ids)), dtype=numpy.uint64) for i in range(self.length): for j, feature in enumerate(attr_ids): output[i, j] = get_token_attr(&self.c[i], feature) + if( len(attr_ids) == 1 ): + output_1D = output.reshape((self.length)) + return output_1D return output def count_by(self, attr_id_t attr_id, exclude=None, PreshCounter counts=None): From b3ab124fc5ad9934a166cf6a21995571cbf4de8b Mon Sep 17 00:00:00 2001 From: Ramanan Balakrishnan Date: Thu, 19 Oct 2017 19:37:14 +0530 Subject: [PATCH 09/77] Support strings for attribute list in doc.to_array --- spacy/tests/doc/test_array.py | 20 ++++++++++++++++++++ spacy/tokens/doc.pyx | 27 +++++++++++++++++++-------- 2 files changed, 39 insertions(+), 8 deletions(-) diff --git a/spacy/tests/doc/test_array.py b/spacy/tests/doc/test_array.py index dd87aa763..ff10394d1 100644 --- a/spacy/tests/doc/test_array.py +++ b/spacy/tests/doc/test_array.py @@ -17,6 +17,26 @@ def test_doc_array_attr_of_token(en_tokenizer, en_vocab): assert feats_array[0][0] != feats_array[0][1] +def test_doc_stringy_array_attr_of_token(en_tokenizer, en_vocab): + text = "An example sentence" + tokens = en_tokenizer(text) + example = tokens.vocab["example"] + assert example.orth != example.shape + feats_array = tokens.to_array((ORTH, SHAPE)) + feats_array_stringy = tokens.to_array(("ORTH", "SHAPE")) + assert feats_array_stringy[0][0] == feats_array[0][0] + assert feats_array_stringy[0][1] == feats_array[0][1] + + +def test_doc_scalar_attr_of_token(en_tokenizer, en_vocab): + text = "An example sentence" + tokens = en_tokenizer(text) + example = tokens.vocab["example"] + assert example.orth != example.shape + feats_array = tokens.to_array(ORTH) + assert feats_array.shape == (3,) + + def test_doc_array_tag(en_tokenizer): text = "A nice sentence." 
pos = ['DET', 'ADJ', 'NOUN', 'PUNCT'] diff --git a/spacy/tokens/doc.pyx b/spacy/tokens/doc.pyx index ad5358d9a..6e7230428 100644 --- a/spacy/tokens/doc.pyx +++ b/spacy/tokens/doc.pyx @@ -21,7 +21,7 @@ from .token cimport Token from .printers import parse_tree from ..lexeme cimport Lexeme, EMPTY_LEXEME from ..typedefs cimport attr_t, flags_t -from ..attrs import intify_attrs +from ..attrs import intify_attrs, IDS from ..attrs cimport attr_id_t from ..attrs cimport ID, ORTH, NORM, LOWER, SHAPE, PREFIX, SUFFIX, LENGTH, CLUSTER from ..attrs cimport LENGTH, POS, LEMMA, TAG, DEP, HEAD, SPACY, ENT_IOB, ENT_TYPE @@ -536,11 +536,15 @@ cdef class Doc: @cython.boundscheck(False) cpdef np.ndarray to_array(self, object py_attr_ids): - """Given a list of M attribute IDs, export the tokens to a numpy - `ndarray` of shape `(N, M)`, where `N` is the length of the document. - The values will be 32-bit integers. + """Export given token attributes to a numpy `ndarray`. - attr_ids (list[int]): A list of attribute ID ints. + If `attr_ids` is a sequence of M attributes, the output array will + be of shape `(N, M)`, where N is the length of the `Doc` + (in tokens). If `attr_ids` is a single attribute, the output shape will + be (N,). You can specify attributes by integer ID (e.g. spacy.attrs.LEMMA) + or string name (e.g. 'LEMMA' or 'lemma'). + + attr_ids (list[]): A list of attributes (int IDs or string names). RETURNS (numpy.ndarray[long, ndim=2]): A feature matrix, with one row per word, and one column per attribute indicated in the input `attr_ids`. @@ -555,11 +559,18 @@ cdef class Doc: cdef attr_id_t feature cdef np.ndarray[attr_t, ndim=2] output cdef np.ndarray[attr_t, ndim=1] output_1D - # Make an array from the attributes --- otherwise our inner loop is Python - # dict iteration. + # Handle scalar/list inputs of strings/ints for py_attr_ids if( type(py_attr_ids) is not list and type(py_attr_ids) is not tuple ): py_attr_ids = [ py_attr_ids ] - cdef np.ndarray[attr_t, ndim=1] attr_ids = numpy.asarray(py_attr_ids, dtype=numpy.uint64) + py_attr_ids_input = [] + for py_attr_id in py_attr_ids: + if( type(py_attr_id) is int ): + py_attr_ids_input.append(py_attr_id) + else: + py_attr_ids_input.append(IDS[py_attr_id.upper()]) + # Make an array from the attributes --- otherwise our inner loop is Python + # dict iteration. + cdef np.ndarray[attr_t, ndim=1] attr_ids = numpy.asarray(py_attr_ids_input, dtype=numpy.uint64) output = numpy.ndarray(shape=(self.length, len(attr_ids)), dtype=numpy.uint64) for i in range(self.length): for j, feature in enumerate(attr_ids): From d44a079fe3d8958fd4e76690a45e77f85d3ea67c Mon Sep 17 00:00:00 2001 From: Ramanan Balakrishnan Date: Fri, 20 Oct 2017 14:25:38 +0530 Subject: [PATCH 10/77] Update documentation on doc.to_array --- website/api/doc.jade | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/website/api/doc.jade b/website/api/doc.jade index dce6b89e0..ceb564c7a 100644 --- a/website/api/doc.jade +++ b/website/api/doc.jade @@ -336,28 +336,40 @@ p +tag method p - | Export the document annotations to a numpy array of shape #[code N*M] - | where #[code N] is the length of the document and #[code M] is the number - | of attribute IDs to export. The values will be 32-bit integers. + | Export given token attributes to a numpy #[code ndarray]. + | If #[code attr_ids] is a sequence of #[code M] attributes, + | the output array will be of shape #[code (N, M)], where #[code N] + | is the length of the #[code Doc] (in tokens). 
If #[code attr_ids] is + | a single attribute, the output shape will be #[code (N,)]. You can + | specify attributes by integer ID (e.g. #[code spacy.attrs.LEMMA]) + | or string name (e.g. 'LEMMA' or 'lemma'). The values will be 64-bit + | integers. +aside-code("Example"). from spacy.attrs import LOWER, POS, ENT_TYPE, IS_ALPHA doc = nlp(text) # All strings mapped to integers, for easy export to numpy np_array = doc.to_array([LOWER, POS, ENT_TYPE, IS_ALPHA]) + np_array = doc.to_array("POS") +table(["Name", "Type", "Description"]) +row +cell #[code attr_ids] - +cell list - +cell A list of attribute ID ints. + +cell list or int or string + +cell + | A list of attributes (int IDs or string names) or + | a single attribute (int ID or string name) +row("foot") +cell returns - +cell #[code.u-break numpy.ndarray[ndim=2, dtype='int32']] + +cell + | #[code.u-break numpy.ndarray[ndim=2, dtype='uint64']] or + | #[code.u-break numpy.ndarray[ndim=1, dtype='uint64']] or +cell | The exported attributes as a 2D numpy array, with one row per - | token and one column per attribute. + | token and one column per attribute (when #[code attr_ids] is a + | list), or as a 1D numpy array, with one item per attribute (when + | #[code attr_ids] is a single value). +h(2, "from_array") Doc.from_array +tag method From 4acab77a8a36c6f54cacf5e26b32860a30d09657 Mon Sep 17 00:00:00 2001 From: ines Date: Fri, 20 Oct 2017 13:07:57 +0200 Subject: [PATCH 11/77] Add missing symbol for LAW entities (resolves #1427) --- spacy/symbols.pxd | 1 + spacy/symbols.pyx | 1 + website/api/_annotation/_named-entities.jade | 4 ++++ 3 files changed, 6 insertions(+) diff --git a/spacy/symbols.pxd b/spacy/symbols.pxd index e981de6ae..4f1d35cf8 100644 --- a/spacy/symbols.pxd +++ b/spacy/symbols.pxd @@ -467,3 +467,4 @@ cdef enum symbol_t: # We therefore wait until the next data version to add them. # acl + LAW diff --git a/spacy/symbols.pyx b/spacy/symbols.pyx index b7f1f4556..f64577309 100644 --- a/spacy/symbols.pyx +++ b/spacy/symbols.pyx @@ -458,6 +458,7 @@ IDS = { "rcmod": rcmod, "root": root, "xcomp": xcomp + "LAW": LAW } def sort_nums(x): diff --git a/website/api/_annotation/_named-entities.jade b/website/api/_annotation/_named-entities.jade index 476659d4a..93e705c72 100644 --- a/website/api/_annotation/_named-entities.jade +++ b/website/api/_annotation/_named-entities.jade @@ -37,6 +37,10 @@ +cell #[code WORK_OF_ART] +cell Titles of books, songs, etc. + +row + +cell #[code LAW] + +cell Named documents made into laws. + +row +cell #[code LANGUAGE] +cell Any named language. From 108f1f786e62b1fc713ca20ff9a1aaf32665824b Mon Sep 17 00:00:00 2001 From: ines Date: Fri, 20 Oct 2017 13:08:44 +0200 Subject: [PATCH 12/77] Update symbols and document missing token attributes (see #1439) --- spacy/symbols.pxd | 23 ++++++----------------- spacy/symbols.pyx | 13 ++++++++----- website/api/token.jade | 20 ++++++++++++++++++++ 3 files changed, 34 insertions(+), 22 deletions(-) diff --git a/spacy/symbols.pxd b/spacy/symbols.pxd index 4f1d35cf8..6960681a3 100644 --- a/spacy/symbols.pxd +++ b/spacy/symbols.pxd @@ -13,12 +13,12 @@ cdef enum symbol_t: LIKE_EMAIL IS_STOP IS_OOV + IS_BRACKET + IS_QUOTE + IS_LEFT_PUNCT + IS_RIGHT_PUNCT - FLAG14 = 14 - FLAG15 - FLAG16 - FLAG17 - FLAG18 + FLAG18 = 18 FLAG19 FLAG20 FLAG21 @@ -455,16 +455,5 @@ cdef enum symbol_t: root xcomp -# Move these up to FLAG14--FLAG18 once we finish the functionality -# and are ready to regenerate the model. 
-#IS_BRACKET -#IS_QUOTE -#IS_LEFT_PUNCT -#IS_RIGHT_PUNCT - -# These symbols are currently missing. However, if we add them currently, -# we'll throw off the integer index and the model will have to be retrained. -# We therefore wait until the next data version to add them. -# acl - + acl LAW diff --git a/spacy/symbols.pyx b/spacy/symbols.pyx index f64577309..0e0337b6e 100644 --- a/spacy/symbols.pyx +++ b/spacy/symbols.pyx @@ -18,10 +18,11 @@ IDS = { "LIKE_EMAIL": LIKE_EMAIL, "IS_STOP": IS_STOP, "IS_OOV": IS_OOV, - "FLAG14": FLAG14, - "FLAG15": FLAG15, - "FLAG16": FLAG16, - "FLAG17": FLAG17, + "IS_BRACKET": IS_BRACKET, + "IS_QUOTE": IS_QUOTE, + "IS_LEFT_PUNCT": IS_LEFT_PUNCT, + "IS_RIGHT_PUNCT": IS_RIGHT_PUNCT, + "FLAG18": FLAG18, "FLAG19": FLAG19, "FLAG20": FLAG20, @@ -457,7 +458,9 @@ IDS = { "quantmod": quantmod, "rcmod": rcmod, "root": root, - "xcomp": xcomp + "xcomp": xcomp, + + "acl": acl, "LAW": LAW } diff --git a/website/api/token.jade b/website/api/token.jade index 465d44c66..4062594b4 100644 --- a/website/api/token.jade +++ b/website/api/token.jade @@ -586,6 +586,16 @@ p The L2 norm of the token's vector representation. +cell bool +cell Is the token punctuation? + +row + +cell #[code is_left_punct] + +cell bool + +cell Is the token a left punctuation mark, e.g. #[code (]? + + +row + +cell #[code is_right_punct] + +cell bool + +cell Is the token a right punctuation mark, e.g. #[code )]? + +row +cell #[code is_space] +cell bool @@ -593,6 +603,16 @@ p The L2 norm of the token's vector representation. | Does the token consist of whitespace characters? Equivalent to | #[code token.text.isspace()]. + +row + +cell #[code is_bracket] + +cell bool + +cell Is the token a bracket? + + +row + +cell #[code is_quote] + +cell bool + +cell Is the token a quotation mark? + +row +cell #[code like_url] +cell bool From 072694656376b423e5b53c2742b733d90b416bee Mon Sep 17 00:00:00 2001 From: Ramanan Balakrishnan Date: Fri, 20 Oct 2017 17:09:37 +0530 Subject: [PATCH 13/77] cleanup to_array implementation using fixes on master --- spacy/tokens/doc.pyx | 25 +++++++++++-------------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/spacy/tokens/doc.pyx b/spacy/tokens/doc.pyx index 6e7230428..9351ba366 100644 --- a/spacy/tokens/doc.pyx +++ b/spacy/tokens/doc.pyx @@ -557,28 +557,25 @@ cdef class Doc: """ cdef int i, j cdef attr_id_t feature + cdef np.ndarray[attr_t, ndim=1] attr_ids cdef np.ndarray[attr_t, ndim=2] output - cdef np.ndarray[attr_t, ndim=1] output_1D # Handle scalar/list inputs of strings/ints for py_attr_ids - if( type(py_attr_ids) is not list and type(py_attr_ids) is not tuple ): - py_attr_ids = [ py_attr_ids ] - py_attr_ids_input = [] - for py_attr_id in py_attr_ids: - if( type(py_attr_id) is int ): - py_attr_ids_input.append(py_attr_id) - else: - py_attr_ids_input.append(IDS[py_attr_id.upper()]) + if not hasattr(py_attr_ids, '__iter__'): + py_attr_ids = [py_attr_ids] + + # Allow strings, e.g. 'lemma' or 'LEMMA' + py_attr_ids = [(IDS[id_.upper()] if hasattr(id_, 'upper') else id_) + for id_ in py_attr_ids] # Make an array from the attributes --- otherwise our inner loop is Python # dict iteration. 
- cdef np.ndarray[attr_t, ndim=1] attr_ids = numpy.asarray(py_attr_ids_input, dtype=numpy.uint64) + attr_ids = numpy.asarray(py_attr_ids, dtype=numpy.uint64) output = numpy.ndarray(shape=(self.length, len(attr_ids)), dtype=numpy.uint64) for i in range(self.length): for j, feature in enumerate(attr_ids): output[i, j] = get_token_attr(&self.c[i], feature) - if( len(attr_ids) == 1 ): - output_1D = output.reshape((self.length)) - return output_1D - return output + # Handle 1d case + return output if len(attr_ids) >= 2 else output.reshape((self.length,)) + def count_by(self, attr_id_t attr_id, exclude=None, PreshCounter counts=None): """Count the frequencies of a given attribute. Produces a dict of From 92ac9316b5f3ff79db1c3ec44be54f8c4dfe95dc Mon Sep 17 00:00:00 2001 From: Matthew Honnibal Date: Fri, 20 Oct 2017 13:59:24 +0200 Subject: [PATCH 14/77] Fix initialization of vectors, to address serialization problem --- spacy/vectors.pyx | 12 +++++------- spacy/vocab.pyx | 10 ++++------ 2 files changed, 9 insertions(+), 13 deletions(-) diff --git a/spacy/vectors.pyx b/spacy/vectors.pyx index 5512279ae..cea583110 100644 --- a/spacy/vectors.pyx +++ b/spacy/vectors.pyx @@ -32,22 +32,20 @@ cdef class Vectors: cdef public object keys cdef public int i - def __init__(self, strings, data_or_width=0): + def __init__(self, strings, data=None, width=0): if isinstance(strings, StringStore): self.strings = strings else: self.strings = StringStore() for string in strings: self.strings.add(string) - if isinstance(data_or_width, int): - self.data = data = numpy.zeros((len(strings), data_or_width), - dtype='f') + if data is not None: + self.data = numpy.asarray(data, dtype='f') else: - data = data_or_width + self.data = numpy.zeros((len(self.strings), width), dtype='f') self.i = 0 - self.data = data self.key2row = {} - self.keys = np.ndarray((self.data.shape[0],), dtype='uint64') + self.keys = numpy.zeros((self.data.shape[0],), dtype='uint64') def __reduce__(self): return (Vectors, (self.strings, self.data)) diff --git a/spacy/vocab.pyx b/spacy/vocab.pyx index 205e5a2af..e6ba9944b 100644 --- a/spacy/vocab.pyx +++ b/spacy/vocab.pyx @@ -62,12 +62,10 @@ cdef class Vocab: if strings: for string in strings: _ = self[string] - for name in tag_map.keys(): - if name: - self.strings.add(name) self.lex_attr_getters = lex_attr_getters + print("Create morphology", list(self.strings), tag_map) self.morphology = Morphology(self.strings, tag_map, lemmatizer) - self.vectors = Vectors(self.strings) + self.vectors = Vectors(self.strings, width=0) property lang: def __get__(self): @@ -338,7 +336,7 @@ cdef class Vocab: if self.vectors is None: return None else: - return self.vectors.to_bytes(exclude='strings.json') + return self.vectors.to_bytes() getters = OrderedDict(( ('strings', lambda: self.strings.to_bytes()), @@ -358,7 +356,7 @@ cdef class Vocab: if self.vectors is None: return None else: - return self.vectors.from_bytes(b, exclude='strings') + return self.vectors.from_bytes(b) setters = OrderedDict(( ('strings', lambda b: self.strings.from_bytes(b)), ('lexemes', lambda b: self.lexemes_from_bytes(b)), From 6218af0105d1514089ecd76c4cbf6fec31d50423 Mon Sep 17 00:00:00 2001 From: Matthew Honnibal Date: Fri, 20 Oct 2017 13:59:57 +0200 Subject: [PATCH 15/77] Remove cpdef enum, to avoid too much code generation --- spacy/morphology.pxd | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spacy/morphology.pxd b/spacy/morphology.pxd index be6711bfd..9192f351f 100644 --- a/spacy/morphology.pxd +++ b/spacy/morphology.pxd 
@@ -44,7 +44,7 @@ cdef class Morphology: cdef int assign_feature(self, uint64_t* morph, univ_morph_t feat_id, bint value) except -1 -cpdef enum univ_morph_t: +cdef enum univ_morph_t: NIL = 0 Animacy_anim = symbols.Animacy_anim Animacy_inam From 506cf2eb1389da6149f97de7db80df52ed0d2d1f Mon Sep 17 00:00:00 2001 From: Matthew Honnibal Date: Fri, 20 Oct 2017 14:00:23 +0200 Subject: [PATCH 16/77] Remove cpdef enum, to avoid too much code generation --- spacy/morphology.pyx | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/spacy/morphology.pyx b/spacy/morphology.pyx index 4a1a0aa54..65b46fe08 100644 --- a/spacy/morphology.pyx +++ b/spacy/morphology.pyx @@ -426,3 +426,7 @@ IDS = { NAMES = [key for key, value in sorted(IDS.items(), key=lambda item: item[1])] +# Unfortunate hack here, to work around problem with long cpdef enum +# (which is generating an enormous amount of C++ in Cython 0.24+) +# We keep the enum cdef, and just make sure the names are available to Python +locals().update(IDS) From 49895fbef69598d18fd00197661ec3ad939de849 Mon Sep 17 00:00:00 2001 From: Matthew Honnibal Date: Fri, 20 Oct 2017 14:01:12 +0200 Subject: [PATCH 17/77] Rename 'SP' special tag to '_SP' Renaming the tag with an underscore lets us add it to the tag map without worrying that we'll change the sequence of tags, which throws off the tag-to-ID mapping. For instance, if we inserted a 'SP' tag, the "VERB" tag is pushed to a different class ID, and the model is all messed up. --- spacy/lang/de/tag_map.py | 2 +- spacy/lang/en/tag_map.py | 4 +-- spacy/lang/es/tag_map.py | 2 +- spacy/lang/th/tag_map.py | 77 ++++++++++++++++++++-------------------- spacy/morphology.pyx | 14 ++++++-- 5 files changed, 54 insertions(+), 45 deletions(-) diff --git a/spacy/lang/de/tag_map.py b/spacy/lang/de/tag_map.py index d16bd17e0..730c15cfc 100644 --- a/spacy/lang/de/tag_map.py +++ b/spacy/lang/de/tag_map.py @@ -62,5 +62,5 @@ TAG_MAP = { "VVIZU": {POS: VERB, "VerbForm": "inf"}, "VVPP": {POS: VERB, "Aspect": "perf", "VerbForm": "part"}, "XY": {POS: X}, - "SP": {POS: SPACE} + "_SP": {POS: SPACE} } diff --git a/spacy/lang/en/tag_map.py b/spacy/lang/en/tag_map.py index a674c17e3..76eabf307 100644 --- a/spacy/lang/en/tag_map.py +++ b/spacy/lang/en/tag_map.py @@ -55,11 +55,11 @@ TAG_MAP = { "WP": {POS: NOUN, "PronType": "int|rel"}, "WP$": {POS: ADJ, "Poss": "yes", "PronType": "int|rel"}, "WRB": {POS: ADV, "PronType": "int|rel"}, - "SP": {POS: SPACE}, "ADD": {POS: X}, "NFP": {POS: PUNCT}, "GW": {POS: X}, "XX": {POS: X}, "BES": {POS: VERB}, - "HVS": {POS: VERB} + "HVS": {POS: VERB}, + "_SP": {POS: SPACE}, } diff --git a/spacy/lang/es/tag_map.py b/spacy/lang/es/tag_map.py index 86dd48620..2095d23b1 100644 --- a/spacy/lang/es/tag_map.py +++ b/spacy/lang/es/tag_map.py @@ -303,5 +303,5 @@ TAG_MAP = { "VERB__VerbForm=Ger": {"morph": "VerbForm=Ger", "pos": "VERB"}, "VERB__VerbForm=Inf": {"morph": "VerbForm=Inf", "pos": "VERB"}, "X___": {"morph": "_", "pos": "X"}, - "SP": {"morph": "_", "pos": "SPACE"}, + "_SP": {"morph": "_", "pos": "SPACE"}, } diff --git a/spacy/lang/th/tag_map.py b/spacy/lang/th/tag_map.py index 40e5ac44c..570871820 100644 --- a/spacy/lang/th/tag_map.py +++ b/spacy/lang/th/tag_map.py @@ -19,63 +19,64 @@ TAG_MAP = { "NPRP": {POS: PRON}, # ADJ "ADJ": {POS: ADJ}, - "NONM": {POS: ADJ}, - "VATT": {POS: ADJ}, - "DONM": {POS: ADJ}, + "NONM": {POS: ADJ}, + "VATT": {POS: ADJ}, + "DONM": {POS: ADJ}, # ADV "ADV": {POS: ADV}, - "ADVN": {POS: ADV}, - "ADVI": {POS: ADV}, - "ADVP": {POS: ADV}, - "ADVS": {POS: ADV}, + "ADVN": {POS: ADV}, + 
"ADVI": {POS: ADV}, + "ADVP": {POS: ADV}, + "ADVS": {POS: ADV}, # INT "INT": {POS: INTJ}, # PRON "PROPN": {POS: PROPN}, - "PPRS": {POS: PROPN}, - "PDMN": {POS: PROPN}, - "PNTR": {POS: PROPN}, + "PPRS": {POS: PROPN}, + "PDMN": {POS: PROPN}, + "PNTR": {POS: PROPN}, # DET "DET": {POS: DET}, - "DDAN": {POS: DET}, - "DDAC": {POS: DET}, - "DDBQ": {POS: DET}, - "DDAQ": {POS: DET}, - "DIAC": {POS: DET}, - "DIBQ": {POS: DET}, - "DIAQ": {POS: DET}, - "DCNM": {POS: DET}, + "DDAN": {POS: DET}, + "DDAC": {POS: DET}, + "DDBQ": {POS: DET}, + "DDAQ": {POS: DET}, + "DIAC": {POS: DET}, + "DIBQ": {POS: DET}, + "DIAQ": {POS: DET}, + "DCNM": {POS: DET}, # NUM "NUM": {POS: NUM}, - "NCNM": {POS: NUM}, - "NLBL": {POS: NUM}, - "DCNM": {POS: NUM}, + "NCNM": {POS: NUM}, + "NLBL": {POS: NUM}, + "DCNM": {POS: NUM}, # AUX "AUX": {POS: AUX}, - "XVBM": {POS: AUX}, - "XVAM": {POS: AUX}, - "XVMM": {POS: AUX}, - "XVBB": {POS: AUX}, - "XVAE": {POS: AUX}, + "XVBM": {POS: AUX}, + "XVAM": {POS: AUX}, + "XVMM": {POS: AUX}, + "XVBB": {POS: AUX}, + "XVAE": {POS: AUX}, # ADP "ADP": {POS: ADP}, - "RPRE": {POS: ADP}, + "RPRE": {POS: ADP}, # CCONJ "CCONJ": {POS: CCONJ}, - "JCRG": {POS: CCONJ}, + "JCRG": {POS: CCONJ}, # SCONJ "SCONJ": {POS: SCONJ}, - "PREL": {POS: SCONJ}, - "JSBR": {POS: SCONJ}, - "JCMP": {POS: SCONJ}, + "PREL": {POS: SCONJ}, + "JSBR": {POS: SCONJ}, + "JCMP": {POS: SCONJ}, # PART - "PART": {POS: PART}, - "FIXN": {POS: PART}, - "FIXV": {POS: PART}, - "EAFF": {POS: PART}, - "AITT": {POS: PART}, - "NEG": {POS: PART}, + "PART": {POS: PART}, + "FIXN": {POS: PART}, + "FIXV": {POS: PART}, + "EAFF": {POS: PART}, + "AITT": {POS: PART}, + "NEG": {POS: PART}, # PUNCT "PUNCT": {POS: PUNCT}, - "PUNC": {POS: PUNCT} + "PUNC": {POS: PUNCT}, + "_SP": {POS: SPACE} } diff --git a/spacy/morphology.pyx b/spacy/morphology.pyx index 65b46fe08..7845ab4e7 100644 --- a/spacy/morphology.pyx +++ b/spacy/morphology.pyx @@ -4,7 +4,7 @@ from __future__ import unicode_literals from libc.string cimport memset -from .parts_of_speech cimport ADJ, VERB, NOUN, PUNCT +from .parts_of_speech cimport ADJ, VERB, NOUN, PUNCT, SPACE from .attrs cimport POS, IS_SPACE from .parts_of_speech import IDS as POS_IDS from .lexeme cimport Lexeme @@ -36,14 +36,22 @@ cdef class Morphology: def __init__(self, StringStore string_store, tag_map, lemmatizer, exc=None): self.mem = Pool() self.strings = string_store + # Add special space symbol. We prefix with underscore, to make sure it + # always sorts to the end. + space_attrs = tag_map.pop('SP', {POS: SPACE}) + if '_SP' not in tag_map: + self.strings.add('_SP') + tag_map = dict(tag_map) + tag_map['_SP'] = space_attrs + self.tag_names = tuple(sorted(tag_map.keys())) self.tag_map = {} self.lemmatizer = lemmatizer self.n_tags = len(tag_map) - self.tag_names = tuple(sorted(tag_map.keys())) self.reverse_index = {} self.rich_tags = self.mem.alloc(self.n_tags+1, sizeof(RichTagC)) for i, (tag_str, attrs) in enumerate(sorted(tag_map.items())): + self.strings.add(tag_str) self.tag_map[tag_str] = dict(attrs) attrs = _normalize_props(attrs) attrs = intify_attrs(attrs, self.strings, _do_deprecated=True) @@ -93,7 +101,7 @@ cdef class Morphology: # the statistical model fails. 
# Related to Issue #220 if Lexeme.c_check_flag(token.lex, IS_SPACE): - tag_id = self.reverse_index[self.strings.add('SP')] + tag_id = self.reverse_index[self.strings.add('_SP')] rich_tag = self.rich_tags[tag_id] analysis = self._cache.get(tag_id, token.lex.orth) if analysis is NULL: From ebecaddb765713aaaf7f5b2f51488f39f66655d9 Mon Sep 17 00:00:00 2001 From: Matthew Honnibal Date: Fri, 20 Oct 2017 14:17:15 +0200 Subject: [PATCH 18/77] Make 'data_or_width' two keyword args in Vectors.__init__ Previously the data and width options were one argument in Vectors, which meant you couldn't say vectors = Vectors(strings, width=300). It's better to have two keywords. --- spacy/tests/vectors/test_vectors.py | 8 ++++---- website/api/vectors.jade | 15 +++++++++------ 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/spacy/tests/vectors/test_vectors.py b/spacy/tests/vectors/test_vectors.py index 798871edd..74ac26a10 100644 --- a/spacy/tests/vectors/test_vectors.py +++ b/spacy/tests/vectors/test_vectors.py @@ -35,18 +35,18 @@ def vocab(en_vocab, vectors): def test_init_vectors_with_data(strings, data): - v = Vectors(strings, data) + v = Vectors(strings, data=data) assert v.shape == data.shape def test_init_vectors_with_width(strings): - v = Vectors(strings, 3) + v = Vectors(strings, width=3) for string in strings: v.add(string) assert v.shape == (len(strings), 3) def test_get_vector(strings, data): - v = Vectors(strings, data) + v = Vectors(strings, data=data) for string in strings: v.add(string) assert list(v[strings[0]]) == list(data[0]) @@ -56,7 +56,7 @@ def test_get_vector(strings, data): def test_set_vector(strings, data): orig = data.copy() - v = Vectors(strings, data) + v = Vectors(strings, data=data) for string in strings: v.add(string) assert list(v[strings[0]]) == list(orig[0]) diff --git a/website/api/vectors.jade b/website/api/vectors.jade index a58736506..e08f34643 100644 --- a/website/api/vectors.jade +++ b/website/api/vectors.jade @@ -12,7 +12,7 @@ p p | Create a new vector store. To keep the vector table empty, pass - | #[code data_or_width=0]. You can also create the vector table and add + | #[code width=0]. You can also create the vector table and add | vectors one by one, or set the vector values directly on initialisation. +aside-code("Example"). @@ -21,11 +21,11 @@ p empty_vectors = Vectors(StringStore()) - vectors = Vectors([u'cat'], 300) + vectors = Vectors([u'cat'], width=300) vectors[u'cat'] = numpy.random.uniform(-1, 1, (300,)) vector_table = numpy.zeros((3, 300), dtype='f') - vectors = Vectors(StringStore(), vector_table) + vectors = Vectors(StringStore(), data=vector_table) +table(["Name", "Type", "Description"]) +row @@ -36,9 +36,12 @@ p | that maps strings to hash values, and vice versa. +row - +cell #[code data_or_width] - +cell #[code.u-break numpy.ndarray[ndim=1, dtype='float32']] or int - +cell Vector data or number of dimensions. + +cell #[code data] + +cell #[code.u-break numpy.ndarray[ndim=1, dtype='float32']] + + +row + +cell #[code width] + +cell Number of dimensions. 
+row("foot") +cell returns From cfae54c507ab24a1da36d3008484d2ac8edb3071 Mon Sep 17 00:00:00 2001 From: Matthew Honnibal Date: Fri, 20 Oct 2017 14:19:04 +0200 Subject: [PATCH 19/77] Make change to Vectors.__init__ --- spacy/vectors.pyx | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/spacy/vectors.pyx b/spacy/vectors.pyx index cea583110..fa5fcf624 100644 --- a/spacy/vectors.pyx +++ b/spacy/vectors.pyx @@ -32,7 +32,7 @@ cdef class Vectors: cdef public object keys cdef public int i - def __init__(self, strings, data=None, width=0): + def __init__(self, strings, width=0, data=None): if isinstance(strings, StringStore): self.strings = strings else: @@ -46,6 +46,10 @@ cdef class Vectors: self.i = 0 self.key2row = {} self.keys = numpy.zeros((self.data.shape[0],), dtype='uint64') + for i, string in enumerate(self.strings): + if i >= self.data.shape[0]: + break + self.add(self.strings[string], self.data[i]) def __reduce__(self): return (Vectors, (self.strings, self.data)) From 33229b1c9ef53a49a3bbd00d61ca02c28c5481c8 Mon Sep 17 00:00:00 2001 From: Matthew Honnibal Date: Fri, 20 Oct 2017 14:19:29 +0200 Subject: [PATCH 20/77] Remove print statement --- spacy/vocab.pyx | 1 - 1 file changed, 1 deletion(-) diff --git a/spacy/vocab.pyx b/spacy/vocab.pyx index e6ba9944b..2e189a02b 100644 --- a/spacy/vocab.pyx +++ b/spacy/vocab.pyx @@ -63,7 +63,6 @@ cdef class Vocab: for string in strings: _ = self[string] self.lex_attr_getters = lex_attr_getters - print("Create morphology", list(self.strings), tag_map) self.morphology = Morphology(self.strings, tag_map, lemmatizer) self.vectors = Vectors(self.strings, width=0) From 9010a1a0603fba85143bcd859b88aaed59937a9a Mon Sep 17 00:00:00 2001 From: Matthew Honnibal Date: Fri, 20 Oct 2017 14:19:46 +0200 Subject: [PATCH 21/77] Create vectors correctly --- spacy/vocab.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spacy/vocab.pyx b/spacy/vocab.pyx index 2e189a02b..3f96b5144 100644 --- a/spacy/vocab.pyx +++ b/spacy/vocab.pyx @@ -252,7 +252,7 @@ cdef class Vocab: """ if new_dim is None: new_dim = self.vectors.data.shape[1] - self.vectors = Vectors(self.strings, new_dim) + self.vectors = Vectors(self.strings, width=new_dim) def get_vector(self, orth): """Retrieve a vector for a word in the vocabulary. From f111b228e0bcd65a9b852f2687a7441628355bba Mon Sep 17 00:00:00 2001 From: Matthew Honnibal Date: Fri, 20 Oct 2017 16:24:48 +0200 Subject: [PATCH 22/77] Fix re-parsing of previously parsed text If a Doc object had been previously parsed, it was possible for invalid parses to be added. There were two problems: 1) The parse was only being partially erased 2) The RightArc action was able to create a 1-cycle. This patch fixes both errors, and avoids resetting the parse if one is present. In theory this might allow a better parse to be predicted by running the parser twice. Closes #1253. --- spacy/syntax/arc_eager.pyx | 14 ++++++++++---- spacy/tests/regression/test_issue1253.py | 20 ++++++++++++++++++++ 2 files changed, 30 insertions(+), 4 deletions(-) create mode 100644 spacy/tests/regression/test_issue1253.py diff --git a/spacy/syntax/arc_eager.pyx b/spacy/syntax/arc_eager.pyx index 9770383d1..8adb8e52c 100644 --- a/spacy/syntax/arc_eager.pyx +++ b/spacy/syntax/arc_eager.pyx @@ -212,7 +212,8 @@ cdef class LeftArc: cdef class RightArc: @staticmethod cdef bint is_valid(const StateC* st, attr_t label) nogil: - return st.B_(0).sent_start != 1 + # If there's (perhaps partial) parse pre-set, don't allow cycle. 
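+        # (i.e. B0 must not already be the head of S0; otherwise the new
+        # arc S0 -> B0 would complete a cycle)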
+ return st.B_(0).sent_start != 1 and st.H(st.S(0)) != st.B(0) @staticmethod cdef int transition(StateC* st, attr_t label) nogil: @@ -446,14 +447,19 @@ cdef class ArcEager(TransitionSystem): cdef int initialize_state(self, StateC* st) nogil: for i in range(st.length): - st._sent[i].l_edge = i - st._sent[i].r_edge = i + if st._sent[i].dep == 0: + st._sent[i].l_edge = i + st._sent[i].r_edge = i + st._sent[i].head = 0 + st._sent[i].dep = 0 + st._sent[i].l_kids = 0 + st._sent[i].r_kids = 0 st.fast_forward() cdef int finalize_state(self, StateC* st) nogil: cdef int i for i in range(st.length): - if st._sent[i].head == 0 and st._sent[i].dep == 0: + if st._sent[i].head == 0: st._sent[i].dep = self.root_label def finalize_doc(self, doc): diff --git a/spacy/tests/regression/test_issue1253.py b/spacy/tests/regression/test_issue1253.py new file mode 100644 index 000000000..2fe77d6d8 --- /dev/null +++ b/spacy/tests/regression/test_issue1253.py @@ -0,0 +1,20 @@ +from __future__ import unicode_literals +import pytest +import spacy + + +def ss(tt): + for i in range(len(tt)-1): + for j in range(i+1, len(tt)): + tt[i:j].root + + +@pytest.mark.models('en') +def test_access_parse_for_merged(): + nlp = spacy.load('en_core_web_sm') + t_t = nlp.tokenizer("Highly rated - I'll definitely") + nlp.tagger(t_t) + nlp.parser(t_t) + nlp.parser(t_t) + ss(t_t) + From d8391b1c4d344f12c89d78bce64779b24b35d658 Mon Sep 17 00:00:00 2001 From: Matthew Honnibal Date: Fri, 20 Oct 2017 16:49:36 +0200 Subject: [PATCH 23/77] Fix #1434: Matcher failed on ending ? if no token --- spacy/matcher.pyx | 2 +- spacy/tests/regression/test_issue1434.py | 22 ++++++++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) create mode 100644 spacy/tests/regression/test_issue1434.py diff --git a/spacy/matcher.pyx b/spacy/matcher.pyx index 24d0a9836..fa67f32d6 100644 --- a/spacy/matcher.pyx +++ b/spacy/matcher.pyx @@ -391,7 +391,7 @@ cdef class Matcher: matches.append((ent_id, start, end)) # Look for open patterns that are actually satisfied for state in partials: - while state.second.quantifier in (ZERO, ZERO_PLUS): + while state.second.quantifier in (ZERO, ZERO_ONE, ZERO_PLUS): state.second += 1 if state.second.nr_attr == 0: start = state.first diff --git a/spacy/tests/regression/test_issue1434.py b/spacy/tests/regression/test_issue1434.py new file mode 100644 index 000000000..ec3a34bb0 --- /dev/null +++ b/spacy/tests/regression/test_issue1434.py @@ -0,0 +1,22 @@ +from __future__ import unicode_literals + +from spacy.tokens import Doc +from spacy.vocab import Vocab +from spacy.matcher import Matcher +from spacy.lang.lex_attrs import LEX_ATTRS + + +def test_issue1434(): + '''Test matches occur when optional element at end of short doc''' + vocab = Vocab(lex_attr_getters=LEX_ATTRS) + hello_world = Doc(vocab, words=['Hello', 'World']) + hello = Doc(vocab, words=['Hello']) + + matcher = Matcher(vocab) + matcher.add('MyMatcher', None, + [ {'ORTH': 'Hello' }, {'IS_ALPHA': True, 'OP': '?'} ]) + + matches = matcher(hello_world) + assert matches + matches = matcher(hello) + assert matches From d2fe56a5779fbc56b1b8db2b16dc45443d1e076c Mon Sep 17 00:00:00 2001 From: Ramanan Balakrishnan Date: Fri, 20 Oct 2017 23:58:00 +0530 Subject: [PATCH 24/77] Add LCA matrix for spans and docs --- spacy/tests/doc/test_doc_api.py | 7 +++++ spacy/tests/spans/test_span.py | 11 ++++++++ spacy/tokens/doc.pyx | 48 +++++++++++++++++++++++++++++++ spacy/tokens/span.pyx | 50 +++++++++++++++++++++++++++++++++ 4 files changed, 116 insertions(+) diff --git 
a/spacy/tests/doc/test_doc_api.py b/spacy/tests/doc/test_doc_api.py index cbe1bbc66..5e052f771 100644 --- a/spacy/tests/doc/test_doc_api.py +++ b/spacy/tests/doc/test_doc_api.py @@ -217,6 +217,13 @@ def test_doc_api_has_vector(en_tokenizer, text_file, text, vectors): doc = en_tokenizer(text) assert doc.has_vector +def test_lowest_common_ancestor(en_tokenizer): + tokens = en_tokenizer('the lazy dog slept') + doc = get_doc(tokens.vocab, [t.text for t in tokens], heads=[2, 1, 1, 0]) + lca = doc.get_lca_matrix() + assert(lca[1, 1] == 1) + assert(lca[0, 1] == 2) + assert(lca[1, 2] == 2) def test_parse_tree(en_tokenizer): """Tests doc.print_tree() method.""" diff --git a/spacy/tests/spans/test_span.py b/spacy/tests/spans/test_span.py index 7ed9333b8..5e7c638b6 100644 --- a/spacy/tests/spans/test_span.py +++ b/spacy/tests/spans/test_span.py @@ -55,6 +55,17 @@ def test_spans_span_sent(doc): assert doc[6:7].sent.root.left_edge.text == 'This' +def test_spans_lca_matrix(en_tokenizer): + """Test span's lca matrix generation""" + tokens = en_tokenizer('the lazy dog slept') + doc = get_doc(tokens.vocab, [t.text for t in tokens], heads=[2, 1, 1, 0]) + lca = doc[:2].get_lca_matrix() + assert(lca[0, 0] == 0) + assert(lca[0, 1] == -1) + assert(lca[1, 0] == -1) + assert(lca[1, 1] == 1) + + def test_spans_default_sentiment(en_tokenizer): """Test span.sentiment property's default averaging behaviour""" text = "good stuff bad stuff" diff --git a/spacy/tokens/doc.pyx b/spacy/tokens/doc.pyx index 809f178f8..fa5b4ba28 100644 --- a/spacy/tokens/doc.pyx +++ b/spacy/tokens/doc.pyx @@ -660,6 +660,54 @@ cdef class Doc: self.is_tagged = bool(TAG in attrs or POS in attrs) return self + def get_lca_matrix(self): + ''' + Calculates the lowest common ancestor matrix + for a given Spacy doc. + Returns LCA matrix containing the integer index + of the ancestor, or -1 if no common ancestor is + found (ex if span excludes a necessary ancestor). + Apologies about the recursion, but the + impact on performance is negligible given + the natural limitations on the depth of a typical human sentence. + ''' + # Efficiency notes: + # + # We can easily improve the performance here by iterating in Cython. + # To loop over the tokens in Cython, the easiest way is: + # for token in doc.c[:doc.c.length]: + # head = token + token.head + # Both token and head will be TokenC* here. The token.head attribute + # is an integer offset. + def __pairwise_lca(token_j, token_k, lca_matrix): + if lca_matrix[token_j.i][token_k.i] != -2: + return lca_matrix[token_j.i][token_k.i] + elif token_j == token_k: + lca_index = token_j.i + elif token_k.head == token_j: + lca_index = token_j.i + elif token_j.head == token_k: + lca_index = token_k.i + elif (token_j.head == token_j) and (token_k.head == token_k): + lca_index = -1 + else: + lca_index = __pairwise_lca(token_j.head, token_k.head, lca_matrix) + lca_matrix[token_j.i][token_k.i] = lca_index + lca_matrix[token_k.i][token_j.i] = lca_index + + return lca_index + + lca_matrix = numpy.empty((len(self), len(self)), dtype=numpy.int32) + lca_matrix.fill(-2) + for j in range(len(self)): + token_j = self[j] + for k in range(j, len(self)): + token_k = self[k] + lca_matrix[j][k] = __pairwise_lca(token_j, token_k, lca_matrix) + lca_matrix[k][j] = lca_matrix[j][k] + + return lca_matrix + def to_disk(self, path, **exclude): """Save the current state to a directory. 
diff --git a/spacy/tokens/span.pyx b/spacy/tokens/span.pyx index 3b31c50c0..b0a170ddf 100644 --- a/spacy/tokens/span.pyx +++ b/spacy/tokens/span.pyx @@ -177,6 +177,56 @@ cdef class Span: return 0.0 return numpy.dot(self.vector, other.vector) / (self.vector_norm * other.vector_norm) + def get_lca_matrix(self): + ''' + Calculates the lowest common ancestor matrix + for a given Spacy span. + Returns LCA matrix containing the integer index + of the ancestor, or -1 if no common ancestor is + found (ex if span excludes a necessary ancestor). + Apologies about the recursion, but the + impact on performance is negligible given + the natural limitations on the depth of a typical human sentence. + ''' + + def __pairwise_lca(token_j, token_k, lca_matrix, margins): + offset = margins[0] + token_k_head = token_k.head if token_k.head.i in range(*margins) else token_k + token_j_head = token_j.head if token_j.head.i in range(*margins) else token_j + token_j_i = token_j.i - offset + token_k_i = token_k.i - offset + + if lca_matrix[token_j_i][token_k_i] != -2: + return lca_matrix[token_j_i][token_k_i] + elif token_j == token_k: + lca_index = token_j_i + elif token_k_head == token_j: + lca_index = token_j_i + elif token_j_head == token_k: + lca_index = token_k_i + elif (token_j_head == token_j) and (token_k_head == token_k): + lca_index = -1 + else: + lca_index = __pairwise_lca(token_j_head, token_k_head, lca_matrix, margins) + + lca_matrix[token_j_i][token_k_i] = lca_index + lca_matrix[token_k_i][token_j_i] = lca_index + + return lca_index + + lca_matrix = numpy.empty((len(self), len(self)), dtype=numpy.int32) + lca_matrix.fill(-2) + margins = [self.start, self.end] + + for j in range(len(self)): + token_j = self[j] + for k in range(len(self)): + token_k = self[k] + lca_matrix[j][k] = __pairwise_lca(token_j, token_k, lca_matrix, margins) + lca_matrix[k][j] = lca_matrix[j][k] + + return lca_matrix + cpdef np.ndarray to_array(self, object py_attr_ids): """Given a list of M attribute IDs, export the tokens to a numpy `ndarray` of shape `(N, M)`, where `N` is the length of the document. From 8f8bccecb9427448563b2d2c4c3cf7fb4eecdfb1 Mon Sep 17 00:00:00 2001 From: Matthew Honnibal Date: Sat, 21 Oct 2017 00:51:42 +0200 Subject: [PATCH 25/77] Patch deserialisation for invalid loads, to avoid model failure --- spacy/vocab.pyx | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/spacy/vocab.pyx b/spacy/vocab.pyx index 205e5a2af..da4d21026 100644 --- a/spacy/vocab.pyx +++ b/spacy/vocab.pyx @@ -400,6 +400,7 @@ cdef class Vocab: cdef int j = 0 cdef SerializedLexemeC lex_data chunk_size = sizeof(lex_data.data) + cdef void* ptr cdef unsigned char* bytes_ptr = bytes_data for i in range(0, len(bytes_data), chunk_size): lexeme = self.mem.alloc(1, sizeof(LexemeC)) @@ -407,6 +408,9 @@ cdef class Vocab: lex_data.data[j] = bytes_ptr[i+j] Lexeme.c_from_bytes(lexeme, lex_data) + ptr = self.strings._map.get(lexeme.orth) + if ptr == NULL: + continue py_str = self.strings[lexeme.orth] assert self.strings[py_str] == lexeme.orth, (py_str, lexeme.orth) key = hash_string(py_str) From 490ad3eaf070f2e210869c37b70edf3fcd504da7 Mon Sep 17 00:00:00 2001 From: Matthew Honnibal Date: Sat, 21 Oct 2017 00:52:14 +0200 Subject: [PATCH 26/77] Check that empty strings are handled. 
Closes #1242
---
 spacy/tests/regression/test_issue1242.py | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)
 create mode 100644 spacy/tests/regression/test_issue1242.py

diff --git a/spacy/tests/regression/test_issue1242.py b/spacy/tests/regression/test_issue1242.py
new file mode 100644
index 000000000..50dc8c37e
--- /dev/null
+++ b/spacy/tests/regression/test_issue1242.py
@@ -0,0 +1,23 @@
+from __future__ import unicode_literals
+import pytest
+from ...lang.en import English
+from ...util import load_model
+
+
+def test_issue1242_empty_strings():
+    nlp = English()
+    doc = nlp('')
+    assert len(doc) == 0
+    docs = list(nlp.pipe(['', 'hello']))
+    assert len(docs[0]) == 0
+    assert len(docs[1]) == 1
+
+
+@pytest.mark.models('en')
+def test_issue1242_empty_strings_en_core_web_sm():
+    nlp = load_model('en_core_web_sm')
+    doc = nlp('')
+    assert len(doc) == 0
+    docs = list(nlp.pipe(['', 'hello']))
+    assert len(docs[0]) == 0
+    assert len(docs[1]) == 1

From 84c6c20d1c640f665ff98ef8c11b69a2d4038812 Mon Sep 17 00:00:00 2001
From: Jeroen Bobbeldijk
Date: Sun, 22 Oct 2017 15:18:36 +0200
Subject: [PATCH 27/77] Fix #1444: fix pipeline logic and wrong parameter in update call

---
 examples/training/train_new_entity_type.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/examples/training/train_new_entity_type.py b/examples/training/train_new_entity_type.py
index ab69285a6..5f10beebc 100644
--- a/examples/training/train_new_entity_type.py
+++ b/examples/training/train_new_entity_type.py
@@ -56,8 +56,7 @@ def train_ner(nlp, train_data, output_dir):
         losses = {}
         for batch in minibatch(get_gold_parses(nlp.make_doc, train_data), size=3):
             docs, golds = zip(*batch)
-            nlp.update(docs, golds, losses=losses, sgd=optimizer, update_shared=True,
-                       drop=0.35)
+            nlp.update(docs, golds, losses=losses, sgd=optimizer, drop=0.35)
         print(losses)
     if not output_dir:
         return
@@ -100,9 +99,10 @@ def main(model_name, output_directory=None):
         )
     ]

-    nlp.pipeline.append(TokenVectorEncoder(nlp.vocab))
-    nlp.pipeline.append(NeuralEntityRecognizer(nlp.vocab))
-    nlp.pipeline[-1].add_label('ANIMAL')
+    nlp.add_pipe(TokenVectorEncoder(nlp.vocab))
+    ner = NeuralEntityRecognizer(nlp.vocab)
+    ner.add_label('ANIMAL')
+    nlp.add_pipe(ner)
     train_ner(nlp, train_data, output_directory)

     # Test that the entity is recognized

From a31f048b4d05ff5b30ff456de0460f51a192ee75 Mon Sep 17 00:00:00 2001
From: ines
Date: Mon, 23 Oct 2017 10:38:06 +0200
Subject: [PATCH 28/77] Fix formatting

---
 spacy/tokens/span.pyx | 1 +
 1 file changed, 1 insertion(+)

diff --git a/spacy/tokens/span.pyx b/spacy/tokens/span.pyx
index 3b31c50c0..05dcce1ba 100644
--- a/spacy/tokens/span.pyx
+++ b/spacy/tokens/span.pyx
@@ -129,6 +129,7 @@ cdef class Span:
     def _(self):
         return Underscore(Underscore.span_extensions, self,
                           start=self.start_char, end=self.end_char)
+
     def as_doc(self):
         '''Create a Doc object view of the Span's data.


From 3f0a157b33a24c63b52c7c714a55573a0f096398 Mon Sep 17 00:00:00 2001
From: ines
Date: Mon, 23 Oct 2017 10:38:13 +0200
Subject: [PATCH 29/77] Fix typo

---
 website/api/span.jade | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/api/span.jade b/website/api/span.jade
index 6bff45a9b..399d0bd33 100644
--- a/website/api/span.jade
+++ b/website/api/span.jade
@@ -284,7 +284,7 @@ p Retokenize the document, such that the span is merged into a single token.

 +aside-code("Example").
doc = nlp(u'I like New York in Autumn.') - span = doc[2:3] + span = doc[2:4] span.merge() assert len(doc) == 6 assert doc[2].text == 'New York' From db15902e84df8c3187479afb8ccfc3ae02aedb33 Mon Sep 17 00:00:00 2001 From: ines Date: Mon, 23 Oct 2017 10:38:21 +0200 Subject: [PATCH 30/77] Tidy up --- website/assets/css/_components/_code.sass | 1 - website/assets/css/_components/_navigation.sass | 3 --- 2 files changed, 4 deletions(-) diff --git a/website/assets/css/_components/_code.sass b/website/assets/css/_components/_code.sass index f83e96d29..eaf0980e1 100644 --- a/website/assets/css/_components/_code.sass +++ b/website/assets/css/_components/_code.sass @@ -63,7 +63,6 @@ code padding: 0.2rem 0.4rem border-radius: 0.25rem font-family: $font-code - white-space: nowrap margin: 0 box-decoration-break: clone white-space: nowrap diff --git a/website/assets/css/_components/_navigation.sass b/website/assets/css/_components/_navigation.sass index 0e4af8267..1543de5fb 100644 --- a/website/assets/css/_components/_navigation.sass +++ b/website/assets/css/_components/_navigation.sass @@ -14,9 +14,6 @@ width: 100% box-shadow: $box-shadow - //@include breakpoint(min, md) - // position: fixed - &.is-fixed animation: slideInDown 0.5s ease-in-out position: fixed From 7701984f13913a4f114e42f4e0bb009e8c4f6c47 Mon Sep 17 00:00:00 2001 From: ines Date: Mon, 23 Oct 2017 10:38:27 +0200 Subject: [PATCH 31/77] Document Span.as_doc --- website/api/span.jade | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/website/api/span.jade b/website/api/span.jade index 399d0bd33..2a55409f1 100644 --- a/website/api/span.jade +++ b/website/api/span.jade @@ -302,6 +302,25 @@ p Retokenize the document, such that the span is merged into a single token. +cell #[code Token] +cell The newly merged token. ++h(2, "as_doc") Span.as_doc + +p + | Create a #[code Doc] object view of the #[code Span]'s data. Mostly + | useful for C-typed interfaces. + ++aside-code("Example"). + doc = nlp(u'I like New York in Autumn.') + span = doc[2:4] + doc2 = span.as_doc() + assert doc2.text == 'New York' + ++table(["Name", "Type", "Description"]) + +row("foot") + +cell returns + +cell #[code Doc] + +cell A #[code Doc] object of the #[code Span]'s content. + + +h(2, "root") Span.root +tag property +tag-model("parse") From 667575500564355621f0dbaabaf4209e3fe6b24a Mon Sep 17 00:00:00 2001 From: ines Date: Tue, 24 Oct 2017 12:05:10 +0200 Subject: [PATCH 32/77] Add training data JSON example --- examples/training/training-data.json | 1103 ++++++++++++++++++++++++++ 1 file changed, 1103 insertions(+) create mode 100644 examples/training/training-data.json diff --git a/examples/training/training-data.json b/examples/training/training-data.json new file mode 100644 index 000000000..8b4956f05 --- /dev/null +++ b/examples/training/training-data.json @@ -0,0 +1,1103 @@ +[ + { + "id":0, + "paragraphs":[ + { + "sentences":[ + { + "tokens":[ + { + "dep":"prep", + "head":44, + "tag":"IN", + "orth":"In" + }, + { + "dep":"det", + "head":3, + "tag":"DT", + "orth":"an" + }, + { + "dep":"nn", + "head":2, + "tag":"NNP", + "orth":"Oct." 
+ }, + { + "dep":"num", + "head":1, + "tag":"CD", + "orth":"19" + }, + { + "dep":"pobj", + "head":-4, + "tag":"NN", + "orth":"review" + }, + { + "dep":"prep", + "head":-1, + "tag":"IN", + "orth":"of" + }, + { + "dep":"punct", + "head":2, + "tag":"``", + "orth":"``" + }, + { + "dep":"det", + "head":1, + "tag":"DT", + "orth":"The" + }, + { + "dep":"pobj", + "head":-3, + "tag":"NN", + "orth":"Misanthrope" + }, + { + "dep":"punct", + "head":-1, + "tag":"''", + "orth":"''" + }, + { + "dep":"prep", + "head":-2, + "tag":"IN", + "orth":"at" + }, + { + "dep":"poss", + "head":3, + "tag":"NNP", + "orth":"Chicago" + }, + { + "dep":"possessive", + "head":-1, + "tag":"POS", + "orth":"'s" + }, + { + "dep":"nn", + "head":1, + "tag":"NNP", + "orth":"Goodman" + }, + { + "dep":"pobj", + "head":-4, + "tag":"NNP", + "orth":"Theatre" + }, + { + "dep":"punct", + "head":4, + "tag":"-LRB-", + "orth":"-LRB-" + }, + { + "dep":"punct", + "head":3, + "tag":"``", + "orth":"``" + }, + { + "dep":"amod", + "head":1, + "tag":"VBN", + "orth":"Revitalized" + }, + { + "dep":"nsubj", + "head":1, + "tag":"NNS", + "orth":"Classics" + }, + { + "dep":"dep", + "head":-15, + "tag":"VBP", + "orth":"Take" + }, + { + "dep":"det", + "head":1, + "tag":"DT", + "orth":"the" + }, + { + "dep":"dobj", + "head":-2, + "tag":"NN", + "orth":"Stage" + }, + { + "dep":"prep", + "head":-3, + "tag":"IN", + "orth":"in" + }, + { + "dep":"nn", + "head":1, + "tag":"NNP", + "orth":"Windy" + }, + { + "dep":"pobj", + "head":-2, + "tag":"NNP", + "orth":"City" + }, + { + "dep":"punct", + "head":-6, + "tag":",", + "orth":"," + }, + { + "dep":"punct", + "head":-7, + "tag":"''", + "orth":"''" + }, + { + "dep":"dep", + "head":-8, + "tag":"NN", + "orth":"Leisure" + }, + { + "dep":"cc", + "head":-1, + "tag":"CC", + "orth":"&" + }, + { + "dep":"conj", + "head":-2, + "tag":"NNS", + "orth":"Arts" + }, + { + "dep":"punct", + "head":-11, + "tag":"-RRB-", + "orth":"-RRB-" + }, + { + "dep":"punct", + "head":13, + "tag":",", + "orth":"," + }, + { + "dep":"det", + "head":1, + "tag":"DT", + "orth":"the" + }, + { + "dep":"nsubjpass", + "head":11, + "tag":"NN", + "orth":"role" + }, + { + "dep":"prep", + "head":-1, + "tag":"IN", + "orth":"of" + }, + { + "dep":"pobj", + "head":-1, + "tag":"NNP", + "orth":"Celimene" + }, + { + "dep":"punct", + "head":-3, + "tag":",", + "orth":"," + }, + { + "dep":"partmod", + "head":-4, + "tag":"VBN", + "orth":"played" + }, + { + "dep":"prep", + "head":-1, + "tag":"IN", + "orth":"by" + }, + { + "dep":"nn", + "head":1, + "tag":"NNP", + "orth":"Kim" + }, + { + "dep":"pobj", + "head":-2, + "tag":"NNP", + "orth":"Cattrall" + }, + { + "dep":"punct", + "head":-8, + "tag":",", + "orth":"," + }, + { + "dep":"auxpass", + "head":2, + "tag":"VBD", + "orth":"was" + }, + { + "dep":"advmod", + "head":1, + "tag":"RB", + "orth":"mistakenly" + }, + { + "dep":"ROOT", + "head":0, + "tag":"VBN", + "orth":"attributed" + }, + { + "dep":"prep", + "head":-1, + "tag":"TO", + "orth":"to" + }, + { + "dep":"nn", + "head":1, + "tag":"NNP", + "orth":"Christina" + }, + { + "dep":"pobj", + "head":-2, + "tag":"NNP", + "orth":"Haag" + }, + { + "dep":"punct", + "head":-4, + "tag":".", + "orth":"." + } + ] + } + ] + } + ] + }, + { + "id":1, + "paragraphs":[ + { + "sentences":[ + { + "tokens":[ + { + "dep":"nn", + "head":1, + "tag":"NNP", + "orth":"Ms." 
+ }, + { + "dep":"nsubj", + "head":1, + "tag":"NNP", + "orth":"Haag" + }, + { + "dep":"ROOT", + "head":0, + "tag":"VBZ", + "orth":"plays" + }, + { + "dep":"dobj", + "head":-1, + "tag":"NNP", + "orth":"Elianti" + }, + { + "dep":"punct", + "head":-2, + "tag":".", + "orth":"." + } + ] + } + ] + } + ] + }, + { + "id":2, + "paragraphs":[ + { + "sentences":[ + { + "tokens":[ + { + "dep":"nn", + "head":3, + "tag":"NNP", + "orth":"Rolls-Royce" + }, + { + "dep":"nn", + "head":2, + "tag":"NNP", + "orth":"Motor" + }, + { + "dep":"nn", + "head":1, + "tag":"NNPS", + "orth":"Cars" + }, + { + "dep":"nsubj", + "head":1, + "tag":"NNP", + "orth":"Inc." + }, + { + "dep":"ROOT", + "head":0, + "tag":"VBD", + "orth":"said" + }, + { + "dep":"nsubj", + "head":1, + "tag":"PRP", + "orth":"it" + }, + { + "dep":"ccomp", + "head":-2, + "tag":"VBZ", + "orth":"expects" + }, + { + "dep":"poss", + "head":2, + "tag":"PRP$", + "orth":"its" + }, + { + "dep":"nn", + "head":1, + "tag":"NNP", + "orth":"U.S." + }, + { + "dep":"nsubj", + "head":3, + "tag":"NNS", + "orth":"sales" + }, + { + "dep":"aux", + "head":2, + "tag":"TO", + "orth":"to" + }, + { + "dep":"cop", + "head":1, + "tag":"VB", + "orth":"remain" + }, + { + "dep":"xcomp", + "head":-6, + "tag":"JJ", + "orth":"steady" + }, + { + "dep":"prep", + "head":-1, + "tag":"IN", + "orth":"at" + }, + { + "dep":"quantmod", + "head":1, + "tag":"IN", + "orth":"about" + }, + { + "dep":"num", + "head":1, + "tag":"CD", + "orth":"1,200" + }, + { + "dep":"pobj", + "head":-3, + "tag":"NNS", + "orth":"cars" + }, + { + "dep":"prep", + "head":-5, + "tag":"IN", + "orth":"in" + }, + { + "dep":"pobj", + "head":-1, + "tag":"CD", + "orth":"1990" + }, + { + "dep":"punct", + "head":-15, + "tag":".", + "orth":"." + } + ] + } + ] + } + ] + }, + { + "id":3, + "paragraphs":[ + { + "sentences":[ + { + "tokens":[ + { + "dep":"det", + "head":3, + "tag":"DT", + "orth":"The" + }, + { + "dep":"nn", + "head":2, + "tag":"NN", + "orth":"luxury" + }, + { + "dep":"nn", + "head":1, + "tag":"NN", + "orth":"auto" + }, + { + "dep":"nsubj", + "head":3, + "tag":"NN", + "orth":"maker" + }, + { + "dep":"amod", + "head":1, + "tag":"JJ", + "orth":"last" + }, + { + "dep":"tmod", + "head":1, + "tag":"NN", + "orth":"year" + }, + { + "dep":"ROOT", + "head":0, + "tag":"VBD", + "orth":"sold" + }, + { + "dep":"num", + "head":1, + "tag":"CD", + "orth":"1,214" + }, + { + "dep":"dobj", + "head":-2, + "tag":"NNS", + "orth":"cars" + }, + { + "dep":"prep", + "head":-3, + "tag":"IN", + "orth":"in" + }, + { + "dep":"det", + "head":1, + "tag":"DT", + "orth":"the" + }, + { + "dep":"pobj", + "head":-2, + "tag":"NNP", + "orth":"U.S." 
+ } + ] + } + ] + } + ] + }, + { + "id":4, + "paragraphs":[ + { + "sentences":[ + { + "tokens":[ + { + "dep":"nn", + "head":1, + "tag":"NNP", + "orth":"Howard" + }, + { + "dep":"nsubj", + "head":8, + "tag":"NNP", + "orth":"Mosher" + }, + { + "dep":"punct", + "head":-1, + "tag":",", + "orth":"," + }, + { + "dep":"appos", + "head":-2, + "tag":"NN", + "orth":"president" + }, + { + "dep":"cc", + "head":-1, + "tag":"CC", + "orth":"and" + }, + { + "dep":"amod", + "head":2, + "tag":"JJ", + "orth":"chief" + }, + { + "dep":"nn", + "head":1, + "tag":"NN", + "orth":"executive" + }, + { + "dep":"conj", + "head":-4, + "tag":"NN", + "orth":"officer" + }, + { + "dep":"punct", + "head":-7, + "tag":",", + "orth":"," + }, + { + "dep":"ROOT", + "head":0, + "tag":"VBD", + "orth":"said" + }, + { + "dep":"nsubj", + "head":1, + "tag":"PRP", + "orth":"he" + }, + { + "dep":"ccomp", + "head":-2, + "tag":"VBZ", + "orth":"anticipates" + }, + { + "dep":"dobj", + "head":-1, + "tag":"NN", + "orth":"growth" + }, + { + "dep":"prep", + "head":-1, + "tag":"IN", + "orth":"for" + }, + { + "dep":"det", + "head":3, + "tag":"DT", + "orth":"the" + }, + { + "dep":"nn", + "head":2, + "tag":"NN", + "orth":"luxury" + }, + { + "dep":"nn", + "head":1, + "tag":"NN", + "orth":"auto" + }, + { + "dep":"pobj", + "head":-4, + "tag":"NN", + "orth":"maker" + }, + { + "dep":"prep", + "head":-6, + "tag":"IN", + "orth":"in" + }, + { + "dep":"pobj", + "head":-1, + "tag":"NNP", + "orth":"Britain" + }, + { + "dep":"cc", + "head":-1, + "tag":"CC", + "orth":"and" + }, + { + "dep":"conj", + "head":-2, + "tag":"NNP", + "orth":"Europe" + }, + { + "dep":"punct", + "head":-4, + "tag":",", + "orth":"," + }, + { + "dep":"cc", + "head":-5, + "tag":"CC", + "orth":"and" + }, + { + "dep":"conj", + "head":-6, + "tag":"IN", + "orth":"in" + }, + { + "dep":"amod", + "head":1, + "tag":"JJ", + "orth":"Far" + }, + { + "dep":"amod", + "head":1, + "tag":"JJ", + "orth":"Eastern" + }, + { + "dep":"pobj", + "head":-3, + "tag":"NNS", + "orth":"markets" + }, + { + "dep":"punct", + "head":-19, + "tag":".", + "orth":"." + } + ] + } + ] + } + ] + }, + { + "id":5, + "paragraphs":[ + { + "sentences":[ + { + "tokens":[ + { + "dep":"nn", + "head":2, + "tag":"NNP", + "orth":"BELL" + }, + { + "dep":"nn", + "head":1, + "tag":"NNP", + "orth":"INDUSTRIES" + }, + { + "dep":"nsubj", + "head":1, + "tag":"NNP", + "orth":"Inc." + }, + { + "dep":"ROOT", + "head":0, + "tag":"VBD", + "orth":"increased" + }, + { + "dep":"poss", + "head":1, + "tag":"PRP$", + "orth":"its" + }, + { + "dep":"dobj", + "head":-2, + "tag":"NN", + "orth":"quarterly" + }, + { + "dep":"prep", + "head":-3, + "tag":"TO", + "orth":"to" + }, + { + "dep":"num", + "head":1, + "tag":"CD", + "orth":"10" + }, + { + "dep":"pobj", + "head":-2, + "tag":"NNS", + "orth":"cents" + }, + { + "dep":"prep", + "head":-6, + "tag":"IN", + "orth":"from" + }, + { + "dep":"num", + "head":1, + "tag":"CD", + "orth":"seven" + }, + { + "dep":"pobj", + "head":-2, + "tag":"NNS", + "orth":"cents" + }, + { + "dep":"det", + "head":1, + "tag":"DT", + "orth":"a" + }, + { + "dep":"npadvmod", + "head":-2, + "tag":"NN", + "orth":"share" + }, + { + "dep":"punct", + "head":-11, + "tag":".", + "orth":"." 
+ } + ] + } + ] + } + ] + }, + { + "id":6, + "paragraphs":[ + { + "sentences":[ + { + "tokens":[ + { + "dep":"det", + "head":2, + "tag":"DT", + "orth":"The" + }, + { + "dep":"amod", + "head":1, + "tag":"JJ", + "orth":"new" + }, + { + "dep":"nsubj", + "head":3, + "tag":"NN", + "orth":"rate" + }, + { + "dep":"aux", + "head":2, + "tag":"MD", + "orth":"will" + }, + { + "dep":"cop", + "head":1, + "tag":"VB", + "orth":"be" + }, + { + "dep":"ROOT", + "head":0, + "tag":"JJ", + "orth":"payable" + }, + { + "dep":"tmod", + "head":-1, + "tag":"NNP", + "orth":"Feb." + }, + { + "dep":"num", + "head":-1, + "tag":"CD", + "orth":"15" + }, + { + "dep":"punct", + "head":-3, + "tag":".", + "orth":"." + } + ] + } + ] + } + ] + }, + { + "id":7, + "paragraphs":[ + { + "sentences":[ + { + "tokens":[ + { + "dep":"det", + "head":2, + "tag":"DT", + "orth":"A" + }, + { + "dep":"nn", + "head":1, + "tag":"NN", + "orth":"record" + }, + { + "dep":"nsubjpass", + "head":4, + "tag":"NN", + "orth":"date" + }, + { + "dep":"aux", + "head":3, + "tag":"VBZ", + "orth":"has" + }, + { + "dep":"neg", + "head":2, + "tag":"RB", + "orth":"n't" + }, + { + "dep":"auxpass", + "head":1, + "tag":"VBN", + "orth":"been" + }, + { + "dep":"ROOT", + "head":0, + "tag":"VBN", + "orth":"set" + }, + { + "dep":"punct", + "head":-1, + "tag":".", + "orth":"." + } + ] + } + ] + } + ] + }, + { + "id":8, + "paragraphs":[ + { + "sentences":[ + { + "tokens":[ + { + "dep":"nsubj", + "head":7, + "tag":"NNP", + "orth":"Bell" + }, + { + "dep":"punct", + "head":-1, + "tag":",", + "orth":"," + }, + { + "dep":"partmod", + "head":-2, + "tag":"VBN", + "orth":"based" + }, + { + "dep":"prep", + "head":-1, + "tag":"IN", + "orth":"in" + }, + { + "dep":"nn", + "head":1, + "tag":"NNP", + "orth":"Los" + }, + { + "dep":"pobj", + "head":-2, + "tag":"NNP", + "orth":"Angeles" + }, + { + "dep":"punct", + "head":-6, + "tag":",", + "orth":"," + }, + { + "dep":"ROOT", + "head":0, + "tag":"VBZ", + "orth":"makes" + }, + { + "dep":"cc", + "head":-1, + "tag":"CC", + "orth":"and" + }, + { + "dep":"conj", + "head":-2, + "tag":"VBZ", + "orth":"distributes" + }, + { + "dep":"amod", + "head":5, + "tag":"JJ", + "orth":"electronic" + }, + { + "dep":"punct", + "head":-1, + "tag":",", + "orth":"," + }, + { + "dep":"conj", + "head":-2, + "tag":"NN", + "orth":"computer" + }, + { + "dep":"cc", + "head":-3, + "tag":"CC", + "orth":"and" + }, + { + "dep":"conj", + "head":-4, + "tag":"NN", + "orth":"building" + }, + { + "dep":"dobj", + "head":-8, + "tag":"NNS", + "orth":"products" + }, + { + "dep":"punct", + "head":-9, + "tag":".", + "orth":"." 
+ } + ] + } + ] + } + ] + } +] From a68d89a4f351f8df2bfceeac77540b23e29827be Mon Sep 17 00:00:00 2001 From: Matthew Honnibal Date: Tue, 24 Oct 2017 12:05:25 +0200 Subject: [PATCH 33/77] Add failing test for bug #1375 -- no out-of-bounds error for token.nbor() --- spacy/tests/regression/test_issue1375.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 spacy/tests/regression/test_issue1375.py diff --git a/spacy/tests/regression/test_issue1375.py b/spacy/tests/regression/test_issue1375.py new file mode 100644 index 000000000..72070758d --- /dev/null +++ b/spacy/tests/regression/test_issue1375.py @@ -0,0 +1,16 @@ +from __future__ import unicode_literals +import pytest +from ...vocab import Vocab +from ...tokens.doc import Doc + +@pytest.mark.xfail +def test_issue1375(): + '''Test that token.nbor() raises IndexError for out-of-bounds access.''' + doc = Doc(Vocab(), words=['0', '1', '2']) + with pytest.raises(IndexError): + assert doc[0].nbor(-1) + assert doc[1].nbor(-1).text == '0' + with pytest.raises(IndexError): + assert doc[2].nbor(1) + assert doc[1].nbor(1).text == '2' + From b66b8f028b256447d0694a5d8cb04b6554e2e2d0 Mon Sep 17 00:00:00 2001 From: Matthew Honnibal Date: Tue, 24 Oct 2017 12:10:39 +0200 Subject: [PATCH 34/77] Fix #1375 -- out-of-bounds on token.nbor() --- spacy/tests/regression/test_issue1375.py | 2 +- spacy/tokens/token.pyx | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/spacy/tests/regression/test_issue1375.py b/spacy/tests/regression/test_issue1375.py index 72070758d..6f74d9a6d 100644 --- a/spacy/tests/regression/test_issue1375.py +++ b/spacy/tests/regression/test_issue1375.py @@ -3,7 +3,7 @@ import pytest from ...vocab import Vocab from ...tokens.doc import Doc -@pytest.mark.xfail + def test_issue1375(): '''Test that token.nbor() raises IndexError for out-of-bounds access.''' doc = Doc(Vocab(), words=['0', '1', '2']) diff --git a/spacy/tokens/token.pyx b/spacy/tokens/token.pyx index 9ff59eabe..514934ca7 100644 --- a/spacy/tokens/token.pyx +++ b/spacy/tokens/token.pyx @@ -127,6 +127,9 @@ cdef class Token: i (int): The relative position of the token to get. Defaults to 1. RETURNS (Token): The token at position `self.doc[self.i+i]`. """ + if self.i+i < 0 or (self.i+i >= len(self.doc)): + msg = "Error accessing doc[%d].nbor(%d), for doc of length %d" + raise IndexError(msg % (self.i, i, len(self.doc))) return self.doc[self.i+i] def similarity(self, other): From 9bf57510644f0845c33a210364b709f6268eba81 Mon Sep 17 00:00:00 2001 From: ines Date: Tue, 24 Oct 2017 12:22:17 +0200 Subject: [PATCH 35/77] Pretty-print JSON --- examples/training/training-data.json | 1384 +++++++++++++------------- 1 file changed, 692 insertions(+), 692 deletions(-) diff --git a/examples/training/training-data.json b/examples/training/training-data.json index 8b4956f05..532ab4ea8 100644 --- a/examples/training/training-data.json +++ b/examples/training/training-data.json @@ -1,304 +1,304 @@ [ { - "id":0, - "paragraphs":[ + "id": 0, + "paragraphs": [ { - "sentences":[ + "sentences": [ { - "tokens":[ + "tokens": [ { - "dep":"prep", - "head":44, - "tag":"IN", - "orth":"In" + "dep": "prep", + "head": 44, + "tag": "IN", + "orth": "In" }, { - "dep":"det", - "head":3, - "tag":"DT", - "orth":"an" + "dep": "det", + "head": 3, + "tag": "DT", + "orth": "an" }, { - "dep":"nn", - "head":2, - "tag":"NNP", - "orth":"Oct." + "dep": "nn", + "head": 2, + "tag": "NNP", + "orth": "Oct." 
}, { - "dep":"num", - "head":1, - "tag":"CD", - "orth":"19" + "dep": "num", + "head": 1, + "tag": "CD", + "orth": "19" }, { - "dep":"pobj", - "head":-4, - "tag":"NN", - "orth":"review" + "dep": "pobj", + "head": -4, + "tag": "NN", + "orth": "review" }, { - "dep":"prep", - "head":-1, - "tag":"IN", - "orth":"of" + "dep": "prep", + "head": -1, + "tag": "IN", + "orth": "of" }, { - "dep":"punct", - "head":2, - "tag":"``", - "orth":"``" + "dep": "punct", + "head": 2, + "tag": "``", + "orth": "``" }, { - "dep":"det", - "head":1, - "tag":"DT", - "orth":"The" + "dep": "det", + "head": 1, + "tag": "DT", + "orth": "The" }, { - "dep":"pobj", - "head":-3, - "tag":"NN", - "orth":"Misanthrope" + "dep": "pobj", + "head": -3, + "tag": "NN", + "orth": "Misanthrope" }, { - "dep":"punct", - "head":-1, - "tag":"''", - "orth":"''" + "dep": "punct", + "head": -1, + "tag": "''", + "orth": "''" }, { - "dep":"prep", - "head":-2, - "tag":"IN", - "orth":"at" + "dep": "prep", + "head": -2, + "tag": "IN", + "orth": "at" }, { - "dep":"poss", - "head":3, - "tag":"NNP", - "orth":"Chicago" + "dep": "poss", + "head": 3, + "tag": "NNP", + "orth": "Chicago" }, { - "dep":"possessive", - "head":-1, - "tag":"POS", - "orth":"'s" + "dep": "possessive", + "head": -1, + "tag": "POS", + "orth": "'s" }, { - "dep":"nn", - "head":1, - "tag":"NNP", - "orth":"Goodman" + "dep": "nn", + "head": 1, + "tag": "NNP", + "orth": "Goodman" }, { - "dep":"pobj", - "head":-4, - "tag":"NNP", - "orth":"Theatre" + "dep": "pobj", + "head": -4, + "tag": "NNP", + "orth": "Theatre" }, { - "dep":"punct", - "head":4, - "tag":"-LRB-", - "orth":"-LRB-" + "dep": "punct", + "head": 4, + "tag": "-LRB-", + "orth": "-LRB-" }, { - "dep":"punct", - "head":3, - "tag":"``", - "orth":"``" + "dep": "punct", + "head": 3, + "tag": "``", + "orth": "``" }, { - "dep":"amod", - "head":1, - "tag":"VBN", - "orth":"Revitalized" + "dep": "amod", + "head": 1, + "tag": "VBN", + "orth": "Revitalized" }, { - "dep":"nsubj", - "head":1, - "tag":"NNS", - "orth":"Classics" + "dep": "nsubj", + "head": 1, + "tag": "NNS", + "orth": "Classics" }, { - "dep":"dep", - "head":-15, - "tag":"VBP", - "orth":"Take" + "dep": "dep", + "head": -15, + "tag": "VBP", + "orth": "Take" }, { - "dep":"det", - "head":1, - "tag":"DT", - "orth":"the" + "dep": "det", + "head": 1, + "tag": "DT", + "orth": "the" }, { - "dep":"dobj", - "head":-2, - "tag":"NN", - "orth":"Stage" + "dep": "dobj", + "head": -2, + "tag": "NN", + "orth": "Stage" }, { - "dep":"prep", - "head":-3, - "tag":"IN", - "orth":"in" + "dep": "prep", + "head": -3, + "tag": "IN", + "orth": "in" }, { - "dep":"nn", - "head":1, - "tag":"NNP", - "orth":"Windy" + "dep": "nn", + "head": 1, + "tag": "NNP", + "orth": "Windy" }, { - "dep":"pobj", - "head":-2, - "tag":"NNP", - "orth":"City" + "dep": "pobj", + "head": -2, + "tag": "NNP", + "orth": "City" }, { - "dep":"punct", - "head":-6, - "tag":",", - "orth":"," + "dep": "punct", + "head": -6, + "tag": ",", + "orth": "," }, { - "dep":"punct", - "head":-7, - "tag":"''", - "orth":"''" + "dep": "punct", + "head": -7, + "tag": "''", + "orth": "''" }, { - "dep":"dep", - "head":-8, - "tag":"NN", - "orth":"Leisure" + "dep": "dep", + "head": -8, + "tag": "NN", + "orth": "Leisure" }, { - "dep":"cc", - "head":-1, - "tag":"CC", - "orth":"&" + "dep": "cc", + "head": -1, + "tag": "CC", + "orth": "&" }, { - "dep":"conj", - "head":-2, - "tag":"NNS", - "orth":"Arts" + "dep": "conj", + "head": -2, + "tag": "NNS", + "orth": "Arts" }, { - "dep":"punct", - "head":-11, - "tag":"-RRB-", - "orth":"-RRB-" + "dep": "punct", + "head": 
-11, + "tag": "-RRB-", + "orth": "-RRB-" }, { - "dep":"punct", - "head":13, - "tag":",", - "orth":"," + "dep": "punct", + "head": 13, + "tag": ",", + "orth": "," }, { - "dep":"det", - "head":1, - "tag":"DT", - "orth":"the" + "dep": "det", + "head": 1, + "tag": "DT", + "orth": "the" }, { - "dep":"nsubjpass", - "head":11, - "tag":"NN", - "orth":"role" + "dep": "nsubjpass", + "head": 11, + "tag": "NN", + "orth": "role" }, { - "dep":"prep", - "head":-1, - "tag":"IN", - "orth":"of" + "dep": "prep", + "head": -1, + "tag": "IN", + "orth": "of" }, { - "dep":"pobj", - "head":-1, - "tag":"NNP", - "orth":"Celimene" + "dep": "pobj", + "head": -1, + "tag": "NNP", + "orth": "Celimene" }, { - "dep":"punct", - "head":-3, - "tag":",", - "orth":"," + "dep": "punct", + "head": -3, + "tag": ",", + "orth": "," }, { - "dep":"partmod", - "head":-4, - "tag":"VBN", - "orth":"played" + "dep": "partmod", + "head": -4, + "tag": "VBN", + "orth": "played" }, { - "dep":"prep", - "head":-1, - "tag":"IN", - "orth":"by" + "dep": "prep", + "head": -1, + "tag": "IN", + "orth": "by" }, { - "dep":"nn", - "head":1, - "tag":"NNP", - "orth":"Kim" + "dep": "nn", + "head": 1, + "tag": "NNP", + "orth": "Kim" }, { - "dep":"pobj", - "head":-2, - "tag":"NNP", - "orth":"Cattrall" + "dep": "pobj", + "head": -2, + "tag": "NNP", + "orth": "Cattrall" }, { - "dep":"punct", - "head":-8, - "tag":",", - "orth":"," + "dep": "punct", + "head": -8, + "tag": ",", + "orth": "," }, { - "dep":"auxpass", - "head":2, - "tag":"VBD", - "orth":"was" + "dep": "auxpass", + "head": 2, + "tag": "VBD", + "orth": "was" }, { - "dep":"advmod", - "head":1, - "tag":"RB", - "orth":"mistakenly" + "dep": "advmod", + "head": 1, + "tag": "RB", + "orth": "mistakenly" }, { - "dep":"ROOT", - "head":0, - "tag":"VBN", - "orth":"attributed" + "dep": "ROOT", + "head": 0, + "tag": "VBN", + "orth": "attributed" }, { - "dep":"prep", - "head":-1, - "tag":"TO", - "orth":"to" + "dep": "prep", + "head": -1, + "tag": "TO", + "orth": "to" }, { - "dep":"nn", - "head":1, - "tag":"NNP", - "orth":"Christina" + "dep": "nn", + "head": 1, + "tag": "NNP", + "orth": "Christina" }, { - "dep":"pobj", - "head":-2, - "tag":"NNP", - "orth":"Haag" + "dep": "pobj", + "head": -2, + "tag": "NNP", + "orth": "Haag" }, { - "dep":"punct", - "head":-4, - "tag":".", - "orth":"." + "dep": "punct", + "head": -4, + "tag": ".", + "orth": "." } ] } @@ -307,41 +307,41 @@ ] }, { - "id":1, - "paragraphs":[ + "id": 1, + "paragraphs": [ { - "sentences":[ + "sentences": [ { - "tokens":[ + "tokens": [ { - "dep":"nn", - "head":1, - "tag":"NNP", - "orth":"Ms." + "dep": "nn", + "head": 1, + "tag": "NNP", + "orth": "Ms." }, { - "dep":"nsubj", - "head":1, - "tag":"NNP", - "orth":"Haag" + "dep": "nsubj", + "head": 1, + "tag": "NNP", + "orth": "Haag" }, { - "dep":"ROOT", - "head":0, - "tag":"VBZ", - "orth":"plays" + "dep": "ROOT", + "head": 0, + "tag": "VBZ", + "orth": "plays" }, { - "dep":"dobj", - "head":-1, - "tag":"NNP", - "orth":"Elianti" + "dep": "dobj", + "head": -1, + "tag": "NNP", + "orth": "Elianti" }, { - "dep":"punct", - "head":-2, - "tag":".", - "orth":"." + "dep": "punct", + "head": -2, + "tag": ".", + "orth": "." 
} ] } @@ -350,131 +350,131 @@ ] }, { - "id":2, - "paragraphs":[ + "id": 2, + "paragraphs": [ { - "sentences":[ + "sentences": [ { - "tokens":[ + "tokens": [ { - "dep":"nn", - "head":3, - "tag":"NNP", - "orth":"Rolls-Royce" + "dep": "nn", + "head": 3, + "tag": "NNP", + "orth": "Rolls-Royce" }, { - "dep":"nn", - "head":2, - "tag":"NNP", - "orth":"Motor" + "dep": "nn", + "head": 2, + "tag": "NNP", + "orth": "Motor" }, { - "dep":"nn", - "head":1, - "tag":"NNPS", - "orth":"Cars" + "dep": "nn", + "head": 1, + "tag": "NNPS", + "orth": "Cars" }, { - "dep":"nsubj", - "head":1, - "tag":"NNP", - "orth":"Inc." + "dep": "nsubj", + "head": 1, + "tag": "NNP", + "orth": "Inc." }, { - "dep":"ROOT", - "head":0, - "tag":"VBD", - "orth":"said" + "dep": "ROOT", + "head": 0, + "tag": "VBD", + "orth": "said" }, { - "dep":"nsubj", - "head":1, - "tag":"PRP", - "orth":"it" + "dep": "nsubj", + "head": 1, + "tag": "PRP", + "orth": "it" }, { - "dep":"ccomp", - "head":-2, - "tag":"VBZ", - "orth":"expects" + "dep": "ccomp", + "head": -2, + "tag": "VBZ", + "orth": "expects" }, { - "dep":"poss", - "head":2, - "tag":"PRP$", - "orth":"its" + "dep": "poss", + "head": 2, + "tag": "PRP$", + "orth": "its" }, { - "dep":"nn", - "head":1, - "tag":"NNP", - "orth":"U.S." + "dep": "nn", + "head": 1, + "tag": "NNP", + "orth": "U.S." }, { - "dep":"nsubj", - "head":3, - "tag":"NNS", - "orth":"sales" + "dep": "nsubj", + "head": 3, + "tag": "NNS", + "orth": "sales" }, { - "dep":"aux", - "head":2, - "tag":"TO", - "orth":"to" + "dep": "aux", + "head": 2, + "tag": "TO", + "orth": "to" }, { - "dep":"cop", - "head":1, - "tag":"VB", - "orth":"remain" + "dep": "cop", + "head": 1, + "tag": "VB", + "orth": "remain" }, { - "dep":"xcomp", - "head":-6, - "tag":"JJ", - "orth":"steady" + "dep": "xcomp", + "head": -6, + "tag": "JJ", + "orth": "steady" }, { - "dep":"prep", - "head":-1, - "tag":"IN", - "orth":"at" + "dep": "prep", + "head": -1, + "tag": "IN", + "orth": "at" }, { - "dep":"quantmod", - "head":1, - "tag":"IN", - "orth":"about" + "dep": "quantmod", + "head": 1, + "tag": "IN", + "orth": "about" }, { - "dep":"num", - "head":1, - "tag":"CD", - "orth":"1,200" + "dep": "num", + "head": 1, + "tag": "CD", + "orth": "1,200" }, { - "dep":"pobj", - "head":-3, - "tag":"NNS", - "orth":"cars" + "dep": "pobj", + "head": -3, + "tag": "NNS", + "orth": "cars" }, { - "dep":"prep", - "head":-5, - "tag":"IN", - "orth":"in" + "dep": "prep", + "head": -5, + "tag": "IN", + "orth": "in" }, { - "dep":"pobj", - "head":-1, - "tag":"CD", - "orth":"1990" + "dep": "pobj", + "head": -1, + "tag": "CD", + "orth": "1990" }, { - "dep":"punct", - "head":-15, - "tag":".", - "orth":"." + "dep": "punct", + "head": -15, + "tag": ".", + "orth": "." 
} ] } @@ -483,83 +483,83 @@ ] }, { - "id":3, - "paragraphs":[ + "id": 3, + "paragraphs": [ { - "sentences":[ + "sentences": [ { - "tokens":[ + "tokens": [ { - "dep":"det", - "head":3, - "tag":"DT", - "orth":"The" + "dep": "det", + "head": 3, + "tag": "DT", + "orth": "The" }, { - "dep":"nn", - "head":2, - "tag":"NN", - "orth":"luxury" + "dep": "nn", + "head": 2, + "tag": "NN", + "orth": "luxury" }, { - "dep":"nn", - "head":1, - "tag":"NN", - "orth":"auto" + "dep": "nn", + "head": 1, + "tag": "NN", + "orth": "auto" }, { - "dep":"nsubj", - "head":3, - "tag":"NN", - "orth":"maker" + "dep": "nsubj", + "head": 3, + "tag": "NN", + "orth": "maker" }, { - "dep":"amod", - "head":1, - "tag":"JJ", - "orth":"last" + "dep": "amod", + "head": 1, + "tag": "JJ", + "orth": "last" }, { - "dep":"tmod", - "head":1, - "tag":"NN", - "orth":"year" + "dep": "tmod", + "head": 1, + "tag": "NN", + "orth": "year" }, { - "dep":"ROOT", - "head":0, - "tag":"VBD", - "orth":"sold" + "dep": "ROOT", + "head": 0, + "tag": "VBD", + "orth": "sold" }, { - "dep":"num", - "head":1, - "tag":"CD", - "orth":"1,214" + "dep": "num", + "head": 1, + "tag": "CD", + "orth": "1,214" }, { - "dep":"dobj", - "head":-2, - "tag":"NNS", - "orth":"cars" + "dep": "dobj", + "head": -2, + "tag": "NNS", + "orth": "cars" }, { - "dep":"prep", - "head":-3, - "tag":"IN", - "orth":"in" + "dep": "prep", + "head": -3, + "tag": "IN", + "orth": "in" }, { - "dep":"det", - "head":1, - "tag":"DT", - "orth":"the" + "dep": "det", + "head": 1, + "tag": "DT", + "orth": "the" }, { - "dep":"pobj", - "head":-2, - "tag":"NNP", - "orth":"U.S." + "dep": "pobj", + "head": -2, + "tag": "NNP", + "orth": "U.S." } ] } @@ -568,185 +568,185 @@ ] }, { - "id":4, - "paragraphs":[ + "id": 4, + "paragraphs": [ { - "sentences":[ + "sentences": [ { - "tokens":[ + "tokens": [ { - "dep":"nn", - "head":1, - "tag":"NNP", - "orth":"Howard" + "dep": "nn", + "head": 1, + "tag": "NNP", + "orth": "Howard" }, { - "dep":"nsubj", - "head":8, - "tag":"NNP", - "orth":"Mosher" + "dep": "nsubj", + "head": 8, + "tag": "NNP", + "orth": "Mosher" }, { - "dep":"punct", - "head":-1, - "tag":",", - "orth":"," + "dep": "punct", + "head": -1, + "tag": ",", + "orth": "," }, { - "dep":"appos", - "head":-2, - "tag":"NN", - "orth":"president" + "dep": "appos", + "head": -2, + "tag": "NN", + "orth": "president" }, { - "dep":"cc", - "head":-1, - "tag":"CC", - "orth":"and" + "dep": "cc", + "head": -1, + "tag": "CC", + "orth": "and" }, { - "dep":"amod", - "head":2, - "tag":"JJ", - "orth":"chief" + "dep": "amod", + "head": 2, + "tag": "JJ", + "orth": "chief" }, { - "dep":"nn", - "head":1, - "tag":"NN", - "orth":"executive" + "dep": "nn", + "head": 1, + "tag": "NN", + "orth": "executive" }, { - "dep":"conj", - "head":-4, - "tag":"NN", - "orth":"officer" + "dep": "conj", + "head": -4, + "tag": "NN", + "orth": "officer" }, { - "dep":"punct", - "head":-7, - "tag":",", - "orth":"," + "dep": "punct", + "head": -7, + "tag": ",", + "orth": "," }, { - "dep":"ROOT", - "head":0, - "tag":"VBD", - "orth":"said" + "dep": "ROOT", + "head": 0, + "tag": "VBD", + "orth": "said" }, { - "dep":"nsubj", - "head":1, - "tag":"PRP", - "orth":"he" + "dep": "nsubj", + "head": 1, + "tag": "PRP", + "orth": "he" }, { - "dep":"ccomp", - "head":-2, - "tag":"VBZ", - "orth":"anticipates" + "dep": "ccomp", + "head": -2, + "tag": "VBZ", + "orth": "anticipates" }, { - "dep":"dobj", - "head":-1, - "tag":"NN", - "orth":"growth" + "dep": "dobj", + "head": -1, + "tag": "NN", + "orth": "growth" }, { - "dep":"prep", - "head":-1, - "tag":"IN", - "orth":"for" + 
"dep": "prep", + "head": -1, + "tag": "IN", + "orth": "for" }, { - "dep":"det", - "head":3, - "tag":"DT", - "orth":"the" + "dep": "det", + "head": 3, + "tag": "DT", + "orth": "the" }, { - "dep":"nn", - "head":2, - "tag":"NN", - "orth":"luxury" + "dep": "nn", + "head": 2, + "tag": "NN", + "orth": "luxury" }, { - "dep":"nn", - "head":1, - "tag":"NN", - "orth":"auto" + "dep": "nn", + "head": 1, + "tag": "NN", + "orth": "auto" }, { - "dep":"pobj", - "head":-4, - "tag":"NN", - "orth":"maker" + "dep": "pobj", + "head": -4, + "tag": "NN", + "orth": "maker" }, { - "dep":"prep", - "head":-6, - "tag":"IN", - "orth":"in" + "dep": "prep", + "head": -6, + "tag": "IN", + "orth": "in" }, { - "dep":"pobj", - "head":-1, - "tag":"NNP", - "orth":"Britain" + "dep": "pobj", + "head": -1, + "tag": "NNP", + "orth": "Britain" }, { - "dep":"cc", - "head":-1, - "tag":"CC", - "orth":"and" + "dep": "cc", + "head": -1, + "tag": "CC", + "orth": "and" }, { - "dep":"conj", - "head":-2, - "tag":"NNP", - "orth":"Europe" + "dep": "conj", + "head": -2, + "tag": "NNP", + "orth": "Europe" }, { - "dep":"punct", - "head":-4, - "tag":",", - "orth":"," + "dep": "punct", + "head": -4, + "tag": ",", + "orth": "," }, { - "dep":"cc", - "head":-5, - "tag":"CC", - "orth":"and" + "dep": "cc", + "head": -5, + "tag": "CC", + "orth": "and" }, { - "dep":"conj", - "head":-6, - "tag":"IN", - "orth":"in" + "dep": "conj", + "head": -6, + "tag": "IN", + "orth": "in" }, { - "dep":"amod", - "head":1, - "tag":"JJ", - "orth":"Far" + "dep": "amod", + "head": 1, + "tag": "JJ", + "orth": "Far" }, { - "dep":"amod", - "head":1, - "tag":"JJ", - "orth":"Eastern" + "dep": "amod", + "head": 1, + "tag": "JJ", + "orth": "Eastern" }, { - "dep":"pobj", - "head":-3, - "tag":"NNS", - "orth":"markets" + "dep": "pobj", + "head": -3, + "tag": "NNS", + "orth": "markets" }, { - "dep":"punct", - "head":-19, - "tag":".", - "orth":"." + "dep": "punct", + "head": -19, + "tag": ".", + "orth": "." } ] } @@ -755,101 +755,101 @@ ] }, { - "id":5, - "paragraphs":[ + "id": 5, + "paragraphs": [ { - "sentences":[ + "sentences": [ { - "tokens":[ + "tokens": [ { - "dep":"nn", - "head":2, - "tag":"NNP", - "orth":"BELL" + "dep": "nn", + "head": 2, + "tag": "NNP", + "orth": "BELL" }, { - "dep":"nn", - "head":1, - "tag":"NNP", - "orth":"INDUSTRIES" + "dep": "nn", + "head": 1, + "tag": "NNP", + "orth": "INDUSTRIES" }, { - "dep":"nsubj", - "head":1, - "tag":"NNP", - "orth":"Inc." + "dep": "nsubj", + "head": 1, + "tag": "NNP", + "orth": "Inc." 
}, { - "dep":"ROOT", - "head":0, - "tag":"VBD", - "orth":"increased" + "dep": "ROOT", + "head": 0, + "tag": "VBD", + "orth": "increased" }, { - "dep":"poss", - "head":1, - "tag":"PRP$", - "orth":"its" + "dep": "poss", + "head": 1, + "tag": "PRP$", + "orth": "its" }, { - "dep":"dobj", - "head":-2, - "tag":"NN", - "orth":"quarterly" + "dep": "dobj", + "head": -2, + "tag": "NN", + "orth": "quarterly" }, { - "dep":"prep", - "head":-3, - "tag":"TO", - "orth":"to" + "dep": "prep", + "head": -3, + "tag": "TO", + "orth": "to" }, { - "dep":"num", - "head":1, - "tag":"CD", - "orth":"10" + "dep": "num", + "head": 1, + "tag": "CD", + "orth": "10" }, { - "dep":"pobj", - "head":-2, - "tag":"NNS", - "orth":"cents" + "dep": "pobj", + "head": -2, + "tag": "NNS", + "orth": "cents" }, { - "dep":"prep", - "head":-6, - "tag":"IN", - "orth":"from" + "dep": "prep", + "head": -6, + "tag": "IN", + "orth": "from" }, { - "dep":"num", - "head":1, - "tag":"CD", - "orth":"seven" + "dep": "num", + "head": 1, + "tag": "CD", + "orth": "seven" }, { - "dep":"pobj", - "head":-2, - "tag":"NNS", - "orth":"cents" + "dep": "pobj", + "head": -2, + "tag": "NNS", + "orth": "cents" }, { - "dep":"det", - "head":1, - "tag":"DT", - "orth":"a" + "dep": "det", + "head": 1, + "tag": "DT", + "orth": "a" }, { - "dep":"npadvmod", - "head":-2, - "tag":"NN", - "orth":"share" + "dep": "npadvmod", + "head": -2, + "tag": "NN", + "orth": "share" }, { - "dep":"punct", - "head":-11, - "tag":".", - "orth":"." + "dep": "punct", + "head": -11, + "tag": ".", + "orth": "." } ] } @@ -858,65 +858,65 @@ ] }, { - "id":6, - "paragraphs":[ + "id": 6, + "paragraphs": [ { - "sentences":[ + "sentences": [ { - "tokens":[ + "tokens": [ { - "dep":"det", - "head":2, - "tag":"DT", - "orth":"The" + "dep": "det", + "head": 2, + "tag": "DT", + "orth": "The" }, { - "dep":"amod", - "head":1, - "tag":"JJ", - "orth":"new" + "dep": "amod", + "head": 1, + "tag": "JJ", + "orth": "new" }, { - "dep":"nsubj", - "head":3, - "tag":"NN", - "orth":"rate" + "dep": "nsubj", + "head": 3, + "tag": "NN", + "orth": "rate" }, { - "dep":"aux", - "head":2, - "tag":"MD", - "orth":"will" + "dep": "aux", + "head": 2, + "tag": "MD", + "orth": "will" }, { - "dep":"cop", - "head":1, - "tag":"VB", - "orth":"be" + "dep": "cop", + "head": 1, + "tag": "VB", + "orth": "be" }, { - "dep":"ROOT", - "head":0, - "tag":"JJ", - "orth":"payable" + "dep": "ROOT", + "head": 0, + "tag": "JJ", + "orth": "payable" }, { - "dep":"tmod", - "head":-1, - "tag":"NNP", - "orth":"Feb." + "dep": "tmod", + "head": -1, + "tag": "NNP", + "orth": "Feb." }, { - "dep":"num", - "head":-1, - "tag":"CD", - "orth":"15" + "dep": "num", + "head": -1, + "tag": "CD", + "orth": "15" }, { - "dep":"punct", - "head":-3, - "tag":".", - "orth":"." + "dep": "punct", + "head": -3, + "tag": ".", + "orth": "." 
} ] } @@ -925,59 +925,59 @@ ] }, { - "id":7, - "paragraphs":[ + "id": 7, + "paragraphs": [ { - "sentences":[ + "sentences": [ { - "tokens":[ + "tokens": [ { - "dep":"det", - "head":2, - "tag":"DT", - "orth":"A" + "dep": "det", + "head": 2, + "tag": "DT", + "orth": "A" }, { - "dep":"nn", - "head":1, - "tag":"NN", - "orth":"record" + "dep": "nn", + "head": 1, + "tag": "NN", + "orth": "record" }, { - "dep":"nsubjpass", - "head":4, - "tag":"NN", - "orth":"date" + "dep": "nsubjpass", + "head": 4, + "tag": "NN", + "orth": "date" }, { - "dep":"aux", - "head":3, - "tag":"VBZ", - "orth":"has" + "dep": "aux", + "head": 3, + "tag": "VBZ", + "orth": "has" }, { - "dep":"neg", - "head":2, - "tag":"RB", - "orth":"n't" + "dep": "neg", + "head": 2, + "tag": "RB", + "orth": "n't" }, { - "dep":"auxpass", - "head":1, - "tag":"VBN", - "orth":"been" + "dep": "auxpass", + "head": 1, + "tag": "VBN", + "orth": "been" }, { - "dep":"ROOT", - "head":0, - "tag":"VBN", - "orth":"set" + "dep": "ROOT", + "head": 0, + "tag": "VBN", + "orth": "set" }, { - "dep":"punct", - "head":-1, - "tag":".", - "orth":"." + "dep": "punct", + "head": -1, + "tag": ".", + "orth": "." } ] } @@ -986,113 +986,113 @@ ] }, { - "id":8, - "paragraphs":[ + "id": 8, + "paragraphs": [ { - "sentences":[ + "sentences": [ { - "tokens":[ + "tokens": [ { - "dep":"nsubj", - "head":7, - "tag":"NNP", - "orth":"Bell" + "dep": "nsubj", + "head": 7, + "tag": "NNP", + "orth": "Bell" }, { - "dep":"punct", - "head":-1, - "tag":",", - "orth":"," + "dep": "punct", + "head": -1, + "tag": ",", + "orth": "," }, { - "dep":"partmod", - "head":-2, - "tag":"VBN", - "orth":"based" + "dep": "partmod", + "head": -2, + "tag": "VBN", + "orth": "based" }, { - "dep":"prep", - "head":-1, - "tag":"IN", - "orth":"in" + "dep": "prep", + "head": -1, + "tag": "IN", + "orth": "in" }, { - "dep":"nn", - "head":1, - "tag":"NNP", - "orth":"Los" + "dep": "nn", + "head": 1, + "tag": "NNP", + "orth": "Los" }, { - "dep":"pobj", - "head":-2, - "tag":"NNP", - "orth":"Angeles" + "dep": "pobj", + "head": -2, + "tag": "NNP", + "orth": "Angeles" }, { - "dep":"punct", - "head":-6, - "tag":",", - "orth":"," + "dep": "punct", + "head": -6, + "tag": ",", + "orth": "," }, { - "dep":"ROOT", - "head":0, - "tag":"VBZ", - "orth":"makes" + "dep": "ROOT", + "head": 0, + "tag": "VBZ", + "orth": "makes" }, { - "dep":"cc", - "head":-1, - "tag":"CC", - "orth":"and" + "dep": "cc", + "head": -1, + "tag": "CC", + "orth": "and" }, { - "dep":"conj", - "head":-2, - "tag":"VBZ", - "orth":"distributes" + "dep": "conj", + "head": -2, + "tag": "VBZ", + "orth": "distributes" }, { - "dep":"amod", - "head":5, - "tag":"JJ", - "orth":"electronic" + "dep": "amod", + "head": 5, + "tag": "JJ", + "orth": "electronic" }, { - "dep":"punct", - "head":-1, - "tag":",", - "orth":"," + "dep": "punct", + "head": -1, + "tag": ",", + "orth": "," }, { - "dep":"conj", - "head":-2, - "tag":"NN", - "orth":"computer" + "dep": "conj", + "head": -2, + "tag": "NN", + "orth": "computer" }, { - "dep":"cc", - "head":-3, - "tag":"CC", - "orth":"and" + "dep": "cc", + "head": -3, + "tag": "CC", + "orth": "and" }, { - "dep":"conj", - "head":-4, - "tag":"NN", - "orth":"building" + "dep": "conj", + "head": -4, + "tag": "NN", + "orth": "building" }, { - "dep":"dobj", - "head":-8, - "tag":"NNS", - "orth":"products" + "dep": "dobj", + "head": -8, + "tag": "NNS", + "orth": "products" }, { - "dep":"punct", - "head":-9, - "tag":".", - "orth":"." + "dep": "punct", + "head": -9, + "tag": ".", + "orth": "." 
} ] } From dd5b2d8fa31d47f3ee16f6a1b3340f1319b39ecb Mon Sep 17 00:00:00 2001 From: Matthew Honnibal Date: Tue, 24 Oct 2017 12:40:47 +0200 Subject: [PATCH 36/77] Check for out-of-memory when calling calloc. Closes #1446 --- spacy/syntax/_state.pxd | 7 +++++++ spacy/syntax/nn_parser.pyx | 7 ++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/spacy/syntax/_state.pxd b/spacy/syntax/_state.pxd index 4675d887e..803348b53 100644 --- a/spacy/syntax/_state.pxd +++ b/spacy/syntax/_state.pxd @@ -2,6 +2,8 @@ from libc.string cimport memcpy, memset, memmove from libc.stdlib cimport malloc, calloc, free from libc.stdint cimport uint32_t, uint64_t +from cpython.exc cimport PyErr_CheckSignals, PyErr_SetFromErrno + from murmurhash.mrmr cimport hash64 from ..vocab cimport EMPTY_LEXEME @@ -55,6 +57,11 @@ cdef cppclass StateC: this.shifted = calloc(length + (PADDING * 2), sizeof(bint)) this._sent = calloc(length + (PADDING * 2), sizeof(TokenC)) this._ents = calloc(length + (PADDING * 2), sizeof(Entity)) + if not (this._buffer and this._stack and this.shifted + and this._sent and this._ents): + with gil: + PyErr_SetFromErrno(MemoryError) + PyErr_CheckSignals() memset(&this._hist, 0, sizeof(this._hist)) this.offset = 0 cdef int i diff --git a/spacy/syntax/nn_parser.pyx b/spacy/syntax/nn_parser.pyx index cb26b8d37..a9553fd1f 100644 --- a/spacy/syntax/nn_parser.pyx +++ b/spacy/syntax/nn_parser.pyx @@ -22,7 +22,7 @@ cimport numpy as np from libcpp.vector cimport vector from cpython.ref cimport PyObject, Py_INCREF, Py_XDECREF -from cpython.exc cimport PyErr_CheckSignals +from cpython.exc cimport PyErr_CheckSignals, PyErr_SetFromErrno from libc.stdint cimport uint32_t, uint64_t from libc.string cimport memset, memcpy from libc.stdlib cimport malloc, calloc, free @@ -429,6 +429,7 @@ cdef class Parser: self._parseC(states[i], feat_weights, hW, hb, nr_class, nr_hidden, nr_feat, nr_piece) + PyErr_CheckSignals() return state_objs cdef void _parseC(self, StateC* state, @@ -438,6 +439,10 @@ cdef class Parser: is_valid = calloc(nr_class, sizeof(int)) vectors = calloc(nr_hidden * nr_piece, sizeof(float)) scores = calloc(nr_class, sizeof(float)) + if not (token_ids and is_valid and vectors and scores): + with gil: + PyErr_SetFromErrno(MemoryError) + PyErr_CheckSignals() while not state.is_final(): state.set_context_tokens(token_ids, nr_feat) From 66f8f9d4a0476f84a130f9e7ba5c7f69f4da02e4 Mon Sep 17 00:00:00 2001 From: ines Date: Tue, 24 Oct 2017 13:02:19 +0200 Subject: [PATCH 37/77] Fix Japanese tokenizer JapaneseTokenizer now returns a Doc, not individual words --- spacy/lang/ja/__init__.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/spacy/lang/ja/__init__.py b/spacy/lang/ja/__init__.py index 3a9c58fca..04cc013a4 100644 --- a/spacy/lang/ja/__init__.py +++ b/spacy/lang/ja/__init__.py @@ -33,8 +33,7 @@ class Japanese(Language): Defaults = JapaneseDefaults def make_doc(self, text): - words = self.tokenizer(text) - return Doc(self.vocab, words=words, spaces=[False]*len(words)) + return self.tokenizer(text) __all__ = ['Japanese'] From c55db0a4a1c8027b16af1510df1e725df2b15a02 Mon Sep 17 00:00:00 2001 From: ines Date: Tue, 24 Oct 2017 13:02:24 +0200 Subject: [PATCH 38/77] Add example sentences for Japanese and Chinese (see #1107) --- spacy/lang/ja/examples.py | 18 ++++++++++++++++++ spacy/lang/zh/examples.py | 18 ++++++++++++++++++ 2 files changed, 36 insertions(+) create mode 100644 spacy/lang/ja/examples.py create mode 100644 spacy/lang/zh/examples.py diff --git a/spacy/lang/ja/examples.py 
b/spacy/lang/ja/examples.py new file mode 100644 index 000000000..623609205 --- /dev/null +++ b/spacy/lang/ja/examples.py @@ -0,0 +1,18 @@ +# coding: utf8 +from __future__ import unicode_literals + + +""" +Example sentences to test spaCy and its language models. + +>>> from spacy.lang.ja.examples import sentences +>>> docs = nlp.pipe(sentences) +""" + + +sentences = [ + 'アップルがイギリスの新興企業を10億ドルで購入を検討', + '自動運転車の損害賠償責任、自動車メーカーに一定の負担を求める', + '歩道を走る自動配達ロボ、サンフランシスコ市が走行禁止を検討', + 'ロンドンはイギリスの大都市です。' +] diff --git a/spacy/lang/zh/examples.py b/spacy/lang/zh/examples.py new file mode 100644 index 000000000..5e8a36119 --- /dev/null +++ b/spacy/lang/zh/examples.py @@ -0,0 +1,18 @@ +# coding: utf8 +from __future__ import unicode_literals + + +""" +Example sentences to test spaCy and its language models. + +>>> from spacy.lang.zh.examples import sentences +>>> docs = nlp.pipe(sentences) +""" + + +sentences = [ + "蘋果公司正考量用一億元買下英國的新創公司", + "自駕車將保險責任歸屬轉移至製造商", + "舊金山考慮禁止送貨機器人在人行道上行駛", + "倫敦是英國的大城市" +] From 391d5ef0d13c9f7401ee3576ff578515c07c5f77 Mon Sep 17 00:00:00 2001 From: Matthew Honnibal Date: Tue, 24 Oct 2017 14:25:49 +0200 Subject: [PATCH 39/77] Normalize imports in regression test --- spacy/tests/regression/test_issue1434.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/spacy/tests/regression/test_issue1434.py b/spacy/tests/regression/test_issue1434.py index ec3a34bb0..fc88cc3e6 100644 --- a/spacy/tests/regression/test_issue1434.py +++ b/spacy/tests/regression/test_issue1434.py @@ -1,9 +1,9 @@ from __future__ import unicode_literals -from spacy.tokens import Doc -from spacy.vocab import Vocab -from spacy.matcher import Matcher -from spacy.lang.lex_attrs import LEX_ATTRS +from ...vocab import Vocab +from ...lang.lex_attrs import LEX_ATTRS +from ...tokens import Doc +from ...matcher import Matcher def test_issue1434(): From 4bea65a1a8426bb551854b0a6175b5df1403e27d Mon Sep 17 00:00:00 2001 From: Matthew Honnibal Date: Tue, 24 Oct 2017 14:26:27 +0200 Subject: [PATCH 40/77] Fix Issue #1450: Off-by-1 in * and ? matches Patterns that end in variable-length operators e.g. * and ? now end on the correct token. Previously, they were off by 1: the next token was pulled into the match, even if that's where the pattern failed. 
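For example, with a pattern that ends in an optional token -- a minimal
sketch built around the same pattern as the regression test added below
(match tuples are (ent_id, start, end)):

    from spacy.vocab import Vocab
    from spacy.tokens import Doc
    from spacy.matcher import Matcher

    vocab = Vocab()
    matcher = Matcher(vocab)
    # 'a' followed by zero or more 'b' tokens
    matcher.add('TSTEND', None, [{'ORTH': 'a'}, {'ORTH': 'b', 'OP': '*'}])

    # No 'b' follows here, so the match must stop after 'a'. Previously
    # the reported end was 2, which pulled 'c' into the match.
    doc = Doc(vocab, words=['a', 'c'])
    ent_id, start, end = matcher(doc)[0]
    assert (start, end) == (0, 1)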
--- spacy/matcher.pyx | 24 ++++++---- spacy/tests/regression/test_issue1450.py | 58 ++++++++++++++++++++++++ spacy/tests/test_matcher.py | 22 +++++++++ 3 files changed, 96 insertions(+), 8 deletions(-) create mode 100644 spacy/tests/regression/test_issue1450.py diff --git a/spacy/matcher.pyx b/spacy/matcher.pyx index fa67f32d6..a0c69f4bf 100644 --- a/spacy/matcher.pyx +++ b/spacy/matcher.pyx @@ -69,6 +69,7 @@ cdef enum action_t: REPEAT ACCEPT ADVANCE_ZERO + ACCEPT_PREV PANIC # A "match expression" conists of one or more token patterns @@ -120,24 +121,27 @@ cdef attr_t get_pattern_key(const TokenPatternC* pattern) except 0: cdef int get_action(const TokenPatternC* pattern, const TokenC* token) nogil: + lookahead = &pattern[1] for attr in pattern.attrs[:pattern.nr_attr]: if get_token_attr(token, attr.attr) != attr.value: if pattern.quantifier == ONE: return REJECT elif pattern.quantifier == ZERO: - return ACCEPT if (pattern+1).nr_attr == 0 else ADVANCE + return ACCEPT if lookahead.nr_attr == 0 else ADVANCE elif pattern.quantifier in (ZERO_ONE, ZERO_PLUS): - return ACCEPT if (pattern+1).nr_attr == 0 else ADVANCE_ZERO + return ACCEPT_PREV if lookahead.nr_attr == 0 else ADVANCE_ZERO else: return PANIC if pattern.quantifier == ZERO: return REJECT + elif lookahead.nr_attr == 0: + return ACCEPT elif pattern.quantifier in (ONE, ZERO_ONE): - return ACCEPT if (pattern+1).nr_attr == 0 else ADVANCE + return ADVANCE elif pattern.quantifier == ZERO_PLUS: # This is a bandaid over the 'shadowing' problem described here: # https://github.com/explosion/spaCy/issues/864 - next_action = get_action(pattern+1, token) + next_action = get_action(lookahead, token) if next_action is REJECT: return REPEAT else: @@ -345,6 +349,9 @@ cdef class Matcher: while action == ADVANCE_ZERO: state.second += 1 action = get_action(state.second, token) + if action == PANIC: + raise Exception("Error selecting action in matcher") + if action == REPEAT: # Leave the state in the queue, and advance to next slot # (i.e. we don't overwrite -- we want to greedily match more @@ -356,14 +363,15 @@ cdef class Matcher: partials[q] = state partials[q].second += 1 q += 1 - elif action == ACCEPT: + elif action in (ACCEPT, ACCEPT_PREV): # TODO: What to do about patterns starting with ZERO? Need to # adjust the start position. 
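                # (ACCEPT ends the match on the current token, while
                # ACCEPT_PREV ends it on the token before, so an optional
                # trailing pattern that failed is not pulled into the match.)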
                    start = state.first
-                    end = token_i+1
+                    end = token_i+1 if action == ACCEPT else token_i
                     ent_id = state.second[1].attrs[0].value
                     label = state.second[1].attrs[1].value
                     matches.append((ent_id, start, end))
+
             partials.resize(q)
             # Check whether we open any new patterns on this token
             for pattern in self.patterns:
@@ -383,9 +391,9 @@ cdef class Matcher:
                     state.first = token_i
                     state.second = pattern + 1
                     partials.push_back(state)
-                elif action == ACCEPT:
+                elif action in (ACCEPT, ACCEPT_PREV):
                     start = token_i
-                    end = token_i+1
+                    end = token_i+1 if action == ACCEPT else token_i
                     ent_id = pattern[1].attrs[0].value
                     label = pattern[1].attrs[1].value
                     matches.append((ent_id, start, end))
diff --git a/spacy/tests/regression/test_issue1450.py b/spacy/tests/regression/test_issue1450.py
new file mode 100644
index 000000000..6f1d4f568
--- /dev/null
+++ b/spacy/tests/regression/test_issue1450.py
@@ -0,0 +1,58 @@
+from __future__ import unicode_literals
+import pytest
+
+from ...matcher import Matcher
+from ...tokens import Doc
+from ...vocab import Vocab
+
+
+@pytest.mark.parametrize(
+    'string,start,end',
+    [
+        ('a', 0, 1),
+        ('a b', 0, 2),
+        ('a c', 0, 1),
+        ('a b c', 0, 2),
+        ('a b b c', 0, 2),
+        ('a b b', 0, 2),
+    ]
+)
+def test_issue1450_matcher_end_zero_plus(string, start, end):
+    '''Test that the matcher works when patterns end with the * operator.
+
+    Original example (rewritten to avoid model usage)
+
+    nlp = spacy.load('en_core_web_sm')
+    matcher = Matcher(nlp.vocab)
+    matcher.add(
+        "TSTEND",
+        on_match_1,
+        [
+            {TAG: "JJ", LOWER: "new"},
+            {TAG: "NN", 'OP': "*"}
+        ]
+    )
+    doc = nlp(u'Could you create a new ticket for me?')
+    print([(w.tag_, w.text, w.lower_) for w in doc])
+    matches = matcher(doc)
+    print(matches)
+    assert len(matches) == 1
+    assert matches[0][1] == 4
+    assert matches[0][2] == 5
+    '''
+    matcher = Matcher(Vocab())
+    matcher.add(
+        "TSTEND",
+        None,
+        [
+            {'ORTH': "a"},
+            {'ORTH': "b", 'OP': "*"}
+        ]
+    )
+    doc = Doc(Vocab(), words=string.split())
+    matches = matcher(doc)
+    if start is None or end is None:
+        assert matches == []
+        return
+    assert matches[0][1] == start
+    assert matches[0][2] == end
diff --git a/spacy/tests/test_matcher.py b/spacy/tests/test_matcher.py
index 9fcb47305..5b08ede39 100644
--- a/spacy/tests/test_matcher.py
+++ b/spacy/tests/test_matcher.py
@@ -3,6 +3,7 @@ from __future__ import unicode_literals

 from ..matcher import Matcher, PhraseMatcher
 from .util import get_doc
+from ..tokens import Doc

 import pytest

@@ -212,3 +213,24 @@ def test_operator_combos(matcher):
             assert matches, (string, pattern_str)
         else:
             assert not matches, (string, pattern_str)
+
+
+def test_matcher_end_zero_plus(matcher):
+    '''Test that the matcher works when patterns end with the * operator.
(issue 1450)''' + matcher = Matcher(matcher.vocab) + matcher.add( + "TSTEND", + None, + [ + {'ORTH': "a"}, + {'ORTH': "b", 'OP': "*"} + ] + ) + nlp = lambda string: Doc(matcher.vocab, words=string.split()) + assert len(matcher(nlp(u'a'))) == 1 + assert len(matcher(nlp(u'a b'))) == 1 + assert len(matcher(nlp(u'a b'))) == 1 + assert len(matcher(nlp(u'a c'))) == 1 + assert len(matcher(nlp(u'a b c'))) == 1 + assert len(matcher(nlp(u'a b b c'))) == 1 + assert len(matcher(nlp(u'a b b'))) == 1 From 4ef81a9ebce7674f0c290a70c8f2432bdd6198c6 Mon Sep 17 00:00:00 2001 From: ines Date: Tue, 24 Oct 2017 15:27:29 +0200 Subject: [PATCH 41/77] Fix whitespace --- spacy/tests/spans/test_span.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spacy/tests/spans/test_span.py b/spacy/tests/spans/test_span.py index 5e7c638b6..dbb835301 100644 --- a/spacy/tests/spans/test_span.py +++ b/spacy/tests/spans/test_span.py @@ -100,7 +100,7 @@ def test_spans_are_hashable(en_tokenizer): assert hash(span1) != hash(span2) span3 = tokens[0:2] assert hash(span3) == hash(span1) - + def test_spans_by_character(doc): span1 = doc[1:-2] From 090aed940a8340d20b4ab1cd31637ceae3753cfe Mon Sep 17 00:00:00 2001 From: ines Date: Tue, 24 Oct 2017 15:28:05 +0200 Subject: [PATCH 42/77] Add test for currently failing span.as_doc case --- spacy/tests/spans/test_span.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/spacy/tests/spans/test_span.py b/spacy/tests/spans/test_span.py index dbb835301..4050809b5 100644 --- a/spacy/tests/spans/test_span.py +++ b/spacy/tests/spans/test_span.py @@ -117,3 +117,9 @@ def test_span_to_array(doc): assert arr[0, 0] == span[0].orth assert arr[0, 1] == len(span[0]) + +@pytest.mark.xfail +def test_span_as_doc(doc): + span = doc[4:10] + span_doc = span.as_doc() + assert span.text == span_doc.text From 2b8e7c45e09d0ad45fb9b1c26cb69c451a629afe Mon Sep 17 00:00:00 2001 From: ines Date: Tue, 24 Oct 2017 15:35:16 +0200 Subject: [PATCH 43/77] Use better training data JSON example --- examples/training/training-data.json | 1742 ++++++++++---------------- 1 file changed, 640 insertions(+), 1102 deletions(-) diff --git a/examples/training/training-data.json b/examples/training/training-data.json index 532ab4ea8..7737b9a14 100644 --- a/examples/training/training-data.json +++ b/examples/training/training-data.json @@ -1,1103 +1,641 @@ [ - { - "id": 0, - "paragraphs": [ - { - "sentences": [ - { - "tokens": [ - { - "dep": "prep", - "head": 44, - "tag": "IN", - "orth": "In" - }, - { - "dep": "det", - "head": 3, - "tag": "DT", - "orth": "an" - }, - { - "dep": "nn", - "head": 2, - "tag": "NNP", - "orth": "Oct." 
- }, - { - "dep": "num", - "head": 1, - "tag": "CD", - "orth": "19" - }, - { - "dep": "pobj", - "head": -4, - "tag": "NN", - "orth": "review" - }, - { - "dep": "prep", - "head": -1, - "tag": "IN", - "orth": "of" - }, - { - "dep": "punct", - "head": 2, - "tag": "``", - "orth": "``" - }, - { - "dep": "det", - "head": 1, - "tag": "DT", - "orth": "The" - }, - { - "dep": "pobj", - "head": -3, - "tag": "NN", - "orth": "Misanthrope" - }, - { - "dep": "punct", - "head": -1, - "tag": "''", - "orth": "''" - }, - { - "dep": "prep", - "head": -2, - "tag": "IN", - "orth": "at" - }, - { - "dep": "poss", - "head": 3, - "tag": "NNP", - "orth": "Chicago" - }, - { - "dep": "possessive", - "head": -1, - "tag": "POS", - "orth": "'s" - }, - { - "dep": "nn", - "head": 1, - "tag": "NNP", - "orth": "Goodman" - }, - { - "dep": "pobj", - "head": -4, - "tag": "NNP", - "orth": "Theatre" - }, - { - "dep": "punct", - "head": 4, - "tag": "-LRB-", - "orth": "-LRB-" - }, - { - "dep": "punct", - "head": 3, - "tag": "``", - "orth": "``" - }, - { - "dep": "amod", - "head": 1, - "tag": "VBN", - "orth": "Revitalized" - }, - { - "dep": "nsubj", - "head": 1, - "tag": "NNS", - "orth": "Classics" - }, - { - "dep": "dep", - "head": -15, - "tag": "VBP", - "orth": "Take" - }, - { - "dep": "det", - "head": 1, - "tag": "DT", - "orth": "the" - }, - { - "dep": "dobj", - "head": -2, - "tag": "NN", - "orth": "Stage" - }, - { - "dep": "prep", - "head": -3, - "tag": "IN", - "orth": "in" - }, - { - "dep": "nn", - "head": 1, - "tag": "NNP", - "orth": "Windy" - }, - { - "dep": "pobj", - "head": -2, - "tag": "NNP", - "orth": "City" - }, - { - "dep": "punct", - "head": -6, - "tag": ",", - "orth": "," - }, - { - "dep": "punct", - "head": -7, - "tag": "''", - "orth": "''" - }, - { - "dep": "dep", - "head": -8, - "tag": "NN", - "orth": "Leisure" - }, - { - "dep": "cc", - "head": -1, - "tag": "CC", - "orth": "&" - }, - { - "dep": "conj", - "head": -2, - "tag": "NNS", - "orth": "Arts" - }, - { - "dep": "punct", - "head": -11, - "tag": "-RRB-", - "orth": "-RRB-" - }, - { - "dep": "punct", - "head": 13, - "tag": ",", - "orth": "," - }, - { - "dep": "det", - "head": 1, - "tag": "DT", - "orth": "the" - }, - { - "dep": "nsubjpass", - "head": 11, - "tag": "NN", - "orth": "role" - }, - { - "dep": "prep", - "head": -1, - "tag": "IN", - "orth": "of" - }, - { - "dep": "pobj", - "head": -1, - "tag": "NNP", - "orth": "Celimene" - }, - { - "dep": "punct", - "head": -3, - "tag": ",", - "orth": "," - }, - { - "dep": "partmod", - "head": -4, - "tag": "VBN", - "orth": "played" - }, - { - "dep": "prep", - "head": -1, - "tag": "IN", - "orth": "by" - }, - { - "dep": "nn", - "head": 1, - "tag": "NNP", - "orth": "Kim" - }, - { - "dep": "pobj", - "head": -2, - "tag": "NNP", - "orth": "Cattrall" - }, - { - "dep": "punct", - "head": -8, - "tag": ",", - "orth": "," - }, - { - "dep": "auxpass", - "head": 2, - "tag": "VBD", - "orth": "was" - }, - { - "dep": "advmod", - "head": 1, - "tag": "RB", - "orth": "mistakenly" - }, - { - "dep": "ROOT", - "head": 0, - "tag": "VBN", - "orth": "attributed" - }, - { - "dep": "prep", - "head": -1, - "tag": "TO", - "orth": "to" - }, - { - "dep": "nn", - "head": 1, - "tag": "NNP", - "orth": "Christina" - }, - { - "dep": "pobj", - "head": -2, - "tag": "NNP", - "orth": "Haag" - }, - { - "dep": "punct", - "head": -4, - "tag": ".", - "orth": "." - } - ] - } - ] - } - ] - }, - { - "id": 1, - "paragraphs": [ - { - "sentences": [ - { - "tokens": [ - { - "dep": "nn", - "head": 1, - "tag": "NNP", - "orth": "Ms." 
- }, - { - "dep": "nsubj", - "head": 1, - "tag": "NNP", - "orth": "Haag" - }, - { - "dep": "ROOT", - "head": 0, - "tag": "VBZ", - "orth": "plays" - }, - { - "dep": "dobj", - "head": -1, - "tag": "NNP", - "orth": "Elianti" - }, - { - "dep": "punct", - "head": -2, - "tag": ".", - "orth": "." - } - ] - } - ] - } - ] - }, - { - "id": 2, - "paragraphs": [ - { - "sentences": [ - { - "tokens": [ - { - "dep": "nn", - "head": 3, - "tag": "NNP", - "orth": "Rolls-Royce" - }, - { - "dep": "nn", - "head": 2, - "tag": "NNP", - "orth": "Motor" - }, - { - "dep": "nn", - "head": 1, - "tag": "NNPS", - "orth": "Cars" - }, - { - "dep": "nsubj", - "head": 1, - "tag": "NNP", - "orth": "Inc." - }, - { - "dep": "ROOT", - "head": 0, - "tag": "VBD", - "orth": "said" - }, - { - "dep": "nsubj", - "head": 1, - "tag": "PRP", - "orth": "it" - }, - { - "dep": "ccomp", - "head": -2, - "tag": "VBZ", - "orth": "expects" - }, - { - "dep": "poss", - "head": 2, - "tag": "PRP$", - "orth": "its" - }, - { - "dep": "nn", - "head": 1, - "tag": "NNP", - "orth": "U.S." - }, - { - "dep": "nsubj", - "head": 3, - "tag": "NNS", - "orth": "sales" - }, - { - "dep": "aux", - "head": 2, - "tag": "TO", - "orth": "to" - }, - { - "dep": "cop", - "head": 1, - "tag": "VB", - "orth": "remain" - }, - { - "dep": "xcomp", - "head": -6, - "tag": "JJ", - "orth": "steady" - }, - { - "dep": "prep", - "head": -1, - "tag": "IN", - "orth": "at" - }, - { - "dep": "quantmod", - "head": 1, - "tag": "IN", - "orth": "about" - }, - { - "dep": "num", - "head": 1, - "tag": "CD", - "orth": "1,200" - }, - { - "dep": "pobj", - "head": -3, - "tag": "NNS", - "orth": "cars" - }, - { - "dep": "prep", - "head": -5, - "tag": "IN", - "orth": "in" - }, - { - "dep": "pobj", - "head": -1, - "tag": "CD", - "orth": "1990" - }, - { - "dep": "punct", - "head": -15, - "tag": ".", - "orth": "." - } - ] - } - ] - } - ] - }, - { - "id": 3, - "paragraphs": [ - { - "sentences": [ - { - "tokens": [ - { - "dep": "det", - "head": 3, - "tag": "DT", - "orth": "The" - }, - { - "dep": "nn", - "head": 2, - "tag": "NN", - "orth": "luxury" - }, - { - "dep": "nn", - "head": 1, - "tag": "NN", - "orth": "auto" - }, - { - "dep": "nsubj", - "head": 3, - "tag": "NN", - "orth": "maker" - }, - { - "dep": "amod", - "head": 1, - "tag": "JJ", - "orth": "last" - }, - { - "dep": "tmod", - "head": 1, - "tag": "NN", - "orth": "year" - }, - { - "dep": "ROOT", - "head": 0, - "tag": "VBD", - "orth": "sold" - }, - { - "dep": "num", - "head": 1, - "tag": "CD", - "orth": "1,214" - }, - { - "dep": "dobj", - "head": -2, - "tag": "NNS", - "orth": "cars" - }, - { - "dep": "prep", - "head": -3, - "tag": "IN", - "orth": "in" - }, - { - "dep": "det", - "head": 1, - "tag": "DT", - "orth": "the" - }, - { - "dep": "pobj", - "head": -2, - "tag": "NNP", - "orth": "U.S." 
- } - ] - } - ] - } - ] - }, - { - "id": 4, - "paragraphs": [ - { - "sentences": [ - { - "tokens": [ - { - "dep": "nn", - "head": 1, - "tag": "NNP", - "orth": "Howard" - }, - { - "dep": "nsubj", - "head": 8, - "tag": "NNP", - "orth": "Mosher" - }, - { - "dep": "punct", - "head": -1, - "tag": ",", - "orth": "," - }, - { - "dep": "appos", - "head": -2, - "tag": "NN", - "orth": "president" - }, - { - "dep": "cc", - "head": -1, - "tag": "CC", - "orth": "and" - }, - { - "dep": "amod", - "head": 2, - "tag": "JJ", - "orth": "chief" - }, - { - "dep": "nn", - "head": 1, - "tag": "NN", - "orth": "executive" - }, - { - "dep": "conj", - "head": -4, - "tag": "NN", - "orth": "officer" - }, - { - "dep": "punct", - "head": -7, - "tag": ",", - "orth": "," - }, - { - "dep": "ROOT", - "head": 0, - "tag": "VBD", - "orth": "said" - }, - { - "dep": "nsubj", - "head": 1, - "tag": "PRP", - "orth": "he" - }, - { - "dep": "ccomp", - "head": -2, - "tag": "VBZ", - "orth": "anticipates" - }, - { - "dep": "dobj", - "head": -1, - "tag": "NN", - "orth": "growth" - }, - { - "dep": "prep", - "head": -1, - "tag": "IN", - "orth": "for" - }, - { - "dep": "det", - "head": 3, - "tag": "DT", - "orth": "the" - }, - { - "dep": "nn", - "head": 2, - "tag": "NN", - "orth": "luxury" - }, - { - "dep": "nn", - "head": 1, - "tag": "NN", - "orth": "auto" - }, - { - "dep": "pobj", - "head": -4, - "tag": "NN", - "orth": "maker" - }, - { - "dep": "prep", - "head": -6, - "tag": "IN", - "orth": "in" - }, - { - "dep": "pobj", - "head": -1, - "tag": "NNP", - "orth": "Britain" - }, - { - "dep": "cc", - "head": -1, - "tag": "CC", - "orth": "and" - }, - { - "dep": "conj", - "head": -2, - "tag": "NNP", - "orth": "Europe" - }, - { - "dep": "punct", - "head": -4, - "tag": ",", - "orth": "," - }, - { - "dep": "cc", - "head": -5, - "tag": "CC", - "orth": "and" - }, - { - "dep": "conj", - "head": -6, - "tag": "IN", - "orth": "in" - }, - { - "dep": "amod", - "head": 1, - "tag": "JJ", - "orth": "Far" - }, - { - "dep": "amod", - "head": 1, - "tag": "JJ", - "orth": "Eastern" - }, - { - "dep": "pobj", - "head": -3, - "tag": "NNS", - "orth": "markets" - }, - { - "dep": "punct", - "head": -19, - "tag": ".", - "orth": "." - } - ] - } - ] - } - ] - }, - { - "id": 5, - "paragraphs": [ - { - "sentences": [ - { - "tokens": [ - { - "dep": "nn", - "head": 2, - "tag": "NNP", - "orth": "BELL" - }, - { - "dep": "nn", - "head": 1, - "tag": "NNP", - "orth": "INDUSTRIES" - }, - { - "dep": "nsubj", - "head": 1, - "tag": "NNP", - "orth": "Inc." - }, - { - "dep": "ROOT", - "head": 0, - "tag": "VBD", - "orth": "increased" - }, - { - "dep": "poss", - "head": 1, - "tag": "PRP$", - "orth": "its" - }, - { - "dep": "dobj", - "head": -2, - "tag": "NN", - "orth": "quarterly" - }, - { - "dep": "prep", - "head": -3, - "tag": "TO", - "orth": "to" - }, - { - "dep": "num", - "head": 1, - "tag": "CD", - "orth": "10" - }, - { - "dep": "pobj", - "head": -2, - "tag": "NNS", - "orth": "cents" - }, - { - "dep": "prep", - "head": -6, - "tag": "IN", - "orth": "from" - }, - { - "dep": "num", - "head": 1, - "tag": "CD", - "orth": "seven" - }, - { - "dep": "pobj", - "head": -2, - "tag": "NNS", - "orth": "cents" - }, - { - "dep": "det", - "head": 1, - "tag": "DT", - "orth": "a" - }, - { - "dep": "npadvmod", - "head": -2, - "tag": "NN", - "orth": "share" - }, - { - "dep": "punct", - "head": -11, - "tag": ".", - "orth": "." 
- } - ] - } - ] - } - ] - }, - { - "id": 6, - "paragraphs": [ - { - "sentences": [ - { - "tokens": [ - { - "dep": "det", - "head": 2, - "tag": "DT", - "orth": "The" - }, - { - "dep": "amod", - "head": 1, - "tag": "JJ", - "orth": "new" - }, - { - "dep": "nsubj", - "head": 3, - "tag": "NN", - "orth": "rate" - }, - { - "dep": "aux", - "head": 2, - "tag": "MD", - "orth": "will" - }, - { - "dep": "cop", - "head": 1, - "tag": "VB", - "orth": "be" - }, - { - "dep": "ROOT", - "head": 0, - "tag": "JJ", - "orth": "payable" - }, - { - "dep": "tmod", - "head": -1, - "tag": "NNP", - "orth": "Feb." - }, - { - "dep": "num", - "head": -1, - "tag": "CD", - "orth": "15" - }, - { - "dep": "punct", - "head": -3, - "tag": ".", - "orth": "." - } - ] - } - ] - } - ] - }, - { - "id": 7, - "paragraphs": [ - { - "sentences": [ - { - "tokens": [ - { - "dep": "det", - "head": 2, - "tag": "DT", - "orth": "A" - }, - { - "dep": "nn", - "head": 1, - "tag": "NN", - "orth": "record" - }, - { - "dep": "nsubjpass", - "head": 4, - "tag": "NN", - "orth": "date" - }, - { - "dep": "aux", - "head": 3, - "tag": "VBZ", - "orth": "has" - }, - { - "dep": "neg", - "head": 2, - "tag": "RB", - "orth": "n't" - }, - { - "dep": "auxpass", - "head": 1, - "tag": "VBN", - "orth": "been" - }, - { - "dep": "ROOT", - "head": 0, - "tag": "VBN", - "orth": "set" - }, - { - "dep": "punct", - "head": -1, - "tag": ".", - "orth": "." - } - ] - } - ] - } - ] - }, - { - "id": 8, - "paragraphs": [ - { - "sentences": [ - { - "tokens": [ - { - "dep": "nsubj", - "head": 7, - "tag": "NNP", - "orth": "Bell" - }, - { - "dep": "punct", - "head": -1, - "tag": ",", - "orth": "," - }, - { - "dep": "partmod", - "head": -2, - "tag": "VBN", - "orth": "based" - }, - { - "dep": "prep", - "head": -1, - "tag": "IN", - "orth": "in" - }, - { - "dep": "nn", - "head": 1, - "tag": "NNP", - "orth": "Los" - }, - { - "dep": "pobj", - "head": -2, - "tag": "NNP", - "orth": "Angeles" - }, - { - "dep": "punct", - "head": -6, - "tag": ",", - "orth": "," - }, - { - "dep": "ROOT", - "head": 0, - "tag": "VBZ", - "orth": "makes" - }, - { - "dep": "cc", - "head": -1, - "tag": "CC", - "orth": "and" - }, - { - "dep": "conj", - "head": -2, - "tag": "VBZ", - "orth": "distributes" - }, - { - "dep": "amod", - "head": 5, - "tag": "JJ", - "orth": "electronic" - }, - { - "dep": "punct", - "head": -1, - "tag": ",", - "orth": "," - }, - { - "dep": "conj", - "head": -2, - "tag": "NN", - "orth": "computer" - }, - { - "dep": "cc", - "head": -3, - "tag": "CC", - "orth": "and" - }, - { - "dep": "conj", - "head": -4, - "tag": "NN", - "orth": "building" - }, - { - "dep": "dobj", - "head": -8, - "tag": "NNS", - "orth": "products" - }, - { - "dep": "punct", - "head": -9, - "tag": ".", - "orth": "." - } - ] - } - ] - } - ] - } -] + { + "id": "wsj_0200", + "paragraphs": [ + { + "raw": "In an Oct. 19 review of \"The Misanthrope\" at Chicago's Goodman Theatre (\"Revitalized Classics Take the Stage in Windy City,\" Leisure & Arts), the role of Celimene, played by Kim Cattrall, was mistakenly attributed to Christina Haag. Ms. 
Haag plays Elianti.", + "sentences": [ + { + "tokens": [ + { + "head": 44, + "dep": "prep", + "tag": "IN", + "orth": "In", + "ner": "O", + "id": 0 + }, + { + "head": 3, + "dep": "det", + "tag": "DT", + "orth": "an", + "ner": "O", + "id": 1 + }, + { + "head": 2, + "dep": "nmod", + "tag": "NNP", + "orth": "Oct.", + "ner": "B-DATE", + "id": 2 + }, + { + "head": -1, + "dep": "nummod", + "tag": "CD", + "orth": "19", + "ner": "L-DATE", + "id": 3 + }, + { + "head": -4, + "dep": "pobj", + "tag": "NN", + "orth": "review", + "ner": "O", + "id": 4 + }, + { + "head": -1, + "dep": "prep", + "tag": "IN", + "orth": "of", + "ner": "O", + "id": 5 + }, + { + "head": 2, + "dep": "punct", + "tag": "``", + "orth": "``", + "ner": "O", + "id": 6 + }, + { + "head": 1, + "dep": "det", + "tag": "DT", + "orth": "The", + "ner": "B-WORK_OF_ART", + "id": 7 + }, + { + "head": -3, + "dep": "pobj", + "tag": "NN", + "orth": "Misanthrope", + "ner": "L-WORK_OF_ART", + "id": 8 + }, + { + "head": -1, + "dep": "punct", + "tag": "''", + "orth": "''", + "ner": "O", + "id": 9 + }, + { + "head": -2, + "dep": "prep", + "tag": "IN", + "orth": "at", + "ner": "O", + "id": 10 + }, + { + "head": 3, + "dep": "poss", + "tag": "NNP", + "orth": "Chicago", + "ner": "U-GPE", + "id": 11 + }, + { + "head": -1, + "dep": "case", + "tag": "POS", + "orth": "'s", + "ner": "O", + "id": 12 + }, + { + "head": 1, + "dep": "compound", + "tag": "NNP", + "orth": "Goodman", + "ner": "B-FAC", + "id": 13 + }, + { + "head": -4, + "dep": "pobj", + "tag": "NNP", + "orth": "Theatre", + "ner": "L-FAC", + "id": 14 + }, + { + "head": 4, + "dep": "punct", + "tag": "-LRB-", + "orth": "(", + "ner": "O", + "id": 15 + }, + { + "head": 3, + "dep": "punct", + "tag": "``", + "orth": "``", + "ner": "O", + "id": 16 + }, + { + "head": 1, + "dep": "amod", + "tag": "VBN", + "orth": "Revitalized", + "ner": "B-WORK_OF_ART", + "id": 17 + }, + { + "head": 1, + "dep": "nsubj", + "tag": "NNS", + "orth": "Classics", + "ner": "I-WORK_OF_ART", + "id": 18 + }, + { + "head": -15, + "dep": "appos", + "tag": "VBP", + "orth": "Take", + "ner": "I-WORK_OF_ART", + "id": 19 + }, + { + "head": 1, + "dep": "det", + "tag": "DT", + "orth": "the", + "ner": "I-WORK_OF_ART", + "id": 20 + }, + { + "head": -2, + "dep": "dobj", + "tag": "NN", + "orth": "Stage", + "ner": "I-WORK_OF_ART", + "id": 21 + }, + { + "head": -3, + "dep": "prep", + "tag": "IN", + "orth": "in", + "ner": "I-WORK_OF_ART", + "id": 22 + }, + { + "head": 1, + "dep": "compound", + "tag": "NNP", + "orth": "Windy", + "ner": "I-WORK_OF_ART", + "id": 23 + }, + { + "head": -2, + "dep": "pobj", + "tag": "NNP", + "orth": "City", + "ner": "L-WORK_OF_ART", + "id": 24 + }, + { + "head": -6, + "dep": "punct", + "tag": ",", + "orth": ",", + "ner": "O", + "id": 25 + }, + { + "head": -7, + "dep": "punct", + "tag": "''", + "orth": "''", + "ner": "O", + "id": 26 + }, + { + "head": -8, + "dep": "npadvmod", + "tag": "NN", + "orth": "Leisure", + "ner": "B-ORG", + "id": 27 + }, + { + "head": -1, + "dep": "cc", + "tag": "CC", + "orth": "&", + "ner": "I-ORG", + "id": 28 + }, + { + "head": -2, + "dep": "conj", + "tag": "NNS", + "orth": "Arts", + "ner": "L-ORG", + "id": 29 + }, + { + "head": -11, + "dep": "punct", + "tag": "-RRB-", + "orth": ")", + "ner": "O", + "id": 30 + }, + { + "head": 13, + "dep": "punct", + "tag": ",", + "orth": ",", + "ner": "O", + "id": 31 + }, + { + "head": 1, + "dep": "det", + "tag": "DT", + "orth": "the", + "ner": "O", + "id": 32 + }, + { + "head": 11, + "dep": "nsubjpass", + "tag": "NN", + "orth": "role", + "ner": "O", + "id": 33 + }, 
+ { + "head": -1, + "dep": "prep", + "tag": "IN", + "orth": "of", + "ner": "O", + "id": 34 + }, + { + "head": -1, + "dep": "pobj", + "tag": "NNP", + "orth": "Celimene", + "ner": "U-PERSON", + "id": 35 + }, + { + "head": -3, + "dep": "punct", + "tag": ",", + "orth": ",", + "ner": "O", + "id": 36 + }, + { + "head": -4, + "dep": "acl", + "tag": "VBN", + "orth": "played", + "ner": "O", + "id": 37 + }, + { + "head": -1, + "dep": "agent", + "tag": "IN", + "orth": "by", + "ner": "O", + "id": 38 + }, + { + "head": 1, + "dep": "compound", + "tag": "NNP", + "orth": "Kim", + "ner": "B-PERSON", + "id": 39 + }, + { + "head": -2, + "dep": "pobj", + "tag": "NNP", + "orth": "Cattrall", + "ner": "L-PERSON", + "id": 40 + }, + { + "head": -8, + "dep": "punct", + "tag": ",", + "orth": ",", + "ner": "O", + "id": 41 + }, + { + "head": 2, + "dep": "auxpass", + "tag": "VBD", + "orth": "was", + "ner": "O", + "id": 42 + }, + { + "head": 1, + "dep": "advmod", + "tag": "RB", + "orth": "mistakenly", + "ner": "O", + "id": 43 + }, + { + "head": 0, + "dep": "root", + "tag": "VBN", + "orth": "attributed", + "ner": "O", + "id": 44 + }, + { + "head": -1, + "dep": "prep", + "tag": "IN", + "orth": "to", + "ner": "O", + "id": 45 + }, + { + "head": 1, + "dep": "compound", + "tag": "NNP", + "orth": "Christina", + "ner": "B-PERSON", + "id": 46 + }, + { + "head": -2, + "dep": "pobj", + "tag": "NNP", + "orth": "Haag", + "ner": "L-PERSON", + "id": 47 + }, + { + "head": -4, + "dep": "punct", + "tag": ".", + "orth": ".", + "ner": "O", + "id": 48 + } + ], + "brackets": [ + { + "first": 2, + "last": 3, + "label": "NML" + }, + { + "first": 1, + "last": 4, + "label": "NP" + }, + { + "first": 7, + "last": 8, + "label": "NP-TTL" + }, + { + "first": 11, + "last": 12, + "label": "NP" + }, + { + "first": 11, + "last": 14, + "label": "NP" + }, + { + "first": 10, + "last": 14, + "label": "PP-LOC" + }, + { + "first": 6, + "last": 14, + "label": "NP" + }, + { + "first": 5, + "last": 14, + "label": "PP" + }, + { + "first": 1, + "last": 14, + "label": "NP" + }, + { + "first": 17, + "last": 18, + "label": "NP-SBJ" + }, + { + "first": 20, + "last": 21, + "label": "NP" + }, + { + "first": 23, + "last": 24, + "label": "NP" + }, + { + "first": 22, + "last": 24, + "label": "PP-LOC" + }, + { + "first": 19, + "last": 24, + "label": "VP" + }, + { + "first": 17, + "last": 24, + "label": "S-HLN" + }, + { + "first": 27, + "last": 29, + "label": "NP-TMP" + }, + { + "first": 15, + "last": 30, + "label": "NP" + }, + { + "first": 1, + "last": 30, + "label": "NP" + }, + { + "first": 0, + "last": 30, + "label": "PP-LOC" + }, + { + "first": 32, + "last": 33, + "label": "NP" + }, + { + "first": 35, + "last": 35, + "label": "NP" + }, + { + "first": 34, + "last": 35, + "label": "PP" + }, + { + "first": 32, + "last": 35, + "label": "NP" + }, + { + "first": 39, + "last": 40, + "label": "NP-LGS" + }, + { + "first": 38, + "last": 40, + "label": "PP" + }, + { + "first": 37, + "last": 40, + "label": "VP" + }, + { + "first": 32, + "last": 41, + "label": "NP-SBJ-2" + }, + { + "first": 43, + "last": 43, + "label": "ADVP-MNR" + }, + { + "first": 46, + "last": 47, + "label": "NP" + }, + { + "first": 45, + "last": 47, + "label": "PP-CLR" + }, + { + "first": 44, + "last": 47, + "label": "VP" + }, + { + "first": 42, + "last": 47, + "label": "VP" + }, + { + "first": 0, + "last": 48, + "label": "S" + } + ] + }, + { + "tokens": [ + { + "head": 1, + "dep": "compound", + "tag": "NNP", + "orth": "Ms.", + "ner": "O", + "id": 0 + }, + { + "head": 1, + "dep": "nsubj", + "tag": "NNP", + "orth": 
"Haag", + "ner": "U-PERSON", + "id": 1 + }, + { + "head": 0, + "dep": "root", + "tag": "VBZ", + "orth": "plays", + "ner": "O", + "id": 2 + }, + { + "head": -1, + "dep": "dobj", + "tag": "NNP", + "orth": "Elianti", + "ner": "U-PERSON", + "id": 3 + }, + { + "head": -2, + "dep": "punct", + "tag": ".", + "orth": ".", + "ner": "O", + "id": 4 + } + ], + "brackets": [ + { + "first": 0, + "last": 1, + "label": "NP-SBJ" + }, + { + "first": 3, + "last": 3, + "label": "NP" + }, + { + "first": 2, + "last": 3, + "label": "VP" + }, + { + "first": 0, + "last": 4, + "label": "S" + } + ] + } + ] + } + ] + } + ] From c9dc88ddfc8d605818b02cea7b2ce95dbaf97610 Mon Sep 17 00:00:00 2001 From: ines Date: Tue, 24 Oct 2017 15:50:41 +0200 Subject: [PATCH 44/77] Document current JSON format for training --- website/api/_annotation/_training.jade | 46 +++++++++++++++++++++ website/api/annotation.jade | 29 +------------ website/usage/_training/_tagger-parser.jade | 4 ++ 3 files changed, 51 insertions(+), 28 deletions(-) create mode 100644 website/api/_annotation/_training.jade diff --git a/website/api/_annotation/_training.jade b/website/api/_annotation/_training.jade new file mode 100644 index 000000000..3b11eb2f5 --- /dev/null +++ b/website/api/_annotation/_training.jade @@ -0,0 +1,46 @@ +//- 💫 DOCS > API > ANNOTATION > TRAINING + +p + | spaCy takes training data in JSON format. The built-in + | #[+api("cli#convert") #[code convert]] command helps you convert the + | #[code .conllu] format used by the + | #[+a("https://github.com/UniversalDependencies") Universal Dependencies corpora] + | to spaCy's training format. + ++aside("Annotating entities") + | Named entities are provided in the #[+a("/api/annotation#biluo") BILUO] + | notation. Tokens outside an entity are set to #[code "O"] and tokens + | that are part of an entity are set to the entity label, prefixed by the + | BILUO marker. For example #[code "B-ORG"] describes the first token of + | a multi-token #[code ORG] entity and #[code "U-PERSON"] a single + | token representing a #[code PERSON] entity + ++code("Example structure"). + [{ + "id": int, # ID of the document within the corpus + "paragraphs": [{ # list of paragraphs in the corpus + "raw": string, # raw text of the paragraph + "sentences": [{ # list of sentences in the paragraph + "tokens": [{ # list of tokens in the sentence + "id": int, # index of the token in the document + "dep": string, # dependency label + "head": int, # offset of token head relative to token index + "tag": string, # part-of-speech tag + "orth": string, # verbatim text of the token + "ner": string # BILUO label, e.g. "O" or "B-ORG" + }], + "brackets": [{ # phrase structure (NOT USED by current models) + "first": int, # index of first token + "last": int, # index of last token + "label": string # phrase label + }] + }] + }] + }] + +p + | Here's an example of dependencies, part-of-speech tags and names + | entities, taken from the English Wall Street Journal portion of the Penn + | Treebank: + ++github("spacy", "examples/training/training-data.json", false, false, "json") diff --git a/website/api/annotation.jade b/website/api/annotation.jade index efada23d7..c65cd3983 100644 --- a/website/api/annotation.jade +++ b/website/api/annotation.jade @@ -101,31 +101,4 @@ p This document describes the target annotations spaCy is trained to predict. +section("training") +h(2, "json-input") JSON input format for training - +under-construction - - p spaCy takes training data in the following format: - - +code("Example structure"). 
- doc: { - id: string, - paragraphs: [{ - raw: string, - sents: [int], - tokens: [{ - start: int, - tag: string, - head: int, - dep: string - }], - ner: [{ - start: int, - end: int, - label: string - }], - brackets: [{ - start: int, - end: int, - label: string - }] - }] - } + include _annotation/_training diff --git a/website/usage/_training/_tagger-parser.jade b/website/usage/_training/_tagger-parser.jade index 4011464c7..a62b9d43e 100644 --- a/website/usage/_training/_tagger-parser.jade +++ b/website/usage/_training/_tagger-parser.jade @@ -1,3 +1,7 @@ //- 💫 DOCS > USAGE > TRAINING > TAGGER & PARSER +under-construction + ++h(3, "training-json") JSON format for training + +include ../../api/_annotation/_training From 3944c1d6e7e6a62824b4074545c59f183ad4479a Mon Sep 17 00:00:00 2001 From: ines Date: Tue, 24 Oct 2017 15:51:05 +0200 Subject: [PATCH 45/77] Document lemmatizer --- website/api/_data.json | 4 +- website/api/lemmatizer.jade | 157 +++++++++++++++++++++++++++++++++++- 2 files changed, 159 insertions(+), 2 deletions(-) diff --git a/website/api/_data.json b/website/api/_data.json index d85b103dc..e9324e7e3 100644 --- a/website/api/_data.json +++ b/website/api/_data.json @@ -160,7 +160,9 @@ "lemmatizer": { "title": "Lemmatizer", - "tag": "class" + "teaser": "Assign the base forms of words.", + "tag": "class", + "source": "spacy/lemmatizer.py" }, "tagger": { diff --git a/website/api/lemmatizer.jade b/website/api/lemmatizer.jade index 9699395b1..eb061f10a 100644 --- a/website/api/lemmatizer.jade +++ b/website/api/lemmatizer.jade @@ -2,4 +2,159 @@ include ../_includes/_mixins -+under-construction +p + | The #[code Lemmatizer] supports simple part-of-speech-sensitive suffix + | rules and lookup tables. + ++h(2, "init") Lemmatizer.__init__ + +tag method + +p Create a #[code Lemmatizer]. + ++aside-code("Example"). + from spacy.lemmatizer import Lemmatizer + lemmatizer = Lemmatizer() + ++table(["Name", "Type", "Description"]) + +row + +cell #[code index] + +cell dict / #[code None] + +cell Inventory of lemmas in the language. + + +row + +cell #[code exceptions] + +cell dict / #[code None] + +cell Mapping of string forms to lemmas that bypass the #[code rules]. + + +row + +cell #[code rules] + +cell dict / #[code None] + +cell List of suffix rewrite rules. + + +row + +cell #[code lookup] + +cell dict / #[code None] + +cell Lookup table mapping string to their lemmas. + + +row("foot") + +cell returns + +cell #[code Lemmatizer] + +cell The newly created object. + ++h(2, "call") Lemmatizer.__call__ + +tag method + +p Lemmatize a string. + ++aside-code("Example"). + from spacy.lemmatizer import Lemmatizer + from spacy.lang.en import LEMMA_INDEX, LEMMA_EXC, LEMMA_RULES + lemmatizer = Lemmatizer(LEMMA_INDEX, LEMMA_EXC, LEMMA_RULES) + lemmas = lemmatizer(u'ducks', u'NOUN') + assert lemmas == [u'duck'] + ++table(["Name", "Type", "Description"]) + +row + +cell #[code string] + +cell unicode + +cell The string to lemmatize, e.g. the token text. + + +row + +cell #[code univ_pos] + +cell unicode / int + +cell The token's universal part-of-speech tag. + + +row + +cell #[code morphology] + +cell dict / #[code None] + +cell + | Morphological features following the + | #[+a("http://universaldependencies.org/") Universal Dependencies] + | scheme. + + +row("foot") + +cell returns + +cell list + +cell The available lemmas for the string. + ++h(2, "lookup") Lemmatizer.lookup + +tag method + +tag-new(2) + +p + | Look up a lemma in the lookup table, if available. 
If no lemma is found,
+    | the original string is returned. Languages can provide a
+    | #[+a("/usage/adding-languages#lemmatizer") lookup table] via the
+    | #[code lemma_lookup] variable, set on the individual #[code Language]
+    | class.
+
++aside-code("Example").
+    lookup = {u'going': u'go'}
+    lemmatizer = Lemmatizer(lookup=lookup)
+    assert lemmatizer.lookup(u'going') == u'go'
+
++table(["Name", "Type", "Description"])
+    +row
+        +cell #[code string]
+        +cell unicode
+        +cell The string to look up.
+
+    +row("foot")
+        +cell returns
+        +cell unicode
+        +cell The lemma if the string was found, otherwise the original string.
+
++h(2, "is_base_form") Lemmatizer.is_base_form
+    +tag method
+
+p
+    | Check whether we're dealing with an uninflected paradigm, so we can
+    | avoid lemmatization entirely.
+
++aside-code("Example").
+    pos = 'verb'
+    morph = {'VerbForm': 'inf'}
+    is_base_form = lemmatizer.is_base_form(pos, morph)
+    assert is_base_form == True
+
++table(["Name", "Type", "Description"])
+    +row
+        +cell #[code univ_pos]
+        +cell unicode / int
+        +cell The token's universal part-of-speech tag.
+
+    +row
+        +cell #[code morphology]
+        +cell dict
+        +cell The token's morphological features.
+
+    +row("foot")
+        +cell returns
+        +cell bool
+        +cell
+            | Whether the token's part-of-speech tag and morphological features
+            | describe a base form.
+
++h(2, "attributes") Attributes
+
++table(["Name", "Type", "Description"])
+    +row
+        +cell #[code index]
+        +cell dict / #[code None]
+        +cell Inventory of lemmas in the language.
+
+    +row
+        +cell #[code exc]
+        +cell dict / #[code None]
+        +cell Mapping of string forms to lemmas that bypass the #[code rules].
+
+    +row
+        +cell #[code rules]
+        +cell dict / #[code None]
+        +cell List of suffix rewrite rules.
+
+    +row
+        +cell #[code lookup_table]
+            +tag-new(2)
+        +cell dict / #[code None]
+        +cell The lemma lookup table, if available.
From 56a47f137f7f5334bbc0ae580e9aee7e207731e4 Mon Sep 17 00:00:00 2001
From: ines
Date: Tue, 24 Oct 2017 15:51:13 +0200
Subject: [PATCH 46/77] Add title description for tokenizer

---
 website/api/_data.json | 1 +
 1 file changed, 1 insertion(+)

diff --git a/website/api/_data.json b/website/api/_data.json
index e9324e7e3..ba7997690 100644
--- a/website/api/_data.json
+++ b/website/api/_data.json
@@ -154,6 +154,7 @@

    "tokenizer": {
        "title": "Tokenizer",
+        "teaser": "Segment text into words, punctuation marks etc.",
        "tag": "class",
        "source": "spacy/tokenizer.pyx"
    },
From 6686e53530c97b7c5b6b7b0cd132aaf0f07d816f Mon Sep 17 00:00:00 2001
From: ines
Date: Tue, 24 Oct 2017 15:51:24 +0200
Subject: [PATCH 47/77] Allow GitHub embeds to specify optional language

---
 website/_includes/_mixins.jade | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/_includes/_mixins.jade b/website/_includes/_mixins.jade
index 414ee809e..b7375e2e0 100644
--- a/website/_includes/_mixins.jade
+++ b/website/_includes/_mixins.jade
@@ -181,7 +181,7 @@ mixin codepen(slug, height, default_tab)
    alt_file - [string] alternative file path used in footer and link button
    height   - [integer] height of code preview in px

-mixin github(repo, file, alt_file, height)
+mixin github(repo, file, alt_file, height, language)
    - var branch = ALPHA ? "develop" : "master"
"develop" : "master" - var height = height || 250 From 95f61745162db49649bb3849aef59a173563c3f8 Mon Sep 17 00:00:00 2001 From: ines Date: Tue, 24 Oct 2017 16:00:13 +0200 Subject: [PATCH 48/77] Remove tensorizer from model pipeline example in spacy package --- spacy/cli/package.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spacy/cli/package.py b/spacy/cli/package.py index 5ffc493c3..83d4917f6 100644 --- a/spacy/cli/package.py +++ b/spacy/cli/package.py @@ -101,7 +101,7 @@ def generate_meta(): def generate_pipeline(): prints("If set to 'True', the default pipeline is used. If set to 'False', " "the pipeline will be disabled. Components should be specified as a " - "comma-separated list of component names, e.g. tensorizer, tagger, " + "comma-separated list of component names, e.g. tagger, " "parser, ner. For more information, see the docs on processing pipelines.", title="Enter your model's pipeline components") pipeline = util.get_raw_input("Pipeline components", True) From 95f866f99f42ca2475991c23ef10141730000324 Mon Sep 17 00:00:00 2001 From: ines Date: Tue, 24 Oct 2017 16:00:33 +0200 Subject: [PATCH 49/77] Add lookup argument to Lemmatizer.load --- spacy/lemmatizer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/spacy/lemmatizer.py b/spacy/lemmatizer.py index bd2ca766a..1f401f63c 100644 --- a/spacy/lemmatizer.py +++ b/spacy/lemmatizer.py @@ -7,8 +7,8 @@ from .symbols import VerbForm_inf, VerbForm_none, Number_sing, Degree_pos class Lemmatizer(object): @classmethod - def load(cls, path, index=None, exc=None, rules=None): - return cls(index or {}, exc or {}, rules or {}) + def load(cls, path, index=None, exc=None, rules=None, lookup=None): + return cls(index or {}, exc or {}, rules or {}, lookup or {}) def __init__(self, index=None, exceptions=None, rules=None, lookup=None): self.index = index if index is not None else {} From 8492d5be6dd7b9d10bccd97c70fd157a3122abd7 Mon Sep 17 00:00:00 2001 From: ines Date: Tue, 24 Oct 2017 16:00:54 +0200 Subject: [PATCH 50/77] Always make lemmatizer return a list of lemmas, not a set --- spacy/lemmatizer.py | 6 +++--- spacy/morphology.pyx | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/spacy/lemmatizer.py b/spacy/lemmatizer.py index 1f401f63c..f3327a1d7 100644 --- a/spacy/lemmatizer.py +++ b/spacy/lemmatizer.py @@ -26,10 +26,10 @@ class Lemmatizer(object): elif univ_pos in (PUNCT, 'PUNCT', 'punct'): univ_pos = 'punct' else: - return set([string.lower()]) + return list(set([string.lower()])) # See Issue #435 for example of where this logic is requied. 
if self.is_base_form(univ_pos, morphology): - return set([string.lower()]) + return list(set([string.lower()])) lemmas = lemmatize(string, self.index.get(univ_pos, {}), self.exc.get(univ_pos, {}), self.rules.get(univ_pos, [])) @@ -108,4 +108,4 @@ def lemmatize(string, index, exceptions, rules): forms.extend(oov_forms) if not forms: forms.append(string) - return set(forms) + return list(set(forms)) diff --git a/spacy/morphology.pyx b/spacy/morphology.pyx index 7845ab4e7..090a07fe8 100644 --- a/spacy/morphology.pyx +++ b/spacy/morphology.pyx @@ -172,7 +172,7 @@ cdef class Morphology: cdef unicode py_string = self.strings[orth] if self.lemmatizer is None: return self.strings.add(py_string.lower()) - cdef set lemma_strings + cdef list lemma_strings cdef unicode lemma_string lemma_strings = self.lemmatizer(py_string, univ_pos, morphology) lemma_string = sorted(lemma_strings)[0] From 63f0bde749018909812de4f1cf3ec12cf6770483 Mon Sep 17 00:00:00 2001 From: Matthew Honnibal Date: Tue, 24 Oct 2017 16:07:18 +0200 Subject: [PATCH 51/77] Add test for #1250: Tokenizer cache clobbered special-case attrs --- spacy/tests/regression/test_issue1250.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 spacy/tests/regression/test_issue1250.py diff --git a/spacy/tests/regression/test_issue1250.py b/spacy/tests/regression/test_issue1250.py new file mode 100644 index 000000000..3b6e0bbf2 --- /dev/null +++ b/spacy/tests/regression/test_issue1250.py @@ -0,0 +1,13 @@ +from __future__ import unicode_literals +from ...tokenizer import Tokenizer +from ...symbols import ORTH, LEMMA, POS +from ...lang.en import English + +def test_issue1250_cached_special_cases(): + nlp = English() + nlp.tokenizer.add_special_case(u'reimbur', [{ORTH: u'reimbur', LEMMA: u'reimburse', POS: u'VERB'}]) + + lemmas = [w.lemma_ for w in nlp(u'reimbur, reimbur...')] + assert lemmas == ['reimburse', ',', 'reimburse', '...'] + lemmas = [w.lemma_ for w in nlp(u'reimbur, reimbur...')] + assert lemmas == ['reimburse', ',', 'reimburse', '...'] From b0f6fd3f1db76131c230b317caba946ce516a193 Mon Sep 17 00:00:00 2001 From: Matthew Honnibal Date: Tue, 24 Oct 2017 16:07:44 +0200 Subject: [PATCH 52/77] Disable tokenizer cache for special-cases. Fixes #1250 --- spacy/tokenizer.pxd | 5 +++-- spacy/tokenizer.pyx | 27 ++++++++++++++++++++------- 2 files changed, 23 insertions(+), 9 deletions(-) diff --git a/spacy/tokenizer.pxd b/spacy/tokenizer.pxd index 1a3e86b49..919b0928b 100644 --- a/spacy/tokenizer.pxd +++ b/spacy/tokenizer.pxd @@ -27,8 +27,9 @@ cdef class Tokenizer: cdef int _try_cache(self, hash_t key, Doc tokens) except -1 cdef int _tokenize(self, Doc tokens, unicode span, hash_t key) except -1 cdef unicode _split_affixes(self, Pool mem, unicode string, vector[LexemeC*] *prefixes, - vector[LexemeC*] *suffixes) + vector[LexemeC*] *suffixes, int* has_special) cdef int _attach_tokens(self, Doc tokens, unicode string, vector[LexemeC*] *prefixes, vector[LexemeC*] *suffixes) except -1 - cdef int _save_cached(self, const TokenC* tokens, hash_t key, int n) except -1 + cdef int _save_cached(self, const TokenC* tokens, hash_t key, int has_special, + int n) except -1 diff --git a/spacy/tokenizer.pyx b/spacy/tokenizer.pyx index 692357c8a..bc09129de 100644 --- a/spacy/tokenizer.pyx +++ b/spacy/tokenizer.pyx @@ -20,7 +20,8 @@ cdef class Tokenizer: """Segment text, and create Doc objects with the discovered segment boundaries. 
""" - def __init__(self, Vocab vocab, rules, prefix_search, suffix_search, infix_finditer, token_match=None): + def __init__(self, Vocab vocab, rules=None, prefix_search=None, + suffix_search=None, infix_finditer=None, token_match=None): """Create a `Tokenizer`, to create `Doc` objects given unicode text. vocab (Vocab): A storage container for lexical types. @@ -48,8 +49,9 @@ cdef class Tokenizer: self.infix_finditer = infix_finditer self.vocab = vocab self._rules = {} - for chunk, substrings in sorted(rules.items()): - self.add_special_case(chunk, substrings) + if rules is not None: + for chunk, substrings in sorted(rules.items()): + self.add_special_case(chunk, substrings) def __reduce__(self): args = (self.vocab, @@ -148,14 +150,18 @@ cdef class Tokenizer: cdef vector[LexemeC*] prefixes cdef vector[LexemeC*] suffixes cdef int orig_size + cdef int has_special orig_size = tokens.length - span = self._split_affixes(tokens.mem, span, &prefixes, &suffixes) + span = self._split_affixes(tokens.mem, span, &prefixes, &suffixes, + &has_special) self._attach_tokens(tokens, span, &prefixes, &suffixes) - self._save_cached(&tokens.c[orig_size], orig_key, tokens.length - orig_size) + self._save_cached(&tokens.c[orig_size], orig_key, has_special, + tokens.length - orig_size) cdef unicode _split_affixes(self, Pool mem, unicode string, vector[const LexemeC*] *prefixes, - vector[const LexemeC*] *suffixes): + vector[const LexemeC*] *suffixes, + int* has_special): cdef size_t i cdef unicode prefix cdef unicode suffix @@ -174,6 +180,7 @@ cdef class Tokenizer: if minus_pre and self._specials.get(hash_string(minus_pre)) != NULL: string = minus_pre prefixes.push_back(self.vocab.get(mem, prefix)) + has_special[0] = 1 break if self.token_match and self.token_match(string): break @@ -185,6 +192,7 @@ cdef class Tokenizer: if minus_suf and (self._specials.get(hash_string(minus_suf)) != NULL): string = minus_suf suffixes.push_back(self.vocab.get(mem, suffix)) + has_special[0] = 1 break if pre_len and suf_len and (pre_len + suf_len) <= len(string): string = string[pre_len:-suf_len] @@ -197,6 +205,7 @@ cdef class Tokenizer: string = minus_suf suffixes.push_back(self.vocab.get(mem, suffix)) if string and (self._specials.get(hash_string(string)) != NULL): + has_special[0] = 1 break return string @@ -256,11 +265,15 @@ cdef class Tokenizer: preinc(it) tokens.push_back(lexeme, False) - cdef int _save_cached(self, const TokenC* tokens, hash_t key, int n) except -1: + cdef int _save_cached(self, const TokenC* tokens, hash_t key, + int has_special, int n) except -1: cdef int i for i in range(n): if tokens[i].lex.id == 0: return 0 + # See https://github.com/explosion/spaCy/issues/1250 + if has_special: + return 0 cached = <_Cached*>self.mem.alloc(1, sizeof(_Cached)) cached.length = n cached.is_lex = True From 66766c145440541e9982147580f0f445109bac4e Mon Sep 17 00:00:00 2001 From: Matthew Honnibal Date: Tue, 24 Oct 2017 17:04:43 +0200 Subject: [PATCH 53/77] Restore SP tag to English tag_map, until models migrate --- spacy/lang/en/tag_map.py | 1 + spacy/morphology.pyx | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/spacy/lang/en/tag_map.py b/spacy/lang/en/tag_map.py index 76eabf307..fc3d2cc93 100644 --- a/spacy/lang/en/tag_map.py +++ b/spacy/lang/en/tag_map.py @@ -42,6 +42,7 @@ TAG_MAP = { "RBR": {POS: ADV, "Degree": "comp"}, "RBS": {POS: ADV, "Degree": "sup"}, "RP": {POS: PART}, + "SP": {POS: SPACE}, "SYM": {POS: SYM}, "TO": {POS: PART, "PartType": "inf", "VerbForm": "inf"}, "UH": {POS: INTJ}, diff --git 
a/spacy/morphology.pyx b/spacy/morphology.pyx index 090a07fe8..91befaa1b 100644 --- a/spacy/morphology.pyx +++ b/spacy/morphology.pyx @@ -38,7 +38,7 @@ cdef class Morphology: self.strings = string_store # Add special space symbol. We prefix with underscore, to make sure it # always sorts to the end. - space_attrs = tag_map.pop('SP', {POS: SPACE}) + space_attrs = tag_map.get('SP', {POS: SPACE}) if '_SP' not in tag_map: self.strings.add('_SP') tag_map = dict(tag_map) From 908809d488fb7c9ba25fde8d8077a328a12376f4 Mon Sep 17 00:00:00 2001 From: Matthew Honnibal Date: Tue, 24 Oct 2017 17:05:15 +0200 Subject: [PATCH 54/77] Update tests --- spacy/tests/doc/test_doc_api.py | 18 +++++------- spacy/tests/doc/test_token_api.py | 35 +++++++++++------------- spacy/tests/regression/test_issue1305.py | 11 +++++--- spacy/tests/regression/test_issue781.py | 2 +- 4 files changed, 31 insertions(+), 35 deletions(-) diff --git a/spacy/tests/doc/test_doc_api.py b/spacy/tests/doc/test_doc_api.py index 5e052f771..46c615973 100644 --- a/spacy/tests/doc/test_doc_api.py +++ b/spacy/tests/doc/test_doc_api.py @@ -2,6 +2,8 @@ from __future__ import unicode_literals from ..util import get_doc +from ...tokens import Doc +from ...vocab import Vocab import pytest import numpy @@ -204,17 +206,11 @@ def test_doc_api_right_edge(en_tokenizer): assert doc[6].right_edge.text == ',' -@pytest.mark.xfail -@pytest.mark.parametrize('text,vectors', [ - ("apple orange pear", ["apple -1 -1 -1", "orange -1 -1 0", "pear -1 0 -1"]) -]) -def test_doc_api_has_vector(en_tokenizer, text_file, text, vectors): - text_file.write('\n'.join(vectors)) - text_file.seek(0) - vector_length = en_tokenizer.vocab.load_vectors(text_file) - assert vector_length == 3 - - doc = en_tokenizer(text) +def test_doc_api_has_vector(): + vocab = Vocab() + vocab.clear_vectors(2) + vocab.vectors.add('kitten', numpy.asarray([0., 2.], dtype='f')) + doc = Doc(vocab, words=['kitten']) assert doc.has_vector def test_lowest_common_ancestor(en_tokenizer): diff --git a/spacy/tests/doc/test_token_api.py b/spacy/tests/doc/test_token_api.py index 00caa1445..0ab723f7a 100644 --- a/spacy/tests/doc/test_token_api.py +++ b/spacy/tests/doc/test_token_api.py @@ -3,6 +3,8 @@ from __future__ import unicode_literals from ...attrs import IS_ALPHA, IS_DIGIT, IS_LOWER, IS_PUNCT, IS_TITLE, IS_STOP from ..util import get_doc +from ...vocab import Vocab +from ...tokens import Doc import pytest import numpy @@ -68,26 +70,21 @@ def test_doc_token_api_is_properties(en_vocab): assert doc[5].like_email -@pytest.mark.xfail -@pytest.mark.parametrize('text,vectors', [ - ("apples oranges ldskbjls", ["apples -1 -1 -1", "oranges -1 -1 0"]) -]) -def test_doc_token_api_vectors(en_tokenizer, text_file, text, vectors): - text_file.write('\n'.join(vectors)) - text_file.seek(0) - vector_length = en_tokenizer.vocab.load_vectors(text_file) - assert vector_length == 3 +def test_doc_token_api_vectors(): + vocab = Vocab() + vocab.clear_vectors(2) + vocab.vectors.add('apples', numpy.asarray([0., 2.], dtype='f')) + vocab.vectors.add('oranges', numpy.asarray([0., 1.], dtype='f')) + doc = Doc(vocab, words=['apples', 'oranges', 'oov']) + assert doc.has_vector - tokens = en_tokenizer(text) - assert tokens[0].has_vector - assert tokens[1].has_vector - assert not tokens[2].has_vector - assert tokens[0].similarity(tokens[1]) > tokens[0].similarity(tokens[2]) - assert tokens[0].similarity(tokens[1]) == tokens[1].similarity(tokens[0]) - assert sum(tokens[0].vector) != sum(tokens[1].vector) - assert numpy.isclose( - 
tokens[0].vector_norm, - numpy.sqrt(numpy.dot(tokens[0].vector, tokens[0].vector))) + assert doc[0].has_vector + assert doc[1].has_vector + assert not doc[2].has_vector + apples_norm = (0*0 + 2*2) ** 0.5 + oranges_norm = (0*0 + 1*1) ** 0.5 + cosine = ((0*0) + (2*1)) / (apples_norm * oranges_norm) + assert doc[0].similarity(doc[1]) == cosine def test_doc_token_api_ancestors(en_tokenizer): diff --git a/spacy/tests/regression/test_issue1305.py b/spacy/tests/regression/test_issue1305.py index e123ce0ba..d1d5eb93d 100644 --- a/spacy/tests/regression/test_issue1305.py +++ b/spacy/tests/regression/test_issue1305.py @@ -1,8 +1,11 @@ import pytest +import spacy -@pytest.mark.models('en') -def test_issue1305(EN): +#@pytest.mark.models('en') +def test_issue1305(): '''Test lemmatization of English VBZ''' - assert EN.vocab.morphology.lemmatizer('works', 'verb') == set(['work']) - doc = EN(u'This app works well') + nlp = spacy.load('en_core_web_sm') + assert nlp.vocab.morphology.lemmatizer('works', 'verb') == ['work'] + doc = nlp(u'This app works well') + print([(w.text, w.tag_) for w in doc]) assert doc[2].lemma_ == 'work' diff --git a/spacy/tests/regression/test_issue781.py b/spacy/tests/regression/test_issue781.py index e3f391a37..2c77e68cd 100644 --- a/spacy/tests/regression/test_issue781.py +++ b/spacy/tests/regression/test_issue781.py @@ -9,4 +9,4 @@ import pytest @pytest.mark.parametrize('word,lemmas', [("chromosomes", ["chromosome"]), ("endosomes", ["endosome"]), ("colocalizes", ["colocalize", "colocaliz"])]) def test_issue781(EN, word, lemmas): lemmatizer = EN.Defaults.create_lemmatizer() - assert lemmatizer(word, 'noun', morphology={'number': 'plur'}) == set(lemmas) + assert lemmatizer(word, 'noun', morphology={'number': 'plur'}) == lemmas From d9bb1e5de8908111fe314026662e07139cccf5bf Mon Sep 17 00:00:00 2001 From: Matthew Honnibal Date: Tue, 24 Oct 2017 17:06:19 +0200 Subject: [PATCH 55/77] Increment version --- spacy/about.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spacy/about.py b/spacy/about.py index 699b61aff..45b91955a 100644 --- a/spacy/about.py +++ b/spacy/about.py @@ -3,7 +3,7 @@ # https://github.com/pypa/warehouse/blob/master/warehouse/__about__.py __title__ = 'spacy-nightly' -__version__ = '2.0.0a17' +__version__ = '2.0.0a18' __summary__ = 'Industrial-strength Natural Language Processing (NLP) with Python and Cython' __uri__ = 'https://spacy.io' __author__ = 'Explosion AI' From d71702b8274cfb61153a76f97713637ba239adac Mon Sep 17 00:00:00 2001 From: ines Date: Tue, 24 Oct 2017 20:10:37 +0200 Subject: [PATCH 56/77] Fix formatting --- website/api/_annotation/_biluo.jade | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/api/_annotation/_biluo.jade b/website/api/_annotation/_biluo.jade index dc6168732..34d93f768 100644 --- a/website/api/_annotation/_biluo.jade +++ b/website/api/_annotation/_biluo.jade @@ -1,6 +1,6 @@ //- 💫 DOCS > API > ANNOTATION > BILUO -+table([ "Tag", "Description" ]) ++table(["Tag", "Description"]) +row +cell #[code #[span.u-color-theme B] EGIN] +cell The first token of a multi-token entity. 
From 7459ecfa87cc41f6195a4f49a5842c0eb1879dd8 Mon Sep 17 00:00:00 2001 From: ines Date: Tue, 24 Oct 2017 20:13:34 +0200 Subject: [PATCH 57/77] Port over contributor agreements --- .github/CONTRIBUTOR_AGREEMENT.md | 10 +-- .github/contributors/demfier.md | 106 +++++++++++++++++++++++++++ .github/contributors/honnibal.md | 106 +++++++++++++++++++++++++++ .github/contributors/ines.md | 106 +++++++++++++++++++++++++++ .github/contributors/jerbob92.md | 106 +++++++++++++++++++++++++++ .github/contributors/johnhaley81.md | 106 +++++++++++++++++++++++++++ .github/contributors/mdcclv.md | 106 +++++++++++++++++++++++++++ .github/contributors/polm.md | 106 +++++++++++++++++++++++++++ .github/contributors/shuvanon.md | 108 ++++++++++++++++++++++++++++ .github/contributors/yuukos.md | 106 +++++++++++++++++++++++++++ 10 files changed, 961 insertions(+), 5 deletions(-) create mode 100644 .github/contributors/demfier.md create mode 100644 .github/contributors/honnibal.md create mode 100644 .github/contributors/ines.md create mode 100644 .github/contributors/jerbob92.md create mode 100644 .github/contributors/johnhaley81.md create mode 100644 .github/contributors/mdcclv.md create mode 100644 .github/contributors/polm.md create mode 100644 .github/contributors/shuvanon.md create mode 100644 .github/contributors/yuukos.md diff --git a/.github/CONTRIBUTOR_AGREEMENT.md b/.github/CONTRIBUTOR_AGREEMENT.md index c915d48bf..f34603065 100644 --- a/.github/CONTRIBUTOR_AGREEMENT.md +++ b/.github/CONTRIBUTOR_AGREEMENT.md @@ -87,8 +87,8 @@ U.S. Federal law. Any choice of law rules will not apply. 7. Please place an “x” on one of the applicable statement below. Please do NOT mark both statements: - * [x] I am signing on behalf of myself as an individual and no other person - or entity, including my employer, has or will have rights with respect my + * [ ] I am signing on behalf of myself as an individual and no other person + or entity, including my employer, has or will have rights with respect to my contributions. * [ ] I am signing on behalf of my employer or a legal entity and I have the @@ -98,9 +98,9 @@ mark both statements: | Field | Entry | |------------------------------- | -------------------- | -| Name | Shuvanon Razik | +| Name | | | Company name (if applicable) | | | Title or role (if applicable) | | -| Date | 3/12/2017 | -| GitHub username | shuvanon | +| Date | | +| GitHub username | | | Website (optional) | | diff --git a/.github/contributors/demfier.md b/.github/contributors/demfier.md new file mode 100644 index 000000000..1a730fc78 --- /dev/null +++ b/.github/contributors/demfier.md @@ -0,0 +1,106 @@ +# spaCy contributor agreement + +This spaCy Contributor Agreement (**"SCA"**) is based on the +[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf). +The SCA applies to any contribution that you make to any product or project +managed by us (the **"project"**), and sets out the intellectual property rights +you grant to us in the contributed materials. The term **"us"** shall mean +[ExplosionAI UG (haftungsbeschränkt)](https://explosion.ai/legal). The term +**"you"** shall mean the person or entity identified below. + +If you agree to be bound by these terms, fill in the information requested +below and include the filled-in version with your first pull request, under the +folder [`.github/contributors/`](/.github/contributors/). The name of the file +should be your GitHub username, with the extension `.md`. 
For example, the user +example_user would create the file `.github/contributors/example_user.md`. + +Read this agreement carefully before signing. These terms and conditions +constitute a binding legal agreement. + +## Contributor Agreement + +1. The term "contribution" or "contributed materials" means any source code, +object code, patch, tool, sample, graphic, specification, manual, +documentation, or any other material posted or submitted by you to the project. + +2. With respect to any worldwide copyrights, or copyright applications and +registrations, in your contribution: + + * you hereby assign to us joint ownership, and to the extent that such + assignment is or becomes invalid, ineffective or unenforceable, you hereby + grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge, + royalty-free, unrestricted license to exercise all rights under those + copyrights. This includes, at our option, the right to sublicense these same + rights to third parties through multiple levels of sublicensees or other + licensing arrangements; + + * you agree that each of us can do all things in relation to your + contribution as if each of us were the sole owners, and if one of us makes + a derivative work of your contribution, the one who makes the derivative + work (or has it made will be the sole owner of that derivative work; + + * you agree that you will not assert any moral rights in your contribution + against us, our licensees or transferees; + + * you agree that we may register a copyright in your contribution and + exercise all ownership rights associated with it; and + + * you agree that neither of us has any duty to consult with, obtain the + consent of, pay or render an accounting to the other for any use or + distribution of your contribution. + +3. With respect to any patents you own, or that you can license without payment +to any third party, you hereby grant to us a perpetual, irrevocable, +non-exclusive, worldwide, no-charge, royalty-free license to: + + * make, have made, use, sell, offer to sell, import, and otherwise transfer + your contribution in whole or in part, alone or in combination with or + included in any product, work or materials arising out of the project to + which your contribution was submitted, and + + * at our option, to sublicense these same rights to third parties through + multiple levels of sublicensees or other licensing arrangements. + +4. Except as set out above, you keep all right, title, and interest in your +contribution. The rights that you grant to us under these terms are effective +on the date you first submitted a contribution to us, even if your submission +took place before the date you sign these terms. + +5. You covenant, represent, warrant and agree that: + + * Each contribution that you submit is and shall be an original work of + authorship and you can legally grant the rights set out in this SCA; + + * to the best of your knowledge, each contribution will not violate any + third party's copyrights, trademarks, patents, or other intellectual + property rights; and + + * each contribution shall be in compliance with U.S. export control laws and + other applicable export and import laws. You agree to notify us if you + become aware of any circumstance which would make any of the foregoing + representations inaccurate in any respect. We may publicly disclose your + participation in the project, including the fact that you have signed the SCA. + +6. 
This SCA is governed by the laws of the State of California and applicable +U.S. Federal law. Any choice of law rules will not apply. + +7. Please place an “x” on one of the applicable statement below. Please do NOT +mark both statements: + + * [x] I am signing on behalf of myself as an individual and no other person + or entity, including my employer, has or will have rights with respect to my + contributions. + + * [ ] I am signing on behalf of my employer or a legal entity and I have the + actual authority to contractually bind that entity. + +## Contributor Details + +| Field | Entry | +|------------------------------- | -------------------- | +| Name | Gaurav Sahu | +| Company name (if applicable) | | +| Title or role (if applicable) | | +| Date | 2017-10-18 | +| GitHub username | demfier | +| Website (optional) | | diff --git a/.github/contributors/honnibal.md b/.github/contributors/honnibal.md new file mode 100644 index 000000000..3a700b7dd --- /dev/null +++ b/.github/contributors/honnibal.md @@ -0,0 +1,106 @@ +# spaCy contributor agreement + +This spaCy Contributor Agreement (**"SCA"**) is based on the +[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf). +The SCA applies to any contribution that you make to any product or project +managed by us (the **"project"**), and sets out the intellectual property rights +you grant to us in the contributed materials. The term **"us"** shall mean +[ExplosionAI UG (haftungsbeschränkt)](https://explosion.ai/legal). The term +**"you"** shall mean the person or entity identified below. + +If you agree to be bound by these terms, fill in the information requested +below and include the filled-in version with your first pull request, under the +folder [`.github/contributors/`](/.github/contributors/). The name of the file +should be your GitHub username, with the extension `.md`. For example, the user +example_user would create the file `.github/contributors/example_user.md`. + +Read this agreement carefully before signing. These terms and conditions +constitute a binding legal agreement. + +## Contributor Agreement + +1. The term "contribution" or "contributed materials" means any source code, +object code, patch, tool, sample, graphic, specification, manual, +documentation, or any other material posted or submitted by you to the project. + +2. With respect to any worldwide copyrights, or copyright applications and +registrations, in your contribution: + + * you hereby assign to us joint ownership, and to the extent that such + assignment is or becomes invalid, ineffective or unenforceable, you hereby + grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge, + royalty-free, unrestricted license to exercise all rights under those + copyrights. 
This includes, at our option, the right to sublicense these same + rights to third parties through multiple levels of sublicensees or other + licensing arrangements; + + * you agree that each of us can do all things in relation to your + contribution as if each of us were the sole owners, and if one of us makes + a derivative work of your contribution, the one who makes the derivative + work (or has it made will be the sole owner of that derivative work; + + * you agree that you will not assert any moral rights in your contribution + against us, our licensees or transferees; + + * you agree that we may register a copyright in your contribution and + exercise all ownership rights associated with it; and + + * you agree that neither of us has any duty to consult with, obtain the + consent of, pay or render an accounting to the other for any use or + distribution of your contribution. + +3. With respect to any patents you own, or that you can license without payment +to any third party, you hereby grant to us a perpetual, irrevocable, +non-exclusive, worldwide, no-charge, royalty-free license to: + + * make, have made, use, sell, offer to sell, import, and otherwise transfer + your contribution in whole or in part, alone or in combination with or + included in any product, work or materials arising out of the project to + which your contribution was submitted, and + + * at our option, to sublicense these same rights to third parties through + multiple levels of sublicensees or other licensing arrangements. + +4. Except as set out above, you keep all right, title, and interest in your +contribution. The rights that you grant to us under these terms are effective +on the date you first submitted a contribution to us, even if your submission +took place before the date you sign these terms. + +5. You covenant, represent, warrant and agree that: + + * Each contribution that you submit is and shall be an original work of + authorship and you can legally grant the rights set out in this SCA; + + * to the best of your knowledge, each contribution will not violate any + third party's copyrights, trademarks, patents, or other intellectual + property rights; and + + * each contribution shall be in compliance with U.S. export control laws and + other applicable export and import laws. You agree to notify us if you + become aware of any circumstance which would make any of the foregoing + representations inaccurate in any respect. We may publicly disclose your + participation in the project, including the fact that you have signed the SCA. + +6. This SCA is governed by the laws of the State of California and applicable +U.S. Federal law. Any choice of law rules will not apply. + +7. Please place an “x” on one of the applicable statement below. Please do NOT +mark both statements: + + * [ ] I am signing on behalf of myself as an individual and no other person + or entity, including my employer, has or will have rights with respect to my + contributions. + + * [x] I am signing on behalf of my employer or a legal entity and I have the + actual authority to contractually bind that entity. 
+ +## Contributor Details + +| Field | Entry | +|------------------------------- | -------------------- | +| Name | Matthew Honnibal | +| Company name (if applicable) | Explosion AI | +| Title or role (if applicable) | Founder | +| Date | 2017-10-18 | +| GitHub username | honnibal | +| Website (optional) | https://explosion.ai | diff --git a/.github/contributors/ines.md b/.github/contributors/ines.md new file mode 100644 index 000000000..5cd57b07e --- /dev/null +++ b/.github/contributors/ines.md @@ -0,0 +1,106 @@ +# spaCy contributor agreement + +This spaCy Contributor Agreement (**"SCA"**) is based on the +[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf). +The SCA applies to any contribution that you make to any product or project +managed by us (the **"project"**), and sets out the intellectual property rights +you grant to us in the contributed materials. The term **"us"** shall mean +[ExplosionAI UG (haftungsbeschränkt)](https://explosion.ai/legal). The term +**"you"** shall mean the person or entity identified below. + +If you agree to be bound by these terms, fill in the information requested +below and include the filled-in version with your first pull request, under the +folder [`.github/contributors/`](/.github/contributors/). The name of the file +should be your GitHub username, with the extension `.md`. For example, the user +example_user would create the file `.github/contributors/example_user.md`. + +Read this agreement carefully before signing. These terms and conditions +constitute a binding legal agreement. + +## Contributor Agreement + +1. The term "contribution" or "contributed materials" means any source code, +object code, patch, tool, sample, graphic, specification, manual, +documentation, or any other material posted or submitted by you to the project. + +2. With respect to any worldwide copyrights, or copyright applications and +registrations, in your contribution: + + * you hereby assign to us joint ownership, and to the extent that such + assignment is or becomes invalid, ineffective or unenforceable, you hereby + grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge, + royalty-free, unrestricted license to exercise all rights under those + copyrights. This includes, at our option, the right to sublicense these same + rights to third parties through multiple levels of sublicensees or other + licensing arrangements; + + * you agree that each of us can do all things in relation to your + contribution as if each of us were the sole owners, and if one of us makes + a derivative work of your contribution, the one who makes the derivative + work (or has it made will be the sole owner of that derivative work; + + * you agree that you will not assert any moral rights in your contribution + against us, our licensees or transferees; + + * you agree that we may register a copyright in your contribution and + exercise all ownership rights associated with it; and + + * you agree that neither of us has any duty to consult with, obtain the + consent of, pay or render an accounting to the other for any use or + distribution of your contribution. + +3. 
With respect to any patents you own, or that you can license without payment +to any third party, you hereby grant to us a perpetual, irrevocable, +non-exclusive, worldwide, no-charge, royalty-free license to: + + * make, have made, use, sell, offer to sell, import, and otherwise transfer + your contribution in whole or in part, alone or in combination with or + included in any product, work or materials arising out of the project to + which your contribution was submitted, and + + * at our option, to sublicense these same rights to third parties through + multiple levels of sublicensees or other licensing arrangements. + +4. Except as set out above, you keep all right, title, and interest in your +contribution. The rights that you grant to us under these terms are effective +on the date you first submitted a contribution to us, even if your submission +took place before the date you sign these terms. + +5. You covenant, represent, warrant and agree that: + + * Each contribution that you submit is and shall be an original work of + authorship and you can legally grant the rights set out in this SCA; + + * to the best of your knowledge, each contribution will not violate any + third party's copyrights, trademarks, patents, or other intellectual + property rights; and + + * each contribution shall be in compliance with U.S. export control laws and + other applicable export and import laws. You agree to notify us if you + become aware of any circumstance which would make any of the foregoing + representations inaccurate in any respect. We may publicly disclose your + participation in the project, including the fact that you have signed the SCA. + +6. This SCA is governed by the laws of the State of California and applicable +U.S. Federal law. Any choice of law rules will not apply. + +7. Please place an “x” on one of the applicable statement below. Please do NOT +mark both statements: + + * [ ] I am signing on behalf of myself as an individual and no other person + or entity, including my employer, has or will have rights with respect to my + contributions. + + * [x] I am signing on behalf of my employer or a legal entity and I have the + actual authority to contractually bind that entity. + +## Contributor Details + +| Field | Entry | +|------------------------------- | -------------------- | +| Name | Ines Montani | +| Company name (if applicable) | Explosion AI | +| Title or role (if applicable) | Founder | +| Date | 2017/10/18 | +| GitHub username | ines | +| Website (optional) | https://explosion.ai | diff --git a/.github/contributors/jerbob92.md b/.github/contributors/jerbob92.md new file mode 100644 index 000000000..bb0430d14 --- /dev/null +++ b/.github/contributors/jerbob92.md @@ -0,0 +1,106 @@ +# spaCy contributor agreement + +This spaCy Contributor Agreement (**"SCA"**) is based on the +[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf). +The SCA applies to any contribution that you make to any product or project +managed by us (the **"project"**), and sets out the intellectual property rights +you grant to us in the contributed materials. The term **"us"** shall mean +[ExplosionAI UG (haftungsbeschränkt)](https://explosion.ai/legal). The term +**"you"** shall mean the person or entity identified below. + +If you agree to be bound by these terms, fill in the information requested +below and include the filled-in version with your first pull request, under the +folder [`.github/contributors/`](/.github/contributors/). 
The name of the file +should be your GitHub username, with the extension `.md`. For example, the user +example_user would create the file `.github/contributors/example_user.md`. + +Read this agreement carefully before signing. These terms and conditions +constitute a binding legal agreement. + +## Contributor Agreement + +1. The term "contribution" or "contributed materials" means any source code, +object code, patch, tool, sample, graphic, specification, manual, +documentation, or any other material posted or submitted by you to the project. + +2. With respect to any worldwide copyrights, or copyright applications and +registrations, in your contribution: + + * you hereby assign to us joint ownership, and to the extent that such + assignment is or becomes invalid, ineffective or unenforceable, you hereby + grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge, + royalty-free, unrestricted license to exercise all rights under those + copyrights. This includes, at our option, the right to sublicense these same + rights to third parties through multiple levels of sublicensees or other + licensing arrangements; + + * you agree that each of us can do all things in relation to your + contribution as if each of us were the sole owners, and if one of us makes + a derivative work of your contribution, the one who makes the derivative + work (or has it made will be the sole owner of that derivative work; + + * you agree that you will not assert any moral rights in your contribution + against us, our licensees or transferees; + + * you agree that we may register a copyright in your contribution and + exercise all ownership rights associated with it; and + + * you agree that neither of us has any duty to consult with, obtain the + consent of, pay or render an accounting to the other for any use or + distribution of your contribution. + +3. With respect to any patents you own, or that you can license without payment +to any third party, you hereby grant to us a perpetual, irrevocable, +non-exclusive, worldwide, no-charge, royalty-free license to: + + * make, have made, use, sell, offer to sell, import, and otherwise transfer + your contribution in whole or in part, alone or in combination with or + included in any product, work or materials arising out of the project to + which your contribution was submitted, and + + * at our option, to sublicense these same rights to third parties through + multiple levels of sublicensees or other licensing arrangements. + +4. Except as set out above, you keep all right, title, and interest in your +contribution. The rights that you grant to us under these terms are effective +on the date you first submitted a contribution to us, even if your submission +took place before the date you sign these terms. + +5. You covenant, represent, warrant and agree that: + + * Each contribution that you submit is and shall be an original work of + authorship and you can legally grant the rights set out in this SCA; + + * to the best of your knowledge, each contribution will not violate any + third party's copyrights, trademarks, patents, or other intellectual + property rights; and + + * each contribution shall be in compliance with U.S. export control laws and + other applicable export and import laws. You agree to notify us if you + become aware of any circumstance which would make any of the foregoing + representations inaccurate in any respect. We may publicly disclose your + participation in the project, including the fact that you have signed the SCA. 
+ +6. This SCA is governed by the laws of the State of California and applicable +U.S. Federal law. Any choice of law rules will not apply. + +7. Please place an “x” on one of the applicable statement below. Please do NOT +mark both statements: + + * [x] I am signing on behalf of myself as an individual and no other person + or entity, including my employer, has or will have rights with respect to my + contributions. + + * [ ] I am signing on behalf of my employer or a legal entity and I have the + actual authority to contractually bind that entity. + +## Contributor Details + +| Field | Entry | +|------------------------------- | -------------------- | +| Name | Jeroen Bobbeldijk | +| Company name (if applicable) | | +| Title or role (if applicable) | | +| Date | 22-10-2017 | +| GitHub username | jerbob92 | +| Website (optional) | | diff --git a/.github/contributors/johnhaley81.md b/.github/contributors/johnhaley81.md new file mode 100644 index 000000000..277b3126c --- /dev/null +++ b/.github/contributors/johnhaley81.md @@ -0,0 +1,106 @@ +# spaCy contributor agreement + +This spaCy Contributor Agreement (**"SCA"**) is based on the +[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf). +The SCA applies to any contribution that you make to any product or project +managed by us (the **"project"**), and sets out the intellectual property rights +you grant to us in the contributed materials. The term **"us"** shall mean +[ExplosionAI UG (haftungsbeschränkt)](https://explosion.ai/legal). The term +**"you"** shall mean the person or entity identified below. + +If you agree to be bound by these terms, fill in the information requested +below and include the filled-in version with your first pull request, under the +folder [`.github/contributors/`](/.github/contributors/). The name of the file +should be your GitHub username, with the extension `.md`. For example, the user +example_user would create the file `.github/contributors/example_user.md`. + +Read this agreement carefully before signing. These terms and conditions +constitute a binding legal agreement. + +## Contributor Agreement + +1. The term "contribution" or "contributed materials" means any source code, +object code, patch, tool, sample, graphic, specification, manual, +documentation, or any other material posted or submitted by you to the project. + +2. With respect to any worldwide copyrights, or copyright applications and +registrations, in your contribution: + + * you hereby assign to us joint ownership, and to the extent that such + assignment is or becomes invalid, ineffective or unenforceable, you hereby + grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge, + royalty-free, unrestricted license to exercise all rights under those + copyrights. 
This includes, at our option, the right to sublicense these same + rights to third parties through multiple levels of sublicensees or other + licensing arrangements; + + * you agree that each of us can do all things in relation to your + contribution as if each of us were the sole owners, and if one of us makes + a derivative work of your contribution, the one who makes the derivative + work (or has it made will be the sole owner of that derivative work; + + * you agree that you will not assert any moral rights in your contribution + against us, our licensees or transferees; + + * you agree that we may register a copyright in your contribution and + exercise all ownership rights associated with it; and + + * you agree that neither of us has any duty to consult with, obtain the + consent of, pay or render an accounting to the other for any use or + distribution of your contribution. + +3. With respect to any patents you own, or that you can license without payment +to any third party, you hereby grant to us a perpetual, irrevocable, +non-exclusive, worldwide, no-charge, royalty-free license to: + + * make, have made, use, sell, offer to sell, import, and otherwise transfer + your contribution in whole or in part, alone or in combination with or + included in any product, work or materials arising out of the project to + which your contribution was submitted, and + + * at our option, to sublicense these same rights to third parties through + multiple levels of sublicensees or other licensing arrangements. + +4. Except as set out above, you keep all right, title, and interest in your +contribution. The rights that you grant to us under these terms are effective +on the date you first submitted a contribution to us, even if your submission +took place before the date you sign these terms. + +5. You covenant, represent, warrant and agree that: + + * Each contribution that you submit is and shall be an original work of + authorship and you can legally grant the rights set out in this SCA; + + * to the best of your knowledge, each contribution will not violate any + third party's copyrights, trademarks, patents, or other intellectual + property rights; and + + * each contribution shall be in compliance with U.S. export control laws and + other applicable export and import laws. You agree to notify us if you + become aware of any circumstance which would make any of the foregoing + representations inaccurate in any respect. We may publicly disclose your + participation in the project, including the fact that you have signed the SCA. + +6. This SCA is governed by the laws of the State of California and applicable +U.S. Federal law. Any choice of law rules will not apply. + +7. Please place an “x” on one of the applicable statement below. Please do NOT +mark both statements: + + * [x] I am signing on behalf of myself as an individual and no other person + or entity, including my employer, has or will have rights with respect to my + contributions. + + * [ ] I am signing on behalf of my employer or a legal entity and I have the + actual authority to contractually bind that entity. 
+ +## Contributor Details + +| Field | Entry | +|------------------------------- | -------------------- | +| Name | John Haley | +| Company name (if applicable) | | +| Title or role (if applicable) | | +| Date | 19/10/2017 | +| GitHub username | johnhaley81 | +| Website (optional) | | diff --git a/.github/contributors/mdcclv.md b/.github/contributors/mdcclv.md new file mode 100644 index 000000000..14ebfae26 --- /dev/null +++ b/.github/contributors/mdcclv.md @@ -0,0 +1,106 @@ +# spaCy contributor agreement + +This spaCy Contributor Agreement (**"SCA"**) is based on the +[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf). +The SCA applies to any contribution that you make to any product or project +managed by us (the **"project"**), and sets out the intellectual property rights +you grant to us in the contributed materials. The term **"us"** shall mean +[ExplosionAI UG (haftungsbeschränkt)](https://explosion.ai/legal). The term +**"you"** shall mean the person or entity identified below. + +If you agree to be bound by these terms, fill in the information requested +below and include the filled-in version with your first pull request, under the +folder [`.github/contributors/`](/.github/contributors/). The name of the file +should be your GitHub username, with the extension `.md`. For example, the user +example_user would create the file `.github/contributors/example_user.md`. + +Read this agreement carefully before signing. These terms and conditions +constitute a binding legal agreement. + +## Contributor Agreement + +1. The term "contribution" or "contributed materials" means any source code, +object code, patch, tool, sample, graphic, specification, manual, +documentation, or any other material posted or submitted by you to the project. + +2. With respect to any worldwide copyrights, or copyright applications and +registrations, in your contribution: + + * you hereby assign to us joint ownership, and to the extent that such + assignment is or becomes invalid, ineffective or unenforceable, you hereby + grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge, + royalty-free, unrestricted license to exercise all rights under those + copyrights. This includes, at our option, the right to sublicense these same + rights to third parties through multiple levels of sublicensees or other + licensing arrangements; + + * you agree that each of us can do all things in relation to your + contribution as if each of us were the sole owners, and if one of us makes + a derivative work of your contribution, the one who makes the derivative + work (or has it made will be the sole owner of that derivative work; + + * you agree that you will not assert any moral rights in your contribution + against us, our licensees or transferees; + + * you agree that we may register a copyright in your contribution and + exercise all ownership rights associated with it; and + + * you agree that neither of us has any duty to consult with, obtain the + consent of, pay or render an accounting to the other for any use or + distribution of your contribution. + +3. 
With respect to any patents you own, or that you can license without payment +to any third party, you hereby grant to us a perpetual, irrevocable, +non-exclusive, worldwide, no-charge, royalty-free license to: + + * make, have made, use, sell, offer to sell, import, and otherwise transfer + your contribution in whole or in part, alone or in combination with or + included in any product, work or materials arising out of the project to + which your contribution was submitted, and + + * at our option, to sublicense these same rights to third parties through + multiple levels of sublicensees or other licensing arrangements. + +4. Except as set out above, you keep all right, title, and interest in your +contribution. The rights that you grant to us under these terms are effective +on the date you first submitted a contribution to us, even if your submission +took place before the date you sign these terms. + +5. You covenant, represent, warrant and agree that: + + * Each contribution that you submit is and shall be an original work of + authorship and you can legally grant the rights set out in this SCA; + + * to the best of your knowledge, each contribution will not violate any + third party's copyrights, trademarks, patents, or other intellectual + property rights; and + + * each contribution shall be in compliance with U.S. export control laws and + other applicable export and import laws. You agree to notify us if you + become aware of any circumstance which would make any of the foregoing + representations inaccurate in any respect. We may publicly disclose your + participation in the project, including the fact that you have signed the SCA. + +6. This SCA is governed by the laws of the State of California and applicable +U.S. Federal law. Any choice of law rules will not apply. + +7. Please place an “x” on one of the applicable statement below. Please do NOT +mark both statements: + + * [x] I am signing on behalf of myself as an individual and no other person + or entity, including my employer, has or will have rights with respect my + contributions. + + * [ ] I am signing on behalf of my employer or a legal entity and I have the + actual authority to contractually bind that entity. + +## Contributor Details + +| Field | Entry | +|------------------------------- | -------------------------------- | +| Name | Orion Montoya | +| Company name (if applicable) | | +| Title or role (if applicable) | | +| Date | 04-10-2017 | +| GitHub username | mdcclv | +| Website (optional) | http://www.mdcclv.com/ | diff --git a/.github/contributors/polm.md b/.github/contributors/polm.md new file mode 100644 index 000000000..a2aa0cb65 --- /dev/null +++ b/.github/contributors/polm.md @@ -0,0 +1,106 @@ +# spaCy contributor agreement + +This spaCy Contributor Agreement (**"SCA"**) is based on the +[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf). +The SCA applies to any contribution that you make to any product or project +managed by us (the **"project"**), and sets out the intellectual property rights +you grant to us in the contributed materials. The term **"us"** shall mean +[ExplosionAI UG (haftungsbeschränkt)](https://explosion.ai/legal). The term +**"you"** shall mean the person or entity identified below. + +If you agree to be bound by these terms, fill in the information requested +below and include the filled-in version with your first pull request, under the +folder [`.github/contributors/`](/.github/contributors/). 
The name of the file +should be your GitHub username, with the extension `.md`. For example, the user +example_user would create the file `.github/contributors/example_user.md`. + +Read this agreement carefully before signing. These terms and conditions +constitute a binding legal agreement. + +## Contributor Agreement + +1. The term "contribution" or "contributed materials" means any source code, +object code, patch, tool, sample, graphic, specification, manual, +documentation, or any other material posted or submitted by you to the project. + +2. With respect to any worldwide copyrights, or copyright applications and +registrations, in your contribution: + + * you hereby assign to us joint ownership, and to the extent that such + assignment is or becomes invalid, ineffective or unenforceable, you hereby + grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge, + royalty-free, unrestricted license to exercise all rights under those + copyrights. This includes, at our option, the right to sublicense these same + rights to third parties through multiple levels of sublicensees or other + licensing arrangements; + + * you agree that each of us can do all things in relation to your + contribution as if each of us were the sole owners, and if one of us makes + a derivative work of your contribution, the one who makes the derivative + work (or has it made will be the sole owner of that derivative work; + + * you agree that you will not assert any moral rights in your contribution + against us, our licensees or transferees; + + * you agree that we may register a copyright in your contribution and + exercise all ownership rights associated with it; and + + * you agree that neither of us has any duty to consult with, obtain the + consent of, pay or render an accounting to the other for any use or + distribution of your contribution. + +3. With respect to any patents you own, or that you can license without payment +to any third party, you hereby grant to us a perpetual, irrevocable, +non-exclusive, worldwide, no-charge, royalty-free license to: + + * make, have made, use, sell, offer to sell, import, and otherwise transfer + your contribution in whole or in part, alone or in combination with or + included in any product, work or materials arising out of the project to + which your contribution was submitted, and + + * at our option, to sublicense these same rights to third parties through + multiple levels of sublicensees or other licensing arrangements. + +4. Except as set out above, you keep all right, title, and interest in your +contribution. The rights that you grant to us under these terms are effective +on the date you first submitted a contribution to us, even if your submission +took place before the date you sign these terms. + +5. You covenant, represent, warrant and agree that: + + * Each contribution that you submit is and shall be an original work of + authorship and you can legally grant the rights set out in this SCA; + + * to the best of your knowledge, each contribution will not violate any + third party's copyrights, trademarks, patents, or other intellectual + property rights; and + + * each contribution shall be in compliance with U.S. export control laws and + other applicable export and import laws. You agree to notify us if you + become aware of any circumstance which would make any of the foregoing + representations inaccurate in any respect. We may publicly disclose your + participation in the project, including the fact that you have signed the SCA. 
+ +6. This SCA is governed by the laws of the State of California and applicable +U.S. Federal law. Any choice of law rules will not apply. + +7. Please place an “x” on one of the applicable statement below. Please do NOT +mark both statements: + + * [x] I am signing on behalf of myself as an individual and no other person + or entity, including my employer, has or will have rights with respect my + contributions. + + * [ ] I am signing on behalf of my employer or a legal entity and I have the + actual authority to contractually bind that entity. + +## Contributor Details + +| Field | Entry | +|------------------------------- | -------------------- | +| Name | Paul McCann | +| Company name (if applicable) | | +| Title or role (if applicable) | | +| Date | 2017-10-14 | +| GitHub username | polm | +| Website (optional) | http://dampfkraft.com| diff --git a/.github/contributors/shuvanon.md b/.github/contributors/shuvanon.md new file mode 100644 index 000000000..82d02d8d2 --- /dev/null +++ b/.github/contributors/shuvanon.md @@ -0,0 +1,108 @@ + + +# spaCy contributor agreement + +This spaCy Contributor Agreement (**"SCA"**) is based on the +[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf). +The SCA applies to any contribution that you make to any product or project +managed by us (the **"project"**), and sets out the intellectual property rights +you grant to us in the contributed materials. The term **"us"** shall mean +[ExplosionAI UG (haftungsbeschränkt)](https://explosion.ai/legal). The term +**"you"** shall mean the person or entity identified below. + +If you agree to be bound by these terms, fill in the information requested +below and include the filled-in version with your first pull request, under the +folder [`.github/contributors/`](/.github/contributors/). The name of the file +should be your GitHub username, with the extension `.md`. For example, the user +example_user would create the file `.github/contributors/example_user.md`. + +Read this agreement carefully before signing. These terms and conditions +constitute a binding legal agreement. + +## Contributor Agreement + +1. The term "contribution" or "contributed materials" means any source code, +object code, patch, tool, sample, graphic, specification, manual, +documentation, or any other material posted or submitted by you to the project. + +2. With respect to any worldwide copyrights, or copyright applications and +registrations, in your contribution: + + * you hereby assign to us joint ownership, and to the extent that such + assignment is or becomes invalid, ineffective or unenforceable, you hereby + grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge, + royalty-free, unrestricted license to exercise all rights under those + copyrights. 
This includes, at our option, the right to sublicense these same + rights to third parties through multiple levels of sublicensees or other + licensing arrangements; + + * you agree that each of us can do all things in relation to your + contribution as if each of us were the sole owners, and if one of us makes + a derivative work of your contribution, the one who makes the derivative + work (or has it made will be the sole owner of that derivative work; + + * you agree that you will not assert any moral rights in your contribution + against us, our licensees or transferees; + + * you agree that we may register a copyright in your contribution and + exercise all ownership rights associated with it; and + + * you agree that neither of us has any duty to consult with, obtain the + consent of, pay or render an accounting to the other for any use or + distribution of your contribution. + +3. With respect to any patents you own, or that you can license without payment +to any third party, you hereby grant to us a perpetual, irrevocable, +non-exclusive, worldwide, no-charge, royalty-free license to: + + * make, have made, use, sell, offer to sell, import, and otherwise transfer + your contribution in whole or in part, alone or in combination with or + included in any product, work or materials arising out of the project to + which your contribution was submitted, and + + * at our option, to sublicense these same rights to third parties through + multiple levels of sublicensees or other licensing arrangements. + +4. Except as set out above, you keep all right, title, and interest in your +contribution. The rights that you grant to us under these terms are effective +on the date you first submitted a contribution to us, even if your submission +took place before the date you sign these terms. + +5. You covenant, represent, warrant and agree that: + + * Each contribution that you submit is and shall be an original work of + authorship and you can legally grant the rights set out in this SCA; + + * to the best of your knowledge, each contribution will not violate any + third party's copyrights, trademarks, patents, or other intellectual + property rights; and + + * each contribution shall be in compliance with U.S. export control laws and + other applicable export and import laws. You agree to notify us if you + become aware of any circumstance which would make any of the foregoing + representations inaccurate in any respect. We may publicly disclose your + participation in the project, including the fact that you have signed the SCA. + +6. This SCA is governed by the laws of the State of California and applicable +U.S. Federal law. Any choice of law rules will not apply. + +7. Please place an “x” on one of the applicable statement below. Please do NOT +mark both statements: + + * [x] I am signing on behalf of myself as an individual and no other person + or entity, including my employer, has or will have rights with respect my + contributions. + + * [ ] I am signing on behalf of my employer or a legal entity and I have the + actual authority to contractually bind that entity. 
+ +## Contributor Details + +| Field | Entry | +|------------------------------- | -------------------- | +| Name | Shuvanon Razik | +| Company name (if applicable) | | +| Title or role (if applicable) | | +| Date | 3/12/2017 | +| GitHub username | shuvanon | +| Website (optional) | | diff --git a/.github/contributors/yuukos.md b/.github/contributors/yuukos.md new file mode 100644 index 000000000..aecafeecb --- /dev/null +++ b/.github/contributors/yuukos.md @@ -0,0 +1,106 @@ +# spaCy contributor agreement + +This spaCy Contributor Agreement (**"SCA"**) is based on the +[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf). +The SCA applies to any contribution that you make to any product or project +managed by us (the **"project"**), and sets out the intellectual property rights +you grant to us in the contributed materials. The term **"us"** shall mean +[ExplosionAI UG (haftungsbeschränkt)](https://explosion.ai/legal). The term +**"you"** shall mean the person or entity identified below. + +If you agree to be bound by these terms, fill in the information requested +below and include the filled-in version with your first pull request, under the +folder [`.github/contributors/`](/.github/contributors/). The name of the file +should be your GitHub username, with the extension `.md`. For example, the user +example_user would create the file `.github/contributors/example_user.md`. + +Read this agreement carefully before signing. These terms and conditions +constitute a binding legal agreement. + +## Contributor Agreement + +1. The term "contribution" or "contributed materials" means any source code, +object code, patch, tool, sample, graphic, specification, manual, +documentation, or any other material posted or submitted by you to the project. + +2. With respect to any worldwide copyrights, or copyright applications and +registrations, in your contribution: + + * you hereby assign to us joint ownership, and to the extent that such + assignment is or becomes invalid, ineffective or unenforceable, you hereby + grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge, + royalty-free, unrestricted license to exercise all rights under those + copyrights. This includes, at our option, the right to sublicense these same + rights to third parties through multiple levels of sublicensees or other + licensing arrangements; + + * you agree that each of us can do all things in relation to your + contribution as if each of us were the sole owners, and if one of us makes + a derivative work of your contribution, the one who makes the derivative + work (or has it made will be the sole owner of that derivative work; + + * you agree that you will not assert any moral rights in your contribution + against us, our licensees or transferees; + + * you agree that we may register a copyright in your contribution and + exercise all ownership rights associated with it; and + + * you agree that neither of us has any duty to consult with, obtain the + consent of, pay or render an accounting to the other for any use or + distribution of your contribution. + +3. 
With respect to any patents you own, or that you can license without payment +to any third party, you hereby grant to us a perpetual, irrevocable, +non-exclusive, worldwide, no-charge, royalty-free license to: + + * make, have made, use, sell, offer to sell, import, and otherwise transfer + your contribution in whole or in part, alone or in combination with or + included in any product, work or materials arising out of the project to + which your contribution was submitted, and + + * at our option, to sublicense these same rights to third parties through + multiple levels of sublicensees or other licensing arrangements. + +4. Except as set out above, you keep all right, title, and interest in your +contribution. The rights that you grant to us under these terms are effective +on the date you first submitted a contribution to us, even if your submission +took place before the date you sign these terms. + +5. You covenant, represent, warrant and agree that: + + * Each contribution that you submit is and shall be an original work of + authorship and you can legally grant the rights set out in this SCA; + + * to the best of your knowledge, each contribution will not violate any + third party's copyrights, trademarks, patents, or other intellectual + property rights; and + + * each contribution shall be in compliance with U.S. export control laws and + other applicable export and import laws. You agree to notify us if you + become aware of any circumstance which would make any of the foregoing + representations inaccurate in any respect. We may publicly disclose your + participation in the project, including the fact that you have signed the SCA. + +6. This SCA is governed by the laws of the State of California and applicable +U.S. Federal law. Any choice of law rules will not apply. + +7. Please place an “x” on one of the applicable statement below. Please do NOT +mark both statements: + + * [x] I am signing on behalf of myself as an individual and no other person + or entity, including my employer, has or will have rights with respect my + contributions. + + * [ ] I am signing on behalf of my employer or a legal entity and I have the + actual authority to contractually bind that entity. 
+ +## Contributor Details + +| Field | Entry | +|------------------------------- | -------------------- | +| Name | Alexey Kim | +| Company name (if applicable) | | +| Title or role (if applicable) | | +| Date | 13-12-2017 | +| GitHub username | yuukos | +| Website (optional) | | From c815ff65f6986302bf6d89c7747e53bcbc65ee9e Mon Sep 17 00:00:00 2001 From: ines Date: Tue, 24 Oct 2017 21:38:53 +0200 Subject: [PATCH 58/77] Update feature list --- website/index.jade | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/index.jade b/website/index.jade index 0155ab295..1abe5a984 100644 --- a/website/index.jade +++ b/website/index.jade @@ -79,12 +79,12 @@ include _includes/_mixins +h(2) Features +list +item Non-destructive #[strong tokenization] + +item #[strong Named entity] recognition +item Support for #[strong #{LANG_COUNT}+ languages] +item #[strong #{MODEL_COUNT} statistical models] for #{MODEL_LANG_COUNT} languages +item Pre-trained #[strong word vectors] +item Easy #[strong deep learning] integration +item Part-of-speech tagging - +item #[strong Named entity] recognition +item Labelled dependency parsing +item Syntax-driven sentence segmentation +item Built in #[strong visualizers] for syntax and NER From 63683a515132eef4e8668e51f6ed65066080cb67 Mon Sep 17 00:00:00 2001 From: ines Date: Tue, 24 Oct 2017 21:39:05 +0200 Subject: [PATCH 59/77] Port over contributors from master --- CONTRIBUTORS.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index b64dc8db3..edd1ed30d 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -3,6 +3,8 @@ This is a list of everyone who has made significant contributions to spaCy, in alphabetical order. Thanks a lot for the great work! * Adam Bittlingmayer, [@bittlingmayer](https://github.com/bittlingmayer) +* Alexey Kim, [@yuukos](https://github.com/yuukos) +* Alexis Eidelman, [@AlexisEidelman](https://github.com/AlexisEidelman) * Andreas Grivas, [@andreasgrv](https://github.com/andreasgrv) * Andrew Poliakov, [@pavlin99th](https://github.com/pavlin99th) * Aniruddha Adhikary [@aniruddha-adhikary](https://github.com/aniruddha-adhikary) @@ -16,6 +18,7 @@ This is a list of everyone who has made significant contributions to spaCy, in a * Daniel Vila Suero, [@dvsrepo](https://github.com/dvsrepo) * Dmytro Sadovnychyi, [@sadovnychyi](https://github.com/sadovnychyi) * Eric Zhao, [@ericzhao28](https://github.com/ericzhao28) +* Francisco Aranda, [@frascuchon](https://github.com/frascuchon) * Greg Baker, [@solresol](https://github.com/solresol) * Grégory Howard, [@Gregory-Howard](https://github.com/Gregory-Howard) * György Orosz, [@oroszgy](https://github.com/oroszgy) @@ -24,6 +27,9 @@ This is a list of everyone who has made significant contributions to spaCy, in a * Ines Montani, [@ines](https://github.com/ines) * J Nicolas Schrading, [@NSchrading](https://github.com/NSchrading) * Janneke van der Zwaan, [@jvdzwaan](https://github.com/jvdzwaan) +* Jim Geovedi, [@geovedi](https://github.com/geovedi) +* Jim Regan, [@jimregan](https://github.com/jimregan) +* Jeffrey Gerard, [@IamJeffG](https://github.com/IamJeffG) * Jordan Suchow, [@suchow](https://github.com/suchow) * Josh Reeter, [@jreeter](https://github.com/jreeter) * Juan Miguel Cejuela, [@juanmirocks](https://github.com/juanmirocks) @@ -38,6 +44,8 @@ This is a list of everyone who has made significant contributions to spaCy, in a * Michael Wallin, [@wallinm1](https://github.com/wallinm1) * Miguel Almeida, [@mamoit](https://github.com/mamoit) * Oleg 
Zd, [@olegzd](https://github.com/olegzd) +* Orion Montoya, [@mdcclv](https://github.com/mdcclv) +* Paul O'Leary McCann, [@polm](https://github.com/polm) * Pokey Rule, [@pokey](https://github.com/pokey) * Raphaël Bournhonesque, [@raphael0202](https://github.com/raphael0202) * Rob van Nieuwpoort, [@RvanNieuwpoort](https://github.com/RvanNieuwpoort) @@ -45,12 +53,18 @@ This is a list of everyone who has made significant contributions to spaCy, in a * Sam Bozek, [@sambozek](https://github.com/sambozek) * Sasho Savkov, [@savkov](https://github.com/savkov) * Shuvanon Razik, [@shuvanon](https://github.com/shuvanon) +* Swier, [@swierh](https://github.com/swierh) * Thomas Tanon, [@Tpt](https://github.com/Tpt) * Tiago Rodrigues, [@TiagoMRodrigues](https://github.com/TiagoMRodrigues) +* Vimos Tan, [@Vimos](https://github.com/Vimos) * Vsevolod Solovyov, [@vsolovyov](https://github.com/vsolovyov) * Wah Loon Keng, [@kengz](https://github.com/kengz) +* Wannaphong Phatthiyaphaibun, [@wannaphongcom](https://github.com/wannaphongcom) * Willem van Hage, [@wrvhage](https://github.com/wrvhage) * Wolfgang Seeker, [@wbwseeker](https://github.com/wbwseeker) +* Yam, [@hscspring](https://github.com/hscspring) * Yanhao Yang, [@YanhaoYang](https://github.com/YanhaoYang) * Yasuaki Uechi, [@uetchy](https://github.com/uetchy) +* Yu-chun Huang, [@galaxyh](https://github.com/galaxyh) * Yubing Dong, [@tomtung](https://github.com/tomtung) +* Yuval Pinter, [@yuvalpinter](https://github.com/yuvalpinter) From 972d9e832cc782bdc50693b0cf8c62f3ee247c7d Mon Sep 17 00:00:00 2001 From: ines Date: Tue, 24 Oct 2017 21:39:18 +0200 Subject: [PATCH 60/77] Update README for v2.0 --- README.rst | 256 +++++++++++++++++++---------------------------------- 1 file changed, 93 insertions(+), 163 deletions(-) diff --git a/README.rst b/README.rst index 244308473..27fca3fc2 100644 --- a/README.rst +++ b/README.rst @@ -1,15 +1,16 @@ spaCy: Industrial-strength NLP ****************************** -spaCy is a library for advanced natural language processing in Python and +spaCy is a library for advanced Natural Language Processing in Python and Cython. spaCy is built on the very latest research, but it isn't researchware. -It was designed from day one to be used in real products. spaCy currently supports -English, German, French and Spanish, as well as tokenization for Italian, -Portuguese, Dutch, Swedish, Finnish, Norwegian, Danish, Hungarian, Polish, -Bengali, Hebrew, Chinese and Japanese. It's commercial open-source software, -released under the MIT license. +It was designed from day one to be used in real products. spaCy comes with +`pre-trained statistical models `_ and word +vectors, and currently supports tokenization for **20+ languages**. It features +the **fastest syntactic parser** in the world, convolutional **neural network models** +for tagging, parsing and **named entity recognition** and easy **deep learning** +integration. It's commercial open-source software, released under the MIT license. -💫 **Version 1.8 out now!** `Read the release notes here. `_ +💫 **Version 2.0 out now!** `Check out the new features here. `_ .. image:: https://img.shields.io/travis/explosion/spaCy/master.svg?style=flat-square :target: https://travis-ci.org/explosion/spaCy @@ -38,68 +39,72 @@ released under the MIT license. 📖 Documentation ================ -=================== === -`Usage Workflows`_ How to use spaCy and its features. -`API Reference`_ The detailed reference for spaCy's API. -`Troubleshooting`_ Common problems and solutions for beginners. 
-`Tutorials`_ End-to-end examples, with code you can modify and run.
-`Showcase & Demos`_ Demos, libraries and products from the spaCy community.
-`Contribute`_ How to contribute to the spaCy project and code base.
-=================== ===
+=================== ===
+`spaCy 101`_ New to spaCy? Here's everything you need to know!
+`Usage Guides`_ How to use spaCy and its features.
+`New in v2.0`_ New features, backwards incompatibilities and migration guide.
+`API Reference`_ The detailed reference for spaCy's API.
+`Models`_ Download statistical language models for spaCy.
+`Resources`_ Libraries, extensions, demos, books and courses.
+`Changelog`_ Changes and version history.
+`Contribute`_ How to contribute to the spaCy project and code base.
+=================== ===

-.. _Usage Workflows: https://spacy.io/docs/usage/
-.. _API Reference: https://spacy.io/docs/api/
-.. _Troubleshooting: https://spacy.io/docs/usage/troubleshooting
-.. _Tutorials: https://spacy.io/docs/usage/tutorials
-.. _Showcase & Demos: https://spacy.io/docs/usage/showcase
+.. _spaCy 101: https://alpha.spacy.io/usage/spacy-101
+.. _New in v2.0: https://alpha.spacy.io/usage/v2#migrating
+.. _Usage Guides: https://alpha.spacy.io/usage/
+.. _API Reference: https://alpha.spacy.io/api/
+.. _Models: https://alpha.spacy.io/models
+.. _Resources: https://alpha.spacy.io/usage/resources
+.. _Changelog: https://alpha.spacy.io/usage/#changelog
 .. _Contribute: https://github.com/explosion/spaCy/blob/master/CONTRIBUTING.md

💬 Where to ask questions
==========================

+The spaCy project is maintained by `@honnibal `_
+and `@ines `_. Please understand that we won't be able
+to provide individual support via email. We also believe that help is much more
+valuable if it's shared publicly, so that more people can benefit from it.
+
 ====================== ===
-**Bug reports** `GitHub issue tracker`_
-**Usage questions** `StackOverflow`_, `Gitter chat`_, `Reddit user group`_
-**General discussion** `Gitter chat`_, `Reddit user group`_
-**Commercial support** contact@explosion.ai
+**Bug Reports** `GitHub Issue Tracker`_
+**Usage Questions** `StackOverflow`_, `Gitter Chat`_, `Reddit User Group`_
+**General Discussion** `Gitter Chat`_, `Reddit User Group`_
 ====================== ===

-.. _GitHub issue tracker: https://github.com/explosion/spaCy/issues
+.. _GitHub Issue Tracker: https://github.com/explosion/spaCy/issues
 .. _StackOverflow: http://stackoverflow.com/questions/tagged/spacy
-.. _Gitter chat: https://gitter.im/explosion/spaCy
-.. _Reddit user group: https://www.reddit.com/r/spacynlp
+.. _Gitter Chat: https://gitter.im/explosion/spaCy
+.. 
_Reddit User Group: https://www.reddit.com/r/spacynlp Features ======== -* Non-destructive **tokenization** -* Syntax-driven sentence segmentation -* Pre-trained **word vectors** -* Part-of-speech tagging +* **Fastest syntactic parser** in the world * **Named entity** recognition -* Labelled dependency parsing -* Convenient string-to-int mapping -* Export to numpy data arrays -* GIL-free **multi-threading** -* Efficient binary serialization +* Non-destructive **tokenization** +* Support for **20+ languages** +* Pre-trained `statistical models `_ and word vectors * Easy **deep learning** integration -* Statistical models for **English**, **German**, **French** and **Spanish** +* Part-of-speech tagging +* Labelled dependency parsing +* Syntax-driven sentence segmentation +* Built in **visualizers** for syntax and NER +* Convenient string-to-hash mapping +* Export to numpy data arrays +* Efficient binary serialization +* Easy **model packaging** and deployment * State-of-the-art speed * Robust, rigorously evaluated accuracy -See `facts, figures and benchmarks `_. +📖 **For more details, see the** `facts, figures and benchmarks `_. -Top Performance ---------------- +Install spaCy +============= -* Fastest in the world: <50ms per document. No faster system has ever been - announced. -* Accuracy within 1% of the current state of the art on all tasks performed - (parsing, named entity recognition, part-of-speech tagging). The only more - accurate systems are an order of magnitude slower or more. - -Supports --------- +For detailed installation instructions, see +the `documentation `_. ==================== === **Operating system** macOS / OS X, Linux, Windows (Cygwin, MinGW, Visual Studio) @@ -110,12 +115,6 @@ Supports .. _pip: https://pypi.python.org/pypi/spacy .. _conda: https://anaconda.org/conda-forge/spacy -Install spaCy -============= - -Installation requires a working build environment. See notes on Ubuntu, -macOS/OS X and Windows for details. - pip --- @@ -123,7 +122,7 @@ Using pip, spaCy releases are currently only available as source packages. .. code:: bash - pip install -U spacy + pip install spacy When using pip it is generally recommended to install packages in a ``virtualenv`` to avoid modifying system state: @@ -149,25 +148,41 @@ For the feedstock including the build recipe and configuration, check out `this repository `_. Improvements and pull requests to the recipe and setup are always appreciated. +Updating spaCy +-------------- + +Some updates to spaCy may require downloading new statistical models. If you're +running spaCy v2.0 or higher, you can use the ``validate`` command to check if +your installed models are compatible and if not, print details on how to update +them: + +.. code:: bash + + pip install -U spacy + spacy validate + +If you've trained your own models, keep in mind that your training and runtime +inputs must match. After updating spaCy, we recommend **retraining your models** +with the new version. + +📖 **For details on upgrading from spaCy 1.x to spaCy 2.x, see the** +`migration guide `_. + Download models =============== As of v1.7.0, models for spaCy can be installed as **Python packages**. This means that they're a component of your application, just like any -other module. They're versioned and can be defined as a dependency in your -``requirements.txt``. Models can be installed from a download URL or -a local directory, manually or via pip. Their data can be located anywhere on -your file system. 
To make a model available to spaCy, all you need to do is
-create a "shortcut link", an internal alias that tells spaCy where to find the
-data files for a specific model name.
+other module. Models can be installed using spaCy's ``download`` command,
+or manually by pointing pip to a path or URL.

 ======================= ===
-`spaCy Models`_ Available models, latest releases and direct download.
+`Available Models`_ Detailed model descriptions, accuracy figures and benchmarks.
 `Models Documentation`_ Detailed usage instructions.
 ======================= ===

-.. _spaCy Models: https://github.com/explosion/spacy-models/releases/
-.. _Models Documentation: https://spacy.io/docs/usage/models
+.. _Available Models: https://alpha.spacy.io/models
+.. _Models Documentation: https://alpha.spacy.io/docs/usage/models

 .. code:: bash

    # out-of-the-box: download best-matching default model
    python -m spacy download en

    # download best-matching version of specific model for your spaCy installation
-    python -m spacy download en_core_web_md
+    python -m spacy download en_core_web_lg

    # pip install .tar.gz archive from path or URL
-    pip install /Users/you/en_core_web_md-1.2.0.tar.gz
-    pip install https://github.com/explosion/spacy-models/releases/download/en_core_web_md-1.2.0/en_core_web_md-1.2.0.tar.gz
-
-    # set up shortcut link to load installed package as "en_default"
-    python -m spacy link en_core_web_md en_default
-
-    # set up shortcut link to load local model as "my_amazing_model"
-    python -m spacy link /Users/you/data my_amazing_model
+    pip install /Users/you/en_core_web_sm-2.0.0.tar.gz

Loading and using models
------------------------

To load a model, use ``spacy.load()`` with the model's shortcut link:

.. code:: python

    import spacy
    nlp = spacy.load('en')
    doc = nlp(u'This is a sentence.')

If you've installed a model via pip, you can also ``import`` it directly and
-then call its ``load()`` method with no arguments. This should also work for
-older models in previous versions of spaCy.
+then call its ``load()`` method:

.. code:: python

    import spacy
    import en_core_web_sm

    nlp = en_core_web_sm.load()
    doc = nlp(u'This is a sentence.')

-📖 **For more info and examples, check out the** `models documentation `_.
+📖 **For more info and examples, check out the**
+`models documentation `_.

Support for older versions
--------------------------

If you're using an older version (``v1.6.0`` or below), you can still download
and install the old models from within spaCy using ``python -m spacy.en.download all``
or ``python -m spacy.de.download all``. The ``.tar.gz`` archives are also
`attached to the v1.6.0 release `_.
To download and install the models manually, unpack the archive, drop the
contained directory into ``spacy/data``.

Compile from source
===================

The other way to install spaCy is to clone its
`GitHub repository `_ and build it from source. That is the common way
if you want to make changes to the code base. You'll need to make sure that
you have a development environment consisting of a Python distribution
including header files, a compiler, pip, virtualenv and git installed. The
compiler part is the trickiest. How to do that depends on your system. See
notes on Ubuntu, OS X and Windows for details.

.. code:: bash

    # make sure you are using the latest pip
    python -m pip install -U pip
    git clone https://github.com/explosion/spaCy
    cd spaCy

    virtualenv .env
    source .env/bin/activate
    pip install -r requirements.txt
    pip install -e .

Compared to regular install via pip, `requirements.txt `_
additionally installs developer dependencies such as Cython.

Instead of the above verbose commands, you can also use the following
`Fabric `_ commands. All commands assume that your
``virtualenv`` is located in a directory ``.env``. If you're using a different
directory, you can change it via the environment variable ``VENV_DIR``, for
example ``VENV_DIR=".custom-env" fab clean make``. 
============= === ``fab env`` Create ``virtualenv`` and delete previous one, if it exists. @@ -261,14 +271,6 @@ Instead of the above verbose commands, you can also use the following ``fab test`` Run basic tests, aborting after first failure. ============= === -All commands assume that your ``virtualenv`` is located in a directory ``.env``. -If you're using a different directory, you can change it via the environment -variable ``VENV_DIR``, for example: - -.. code:: bash - - VENV_DIR=".custom-env" fab clean make - Ubuntu ------ @@ -310,76 +312,4 @@ and ``--model`` are optional and enable additional tests: # make sure you are using recent pytest version python -m pip install -U pytest - python -m pytest - -🛠 Changelog -============ - -=========== ============== =========== -Version Date Description -=========== ============== =========== -`v1.8.2`_ ``2017-04-26`` French model and small improvements -`v1.8.1`_ ``2017-04-23`` Saving, loading and training bug fixes -`v1.8.0`_ ``2017-04-16`` Better NER training, saving and loading -`v1.7.5`_ ``2017-04-07`` Bug fixes and new CLI commands -`v1.7.3`_ ``2017-03-26`` Alpha support for Hebrew, new CLI commands and bug fixes -`v1.7.2`_ ``2017-03-20`` Small fixes to beam parser and model linking -`v1.7.1`_ ``2017-03-19`` Fix data download for system installation -`v1.7.0`_ ``2017-03-18`` New 50 MB model, CLI, better downloads and lots of bug fixes -`v1.6.0`_ ``2017-01-16`` Improvements to tokenizer and tests -`v1.5.0`_ ``2016-12-27`` Alpha support for Swedish and Hungarian -`v1.4.0`_ ``2016-12-18`` Improved language data and alpha Dutch support -`v1.3.0`_ ``2016-12-03`` Improve API consistency -`v1.2.0`_ ``2016-11-04`` Alpha tokenizers for Chinese, French, Spanish, Italian and Portuguese -`v1.1.0`_ ``2016-10-23`` Bug fixes and adjustments -`v1.0.0`_ ``2016-10-18`` Support for deep learning workflows and entity-aware rule matcher -`v0.101.0`_ ``2016-05-10`` Fixed German model -`v0.100.7`_ ``2016-05-05`` German support -`v0.100.6`_ ``2016-03-08`` Add support for GloVe vectors -`v0.100.5`_ ``2016-02-07`` Fix incorrect use of header file -`v0.100.4`_ ``2016-02-07`` Fix OSX problem introduced in 0.100.3 -`v0.100.3`_ ``2016-02-06`` Multi-threading, faster loading and bugfixes -`v0.100.2`_ ``2016-01-21`` Fix data version lock -`v0.100.1`_ ``2016-01-21`` Fix install for OSX -`v0.100`_ ``2016-01-19`` Revise setup.py, better model downloads, bug fixes -`v0.99`_ ``2015-11-08`` Improve span merging, internal refactoring -`v0.98`_ ``2015-11-03`` Smaller package, bug fixes -`v0.97`_ ``2015-10-23`` Load the StringStore from a json list, instead of a text file -`v0.96`_ ``2015-10-19`` Hotfix to .merge method -`v0.95`_ ``2015-10-18`` Bug fixes -`v0.94`_ ``2015-10-09`` Fix memory and parse errors -`v0.93`_ ``2015-09-22`` Bug fixes to word vectors -=========== ============== =========== - -.. _v1.8.2: https://github.com/explosion/spaCy/releases/tag/v1.8.2 -.. _v1.8.1: https://github.com/explosion/spaCy/releases/tag/v1.8.1 -.. _v1.8.0: https://github.com/explosion/spaCy/releases/tag/v1.8.0 -.. _v1.7.5: https://github.com/explosion/spaCy/releases/tag/v1.7.5 -.. _v1.7.3: https://github.com/explosion/spaCy/releases/tag/v1.7.3 -.. _v1.7.2: https://github.com/explosion/spaCy/releases/tag/v1.7.2 -.. _v1.7.1: https://github.com/explosion/spaCy/releases/tag/v1.7.1 -.. _v1.7.0: https://github.com/explosion/spaCy/releases/tag/v1.7.0 -.. _v1.6.0: https://github.com/explosion/spaCy/releases/tag/v1.6.0 -.. _v1.5.0: https://github.com/explosion/spaCy/releases/tag/v1.5.0 -.. 
_v1.4.0: https://github.com/explosion/spaCy/releases/tag/v1.4.0 -.. _v1.3.0: https://github.com/explosion/spaCy/releases/tag/v1.3.0 -.. _v1.2.0: https://github.com/explosion/spaCy/releases/tag/v1.2.0 -.. _v1.1.0: https://github.com/explosion/spaCy/releases/tag/v1.1.0 -.. _v1.0.0: https://github.com/explosion/spaCy/releases/tag/v1.0.0 -.. _v0.101.0: https://github.com/explosion/spaCy/releases/tag/0.101.0 -.. _v0.100.7: https://github.com/explosion/spaCy/releases/tag/0.100.7 -.. _v0.100.6: https://github.com/explosion/spaCy/releases/tag/0.100.6 -.. _v0.100.5: https://github.com/explosion/spaCy/releases/tag/0.100.5 -.. _v0.100.4: https://github.com/explosion/spaCy/releases/tag/0.100.4 -.. _v0.100.3: https://github.com/explosion/spaCy/releases/tag/0.100.3 -.. _v0.100.2: https://github.com/explosion/spaCy/releases/tag/0.100.2 -.. _v0.100.1: https://github.com/explosion/spaCy/releases/tag/0.100.1 -.. _v0.100: https://github.com/explosion/spaCy/releases/tag/0.100 -.. _v0.99: https://github.com/explosion/spaCy/releases/tag/0.99 -.. _v0.98: https://github.com/explosion/spaCy/releases/tag/0.98 -.. _v0.97: https://github.com/explosion/spaCy/releases/tag/0.97 -.. _v0.96: https://github.com/explosion/spaCy/releases/tag/0.96 -.. _v0.95: https://github.com/explosion/spaCy/releases/tag/0.95 -.. _v0.94: https://github.com/explosion/spaCy/releases/tag/0.94 -.. _v0.93: https://github.com/explosion/spaCy/releases/tag/0.93 From 1730648e195a854fc44d1970737cb128e874d0d5 Mon Sep 17 00:00:00 2001 From: ines Date: Tue, 24 Oct 2017 21:49:04 +0200 Subject: [PATCH 61/77] Update pull request template --- .github/PULL_REQUEST_TEMPLATE.md | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index e97a7ea16..ec11b78bd 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,20 +1,19 @@ - + ## Description - - + +### Types of change + -## Types of changes - -- [ ] **Bug fix** (non-breaking change fixing an issue) -- [ ] **New feature** (non-breaking change adding functionality to spaCy) -- [ ] **Breaking change** (fix or feature causing change to spaCy's existing functionality) -- [ ] **Documentation** (addition to documentation of spaCy) - -## Checklist: - -- [ ] My change requires a change to spaCy's documentation. -- [ ] I have updated the documentation accordingly. -- [ ] I have added tests to cover my changes. -- [ ] All new and existing tests passed. +## Checklist + +- [ ] I have submitted the spaCy Contributor Agreement. +- [ ] I ran the tests, and all new and existing tests passed. +- [ ] My changes don't require a change to the documentation, or if they do, I've added all required information. From 4a06eddb5fdc067bf02cca3b9567759372de4885 Mon Sep 17 00:00:00 2001 From: ines Date: Tue, 24 Oct 2017 22:18:40 +0200 Subject: [PATCH 62/77] Update README --- README.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.rst b/README.rst index 27fca3fc2..9cffd2cae 100644 --- a/README.rst +++ b/README.rst @@ -1,9 +1,9 @@ spaCy: Industrial-strength NLP ****************************** -spaCy is a library for advanced Natural Language Processing in Python and -Cython. spaCy is built on the very latest research, but it isn't researchware. -It was designed from day one to be used in real products. spaCy comes with +spaCy is a library for advanced Natural Language Processing in Python and Cython. 
+It's built on the very latest research, and was designed from day one to be +used in real products. spaCy comes with `pre-trained statistical models `_ and word vectors, and currently supports tokenization for **20+ languages**. It features the **fastest syntactic parser** in the world, convolutional **neural network models** From 3484174e487c3ec6171042d06e6a994a8330c61c Mon Sep 17 00:00:00 2001 From: ines Date: Wed, 25 Oct 2017 11:57:43 +0200 Subject: [PATCH 63/77] Add Language.path --- spacy/language.py | 6 ++++++ website/api/language.jade | 8 ++++++++ 2 files changed, 14 insertions(+) diff --git a/spacy/language.py b/spacy/language.py index c706e532a..933ca772d 100644 --- a/spacy/language.py +++ b/spacy/language.py @@ -127,6 +127,7 @@ class Language(object): RETURNS (Language): The newly constructed object. """ self._meta = dict(meta) + self._path = None if vocab is True: factory = self.Defaults.create_vocab vocab = factory(self, **meta.get('vocab', {})) @@ -142,6 +143,10 @@ class Language(object): bytes_data = self.to_bytes(vocab=False) return (unpickle_language, (self.vocab, self.meta, bytes_data)) + @property + def path(self): + return self._path + @property def meta(self): self._meta.setdefault('lang', self.vocab.lang) @@ -611,6 +616,7 @@ class Language(object): if not (path / 'vocab').exists(): exclude['vocab'] = True util.from_disk(path, deserializers, exclude) + self._path = path return self def to_bytes(self, disable=[], **exclude): diff --git a/website/api/language.jade b/website/api/language.jade index 668cbadd7..6aa2d7612 100644 --- a/website/api/language.jade +++ b/website/api/language.jade @@ -609,6 +609,14 @@ p Load state from a binary string. | Custom meta data for the Language class. If a model is loaded, | contains meta data of the model. + +row + +cell #[code path] + +tag-new(2) + +cell #[code Path] + +cell + | Path to the model data directory, if a model is loaded. Otherwise + | #[code None]. + +h(2, "class-attributes") Class attributes +table(["Name", "Type", "Description"]) From 0b1dcbac1488e62379c2da326d666b39221e84e9 Mon Sep 17 00:00:00 2001 From: ines Date: Wed, 25 Oct 2017 12:08:46 +0200 Subject: [PATCH 64/77] Remove unused function --- spacy/_ml.py | 40 ---------------------------------------- 1 file changed, 40 deletions(-) diff --git a/spacy/_ml.py b/spacy/_ml.py index b07e179f0..8a8d355d9 100644 --- a/spacy/_ml.py +++ b/spacy/_ml.py @@ -482,46 +482,6 @@ def get_token_vectors(tokens_attrs_vectors, drop=0.): return vectors, backward -def fine_tune(embedding, combine=None): - if combine is not None: - raise NotImplementedError( - "fine_tune currently only supports addition. 
Set combine=None") - def fine_tune_fwd(docs_tokvecs, drop=0.): - docs, tokvecs = docs_tokvecs - - lengths = model.ops.asarray([len(doc) for doc in docs], dtype='i') - - vecs, bp_vecs = embedding.begin_update(docs, drop=drop) - flat_tokvecs = embedding.ops.flatten(tokvecs) - flat_vecs = embedding.ops.flatten(vecs) - output = embedding.ops.unflatten( - (model.mix[0] * flat_tokvecs + model.mix[1] * flat_vecs), lengths) - - def fine_tune_bwd(d_output, sgd=None): - flat_grad = model.ops.flatten(d_output) - model.d_mix[0] += flat_tokvecs.dot(flat_grad.T).sum() - model.d_mix[1] += flat_vecs.dot(flat_grad.T).sum() - - bp_vecs([d_o * model.mix[1] for d_o in d_output], sgd=sgd) - if sgd is not None: - sgd(model._mem.weights, model._mem.gradient, key=model.id) - return [d_o * model.mix[0] for d_o in d_output] - return output, fine_tune_bwd - - def fine_tune_predict(docs_tokvecs): - docs, tokvecs = docs_tokvecs - vecs = embedding(docs) - return [model.mix[0]*tv+model.mix[1]*v - for tv, v in zip(tokvecs, vecs)] - - model = wrap(fine_tune_fwd, embedding) - model.mix = model._mem.add((model.id, 'mix'), (2,)) - model.mix.fill(0.5) - model.d_mix = model._mem.add_gradient((model.id, 'd_mix'), (model.id, 'mix')) - model.predict = fine_tune_predict - return model - - @layerize def flatten(seqs, drop=0.): if isinstance(seqs[0], numpy.ndarray): From 7bcec574620b611882e74d2356f6ffdead628ae3 Mon Sep 17 00:00:00 2001 From: ines Date: Wed, 25 Oct 2017 12:08:54 +0200 Subject: [PATCH 65/77] Remove unused attribute --- spacy/matcher.pyx | 2 -- 1 file changed, 2 deletions(-) diff --git a/spacy/matcher.pyx b/spacy/matcher.pyx index a0c69f4bf..2c001c652 100644 --- a/spacy/matcher.pyx +++ b/spacy/matcher.pyx @@ -198,7 +198,6 @@ cdef class Matcher: cdef public object _patterns cdef public object _entities cdef public object _callbacks - cdef public object _acceptors def __init__(self, vocab): """Create the Matcher. @@ -209,7 +208,6 @@ cdef class Matcher: """ self._patterns = {} self._entities = {} - self._acceptors = {} self._callbacks = {} self.vocab = vocab self.mem = Pool() From 7eebeeaf85d1637af744aa2b504ffa2d2df42ed6 Mon Sep 17 00:00:00 2001 From: ines Date: Wed, 25 Oct 2017 12:09:47 +0200 Subject: [PATCH 66/77] Fix Matcher.__contains__ --- spacy/matcher.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spacy/matcher.pyx b/spacy/matcher.pyx index 2c001c652..ea5b7e416 100644 --- a/spacy/matcher.pyx +++ b/spacy/matcher.pyx @@ -230,7 +230,7 @@ cdef class Matcher: key (unicode): The match ID. RETURNS (bool): Whether the matcher contains rules for this match ID. """ - return len(self._patterns) + return key in self._patterns def add(self, key, on_match, *patterns): """Add a match-rule to the matcher. A match-rule consists of: an ID key, From 9c733a884922a447ae620ab41d97c086d429c8a4 Mon Sep 17 00:00:00 2001 From: ines Date: Wed, 25 Oct 2017 12:09:56 +0200 Subject: [PATCH 67/77] Implement PhraseMatcher.__len__ --- spacy/matcher.pyx | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/spacy/matcher.pyx b/spacy/matcher.pyx index ea5b7e416..be9634fc9 100644 --- a/spacy/matcher.pyx +++ b/spacy/matcher.pyx @@ -471,7 +471,13 @@ cdef class PhraseMatcher: self._callbacks = {} def __len__(self): - raise NotImplementedError + """Get the number of rules added to the matcher. Note that this only + returns the number of rules (identical with the number of IDs), not the + number of individual patterns. + + RETURNS (int): The number of rules. 
+        """
+        return len(self.phrase_ids)
 
     def __contains__(self, key):
         raise NotImplementedError
 

From 1262aa0bf9e954b9193781661f29652a97222b56 Mon Sep 17 00:00:00 2001
From: ines 
Date: Wed, 25 Oct 2017 12:10:04 +0200
Subject: [PATCH 68/77] Implement PhraseMatcher.__contains__

---
 spacy/matcher.pyx | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/spacy/matcher.pyx b/spacy/matcher.pyx
index be9634fc9..8b815194c 100644
--- a/spacy/matcher.pyx
+++ b/spacy/matcher.pyx
@@ -480,7 +480,13 @@ cdef class PhraseMatcher:
         return len(self.phrase_ids)
 
     def __contains__(self, key):
-        raise NotImplementedError
+        """Check whether the matcher contains rules for a match ID.
+
+        key (unicode): The match ID.
+        RETURNS (bool): Whether the matcher contains rules for this match ID.
+        """
+        cdef hash_t ent_id = self.matcher._normalize_key(key)
+        return ent_id in self.phrase_ids
 
     def __reduce__(self):
         return (self.__class__, (self.vocab,), None, None)

From 4d97efc3b5f1d51fa4ff9d2a350787298f77ab04 Mon Sep 17 00:00:00 2001
From: ines 
Date: Wed, 25 Oct 2017 12:10:16 +0200
Subject: [PATCH 69/77] Add missing docstrings

---
 spacy/matcher.pyx | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/spacy/matcher.pyx b/spacy/matcher.pyx
index 8b815194c..6c1069578 100644
--- a/spacy/matcher.pyx
+++ b/spacy/matcher.pyx
@@ -255,6 +255,10 @@ cdef class Matcher:
         and '*' patterns in a row and their matches overlap, the first operator
         will behave non-greedily. This quirk in the semantics makes the matcher
         more efficient, by avoiding the need for back-tracking.
+
+        key (unicode): The match ID.
+        on_match (callable): Callback executed on match.
+        *patterns (list): List of token descriptions.
         """
         for pattern in patterns:
             if len(pattern) == 0:
@@ -492,6 +496,13 @@ cdef class PhraseMatcher:
         return (self.__class__, (self.vocab,), None, None)
 
     def add(self, key, on_match, *docs):
+        """Add a match-rule to the matcher. A match-rule consists of: an ID key,
+        an on_match callback, and one or more patterns.
+
+        key (unicode): The match ID.
+        on_match (callable): Callback executed on match.
+        *docs (Doc): `Doc` objects representing match patterns.
+        """
         cdef Doc doc
         for doc in docs:
             if len(doc) >= self.max_length:
@@ -520,6 +531,13 @@ cdef class PhraseMatcher:
         self.phrase_ids.set(phrase_hash, ent_id)
 
     def __call__(self, Doc doc):
+        """Find all sequences matching the supplied patterns on the `Doc`.
+
+        doc (Doc): The document to match over.
+        RETURNS (list): A list of `(key, start, end)` tuples,
+            describing the matches. A match tuple describes a span
+            `doc[start:end]`. The `key` is an integer.
+        """
         matches = []
         for _, start, end in self.matcher(doc):
             ent_id = self.accept_match(doc, start, end)
@@ -532,6 +550,14 @@ cdef class PhraseMatcher:
         return matches
 
     def pipe(self, stream, batch_size=1000, n_threads=2):
+        """Match a stream of documents, yielding them in turn.
+
+        docs (iterable): A stream of documents.
+        batch_size (int): The number of documents to accumulate into a working set.
+        n_threads (int): The number of threads with which to work on the buffer
+            in parallel, if the `Matcher` implementation supports multi-threading.
+        YIELDS (Doc): Documents, in order.
+        """
         for doc in stream:
             self(doc)
             yield doc
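The patches above round out ``PhraseMatcher``'s container protocol and document
its public API. A minimal usage sketch of how the documented calls fit together
(assuming a spaCy v2.x install with an English model available; the rule name
``GOOGLE_NOW`` is illustrative, not part of the library):

.. code:: python

    import spacy
    from spacy.matcher import PhraseMatcher

    nlp = spacy.load('en')
    matcher = PhraseMatcher(nlp.vocab)

    # each pattern is a pre-tokenized Doc, matched against the text verbatim
    matcher.add('GOOGLE_NOW', None, nlp(u'Google Now'))

    assert len(matcher) == 1         # number of rule IDs added so far
    assert 'GOOGLE_NOW' in matcher   # __contains__ normalizes the string key

    doc = nlp(u'I asked Google Now about the weather.')
    for match_id, start, end in matcher(doc):
        # match_id is an integer hash; look up its string name in the StringStore
        print(nlp.vocab.strings[match_id], doc[start:end].text)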
From 72497c8cb2ed59ac1f0b9fd0c9f1b0f6a6d1f51e Mon Sep 17 00:00:00 2001
From: ines 
Date: Wed, 25 Oct 2017 12:15:43 +0200
Subject: [PATCH 70/77] Remove comments and add TODO

---
 spacy/tokenizer.pyx | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/spacy/tokenizer.pyx b/spacy/tokenizer.pyx
index bc09129de..e865c60dd 100644
--- a/spacy/tokenizer.pyx
+++ b/spacy/tokenizer.pyx
@@ -63,11 +63,8 @@ cdef class Tokenizer:
         return (self.__class__, args, None, None)
 
     cpdef Doc tokens_from_list(self, list strings):
+        # TODO: deprecation warning
         return Doc(self.vocab, words=strings)
-        #raise NotImplementedError(
-        #    "Method deprecated in 1.0.\n"
-        #    "Old: tokenizer.tokens_from_list(strings)\n"
-        #    "New: Doc(tokenizer.vocab, words=strings)")
 
     @cython.boundscheck(False)
     def __call__(self, unicode string):

From 0102561f34033163dd8b7f711e98f33687233ac8 Mon Sep 17 00:00:00 2001
From: ines 
Date: Wed, 25 Oct 2017 13:57:55 +0200
Subject: [PATCH 71/77] Update docs

---
 website/api/language.jade | 1 +
 website/usage/v2.jade     | 1 +
 2 files changed, 2 insertions(+)

diff --git a/website/api/language.jade b/website/api/language.jade
index 6aa2d7612..b8fe98d78 100644
--- a/website/api/language.jade
+++ b/website/api/language.jade
@@ -229,6 +229,7 @@ p
         +cell Config parameters.
 
 +h(2, "preprocess_gold") Language.preprocess_gold
+    +tag method
 
 p
     | Can be called before training to pre-process gold data. By default, it
diff --git a/website/usage/v2.jade b/website/usage/v2.jade
index bb150de86..f833468bf 100644
--- a/website/usage/v2.jade
+++ b/website/usage/v2.jade
@@ -497,6 +497,7 @@ p
 
     +code-new.
         nlp = spacy.load('en', disable=['tagger', 'ner'])
+        doc = nlp(u"I don't want parsed", disable=['parser'])
        nlp.remove_pipe('parser')
     +code-old.
         nlp = spacy.load('en', tagger=False, entity=False)

From 094512fd47a67501d911066035289a10454c873c Mon Sep 17 00:00:00 2001
From: Matthew Honnibal 
Date: Wed, 25 Oct 2017 14:44:00 +0200
Subject: [PATCH 72/77] Fix model-mark on regression test. 
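
The ``models`` mark keeps model-dependent tests out of the default test run;
they only execute when the matching flag is passed to pytest (see the test
instructions in the README section earlier in this series). Roughly, a conftest
hook along the following lines implements such a marker. This is a hypothetical
sketch for recent pytest versions, not spaCy's actual
``spacy/tests/conftest.py``:

    # conftest.py (sketch): skip @pytest.mark.models('en') tests unless --en is given
    import pytest

    def pytest_addoption(parser):
        # opt-in flag; model tests are skipped unless it is passed
        parser.addoption('--en', action='store_true', default=False,
                         help='run tests that require the English models')

    def pytest_runtest_setup(item):
        for marker in item.iter_markers(name='models'):
            lang = marker.args[0] if marker.args else None
            if lang == 'en' and not item.config.getoption('--en'):
                pytest.skip('need --en to run English model tests')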
---
 spacy/tests/regression/test_issue1305.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/spacy/tests/regression/test_issue1305.py b/spacy/tests/regression/test_issue1305.py
index d1d5eb93d..342cdd081 100644
--- a/spacy/tests/regression/test_issue1305.py
+++ b/spacy/tests/regression/test_issue1305.py
@@ -1,11 +1,10 @@
 import pytest
 import spacy
 
-#@pytest.mark.models('en')
+@pytest.mark.models('en')
 def test_issue1305():
     '''Test lemmatization of English VBZ'''
     nlp = spacy.load('en_core_web_sm')
     assert nlp.vocab.morphology.lemmatizer('works', 'verb') == ['work']
     doc = nlp(u'This app works well')
-    print([(w.text, w.tag_) for w in doc])
     assert doc[2].lemma_ == 'work'

From 5117a7d24d0ca15f6fc04be13fa4a30527971ef8 Mon Sep 17 00:00:00 2001
From: ines 
Date: Wed, 25 Oct 2017 15:54:02 +0200
Subject: [PATCH 73/77] Fix whitespace

---
 spacy/syntax/nn_parser.pyx | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/spacy/syntax/nn_parser.pyx b/spacy/syntax/nn_parser.pyx
index a9553fd1f..f93f44d9d 100644
--- a/spacy/syntax/nn_parser.pyx
+++ b/spacy/syntax/nn_parser.pyx
@@ -253,7 +253,7 @@ cdef class Parser:
         hist_width = util.env_opt('history_width', cfg.get('hist_width', 0))
         if hist_size != 0:
             raise ValueError("Currently history size is hard-coded to 0")
-        if hist_width != 0: 
+        if hist_width != 0:
             raise ValueError("Currently history width is hard-coded to 0")
         tok2vec = Tok2Vec(token_vector_width, embed_size,
                           pretrained_dims=cfg.get('pretrained_dims', 0))
@@ -413,7 +413,7 @@ cdef class Parser:
         for stcls in state_objs:
             if not stcls.c.is_final():
                 states.push_back(stcls.c)
-        
+
         feat_weights = state2vec.get_feat_weights()
         cdef int i
         cdef np.ndarray hidden_weights = numpy.ascontiguousarray(vec2scores._layers[-1].W.T)
@@ -432,7 +432,7 @@ cdef class Parser:
             PyErr_CheckSignals()
         return state_objs
 
-    cdef void _parseC(self, StateC* state, 
+    cdef void _parseC(self, StateC* state,
                       const float* feat_weights, const float* hW, const float* hb,
                       int nr_class, int nr_hidden, int nr_feat, int nr_piece) nogil:
         token_ids = <int*>calloc(nr_feat, sizeof(int))
@@ -443,7 +443,7 @@ cdef class Parser:
         with gil:
             PyErr_SetFromErrno(MemoryError)
         PyErr_CheckSignals()
-    
+
     while not state.is_final():
         state.set_context_tokens(token_ids, nr_feat)
         memset(vectors, 0, nr_hidden * nr_piece * sizeof(float))

From 18aae423fbc09ca0507c6cabbe650143ae9b30bf Mon Sep 17 00:00:00 2001
From: ines 
Date: Wed, 25 Oct 2017 15:54:10 +0200
Subject: [PATCH 74/77] Remove import of non-existing function

---
 spacy/syntax/nn_parser.pyx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/spacy/syntax/nn_parser.pyx b/spacy/syntax/nn_parser.pyx
index f93f44d9d..913d2365f 100644
--- a/spacy/syntax/nn_parser.pyx
+++ b/spacy/syntax/nn_parser.pyx
@@ -48,7 +48,7 @@ from thinc.neural.util import get_array_module
 from ..
import util from ..util import get_async, get_cuda_stream from .._ml import zero_init, PrecomputableAffine, PrecomputableMaxouts -from .._ml import Tok2Vec, doc2feats, rebatch, fine_tune +from .._ml import Tok2Vec, doc2feats, rebatch from .._ml import Residual, drop_layer, flatten from .._ml import link_vectors_to_models from .._ml import HistoryFeatures From 91beacf5e327a5898935050ff8fdb9b9d9268821 Mon Sep 17 00:00:00 2001 From: ines Date: Wed, 25 Oct 2017 16:19:38 +0200 Subject: [PATCH 75/77] Fix Matcher.__contains__ --- spacy/matcher.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spacy/matcher.pyx b/spacy/matcher.pyx index 6c1069578..fd4a8026a 100644 --- a/spacy/matcher.pyx +++ b/spacy/matcher.pyx @@ -230,7 +230,7 @@ cdef class Matcher: key (unicode): The match ID. RETURNS (bool): Whether the matcher contains rules for this match ID. """ - return key in self._patterns + return self._normalize_key(key) in self._patterns def add(self, key, on_match, *patterns): """Add a match-rule to the matcher. A match-rule consists of: an ID key, From c0b55ebdac8196f4432a381a1ad39d7746d19ded Mon Sep 17 00:00:00 2001 From: ines Date: Wed, 25 Oct 2017 16:31:11 +0200 Subject: [PATCH 76/77] Fix PhraseMatcher.__contains__ and add more tests --- spacy/matcher.pyx | 2 +- spacy/tests/test_matcher.py | 28 ++++++++++++++++++++++++++-- 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/spacy/matcher.pyx b/spacy/matcher.pyx index fd4a8026a..401405c14 100644 --- a/spacy/matcher.pyx +++ b/spacy/matcher.pyx @@ -490,7 +490,7 @@ cdef class PhraseMatcher: RETURNS (bool): Whether the matcher contains rules for this match ID. """ cdef hash_t ent_id = self.matcher._normalize_key(key) - return ent_id in self.phrase_ids + return ent_id in self._callbacks def __reduce__(self): return (self.__class__, (self.vocab,), None, None) diff --git a/spacy/tests/test_matcher.py b/spacy/tests/test_matcher.py index 5b08ede39..8210467ea 100644 --- a/spacy/tests/test_matcher.py +++ b/spacy/tests/test_matcher.py @@ -64,6 +64,12 @@ def test_matcher_init(en_vocab, words): assert matcher(doc) == [] +def test_matcher_contains(matcher): + matcher.add('TEST', None, [{'ORTH': 'test'}]) + assert 'TEST' in matcher + assert 'TEST2' not in matcher + + def test_matcher_no_match(matcher): words = ["I", "like", "cheese", "."] doc = get_doc(matcher.vocab, words) @@ -112,7 +118,8 @@ def test_matcher_empty_dict(en_vocab): matcher.add('A.', None, [{'ORTH': 'a'}, {}]) matches = matcher(doc) assert matches[0][1:] == (0, 2) - + + def test_matcher_operator_shadow(en_vocab): matcher = Matcher(en_vocab) abc = ["a", "b", "c"] @@ -123,7 +130,8 @@ def test_matcher_operator_shadow(en_vocab): matches = matcher(doc) assert len(matches) == 1 assert matches[0][1:] == (0, 3) - + + def test_matcher_phrase_matcher(en_vocab): words = ["Google", "Now"] doc = get_doc(en_vocab, words) @@ -134,6 +142,22 @@ def test_matcher_phrase_matcher(en_vocab): assert len(matcher(doc)) == 1 +def test_phrase_matcher_length(en_vocab): + matcher = PhraseMatcher(en_vocab) + assert len(matcher) == 0 + matcher.add('TEST', None, get_doc(en_vocab, ['test'])) + assert len(matcher) == 1 + matcher.add('TEST2', None, get_doc(en_vocab, ['test2'])) + assert len(matcher) == 2 + + +def test_phrase_matcher_contains(en_vocab): + matcher = PhraseMatcher(en_vocab) + matcher.add('TEST', None, get_doc(en_vocab, ['test'])) + assert 'TEST' in matcher + assert 'TEST2' not in matcher + + def test_matcher_match_zero(matcher): words1 = 'He said , " some words " ...'.split() words2 = 'He 
said , " some three words " ...'.split() From 1bc07758faaf73a9cbcdca340b6343cb5d6cd76a Mon Sep 17 00:00:00 2001 From: mayukh18 Date: Wed, 25 Oct 2017 22:24:40 +0530 Subject: [PATCH 77/77] added few bengali pronouns --- spacy/lang/bn/morph_rules.py | 15 ++++++++++++++- spacy/lang/bn/stop_words.py | 4 ++-- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/spacy/lang/bn/morph_rules.py b/spacy/lang/bn/morph_rules.py index 8561f8676..6ca8fc097 100644 --- a/spacy/lang/bn/morph_rules.py +++ b/spacy/lang/bn/morph_rules.py @@ -12,11 +12,11 @@ MORPH_RULES = { 'কি': {LEMMA: PRON_LEMMA, 'Number': 'Sing', 'Gender': 'Neut', 'PronType': 'Int', 'Case': 'Acc'}, 'সে': {LEMMA: PRON_LEMMA, 'Number': 'Sing', 'Person': 'Three', 'PronType': 'Prs', 'Case': 'Nom'}, 'কিসে': {LEMMA: PRON_LEMMA, 'Number': 'Sing', 'Gender': 'Neut', 'PronType': 'Int', 'Case': 'Acc'}, - 'কাদের': {LEMMA: PRON_LEMMA, 'Number': 'Plur', 'PronType': 'Int', 'Case': 'Acc'}, 'তাকে': {LEMMA: PRON_LEMMA, 'Number': 'Sing', 'Person': 'Three', 'PronType': 'Prs', 'Case': 'Acc'}, 'স্বয়ং': {LEMMA: PRON_LEMMA, 'Reflex': 'Yes', 'PronType': 'Ref'}, 'কোনগুলো': {LEMMA: PRON_LEMMA, 'Number': 'Plur', 'Gender': 'Neut', 'PronType': 'Int', 'Case': 'Acc'}, 'তুমি': {LEMMA: PRON_LEMMA, 'Number': 'Sing', 'Person': 'Two', 'PronType': 'Prs', 'Case': 'Nom'}, + 'তুই': {LEMMA: PRON_LEMMA, 'Number': 'Sing', 'Person': 'Two', 'PronType': 'Prs', 'Case': 'Nom'}, 'তাদেরকে': {LEMMA: PRON_LEMMA, 'Number': 'Plur', 'Person': 'Three', 'PronType': 'Prs', 'Case': 'Acc'}, 'আমরা': {LEMMA: PRON_LEMMA, 'Number': 'Plur', 'Person': 'One ', 'PronType': 'Prs', 'Case': 'Nom'}, 'যিনি': {LEMMA: PRON_LEMMA, 'Number': 'Sing', 'PronType': 'Rel', 'Case': 'Nom'}, @@ -24,12 +24,15 @@ MORPH_RULES = { 'কোন': {LEMMA: PRON_LEMMA, 'Number': 'Sing', 'PronType': 'Int', 'Case': 'Acc'}, 'কারা': {LEMMA: PRON_LEMMA, 'Number': 'Plur', 'PronType': 'Int', 'Case': 'Acc'}, 'তোমাকে': {LEMMA: PRON_LEMMA, 'Number': 'Sing', 'Person': 'Two', 'PronType': 'Prs', 'Case': 'Acc'}, + 'তোকে': {LEMMA: PRON_LEMMA, 'Number': 'Sing', 'Person': 'Two', 'PronType': 'Prs', 'Case': 'Acc'}, 'খোদ': {LEMMA: PRON_LEMMA, 'Reflex': 'Yes', 'PronType': 'Ref'}, 'কে': {LEMMA: PRON_LEMMA, 'Number': 'Sing', 'PronType': 'Int', 'Case': 'Acc'}, 'যারা': {LEMMA: PRON_LEMMA, 'Number': 'Plur', 'PronType': 'Rel', 'Case': 'Nom'}, 'যে': {LEMMA: PRON_LEMMA, 'Number': 'Sing', 'PronType': 'Rel', 'Case': 'Nom'}, 'তোমরা': {LEMMA: PRON_LEMMA, 'Number': 'Plur', 'Person': 'Two', 'PronType': 'Prs', 'Case': 'Nom'}, + 'তোরা': {LEMMA: PRON_LEMMA, 'Number': 'Plur', 'Person': 'Two', 'PronType': 'Prs', 'Case': 'Nom'}, 'তোমাদেরকে': {LEMMA: PRON_LEMMA, 'Number': 'Plur', 'Person': 'Two', 'PronType': 'Prs', 'Case': 'Acc'}, + 'তোদেরকে': {LEMMA: PRON_LEMMA, 'Number': 'Plur', 'Person': 'Two', 'PronType': 'Prs', 'Case': 'Acc'}, 'আপন': {LEMMA: PRON_LEMMA, 'Reflex': 'Yes', 'PronType': 'Ref'}, 'এ': {LEMMA: PRON_LEMMA, 'PronType': 'Dem'}, 'নিজ': {LEMMA: PRON_LEMMA, 'Reflex': 'Yes', 'PronType': 'Ref'}, @@ -42,6 +45,10 @@ MORPH_RULES = { 'আমার': {LEMMA: PRON_LEMMA, 'Number': 'Sing', 'Person': 'One', 'PronType': 'Prs', 'Poss': 'Yes', 'Case': 'Nom'}, + 'মোর': {LEMMA: PRON_LEMMA, 'Number': 'Sing', 'Person': 'One', 'PronType': 'Prs', 'Poss': 'Yes', + 'Case': 'Nom'}, + 'মোদের': {LEMMA: PRON_LEMMA, 'Number': 'Plur', 'Person': 'One', 'PronType': 'Prs', 'Poss': 'Yes', + 'Case': 'Nom'}, 'তার': {LEMMA: PRON_LEMMA, 'Number': 'Sing', 'Person': 'Three', 'PronType': 'Prs', 'Poss': 'Yes', 'Case': 'Nom'}, 'তোমাদের': {LEMMA: PRON_LEMMA, 'Number': 'Plur', 'Person': 'Two', 'PronType': 'Prs', 
'Poss': 'Yes', @@ -50,7 +57,13 @@ MORPH_RULES = { 'Case': 'Nom'}, 'তোমার': {LEMMA: PRON_LEMMA, 'Number': 'Sing', 'Person': 'Two', 'PronType': 'Prs', 'Poss': 'Yes', 'Case': 'Nom'}, + 'তোর': {LEMMA: PRON_LEMMA, 'Number': 'Sing', 'Person': 'Two', 'PronType': 'Prs', 'Poss': 'Yes', + 'Case': 'Nom'}, 'তাদের': {LEMMA: PRON_LEMMA, 'Number': 'Plur', 'Person': 'Three', 'PronType': 'Prs', 'Poss': 'Yes', 'Case': 'Nom'}, + 'কাদের': {LEMMA: PRON_LEMMA, 'Number': 'Plur', 'PronType': 'Int', 'Case': 'Acc'}, + 'তোদের': {LEMMA: PRON_LEMMA, 'Number': 'Plur', 'Person': 'Two', 'PronType': 'Prs', 'Poss': 'Yes', + 'Case': 'Nom'}, + 'যাদের': {LEMMA: PRON_LEMMA, 'Number': 'Plur', 'PronType': 'Int', 'Case': 'Acc'}, } } diff --git a/spacy/lang/bn/stop_words.py b/spacy/lang/bn/stop_words.py index 5b513da7b..ca0ae934a 100644 --- a/spacy/lang/bn/stop_words.py +++ b/spacy/lang/bn/stop_words.py @@ -22,7 +22,7 @@ STOP_WORDS = set(""" টি ঠিক তখন তত তথা তবু তবে তা তাঁকে তাঁদের তাঁর তাঁরা তাঁহারা তাই তাও তাকে তাতে তাদের তার তারপর তারা তারই তাহলে তাহা তাহাতে তাহার তিনই -তিনি তিনিও তুমি তুলে তেমন তো তোমার +তিনি তিনিও তুমি তুলে তেমন তো তোমার তুই তোরা তোর তোমাদের তোদের থাকবে থাকবেন থাকা থাকায় থাকে থাকেন থেকে থেকেই থেকেও থাকায় দিকে দিতে দিয়ে দিয়েছে দিয়েছেন দিলেন দিয়ে দু দুটি দুটো দেওয়া দেওয়ার দেখতে দেখা দেখে দেন দেয় দেশের দ্বারা দিয়েছে দিয়েছেন দেয় দেওয়া দেওয়ার দিন দুই @@ -32,7 +32,7 @@ STOP_WORDS = set(""" ফলে ফিরে ফের বছর বদলে বরং বলতে বলল বললেন বলা বলে বলেছেন বলেন বসে বহু বা বাদে বার বিনা বিভিন্ন বিশেষ বিষয়টি বেশ ব্যবহার ব্যাপারে বক্তব্য বন বেশি ভাবে ভাবেই -মত মতো মতোই মধ্যভাগে মধ্যে মধ্যেই মধ্যেও মনে মাত্র মাধ্যমে মানুষ মানুষের মোট মোটেই +মত মতো মতোই মধ্যভাগে মধ্যে মধ্যেই মধ্যেও মনে মাত্র মাধ্যমে মানুষ মানুষের মোট মোটেই মোদের মোর যখন যত যতটা যথেষ্ট যদি যদিও যা যাঁর যাঁরা যাওয়া যাওয়ার যাকে যাচ্ছে যাতে যাদের যান যাবে যায় যার যারা যায় যিনি যে যেখানে যেতে যেন যেমন রকম রয়েছে রাখা রেখে রয়েছে