# coding: utf-8
from __future__ import unicode_literals

import pytest
from spacy.attrs import LEMMA
from spacy.vocab import Vocab
from spacy.tokens import Doc, Token

from ..util import get_doc
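
# All tests below drive the retokenizer through its context manager: merges
# are queued inside the `with` block and only applied when the block exits,
# so span indices always refer to the original tokenization, even when
# several merges are queued. A minimal sketch (values are illustrative, not
# a fixture used by the tests below):
#
#     doc = en_tokenizer("New York City")
#     with doc.retokenize() as retokenizer:
#         retokenizer.merge(doc[0:3], attrs={"lemma": "New York City"})
#     assert len(doc) == 1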


def test_doc_retokenize_merge(en_tokenizer):
    text = "WKRO played songs by the beach boys all night"
    attrs = {"tag": "NAMED", "lemma": "LEMMA", "ent_type": "TYPE"}
    doc = en_tokenizer(text)
    assert len(doc) == 9
    with doc.retokenize() as retokenizer:
        retokenizer.merge(doc[4:7], attrs=attrs)
        retokenizer.merge(doc[7:9], attrs=attrs)
    assert len(doc) == 6
    assert doc[4].text == "the beach boys"
    assert doc[4].text_with_ws == "the beach boys "
    assert doc[4].tag_ == "NAMED"
    assert doc[5].text == "all night"
    assert doc[5].text_with_ws == "all night"
    assert doc[5].tag_ == "NAMED"


def test_doc_retokenize_merge_children(en_tokenizer):
    """Test that attachments work correctly after merging."""
    text = "WKRO played songs by the beach boys all night"
    attrs = {"tag": "NAMED", "lemma": "LEMMA", "ent_type": "TYPE"}
    doc = en_tokenizer(text)
    assert len(doc) == 9
    with doc.retokenize() as retokenizer:
        retokenizer.merge(doc[4:7], attrs=attrs)
    for word in doc:
        if word.i < word.head.i:
            assert word in list(word.head.lefts)
        elif word.i > word.head.i:
            assert word in list(word.head.rights)


def test_doc_retokenize_merge_hang(en_tokenizer):
    text = "through North and South Carolina"
    doc = en_tokenizer(text)
    with doc.retokenize() as retokenizer:
        retokenizer.merge(doc[3:5], attrs={"lemma": "", "ent_type": "ORG"})
        retokenizer.merge(doc[1:2], attrs={"lemma": "", "ent_type": "ORG"})


def test_doc_retokenize_retokenizer(en_tokenizer):
    doc = en_tokenizer("WKRO played songs by the beach boys all night")
    with doc.retokenize() as retokenizer:
        retokenizer.merge(doc[4:7])
    assert len(doc) == 7
    assert doc[4].text == "the beach boys"


def test_doc_retokenize_retokenizer_attrs(en_tokenizer):
    doc = en_tokenizer("WKRO played songs by the beach boys all night")
    # test both string and integer attributes and values
    attrs = {LEMMA: "boys", "ENT_TYPE": doc.vocab.strings["ORG"]}
    with doc.retokenize() as retokenizer:
        retokenizer.merge(doc[4:7], attrs=attrs)
    assert len(doc) == 7
    assert doc[4].text == "the beach boys"
    assert doc[4].lemma_ == "boys"
    assert doc[4].ent_type_ == "ORG"


def test_doc_retokenize_lex_attrs(en_tokenizer):
    """Test that lexical attributes can be changed (see #2390)."""
    doc = en_tokenizer("WKRO played beach boys songs")
    assert not any(token.is_stop for token in doc)
    with doc.retokenize() as retokenizer:
        retokenizer.merge(doc[2:4], attrs={"LEMMA": "boys", "IS_STOP": True})
    assert doc[2].text == "beach boys"
    assert doc[2].lemma_ == "boys"
    assert doc[2].is_stop
    new_doc = Doc(doc.vocab, words=["beach boys"])
    assert new_doc[0].is_stop
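    # Note: IS_STOP is a lexical attribute, so the merge above writes through
    # to the lexeme in the shared vocab. That is why a brand-new Doc over the
    # same vocab (`new_doc` above) already sees "beach boys" as a stop word.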


def test_doc_retokenize_spans_merge_tokens(en_tokenizer):
    text = "Los Angeles start."
    heads = [1, 1, 0, -1]
    tokens = en_tokenizer(text)
    doc = get_doc(tokens.vocab, words=[t.text for t in tokens], heads=heads)
    assert len(doc) == 4
    assert doc[0].head.text == "Angeles"
    assert doc[1].head.text == "start"
    with doc.retokenize() as retokenizer:
        attrs = {"tag": "NNP", "lemma": "Los Angeles", "ent_type": "GPE"}
        retokenizer.merge(doc[0:2], attrs=attrs)
    assert len(doc) == 3
    assert doc[0].text == "Los Angeles"
    assert doc[0].head.text == "start"
    assert doc[0].ent_type_ == "GPE"


def test_doc_retokenize_spans_merge_heads(en_tokenizer):
    text = "I found a pilates class near work."
    heads = [1, 0, 2, 1, -3, -1, -1, -6]
    tokens = en_tokenizer(text)
    doc = get_doc(tokens.vocab, words=[t.text for t in tokens], heads=heads)
    assert len(doc) == 8
    with doc.retokenize() as retokenizer:
        attrs = {"tag": doc[4].tag_, "lemma": "pilates class", "ent_type": "O"}
        retokenizer.merge(doc[3:5], attrs=attrs)
    assert len(doc) == 7
    assert doc[0].head.i == 1
    assert doc[1].head.i == 1
    assert doc[2].head.i == 3
    assert doc[3].head.i == 1
    assert doc[4].head.i in [1, 3]
    assert doc[5].head.i == 4
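    # Note on head reattachment: a merged span collapses onto its syntactic
    # root, so the new token inherits the root's head, and dependents of the
    # span's tokens are reattached to the merged token; the head indices
    # asserted above refer to positions in the merged (7-token) doc.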


def test_doc_retokenize_spans_merge_non_disjoint(en_tokenizer):
    text = "Los Angeles start."
    doc = en_tokenizer(text)
    with pytest.raises(ValueError):
        with doc.retokenize() as retokenizer:
            retokenizer.merge(
                doc[0:2],
                attrs={"tag": "NNP", "lemma": "Los Angeles", "ent_type": "GPE"},
            )
            retokenizer.merge(
                doc[0:1],
                attrs={"tag": "NNP", "lemma": "Los Angeles", "ent_type": "GPE"},
            )
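    # Overlapping merges are invalid: since queued merges are only applied
    # when the retokenize block exits, the ValueError surfaces on exit, which
    # is why pytest.raises wraps the whole block.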


def test_doc_retokenize_span_np_merges(en_tokenizer):
    text = "displaCy is a parse tool built with Javascript"
    heads = [1, 0, 2, 1, -3, -1, -1, -1]
    tokens = en_tokenizer(text)
    doc = get_doc(tokens.vocab, words=[t.text for t in tokens], heads=heads)
    assert doc[4].head.i == 1
    with doc.retokenize() as retokenizer:
        attrs = {"tag": "NP", "lemma": "tool", "ent_type": "O"}
        retokenizer.merge(doc[2:5], attrs=attrs)
    assert doc[2].head.i == 1

    text = "displaCy is a lightweight and modern dependency parse tree visualization tool built with CSS3 and JavaScript."
    heads = [1, 0, 8, 3, -1, -2, 4, 3, 1, 1, -9, -1, -1, -1, -1, -2, -15]
    tokens = en_tokenizer(text)
    doc = get_doc(tokens.vocab, words=[t.text for t in tokens], heads=heads)
    with doc.retokenize() as retokenizer:
        for ent in doc.ents:
            attrs = {"tag": ent.label_, "lemma": ent.lemma_, "ent_type": ent.label_}
            retokenizer.merge(ent, attrs=attrs)

    text = "One test with entities like New York City so the ents list is not void"
    heads = [1, 11, -1, -1, -1, 1, 1, -3, 4, 2, 1, 1, 0, -1, -2]
    tokens = en_tokenizer(text)
    doc = get_doc(tokens.vocab, words=[t.text for t in tokens], heads=heads)
    with doc.retokenize() as retokenizer:
        for ent in doc.ents:
            retokenizer.merge(ent)


def test_doc_retokenize_spans_entity_merge(en_tokenizer):
    # fmt: off
    text = "Stewart Lee is a stand up comedian who lives in England and loves Joe Pasquale.\n"
    heads = [1, 1, 0, 1, 2, -1, -4, 1, -2, -1, -1, -3, -10, 1, -2, -13, -1]
    tags = ["NNP", "NNP", "VBZ", "DT", "VB", "RP", "NN", "WP", "VBZ", "IN", "NNP", "CC", "VBZ", "NNP", "NNP", ".", "SP"]
    ents = [(0, 2, "PERSON"), (10, 11, "GPE"), (13, 15, "PERSON")]
    # fmt: on
    tokens = en_tokenizer(text)
    doc = get_doc(
        tokens.vocab, words=[t.text for t in tokens], heads=heads, tags=tags, ents=ents
    )
    assert len(doc) == 17
    with doc.retokenize() as retokenizer:
        for ent in doc.ents:
            ent_type = max(w.ent_type_ for w in ent)
            attrs = {"lemma": ent.root.lemma_, "ent_type": ent_type}
            retokenizer.merge(ent, attrs=attrs)
    # check looping is ok
    assert len(doc) == 15


def test_doc_retokenize_spans_entity_merge_iob():
    # Test entity IOB stays consistent after merging
    words = ["a", "b", "c", "d", "e"]
    doc = Doc(Vocab(), words=words)
    doc.ents = [
        (doc.vocab.strings.add("ent-abc"), 0, 3),
        (doc.vocab.strings.add("ent-d"), 3, 4),
    ]
    assert doc[0].ent_iob_ == "B"
    assert doc[1].ent_iob_ == "I"
    assert doc[2].ent_iob_ == "I"
    assert doc[3].ent_iob_ == "B"
    with doc.retokenize() as retokenizer:
        retokenizer.merge(doc[0:1])
    assert doc[0].ent_iob_ == "B"
    assert doc[1].ent_iob_ == "I"
    words = ["a", "b", "c", "d", "e", "f", "g", "h", "i"]
    doc = Doc(Vocab(), words=words)
    doc.ents = [
        (doc.vocab.strings.add("ent-de"), 3, 5),
        (doc.vocab.strings.add("ent-fg"), 5, 7),
    ]
    assert doc[3].ent_iob_ == "B"
    assert doc[4].ent_iob_ == "I"
    assert doc[5].ent_iob_ == "B"
    assert doc[6].ent_iob_ == "I"
    with doc.retokenize() as retokenizer:
        retokenizer.merge(doc[2:4])
        retokenizer.merge(doc[4:6])
        retokenizer.merge(doc[7:9])
    assert len(doc) == 6
    assert doc[3].ent_iob_ == "B"
    assert doc[4].ent_iob_ == "I"


def test_doc_retokenize_spans_sentence_update_after_merge(en_tokenizer):
    # fmt: off
    text = "Stewart Lee is a stand up comedian. He lives in England and loves Joe Pasquale."
    heads = [1, 1, 0, 1, 2, -1, -4, -5, 1, 0, -1, -1, -3, -4, 1, -2, -7]
    deps = ["compound", "nsubj", "ROOT", "det", "amod", "prt", "attr",
            "punct", "nsubj", "ROOT", "prep", "pobj", "cc", "conj",
            "compound", "dobj", "punct"]
    # fmt: on
    tokens = en_tokenizer(text)
    doc = get_doc(tokens.vocab, words=[t.text for t in tokens], heads=heads, deps=deps)
    sent1, sent2 = list(doc.sents)
    init_len = len(sent1)
    init_len2 = len(sent2)
    with doc.retokenize() as retokenizer:
        attrs = {"lemma": "none", "ent_type": "none"}
        retokenizer.merge(doc[0:2], attrs=attrs)
        retokenizer.merge(doc[-2:], attrs=attrs)
    assert len(sent1) == init_len - 1
    assert len(sent2) == init_len2 - 1


def test_doc_retokenize_spans_subtree_size_check(en_tokenizer):
    # fmt: off
    text = "Stewart Lee is a stand up comedian who lives in England and loves Joe Pasquale"
    heads = [1, 1, 0, 1, 2, -1, -4, 1, -2, -1, -1, -3, -10, 1, -2]
    deps = ["compound", "nsubj", "ROOT", "det", "amod", "prt", "attr",
            "nsubj", "relcl", "prep", "pobj", "cc", "conj", "compound",
            "dobj"]
    # fmt: on
    tokens = en_tokenizer(text)
    doc = get_doc(tokens.vocab, words=[t.text for t in tokens], heads=heads, deps=deps)
    sent1 = list(doc.sents)[0]
    init_len = len(list(sent1.root.subtree))
    with doc.retokenize() as retokenizer:
        attrs = {"lemma": "none", "ent_type": "none"}
        retokenizer.merge(doc[0:2], attrs=attrs)
    assert len(list(sent1.root.subtree)) == init_len - 1


def test_doc_retokenize_merge_extension_attrs(en_vocab):
    Token.set_extension("a", default=False, force=True)
    Token.set_extension("b", default="nothing", force=True)
    doc = Doc(en_vocab, words=["hello", "world", "!"])
    # Test regular merging
    with doc.retokenize() as retokenizer:
        attrs = {"lemma": "hello world", "_": {"a": True, "b": "1"}}
        retokenizer.merge(doc[0:2], attrs=attrs)
    assert doc[0].lemma_ == "hello world"
    assert doc[0]._.a is True
    assert doc[0]._.b == "1"
    # Test bulk merging
    doc = Doc(en_vocab, words=["hello", "world", "!", "!"])
    with doc.retokenize() as retokenizer:
        retokenizer.merge(doc[0:2], attrs={"_": {"a": True, "b": "1"}})
        retokenizer.merge(doc[2:4], attrs={"_": {"a": None, "b": "2"}})
    assert doc[0]._.a is True
    assert doc[0]._.b == "1"
    assert doc[1]._.a is None
    assert doc[1]._.b == "2"
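
# Custom underscore values in `attrs={"_": ...}` only work for writable
# extensions (registered with a default or a setter): getter-/method-backed
# and unregistered extensions are rejected with a ValueError, which is what
# the parametrized test below verifies.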


@pytest.mark.parametrize("underscore_attrs", [{"a": "x"}, {"b": "x"}, {"c": "x"}, [1]])
def test_doc_retokenize_merge_extension_attrs_invalid(en_vocab, underscore_attrs):
    Token.set_extension("a", getter=lambda x: x, force=True)
    Token.set_extension("b", method=lambda x: x, force=True)
    doc = Doc(en_vocab, words=["hello", "world", "!"])
    attrs = {"_": underscore_attrs}
    with pytest.raises(ValueError):
        with doc.retokenize() as retokenizer:
            retokenizer.merge(doc[0:2], attrs=attrs)


def test_doc_retokenizer_merge_lex_attrs(en_vocab):
    """Test that retokenization also sets attributes on the lexeme if they're
    lexical attributes. For example, if a user sets IS_STOP, it should mean
    that all tokens with that lexeme are marked as stop words, so the
    ambiguity here is acceptable. Also see #2390.
    """
    # Test regular merging
    doc = Doc(en_vocab, words=["hello", "world", "!"])
    assert not any(t.is_stop for t in doc)
    with doc.retokenize() as retokenizer:
        retokenizer.merge(doc[0:2], attrs={"lemma": "hello world", "is_stop": True})
    assert doc[0].lemma_ == "hello world"
    assert doc[0].is_stop
    # Test bulk merging
    doc = Doc(en_vocab, words=["eins", "zwei", "!", "!"])
    assert not any(t.like_num for t in doc)
    assert not any(t.is_stop for t in doc)
    with doc.retokenize() as retokenizer:
        retokenizer.merge(doc[0:2], attrs={"like_num": True})
        retokenizer.merge(doc[2:4], attrs={"is_stop": True})
    assert doc[0].like_num
    assert doc[1].is_stop
    assert not doc[0].is_stop
    assert not doc[1].like_num