From 0a5b140235bb6a8cfdb35bcd5fdd68d14128733c Mon Sep 17 00:00:00 2001 From: Kevin Lu Date: Tue, 19 May 2020 20:12:21 -0700 Subject: [PATCH 01/13] Update universe.json --- website/meta/universe.json | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/website/meta/universe.json b/website/meta/universe.json index cf587f5f0..724dc3d07 100644 --- a/website/meta/universe.json +++ b/website/meta/universe.json @@ -2172,6 +2172,39 @@ "model_uri = f'runs:/{my_run_id}/model'", "nlp2 = mlflow.spacy.load_model(model_uri=model_uri)" ] + }, + { + "id": "pyate", + "title": "PyATE", + "slogan": "Python Automated Term Extraction", + "description": "PyATE is a term extraction library written in Python using Spacy POS tagging with Basic, Combo Basic, C-Value, TermExtractor, and Weirdness.", + "github": "kevinlu1248/pyate", + "pip": "pyate", + "code_example": [ + "from pyate import combo_basic", + "", + "# source: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC1994795/", + "string = 'Central to the development of cancer are genetic changes that endow these “cancer cells” with many of the hallmarks of cancer, such as self-sufficient growth and resistance to anti-growth and pro-death signals. However, while the genetic changes that occur within cancer cells themselves, such as activated oncogenes or dysfunctional tumor suppressors, are responsible for many aspects of cancer development, they are not sufficient. Tumor promotion and progression are dependent on ancillary processes provided by cells of the tumor environment but that are not necessarily cancerous themselves. Inflammation has long been associated with the development of cancer. This review will discuss the reflexive relationship between cancer and inflammation with particular focus on how considering the role of inflammation in physiologic processes such as the maintenance of tissue homeostasis and repair may provide a logical framework for understanding the connection between the inflammatory response and cancer.'", + "", + "print(combo_basic(string).sort_values(ascending=False).head(5))", + "\"\"\"\"\"\"", + "dysfunctional tumor 1.443147", + "tumor suppressors 1.443147", + "genetic changes 1.386294", + "cancer cells 1.386294", + "dysfunctional tumor suppressors 1.298612", + "\"\"\"\"\"\"" + ], + "code_language": "python", + "url": "https://github.com/kevinlu1248/pyate", + "author": "Kevin Lu", + "author_links": { + "twitter": "kevinlu1248", + "github": "kevinlu1248", + "website": "https://github.com/kevinlu1248/pyate" + }, + "category": ["pipeline", "research"], + "tags": ["term_extraction"] } ], From a23b3a5a5042ed99cfd0c9988d1956adb85601c0 Mon Sep 17 00:00:00 2001 From: Kevin Lu Date: Tue, 19 May 2020 20:24:24 -0700 Subject: [PATCH 02/13] Update CONTRIBUTOR_AGREEMENT.md --- .github/CONTRIBUTOR_AGREEMENT.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/CONTRIBUTOR_AGREEMENT.md b/.github/CONTRIBUTOR_AGREEMENT.md index da9f244eb..fc974ec95 100644 --- a/.github/CONTRIBUTOR_AGREEMENT.md +++ b/.github/CONTRIBUTOR_AGREEMENT.md @@ -87,7 +87,7 @@ U.S. Federal law. Any choice of law rules will not apply. 7. Please place an “x” on one of the applicable statement below. Please do NOT mark both statements: - * [ ] I am signing on behalf of myself as an individual and no other person + * [x] I am signing on behalf of myself as an individual and no other person or entity, including my employer, has or will have rights with respect to my contributions. 
@@ -98,9 +98,9 @@ mark both statements: | Field | Entry | |------------------------------- | -------------------- | -| Name | | +| Name | Kevin Lu| | Company name (if applicable) | | -| Title or role (if applicable) | | +| Title or role (if applicable) | Student| | Date | | -| GitHub username | | +| GitHub username | kevinlu1248| | Website (optional) | | From 9a1a5352154a58a83278de3be77aa564af05b40f Mon Sep 17 00:00:00 2001 From: Kevin Lu Date: Tue, 19 May 2020 20:25:45 -0700 Subject: [PATCH 03/13] Create kevinlu1248.md --- .github/contributors/kevinlu1248.md | 106 ++++++++++++++++++++++++++++ 1 file changed, 106 insertions(+) create mode 100644 .github/contributors/kevinlu1248.md diff --git a/.github/contributors/kevinlu1248.md b/.github/contributors/kevinlu1248.md new file mode 100644 index 000000000..fc974ec95 --- /dev/null +++ b/.github/contributors/kevinlu1248.md @@ -0,0 +1,106 @@ +# spaCy contributor agreement + +This spaCy Contributor Agreement (**"SCA"**) is based on the +[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf). +The SCA applies to any contribution that you make to any product or project +managed by us (the **"project"**), and sets out the intellectual property rights +you grant to us in the contributed materials. The term **"us"** shall mean +[ExplosionAI GmbH](https://explosion.ai/legal). The term +**"you"** shall mean the person or entity identified below. + +If you agree to be bound by these terms, fill in the information requested +below and include the filled-in version with your first pull request, under the +folder [`.github/contributors/`](/.github/contributors/). The name of the file +should be your GitHub username, with the extension `.md`. For example, the user +example_user would create the file `.github/contributors/example_user.md`. + +Read this agreement carefully before signing. These terms and conditions +constitute a binding legal agreement. + +## Contributor Agreement + +1. The term "contribution" or "contributed materials" means any source code, +object code, patch, tool, sample, graphic, specification, manual, +documentation, or any other material posted or submitted by you to the project. + +2. With respect to any worldwide copyrights, or copyright applications and +registrations, in your contribution: + + * you hereby assign to us joint ownership, and to the extent that such + assignment is or becomes invalid, ineffective or unenforceable, you hereby + grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge, + royalty-free, unrestricted license to exercise all rights under those + copyrights. This includes, at our option, the right to sublicense these same + rights to third parties through multiple levels of sublicensees or other + licensing arrangements; + + * you agree that each of us can do all things in relation to your + contribution as if each of us were the sole owners, and if one of us makes + a derivative work of your contribution, the one who makes the derivative + work (or has it made will be the sole owner of that derivative work; + + * you agree that you will not assert any moral rights in your contribution + against us, our licensees or transferees; + + * you agree that we may register a copyright in your contribution and + exercise all ownership rights associated with it; and + + * you agree that neither of us has any duty to consult with, obtain the + consent of, pay or render an accounting to the other for any use or + distribution of your contribution. + +3. 
With respect to any patents you own, or that you can license without payment +to any third party, you hereby grant to us a perpetual, irrevocable, +non-exclusive, worldwide, no-charge, royalty-free license to: + + * make, have made, use, sell, offer to sell, import, and otherwise transfer + your contribution in whole or in part, alone or in combination with or + included in any product, work or materials arising out of the project to + which your contribution was submitted, and + + * at our option, to sublicense these same rights to third parties through + multiple levels of sublicensees or other licensing arrangements. + +4. Except as set out above, you keep all right, title, and interest in your +contribution. The rights that you grant to us under these terms are effective +on the date you first submitted a contribution to us, even if your submission +took place before the date you sign these terms. + +5. You covenant, represent, warrant and agree that: + + * Each contribution that you submit is and shall be an original work of + authorship and you can legally grant the rights set out in this SCA; + + * to the best of your knowledge, each contribution will not violate any + third party's copyrights, trademarks, patents, or other intellectual + property rights; and + + * each contribution shall be in compliance with U.S. export control laws and + other applicable export and import laws. You agree to notify us if you + become aware of any circumstance which would make any of the foregoing + representations inaccurate in any respect. We may publicly disclose your + participation in the project, including the fact that you have signed the SCA. + +6. This SCA is governed by the laws of the State of California and applicable +U.S. Federal law. Any choice of law rules will not apply. + +7. Please place an “x” on one of the applicable statement below. Please do NOT +mark both statements: + + * [x] I am signing on behalf of myself as an individual and no other person + or entity, including my employer, has or will have rights with respect to my + contributions. + + * [ ] I am signing on behalf of my employer or a legal entity and I have the + actual authority to contractually bind that entity. + +## Contributor Details + +| Field | Entry | +|------------------------------- | -------------------- | +| Name | Kevin Lu| +| Company name (if applicable) | | +| Title or role (if applicable) | Student| +| Date | | +| GitHub username | kevinlu1248| +| Website (optional) | | From 291b9ad7b902edd945cc8430550a6633440c582a Mon Sep 17 00:00:00 2001 From: Kevin Lu Date: Tue, 19 May 2020 20:29:53 -0700 Subject: [PATCH 04/13] Update CONTRIBUTOR_AGREEMENT.md --- .github/CONTRIBUTOR_AGREEMENT.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/CONTRIBUTOR_AGREEMENT.md b/.github/CONTRIBUTOR_AGREEMENT.md index fc974ec95..da9f244eb 100644 --- a/.github/CONTRIBUTOR_AGREEMENT.md +++ b/.github/CONTRIBUTOR_AGREEMENT.md @@ -87,7 +87,7 @@ U.S. Federal law. Any choice of law rules will not apply. 7. Please place an “x” on one of the applicable statement below. Please do NOT mark both statements: - * [x] I am signing on behalf of myself as an individual and no other person + * [ ] I am signing on behalf of myself as an individual and no other person or entity, including my employer, has or will have rights with respect to my contributions. 
@@ -98,9 +98,9 @@ mark both statements: | Field | Entry | |------------------------------- | -------------------- | -| Name | Kevin Lu| +| Name | | | Company name (if applicable) | | -| Title or role (if applicable) | Student| +| Title or role (if applicable) | | | Date | | -| GitHub username | kevinlu1248| +| GitHub username | | | Website (optional) | | From 78bb9ff5e0e4adc01bd30e227657118d87546f83 Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Wed, 20 May 2020 14:56:52 +0200 Subject: [PATCH 05/13] doc_or_span -> obj --- spacy/matcher/matcher.pyx | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/spacy/matcher/matcher.pyx b/spacy/matcher/matcher.pyx index 4cfab915f..3d99f117a 100644 --- a/spacy/matcher/matcher.pyx +++ b/spacy/matcher/matcher.pyx @@ -213,28 +213,28 @@ cdef class Matcher: else: yield doc - def __call__(self, object doc_or_span): + def __call__(self, object obj): """Find all token sequences matching the supplied pattern. - doc_or_span (Doc or Span): The document to match over. + obj (Doc / Span): The document to match over. RETURNS (list): A list of `(key, start, end)` tuples, describing the matches. A match tuple describes a span `doc[start:end]`. The `label_id` and `key` are both integers. """ - if isinstance(doc_or_span, Doc): - doc = doc_or_span + if isinstance(obj, Doc): + doc = obj length = len(doc) - elif isinstance(doc_or_span, Span): - doc = doc_or_span.doc - length = doc_or_span.end - doc_or_span.start + elif isinstance(obj, Span): + doc = obj.doc + length = obj.end - obj.start else: - raise ValueError(Errors.E195.format(good="Doc or Span", got=type(doc_or_span).__name__)) + raise ValueError(Errors.E195.format(good="Doc or Span", got=type(obj).__name__)) if len(set([LEMMA, POS, TAG]) & self._seen_attrs) > 0 \ and not doc.is_tagged: raise ValueError(Errors.E155.format()) if DEP in self._seen_attrs and not doc.is_parsed: raise ValueError(Errors.E156.format()) - matches = find_matches(&self.patterns[0], self.patterns.size(), doc_or_span, length, + matches = find_matches(&self.patterns[0], self.patterns.size(), obj, length, extensions=self._extensions, predicates=self._extra_predicates) for i, (key, start, end) in enumerate(matches): on_match = self._callbacks.get(key, None) @@ -257,7 +257,7 @@ def unpickle_matcher(vocab, patterns, callbacks): return matcher -cdef find_matches(TokenPatternC** patterns, int n, object doc_or_span, int length, extensions=None, predicates=tuple()): +cdef find_matches(TokenPatternC** patterns, int n, object obj, int length, extensions=None, predicates=tuple()): """Find matches in a doc, with a compiled array of patterns. Matches are returned as a list of (id, start, end) tuples. 
@@ -286,7 +286,7 @@ cdef find_matches(TokenPatternC** patterns, int n, object doc_or_span, int lengt else: nr_extra_attr = 0 extra_attr_values = mem.alloc(length, sizeof(attr_t)) - for i, token in enumerate(doc_or_span): + for i, token in enumerate(obj): for name, index in extensions.items(): value = token._.get(name) if isinstance(value, basestring): @@ -298,7 +298,7 @@ cdef find_matches(TokenPatternC** patterns, int n, object doc_or_span, int lengt for j in range(n): states.push_back(PatternStateC(patterns[j], i, 0)) transition_states(states, matches, predicate_cache, - doc_or_span[i], extra_attr_values, predicates) + obj[i], extra_attr_values, predicates) extra_attr_values += nr_extra_attr predicate_cache += len(predicates) # Handle matches that end in 0-width patterns From c7c4cd5fe13ccae97a4cb9ee211226dfd129a941 Mon Sep 17 00:00:00 2001 From: Kevin Lu Date: Wed, 20 May 2020 09:11:32 -0700 Subject: [PATCH 06/13] Changed pyate code example in universe.json --- website/meta/universe.json | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/website/meta/universe.json b/website/meta/universe.json index 724dc3d07..857e26813 100644 --- a/website/meta/universe.json +++ b/website/meta/universe.json @@ -2181,19 +2181,23 @@ "github": "kevinlu1248/pyate", "pip": "pyate", "code_example": [ - "from pyate import combo_basic", - "", - "# source: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC1994795/", - "string = 'Central to the development of cancer are genetic changes that endow these “cancer cells” with many of the hallmarks of cancer, such as self-sufficient growth and resistance to anti-growth and pro-death signals. However, while the genetic changes that occur within cancer cells themselves, such as activated oncogenes or dysfunctional tumor suppressors, are responsible for many aspects of cancer development, they are not sufficient. Tumor promotion and progression are dependent on ancillary processes provided by cells of the tumor environment but that are not necessarily cancerous themselves. Inflammation has long been associated with the development of cancer. This review will discuss the reflexive relationship between cancer and inflammation with particular focus on how considering the role of inflammation in physiologic processes such as the maintenance of tissue homeostasis and repair may provide a logical framework for understanding the connection between the inflammatory response and cancer.'", - "", - "print(combo_basic(string).sort_values(ascending=False).head(5))", - "\"\"\"\"\"\"", - "dysfunctional tumor 1.443147", - "tumor suppressors 1.443147", - "genetic changes 1.386294", - "cancer cells 1.386294", - "dysfunctional tumor suppressors 1.298612", - "\"\"\"\"\"\"" + "import spacy", + "from pyate.term_extraction_pipeline import TermExtractionPipeline", + "", + "nlp = spacy.load('en_core_web_sm')", + "nlp.add_pipe(TermExtractionPipeline())", + "# source: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC1994795/", + "string = 'Central to the development of cancer are genetic changes that endow these “cancer cells” with many of the hallmarks of cancer, such as self-sufficient growth and resistance to anti-growth and pro-death signals. However, while the genetic changes that occur within cancer cells themselves, such as activated oncogenes or dysfunctional tumor suppressors, are responsible for many aspects of cancer development, they are not sufficient. 
Tumor promotion and progression are dependent on ancillary processes provided by cells of the tumor environment but that are not necessarily cancerous themselves. Inflammation has long been associated with the development of cancer. This review will discuss the reflexive relationship between cancer and inflammation with particular focus on how considering the role of inflammation in physiologic processes such as the maintenance of tissue homeostasis and repair may provide a logical framework for understanding the connection between the inflammatory response and cancer.'", + "", + "doc = nlp(string)", + "print(doc._.combo_basic.sort_values(ascending=False).head(5))", + "\"\"\"\"\"\"", + "dysfunctional tumor 1.443147", + "tumor suppressors 1.443147", + "genetic changes 1.386294", + "cancer cells 1.386294", + "dysfunctional tumor suppressors 1.298612", + "\"\"\"\"\"\"" ], "code_language": "python", "url": "https://github.com/kevinlu1248/pyate", From 56de520afd2276e80f634ceb01e8c5a51ea64bb5 Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Thu, 21 May 2020 14:04:57 +0200 Subject: [PATCH 07/13] Try to fix tests on Travis (2.7) --- spacy/lang/hy/examples.py | 1 + spacy/lang/hy/lex_attrs.py | 1 + spacy/lang/hy/stop_words.py | 3 ++- spacy/lang/zh/__init__.py | 36 ++++++++++++++------------------ spacy/tests/lang/hy/test_text.py | 1 + 5 files changed, 21 insertions(+), 21 deletions(-) diff --git a/spacy/lang/hy/examples.py b/spacy/lang/hy/examples.py index b0df31aae..d04204c55 100644 --- a/spacy/lang/hy/examples.py +++ b/spacy/lang/hy/examples.py @@ -1,3 +1,4 @@ +# coding: utf8 from __future__ import unicode_literals diff --git a/spacy/lang/hy/lex_attrs.py b/spacy/lang/hy/lex_attrs.py index 7c1b9592f..910625fb8 100644 --- a/spacy/lang/hy/lex_attrs.py +++ b/spacy/lang/hy/lex_attrs.py @@ -1,3 +1,4 @@ +# coding: utf8 from __future__ import unicode_literals from ...attrs import LIKE_NUM diff --git a/spacy/lang/hy/stop_words.py b/spacy/lang/hy/stop_words.py index c671956a4..3f2f7bb15 100644 --- a/spacy/lang/hy/stop_words.py +++ b/spacy/lang/hy/stop_words.py @@ -1,3 +1,4 @@ +# coding: utf8 from __future__ import unicode_literals @@ -105,6 +106,6 @@ STOP_WORDS = set( յուրաքանչյուր այս մեջ -թ +թ """.split() ) diff --git a/spacy/lang/zh/__init__.py b/spacy/lang/zh/__init__.py index ed0b3eb74..508c5a03f 100644 --- a/spacy/lang/zh/__init__.py +++ b/spacy/lang/zh/__init__.py @@ -109,6 +109,7 @@ class ChineseTokenizer(DummyTokenizer): if reset: try: import pkuseg + self.pkuseg_seg.preprocesser = pkuseg.Preprocesser(None) except ImportError: if self.use_pkuseg: @@ -118,7 +119,7 @@ class ChineseTokenizer(DummyTokenizer): ) raise ImportError(msg) for word in words: - self.pkuseg_seg.preprocesser.insert(word.strip(), '') + self.pkuseg_seg.preprocesser.insert(word.strip(), "") def _get_config(self): config = OrderedDict( @@ -168,21 +169,19 @@ class ChineseTokenizer(DummyTokenizer): return util.to_bytes(serializers, []) def from_bytes(self, data, **kwargs): - pkuseg_features_b = b"" - pkuseg_weights_b = b"" - pkuseg_processors_data = None + data = {"features_b": b"", "weights_b": b"", "processors_data": None} + # pkuseg_features_b = b"" + # pkuseg_weights_b = b"" + # pkuseg_processors_data = None def deserialize_pkuseg_features(b): - nonlocal pkuseg_features_b - pkuseg_features_b = b + data["features_b"] = b def deserialize_pkuseg_weights(b): - nonlocal pkuseg_weights_b - pkuseg_weights_b = b + data["weights_b"] = b def deserialize_pkuseg_processors(b): - nonlocal pkuseg_processors_data - pkuseg_processors_data = 
srsly.msgpack_loads(b) + data["processors_data"] = srsly.msgpack_loads(b) deserializers = OrderedDict( ( @@ -194,13 +193,13 @@ class ChineseTokenizer(DummyTokenizer): ) util.from_bytes(data, deserializers, []) - if pkuseg_features_b and pkuseg_weights_b: + if data["features_b"] and data["weights_b"]: with tempfile.TemporaryDirectory() as tempdir: tempdir = Path(tempdir) with open(tempdir / "features.pkl", "wb") as fileh: - fileh.write(pkuseg_features_b) + fileh.write(data["features_b"]) with open(tempdir / "weights.npz", "wb") as fileh: - fileh.write(pkuseg_weights_b) + fileh.write(data["weights_b"]) try: import pkuseg except ImportError: @@ -209,13 +208,10 @@ class ChineseTokenizer(DummyTokenizer): + _PKUSEG_INSTALL_MSG ) self.pkuseg_seg = pkuseg.pkuseg(str(tempdir)) - if pkuseg_processors_data: - ( - user_dict, - do_process, - common_words, - other_words, - ) = pkuseg_processors_data + if data["processors_data"]: + (user_dict, do_process, common_words, other_words) = data[ + "processors_data" + ] self.pkuseg_seg.preprocesser = pkuseg.Preprocesser(user_dict) self.pkuseg_seg.postprocesser.do_process = do_process self.pkuseg_seg.postprocesser.common_words = set(common_words) diff --git a/spacy/tests/lang/hy/test_text.py b/spacy/tests/lang/hy/test_text.py index 6b785bdfc..cbdb77e4e 100644 --- a/spacy/tests/lang/hy/test_text.py +++ b/spacy/tests/lang/hy/test_text.py @@ -1,3 +1,4 @@ +# coding: utf8 from __future__ import unicode_literals import pytest From d8f3190c0a265033ca367097e00cbf085b34615a Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Thu, 21 May 2020 14:14:01 +0200 Subject: [PATCH 08/13] Tidy up and auto-format --- spacy/cli/debug_data.py | 11 ++++++++--- spacy/cli/init_model.py | 7 ++++++- spacy/errors.py | 3 ++- spacy/lang/da/__init__.py | 1 - spacy/lang/de/stop_words.py | 2 +- spacy/lang/en/tokenizer_exceptions.py | 2 +- spacy/lang/es/punctuation.py | 1 - spacy/lang/fr/tokenizer_exceptions.py | 2 +- spacy/lang/gu/stop_words.py | 14 +++++++------- spacy/lang/hy/__init__.py | 5 +++-- spacy/lang/hy/examples.py | 2 +- spacy/lang/hy/lex_attrs.py | 1 + spacy/lang/hy/stop_words.py | 4 ++-- spacy/lang/hy/tag_map.py | 12 ++++++------ spacy/lang/ml/lex_attrs.py | 2 +- spacy/lang/ml/stop_words.py | 1 - spacy/lang/pl/__init__.py | 2 +- spacy/lang/pl/lemmatizer.py | 1 - spacy/lang/pl/punctuation.py | 4 +++- spacy/lang/sv/lex_attrs.py | 2 +- spacy/lang/ur/tag_map.py | 1 - spacy/lang/zh/__init__.py | 3 ++- spacy/language.py | 8 ++++++-- spacy/tests/conftest.py | 9 +++++++-- spacy/tests/doc/test_creation.py | 12 +++++++++--- spacy/tests/doc/test_token_api.py | 2 ++ spacy/tests/lang/de/test_noun_chunks.py | 4 ++-- spacy/tests/lang/el/test_noun_chunks.py | 4 ++-- spacy/tests/lang/en/test_noun_chunks.py | 4 ++-- spacy/tests/lang/es/test_noun_chunks.py | 4 ++-- spacy/tests/lang/es/test_text.py | 2 +- spacy/tests/lang/fr/test_noun_chunks.py | 4 ++-- spacy/tests/lang/gu/test_text.py | 7 +++---- spacy/tests/lang/id/test_noun_chunks.py | 4 ++-- spacy/tests/lang/ml/test_text.py | 11 ++++++++++- spacy/tests/lang/nb/test_noun_chunks.py | 4 ++-- spacy/tests/lang/sv/test_noun_chunks.py | 4 ++-- spacy/tests/lang/zh/test_serialize.py | 12 +++++++++++- spacy/tests/lang/zh/test_tokenizer.py | 8 ++++++-- spacy/tests/matcher/test_matcher_api.py | 6 +++--- spacy/tests/pipeline/test_sentencizer.py | 4 +++- .../serialize/test_serialize_vocab_strings.py | 14 +++++++++----- spacy/tests/test_gold.py | 4 ++-- spacy/tests/vocab_vectors/test_vectors.py | 3 ++- spacy/util.py | 2 +- 45 files changed, 138 insertions(+), 81 
deletions(-) diff --git a/spacy/cli/debug_data.py b/spacy/cli/debug_data.py index 279f34f16..7a4a093e2 100644 --- a/spacy/cli/debug_data.py +++ b/spacy/cli/debug_data.py @@ -187,12 +187,17 @@ def debug_data( n_missing_vectors = sum(gold_train_data["words_missing_vectors"].values()) msg.warn( "{} words in training data without vectors ({:0.2f}%)".format( - n_missing_vectors, - n_missing_vectors / gold_train_data["n_words"], + n_missing_vectors, n_missing_vectors / gold_train_data["n_words"], ), ) msg.text( - "10 most common words without vectors: {}".format(_format_labels(gold_train_data["words_missing_vectors"].most_common(10), counts=True)), show=verbose, + "10 most common words without vectors: {}".format( + _format_labels( + gold_train_data["words_missing_vectors"].most_common(10), + counts=True, + ) + ), + show=verbose, ) else: msg.info("No word vectors present in the model") diff --git a/spacy/cli/init_model.py b/spacy/cli/init_model.py index 537afd10f..edbd5dff7 100644 --- a/spacy/cli/init_model.py +++ b/spacy/cli/init_model.py @@ -49,7 +49,12 @@ DEFAULT_OOV_PROB = -20 str, ), model_name=("Optional name for the model meta", "option", "mn", str), - base_model=("Base model (for languages with custom tokenizers)", "option", "b", str), + base_model=( + "Base model (for languages with custom tokenizers)", + "option", + "b", + str, + ), ) def init_model( lang, diff --git a/spacy/errors.py b/spacy/errors.py index f0b8592df..0750ab616 100644 --- a/spacy/errors.py +++ b/spacy/errors.py @@ -8,7 +8,7 @@ def add_codes(err_cls): class ErrorsWithCodes(err_cls): def __getattribute__(self, code): msg = super().__getattribute__(code) - if code.startswith('__'): # python system attributes like __class__ + if code.startswith("__"): # python system attributes like __class__ return msg else: return "[{code}] {msg}".format(code=code, msg=msg) @@ -116,6 +116,7 @@ class Warnings(object): " to check the alignment. Misaligned entities ('-') will be " "ignored during training.") + @add_codes class Errors(object): E001 = ("No component '{name}' found in pipeline. 
Available names: {opts}") diff --git a/spacy/lang/da/__init__.py b/spacy/lang/da/__init__.py index 92eec44b2..0190656e5 100644 --- a/spacy/lang/da/__init__.py +++ b/spacy/lang/da/__init__.py @@ -9,7 +9,6 @@ from .morph_rules import MORPH_RULES from ..tag_map import TAG_MAP from ..tokenizer_exceptions import BASE_EXCEPTIONS -from ..norm_exceptions import BASE_NORMS from ...language import Language from ...attrs import LANG from ...util import update_exc diff --git a/spacy/lang/de/stop_words.py b/spacy/lang/de/stop_words.py index 69134124f..0c8b375e0 100644 --- a/spacy/lang/de/stop_words.py +++ b/spacy/lang/de/stop_words.py @@ -47,7 +47,7 @@ kleines kommen kommt können könnt konnte könnte konnten kurz lang lange leicht leider lieber los machen macht machte mag magst man manche manchem manchen mancher manches mehr -mein meine meinem meinen meiner meines mich mir mit mittel mochte möchte mochten +mein meine meinem meinen meiner meines mich mir mit mittel mochte möchte mochten mögen möglich mögt morgen muss muß müssen musst müsst musste mussten na nach nachdem nahm natürlich neben nein neue neuen neun neunte neunten neunter diff --git a/spacy/lang/en/tokenizer_exceptions.py b/spacy/lang/en/tokenizer_exceptions.py index 62de81912..6a553052b 100644 --- a/spacy/lang/en/tokenizer_exceptions.py +++ b/spacy/lang/en/tokenizer_exceptions.py @@ -197,7 +197,7 @@ for word in ["who", "what", "when", "where", "why", "how", "there", "that"]: _exc[orth + "d"] = [ {ORTH: orth, LEMMA: word, NORM: word}, - {ORTH: "d", NORM: "'d"} + {ORTH: "d", NORM: "'d"}, ] _exc[orth + "'d've"] = [ diff --git a/spacy/lang/es/punctuation.py b/spacy/lang/es/punctuation.py index 42335237c..f989221c2 100644 --- a/spacy/lang/es/punctuation.py +++ b/spacy/lang/es/punctuation.py @@ -5,7 +5,6 @@ from ..char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES from ..char_classes import LIST_ICONS, CURRENCY, LIST_UNITS, PUNCT from ..char_classes import CONCAT_QUOTES, ALPHA_LOWER, ALPHA_UPPER, ALPHA from ..char_classes import merge_chars -from ..punctuation import TOKENIZER_PREFIXES as BASE_TOKENIZER_PREFIXES _list_units = [u for u in LIST_UNITS if u != "%"] diff --git a/spacy/lang/fr/tokenizer_exceptions.py b/spacy/lang/fr/tokenizer_exceptions.py index cb1702300..4eb4c1568 100644 --- a/spacy/lang/fr/tokenizer_exceptions.py +++ b/spacy/lang/fr/tokenizer_exceptions.py @@ -461,5 +461,5 @@ _regular_exp.append(URL_PATTERN) TOKENIZER_EXCEPTIONS = _exc TOKEN_MATCH = re.compile( - "(?iu)" + "|".join("(?:{})".format(m) for m in _regular_exp) + "(?iu)" + "|".join("(?:{})".format(m) for m in _regular_exp) ).match diff --git a/spacy/lang/gu/stop_words.py b/spacy/lang/gu/stop_words.py index f641b5720..85d33763d 100644 --- a/spacy/lang/gu/stop_words.py +++ b/spacy/lang/gu/stop_words.py @@ -3,7 +3,7 @@ from __future__ import unicode_literals STOP_WORDS = set( """ -એમ +એમ આ એ રહી @@ -24,7 +24,7 @@ STOP_WORDS = set( તેમને તેમના તેમણે -તેમનું +તેમનું તેમાં અને અહીં @@ -33,12 +33,12 @@ STOP_WORDS = set( થાય જે ને -કે +કે ના ની નો ને -નું +નું શું માં પણ @@ -69,12 +69,12 @@ STOP_WORDS = set( કોઈ કેમ કર્યો -કર્યુ +કર્યુ કરે સૌથી -ત્યારબાદ +ત્યારબાદ તથા -દ્વારા +દ્વારા જુઓ જાઓ જ્યારે diff --git a/spacy/lang/hy/__init__.py b/spacy/lang/hy/__init__.py index 3320edb6c..6aaa965bb 100644 --- a/spacy/lang/hy/__init__.py +++ b/spacy/lang/hy/__init__.py @@ -1,11 +1,12 @@ +# coding: utf8 +from __future__ import unicode_literals + from .stop_words import STOP_WORDS from .lex_attrs import LEX_ATTRS from .tag_map import TAG_MAP - from ...attrs import LANG from 
...language import Language -from ...tokens import Doc class ArmenianDefaults(Language.Defaults): diff --git a/spacy/lang/hy/examples.py b/spacy/lang/hy/examples.py index b0df31aae..323f77b1c 100644 --- a/spacy/lang/hy/examples.py +++ b/spacy/lang/hy/examples.py @@ -1,6 +1,6 @@ +# coding: utf8 from __future__ import unicode_literals - """ Example sentences to test spaCy and its language models. >>> from spacy.lang.hy.examples import sentences diff --git a/spacy/lang/hy/lex_attrs.py b/spacy/lang/hy/lex_attrs.py index 7c1b9592f..910625fb8 100644 --- a/spacy/lang/hy/lex_attrs.py +++ b/spacy/lang/hy/lex_attrs.py @@ -1,3 +1,4 @@ +# coding: utf8 from __future__ import unicode_literals from ...attrs import LIKE_NUM diff --git a/spacy/lang/hy/stop_words.py b/spacy/lang/hy/stop_words.py index c671956a4..d75aad6e2 100644 --- a/spacy/lang/hy/stop_words.py +++ b/spacy/lang/hy/stop_words.py @@ -1,6 +1,6 @@ +# coding: utf8 from __future__ import unicode_literals - STOP_WORDS = set( """ նա @@ -105,6 +105,6 @@ STOP_WORDS = set( յուրաքանչյուր այս մեջ -թ +թ """.split() ) diff --git a/spacy/lang/hy/tag_map.py b/spacy/lang/hy/tag_map.py index 90690c22e..722270110 100644 --- a/spacy/lang/hy/tag_map.py +++ b/spacy/lang/hy/tag_map.py @@ -1,7 +1,7 @@ # coding: utf8 from __future__ import unicode_literals -from ...symbols import POS, SYM, ADJ, NUM, DET, ADV, ADP, X, VERB, NOUN +from ...symbols import POS, ADJ, NUM, DET, ADV, ADP, X, VERB, NOUN from ...symbols import PROPN, PART, INTJ, PRON, SCONJ, AUX, CCONJ TAG_MAP = { @@ -716,7 +716,7 @@ TAG_MAP = { POS: NOUN, "Animacy": "Nhum", "Case": "Dat", - "Number": "Coll", + # "Number": "Coll", "Number": "Sing", "Person": "1", }, @@ -815,7 +815,7 @@ TAG_MAP = { "Animacy": "Nhum", "Case": "Nom", "Definite": "Def", - "Number": "Plur", + # "Number": "Plur", "Number": "Sing", "Poss": "Yes", }, @@ -880,7 +880,7 @@ TAG_MAP = { POS: NOUN, "Animacy": "Nhum", "Case": "Nom", - "Number": "Plur", + # "Number": "Plur", "Number": "Sing", "Person": "2", }, @@ -1223,9 +1223,9 @@ TAG_MAP = { "PRON_Case=Nom|Number=Sing|Number=Plur|Person=3|Person=1|PronType=Emp": { POS: PRON, "Case": "Nom", - "Number": "Sing", + # "Number": "Sing", "Number": "Plur", - "Person": "3", + # "Person": "3", "Person": "1", "PronType": "Emp", }, diff --git a/spacy/lang/ml/lex_attrs.py b/spacy/lang/ml/lex_attrs.py index 345da8126..468ad88f8 100644 --- a/spacy/lang/ml/lex_attrs.py +++ b/spacy/lang/ml/lex_attrs.py @@ -55,7 +55,7 @@ _num_words = [ "തൊണ്ണൂറ് ", "നുറ് ", "ആയിരം ", - "പത്തുലക്ഷം" + "പത്തുലക്ഷം", ] diff --git a/spacy/lang/ml/stop_words.py b/spacy/lang/ml/stop_words.py index 4012571bc..8bd6a7e02 100644 --- a/spacy/lang/ml/stop_words.py +++ b/spacy/lang/ml/stop_words.py @@ -3,7 +3,6 @@ from __future__ import unicode_literals STOP_WORDS = set( - """ അത് ഇത് diff --git a/spacy/lang/pl/__init__.py b/spacy/lang/pl/__init__.py index 61608a3d9..52b662a90 100644 --- a/spacy/lang/pl/__init__.py +++ b/spacy/lang/pl/__init__.py @@ -12,7 +12,7 @@ from ..tokenizer_exceptions import BASE_EXCEPTIONS from ..norm_exceptions import BASE_NORMS from ...language import Language from ...attrs import LANG, NORM -from ...util import update_exc, add_lookups +from ...util import add_lookups from ...lookups import Lookups diff --git a/spacy/lang/pl/lemmatizer.py b/spacy/lang/pl/lemmatizer.py index 2be4b0fb7..cd555b9c2 100644 --- a/spacy/lang/pl/lemmatizer.py +++ b/spacy/lang/pl/lemmatizer.py @@ -3,7 +3,6 @@ from __future__ import unicode_literals from ...lemmatizer import Lemmatizer from ...parts_of_speech import NAMES -from 
...errors import Errors class PolishLemmatizer(Lemmatizer): diff --git a/spacy/lang/pl/punctuation.py b/spacy/lang/pl/punctuation.py index aa8adac29..c87464b1b 100644 --- a/spacy/lang/pl/punctuation.py +++ b/spacy/lang/pl/punctuation.py @@ -8,7 +8,9 @@ from ..punctuation import TOKENIZER_PREFIXES as BASE_TOKENIZER_PREFIXES _quotes = CONCAT_QUOTES.replace("'", "") -_prefixes = _prefixes = [r"(długo|krótko|jedno|dwu|trzy|cztero)-"] + BASE_TOKENIZER_PREFIXES +_prefixes = _prefixes = [ + r"(długo|krótko|jedno|dwu|trzy|cztero)-" +] + BASE_TOKENIZER_PREFIXES _infixes = ( LIST_ELLIPSES diff --git a/spacy/lang/sv/lex_attrs.py b/spacy/lang/sv/lex_attrs.py index 4b5278c7b..24d06a97a 100644 --- a/spacy/lang/sv/lex_attrs.py +++ b/spacy/lang/sv/lex_attrs.py @@ -40,7 +40,7 @@ _num_words = [ "miljard", "biljon", "biljard", - "kvadriljon" + "kvadriljon", ] diff --git a/spacy/lang/ur/tag_map.py b/spacy/lang/ur/tag_map.py index eebd3a14a..aad548e9b 100644 --- a/spacy/lang/ur/tag_map.py +++ b/spacy/lang/ur/tag_map.py @@ -38,7 +38,6 @@ TAG_MAP = { "NNPC": {POS: PROPN}, "NNC": {POS: NOUN}, "PSP": {POS: ADP}, - ".": {POS: PUNCT}, ",": {POS: PUNCT}, "-LRB-": {POS: PUNCT}, diff --git a/spacy/lang/zh/__init__.py b/spacy/lang/zh/__init__.py index ed0b3eb74..a877169a2 100644 --- a/spacy/lang/zh/__init__.py +++ b/spacy/lang/zh/__init__.py @@ -109,6 +109,7 @@ class ChineseTokenizer(DummyTokenizer): if reset: try: import pkuseg + self.pkuseg_seg.preprocesser = pkuseg.Preprocesser(None) except ImportError: if self.use_pkuseg: @@ -118,7 +119,7 @@ class ChineseTokenizer(DummyTokenizer): ) raise ImportError(msg) for word in words: - self.pkuseg_seg.preprocesser.insert(word.strip(), '') + self.pkuseg_seg.preprocesser.insert(word.strip(), "") def _get_config(self): config = OrderedDict( diff --git a/spacy/language.py b/spacy/language.py index 703806627..0e5c46459 100644 --- a/spacy/language.py +++ b/spacy/language.py @@ -79,7 +79,9 @@ class BaseDefaults(object): lookups=lookups, ) vocab.lex_attr_getters[NORM] = util.add_lookups( - vocab.lex_attr_getters.get(NORM, LEX_ATTRS[NORM]), BASE_NORMS, vocab.lookups.get_table("lexeme_norm") + vocab.lex_attr_getters.get(NORM, LEX_ATTRS[NORM]), + BASE_NORMS, + vocab.lookups.get_table("lexeme_norm"), ) for tag_str, exc in cls.morph_rules.items(): for orth_str, attrs in exc.items(): @@ -974,7 +976,9 @@ class Language(object): serializers = OrderedDict() serializers["vocab"] = lambda: self.vocab.to_bytes() serializers["tokenizer"] = lambda: self.tokenizer.to_bytes(exclude=["vocab"]) - serializers["meta.json"] = lambda: srsly.json_dumps(OrderedDict(sorted(self.meta.items()))) + serializers["meta.json"] = lambda: srsly.json_dumps( + OrderedDict(sorted(self.meta.items())) + ) for name, proc in self.pipeline: if name in exclude: continue diff --git a/spacy/tests/conftest.py b/spacy/tests/conftest.py index d26f0ce5c..63bbf2e0a 100644 --- a/spacy/tests/conftest.py +++ b/spacy/tests/conftest.py @@ -112,6 +112,7 @@ def ga_tokenizer(): def gu_tokenizer(): return get_lang_class("gu").Defaults.create_tokenizer() + @pytest.fixture(scope="session") def he_tokenizer(): return get_lang_class("he").Defaults.create_tokenizer() @@ -246,7 +247,9 @@ def yo_tokenizer(): @pytest.fixture(scope="session") def zh_tokenizer_char(): - return get_lang_class("zh").Defaults.create_tokenizer(config={"use_jieba": False, "use_pkuseg": False}) + return get_lang_class("zh").Defaults.create_tokenizer( + config={"use_jieba": False, "use_pkuseg": False} + ) @pytest.fixture(scope="session") @@ -258,7 +261,9 @@ def 
zh_tokenizer_jieba(): @pytest.fixture(scope="session") def zh_tokenizer_pkuseg(): pytest.importorskip("pkuseg") - return get_lang_class("zh").Defaults.create_tokenizer(config={"pkuseg_model": "default", "use_jieba": False, "use_pkuseg": True}) + return get_lang_class("zh").Defaults.create_tokenizer( + config={"pkuseg_model": "default", "use_jieba": False, "use_pkuseg": True} + ) @pytest.fixture(scope="session") diff --git a/spacy/tests/doc/test_creation.py b/spacy/tests/doc/test_creation.py index 8f543e86a..863a7c210 100644 --- a/spacy/tests/doc/test_creation.py +++ b/spacy/tests/doc/test_creation.py @@ -50,7 +50,9 @@ def test_create_from_words_and_text(vocab): assert [t.text for t in doc] == [" ", "'", "dogs", "'", "\n\n", "run", " "] assert [t.whitespace_ for t in doc] == ["", "", "", "", "", " ", ""] assert doc.text == text - assert [t.text for t in doc if not t.text.isspace()] == [word for word in words if not word.isspace()] + assert [t.text for t in doc if not t.text.isspace()] == [ + word for word in words if not word.isspace() + ] # partial whitespace in words words = [" ", "'", "dogs", "'", "\n\n", "run", " "] @@ -60,7 +62,9 @@ def test_create_from_words_and_text(vocab): assert [t.text for t in doc] == [" ", "'", "dogs", "'", "\n\n", "run", " "] assert [t.whitespace_ for t in doc] == ["", "", "", "", "", " ", ""] assert doc.text == text - assert [t.text for t in doc if not t.text.isspace()] == [word for word in words if not word.isspace()] + assert [t.text for t in doc if not t.text.isspace()] == [ + word for word in words if not word.isspace() + ] # non-standard whitespace tokens words = [" ", " ", "'", "dogs", "'", "\n\n", "run"] @@ -70,7 +74,9 @@ def test_create_from_words_and_text(vocab): assert [t.text for t in doc] == [" ", "'", "dogs", "'", "\n\n", "run", " "] assert [t.whitespace_ for t in doc] == ["", "", "", "", "", " ", ""] assert doc.text == text - assert [t.text for t in doc if not t.text.isspace()] == [word for word in words if not word.isspace()] + assert [t.text for t in doc if not t.text.isspace()] == [ + word for word in words if not word.isspace() + ] # mismatch between words and text with pytest.raises(ValueError): diff --git a/spacy/tests/doc/test_token_api.py b/spacy/tests/doc/test_token_api.py index 1c2253dfa..4dcd07ad9 100644 --- a/spacy/tests/doc/test_token_api.py +++ b/spacy/tests/doc/test_token_api.py @@ -181,6 +181,7 @@ def test_is_sent_start(en_tokenizer): doc.is_parsed = True assert len(list(doc.sents)) == 2 + def test_is_sent_end(en_tokenizer): doc = en_tokenizer("This is a sentence. This is another.") assert doc[4].is_sent_end is None @@ -213,6 +214,7 @@ def test_token0_has_sent_start_true(): assert doc[1].is_sent_start is None assert not doc.is_sentenced + def test_tokenlast_has_sent_end_true(): doc = Doc(Vocab(), words=["hello", "world"]) assert doc[0].is_sent_end is None diff --git a/spacy/tests/lang/de/test_noun_chunks.py b/spacy/tests/lang/de/test_noun_chunks.py index 12ece84b5..8d76ddd79 100644 --- a/spacy/tests/lang/de/test_noun_chunks.py +++ b/spacy/tests/lang/de/test_noun_chunks.py @@ -5,9 +5,9 @@ import pytest def test_noun_chunks_is_parsed_de(de_tokenizer): - """Test that noun_chunks raises Value Error for 'de' language if Doc is not parsed. + """Test that noun_chunks raises Value Error for 'de' language if Doc is not parsed. To check this test, we're constructing a Doc - with a new Vocab here and forcing is_parsed to 'False' + with a new Vocab here and forcing is_parsed to 'False' to make sure the noun chunks don't run. 
""" doc = de_tokenizer("Er lag auf seinem") diff --git a/spacy/tests/lang/el/test_noun_chunks.py b/spacy/tests/lang/el/test_noun_chunks.py index be14acc81..4f24865d0 100644 --- a/spacy/tests/lang/el/test_noun_chunks.py +++ b/spacy/tests/lang/el/test_noun_chunks.py @@ -5,9 +5,9 @@ import pytest def test_noun_chunks_is_parsed_el(el_tokenizer): - """Test that noun_chunks raises Value Error for 'el' language if Doc is not parsed. + """Test that noun_chunks raises Value Error for 'el' language if Doc is not parsed. To check this test, we're constructing a Doc - with a new Vocab here and forcing is_parsed to 'False' + with a new Vocab here and forcing is_parsed to 'False' to make sure the noun chunks don't run. """ doc = el_tokenizer("είναι χώρα της νοτιοανατολικής") diff --git a/spacy/tests/lang/en/test_noun_chunks.py b/spacy/tests/lang/en/test_noun_chunks.py index 1109af150..ff67986a5 100644 --- a/spacy/tests/lang/en/test_noun_chunks.py +++ b/spacy/tests/lang/en/test_noun_chunks.py @@ -13,9 +13,9 @@ from ...util import get_doc def test_noun_chunks_is_parsed(en_tokenizer): - """Test that noun_chunks raises Value Error for 'en' language if Doc is not parsed. + """Test that noun_chunks raises Value Error for 'en' language if Doc is not parsed. To check this test, we're constructing a Doc - with a new Vocab here and forcing is_parsed to 'False' + with a new Vocab here and forcing is_parsed to 'False' to make sure the noun chunks don't run. """ doc = en_tokenizer("This is a sentence") diff --git a/spacy/tests/lang/es/test_noun_chunks.py b/spacy/tests/lang/es/test_noun_chunks.py index 71069d313..66bbd8c3a 100644 --- a/spacy/tests/lang/es/test_noun_chunks.py +++ b/spacy/tests/lang/es/test_noun_chunks.py @@ -5,9 +5,9 @@ import pytest def test_noun_chunks_is_parsed_es(es_tokenizer): - """Test that noun_chunks raises Value Error for 'es' language if Doc is not parsed. + """Test that noun_chunks raises Value Error for 'es' language if Doc is not parsed. To check this test, we're constructing a Doc - with a new Vocab here and forcing is_parsed to 'False' + with a new Vocab here and forcing is_parsed to 'False' to make sure the noun chunks don't run. """ doc = es_tokenizer("en Oxford este verano") diff --git a/spacy/tests/lang/es/test_text.py b/spacy/tests/lang/es/test_text.py index e237f922d..999e788dd 100644 --- a/spacy/tests/lang/es/test_text.py +++ b/spacy/tests/lang/es/test_text.py @@ -62,4 +62,4 @@ def test_lex_attrs_like_number(es_tokenizer, text, match): @pytest.mark.parametrize("word", ["once"]) def test_es_lex_attrs_capitals(word): assert like_num(word) - assert like_num(word.upper()) \ No newline at end of file + assert like_num(word.upper()) diff --git a/spacy/tests/lang/fr/test_noun_chunks.py b/spacy/tests/lang/fr/test_noun_chunks.py index 876bc0ea4..ea93a5a35 100644 --- a/spacy/tests/lang/fr/test_noun_chunks.py +++ b/spacy/tests/lang/fr/test_noun_chunks.py @@ -5,9 +5,9 @@ import pytest def test_noun_chunks_is_parsed_fr(fr_tokenizer): - """Test that noun_chunks raises Value Error for 'fr' language if Doc is not parsed. + """Test that noun_chunks raises Value Error for 'fr' language if Doc is not parsed. To check this test, we're constructing a Doc - with a new Vocab here and forcing is_parsed to 'False' + with a new Vocab here and forcing is_parsed to 'False' to make sure the noun chunks don't run. 
""" doc = fr_tokenizer("trouver des travaux antérieurs") diff --git a/spacy/tests/lang/gu/test_text.py b/spacy/tests/lang/gu/test_text.py index 9f3ae45a4..aa8d442a2 100644 --- a/spacy/tests/lang/gu/test_text.py +++ b/spacy/tests/lang/gu/test_text.py @@ -3,17 +3,16 @@ from __future__ import unicode_literals import pytest + def test_gu_tokenizer_handlers_long_text(gu_tokenizer): text = """પશ્ચિમ ભારતમાં આવેલું ગુજરાત રાજ્ય જે વ્યક્તિઓની માતૃભૂમિ છે""" tokens = gu_tokenizer(text) assert len(tokens) == 9 + @pytest.mark.parametrize( "text,length", - [ - ("ગુજરાતીઓ ખાવાના શોખીન માનવામાં આવે છે", 6), - ("ખેતરની ખેડ કરવામાં આવે છે.", 5), - ], + [("ગુજરાતીઓ ખાવાના શોખીન માનવામાં આવે છે", 6), ("ખેતરની ખેડ કરવામાં આવે છે.", 5)], ) def test_gu_tokenizer_handles_cnts(gu_tokenizer, text, length): tokens = gu_tokenizer(text) diff --git a/spacy/tests/lang/id/test_noun_chunks.py b/spacy/tests/lang/id/test_noun_chunks.py index 7bac808b3..add76f9b9 100644 --- a/spacy/tests/lang/id/test_noun_chunks.py +++ b/spacy/tests/lang/id/test_noun_chunks.py @@ -5,9 +5,9 @@ import pytest def test_noun_chunks_is_parsed_id(id_tokenizer): - """Test that noun_chunks raises Value Error for 'id' language if Doc is not parsed. + """Test that noun_chunks raises Value Error for 'id' language if Doc is not parsed. To check this test, we're constructing a Doc - with a new Vocab here and forcing is_parsed to 'False' + with a new Vocab here and forcing is_parsed to 'False' to make sure the noun chunks don't run. """ doc = id_tokenizer("sebelas") diff --git a/spacy/tests/lang/ml/test_text.py b/spacy/tests/lang/ml/test_text.py index 92eca6b21..2883cf5bb 100644 --- a/spacy/tests/lang/ml/test_text.py +++ b/spacy/tests/lang/ml/test_text.py @@ -10,7 +10,16 @@ def test_ml_tokenizer_handles_long_text(ml_tokenizer): assert len(tokens) == 5 -@pytest.mark.parametrize("text,length", [("എന്നാൽ അച്ചടിയുടെ ആവിർഭാവം ലിപിയിൽ കാര്യമായ മാറ്റങ്ങൾ വരുത്തിയത് കൂട്ടക്ഷരങ്ങളെ അണുഅക്ഷരങ്ങളായി പിരിച്ചുകൊണ്ടായിരുന്നു", 10), ("പരമ്പരാഗതമായി മലയാളം ഇടത്തുനിന്ന് വലത്തോട്ടാണ് എഴുതുന്നത്", 5)]) +@pytest.mark.parametrize( + "text,length", + [ + ( + "എന്നാൽ അച്ചടിയുടെ ആവിർഭാവം ലിപിയിൽ കാര്യമായ മാറ്റങ്ങൾ വരുത്തിയത് കൂട്ടക്ഷരങ്ങളെ അണുഅക്ഷരങ്ങളായി പിരിച്ചുകൊണ്ടായിരുന്നു", + 10, + ), + ("പരമ്പരാഗതമായി മലയാളം ഇടത്തുനിന്ന് വലത്തോട്ടാണ് എഴുതുന്നത്", 5), + ], +) def test_ml_tokenizer_handles_cnts(ml_tokenizer, text, length): tokens = ml_tokenizer(text) assert len(tokens) == length diff --git a/spacy/tests/lang/nb/test_noun_chunks.py b/spacy/tests/lang/nb/test_noun_chunks.py index 17ec6cfda..653491a64 100644 --- a/spacy/tests/lang/nb/test_noun_chunks.py +++ b/spacy/tests/lang/nb/test_noun_chunks.py @@ -5,9 +5,9 @@ import pytest def test_noun_chunks_is_parsed_nb(nb_tokenizer): - """Test that noun_chunks raises Value Error for 'nb' language if Doc is not parsed. + """Test that noun_chunks raises Value Error for 'nb' language if Doc is not parsed. To check this test, we're constructing a Doc - with a new Vocab here and forcing is_parsed to 'False' + with a new Vocab here and forcing is_parsed to 'False' to make sure the noun chunks don't run. """ doc = nb_tokenizer("Smørsausen brukes bl.a. 
til") diff --git a/spacy/tests/lang/sv/test_noun_chunks.py b/spacy/tests/lang/sv/test_noun_chunks.py index 38086c255..a6283b65e 100644 --- a/spacy/tests/lang/sv/test_noun_chunks.py +++ b/spacy/tests/lang/sv/test_noun_chunks.py @@ -7,9 +7,9 @@ from ...util import get_doc def test_noun_chunks_is_parsed_sv(sv_tokenizer): - """Test that noun_chunks raises Value Error for 'sv' language if Doc is not parsed. + """Test that noun_chunks raises Value Error for 'sv' language if Doc is not parsed. To check this test, we're constructing a Doc - with a new Vocab here and forcing is_parsed to 'False' + with a new Vocab here and forcing is_parsed to 'False' to make sure the noun chunks don't run. """ doc = sv_tokenizer("Studenten läste den bästa boken") diff --git a/spacy/tests/lang/zh/test_serialize.py b/spacy/tests/lang/zh/test_serialize.py index 58133a88e..56f092ed8 100644 --- a/spacy/tests/lang/zh/test_serialize.py +++ b/spacy/tests/lang/zh/test_serialize.py @@ -34,5 +34,15 @@ def test_zh_tokenizer_serialize_pkuseg(zh_tokenizer_pkuseg): @pytest.mark.slow def test_zh_tokenizer_serialize_pkuseg_with_processors(zh_tokenizer_pkuseg): - nlp = Chinese(meta={"tokenizer": {"config": {"use_jieba": False, "use_pkuseg": True, "pkuseg_model": "medicine"}}}) + nlp = Chinese( + meta={ + "tokenizer": { + "config": { + "use_jieba": False, + "use_pkuseg": True, + "pkuseg_model": "medicine", + } + } + } + ) zh_tokenizer_serialize(nlp.tokenizer) diff --git a/spacy/tests/lang/zh/test_tokenizer.py b/spacy/tests/lang/zh/test_tokenizer.py index 035798aa1..28240b6a9 100644 --- a/spacy/tests/lang/zh/test_tokenizer.py +++ b/spacy/tests/lang/zh/test_tokenizer.py @@ -43,12 +43,16 @@ def test_zh_tokenizer_pkuseg(zh_tokenizer_pkuseg, text, expected_tokens): def test_zh_tokenizer_pkuseg_user_dict(zh_tokenizer_pkuseg): user_dict = _get_pkuseg_trie_data(zh_tokenizer_pkuseg.pkuseg_seg.preprocesser.trie) zh_tokenizer_pkuseg.pkuseg_update_user_dict(["nonsense_asdf"]) - updated_user_dict = _get_pkuseg_trie_data(zh_tokenizer_pkuseg.pkuseg_seg.preprocesser.trie) + updated_user_dict = _get_pkuseg_trie_data( + zh_tokenizer_pkuseg.pkuseg_seg.preprocesser.trie + ) assert len(user_dict) == len(updated_user_dict) - 1 # reset user dict zh_tokenizer_pkuseg.pkuseg_update_user_dict([], reset=True) - reset_user_dict = _get_pkuseg_trie_data(zh_tokenizer_pkuseg.pkuseg_seg.preprocesser.trie) + reset_user_dict = _get_pkuseg_trie_data( + zh_tokenizer_pkuseg.pkuseg_seg.preprocesser.trie + ) assert len(reset_user_dict) == 0 diff --git a/spacy/tests/matcher/test_matcher_api.py b/spacy/tests/matcher/test_matcher_api.py index 0295ada82..1112195da 100644 --- a/spacy/tests/matcher/test_matcher_api.py +++ b/spacy/tests/matcher/test_matcher_api.py @@ -265,15 +265,15 @@ def test_matcher_regex_shape(en_vocab): @pytest.mark.parametrize( - "cmp, bad", + "cmp, bad", [ ("==", ["a", "aaa"]), ("!=", ["aa"]), (">=", ["a"]), ("<=", ["aaa"]), (">", ["a", "aa"]), - ("<", ["aa", "aaa"]) - ] + ("<", ["aa", "aaa"]), + ], ) def test_matcher_compare_length(en_vocab, cmp, bad): matcher = Matcher(en_vocab) diff --git a/spacy/tests/pipeline/test_sentencizer.py b/spacy/tests/pipeline/test_sentencizer.py index 7e58b3e98..ee9220a29 100644 --- a/spacy/tests/pipeline/test_sentencizer.py +++ b/spacy/tests/pipeline/test_sentencizer.py @@ -106,7 +106,9 @@ def test_sentencizer_complex(en_vocab, words, sent_starts, sent_ends, n_sents): ), ], ) -def test_sentencizer_custom_punct(en_vocab, punct_chars, words, sent_starts, sent_ends, n_sents): +def test_sentencizer_custom_punct( + en_vocab, 
punct_chars, words, sent_starts, sent_ends, n_sents +): doc = Doc(en_vocab, words=words) sentencizer = Sentencizer(punct_chars=punct_chars) doc = sentencizer(doc) diff --git a/spacy/tests/serialize/test_serialize_vocab_strings.py b/spacy/tests/serialize/test_serialize_vocab_strings.py index 63faf44fc..3be0a75b3 100644 --- a/spacy/tests/serialize/test_serialize_vocab_strings.py +++ b/spacy/tests/serialize/test_serialize_vocab_strings.py @@ -37,7 +37,7 @@ def test_serialize_vocab_roundtrip_bytes(strings1, strings2): assert vocab1.to_bytes() == vocab1_b new_vocab1 = Vocab().from_bytes(vocab1_b) assert new_vocab1.to_bytes() == vocab1_b - assert len(new_vocab1.strings) == len(strings1) + 1 # adds _SP + assert len(new_vocab1.strings) == len(strings1) + 1 # adds _SP assert sorted([s for s in new_vocab1.strings]) == sorted(strings1 + ["_SP"]) @@ -56,9 +56,13 @@ def test_serialize_vocab_roundtrip_disk(strings1, strings2): assert strings1 == [s for s in vocab1_d.strings if s != "_SP"] assert strings2 == [s for s in vocab2_d.strings if s != "_SP"] if strings1 == strings2: - assert [s for s in vocab1_d.strings if s != "_SP"] == [s for s in vocab2_d.strings if s != "_SP"] + assert [s for s in vocab1_d.strings if s != "_SP"] == [ + s for s in vocab2_d.strings if s != "_SP" + ] else: - assert [s for s in vocab1_d.strings if s != "_SP"] != [s for s in vocab2_d.strings if s != "_SP"] + assert [s for s in vocab1_d.strings if s != "_SP"] != [ + s for s in vocab2_d.strings if s != "_SP" + ] @pytest.mark.parametrize("strings,lex_attr", test_strings_attrs) @@ -76,9 +80,8 @@ def test_serialize_vocab_lex_attrs_bytes(strings, lex_attr): def test_deserialize_vocab_seen_entries(strings, lex_attr): # Reported in #2153 vocab = Vocab(strings=strings) - length = len(vocab) vocab.from_bytes(vocab.to_bytes()) - assert len(vocab.strings) == len(strings) + 1 # adds _SP + assert len(vocab.strings) == len(strings) + 1 # adds _SP @pytest.mark.parametrize("strings,lex_attr", test_strings_attrs) @@ -130,6 +133,7 @@ def test_serialize_stringstore_roundtrip_disk(strings1, strings2): else: assert list(sstore1_d) != list(sstore2_d) + @pytest.mark.parametrize("strings,lex_attr", test_strings_attrs) def test_pickle_vocab(strings, lex_attr): vocab = Vocab(strings=strings) diff --git a/spacy/tests/test_gold.py b/spacy/tests/test_gold.py index 37b877561..53665d852 100644 --- a/spacy/tests/test_gold.py +++ b/spacy/tests/test_gold.py @@ -112,7 +112,7 @@ def test_gold_biluo_different_tokenization(en_vocab, en_tokenizer): data = ( "I'll return the ₹54 amount", { - "words": ["I", "'ll", "return", "the", "₹", "54", "amount",], + "words": ["I", "'ll", "return", "the", "₹", "54", "amount"], "entities": [(16, 19, "MONEY")], }, ) @@ -122,7 +122,7 @@ def test_gold_biluo_different_tokenization(en_vocab, en_tokenizer): data = ( "I'll return the $54 amount", { - "words": ["I", "'ll", "return", "the", "$", "54", "amount",], + "words": ["I", "'ll", "return", "the", "$", "54", "amount"], "entities": [(16, 19, "MONEY")], }, ) diff --git a/spacy/tests/vocab_vectors/test_vectors.py b/spacy/tests/vocab_vectors/test_vectors.py index 24eb3a1af..1821f8abc 100644 --- a/spacy/tests/vocab_vectors/test_vectors.py +++ b/spacy/tests/vocab_vectors/test_vectors.py @@ -366,6 +366,7 @@ def test_vectors_serialize(): assert row == row_r assert_equal(v.data, v_r.data) + def test_vector_is_oov(): vocab = Vocab(vectors_name="test_vocab_is_oov") data = numpy.ndarray((5, 3), dtype="f") @@ -375,4 +376,4 @@ def test_vector_is_oov(): vocab.set_vector("dog", data[1]) assert 
vocab["cat"].is_oov is True assert vocab["dog"].is_oov is True - assert vocab["hamster"].is_oov is False \ No newline at end of file + assert vocab["hamster"].is_oov is False diff --git a/spacy/util.py b/spacy/util.py index d4cdca4e0..419c99bc0 100644 --- a/spacy/util.py +++ b/spacy/util.py @@ -774,7 +774,7 @@ def get_words_and_spaces(words, text): except ValueError: raise ValueError(Errors.E194.format(text=text, words=words)) if word_start > 0: - text_words.append(text[text_pos:text_pos+word_start]) + text_words.append(text[text_pos : text_pos + word_start]) text_spaces.append(False) text_pos += word_start text_words.append(word) From 69fb4bedf20384b475779ee58521e7aa94cf4852 Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Thu, 21 May 2020 14:14:28 +0200 Subject: [PATCH 09/13] Revert "doc_or_span -> obj" This reverts commit 78bb9ff5e0e4adc01bd30e227657118d87546f83. --- spacy/matcher/matcher.pyx | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/spacy/matcher/matcher.pyx b/spacy/matcher/matcher.pyx index 3d99f117a..4cfab915f 100644 --- a/spacy/matcher/matcher.pyx +++ b/spacy/matcher/matcher.pyx @@ -213,28 +213,28 @@ cdef class Matcher: else: yield doc - def __call__(self, object obj): + def __call__(self, object doc_or_span): """Find all token sequences matching the supplied pattern. - obj (Doc / Span): The document to match over. + doc_or_span (Doc or Span): The document to match over. RETURNS (list): A list of `(key, start, end)` tuples, describing the matches. A match tuple describes a span `doc[start:end]`. The `label_id` and `key` are both integers. """ - if isinstance(obj, Doc): - doc = obj + if isinstance(doc_or_span, Doc): + doc = doc_or_span length = len(doc) - elif isinstance(obj, Span): - doc = obj.doc - length = obj.end - obj.start + elif isinstance(doc_or_span, Span): + doc = doc_or_span.doc + length = doc_or_span.end - doc_or_span.start else: - raise ValueError(Errors.E195.format(good="Doc or Span", got=type(obj).__name__)) + raise ValueError(Errors.E195.format(good="Doc or Span", got=type(doc_or_span).__name__)) if len(set([LEMMA, POS, TAG]) & self._seen_attrs) > 0 \ and not doc.is_tagged: raise ValueError(Errors.E155.format()) if DEP in self._seen_attrs and not doc.is_parsed: raise ValueError(Errors.E156.format()) - matches = find_matches(&self.patterns[0], self.patterns.size(), obj, length, + matches = find_matches(&self.patterns[0], self.patterns.size(), doc_or_span, length, extensions=self._extensions, predicates=self._extra_predicates) for i, (key, start, end) in enumerate(matches): on_match = self._callbacks.get(key, None) @@ -257,7 +257,7 @@ def unpickle_matcher(vocab, patterns, callbacks): return matcher -cdef find_matches(TokenPatternC** patterns, int n, object obj, int length, extensions=None, predicates=tuple()): +cdef find_matches(TokenPatternC** patterns, int n, object doc_or_span, int length, extensions=None, predicates=tuple()): """Find matches in a doc, with a compiled array of patterns. Matches are returned as a list of (id, start, end) tuples. 
@@ -286,7 +286,7 @@ cdef find_matches(TokenPatternC** patterns, int n, object obj, int length, exten else: nr_extra_attr = 0 extra_attr_values = mem.alloc(length, sizeof(attr_t)) - for i, token in enumerate(obj): + for i, token in enumerate(doc_or_span): for name, index in extensions.items(): value = token._.get(name) if isinstance(value, basestring): @@ -298,7 +298,7 @@ cdef find_matches(TokenPatternC** patterns, int n, object obj, int length, exten for j in range(n): states.push_back(PatternStateC(patterns[j], i, 0)) transition_states(states, matches, predicate_cache, - obj[i], extra_attr_values, predicates) + doc_or_span[i], extra_attr_values, predicates) extra_attr_values += nr_extra_attr predicate_cache += len(predicates) # Handle matches that end in 0-width patterns From b1f45c9da3631d7d18002b8a939cccc6c24dd90b Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Thu, 21 May 2020 14:19:58 +0200 Subject: [PATCH 10/13] obj -> doclike --- spacy/lang/de/syntax_iterators.py | 6 +++--- spacy/lang/el/syntax_iterators.py | 6 +++--- spacy/lang/en/syntax_iterators.py | 6 +++--- spacy/lang/es/syntax_iterators.py | 4 ++-- spacy/lang/fa/syntax_iterators.py | 6 +++--- spacy/lang/fr/syntax_iterators.py | 6 +++--- spacy/lang/id/syntax_iterators.py | 6 +++--- spacy/lang/nb/syntax_iterators.py | 6 +++--- spacy/lang/sv/syntax_iterators.py | 6 +++--- spacy/matcher/matcher.pyx | 24 ++++++++++++------------ 10 files changed, 38 insertions(+), 38 deletions(-) diff --git a/spacy/lang/de/syntax_iterators.py b/spacy/lang/de/syntax_iterators.py index 13bb857ca..73c1b1a6e 100644 --- a/spacy/lang/de/syntax_iterators.py +++ b/spacy/lang/de/syntax_iterators.py @@ -5,7 +5,7 @@ from ...symbols import NOUN, PROPN, PRON from ...errors import Errors -def noun_chunks(obj): +def noun_chunks(doclike): """ Detect base noun phrases from a dependency parse. Works on both Doc and Span. """ @@ -28,7 +28,7 @@ def noun_chunks(obj): "og", "app", ] - doc = obj.doc # Ensure works on both Doc and Span. + doc = doclike.doc # Ensure works on both Doc and Span. if not doc.is_parsed: raise ValueError(Errors.E029) @@ -38,7 +38,7 @@ def noun_chunks(obj): close_app = doc.vocab.strings.add("nk") rbracket = 0 - for i, word in enumerate(obj): + for i, word in enumerate(doclike): if i < rbracket: continue if word.pos in (NOUN, PROPN, PRON) and word.dep in np_deps: diff --git a/spacy/lang/el/syntax_iterators.py b/spacy/lang/el/syntax_iterators.py index f02619ac9..4317bdeb4 100644 --- a/spacy/lang/el/syntax_iterators.py +++ b/spacy/lang/el/syntax_iterators.py @@ -5,7 +5,7 @@ from ...symbols import NOUN, PROPN, PRON from ...errors import Errors -def noun_chunks(obj): +def noun_chunks(doclike): """ Detect base noun phrases. Works on both Doc and Span. """ @@ -14,7 +14,7 @@ def noun_chunks(obj): # obj tag corrects some DEP tagger mistakes. # Further improvement of the models will eliminate the need for this tag. labels = ["nsubj", "obj", "iobj", "appos", "ROOT", "obl"] - doc = obj.doc # Ensure works on both Doc and Span. + doc = doclike.doc # Ensure works on both Doc and Span. 
if not doc.is_parsed: raise ValueError(Errors.E029) @@ -24,7 +24,7 @@ def noun_chunks(obj): nmod = doc.vocab.strings.add("nmod") np_label = doc.vocab.strings.add("NP") seen = set() - for i, word in enumerate(obj): + for i, word in enumerate(doclike): if word.pos not in (NOUN, PROPN, PRON): continue # Prevent nested chunks from being produced diff --git a/spacy/lang/en/syntax_iterators.py b/spacy/lang/en/syntax_iterators.py index 5ff848124..6d366ec90 100644 --- a/spacy/lang/en/syntax_iterators.py +++ b/spacy/lang/en/syntax_iterators.py @@ -5,7 +5,7 @@ from ...symbols import NOUN, PROPN, PRON from ...errors import Errors -def noun_chunks(obj): +def noun_chunks(doclike): """ Detect base noun phrases from a dependency parse. Works on both Doc and Span. """ @@ -20,7 +20,7 @@ def noun_chunks(obj): "attr", "ROOT", ] - doc = obj.doc # Ensure works on both Doc and Span. + doc = doclike.doc # Ensure works on both Doc and Span. if not doc.is_parsed: raise ValueError(Errors.E029) @@ -29,7 +29,7 @@ def noun_chunks(obj): conj = doc.vocab.strings.add("conj") np_label = doc.vocab.strings.add("NP") seen = set() - for i, word in enumerate(obj): + for i, word in enumerate(doclike): if word.pos not in (NOUN, PROPN, PRON): continue # Prevent nested chunks from being produced diff --git a/spacy/lang/es/syntax_iterators.py b/spacy/lang/es/syntax_iterators.py index 0badddca1..d403183ff 100644 --- a/spacy/lang/es/syntax_iterators.py +++ b/spacy/lang/es/syntax_iterators.py @@ -5,8 +5,8 @@ from ...symbols import NOUN, PROPN, PRON, VERB, AUX from ...errors import Errors -def noun_chunks(obj): - doc = obj.doc +def noun_chunks(doclike): + doc = doclike.doc if not doc.is_parsed: raise ValueError(Errors.E029) diff --git a/spacy/lang/fa/syntax_iterators.py b/spacy/lang/fa/syntax_iterators.py index 5ff848124..6d366ec90 100644 --- a/spacy/lang/fa/syntax_iterators.py +++ b/spacy/lang/fa/syntax_iterators.py @@ -5,7 +5,7 @@ from ...symbols import NOUN, PROPN, PRON from ...errors import Errors -def noun_chunks(obj): +def noun_chunks(doclike): """ Detect base noun phrases from a dependency parse. Works on both Doc and Span. """ @@ -20,7 +20,7 @@ def noun_chunks(obj): "attr", "ROOT", ] - doc = obj.doc # Ensure works on both Doc and Span. + doc = doclike.doc # Ensure works on both Doc and Span. if not doc.is_parsed: raise ValueError(Errors.E029) @@ -29,7 +29,7 @@ def noun_chunks(obj): conj = doc.vocab.strings.add("conj") np_label = doc.vocab.strings.add("NP") seen = set() - for i, word in enumerate(obj): + for i, word in enumerate(doclike): if word.pos not in (NOUN, PROPN, PRON): continue # Prevent nested chunks from being produced diff --git a/spacy/lang/fr/syntax_iterators.py b/spacy/lang/fr/syntax_iterators.py index 9495dcf1e..2ed2c1b35 100644 --- a/spacy/lang/fr/syntax_iterators.py +++ b/spacy/lang/fr/syntax_iterators.py @@ -5,7 +5,7 @@ from ...symbols import NOUN, PROPN, PRON from ...errors import Errors -def noun_chunks(obj): +def noun_chunks(doclike): """ Detect base noun phrases from a dependency parse. Works on both Doc and Span. """ @@ -19,7 +19,7 @@ def noun_chunks(obj): "nmod", "nmod:poss", ] - doc = obj.doc # Ensure works on both Doc and Span. + doc = doclike.doc # Ensure works on both Doc and Span. 
if not doc.is_parsed: raise ValueError(Errors.E029) @@ -28,7 +28,7 @@ def noun_chunks(obj): conj = doc.vocab.strings.add("conj") np_label = doc.vocab.strings.add("NP") seen = set() - for i, word in enumerate(obj): + for i, word in enumerate(doclike): if word.pos not in (NOUN, PROPN, PRON): continue # Prevent nested chunks from being produced diff --git a/spacy/lang/id/syntax_iterators.py b/spacy/lang/id/syntax_iterators.py index 9495dcf1e..2ed2c1b35 100644 --- a/spacy/lang/id/syntax_iterators.py +++ b/spacy/lang/id/syntax_iterators.py @@ -5,7 +5,7 @@ from ...symbols import NOUN, PROPN, PRON from ...errors import Errors -def noun_chunks(obj): +def noun_chunks(doclike): """ Detect base noun phrases from a dependency parse. Works on both Doc and Span. """ @@ -19,7 +19,7 @@ def noun_chunks(obj): "nmod", "nmod:poss", ] - doc = obj.doc # Ensure works on both Doc and Span. + doc = doclike.doc # Ensure works on both Doc and Span. if not doc.is_parsed: raise ValueError(Errors.E029) @@ -28,7 +28,7 @@ def noun_chunks(obj): conj = doc.vocab.strings.add("conj") np_label = doc.vocab.strings.add("NP") seen = set() - for i, word in enumerate(obj): + for i, word in enumerate(doclike): if word.pos not in (NOUN, PROPN, PRON): continue # Prevent nested chunks from being produced diff --git a/spacy/lang/nb/syntax_iterators.py b/spacy/lang/nb/syntax_iterators.py index 9495dcf1e..2ed2c1b35 100644 --- a/spacy/lang/nb/syntax_iterators.py +++ b/spacy/lang/nb/syntax_iterators.py @@ -5,7 +5,7 @@ from ...symbols import NOUN, PROPN, PRON from ...errors import Errors -def noun_chunks(obj): +def noun_chunks(doclike): """ Detect base noun phrases from a dependency parse. Works on both Doc and Span. """ @@ -19,7 +19,7 @@ def noun_chunks(obj): "nmod", "nmod:poss", ] - doc = obj.doc # Ensure works on both Doc and Span. + doc = doclike.doc # Ensure works on both Doc and Span. if not doc.is_parsed: raise ValueError(Errors.E029) @@ -28,7 +28,7 @@ def noun_chunks(obj): conj = doc.vocab.strings.add("conj") np_label = doc.vocab.strings.add("NP") seen = set() - for i, word in enumerate(obj): + for i, word in enumerate(doclike): if word.pos not in (NOUN, PROPN, PRON): continue # Prevent nested chunks from being produced diff --git a/spacy/lang/sv/syntax_iterators.py b/spacy/lang/sv/syntax_iterators.py index 148884efe..84493ae79 100644 --- a/spacy/lang/sv/syntax_iterators.py +++ b/spacy/lang/sv/syntax_iterators.py @@ -5,7 +5,7 @@ from ...symbols import NOUN, PROPN, PRON from ...errors import Errors -def noun_chunks(obj): +def noun_chunks(doclike): """ Detect base noun phrases from a dependency parse. Works on both Doc and Span. """ @@ -20,7 +20,7 @@ def noun_chunks(obj): "nmod", "nmod:poss", ] - doc = obj.doc # Ensure works on both Doc and Span. + doc = doclike.doc # Ensure works on both Doc and Span. if not doc.is_parsed: raise ValueError(Errors.E029) @@ -29,7 +29,7 @@ def noun_chunks(obj): conj = doc.vocab.strings.add("conj") np_label = doc.vocab.strings.add("NP") seen = set() - for i, word in enumerate(obj): + for i, word in enumerate(doclike): if word.pos not in (NOUN, PROPN, PRON): continue # Prevent nested chunks from being produced diff --git a/spacy/matcher/matcher.pyx b/spacy/matcher/matcher.pyx index 4cfab915f..0c1a56187 100644 --- a/spacy/matcher/matcher.pyx +++ b/spacy/matcher/matcher.pyx @@ -213,28 +213,28 @@ cdef class Matcher: else: yield doc - def __call__(self, object doc_or_span): + def __call__(self, object doclike): """Find all token sequences matching the supplied pattern. 
- doc_or_span (Doc or Span): The document to match over. + doclike (Doc or Span): The document to match over. RETURNS (list): A list of `(key, start, end)` tuples, describing the matches. A match tuple describes a span `doc[start:end]`. The `label_id` and `key` are both integers. """ - if isinstance(doc_or_span, Doc): - doc = doc_or_span + if isinstance(doclike, Doc): + doc = doclike length = len(doc) - elif isinstance(doc_or_span, Span): - doc = doc_or_span.doc - length = doc_or_span.end - doc_or_span.start + elif isinstance(doclike, Span): + doc = doclike.doc + length = doclike.end - doclike.start else: - raise ValueError(Errors.E195.format(good="Doc or Span", got=type(doc_or_span).__name__)) + raise ValueError(Errors.E195.format(good="Doc or Span", got=type(doclike).__name__)) if len(set([LEMMA, POS, TAG]) & self._seen_attrs) > 0 \ and not doc.is_tagged: raise ValueError(Errors.E155.format()) if DEP in self._seen_attrs and not doc.is_parsed: raise ValueError(Errors.E156.format()) - matches = find_matches(&self.patterns[0], self.patterns.size(), doc_or_span, length, + matches = find_matches(&self.patterns[0], self.patterns.size(), doclike, length, extensions=self._extensions, predicates=self._extra_predicates) for i, (key, start, end) in enumerate(matches): on_match = self._callbacks.get(key, None) @@ -257,7 +257,7 @@ def unpickle_matcher(vocab, patterns, callbacks): return matcher -cdef find_matches(TokenPatternC** patterns, int n, object doc_or_span, int length, extensions=None, predicates=tuple()): +cdef find_matches(TokenPatternC** patterns, int n, object doclike, int length, extensions=None, predicates=tuple()): """Find matches in a doc, with a compiled array of patterns. Matches are returned as a list of (id, start, end) tuples. @@ -286,7 +286,7 @@ cdef find_matches(TokenPatternC** patterns, int n, object doc_or_span, int lengt else: nr_extra_attr = 0 extra_attr_values = mem.alloc(length, sizeof(attr_t)) - for i, token in enumerate(doc_or_span): + for i, token in enumerate(doclike): for name, index in extensions.items(): value = token._.get(name) if isinstance(value, basestring): @@ -298,7 +298,7 @@ cdef find_matches(TokenPatternC** patterns, int n, object doc_or_span, int lengt for j in range(n): states.push_back(PatternStateC(patterns[j], i, 0)) transition_states(states, matches, predicate_cache, - doc_or_span[i], extra_attr_values, predicates) + doclike[i], extra_attr_values, predicates) extra_attr_values += nr_extra_attr predicate_cache += len(predicates) # Handle matches that end in 0-width patterns From e2fe83e35d21afed9e12e9810921228b551e628a Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Thu, 21 May 2020 14:20:29 +0200 Subject: [PATCH 11/13] Refer to correct object --- spacy/lang/es/syntax_iterators.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spacy/lang/es/syntax_iterators.py b/spacy/lang/es/syntax_iterators.py index d403183ff..5fda35211 100644 --- a/spacy/lang/es/syntax_iterators.py +++ b/spacy/lang/es/syntax_iterators.py @@ -21,7 +21,7 @@ def noun_chunks(doclike): np_right_deps = [doc.vocab.strings.add(label) for label in right_labels] stop_deps = [doc.vocab.strings.add(label) for label in stop_labels] token = doc[0] - while token and token.i < len(doc): + while token and token.i < len(doclike): if token.pos in [PROPN, NOUN, PRON]: left, right = noun_bounds( doc, token, np_left_deps, np_right_deps, stop_deps From bea863acd255407887806d1089c1f63896cdf084 Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Thu, 21 May 2020 14:24:38 +0200 Subject: 
[PATCH 12/13] Fix naming conflict and formatting --- spacy/lang/zh/__init__.py | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/spacy/lang/zh/__init__.py b/spacy/lang/zh/__init__.py index 508c5a03f..9d1cb71a7 100644 --- a/spacy/lang/zh/__init__.py +++ b/spacy/lang/zh/__init__.py @@ -169,19 +169,16 @@ class ChineseTokenizer(DummyTokenizer): return util.to_bytes(serializers, []) def from_bytes(self, data, **kwargs): - data = {"features_b": b"", "weights_b": b"", "processors_data": None} - # pkuseg_features_b = b"" - # pkuseg_weights_b = b"" - # pkuseg_processors_data = None + pkuseg_data = {"features_b": b"", "weights_b": b"", "processors_data": None} def deserialize_pkuseg_features(b): - data["features_b"] = b + pkuseg_data["features_b"] = b def deserialize_pkuseg_weights(b): - data["weights_b"] = b + pkuseg_data["weights_b"] = b def deserialize_pkuseg_processors(b): - data["processors_data"] = srsly.msgpack_loads(b) + pkuseg_data["processors_data"] = srsly.msgpack_loads(b) deserializers = OrderedDict( ( @@ -193,13 +190,13 @@ class ChineseTokenizer(DummyTokenizer): ) util.from_bytes(data, deserializers, []) - if data["features_b"] and data["weights_b"]: + if pkuseg_data["features_b"] and pkuseg_data["weights_b"]: with tempfile.TemporaryDirectory() as tempdir: tempdir = Path(tempdir) with open(tempdir / "features.pkl", "wb") as fileh: - fileh.write(data["features_b"]) + fileh.write(pkuseg_data["features_b"]) with open(tempdir / "weights.npz", "wb") as fileh: - fileh.write(data["weights_b"]) + fileh.write(pkuseg_data["weights_b"]) try: import pkuseg except ImportError: @@ -208,10 +205,9 @@ class ChineseTokenizer(DummyTokenizer): + _PKUSEG_INSTALL_MSG ) self.pkuseg_seg = pkuseg.pkuseg(str(tempdir)) - if data["processors_data"]: - (user_dict, do_process, common_words, other_words) = data[ - "processors_data" - ] + if pkuseg_data["processors_data"]: + processors_data = pkuseg_data["processors_data"] + (user_dict, do_process, common_words, other_words) = processors_data self.pkuseg_seg.preprocesser = pkuseg.Preprocesser(user_dict) self.pkuseg_seg.postprocesser.do_process = do_process self.pkuseg_seg.postprocesser.common_words = set(common_words) From a9cb2882cb98674614e72232c4bc5133b92fa501 Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Thu, 21 May 2020 15:17:39 +0200 Subject: [PATCH 13/13] Rename argument: doc_or_span/obj -> doclike (#5463) * doc_or_span -> obj * Revert "doc_or_span -> obj" This reverts commit 78bb9ff5e0e4adc01bd30e227657118d87546f83. * obj -> doclike * Refer to correct object --- spacy/lang/de/syntax_iterators.py | 6 +++--- spacy/lang/el/syntax_iterators.py | 6 +++--- spacy/lang/en/syntax_iterators.py | 6 +++--- spacy/lang/es/syntax_iterators.py | 6 +++--- spacy/lang/fa/syntax_iterators.py | 6 +++--- spacy/lang/fr/syntax_iterators.py | 6 +++--- spacy/lang/id/syntax_iterators.py | 6 +++--- spacy/lang/nb/syntax_iterators.py | 6 +++--- spacy/lang/sv/syntax_iterators.py | 6 +++--- spacy/matcher/matcher.pyx | 24 ++++++++++++------------ 10 files changed, 39 insertions(+), 39 deletions(-) diff --git a/spacy/lang/de/syntax_iterators.py b/spacy/lang/de/syntax_iterators.py index 13bb857ca..73c1b1a6e 100644 --- a/spacy/lang/de/syntax_iterators.py +++ b/spacy/lang/de/syntax_iterators.py @@ -5,7 +5,7 @@ from ...symbols import NOUN, PROPN, PRON from ...errors import Errors -def noun_chunks(obj): +def noun_chunks(doclike): """ Detect base noun phrases from a dependency parse. Works on both Doc and Span. 
""" @@ -28,7 +28,7 @@ def noun_chunks(obj): "og", "app", ] - doc = obj.doc # Ensure works on both Doc and Span. + doc = doclike.doc # Ensure works on both Doc and Span. if not doc.is_parsed: raise ValueError(Errors.E029) @@ -38,7 +38,7 @@ def noun_chunks(obj): close_app = doc.vocab.strings.add("nk") rbracket = 0 - for i, word in enumerate(obj): + for i, word in enumerate(doclike): if i < rbracket: continue if word.pos in (NOUN, PROPN, PRON) and word.dep in np_deps: diff --git a/spacy/lang/el/syntax_iterators.py b/spacy/lang/el/syntax_iterators.py index f02619ac9..4317bdeb4 100644 --- a/spacy/lang/el/syntax_iterators.py +++ b/spacy/lang/el/syntax_iterators.py @@ -5,7 +5,7 @@ from ...symbols import NOUN, PROPN, PRON from ...errors import Errors -def noun_chunks(obj): +def noun_chunks(doclike): """ Detect base noun phrases. Works on both Doc and Span. """ @@ -14,7 +14,7 @@ def noun_chunks(obj): # obj tag corrects some DEP tagger mistakes. # Further improvement of the models will eliminate the need for this tag. labels = ["nsubj", "obj", "iobj", "appos", "ROOT", "obl"] - doc = obj.doc # Ensure works on both Doc and Span. + doc = doclike.doc # Ensure works on both Doc and Span. if not doc.is_parsed: raise ValueError(Errors.E029) @@ -24,7 +24,7 @@ def noun_chunks(obj): nmod = doc.vocab.strings.add("nmod") np_label = doc.vocab.strings.add("NP") seen = set() - for i, word in enumerate(obj): + for i, word in enumerate(doclike): if word.pos not in (NOUN, PROPN, PRON): continue # Prevent nested chunks from being produced diff --git a/spacy/lang/en/syntax_iterators.py b/spacy/lang/en/syntax_iterators.py index 5ff848124..6d366ec90 100644 --- a/spacy/lang/en/syntax_iterators.py +++ b/spacy/lang/en/syntax_iterators.py @@ -5,7 +5,7 @@ from ...symbols import NOUN, PROPN, PRON from ...errors import Errors -def noun_chunks(obj): +def noun_chunks(doclike): """ Detect base noun phrases from a dependency parse. Works on both Doc and Span. """ @@ -20,7 +20,7 @@ def noun_chunks(obj): "attr", "ROOT", ] - doc = obj.doc # Ensure works on both Doc and Span. + doc = doclike.doc # Ensure works on both Doc and Span. 
if not doc.is_parsed: raise ValueError(Errors.E029) @@ -29,7 +29,7 @@ def noun_chunks(obj): conj = doc.vocab.strings.add("conj") np_label = doc.vocab.strings.add("NP") seen = set() - for i, word in enumerate(obj): + for i, word in enumerate(doclike): if word.pos not in (NOUN, PROPN, PRON): continue # Prevent nested chunks from being produced diff --git a/spacy/lang/es/syntax_iterators.py b/spacy/lang/es/syntax_iterators.py index 0badddca1..5fda35211 100644 --- a/spacy/lang/es/syntax_iterators.py +++ b/spacy/lang/es/syntax_iterators.py @@ -5,8 +5,8 @@ from ...symbols import NOUN, PROPN, PRON, VERB, AUX from ...errors import Errors -def noun_chunks(obj): - doc = obj.doc +def noun_chunks(doclike): + doc = doclike.doc if not doc.is_parsed: raise ValueError(Errors.E029) @@ -21,7 +21,7 @@ def noun_chunks(obj): np_right_deps = [doc.vocab.strings.add(label) for label in right_labels] stop_deps = [doc.vocab.strings.add(label) for label in stop_labels] token = doc[0] - while token and token.i < len(doc): + while token and token.i < len(doclike): if token.pos in [PROPN, NOUN, PRON]: left, right = noun_bounds( doc, token, np_left_deps, np_right_deps, stop_deps diff --git a/spacy/lang/fa/syntax_iterators.py b/spacy/lang/fa/syntax_iterators.py index 5ff848124..6d366ec90 100644 --- a/spacy/lang/fa/syntax_iterators.py +++ b/spacy/lang/fa/syntax_iterators.py @@ -5,7 +5,7 @@ from ...symbols import NOUN, PROPN, PRON from ...errors import Errors -def noun_chunks(obj): +def noun_chunks(doclike): """ Detect base noun phrases from a dependency parse. Works on both Doc and Span. """ @@ -20,7 +20,7 @@ def noun_chunks(obj): "attr", "ROOT", ] - doc = obj.doc # Ensure works on both Doc and Span. + doc = doclike.doc # Ensure works on both Doc and Span. if not doc.is_parsed: raise ValueError(Errors.E029) @@ -29,7 +29,7 @@ def noun_chunks(obj): conj = doc.vocab.strings.add("conj") np_label = doc.vocab.strings.add("NP") seen = set() - for i, word in enumerate(obj): + for i, word in enumerate(doclike): if word.pos not in (NOUN, PROPN, PRON): continue # Prevent nested chunks from being produced diff --git a/spacy/lang/fr/syntax_iterators.py b/spacy/lang/fr/syntax_iterators.py index 9495dcf1e..2ed2c1b35 100644 --- a/spacy/lang/fr/syntax_iterators.py +++ b/spacy/lang/fr/syntax_iterators.py @@ -5,7 +5,7 @@ from ...symbols import NOUN, PROPN, PRON from ...errors import Errors -def noun_chunks(obj): +def noun_chunks(doclike): """ Detect base noun phrases from a dependency parse. Works on both Doc and Span. """ @@ -19,7 +19,7 @@ def noun_chunks(obj): "nmod", "nmod:poss", ] - doc = obj.doc # Ensure works on both Doc and Span. + doc = doclike.doc # Ensure works on both Doc and Span. if not doc.is_parsed: raise ValueError(Errors.E029) @@ -28,7 +28,7 @@ def noun_chunks(obj): conj = doc.vocab.strings.add("conj") np_label = doc.vocab.strings.add("NP") seen = set() - for i, word in enumerate(obj): + for i, word in enumerate(doclike): if word.pos not in (NOUN, PROPN, PRON): continue # Prevent nested chunks from being produced diff --git a/spacy/lang/id/syntax_iterators.py b/spacy/lang/id/syntax_iterators.py index 9495dcf1e..2ed2c1b35 100644 --- a/spacy/lang/id/syntax_iterators.py +++ b/spacy/lang/id/syntax_iterators.py @@ -5,7 +5,7 @@ from ...symbols import NOUN, PROPN, PRON from ...errors import Errors -def noun_chunks(obj): +def noun_chunks(doclike): """ Detect base noun phrases from a dependency parse. Works on both Doc and Span. 
""" @@ -19,7 +19,7 @@ def noun_chunks(obj): "nmod", "nmod:poss", ] - doc = obj.doc # Ensure works on both Doc and Span. + doc = doclike.doc # Ensure works on both Doc and Span. if not doc.is_parsed: raise ValueError(Errors.E029) @@ -28,7 +28,7 @@ def noun_chunks(obj): conj = doc.vocab.strings.add("conj") np_label = doc.vocab.strings.add("NP") seen = set() - for i, word in enumerate(obj): + for i, word in enumerate(doclike): if word.pos not in (NOUN, PROPN, PRON): continue # Prevent nested chunks from being produced diff --git a/spacy/lang/nb/syntax_iterators.py b/spacy/lang/nb/syntax_iterators.py index 9495dcf1e..2ed2c1b35 100644 --- a/spacy/lang/nb/syntax_iterators.py +++ b/spacy/lang/nb/syntax_iterators.py @@ -5,7 +5,7 @@ from ...symbols import NOUN, PROPN, PRON from ...errors import Errors -def noun_chunks(obj): +def noun_chunks(doclike): """ Detect base noun phrases from a dependency parse. Works on both Doc and Span. """ @@ -19,7 +19,7 @@ def noun_chunks(obj): "nmod", "nmod:poss", ] - doc = obj.doc # Ensure works on both Doc and Span. + doc = doclike.doc # Ensure works on both Doc and Span. if not doc.is_parsed: raise ValueError(Errors.E029) @@ -28,7 +28,7 @@ def noun_chunks(obj): conj = doc.vocab.strings.add("conj") np_label = doc.vocab.strings.add("NP") seen = set() - for i, word in enumerate(obj): + for i, word in enumerate(doclike): if word.pos not in (NOUN, PROPN, PRON): continue # Prevent nested chunks from being produced diff --git a/spacy/lang/sv/syntax_iterators.py b/spacy/lang/sv/syntax_iterators.py index 148884efe..84493ae79 100644 --- a/spacy/lang/sv/syntax_iterators.py +++ b/spacy/lang/sv/syntax_iterators.py @@ -5,7 +5,7 @@ from ...symbols import NOUN, PROPN, PRON from ...errors import Errors -def noun_chunks(obj): +def noun_chunks(doclike): """ Detect base noun phrases from a dependency parse. Works on both Doc and Span. """ @@ -20,7 +20,7 @@ def noun_chunks(obj): "nmod", "nmod:poss", ] - doc = obj.doc # Ensure works on both Doc and Span. + doc = doclike.doc # Ensure works on both Doc and Span. if not doc.is_parsed: raise ValueError(Errors.E029) @@ -29,7 +29,7 @@ def noun_chunks(obj): conj = doc.vocab.strings.add("conj") np_label = doc.vocab.strings.add("NP") seen = set() - for i, word in enumerate(obj): + for i, word in enumerate(doclike): if word.pos not in (NOUN, PROPN, PRON): continue # Prevent nested chunks from being produced diff --git a/spacy/matcher/matcher.pyx b/spacy/matcher/matcher.pyx index 4cfab915f..0c1a56187 100644 --- a/spacy/matcher/matcher.pyx +++ b/spacy/matcher/matcher.pyx @@ -213,28 +213,28 @@ cdef class Matcher: else: yield doc - def __call__(self, object doc_or_span): + def __call__(self, object doclike): """Find all token sequences matching the supplied pattern. - doc_or_span (Doc or Span): The document to match over. + doclike (Doc or Span): The document to match over. RETURNS (list): A list of `(key, start, end)` tuples, describing the matches. A match tuple describes a span `doc[start:end]`. The `label_id` and `key` are both integers. 
""" - if isinstance(doc_or_span, Doc): - doc = doc_or_span + if isinstance(doclike, Doc): + doc = doclike length = len(doc) - elif isinstance(doc_or_span, Span): - doc = doc_or_span.doc - length = doc_or_span.end - doc_or_span.start + elif isinstance(doclike, Span): + doc = doclike.doc + length = doclike.end - doclike.start else: - raise ValueError(Errors.E195.format(good="Doc or Span", got=type(doc_or_span).__name__)) + raise ValueError(Errors.E195.format(good="Doc or Span", got=type(doclike).__name__)) if len(set([LEMMA, POS, TAG]) & self._seen_attrs) > 0 \ and not doc.is_tagged: raise ValueError(Errors.E155.format()) if DEP in self._seen_attrs and not doc.is_parsed: raise ValueError(Errors.E156.format()) - matches = find_matches(&self.patterns[0], self.patterns.size(), doc_or_span, length, + matches = find_matches(&self.patterns[0], self.patterns.size(), doclike, length, extensions=self._extensions, predicates=self._extra_predicates) for i, (key, start, end) in enumerate(matches): on_match = self._callbacks.get(key, None) @@ -257,7 +257,7 @@ def unpickle_matcher(vocab, patterns, callbacks): return matcher -cdef find_matches(TokenPatternC** patterns, int n, object doc_or_span, int length, extensions=None, predicates=tuple()): +cdef find_matches(TokenPatternC** patterns, int n, object doclike, int length, extensions=None, predicates=tuple()): """Find matches in a doc, with a compiled array of patterns. Matches are returned as a list of (id, start, end) tuples. @@ -286,7 +286,7 @@ cdef find_matches(TokenPatternC** patterns, int n, object doc_or_span, int lengt else: nr_extra_attr = 0 extra_attr_values = mem.alloc(length, sizeof(attr_t)) - for i, token in enumerate(doc_or_span): + for i, token in enumerate(doclike): for name, index in extensions.items(): value = token._.get(name) if isinstance(value, basestring): @@ -298,7 +298,7 @@ cdef find_matches(TokenPatternC** patterns, int n, object doc_or_span, int lengt for j in range(n): states.push_back(PatternStateC(patterns[j], i, 0)) transition_states(states, matches, predicate_cache, - doc_or_span[i], extra_attr_values, predicates) + doclike[i], extra_attr_values, predicates) extra_attr_values += nr_extra_attr predicate_cache += len(predicates) # Handle matches that end in 0-width patterns