From 493c77462a236fae204920e8a3fa22d70833d2fc Mon Sep 17 00:00:00 2001 From: Leander Fiedler Date: Mon, 6 Apr 2020 18:46:51 +0200 Subject: [PATCH 01/19] issue5230: test cases covering known sources of resource warnings --- spacy/tests/regression/test_issue5230.py | 112 +++++++++++++++++++++++ 1 file changed, 112 insertions(+) create mode 100644 spacy/tests/regression/test_issue5230.py diff --git a/spacy/tests/regression/test_issue5230.py b/spacy/tests/regression/test_issue5230.py new file mode 100644 index 000000000..e3d7c7e82 --- /dev/null +++ b/spacy/tests/regression/test_issue5230.py @@ -0,0 +1,112 @@ +import warnings + +import numpy +import pytest +import srsly + +from spacy.kb import KnowledgeBase +from spacy.vectors import Vectors +from spacy.language import Language +from spacy.pipeline import Pipe +from spacy.tests.util import make_tempdir + + +@pytest.mark.xfail +def test_language_to_disk_resource_warning(): + nlp = Language() + with make_tempdir() as d: + with warnings.catch_warnings(record=True) as w: + # catch only warnings raised in spacy.language since there may be others from other components or pipelines + warnings.filterwarnings( + "always", module="spacy.language", category=ResourceWarning + ) + nlp.to_disk(d) + assert len(w) == 0 + + +@pytest.mark.xfail +def test_vectors_to_disk_resource_warning(): + data = numpy.zeros((3, 300), dtype="f") + keys = ["cat", "dog", "rat"] + vectors = Vectors(data=data, keys=keys) + with make_tempdir() as d: + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings("always", category=ResourceWarning) + vectors.to_disk(d) + assert len(w) == 0 + + +@pytest.mark.xfail +def test_custom_pipes_to_disk_resource_warning(): + # create dummy pipe partially implementing interface -- only want to test to_disk + class SerializableDummy(object): + def __init__(self, **cfg): + if cfg: + self.cfg = cfg + else: + self.cfg = None + super(SerializableDummy, self).__init__() + + def to_bytes(self, exclude=tuple(), disable=None, **kwargs): + return srsly.msgpack_dumps({"dummy": srsly.json_dumps(None)}) + + def from_bytes(self, bytes_data, exclude): + return self + + def to_disk(self, path, exclude=tuple(), **kwargs): + pass + + def from_disk(self, path, exclude=tuple(), **kwargs): + return self + + class MyPipe(Pipe): + def __init__(self, vocab, model=True, **cfg): + if cfg: + self.cfg = cfg + else: + self.cfg = None + self.model = SerializableDummy() + self.vocab = SerializableDummy() + + pipe = MyPipe(None) + with make_tempdir() as d: + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings("always", category=ResourceWarning) + pipe.to_disk(d) + assert len(w) == 0 + + +@pytest.mark.xfail +def test_tagger_to_disk_resource_warning(): + nlp = Language() + nlp.add_pipe(nlp.create_pipe("tagger")) + tagger = nlp.get_pipe("tagger") + # need to add model for two reasons: + # 1. no model leads to error in serialization, + # 2. the affected line is the one for model serialization + tagger.begin_training(pipeline=nlp.pipeline) + + with make_tempdir() as d: + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings("always", category=ResourceWarning) + tagger.to_disk(d) + assert len(w) == 0 + + +@pytest.mark.xfail +def test_entity_linker_to_disk_resource_warning(): + nlp = Language() + nlp.add_pipe(nlp.create_pipe("entity_linker")) + entity_linker = nlp.get_pipe("entity_linker") + # need to add model for two reasons: + # 1. no model leads to error in serialization, + # 2. 
the affected line is the one for model serialization
+    kb = KnowledgeBase(nlp.vocab, entity_vector_length=1)
+    entity_linker.set_kb(kb)
+    entity_linker.begin_training(pipeline=nlp.pipeline)
+
+    with make_tempdir() as d:
+        with warnings.catch_warnings(record=True) as w:
+            warnings.filterwarnings("always", category=ResourceWarning)
+            entity_linker.to_disk(d)
+            assert len(w) == 0

From 1cd975d4a5cf50eb5a2b16a30e8b520c7778af40 Mon Sep 17 00:00:00 2001
From: Leander Fiedler
Date: Mon, 6 Apr 2020 18:54:32 +0200
Subject: [PATCH 02/19] issue5230: fixed resource warnings in language

---
 spacy/language.py                        | 5 ++---
 spacy/tests/regression/test_issue5230.py | 1 -
 2 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/spacy/language.py b/spacy/language.py
index 56619080d..0eb062eae 100644
--- a/spacy/language.py
+++ b/spacy/language.py
@@ -903,9 +903,8 @@ class Language(object):
         serializers["tokenizer"] = lambda p: self.tokenizer.to_disk(
             p, exclude=["vocab"]
         )
-        serializers["meta.json"] = lambda p: p.open("w").write(
-            srsly.json_dumps(self.meta)
-        )
+        serializers["meta.json"] = lambda p: srsly.write_json(p, self.meta)
+
         for name, proc in self.pipeline:
             if not hasattr(proc, "name"):
                 continue
diff --git a/spacy/tests/regression/test_issue5230.py b/spacy/tests/regression/test_issue5230.py
index e3d7c7e82..be84875e7 100644
--- a/spacy/tests/regression/test_issue5230.py
+++ b/spacy/tests/regression/test_issue5230.py
@@ -11,7 +11,6 @@ from spacy.pipeline import Pipe
 from spacy.tests.util import make_tempdir


-@pytest.mark.xfail
 def test_language_to_disk_resource_warning():
     nlp = Language()
     with make_tempdir() as d:

From 273ed452bb4ba148d491dcec4b321a6293bdcd30 Mon Sep 17 00:00:00 2001
From: Leander Fiedler
Date: Mon, 6 Apr 2020 19:22:32 +0200
Subject: [PATCH 03/19] issue5230: added unicode declaration at top of the file

---
 spacy/tests/regression/test_issue5230.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/spacy/tests/regression/test_issue5230.py b/spacy/tests/regression/test_issue5230.py
index be84875e7..9cfa3fc05 100644
--- a/spacy/tests/regression/test_issue5230.py
+++ b/spacy/tests/regression/test_issue5230.py
@@ -1,3 +1,4 @@
+# coding: utf8
 import warnings

 import numpy

From 71cc903d65b8946a4c6cd04cb2ca38b8a19eb5c4 Mon Sep 17 00:00:00 2001
From: Leander Fiedler
Date: Mon, 6 Apr 2020 20:30:41 +0200
Subject: [PATCH 04/19] issue5230: replaced open statements on path objects so
 that serialization still works and files are closed

---
 spacy/pipeline/pipes.pyx                 |  6 +++---
 spacy/tests/regression/test_issue5230.py |  4 ----
 spacy/vectors.pyx                        | 10 +++++++++-
 3 files changed, 12 insertions(+), 8 deletions(-)

diff --git a/spacy/pipeline/pipes.pyx b/spacy/pipeline/pipes.pyx
index a20c9b6df..ce95b2752 100644
--- a/spacy/pipeline/pipes.pyx
+++ b/spacy/pipeline/pipes.pyx
@@ -202,7 +202,7 @@ class Pipe(object):
         serialize["cfg"] = lambda p: srsly.write_json(p, self.cfg)
         serialize["vocab"] = lambda p: self.vocab.to_disk(p)
         if self.model not in (None, True, False):
-            serialize["model"] = lambda p: p.open("wb").write(self.model.to_bytes())
+            serialize["model"] = self.model.to_disk
         exclude = util.get_serialization_exclude(serialize, exclude, kwargs)
         util.to_disk(path, serialize, exclude)

@@ -625,7 +625,7 @@ class Tagger(Pipe):
         serialize = OrderedDict((
             ("vocab", lambda p: self.vocab.to_disk(p)),
             ("tag_map", lambda p: srsly.write_msgpack(p, tag_map)),
-            ("model", lambda p: p.open("wb").write(self.model.to_bytes())),
+            ("model", self.model.to_disk),
             ("cfg", lambda p: srsly.write_json(p, self.cfg))
         ))
         exclude =
util.get_serialization_exclude(serialize, exclude, kwargs) @@ -1394,7 +1394,7 @@ class EntityLinker(Pipe): serialize["vocab"] = lambda p: self.vocab.to_disk(p) serialize["kb"] = lambda p: self.kb.dump(p) if self.model not in (None, True, False): - serialize["model"] = lambda p: p.open("wb").write(self.model.to_bytes()) + serialize["model"] = self.model.to_disk exclude = util.get_serialization_exclude(serialize, exclude, kwargs) util.to_disk(path, serialize, exclude) diff --git a/spacy/tests/regression/test_issue5230.py b/spacy/tests/regression/test_issue5230.py index 9cfa3fc05..716a4624b 100644 --- a/spacy/tests/regression/test_issue5230.py +++ b/spacy/tests/regression/test_issue5230.py @@ -24,7 +24,6 @@ def test_language_to_disk_resource_warning(): assert len(w) == 0 -@pytest.mark.xfail def test_vectors_to_disk_resource_warning(): data = numpy.zeros((3, 300), dtype="f") keys = ["cat", "dog", "rat"] @@ -36,7 +35,6 @@ def test_vectors_to_disk_resource_warning(): assert len(w) == 0 -@pytest.mark.xfail def test_custom_pipes_to_disk_resource_warning(): # create dummy pipe partially implementing interface -- only want to test to_disk class SerializableDummy(object): @@ -76,7 +74,6 @@ def test_custom_pipes_to_disk_resource_warning(): assert len(w) == 0 -@pytest.mark.xfail def test_tagger_to_disk_resource_warning(): nlp = Language() nlp.add_pipe(nlp.create_pipe("tagger")) @@ -93,7 +90,6 @@ def test_tagger_to_disk_resource_warning(): assert len(w) == 0 -@pytest.mark.xfail def test_entity_linker_to_disk_resource_warning(): nlp = Language() nlp.add_pipe(nlp.create_pipe("entity_linker")) diff --git a/spacy/vectors.pyx b/spacy/vectors.pyx index f3c20fb7f..62d176c6c 100644 --- a/spacy/vectors.pyx +++ b/spacy/vectors.pyx @@ -376,8 +376,16 @@ cdef class Vectors: save_array = lambda arr, file_: xp.save(file_, arr, allow_pickle=False) else: save_array = lambda arr, file_: xp.save(file_, arr) + + def save_vectors(path): + # the source of numpy.save indicates that the file object is closed after use. + # but it seems that somehow this does not happen, as ResourceWarnings are raised here. + # in order to not rely on this, wrap in context manager. 
+ with path.open("wb") as _file: + save_array(self.data, _file) + serializers = OrderedDict(( - ("vectors", lambda p: save_array(self.data, p.open("wb"))), + ("vectors", save_vectors), ("key2row", lambda p: srsly.write_msgpack(p, self.key2row)) )) return util.to_disk(path, serializers, []) From cde96f6c64220bf6a82cf4288f6e2bfbbc97eb0a Mon Sep 17 00:00:00 2001 From: Leander Fiedler Date: Mon, 6 Apr 2020 20:51:12 +0200 Subject: [PATCH 05/19] issue5230: optimized unit test a bit --- spacy/tests/regression/test_issue5230.py | 61 +++++++++--------------- 1 file changed, 23 insertions(+), 38 deletions(-) diff --git a/spacy/tests/regression/test_issue5230.py b/spacy/tests/regression/test_issue5230.py index 716a4624b..76d4d3e96 100644 --- a/spacy/tests/regression/test_issue5230.py +++ b/spacy/tests/regression/test_issue5230.py @@ -1,41 +1,28 @@ # coding: utf8 import warnings -import numpy import pytest import srsly - +from numpy import zeros from spacy.kb import KnowledgeBase from spacy.vectors import Vectors + from spacy.language import Language from spacy.pipeline import Pipe from spacy.tests.util import make_tempdir -def test_language_to_disk_resource_warning(): - nlp = Language() - with make_tempdir() as d: - with warnings.catch_warnings(record=True) as w: - # catch only warnings raised in spacy.language since there may be others from other components or pipelines - warnings.filterwarnings( - "always", module="spacy.language", category=ResourceWarning - ) - nlp.to_disk(d) - assert len(w) == 0 +def nlp(): + return Language() -def test_vectors_to_disk_resource_warning(): - data = numpy.zeros((3, 300), dtype="f") +def vectors(): + data = zeros((3, 1), dtype="f") keys = ["cat", "dog", "rat"] - vectors = Vectors(data=data, keys=keys) - with make_tempdir() as d: - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings("always", category=ResourceWarning) - vectors.to_disk(d) - assert len(w) == 0 + return Vectors(data=data, keys=keys) -def test_custom_pipes_to_disk_resource_warning(): +def custom_pipe(): # create dummy pipe partially implementing interface -- only want to test to_disk class SerializableDummy(object): def __init__(self, **cfg): @@ -66,15 +53,10 @@ def test_custom_pipes_to_disk_resource_warning(): self.model = SerializableDummy() self.vocab = SerializableDummy() - pipe = MyPipe(None) - with make_tempdir() as d: - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings("always", category=ResourceWarning) - pipe.to_disk(d) - assert len(w) == 0 + return MyPipe(None) -def test_tagger_to_disk_resource_warning(): +def tagger(): nlp = Language() nlp.add_pipe(nlp.create_pipe("tagger")) tagger = nlp.get_pipe("tagger") @@ -82,15 +64,10 @@ def test_tagger_to_disk_resource_warning(): # 1. no model leads to error in serialization, # 2. 
the affected line is the one for model serialization tagger.begin_training(pipeline=nlp.pipeline) - - with make_tempdir() as d: - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings("always", category=ResourceWarning) - tagger.to_disk(d) - assert len(w) == 0 + return tagger -def test_entity_linker_to_disk_resource_warning(): +def entity_linker(): nlp = Language() nlp.add_pipe(nlp.create_pipe("entity_linker")) entity_linker = nlp.get_pipe("entity_linker") @@ -100,9 +77,17 @@ def test_entity_linker_to_disk_resource_warning(): kb = KnowledgeBase(nlp.vocab, entity_vector_length=1) entity_linker.set_kb(kb) entity_linker.begin_training(pipeline=nlp.pipeline) + return entity_linker + +@pytest.mark.parametrize( + "obj", + [nlp(), vectors(), custom_pipe(), tagger(), entity_linker()], + ids=["nlp", "vectors", "custom_pipe", "tagger", "entity_linker"], +) +def test_to_disk_resource_warning(obj): with make_tempdir() as d: - with warnings.catch_warnings(record=True) as w: + with warnings.catch_warnings(record=True) as warnings_list: warnings.filterwarnings("always", category=ResourceWarning) - entity_linker.to_disk(d) - assert len(w) == 0 + obj.to_disk(d) + assert len(warnings_list) == 0 From b63871ceff4497ca61bd066c8432603bc73c6a8b Mon Sep 17 00:00:00 2001 From: Leander Fiedler Date: Mon, 6 Apr 2020 21:04:06 +0200 Subject: [PATCH 06/19] issue5230: added contributors agreement --- .github/contributors/lfiedler.md | 106 +++++++++++++++++++++++++++++++ 1 file changed, 106 insertions(+) create mode 100644 .github/contributors/lfiedler.md diff --git a/.github/contributors/lfiedler.md b/.github/contributors/lfiedler.md new file mode 100644 index 000000000..61f8ffeb4 --- /dev/null +++ b/.github/contributors/lfiedler.md @@ -0,0 +1,106 @@ +# spaCy contributor agreement + +This spaCy Contributor Agreement (**"SCA"**) is based on the +[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf). +The SCA applies to any contribution that you make to any product or project +managed by us (the **"project"**), and sets out the intellectual property rights +you grant to us in the contributed materials. The term **"us"** shall mean +[ExplosionAI GmbH](https://explosion.ai/legal). The term +**"you"** shall mean the person or entity identified below. + +If you agree to be bound by these terms, fill in the information requested +below and include the filled-in version with your first pull request, under the +folder [`.github/contributors/`](/.github/contributors/). The name of the file +should be your GitHub username, with the extension `.md`. For example, the user +example_user would create the file `.github/contributors/example_user.md`. + +Read this agreement carefully before signing. These terms and conditions +constitute a binding legal agreement. + +## Contributor Agreement + +1. The term "contribution" or "contributed materials" means any source code, +object code, patch, tool, sample, graphic, specification, manual, +documentation, or any other material posted or submitted by you to the project. + +2. With respect to any worldwide copyrights, or copyright applications and +registrations, in your contribution: + + * you hereby assign to us joint ownership, and to the extent that such + assignment is or becomes invalid, ineffective or unenforceable, you hereby + grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge, + royalty-free, unrestricted license to exercise all rights under those + copyrights. 
This includes, at our option, the right to sublicense these same
+    rights to third parties through multiple levels of sublicensees or other
+    licensing arrangements;
+
+    * you agree that each of us can do all things in relation to your
+    contribution as if each of us were the sole owners, and if one of us makes
+    a derivative work of your contribution, the one who makes the derivative
+    work (or has it made) will be the sole owner of that derivative work;
+
+    * you agree that you will not assert any moral rights in your contribution
+    against us, our licensees or transferees;
+
+    * you agree that we may register a copyright in your contribution and
+    exercise all ownership rights associated with it; and
+
+    * you agree that neither of us has any duty to consult with, obtain the
+    consent of, pay or render an accounting to the other for any use or
+    distribution of your contribution.
+
+3. With respect to any patents you own, or that you can license without payment
+to any third party, you hereby grant to us a perpetual, irrevocable,
+non-exclusive, worldwide, no-charge, royalty-free license to:
+
+    * make, have made, use, sell, offer to sell, import, and otherwise transfer
+    your contribution in whole or in part, alone or in combination with or
+    included in any product, work or materials arising out of the project to
+    which your contribution was submitted, and
+
+    * at our option, to sublicense these same rights to third parties through
+    multiple levels of sublicensees or other licensing arrangements.
+
+4. Except as set out above, you keep all right, title, and interest in your
+contribution. The rights that you grant to us under these terms are effective
+on the date you first submitted a contribution to us, even if your submission
+took place before the date you sign these terms.
+
+5. You covenant, represent, warrant and agree that:
+
+    * Each contribution that you submit is and shall be an original work of
+    authorship and you can legally grant the rights set out in this SCA;
+
+    * to the best of your knowledge, each contribution will not violate any
+    third party's copyrights, trademarks, patents, or other intellectual
+    property rights; and
+
+    * each contribution shall be in compliance with U.S. export control laws and
+    other applicable export and import laws. You agree to notify us if you
+    become aware of any circumstance which would make any of the foregoing
+    representations inaccurate in any respect. We may publicly disclose your
+    participation in the project, including the fact that you have signed the SCA.
+
+6. This SCA is governed by the laws of the State of California and applicable
+U.S. Federal law. Any choice of law rules will not apply.
+
+7. Please place an “x” on one of the applicable statements below. Please do NOT
+mark both statements:
+
+    * [x] I am signing on behalf of myself as an individual and no other person
+    or entity, including my employer, has or will have rights with respect to my
+    contributions.
+
+    * [ ] I am signing on behalf of my employer or a legal entity and I have the
+    actual authority to contractually bind that entity.
+
+## Contributor Details
+
+| Field                          | Entry                |
+|------------------------------- | -------------------- |
+| Name                           | Leander Fiedler      |
+| Company name (if applicable)   |                      |
+| Title or role (if applicable)  |                      |
+| Date                           | 06 April 2020        |
+| GitHub username                | lfiedler             |
+| Website (optional)             |                      |
\ No newline at end of file

From e1e25c7e302876b85dc7a95c0f5cf768fbac3f1d Mon Sep 17 00:00:00 2001
From: lfiedler
Date: Mon, 6 Apr 2020 21:36:02 +0200
Subject: [PATCH 07/19] issue5230: added unittest test case for completeness

---
 spacy/tests/regression/test_issue5230.py | 28 +++++++++++++++++++-----
 1 file changed, 23 insertions(+), 5 deletions(-)

diff --git a/spacy/tests/regression/test_issue5230.py b/spacy/tests/regression/test_issue5230.py
index 76d4d3e96..1a03fa0d2 100644
--- a/spacy/tests/regression/test_issue5230.py
+++ b/spacy/tests/regression/test_issue5230.py
@@ -1,5 +1,6 @@
 # coding: utf8
 import warnings
+from unittest import TestCase

 import pytest
 import srsly
@@ -80,14 +81,31 @@ def entity_linker():
     return entity_linker


-@pytest.mark.parametrize(
-    "obj",
+objects_to_test = (
     [nlp(), vectors(), custom_pipe(), tagger(), entity_linker()],
-    ids=["nlp", "vectors", "custom_pipe", "tagger", "entity_linker"],
+    ["nlp", "vectors", "custom_pipe", "tagger", "entity_linker"],
 )
-def test_to_disk_resource_warning(obj):
+
+
+def write_obj_and_catch_warnings(obj):
     with make_tempdir() as d:
         with warnings.catch_warnings(record=True) as warnings_list:
             warnings.filterwarnings("always", category=ResourceWarning)
             obj.to_disk(d)
-            assert len(warnings_list) == 0
+            return warnings_list
+
+
+@pytest.mark.parametrize("obj", objects_to_test[0], ids=objects_to_test[1])
+def test_to_disk_resource_warning(obj):
+    warnings_list = write_obj_and_catch_warnings(obj)
+    assert len(warnings_list) == 0
+
+
+class TestToDiskResourceWarningUnittest(TestCase):
+    def test_resource_warning(self):
+        scenarios = zip(*objects_to_test)
+
+        for scenario in scenarios:
+            with self.subTest(msg=scenario[1]):
+                warnings_list = write_obj_and_catch_warnings(scenario[0])
+                self.assertEqual(len(warnings_list), 0)

From 8c1d0d628fb196abd33859b18a597eb0414e6c55 Mon Sep 17 00:00:00 2001
From: Leander Fiedler
Date: Fri, 10 Apr 2020 20:35:52 +0200
Subject: [PATCH 08/19] issue5230 writer now checks instance of loc parameter
 before trying to operate on it

---
 spacy/kb.pyx                             |  4 ++--
 spacy/tests/regression/test_issue5230.py | 15 ++++++++++++++-
 2 files changed, 16 insertions(+), 3 deletions(-)

diff --git a/spacy/kb.pyx b/spacy/kb.pyx
index 63eb41b42..7c6865eed 100644
--- a/spacy/kb.pyx
+++ b/spacy/kb.pyx
@@ -446,10 +446,10 @@ cdef class KnowledgeBase:

 cdef class Writer:
     def __init__(self, object loc):
-        if path.exists(loc):
-            assert not path.isdir(loc), "%s is directory." % loc
         if isinstance(loc, Path):
             loc = bytes(loc)
+        if path.exists(loc):
+            assert not path.isdir(loc), "%s is directory."
% loc cdef bytes bytes_loc = loc.encode('utf8') if type(loc) == unicode else loc self._fp = fopen(bytes_loc, 'wb') if not self._fp: diff --git a/spacy/tests/regression/test_issue5230.py b/spacy/tests/regression/test_issue5230.py index 1a03fa0d2..b7c6b9b1d 100644 --- a/spacy/tests/regression/test_issue5230.py +++ b/spacy/tests/regression/test_issue5230.py @@ -5,7 +5,7 @@ from unittest import TestCase import pytest import srsly from numpy import zeros -from spacy.kb import KnowledgeBase +from spacy.kb import KnowledgeBase, Writer from spacy.vectors import Vectors from spacy.language import Language @@ -101,6 +101,19 @@ def test_to_disk_resource_warning(obj): assert len(warnings_list) == 0 +def test_writer_with_path_py35(): + writer = None + with make_tempdir() as d: + path = d / "test" + try: + writer = Writer(path) + except Exception as e: + pytest.fail(str(e)) + finally: + if writer: + writer.close() + + class TestToDiskResourceWarningUnittest(TestCase): def test_resource_warning(self): scenarios = zip(*objects_to_test) From a7bdfe42e13bdb2e61edcb3b4bf9203e041ef3f0 Mon Sep 17 00:00:00 2001 From: Leander Fiedler Date: Fri, 10 Apr 2020 21:14:33 +0200 Subject: [PATCH 09/19] issue5230 added print statement to warnings filter to remotely debug failing python35(win) setup --- spacy/tests/regression/test_issue5230.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/spacy/tests/regression/test_issue5230.py b/spacy/tests/regression/test_issue5230.py index b7c6b9b1d..03027fe39 100644 --- a/spacy/tests/regression/test_issue5230.py +++ b/spacy/tests/regression/test_issue5230.py @@ -98,6 +98,8 @@ def write_obj_and_catch_warnings(obj): @pytest.mark.parametrize("obj", objects_to_test[0], ids=objects_to_test[1]) def test_to_disk_resource_warning(obj): warnings_list = write_obj_and_catch_warnings(obj) + for warning in warnings_list: + print(warning.message) assert len(warnings_list) == 0 From 88ca40a15d010fe50da383f4664f8064046f7540 Mon Sep 17 00:00:00 2001 From: Leander Fiedler Date: Fri, 10 Apr 2020 21:45:53 +0200 Subject: [PATCH 10/19] issue5230 raise warnings as errors to remotely debug failing python35(win) setup --- spacy/tests/regression/test_issue5230.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/spacy/tests/regression/test_issue5230.py b/spacy/tests/regression/test_issue5230.py index 03027fe39..adc9307ce 100644 --- a/spacy/tests/regression/test_issue5230.py +++ b/spacy/tests/regression/test_issue5230.py @@ -90,7 +90,7 @@ objects_to_test = ( def write_obj_and_catch_warnings(obj): with make_tempdir() as d: with warnings.catch_warnings(record=True) as warnings_list: - warnings.filterwarnings("always", category=ResourceWarning) + warnings.filterwarnings("error", category=ResourceWarning) obj.to_disk(d) return warnings_list @@ -98,8 +98,6 @@ def write_obj_and_catch_warnings(obj): @pytest.mark.parametrize("obj", objects_to_test[0], ids=objects_to_test[1]) def test_to_disk_resource_warning(obj): warnings_list = write_obj_and_catch_warnings(obj) - for warning in warnings_list: - print(warning.message) assert len(warnings_list) == 0 From ca2a7a44db29b3ffbcf24459a8c0332742c8b676 Mon Sep 17 00:00:00 2001 From: Leander Fiedler Date: Fri, 10 Apr 2020 22:26:55 +0200 Subject: [PATCH 11/19] issue5230 store string values of warnings to remotely debug failing python35(win) setup --- spacy/tests/regression/test_issue5230.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/spacy/tests/regression/test_issue5230.py b/spacy/tests/regression/test_issue5230.py index 
adc9307ce..c78a84ad7 100644
--- a/spacy/tests/regression/test_issue5230.py
+++ b/spacy/tests/regression/test_issue5230.py
@@ -90,9 +90,9 @@ objects_to_test = (
 def write_obj_and_catch_warnings(obj):
     with make_tempdir() as d:
         with warnings.catch_warnings(record=True) as warnings_list:
-            warnings.filterwarnings("error", category=ResourceWarning)
+            warnings.filterwarnings("always", category=ResourceWarning)
             obj.to_disk(d)
-            return warnings_list
+            return list(map(lambda w: w.message, warnings_list))

From d2bb649227ce5a24e53d7526cf7892643eb297c9 Mon Sep 17 00:00:00 2001
From: Leander Fiedler
Date: Fri, 10 Apr 2020 23:21:13 +0200
Subject: [PATCH 12/19] issue5230 filter warnings in addition to
 filterwarnings to prevent deprecation warnings in python35(win) setup from
 popping up

---
 spacy/tests/regression/test_issue5230.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/spacy/tests/regression/test_issue5230.py b/spacy/tests/regression/test_issue5230.py
index c78a84ad7..ae735c7bd 100644
--- a/spacy/tests/regression/test_issue5230.py
+++ b/spacy/tests/regression/test_issue5230.py
@@ -92,7 +92,8 @@ def write_obj_and_catch_warnings(obj):
         with warnings.catch_warnings(record=True) as warnings_list:
             warnings.filterwarnings("always", category=ResourceWarning)
             obj.to_disk(d)
-            return list(map(lambda w: w.message, warnings_list))
+            # in python3.5 it seems that deprecation warnings are not filtered by filterwarnings
+            return list(filter(lambda x: isinstance(x, ResourceWarning), warnings_list))

From d60e2d3ebf33fc0c4280117b08f6e3ef9ad63ff9 Mon Sep 17 00:00:00 2001
From: Leander Fiedler
Date: Sun, 12 Apr 2020 09:08:41 +0200
Subject: [PATCH 13/19] issue5230 added unit test for dumping and loading
 knowledgebase

---
 spacy/tests/regression/test_issue5230.py | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/spacy/tests/regression/test_issue5230.py b/spacy/tests/regression/test_issue5230.py
index ae735c7bd..337c82255 100644
--- a/spacy/tests/regression/test_issue5230.py
+++ b/spacy/tests/regression/test_issue5230.py
@@ -115,6 +115,23 @@ def test_writer_with_path_py35():
             writer.close()

+
+def test_save_and_load_knowledge_base():
+    nlp = Language()
+    kb = KnowledgeBase(nlp.vocab, entity_vector_length=1)
+    with make_tempdir() as d:
+        path = d / "kb"
+        try:
+            kb.dump(path)
+        except Exception as e:
+            pytest.fail(str(e))
+
+        try:
+            kb_loaded = KnowledgeBase(nlp.vocab, entity_vector_length=1)
+            kb_loaded.load_bulk(path)
+        except Exception as e:
+            pytest.fail(str(e))
+

 class TestToDiskResourceWarningUnittest(TestCase):
     def test_resource_warning(self):
         scenarios = zip(*objects_to_test)

From 67000068304b9a125ec792f32bed8491767dbed1 Mon Sep 17 00:00:00 2001
From: Leander Fiedler
Date: Sun, 12 Apr 2020 09:34:54 +0200
Subject: [PATCH 14/19] issue5230 attempted fix of pytest segfault for
 python3.5

---
 spacy/kb.pyx | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/spacy/kb.pyx b/spacy/kb.pyx
index 7c6865eed..14327f0d6 100644
--- a/spacy/kb.pyx
+++ b/spacy/kb.pyx
@@ -491,10 +491,10 @@ cdef class Writer:

 cdef class Reader:
     def __init__(self, object loc):
-        assert path.exists(loc)
-        assert not path.isdir(loc)
         if isinstance(loc, Path):
             loc = bytes(loc)
+        assert path.exists(loc)
+        assert not path.isdir(loc)
         cdef bytes bytes_loc = loc.encode('utf8') if type(loc) == unicode else loc
         self._fp = fopen(bytes_loc, 'rb')
         if not self._fp:
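The failure mode all of the patches above are chasing can be reproduced without spaCy: a file handle opened inline and never closed is only closed when the garbage collector finalizes the file object, and that finalization is what emits the ResourceWarning the regression tests listen for. A minimal standalone sketch of both the leak and the test technique, using only the standard library (the file names are illustrative):

```python
import gc
import tempfile
import warnings
from pathlib import Path


def write_leaky(path):
    # The handle returned by open() is never closed explicitly; it is only
    # closed when the file object is garbage-collected, which is when
    # CPython emits a ResourceWarning.
    path.open("wb").write(b"data")


def write_clean(path):
    # The context manager closes the handle deterministically.
    with path.open("wb") as file_:
        file_.write(b"data")


with tempfile.TemporaryDirectory() as tmp:
    with warnings.catch_warnings(record=True) as caught:
        # ResourceWarning is ignored by default, so opt in explicitly,
        # mirroring the filter used in test_issue5230.py.
        warnings.filterwarnings("always", category=ResourceWarning)
        write_leaky(Path(tmp) / "leaky.bin")
        gc.collect()  # make sure the abandoned handle is finalized promptly
        write_clean(Path(tmp) / "clean.bin")
        gc.collect()
    leaks = [w for w in caught if issubclass(w.category, ResourceWarning)]
    print(len(leaks))  # expected: 1 on CPython, from write_leaky() only
```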
From cef0c909b9dc1afd37511db4cbfd1863f27a371a Mon Sep 17 00:00:00 2001
From: Leander Fiedler
Date: Wed, 15 Apr 2020 19:28:33 +0200
Subject: [PATCH 15/19] issue5230 changed reference to function to anonymous
 function

---
 spacy/pipeline/pipes.pyx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/spacy/pipeline/pipes.pyx b/spacy/pipeline/pipes.pyx
index ce95b2752..8af76a0fb 100644
--- a/spacy/pipeline/pipes.pyx
+++ b/spacy/pipeline/pipes.pyx
@@ -202,7 +202,7 @@ class Pipe(object):
         serialize["cfg"] = lambda p: srsly.write_json(p, self.cfg)
         serialize["vocab"] = lambda p: self.vocab.to_disk(p)
         if self.model not in (None, True, False):
-            serialize["model"] = self.model.to_disk
+            serialize["model"] = lambda p: self.model.to_disk(p)
         exclude = util.get_serialization_exclude(serialize, exclude, kwargs)
         util.to_disk(path, serialize, exclude)

From a3401b11946b9aba06dd3e83a1877c156e7ddeb4 Mon Sep 17 00:00:00 2001
From: Leander Fiedler
Date: Wed, 15 Apr 2020 21:52:52 +0200
Subject: [PATCH 16/19] issue5230 changed reference to function to anonymous
 function

---
 spacy/pipeline/pipes.pyx | 4 ++--
 spacy/vectors.pyx        | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/spacy/pipeline/pipes.pyx b/spacy/pipeline/pipes.pyx
index 8af76a0fb..fc077fc82 100644
--- a/spacy/pipeline/pipes.pyx
+++ b/spacy/pipeline/pipes.pyx
@@ -625,7 +625,7 @@ class Tagger(Pipe):
         serialize = OrderedDict((
             ("vocab", lambda p: self.vocab.to_disk(p)),
             ("tag_map", lambda p: srsly.write_msgpack(p, tag_map)),
-            ("model", self.model.to_disk),
+            ("model", lambda p: self.model.to_disk(p)),
             ("cfg", lambda p: srsly.write_json(p, self.cfg))
         ))
         exclude = util.get_serialization_exclude(serialize, exclude, kwargs)
@@ -1394,7 +1394,7 @@ class EntityLinker(Pipe):
         serialize["vocab"] = lambda p: self.vocab.to_disk(p)
         serialize["kb"] = lambda p: self.kb.dump(p)
         if self.model not in (None, True, False):
-            serialize["model"] = self.model.to_disk
+            serialize["model"] = lambda p: self.model.to_disk(p)
         exclude = util.get_serialization_exclude(serialize, exclude, kwargs)
         util.to_disk(path, serialize, exclude)
diff --git a/spacy/vectors.pyx b/spacy/vectors.pyx
index 62d176c6c..2877d2d7d 100644
--- a/spacy/vectors.pyx
+++ b/spacy/vectors.pyx
@@ -385,7 +385,7 @@ cdef class Vectors:
             save_array(self.data, _file)

         serializers = OrderedDict((
-            ("vectors", save_vectors),
+            ("vectors", lambda p: save_vectors(p)),
             ("key2row", lambda p: srsly.write_msgpack(p, self.key2row))
         ))
         return util.to_disk(path, serializers, [])

From cb02bff0ebe31ab0d3b13fad9fcd2424c09f6c4b Mon Sep 17 00:00:00 2001
From: Ines Montani
Date: Thu, 21 May 2020 20:24:07 +0200
Subject: [PATCH 17/19] Add blank:{lang} shortcut to util.load_model

---
 spacy/tests/test_misc.py | 11 +++++++++++
 spacy/util.py            |  2 ++
 2 files changed, 13 insertions(+)

diff --git a/spacy/tests/test_misc.py b/spacy/tests/test_misc.py
index 4075ccf64..3ac621649 100644
--- a/spacy/tests/test_misc.py
+++ b/spacy/tests/test_misc.py
@@ -135,3 +135,14 @@ def test_ascii_filenames():
     root = Path(__file__).parent.parent
     for path in root.glob("**/*"):
         assert all(ord(c) < 128 for c in path.name), path.name
+
+
+def test_load_model_blank_shortcut():
+    """Test that using a model name like "blank:en" works as a shortcut for
+    spacy.blank("en").
+ """ + nlp = util.load_model("blank:en") + assert nlp.lang == "en" + assert nlp.pipeline == [] + with pytest.raises(ImportError): + util.load_model("blank:fjsfijsdof") diff --git a/spacy/util.py b/spacy/util.py index 419c99bc0..5fd296404 100644 --- a/spacy/util.py +++ b/spacy/util.py @@ -161,6 +161,8 @@ def load_model(name, **overrides): if not data_path or not data_path.exists(): raise IOError(Errors.E049.format(path=path2str(data_path))) if isinstance(name, basestring_): # in data dir / shortcut + if name.startswith("blank:"): # shortcut for blank model + return get_lang_class(name.replace("blank:", ""))() if name in set([d.name for d in data_path.iterdir()]): return load_model_from_link(name, **overrides) if is_package(name): # installed as package From 71fe61fdcd6c04de739391251bb346ba1de94e4e Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Fri, 22 May 2020 10:14:34 +0200 Subject: [PATCH 18/19] Disallow merging 0-length spans --- spacy/errors.py | 1 + spacy/tests/doc/test_retokenize_merge.py | 7 +++++++ spacy/tokens/_retokenize.pyx | 2 ++ 3 files changed, 10 insertions(+) diff --git a/spacy/errors.py b/spacy/errors.py index aca94d64e..6d92545d7 100644 --- a/spacy/errors.py +++ b/spacy/errors.py @@ -567,6 +567,7 @@ class Errors(object): E197 = ("Row out of bounds, unable to add row {row} for key {key}.") E198 = ("Unable to return {n} most similar vectors for the current vectors " "table, which contains {n_rows} vectors.") + E199 = ("Unable to merge 0-length span at doc[{start}:{end}].") @add_codes diff --git a/spacy/tests/doc/test_retokenize_merge.py b/spacy/tests/doc/test_retokenize_merge.py index 5bdf78f39..636b7bb14 100644 --- a/spacy/tests/doc/test_retokenize_merge.py +++ b/spacy/tests/doc/test_retokenize_merge.py @@ -425,3 +425,10 @@ def test_retokenize_skip_duplicates(en_vocab): retokenizer.merge(doc[0:2]) assert len(doc) == 2 assert doc[0].text == "hello world" + + +def test_retokenize_disallow_zero_length(en_vocab): + doc = Doc(en_vocab, words=["hello", "world", "!"]) + with pytest.raises(ValueError): + with doc.retokenize() as retokenizer: + retokenizer.merge(doc[1:1]) diff --git a/spacy/tokens/_retokenize.pyx b/spacy/tokens/_retokenize.pyx index 512ad73bc..ce8e510d6 100644 --- a/spacy/tokens/_retokenize.pyx +++ b/spacy/tokens/_retokenize.pyx @@ -55,6 +55,8 @@ cdef class Retokenizer: """ if (span.start, span.end) in self._spans_to_merge: return + if span.end - span.start <= 0: + raise ValueError(Errors.E199.format(start=span.start, end=span.end)) for token in span: if token.i in self.tokens_to_merge: raise ValueError(Errors.E102.format(token=repr(token))) From 65c7e82de24739977d7ca775d585cacc7dc25cd5 Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Fri, 22 May 2020 13:50:30 +0200 Subject: [PATCH 19/19] Auto-format and remove 2.3 feature [ci skip] --- website/docs/api/token.md | 150 +++++++++++++++++--------------------- 1 file changed, 67 insertions(+), 83 deletions(-) diff --git a/website/docs/api/token.md b/website/docs/api/token.md index 69dac23d6..0fa86b7bc 100644 --- a/website/docs/api/token.md +++ b/website/docs/api/token.md @@ -351,25 +351,9 @@ property to `0` for the first word of the document. - assert doc[4].sent_start == 1 + assert doc[4].is_sent_start == True ``` + -## Token.is_sent_end {#is_sent_end tag="property" new="2"} - -A boolean value indicating whether the token ends a sentence. `None` if -unknown. Defaults to `True` for the last token in the `Doc`. - -> #### Example -> -> ```python -> doc = nlp("Give it back! 
He pleaded.") -> assert doc[3].is_sent_end -> assert not doc[4].is_sent_end -> ``` - -| Name | Type | Description | -| ----------- | ---- | ------------------------------------ | -| **RETURNS** | bool | Whether the token ends a sentence. | - ## Token.has_vector {#has_vector tag="property" model="vectors"} A boolean value indicating whether a word vector is associated with the token. @@ -424,71 +408,71 @@ The L2 norm of the token's vector representation. ## Attributes {#attributes} -| Name | Type | Description | -| -------------------------------------------- | ------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `doc` | `Doc` | The parent document. | -| `sent` 2.0.12 | `Span` | The sentence span that this token is a part of. | -| `text` | unicode | Verbatim text content. | -| `text_with_ws` | unicode | Text content, with trailing space character if present. | -| `whitespace_` | unicode | Trailing space character if present. | -| `orth` | int | ID of the verbatim text content. | -| `orth_` | unicode | Verbatim text content (identical to `Token.text`). Exists mostly for consistency with the other attributes. | -| `vocab` | `Vocab` | The vocab object of the parent `Doc`. | -| `tensor` 2.1.7 | `ndarray` | The tokens's slice of the parent `Doc`'s tensor. | -| `head` | `Token` | The syntactic parent, or "governor", of this token. | -| `left_edge` | `Token` | The leftmost token of this token's syntactic descendants. | -| `right_edge` | `Token` | The rightmost token of this token's syntactic descendants. | -| `i` | int | The index of the token within the parent document. | -| `ent_type` | int | Named entity type. | -| `ent_type_` | unicode | Named entity type. | -| `ent_iob` | int | IOB code of named entity tag. `3` means the token begins an entity, `2` means it is outside an entity, `1` means it is inside an entity, and `0` means no entity tag is set. | -| `ent_iob_` | unicode | IOB code of named entity tag. "B" means the token begins an entity, "I" means it is inside an entity, "O" means it is outside an entity, and "" means no entity tag is set. | -| `ent_kb_id` 2.2 | int | Knowledge base ID that refers to the named entity this token is a part of, if any. | -| `ent_kb_id_` 2.2 | unicode | Knowledge base ID that refers to the named entity this token is a part of, if any. | -| `ent_id` | int | ID of the entity the token is an instance of, if any. Currently not used, but potentially for coreference resolution. | -| `ent_id_` | unicode | ID of the entity the token is an instance of, if any. Currently not used, but potentially for coreference resolution. | -| `lemma` | int | Base form of the token, with no inflectional suffixes. | -| `lemma_` | unicode | Base form of the token, with no inflectional suffixes. | -| `norm` | int | The token's norm, i.e. a normalized form of the token text. Usually set in the language's [tokenizer exceptions](/usage/adding-languages#tokenizer-exceptions) or [norm exceptions](/usage/adding-languages#norm-exceptions). | -| `norm_` | unicode | The token's norm, i.e. a normalized form of the token text. Usually set in the language's [tokenizer exceptions](/usage/adding-languages#tokenizer-exceptions) or [norm exceptions](/usage/adding-languages#norm-exceptions). | -| `lower` | int | Lowercase form of the token. 
| -| `lower_` | unicode | Lowercase form of the token text. Equivalent to `Token.text.lower()`. | +| Name | Type | Description | +| -------------------------------------------- | ------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `doc` | `Doc` | The parent document. | +| `sent` 2.0.12 | `Span` | The sentence span that this token is a part of. | +| `text` | unicode | Verbatim text content. | +| `text_with_ws` | unicode | Text content, with trailing space character if present. | +| `whitespace_` | unicode | Trailing space character if present. | +| `orth` | int | ID of the verbatim text content. | +| `orth_` | unicode | Verbatim text content (identical to `Token.text`). Exists mostly for consistency with the other attributes. | +| `vocab` | `Vocab` | The vocab object of the parent `Doc`. | +| `tensor` 2.1.7 | `ndarray` | The tokens's slice of the parent `Doc`'s tensor. | +| `head` | `Token` | The syntactic parent, or "governor", of this token. | +| `left_edge` | `Token` | The leftmost token of this token's syntactic descendants. | +| `right_edge` | `Token` | The rightmost token of this token's syntactic descendants. | +| `i` | int | The index of the token within the parent document. | +| `ent_type` | int | Named entity type. | +| `ent_type_` | unicode | Named entity type. | +| `ent_iob` | int | IOB code of named entity tag. `3` means the token begins an entity, `2` means it is outside an entity, `1` means it is inside an entity, and `0` means no entity tag is set. | +| `ent_iob_` | unicode | IOB code of named entity tag. "B" means the token begins an entity, "I" means it is inside an entity, "O" means it is outside an entity, and "" means no entity tag is set. | +| `ent_kb_id` 2.2 | int | Knowledge base ID that refers to the named entity this token is a part of, if any. | +| `ent_kb_id_` 2.2 | unicode | Knowledge base ID that refers to the named entity this token is a part of, if any. | +| `ent_id` | int | ID of the entity the token is an instance of, if any. Currently not used, but potentially for coreference resolution. | +| `ent_id_` | unicode | ID of the entity the token is an instance of, if any. Currently not used, but potentially for coreference resolution. | +| `lemma` | int | Base form of the token, with no inflectional suffixes. | +| `lemma_` | unicode | Base form of the token, with no inflectional suffixes. | +| `norm` | int | The token's norm, i.e. a normalized form of the token text. Usually set in the language's [tokenizer exceptions](/usage/adding-languages#tokenizer-exceptions) or [norm exceptions](/usage/adding-languages#norm-exceptions). | +| `norm_` | unicode | The token's norm, i.e. a normalized form of the token text. Usually set in the language's [tokenizer exceptions](/usage/adding-languages#tokenizer-exceptions) or [norm exceptions](/usage/adding-languages#norm-exceptions). | +| `lower` | int | Lowercase form of the token. | +| `lower_` | unicode | Lowercase form of the token text. Equivalent to `Token.text.lower()`. | | `shape` | int | Transform of the tokens's string, to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example,`"Xxxx"`or`"dd"`. 
| | `shape_` | unicode | Transform of the tokens's string, to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example,`"Xxxx"`or`"dd"`. | -| `prefix` | int | Hash value of a length-N substring from the start of the token. Defaults to `N=1`. | -| `prefix_` | unicode | A length-N substring from the start of the token. Defaults to `N=1`. | -| `suffix` | int | Hash value of a length-N substring from the end of the token. Defaults to `N=3`. | -| `suffix_` | unicode | Length-N substring from the end of the token. Defaults to `N=3`. | -| `is_alpha` | bool | Does the token consist of alphabetic characters? Equivalent to `token.text.isalpha()`. | -| `is_ascii` | bool | Does the token consist of ASCII characters? Equivalent to `all(ord(c) < 128 for c in token.text)`. | -| `is_digit` | bool | Does the token consist of digits? Equivalent to `token.text.isdigit()`. | -| `is_lower` | bool | Is the token in lowercase? Equivalent to `token.text.islower()`. | -| `is_upper` | bool | Is the token in uppercase? Equivalent to `token.text.isupper()`. | -| `is_title` | bool | Is the token in titlecase? Equivalent to `token.text.istitle()`. | -| `is_punct` | bool | Is the token punctuation? | -| `is_left_punct` | bool | Is the token a left punctuation mark, e.g. `'('` ? | -| `is_right_punct` | bool | Is the token a right punctuation mark, e.g. `')'` ? | -| `is_space` | bool | Does the token consist of whitespace characters? Equivalent to `token.text.isspace()`. | -| `is_bracket` | bool | Is the token a bracket? | -| `is_quote` | bool | Is the token a quotation mark? | -| `is_currency` 2.0.8 | bool | Is the token a currency symbol? | -| `like_url` | bool | Does the token resemble a URL? | -| `like_num` | bool | Does the token represent a number? e.g. "10.9", "10", "ten", etc. | -| `like_email` | bool | Does the token resemble an email address? | -| `is_oov` | bool | Is the token out-of-vocabulary? | -| `is_stop` | bool | Is the token part of a "stop list"? | -| `pos` | int | Coarse-grained part-of-speech from the [Universal POS tag set](https://universaldependencies.org/docs/u/pos/). | -| `pos_` | unicode | Coarse-grained part-of-speech from the [Universal POS tag set](https://universaldependencies.org/docs/u/pos/). | -| `tag` | int | Fine-grained part-of-speech. | -| `tag_` | unicode | Fine-grained part-of-speech. | -| `dep` | int | Syntactic dependency relation. | -| `dep_` | unicode | Syntactic dependency relation. | -| `lang` | int | Language of the parent document's vocabulary. | -| `lang_` | unicode | Language of the parent document's vocabulary. | -| `prob` | float | Smoothed log probability estimate of token's word type (context-independent entry in the vocabulary). | -| `idx` | int | The character offset of the token within the parent document. | -| `sentiment` | float | A scalar value indicating the positivity or negativity of the token. | -| `lex_id` | int | Sequential ID of the token's lexical type, used to index into tables, e.g. for word vectors. | -| `rank` | int | Sequential ID of the token's lexical type, used to index into tables, e.g. for word vectors. | -| `cluster` | int | Brown cluster ID. | -| `_` | `Underscore` | User space for adding custom [attribute extensions](/usage/processing-pipelines#custom-components-attributes). | +| `prefix` | int | Hash value of a length-N substring from the start of the token. Defaults to `N=1`. 
| +| `prefix_` | unicode | A length-N substring from the start of the token. Defaults to `N=1`. | +| `suffix` | int | Hash value of a length-N substring from the end of the token. Defaults to `N=3`. | +| `suffix_` | unicode | Length-N substring from the end of the token. Defaults to `N=3`. | +| `is_alpha` | bool | Does the token consist of alphabetic characters? Equivalent to `token.text.isalpha()`. | +| `is_ascii` | bool | Does the token consist of ASCII characters? Equivalent to `all(ord(c) < 128 for c in token.text)`. | +| `is_digit` | bool | Does the token consist of digits? Equivalent to `token.text.isdigit()`. | +| `is_lower` | bool | Is the token in lowercase? Equivalent to `token.text.islower()`. | +| `is_upper` | bool | Is the token in uppercase? Equivalent to `token.text.isupper()`. | +| `is_title` | bool | Is the token in titlecase? Equivalent to `token.text.istitle()`. | +| `is_punct` | bool | Is the token punctuation? | +| `is_left_punct` | bool | Is the token a left punctuation mark, e.g. `'('` ? | +| `is_right_punct` | bool | Is the token a right punctuation mark, e.g. `')'` ? | +| `is_space` | bool | Does the token consist of whitespace characters? Equivalent to `token.text.isspace()`. | +| `is_bracket` | bool | Is the token a bracket? | +| `is_quote` | bool | Is the token a quotation mark? | +| `is_currency` 2.0.8 | bool | Is the token a currency symbol? | +| `like_url` | bool | Does the token resemble a URL? | +| `like_num` | bool | Does the token represent a number? e.g. "10.9", "10", "ten", etc. | +| `like_email` | bool | Does the token resemble an email address? | +| `is_oov` | bool | Is the token out-of-vocabulary? | +| `is_stop` | bool | Is the token part of a "stop list"? | +| `pos` | int | Coarse-grained part-of-speech from the [Universal POS tag set](https://universaldependencies.org/docs/u/pos/). | +| `pos_` | unicode | Coarse-grained part-of-speech from the [Universal POS tag set](https://universaldependencies.org/docs/u/pos/). | +| `tag` | int | Fine-grained part-of-speech. | +| `tag_` | unicode | Fine-grained part-of-speech. | +| `dep` | int | Syntactic dependency relation. | +| `dep_` | unicode | Syntactic dependency relation. | +| `lang` | int | Language of the parent document's vocabulary. | +| `lang_` | unicode | Language of the parent document's vocabulary. | +| `prob` | float | Smoothed log probability estimate of token's word type (context-independent entry in the vocabulary). | +| `idx` | int | The character offset of the token within the parent document. | +| `sentiment` | float | A scalar value indicating the positivity or negativity of the token. | +| `lex_id` | int | Sequential ID of the token's lexical type, used to index into tables, e.g. for word vectors. | +| `rank` | int | Sequential ID of the token's lexical type, used to index into tables, e.g. for word vectors. | +| `cluster` | int | Brown cluster ID. | +| `_` | `Underscore` | User space for adding custom [attribute extensions](/usage/processing-pipelines#custom-components-attributes). |
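The two user-visible behavior changes in the last three patches can be exercised directly. A minimal usage sketch, assuming a spaCy 2.x build with these patches applied (the input text is illustrative):

```python
from spacy import util

# Patch 17: "blank:{lang}" is now a loader shortcut for spacy.blank(lang).
nlp = util.load_model("blank:en")
assert nlp.lang == "en"
assert nlp.pipeline == []

# Patch 18: merging a zero-length span is now rejected with error E199.
doc = nlp("Give it back! He pleaded.")
try:
    with doc.retokenize() as retokenizer:
        retokenizer.merge(doc[1:1])  # zero-length span
except ValueError as err:
    print(err)  # e.g. "[E199] Unable to merge 0-length span at doc[1:1]."
```

Passing an unknown language code, e.g. `util.load_model("blank:fjsfijsdof")`, raises `ImportError`, as the new test in `spacy/tests/test_misc.py` asserts.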