From b47b4e2654f69498b68c06a0b6464db4e924d268 Mon Sep 17 00:00:00 2001 From: Ramanan Balakrishnan Date: Wed, 18 Oct 2017 14:43:47 +0530 Subject: [PATCH 1/7] Support single value for attribute list in doc.to_scalar conversion --- spacy/tokens/doc.pyx | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/spacy/tokens/doc.pyx b/spacy/tokens/doc.pyx index aca35a73f..9a644b86d 100644 --- a/spacy/tokens/doc.pyx +++ b/spacy/tokens/doc.pyx @@ -496,13 +496,19 @@ cdef class Doc: cdef int i, j cdef attr_id_t feature cdef np.ndarray[attr_t, ndim=2] output + cdef np.ndarray[attr_t, ndim=1] output_1D # Make an array from the attributes --- otherwise our inner loop is Python # dict iteration. + if( type(py_attr_ids) is not list ): + py_attr_ids = [ py_attr_ids ] cdef np.ndarray[attr_t, ndim=1] attr_ids = numpy.asarray(py_attr_ids, dtype=numpy.int32) output = numpy.ndarray(shape=(self.length, len(attr_ids)), dtype=numpy.int32) for i in range(self.length): for j, feature in enumerate(attr_ids): output[i, j] = get_token_attr(&self.c[i], feature) + if( len(attr_ids) == 1 ): + output_1D = output.reshape((self.length)) + return output_1D return output def count_by(self, attr_id_t attr_id, exclude=None, PreshCounter counts=None): From 5941aa96a12771ec3ca500c4df68b7cea0c25af1 Mon Sep 17 00:00:00 2001 From: Ramanan Balakrishnan Date: Wed, 18 Oct 2017 15:52:17 +0530 Subject: [PATCH 2/7] Support strings for attribute list in doc.to_array --- .github/contributors/ramananbalakrishnan.md | 106 ++++++++++++++++++++ spacy/tests/doc/test_array.py | 20 ++++ spacy/tokens/doc.pyx | 29 ++++-- 3 files changed, 146 insertions(+), 9 deletions(-) create mode 100644 .github/contributors/ramananbalakrishnan.md diff --git a/.github/contributors/ramananbalakrishnan.md b/.github/contributors/ramananbalakrishnan.md new file mode 100644 index 000000000..37492fb3d --- /dev/null +++ b/.github/contributors/ramananbalakrishnan.md @@ -0,0 +1,106 @@ +# spaCy contributor agreement + +This spaCy Contributor Agreement (**"SCA"**) is based on the +[Oracle Contributor Agreement](http://www.oracle.com/technetwork/oca-405177.pdf). +The SCA applies to any contribution that you make to any product or project +managed by us (the **"project"**), and sets out the intellectual property rights +you grant to us in the contributed materials. The term **"us"** shall mean +[ExplosionAI UG (haftungsbeschränkt)](https://explosion.ai/legal). The term +**"you"** shall mean the person or entity identified below. + +If you agree to be bound by these terms, fill in the information requested +below and include the filled-in version with your first pull request, under the +folder [`.github/contributors/`](/.github/contributors/). The name of the file +should be your GitHub username, with the extension `.md`. For example, the user +example_user would create the file `.github/contributors/example_user.md`. + +Read this agreement carefully before signing. These terms and conditions +constitute a binding legal agreement. + +## Contributor Agreement + +1. The term "contribution" or "contributed materials" means any source code, +object code, patch, tool, sample, graphic, specification, manual, +documentation, or any other material posted or submitted by you to the project. + +2. 
With respect to any worldwide copyrights, or copyright applications and +registrations, in your contribution: + + * you hereby assign to us joint ownership, and to the extent that such + assignment is or becomes invalid, ineffective or unenforceable, you hereby + grant to us a perpetual, irrevocable, non-exclusive, worldwide, no-charge, + royalty-free, unrestricted license to exercise all rights under those + copyrights. This includes, at our option, the right to sublicense these same + rights to third parties through multiple levels of sublicensees or other + licensing arrangements; + + * you agree that each of us can do all things in relation to your + contribution as if each of us were the sole owners, and if one of us makes + a derivative work of your contribution, the one who makes the derivative + work (or has it made will be the sole owner of that derivative work; + + * you agree that you will not assert any moral rights in your contribution + against us, our licensees or transferees; + + * you agree that we may register a copyright in your contribution and + exercise all ownership rights associated with it; and + + * you agree that neither of us has any duty to consult with, obtain the + consent of, pay or render an accounting to the other for any use or + distribution of your contribution. + +3. With respect to any patents you own, or that you can license without payment +to any third party, you hereby grant to us a perpetual, irrevocable, +non-exclusive, worldwide, no-charge, royalty-free license to: + + * make, have made, use, sell, offer to sell, import, and otherwise transfer + your contribution in whole or in part, alone or in combination with or + included in any product, work or materials arising out of the project to + which your contribution was submitted, and + + * at our option, to sublicense these same rights to third parties through + multiple levels of sublicensees or other licensing arrangements. + +4. Except as set out above, you keep all right, title, and interest in your +contribution. The rights that you grant to us under these terms are effective +on the date you first submitted a contribution to us, even if your submission +took place before the date you sign these terms. + +5. You covenant, represent, warrant and agree that: + + * Each contribution that you submit is and shall be an original work of + authorship and you can legally grant the rights set out in this SCA; + + * to the best of your knowledge, each contribution will not violate any + third party's copyrights, trademarks, patents, or other intellectual + property rights; and + + * each contribution shall be in compliance with U.S. export control laws and + other applicable export and import laws. You agree to notify us if you + become aware of any circumstance which would make any of the foregoing + representations inaccurate in any respect. We may publicly disclose your + participation in the project, including the fact that you have signed the SCA. + +6. This SCA is governed by the laws of the State of California and applicable +U.S. Federal law. Any choice of law rules will not apply. + +7. Please place an “x” on one of the applicable statement below. Please do NOT +mark both statements: + + * [x] I am signing on behalf of myself as an individual and no other person + or entity, including my employer, has or will have rights with respect to my + contributions. + + * [ ] I am signing on behalf of my employer or a legal entity and I have the + actual authority to contractually bind that entity. 
+ +## Contributor Details + +| Field | Entry | +|------------------------------- | -------------------- | +| Name | Ramanan Balakrishnan | +| Company name (if applicable) | | +| Title or role (if applicable) | | +| Date | 2017-10-18 | +| GitHub username | ramananbalakrishnan | +| Website (optional) | | diff --git a/spacy/tests/doc/test_array.py b/spacy/tests/doc/test_array.py index dd87aa763..ff10394d1 100644 --- a/spacy/tests/doc/test_array.py +++ b/spacy/tests/doc/test_array.py @@ -17,6 +17,26 @@ def test_doc_array_attr_of_token(en_tokenizer, en_vocab): assert feats_array[0][0] != feats_array[0][1] +def test_doc_stringy_array_attr_of_token(en_tokenizer, en_vocab): + text = "An example sentence" + tokens = en_tokenizer(text) + example = tokens.vocab["example"] + assert example.orth != example.shape + feats_array = tokens.to_array((ORTH, SHAPE)) + feats_array_stringy = tokens.to_array(("ORTH", "SHAPE")) + assert feats_array_stringy[0][0] == feats_array[0][0] + assert feats_array_stringy[0][1] == feats_array[0][1] + + +def test_doc_scalar_attr_of_token(en_tokenizer, en_vocab): + text = "An example sentence" + tokens = en_tokenizer(text) + example = tokens.vocab["example"] + assert example.orth != example.shape + feats_array = tokens.to_array(ORTH) + assert feats_array.shape == (3,) + + def test_doc_array_tag(en_tokenizer): text = "A nice sentence." pos = ['DET', 'ADJ', 'NOUN', 'PUNCT'] diff --git a/spacy/tokens/doc.pyx b/spacy/tokens/doc.pyx index 9a644b86d..4f3b06946 100644 --- a/spacy/tokens/doc.pyx +++ b/spacy/tokens/doc.pyx @@ -16,6 +16,7 @@ from .token cimport Token from ..lexeme cimport Lexeme from ..lexeme cimport EMPTY_LEXEME from ..typedefs cimport attr_t, flags_t +from ..attrs import IDS from ..attrs cimport attr_id_t from ..attrs cimport ID, ORTH, NORM, LOWER, SHAPE, PREFIX, SUFFIX, LENGTH, CLUSTER from ..attrs cimport POS, LEMMA, TAG, DEP, HEAD, SPACY, ENT_IOB, ENT_TYPE @@ -474,10 +475,13 @@ cdef class Doc: @cython.boundscheck(False) cpdef np.ndarray to_array(self, object py_attr_ids): - """ - Given a list of M attribute IDs, export the tokens to a numpy - `ndarray` of shape (N, M), where `N` is the length - of the document. The values will be 32-bit integers. + """Export given token attributes to a numpy `ndarray`. + + If `attr_ids` is a sequence of M attributes, the output array will + be of shape `(N, M)`, where N is the length of the `Doc` + (in tokens). If `attr_ids` is a single attribute, the output shape will + be (N,). You can specify attributes by integer ID (e.g. spacy.attrs.LEMMA) + or string name (e.g. 'LEMMA' or 'lemma'). Example: from spacy import attrs @@ -486,22 +490,29 @@ cdef class Doc: np_array = doc.to_array([attrs.LOWER, attrs.POS, attrs.ENT_TYPE, attrs.IS_ALPHA]) Arguments: - attr_ids (list[int]): A list of attribute ID ints. + attr_ids (list[]): A list of attributes (int IDs or string names). Returns: feat_array (numpy.ndarray[long, ndim=2]): A feature matrix, with one row per word, and one column per attribute - indicated in the input attr_ids. + indicated in the input `attr_ids`. 
""" cdef int i, j cdef attr_id_t feature cdef np.ndarray[attr_t, ndim=2] output cdef np.ndarray[attr_t, ndim=1] output_1D + # Handle scalar/list inputs of strings/ints for py_attr_ids + if( type(py_attr_ids) is not list and type(py_attr_ids) is not tuple ): + py_attr_ids = [ py_attr_ids ] + py_attr_ids_input = [] + for py_attr_id in py_attr_ids: + if( type(py_attr_id) is int ): + py_attr_ids_input.append(py_attr_id) + else: + py_attr_ids_input.append(IDS[py_attr_id.upper()]) # Make an array from the attributes --- otherwise our inner loop is Python # dict iteration. - if( type(py_attr_ids) is not list ): - py_attr_ids = [ py_attr_ids ] - cdef np.ndarray[attr_t, ndim=1] attr_ids = numpy.asarray(py_attr_ids, dtype=numpy.int32) + cdef np.ndarray[attr_t, ndim=1] attr_ids = numpy.asarray(py_attr_ids_input, dtype=numpy.int32) output = numpy.ndarray(shape=(self.length, len(attr_ids)), dtype=numpy.int32) for i in range(self.length): for j, feature in enumerate(attr_ids): From fbccc8c87d5456bb6b84730cbdd69abcccc64142 Mon Sep 17 00:00:00 2001 From: Ramanan Balakrishnan Date: Fri, 20 Oct 2017 14:23:48 +0530 Subject: [PATCH 3/7] Update documentation on doc.to_array --- website/docs/api/doc.jade | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/website/docs/api/doc.jade b/website/docs/api/doc.jade index 1c2911f52..59752b2a0 100644 --- a/website/docs/api/doc.jade +++ b/website/docs/api/doc.jade @@ -176,9 +176,14 @@ p +tag method p - | Export the document annotations to a numpy array of shape #[code N*M] - | where #[code N] is the length of the document and #[code M] is the number - | of attribute IDs to export. The values will be 32-bit integers. + | Export given token attributes to a numpy #[code ndarray]. + | If #[code attr_ids] is a sequence of #[code M] attributes, + | the output array will be of shape #[code (N, M)], where #[code N] + | is the length of the #[code Doc] (in tokens). If #[code attr_ids] is + | a single attribute, the output shape will be #[code (N,)]. You can + | specify attributes by integer ID (e.g. #[code spacy.attrs.LEMMA]) + | or string name (e.g. 'LEMMA' or 'lemma'). The values will be 32-bit + | integers. +aside-code("Example"). from spacy import attrs @@ -186,19 +191,26 @@ p # All strings mapped to integers, for easy export to numpy np_array = doc.to_array([attrs.LOWER, attrs.POS, attrs.ENT_TYPE, attrs.IS_ALPHA]) + np_array = doc.to_array("POS") +table(["Name", "Type", "Description"]) +row +cell #[code attr_ids] - +cell ints - +cell A list of attribute ID ints. + +cell int or string + +cell + | A list of attributes (int IDs or string names) or + | a single attribute (int ID or string name) +footrow +cell return - +cell #[code numpy.ndarray[ndim=2, dtype='int32']] + +cell + | #[code numpy.ndarray[ndim=2, dtype='int32']] or + | #[code numpy.ndarray[ndim=1, dtype='int32']] +cell | The exported attributes as a 2D numpy array, with one row per - | token and one column per attribute. + | token and one column per attribute (when #[code attr_ids] is a + | list), or as a 1D numpy array, with one item per attribute (when + | #[code attr_ids] is a single value). +h(2, "count_by") Doc.count_by +tag method From c0799430a7d126dcc0105898fca29d3e5ceff50a Mon Sep 17 00:00:00 2001 From: Matthew Honnibal Date: Fri, 20 Oct 2017 11:17:00 +0200 Subject: [PATCH 4/7] Make small changes to Doc.to_array * Change type-check logic to 'hasattr' (Python type-checking is brittle) * Small 'house style' edits, mostly making code more terse. 
--- spacy/tokens/doc.pyx | 37 +++++++++++++++++-------------------- 1 file changed, 17 insertions(+), 20 deletions(-) diff --git a/spacy/tokens/doc.pyx b/spacy/tokens/doc.pyx index 4f3b06946..66936c4a5 100644 --- a/spacy/tokens/doc.pyx +++ b/spacy/tokens/doc.pyx @@ -477,11 +477,11 @@ cdef class Doc: cpdef np.ndarray to_array(self, object py_attr_ids): """Export given token attributes to a numpy `ndarray`. - If `attr_ids` is a sequence of M attributes, the output array will - be of shape `(N, M)`, where N is the length of the `Doc` - (in tokens). If `attr_ids` is a single attribute, the output shape will - be (N,). You can specify attributes by integer ID (e.g. spacy.attrs.LEMMA) - or string name (e.g. 'LEMMA' or 'lemma'). + If `attr_ids` is a sequence of M attributes, the output array will + be of shape `(N, M)`, where N is the length of the `Doc` + (in tokens). If `attr_ids` is a single attribute, the output shape will + be (N,). You can specify attributes by integer ID (e.g. spacy.attrs.LEMMA) + or string name (e.g. 'LEMMA' or 'lemma'). Example: from spacy import attrs @@ -499,28 +499,25 @@ cdef class Doc: """ cdef int i, j cdef attr_id_t feature + cdef np.ndarray[attr_t, ndim=1] attr_ids, output_1D cdef np.ndarray[attr_t, ndim=2] output - cdef np.ndarray[attr_t, ndim=1] output_1D # Handle scalar/list inputs of strings/ints for py_attr_ids - if( type(py_attr_ids) is not list and type(py_attr_ids) is not tuple ): - py_attr_ids = [ py_attr_ids ] - py_attr_ids_input = [] - for py_attr_id in py_attr_ids: - if( type(py_attr_id) is int ): - py_attr_ids_input.append(py_attr_id) - else: - py_attr_ids_input.append(IDS[py_attr_id.upper()]) - # Make an array from the attributes --- otherwise our inner loop is Python + if not hasattr(py_attr_ids, '__iter__'): + py_attr_ids = [py_attr_ids] + + # Allow strings, e.g. 'lemma' or 'LEMMA' + convert_id = lambda id_: IDS[id_.upper()] if hasattr(id_, 'upper') else id_ + # Make an array from the attributes --- otherwise inner loop would be Python # dict iteration. - cdef np.ndarray[attr_t, ndim=1] attr_ids = numpy.asarray(py_attr_ids_input, dtype=numpy.int32) + attr_ids = numpy.asarray((convert_id(id_) for id_ in py_attr_ids), + dtype=numpy.int32) + output = numpy.ndarray(shape=(self.length, len(attr_ids)), dtype=numpy.int32) for i in range(self.length): for j, feature in enumerate(attr_ids): output[i, j] = get_token_attr(&self.c[i], feature) - if( len(attr_ids) == 1 ): - output_1D = output.reshape((self.length)) - return output_1D - return output + # Handle 1d case + return output if len(attr_ids) >= 2 else output.reshape((self.length,)) def count_by(self, attr_id_t attr_id, exclude=None, PreshCounter counts=None): """ From 658536b5ce0c90a4baa7d1c25e7d3fad363f4d4d Mon Sep 17 00:00:00 2001 From: Matthew Honnibal Date: Fri, 20 Oct 2017 11:35:10 +0200 Subject: [PATCH 5/7] Fix to_array compile error --- spacy/tokens/doc.pyx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/spacy/tokens/doc.pyx b/spacy/tokens/doc.pyx index 66936c4a5..ce2a82cd0 100644 --- a/spacy/tokens/doc.pyx +++ b/spacy/tokens/doc.pyx @@ -499,10 +499,10 @@ cdef class Doc: """ cdef int i, j cdef attr_id_t feature - cdef np.ndarray[attr_t, ndim=1] attr_ids, output_1D + cdef np.ndarray[attr_t, ndim=1] attr_ids cdef np.ndarray[attr_t, ndim=2] output # Handle scalar/list inputs of strings/ints for py_attr_ids - if not hasattr(py_attr_ids, '__iter__'): + if not hasattr(py_attr_ids, '__iter__'): py_attr_ids = [py_attr_ids] # Allow strings, e.g. 
'lemma' or 'LEMMA' From 7a46792376773f0f7ed55f9a1e71d0512c5eed2b Mon Sep 17 00:00:00 2001 From: Matthew Honnibal Date: Fri, 20 Oct 2017 11:53:47 +0200 Subject: [PATCH 6/7] Fix compile error Closures not allowed in cpdef --- spacy/tokens/doc.pyx | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/spacy/tokens/doc.pyx b/spacy/tokens/doc.pyx index ce2a82cd0..3b2ef80fa 100644 --- a/spacy/tokens/doc.pyx +++ b/spacy/tokens/doc.pyx @@ -506,12 +506,11 @@ cdef class Doc: py_attr_ids = [py_attr_ids] # Allow strings, e.g. 'lemma' or 'LEMMA' - convert_id = lambda id_: IDS[id_.upper()] if hasattr(id_, 'upper') else id_ + py_attr_ids = [(IDS[id_.toupper()] if hasattr(id_, 'upper') else id_) + for id_ in py_attr_ids] # Make an array from the attributes --- otherwise inner loop would be Python # dict iteration. - attr_ids = numpy.asarray((convert_id(id_) for id_ in py_attr_ids), - dtype=numpy.int32) - + attr_ids = numpy.asarray(py_attr_ids, dtype=numpy.int32) output = numpy.ndarray(shape=(self.length, len(attr_ids)), dtype=numpy.int32) for i in range(self.length): for j, feature in enumerate(attr_ids): From dbc276e3b2ca64a6f72d612629d773a9f44e13da Mon Sep 17 00:00:00 2001 From: Matthew Honnibal Date: Fri, 20 Oct 2017 13:02:13 +0200 Subject: [PATCH 7/7] Fix 'toupper()' -> 'upper()' --- spacy/tokens/doc.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spacy/tokens/doc.pyx b/spacy/tokens/doc.pyx index 3b2ef80fa..1bc8745c4 100644 --- a/spacy/tokens/doc.pyx +++ b/spacy/tokens/doc.pyx @@ -506,7 +506,7 @@ cdef class Doc: py_attr_ids = [py_attr_ids] # Allow strings, e.g. 'lemma' or 'LEMMA' - py_attr_ids = [(IDS[id_.toupper()] if hasattr(id_, 'upper') else id_) + py_attr_ids = [(IDS[id_.upper()] if hasattr(id_, 'upper') else id_) for id_ in py_attr_ids] # Make an array from the attributes --- otherwise inner loop would be Python # dict iteration.
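
Taken together, the series leaves Doc.to_array accepting either a sequence of attributes or a single attribute, with each attribute given as an integer ID or a string name. Below is a minimal usage sketch mirroring the cases exercised by the new tests in spacy/tests/doc/test_array.py; loading the 'en' model and the example sentence are assumptions for illustration only, not something the patches prescribe.

    # Minimal sketch of the behaviour added by this series, mirroring the new
    # tests in spacy/tests/doc/test_array.py. Loading 'en' is an assumption
    # about the local environment, not part of the patches.
    import spacy
    from spacy.attrs import ORTH, SHAPE

    nlp = spacy.load('en')
    doc = nlp(u'An example sentence')

    # A sequence of integer attribute IDs -> 2D array of shape (N, M)
    feats = doc.to_array([ORTH, SHAPE])
    assert feats.shape == (len(doc), 2)

    # String names (mapped through spacy.attrs.IDS) give the same values
    feats_stringy = doc.to_array(('ORTH', 'SHAPE'))
    assert (feats == feats_stringy).all()

    # A single attribute ID -> 1D array of shape (N,)
    orth_only = doc.to_array(ORTH)
    assert orth_only.shape == (len(doc),)

The 1D case relies on the reshape added in patch 1/7 and kept through patch 4/7, while the string handling relies on the IDS lookup introduced in patch 2/7 and corrected in patches 6/7 and 7/7.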