fix 's typo's across code base (#8384)

Sofie Van Landeghem 2021-06-15 10:57:08 +02:00 committed by GitHub
parent 507422149f
commit 0fd0d949c4
5 changed files with 14 additions and 14 deletions

View File

@@ -163,7 +163,7 @@ cdef class Lexeme:
             self.vocab.set_vector(self.c.orth, vector)

     property rank:
-        """RETURNS (str): Sequential ID of the lexemes's lexical type, used
+        """RETURNS (str): Sequential ID of the lexeme's lexical type, used
         to index into tables, e.g. for word vectors."""
         def __get__(self):
             return self.c.id
@@ -205,7 +205,7 @@ cdef class Lexeme:
             self.c.lower = x

     property norm:
-        """RETURNS (uint64): The lexemes's norm, i.e. a normalised form of the
+        """RETURNS (uint64): The lexeme's norm, i.e. a normalised form of the
         lexeme text.
         """
         def __get__(self):
@@ -288,7 +288,7 @@ cdef class Lexeme:
             self.c.lower = self.vocab.strings.add(x)

     property norm_:
-        """RETURNS (str): The lexemes's norm, i.e. a normalised form of the
+        """RETURNS (str): The lexeme's norm, i.e. a normalised form of the
         lexeme text.
         """
         def __get__(self):
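For reference, a minimal sketch of how the `rank`, `norm` and `norm_` properties touched above behave (assuming spaCy v3 and a blank English pipeline; `rank` only becomes meaningful once vectors or frequency tables assign row IDs, so the exact values are illustrative):

```python
import spacy

nlp = spacy.blank("en")          # assumption: blank pipeline, tokenizer + vocab only
lex = nlp.vocab["coffee"]        # look up (and create) the Lexeme

print(lex.rank)    # sequential ID of the lexeme's lexical type, used to index into tables
print(lex.norm)    # uint64 hash of the normalised form
print(lex.norm_)   # the normalised form as a string, e.g. "coffee"
```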

View File

@@ -329,7 +329,7 @@ cdef class Token:
     @property
     def shape(self):
         """RETURNS (uint64): ID of the token's shape, a transform of the
-        tokens's string, to show orthographic features (e.g. "Xxxx", "dd").
+        token's string, to show orthographic features (e.g. "Xxxx", "dd").
         """
         return self.c.lex.shape
@@ -825,7 +825,7 @@ cdef class Token:
     @property
     def shape_(self):
-        """RETURNS (str): Transform of the tokens's string, to show
+        """RETURNS (str): Transform of the token's string, to show
         orthographic features. For example, "Xxxx" or "dd".
         """
        return self.vocab.strings[self.c.lex.shape]
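To illustrate the `shape` / `shape_` pair documented in these hunks, a short sketch (blank English pipeline assumed; `shape` is the uint64 ID stored in the string store, `shape_` the human-readable transform):

```python
import spacy

nlp = spacy.blank("en")
doc = nlp("Apple opened 2 stores in 2021.")

for token in doc:
    # shape_ maps letters to x/X and digits to d, e.g. "Xxxxx" for "Apple"
    # and "dddd" for "2021"; shape is the corresponding uint64 ID
    print(f"{token.text:<8} {token.shape_:<8} {token.shape}")
```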

View File

@@ -127,14 +127,14 @@ The L2 norm of the lexeme's vector representation.
 | `text` | Verbatim text content. ~~str~~ |
 | `orth` | ID of the verbatim text content. ~~int~~ |
 | `orth_` | Verbatim text content (identical to `Lexeme.text`). Exists mostly for consistency with the other attributes. ~~str~~ |
-| `rank` | Sequential ID of the lexemes's lexical type, used to index into tables, e.g. for word vectors. ~~int~~ |
+| `rank` | Sequential ID of the lexeme's lexical type, used to index into tables, e.g. for word vectors. ~~int~~ |
 | `flags` | Container of the lexeme's binary flags. ~~int~~ |
-| `norm` | The lexemes's norm, i.e. a normalized form of the lexeme text. ~~int~~ |
+| `norm` | The lexeme's norm, i.e. a normalized form of the lexeme text. ~~int~~ |
-| `norm_` | The lexemes's norm, i.e. a normalized form of the lexeme text. ~~str~~ |
+| `norm_` | The lexeme's norm, i.e. a normalized form of the lexeme text. ~~str~~ |
 | `lower` | Lowercase form of the word. ~~int~~ |
 | `lower_` | Lowercase form of the word. ~~str~~ |
-| `shape` | Transform of the words's string, to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example, `"Xxxx"` or `"dd"`. ~~int~~ |
+| `shape` | Transform of the word's string, to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example, `"Xxxx"` or `"dd"`. ~~int~~ |
 | `shape_` | Transform of the word's string, to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example, `"Xxxx"` or `"dd"`. ~~str~~ |
 | `prefix` | Length-N substring from the start of the word. Defaults to `N=1`. ~~int~~ |
 | `prefix_` | Length-N substring from the start of the word. Defaults to `N=1`. ~~str~~ |
 | `suffix` | Length-N substring from the end of the word. Defaults to `N=3`. ~~int~~ |
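As a concrete, hedged companion to the table above, reading a few of these attributes off a vocab entry in a blank English pipeline might look like this:

```python
import spacy

nlp = spacy.blank("en")
lex = nlp.vocab["Berlin"]

print(lex.orth, lex.orth_)   # ID of the verbatim text vs. the text itself
print(lex.lower_)            # "berlin" - lowercase form
print(lex.prefix_)           # "B"      - length-1 substring from the start (N=1 default)
print(lex.suffix_)           # "lin"    - length-3 substring from the end (N=3 default)
```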

View File

@@ -431,7 +431,7 @@ The L2 norm of the token's vector representation.
 | `orth` | ID of the verbatim text content. ~~int~~ |
 | `orth_` | Verbatim text content (identical to `Token.text`). Exists mostly for consistency with the other attributes. ~~str~~ |
 | `vocab` | The vocab object of the parent `Doc`. ~~vocab~~ |
-| `tensor` <Tag variant="new">2.1.7</Tag> | The tokens's slice of the parent `Doc`'s tensor. ~~numpy.ndarray~~ |
+| `tensor` <Tag variant="new">2.1.7</Tag> | The token's slice of the parent `Doc`'s tensor. ~~numpy.ndarray~~ |
 | `head` | The syntactic parent, or "governor", of this token. ~~Token~~ |
 | `left_edge` | The leftmost token of this token's syntactic descendants. ~~Token~~ |
 | `right_edge` | The rightmost token of this token's syntactic descendants. ~~Token~~ |
@@ -450,8 +450,8 @@ The L2 norm of the token's vector representation.
 | `norm_` | The token's norm, i.e. a normalized form of the token text. Can be set in the language's [tokenizer exceptions](/usage/linguistic-features#language-data). ~~str~~ |
 | `lower` | Lowercase form of the token. ~~int~~ |
 | `lower_` | Lowercase form of the token text. Equivalent to `Token.text.lower()`. ~~str~~ |
-| `shape` | Transform of the tokens's string to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example, `"Xxxx"` or `"dd"`. ~~int~~ |
+| `shape` | Transform of the token's string to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example, `"Xxxx"` or `"dd"`. ~~int~~ |
-| `shape_` | Transform of the tokens's string to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example, `"Xxxx"` or `"dd"`. ~~str~~ |
+| `shape_` | Transform of the token's string to show orthographic features. Alphabetic characters are replaced by `x` or `X`, and numeric characters are replaced by `d`, and sequences of the same character are truncated after length 4. For example, `"Xxxx"` or `"dd"`. ~~str~~ |
 | `prefix` | Hash value of a length-N substring from the start of the token. Defaults to `N=1`. ~~int~~ |
 | `prefix_` | A length-N substring from the start of the token. Defaults to `N=1`. ~~str~~ |
 | `suffix` | Hash value of a length-N substring from the end of the token. Defaults to `N=3`. ~~int~~ |
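Similarly, a small sketch of the token-level attributes from this table (blank English pipeline assumed; whether `norm_` resolves `"n't"` to `"not"` depends on the language's tokenizer exceptions):

```python
import spacy

nlp = spacy.blank("en")
doc = nlp("She isn't here.")
token = doc[2]                      # "n't" after tokenizer exceptions split "isn't"

print(token.lower_, token.norm_)    # lowercase text vs. norm (possibly "not")
print(token.prefix, token.prefix_)  # hash value vs. length-1 substring, e.g. "n"
print(token.suffix, token.suffix_)  # hash value vs. length-3 substring, e.g. "n't"
```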

View File

@@ -382,7 +382,7 @@ doc = nlp("This is a sentence.")
 You can use the [`info`](/api/cli#info) command or
 [`spacy.info()`](/api/top-level#spacy.info) method to print a pipeline
-packages's meta data before loading it. Each `Language` object with a loaded
+package's meta data before loading it. Each `Language` object with a loaded
 pipeline also exposes the pipeline's meta data as the attribute `meta`. For
 example, `nlp.meta['version']` will return the package version.
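A hedged sketch of the workflow this paragraph describes, assuming the `en_core_web_sm` package is installed (any installed pipeline package works the same way, and the print/return behaviour of `spacy.info` varies slightly across versions):

```python
import spacy

# Inspect a pipeline package's meta data before loading it; depending on the
# spaCy version this prints the info and/or returns it as a dict.
info = spacy.info("en_core_web_sm")

# After loading, the same meta data is exposed on the Language object.
nlp = spacy.load("en_core_web_sm")
print(nlp.meta["version"])                 # package version
print(nlp.meta["lang"], nlp.meta["name"])  # e.g. "en" and "core_web_sm"
```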