Update examples and API docs
commit c3e903e4c2
parent e9e62b01b0
@@ -109,9 +108,8 @@ p
   | easily accessed.

 +aside-code("Example").
-    doc = nlp(u'Give it back! He pleaded.')
-    for token in doc:
-        print(token.text, token.tag_)
+    doc = nlp(u'Give it back')
+    assert [t.text for t in doc] == [u'Give', u'it', u'back']

 p
   | This is the main way of accessing #[+api("token") #[code Token]] objects,
@@ -143,7 +142,7 @@ p Get the number of tokens in the document.

 +h(2, "similarity") Doc.similarity
   +tag method
-  +tag requires model
+  +tag requires model: vectors

 p
   | Make a semantic similarity estimate. The default estimate is cosine
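
As a side note for readers following along, here is a small standalone sketch of the Doc.similarity call described in the hunk above. The model name 'en' and the example sentences are assumptions; the scores depend on the word vectors shipped with whatever model is installed.

    # Minimal sketch of Doc.similarity (cosine over averaged word vectors).
    # Assumes an English model with word vectors is installed; 'en' is an assumption.
    import spacy

    nlp = spacy.load('en')
    doc1 = nlp(u'I like salty fries and hamburgers.')
    doc2 = nlp(u'Fast food tastes very good.')

    # similarity() is symmetric and returns a float; higher means more similar.
    print(doc1.similarity(doc2))
    print(doc2.similarity(doc1))
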
@@ -178,11 +177,10 @@ p
   | of the given attribute ID.

 +aside-code("Example").
-    from spacy import attrs
+    from spacy.attrs import ORTH
     doc = nlp(u'apple apple orange banana')
-    tokens.count_by(attrs.ORTH)
-    # {12800L: 1, 11880L: 2, 7561L: 1}
-    tokens.to_array([attrs.ORTH])
+    assert doc.count_by(ORTH) == {7024L: 1, 119552L: 1, 2087L: 2}
+    doc.to_array([attrs.ORTH])
     # array([[11880], [11880], [7561], [12800]])

 +table(["Name", "Type", "Description"])
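
The integer keys in the count_by example above are vocabulary-specific IDs. A hedged sketch of how they map back to readable strings, and how to_array lines up with the token sequence; the model name and sentence are assumptions, and the literal IDs will differ per vocabulary:

    # Sketch of Doc.count_by and Doc.to_array.
    import spacy
    from spacy.attrs import ORTH, LOWER

    nlp = spacy.load('en')  # model name is an assumption
    doc = nlp(u'apple apple orange banana')

    # Keys are integer attribute values; map them back to text via the StringStore.
    counts = doc.count_by(ORTH)
    readable = {nlp.vocab.strings[key]: value for key, value in counts.items()}
    assert readable == {u'apple': 2, u'orange': 1, u'banana': 1}

    # to_array returns one row per token, one column per requested attribute.
    arr = doc.to_array([ORTH, LOWER])
    assert arr.shape == (len(doc), 2)
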
@@ -237,6 +235,7 @@ p
     np_array = doc.to_array([LOWER, POS, ENT_TYPE, IS_ALPHA])
     doc2 = Doc(doc.vocab)
     doc2.from_array([LOWER, POS, ENT_TYPE, IS_ALPHA], np_array)
+    assert doc.text == doc2.text

 +table(["Name", "Type", "Description"])
   +row
@@ -307,8 +306,7 @@ p
 +aside-code("Example").
     doc = nlp(u'Los Angeles start.')
     doc.merge(0, len('Los Angeles'), 'NNP', 'Los Angeles', 'GPE')
-    print([token.text for token in doc])
-    # ['Los Angeles', 'start', '.']
+    assert [t.text for t in doc] == [u'Los Angeles', u'start', u'.']

 +table(["Name", "Type", "Description"])
   +row
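
A common use of the positional Doc.merge signature shown above is collapsing each named entity into a single token. The sketch below is an assumption-laden illustration (model name and sentence are mine); the character offsets are collected first so that merging never works from stale Span objects.

    # Sketch: merge each named entity into one token with
    # doc.merge(start_idx, end_idx, tag, lemma, ent_type).
    import spacy

    nlp = spacy.load('en')  # assumes an English model with NER
    doc = nlp(u'San Francisco considers banning sidewalk delivery robots')

    # Collect plain values before mutating the Doc.
    spans = [(ent.start_char, ent.end_char, ent.root.tag_, ent.text, ent.label_)
             for ent in doc.ents]
    for start_char, end_char, tag, lemma, ent_type in spans:
        doc.merge(start_char, end_char, tag, lemma, ent_type)

    print([token.text for token in doc])
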
@@ -338,7 +336,7 @@ p

 +h(2, "print_tree") Doc.print_tree
   +tag method
-  +tag requires model
+  +tag requires model: parse

 p
   | Returns the parse trees in JSON (dict) format. Especially useful for
@@ -371,7 +369,7 @@ p

 +h(2, "ents") Doc.ents
   +tag property
-  +tag requires model
+  +tag requires model: NER

 p
   | Iterate over the entities in the document. Yields named-entity
@@ -393,7 +391,7 @@ p

 +h(2, "noun_chunks") Doc.noun_chunks
   +tag property
-  +tag requires model
+  +tag requires model: parse

 p
   | Iterate over the base noun phrases in the document. Yields base
@@ -416,7 +414,7 @@ p

 +h(2, "sents") Doc.sents
   +tag property
-  +tag requires model
+  +tag requires model: parse

 p
   | Iterate over the sentences in the document. Sentence spans have no label.
@@ -438,7 +436,7 @@ p

 +h(2, "has_vector") Doc.has_vector
   +tag property
-  +tag requires model
+  +tag requires model: vectors

 p
   | A boolean value indicating whether a word vector is associated with the
@@ -456,7 +454,7 @@ p

 +h(2, "vector") Doc.vector
   +tag property
-  +tag requires model
+  +tag requires model: vectors

 p
   | A real-valued meaning representation. Defaults to an average of the
@@ -464,8 +462,8 @@ p

 +aside-code("Example").
     apples = nlp(u'I like apples')
-    (apples.vector.dtype, apples.vector.shape)
-    # (dtype('float32'), (300,))
+    assert doc.vector.dtype == 'float32'
+    assert doc.vector.shape == (300,)

 +table(["Name", "Type", "Description"])
   +footrow
@@ -475,11 +473,18 @@ p

 +h(2, "vector_norm") Doc.vector_norm
   +tag property
-  +tag requires model
+  +tag requires model: vectors

 p
   | The L2 norm of the document's vector representation.

++aside-code("Example").
+    doc1 = nlp(u'I like apples')
+    doc2 = nlp(u'I like oranges')
+    doc1.vector_norm # 4.54232424414368
+    doc2.vector_norm # 3.304373298575751
+    assert doc1.vector_norm != doc2.vector_norm
+
 +table(["Name", "Type", "Description"])
   +footrow
     +cell returns
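
To tie the Doc.vector and Doc.vector_norm entries above together: vector_norm is the L2 norm of the averaged vector, and the cosine used by Doc.similarity can be reproduced from them by hand. A hedged sketch, assuming an English model with non-zero word vectors; the sentences and tolerance are assumptions.

    import numpy
    import spacy

    nlp = spacy.load('en')
    doc1 = nlp(u'I like apples')
    doc2 = nlp(u'I like oranges')

    # vector_norm agrees with the L2 norm of the averaged vector.
    assert abs(doc1.vector_norm - numpy.sqrt((doc1.vector ** 2).sum())) < 1e-4

    # Cosine similarity computed by hand should closely match Doc.similarity.
    cosine = numpy.dot(doc1.vector, doc2.vector) / (doc1.vector_norm * doc2.vector_norm)
    assert abs(cosine - doc1.similarity(doc2)) < 1e-4
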
@@ -12,8 +12,7 @@ p Create a Span object from the #[code slice doc[start : end]].
 +aside-code("Example").
     doc = nlp(u'Give it back! He pleaded.')
     span = doc[1:4]
-    print([token.text for token in span])
-    # ['it', 'back', '!']
+    assert [t.text for t in span] == [u'it', u'back', u'!']

 +table(["Name", "Type", "Description"])
   +row
@@ -93,8 +92,7 @@ p Iterate over #[code Token] objects.
 +aside-code("Example").
     doc = nlp(u'Give it back! He pleaded.')
     span = doc[1:4]
-    print([token.text for token in span])
-    # ['it', 'back', '!']
+    assert [t.text for t in span] == ['it', 'back', '!']

 +table(["Name", "Type", "Description"])
   +footrow
@@ -120,16 +118,18 @@ p Get the number of tokens in the span.

 +h(2, "similarity") Span.similarity
   +tag method
-  +tag requires model
+  +tag requires model: vectors

 p
   | Make a semantic similarity estimate. The default estimate is cosine
   | similarity using an average of word vectors.

 +aside-code("Example").
-    apples, and, oranges = nlp(u'apples and oranges')
-    apples_oranges = apples.similarity(oranges)
-    oranges_apples = oranges.similarity(apples)
+    doc = nlp(u'green apples and red oranges')
+    green_apples = doc[:2]
+    red_oranges = doc[3:]
+    apples_oranges = green_apples.similarity(red_oranges)
+    oranges_apples = red_oranges.similarity(green_apples)
     assert apples_oranges == oranges_apples

 +table(["Name", "Type", "Description"])
@@ -165,17 +165,18 @@ p Retokenize the document, such that the span is merged into a single token.

 +h(2, "root") Span.root
   +tag property
+  +tag requires model: parse

 p
   | The token within the span that's highest in the parse tree. If there's a
   | tie, the earlist is prefered.

 +aside-code("Example").
-    tokens = nlp(u'I like New York in Autumn.')
-    i, like, new, york, in_, autumn, dot = range(len(tokens))
-    assert tokens[new].head.text == 'York'
-    assert tokens[york].head.text == 'like'
-    new_york = tokens[new:york+1]
+    doc = nlp(u'I like New York in Autumn.')
+    i, like, new, york, in_, autumn, dot = range(len(doc))
+    assert doc[new].head.text == 'York'
+    assert doc[york].head.text == 'like'
+    new_york = doc[new:york+1]
     assert new_york.root.text == 'York'

 +table(["Name", "Type", "Description"])
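
Span.root, as documented in the hunk above, is particularly handy for reducing a noun chunk to its head word. A hedged sketch of that pattern; the model name, sentence, and resulting parse are assumptions.

    import spacy

    nlp = spacy.load('en')  # assumes an English model with a parser
    doc = nlp(u'I like New York in Autumn.')

    for chunk in doc.noun_chunks:
        # chunk.root is the token inside the span that sits highest in the parse tree.
        print(chunk.text, '->', chunk.root.text, chunk.root.dep_, chunk.root.head.text)
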
@@ -186,9 +187,15 @@ p

 +h(2, "lefts") Span.lefts
   +tag property
+  +tag requires model: parse

 p Tokens that are to the left of the span, whose head is within the span.

++aside-code("Example").
+    doc = nlp(u'I like New York in Autumn.')
+    lefts = [t.text for t in doc[3:7].lefts]
+    assert lefts == [u'New']
+
 +table(["Name", "Type", "Description"])
   +footrow
     +cell yields
@@ -197,9 +204,15 @@ p Tokens that are to the left of the span, whose head is within the span.

 +h(2, "rights") Span.rights
   +tag property
+  +tag requires model: parse

 p Tokens that are to the right of the span, whose head is within the span.

++aside-code("Example").
+    doc = nlp(u'I like New York in Autumn.')
+    rights = [t.text for t in doc[2:4].rights]
+    assert rights == [u'in']
+
 +table(["Name", "Type", "Description"])
   +footrow
     +cell yields
@@ -208,9 +221,15 @@ p Tokens that are to the right of the span, whose head is within the span.

 +h(2, "subtree") Span.subtree
   +tag property
+  +tag requires model: parse

 p Tokens that descend from tokens in the span, but fall outside it.

++aside-code("Example").
+    doc = nlp(u'Give it back! He pleaded.')
+    subtree = [t.text for t in doc[:3].subtree]
+    assert subtree == [u'Give', u'it', u'back', u'!']
+
 +table(["Name", "Type", "Description"])
   +footrow
     +cell yields
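
The Span.lefts, Span.rights and Span.subtree examples added above can also be exercised together on a single parse. A hedged sketch; the model name, sentence, and therefore the exact tokens printed are assumptions.

    import spacy

    nlp = spacy.load('en')  # assumes an English model with a parser
    doc = nlp(u'I like New York in Autumn.')
    span = doc[2:4]  # the span "New York"

    print('lefts:  ', [t.text for t in span.lefts])    # left dependents whose head is inside the span
    print('rights: ', [t.text for t in span.rights])   # right dependents whose head is inside the span
    print('subtree:', [t.text for t in span.subtree])  # the span's tokens plus everything they dominate
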
@@ -219,15 +238,15 @@ p Tokens that descend from tokens in the span, but fall outside it.

 +h(2, "has_vector") Span.has_vector
   +tag property
-  +tag requires model
+  +tag requires model: vectors

 p
   | A boolean value indicating whether a word vector is associated with the
   | object.

 +aside-code("Example").
-    apple = nlp(u'apple')
-    assert apple.has_vector
+    doc = nlp(u'I like apples')
+    assert doc[1:].has_vector

 +table(["Name", "Type", "Description"])
   +footrow
@@ -237,16 +256,16 @@ p

 +h(2, "vector") Span.vector
   +tag property
-  +tag requires model
+  +tag requires model: vectors

 p
   | A real-valued meaning representation. Defaults to an average of the
   | token vectors.

 +aside-code("Example").
-    apple = nlp(u'apple')
-    (apple.vector.dtype, apple.vector.shape)
-    # (dtype('float32'), (300,))
+    doc = nlp(u'I like apples')
+    assert doc[1:].vector.dtype == 'float32'
+    assert doc[1:].vector.shape == (300,)

 +table(["Name", "Type", "Description"])
   +footrow
@@ -256,11 +275,17 @@ p

 +h(2, "vector_norm") Span.vector_norm
   +tag property
-  +tag requires model
+  +tag requires model: vectors

 p
   | The L2 norm of the span's vector representation.

++aside-code("Example").
+    doc = nlp(u'I like apples')
+    doc[1:].vector_norm # 4.800883928527915
+    doc[2:].vector_norm # 6.895897646384268
+    assert doc[1:].vector_norm != doc[2:].vector_norm
+
 +table(["Name", "Type", "Description"])
   +footrow
     +cell returns
@@ -12,6 +12,7 @@ p Construct a #[code Token] object.
 +aside-code("Example").
     doc = nlp(u'Give it back! He pleaded.')
     token = doc[0]
+    assert token.text == u'Give'

 +table(["Name", "Type", "Description"])
   +row
@@ -59,8 +60,7 @@ p Check the value of a boolean flag.
     from spacy.attrs import IS_TITLE
     doc = nlp(u'Give it back! He pleaded.')
     token = doc[0]
-    token.check_flag(IS_TITLE)
-    # True
+    assert token.check_flag(IS_TITLE) == True

 +table(["Name", "Type", "Description"])
   +row
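
Beyond built-in flags like IS_TITLE, Token.check_flag also accepts custom flag IDs. The sketch below relies on Vocab.add_flag returning a new flag ID, which is my reading of the API at the time and should be treated as an assumption, as should the model name and sentence.

    import spacy

    nlp = spacy.load('en')

    # Register a custom boolean flag computed from the token text (assumed API).
    IS_COLOUR = nlp.vocab.add_flag(lambda text: text.lower() in (u'red', u'green', u'blue'))

    doc = nlp(u'I like blue bicycles')
    assert doc[2].check_flag(IS_COLOUR)       # 'blue'
    assert not doc[3].check_flag(IS_COLOUR)   # 'bicycles'
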
@@ -73,35 +73,14 @@ p Check the value of a boolean flag.
     +cell bool
     +cell Whether the flag is set.

-+h(2, "nbor") Token.nbor
-  +tag method
-
-p Get a neighboring token.
-
-+aside-code("Example").
-    doc = nlp(u'Give it back! He pleaded.')
-    token = doc[0]
-    token.nbor()
-    # it
-
-+table(["Name", "Type", "Description"])
-  +row
-    +cell #[code i]
-    +cell int
-    +cell The relative position of the token to get. Defaults to #[code 1].
-
-  +footrow
-    +cell returns
-    +cell #[code Token]
-    +cell The token at position #[code self.doc[self.i+i]].
-
 +h(2, "similarity") Token.similarity
   +tag method
+  +tag requires model: vectors

 p Compute a semantic similarity estimate. Defaults to cosine over vectors.

 +aside-code("Example").
-    apples, and, oranges = nlp(u'apples and oranges')
+    apples, _, oranges = nlp(u'apples and oranges')
     apples_oranges = apples.similarity(oranges)
     oranges_apples = oranges.similarity(apples)
     assert apples_oranges == oranges_apples
@@ -119,13 +98,41 @@ p Compute a semantic similarity estimate. Defaults to cosine over vectors.
     +cell float
     +cell A scalar similarity score. Higher is more similar.

++h(2, "nbor") Token.nbor
+  +tag method
+
+p Get a neighboring token.
+
++aside-code("Example").
+    doc = nlp(u'Give it back! He pleaded.')
+    give_nbor = doc[0].nbor()
+    assert give_nbor.text == u'it'
+
++table(["Name", "Type", "Description"])
+  +row
+    +cell #[code i]
+    +cell int
+    +cell The relative position of the token to get. Defaults to #[code 1].
+
+  +footrow
+    +cell returns
+    +cell #[code Token]
+    +cell The token at position #[code self.doc[self.i+i]].
+
 +h(2, "is_ancestor") Token.is_ancestor
   +tag method
+  +tag requires model: parse

 p
   | Check whether this token is a parent, grandparent, etc. of another
   | in the dependency tree.

++aside-code("Example").
+    doc = nlp(u'Give it back! He pleaded.')
+    give = doc[0]
+    it = doc[1]
+    assert give.is_ancestor(it)
+
 +table(["Name", "Type", "Description"])
   +row
     +cell descendant
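
The Token.nbor and Token.is_ancestor entries added above combine naturally: nbor moves through the linear token sequence, while is_ancestor follows the dependency tree. A hedged sketch using the same sentence as the docs; the model name and the loop's reliance on the root being its own head reflect my understanding rather than the hunk itself.

    import spacy

    nlp = spacy.load('en')  # assumes an English model with a parser
    doc = nlp(u'Give it back! He pleaded.')

    give = doc[0]
    assert give.nbor().text == u'it'        # nbor() defaults to the next token (i=1)
    assert doc[1].nbor(-1).text == u'Give'  # negative offsets look backwards

    # Walk from a token up to its sentence root; every head passed on the way
    # is an ancestor of the starting token.
    token = doc[1]
    while token.head.i != token.i:          # the root is its own head
        assert token.head.is_ancestor(token)
        token = token.head
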
@@ -137,60 +144,38 @@ p
     +cell bool
     +cell Whether this token is the ancestor of the descendant.

-+h(2, "has_vector") Token.has_vector
++h(2, "ancestors") Token.ancestors
   +tag property
-  +tag requires model
+  +tag requires model: parse

-p
-  | A boolean value indicating whether a word vector is associated with the
-  | token.
+p The rightmost token of this token's syntactic descendants.

 +aside-code("Example").
-    apple = nlp(u'apple')
-    assert apple.has_vector
+    doc = nlp(u'Give it back! He pleaded.')
+    it_ancestors = doc[1].ancestors
+    assert [t.text for t in it_ancestors] == [u'Give']
+    he_ancestors = doc[4].ancestors
+    assert [t.text for t in he_ancestors] == [u'pleaded']

 +table(["Name", "Type", "Description"])
   +footrow
-    +cell returns
-    +cell bool
-    +cell Whether the token has a vector data attached.
-
-+h(2, "vector") Token.vector
-  +tag property
-  +tag requires model
-
-p
-  | A real-valued meaning representation.
-
-+aside-code("Example").
-    apple = nlp(u'apple')
-    (apple.vector.dtype, apple.vector.shape)
-    # (dtype('float32'), (300,))
-
-+table(["Name", "Type", "Description"])
-  +footrow
-    +cell returns
-    +cell #[code numpy.ndarray[ndim=1, dtype='float32']]
-    +cell A 1D numpy array representing the token's semantics.
-
-+h(2, "vector_norm") Span.vector_norm
-  +tag property
-  +tag requires model
-
-p
-  | The L2 norm of the token's vector representation.
-
-+table(["Name", "Type", "Description"])
-  +footrow
-    +cell returns
-    +cell float
-    +cell The L2 norm of the vector representation.
+    +cell yields
+    +cell #[code Token]
+    +cell
+      | A sequence of ancestor tokens such that
+      | #[code ancestor.is_ancestor(self)].

 +h(2, "conjuncts") Token.conjuncts
   +tag property
+  +tag requires model: parse

 p A sequence of coordinated tokens, including the token itself.

++aside-code("Example").
+    doc = nlp(u'I like apples and oranges')
+    apples_conjuncts = doc[2].conjuncts
+    assert [t.text for t in apples_conjuncts] == [u'oranges']
+
 +table(["Name", "Type", "Description"])
   +footrow
     +cell yields
@@ -199,9 +184,15 @@ p A sequence of coordinated tokens, including the token itself.

 +h(2, "children") Token.children
   +tag property
+  +tag requires model: parse

 p A sequence of the token's immediate syntactic children.

++aside-code("Example").
+    doc = nlp(u'Give it back! He pleaded.')
+    give_children = doc[0].children
+    assert [t.text for t in give_children] == [u'it', u'back', u'!']
+
 +table(["Name", "Type", "Description"])
   +footrow
     +cell yields
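
Token.children, together with Token.head and Token.dep_, gives a compact view of the whole dependency parse. A hedged sketch using the same sentence as the example above; the model name is an assumption.

    import spacy

    nlp = spacy.load('en')  # assumes an English model with a parser
    doc = nlp(u'Give it back! He pleaded.')

    # One line per token: text, dependency label, head, and immediate children.
    for token in doc:
        print(token.text, token.dep_, token.head.text,
              [child.text for child in token.children])
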
@@ -210,27 +201,79 @@ p A sequence of the token's immediate syntactic children.

 +h(2, "subtree") Token.subtree
   +tag property
+  +tag requires model: parse

 p A sequence of all the token's syntactic descendents.

++aside-code("Example").
+    doc = nlp(u'Give it back! He pleaded.')
+    give_subtree = doc[0].subtree
+    assert [t.text for t in give_subtree] == [u'Give', u'it', u'back', u'!']
+
 +table(["Name", "Type", "Description"])
   +footrow
     +cell yields
     +cell #[code Token]
     +cell A descendant token such that #[code self.is_ancestor(descendant)].

-+h(2, "ancestors") Token.ancestors
++h(2, "has_vector") Token.has_vector
   +tag property
+  +tag requires model: vectors

-p The rightmost token of this token's syntactic descendants.
+p
+  | A boolean value indicating whether a word vector is associated with the
+  | token.

++aside-code("Example").
+    doc = nlp(u'I like apples')
+    apples = doc[2]
+    assert apples.has_vector
+
 +table(["Name", "Type", "Description"])
   +footrow
-    +cell yields
-    +cell #[code Token]
-    +cell
-      | A sequence of ancestor tokens such that
-      | #[code ancestor.is_ancestor(self)].
+    +cell returns
+    +cell bool
+    +cell Whether the token has a vector data attached.
+
++h(2, "vector") Token.vector
+  +tag property
+  +tag requires model: vectors
+
+p
+  | A real-valued meaning representation.
+
++aside-code("Example").
+    doc = nlp(u'I like apples')
+    apples = doc[2]
+    assert apples.vector.dtype == 'float32'
+    assert apples.vector.shape == (300,)
+
++table(["Name", "Type", "Description"])
+  +footrow
+    +cell returns
+    +cell #[code numpy.ndarray[ndim=1, dtype='float32']]
+    +cell A 1D numpy array representing the token's semantics.
+
++h(2, "vector_norm") Span.vector_norm
+  +tag property
+  +tag requires model: vectors
+
+p
+  | The L2 norm of the token's vector representation.
+
++aside-code("Example").
+    doc = nlp(u'I like apples and pasta')
+    apples = doc[2]
+    pasta = doc[4]
+    apples.vector_norm # 6.89589786529541
+    pasta.vector_norm # 7.759851932525635
+    assert apples.vector_norm != pasta.vector_norm
+
++table(["Name", "Type", "Description"])
+  +footrow
+    +cell returns
+    +cell float
+    +cell The L2 norm of the vector representation.

 +h(2, "attributes") Attributes

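
As a final illustration of the Token.vector and vector_norm entries added in the hunk above, the token vectors can be used to rank the other words in a sentence by similarity to one token. A hedged sketch; the model name and sentence are assumptions, and the scores depend on the installed vectors.

    import spacy

    nlp = spacy.load('en')  # assumes an English model with word vectors
    doc = nlp(u'I like apples and pasta')
    apples = doc[2]

    # Compare 'apples' against every other token that actually has a vector.
    others = [token for token in doc if token.i != apples.i and token.has_vector]
    ranked = sorted(others, key=lambda token: apples.similarity(token), reverse=True)
    print([(token.text, round(apples.similarity(token), 3)) for token in ranked])
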