Update dependency parse workflow

This commit is contained in:
ines 2017-05-23 23:34:39 +02:00
parent fe24267948
commit 9ed6b48a49

View File

@ -8,55 +8,80 @@ p
| boundary detection, and lets you iterate over base noun phrases, or | boundary detection, and lets you iterate over base noun phrases, or
| "chunks". | "chunks".
+aside-code("Example").
import spacy
nlp = spacy.load('en')
doc = nlp(u'I like green eggs and ham.')
for np in doc.noun_chunks:
print(np.text, np.root.text, np.root.dep_, np.root.head.text)
# I I nsubj like
# green eggs eggs dobj like
# ham ham conj eggs
p p
| You can check whether a #[+api("doc") #[code Doc]] object has been | You can check whether a #[+api("doc") #[code Doc]] object has been
| parsed with the #[code doc.is_parsed] attribute, which returns a boolean | parsed with the #[code doc.is_parsed] attribute, which returns a boolean
| value. If this attribute is #[code False], the default sentence iterator | value. If this attribute is #[code False], the default sentence iterator
| will raise an exception. | will raise an exception.
+h(2, "displacy") The displaCy visualizer +h(2, "noun-chunks") Noun chunks
+tag-model("dependency parse")
p p Lorem ipsum dolor sit amet, consectetur adipiscing elit. Quisque enim ante, pretium a orci eget, varius dignissim augue. Nam eu dictum mauris, id tincidunt nisi. Integer commodo pellentesque tincidunt. Nam at turpis finibus tortor gravida sodales tincidunt sit amet est. Nullam euismod arcu in tortor auctor.
| The best way to understand spaCy's dependency parser is interactively,
| through the #[+a(DEMOS_URL + "/displacy", true) displaCy visualizer]. If +code("Example").
| you want to know how to write rules that hook into some type of syntactic nlp = spacy.load('en')
| construction, just plug the sentence into the visualizer and see how doc = nlp(u'Autonomous cars shift insurance liability toward manufacturers')
| spaCy annotates it. for chunk in doc.noun_chunks:
print(chunk.text, chunk.root.text, chunk.root.dep_,
chunk.root.head.text)
+aside
| #[strong Text:] The original noun chunk text.#[br]
| #[strong Root text:] ...#[br]
| #[strong Root dep:] ...#[br]
| #[strong Root head text:] ...#[br]
+table(["Text", "root.text", "root.dep_", "root.head.text"])
- var style = [0, 0, 1, 0]
+annotation-row(["Autonomous cars", "cars", "nsubj", "shift"], style)
+annotation-row(["insurance liability", "liability", "dobj", "shift"], style)
+annotation-row(["manufacturers", "manufacturers", "pobj", "toward"], style)
+h(2, "navigating") Navigating the parse tree +h(2, "navigating") Navigating the parse tree
p p
| spaCy uses the terms #[em head] and #[em child] to describe the words | spaCy uses the terms #[strong head] and #[strong child] to describe the words
| connected by a single arc in the dependency tree. The term #[em dep] is | #[strong connected by a single arc] in the dependency tree. The term
| used for the arc label, which describes the type of syntactic relation | #[strong dep] is used for the arc label, which describes the type of
| that connects the child to the head. As with other attributes, the value | syntactic relation that connects the child to the head. As with other
| of #[code token.dep] is an integer. You can get the string value with | attributes, the value of #[code .dep] is an integer. You can get
| #[code token.dep_]. | the string value with #[code .dep_].
+aside-code("Example"). +code("Example").
from spacy.symbols import det doc = nlp(u'Autonomous cars shift insurance liability toward manufacturers')
the, dog = nlp(u'the dog') for token in doc:
assert the.dep == det print(token.text, token.dep_, token.head.text, token.head.pos_,
assert the.dep_ == 'det' [child for child in token.children])
+aside
| #[strong Text]: The original token text.#[br]
| #[strong Dep]: The syntactic relation connecting child to head.#[br]
| #[strong Head text]: The original text of the token head.#[br]
| #[strong Head POS]: The part-of-speech tag of the token head.#[br]
| #[strong Children]: ...
+table(["Text", "Dep", "Head text", "Head POS", "Children"])
- var style = [0, 1, 0, 1, 0]
+annotation-row(["Autonomous", "amod", "cars", "NOUN", ""], style)
+annotation-row(["cars", "nsubj", "shift", "VERB", "Autonomous"], style)
+annotation-row(["shift", "ROOT", "shift", "VERB", "cars, liability"], style)
+annotation-row(["insurance", "compound", "liability", "NOUN", ""], style)
+annotation-row(["liability", "dobj", "shift", "VERB", "insurance, toward"], style)
+annotation-row(["toward", "prep", "liability", "NOUN", "manufacturers"], style)
+annotation-row(["manufacturers", "pobj", "toward", "ADP", ""], style)
+codepen("dcf8d293367ca185b935ed2ca11ebedd", 370)
p p
| Because the syntactic relations form a tree, every word has exactly one | Because the syntactic relations form a tree, every word has
| head. You can therefore iterate over the arcs in the tree by iterating | #[strong exactly one head]. You can therefore iterate over the arcs in
| over the words in the sentence. This is usually the best way to match an | the tree by iterating over the words in the sentence. This is usually
| arc of interest — from below: | the best way to match an arc of interest — from below:
+code. +code.
from spacy.symbols import nsubj, VERB from spacy.symbols import nsubj, VERB
# Finding a verb with a subject from below — good # Finding a verb with a subject from below — good
verbs = set() verbs = set()
for possible_subject in doc: for possible_subject in doc:
@ -82,6 +107,8 @@ p
| attribute, which provides a sequence of #[+api("token") #[code Token]] | attribute, which provides a sequence of #[+api("token") #[code Token]]
| objects. | objects.
+h(3, "navigating-around") Iterating around the local tree
p p
| A few more convenience attributes are provided for iterating around the | A few more convenience attributes are provided for iterating around the
| local tree from the token. The #[code .lefts] and #[code .rights] | local tree from the token. The #[code .lefts] and #[code .rights]
@ -90,55 +117,89 @@ p
| two integer-typed attributes, #[code .n_rights] and #[code .n_lefts], | two integer-typed attributes, #[code .n_rights] and #[code .n_lefts],
| that give the number of left and right children. | that give the number of left and right children.
+aside-code("Examples"). +code.
apples = nlp(u'bright red apples on the tree')[2] doc = nlp(u'bright red apples on the tree')
print([w.text for w in apples.lefts]) assert [token.text for token in doc[2].lefts] == [u'bright', u'red']
# ['bright', 'red'] assert [token.text for token in doc[2].rights] == [u'on']
print([w.text for w in apples.rights]) assert doc[2].n_lefts == 2
# ['on'] assert doc[2].n_rights == 1
assert apples.n_lefts == 2
assert apples.n_rights == 1
from spacy.symbols import nsubj
doc = nlp(u'Credit and mortgage account holders must submit their requests within 30 days.')
root = [w for w in doc if w.head is w][0]
subject = list(root.lefts)[0]
for descendant in subject.subtree:
assert subject.is_ancestor_of(descendant)
from spacy.symbols import nsubj
doc = nlp(u'Credit and mortgage account holders must submit their requests.')
holders = doc[4]
span = doc[holders.left_edge.i : holders.right_edge.i + 1]
span.merge()
for word in doc:
print(word.text, word.pos_, word.dep_, word.head.text)
# Credit and mortgage account holders nsubj NOUN submit
# must VERB aux submit
# submit VERB ROOT submit
# their DET det requests
# requests NOUN dobj submit
p p
| You can get a whole phrase by its syntactic head using the | You can get a whole phrase by its syntactic head using the
| #[code .subtree] attribute. This returns an ordered sequence of tokens. | #[code .subtree] attribute. This returns an ordered sequence of tokens.
| For the default English model, the parse tree is #[em projective], which
| means that there are no crossing brackets. The tokens returned by
| #[code .subtree] are therefore guaranteed to be contiguous. This is not
| true for the German model, which has many
| #[+a("https://explosion.ai/blog/german-model#word-order", true) non-projective dependencies].
| You can walk up the tree with the #[code .ancestors] attribute, and | You can walk up the tree with the #[code .ancestors] attribute, and
| check dominance with the #[code .is_ancestor()] method. | check dominance with the #[+api("token#is_ancestor") #[code .is_ancestor()]]
| method.
+aside("Projective vs. non-projective")
| For the #[+a("/docs/usage/models#available") default English model], the
| parse tree is #[strong projective], which means that there are no crossing
| brackets. The tokens returned by #[code .subtree] are therefore guaranteed
| to be contiguous. This is not true for the German model, which has many
| #[+a(COMPANY_URL + "/blog/german-model#word-order", true) non-projective dependencies].
+code.
doc = nlp(u'Credit and mortgage account holders must submit their requests')
root = [token for token in doc if token.head is token][0]
subject = list(root.lefts)[0]
for descendant in subject.subtree:
assert subject.is_ancestor(descendant)
print(descendant.text, descendant.dep_, descendant.n_lefts, descendant.n_rights,
[ancestor.text for ancestor in descendant.ancestors])
+table(["Text", "Dep", "n_lefts", "n_rights", "ancestors"])
- var style = [0, 1, 1, 1, 0]
+annotation-row(["Credit", "nmod", 0, 2, "holders, submit"], style)
+annotation-row(["and", "cc", 0, 0, "Credit, holders, submit"], style)
+annotation-row(["mortgage", "compound", 0, 0, "account, Credit, holders, submit"], style)
+annotation-row(["account", "conj", 1, 0, "Credit, holders, submit"], style)
+annotation-row(["holders", "nsubj", 1, 0, "submit"], style)
p p
| Finally, I often find the #[code .left_edge] and #[code right_edge] | Finally, the #[code .left_edge] and #[code .right_edge] attributes
| attributes especially useful. They give you the first and last token | can be especially useful, because they give you the first and last token
| of the subtree. This is the easiest way to create a #[code Span] object | of the subtree. This is the easiest way to create a #[code Span] object
| for a syntactic phrase — a useful operation. | for a syntactic phrase. Note that #[code .right_edge] gives a token
| #[strong within] the subtree — so if you use it as the end-point of a
| range, don't forget to #[code +1]!
+code.
doc = nlp(u'Credit and mortgage account holders must submit their requests')
span = doc[doc[4].left_edge.i : doc[4].right_edge.i+1]
span.merge()
for token in doc:
print(token.text, token.pos_, token.dep_, token.head.text)
+table(["Text", "POS", "Dep", "Head text"])
- var style = [0, 1, 1, 0]
+annotation-row(["Credit and mortgage account holders", "NOUN", "nsubj", "submit"], style)
+annotation-row(["must", "VERB", "aux", "submit"], style)
+annotation-row(["submit", "VERB", "ROOT", "submit"], style)
+annotation-row(["their", "ADJ", "poss", "requests"], style)
+annotation-row(["requests", "NOUN", "dobj", "submit"], style)
+h(2, "displacy") Visualizing dependencies
p p
| Note that #[code .right_edge] gives a token #[em within] the subtree — | The best way to understand spaCy's dependency parser is interactively.
| so if you use it as the end-point of a range, don't forget to #[code +1]! | To make this easier, spaCy v2.0+ comes with a visualization module. Simply
| pass a #[code Doc] or a list of #[code Doc] objects to
| displaCy and run #[+api("displacy#serve") #[code displacy.serve]] to
| run the web server, or #[+api("displacy#render") #[code displacy.render]]
| to generate the raw markup. If you want to know how to write rules that
| hook into some type of syntactic construction, just plug the sentence into
| the visualizer and see how spaCy annotates it.
+code.
from spacy import displacy
doc = nlp(u'Autonomous cars shift insurance liability toward manufacturers')
displacy.serve(doc, style='dep')
+infobox
| For more details and examples, see the
| #[+a("/docs/usage/visualizers") usage workflow on visualizing spaCy]. You
| can also test displaCy in our #[+a(DEMOS_URL + "/displacy", true) online demo].
+h(2, "disabling") Disabling the parser +h(2, "disabling") Disabling the parser
@ -149,8 +210,6 @@ p
| the parser from being loaded: | the parser from being loaded:
+code. +code.
import spacy
nlp = spacy.load('en', parser=False) nlp = spacy.load('en', parser=False)
p p