//- 💫 DOCS > USAGE > DEPENDENCY PARSE

include ../../_includes/_mixins

p
    |  spaCy features a fast and accurate syntactic dependency parser, and has
    |  a rich API for navigating the tree. The parser also powers the sentence
    |  boundary detection, and lets you iterate over base noun phrases, or
    |  "chunks". You can check whether a #[+api("doc") #[code Doc]] object has
    |  been parsed with the #[code doc.is_parsed] attribute, which returns a
    |  boolean value. If this attribute is #[code False], the default sentence
    |  iterator will raise an exception.

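p
    |  As a quick sanity check, a minimal sketch along these lines (assuming
    |  the default English model is installed) verifies that a document has
    |  been parsed before iterating over its sentences:

+code.
    import spacy

    nlp = spacy.load('en')
    doc = nlp(u'This is a sentence. This is another sentence.')
    # is_parsed is set once the dependency parse is available
    assert doc.is_parsed
    for sent in doc.sents:
        print(sent.text)
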
| +h(2, "noun-chunks") Noun chunks
 | ||
|     +tag-model("dependency parse")
 | ||
| 
 | ||
| p
 | ||
|     |  Noun chunks are "base noun phrases" – flat phrases that have a noun as
 | ||
|     |  their head. You can think of noun chunks as a noun plus the words describing
 | ||
|     |  the noun – for example, "the lavish green grass" or "the world’s largest
 | ||
|     |  tech fund". To get the noun chunks in a document, simply iterate over
 | ||
|     |  #[+api("doc#noun_chunks") #[code Doc.noun_chunks]].
 | ||
| 
 | ||
| +code("Example").
 | ||
|     nlp = spacy.load('en')
 | ||
|     doc = nlp(u'Autonomous cars shift insurance liability toward manufacturers')
 | ||
|     for chunk in doc.noun_chunks:
 | ||
|         print(chunk.text, chunk.root.text, chunk.root.dep_,
 | ||
|               chunk.root.head.text)
 | ||
| 
 | ||
+aside
    |  #[strong Text:] The original noun chunk text.#[br]
    |  #[strong Root text:] The original text of the word connecting the noun
    |  chunk to the rest of the parse.#[br]
    |  #[strong Root dep:] Dependency relation connecting the root to its head.#[br]
    |  #[strong Root head text:] The text of the root token's head.#[br]

| +table(["Text", "root.text", "root.dep_", "root.head.text"])
 | ||
|     - var style = [0, 0, 1, 0]
 | ||
|     +annotation-row(["Autonomous cars", "cars", "nsubj", "shift"], style)
 | ||
|     +annotation-row(["insurance liability", "liability", "dobj", "shift"], style)
 | ||
|     +annotation-row(["manufacturers", "manufacturers", "pobj", "toward"], style)
 | ||
| 
 | ||
| +h(2, "navigating") Navigating the parse tree
 | ||
| 
 | ||
p
    |  spaCy uses the terms #[strong head] and #[strong child] to describe the
    |  words #[strong connected by a single arc] in the dependency tree. The
    |  term #[strong dep] is used for the arc label, which describes the type
    |  of syntactic relation that connects the child to the head. As with
    |  other attributes, the value of #[code .dep] is a hash value. You can
    |  get the string value with #[code .dep_].

| +code("Example").
 | ||
|     doc = nlp(u'Autonomous cars shift insurance liability toward manufacturers')
 | ||
|     for token in doc:
 | ||
|         print(token.text, token.dep_, token.head.text, token.head.pos_,
 | ||
|               [child for child in token.children])
 | ||
| 
 | ||
+aside
    |  #[strong Text]: The original token text.#[br]
    |  #[strong Dep]: The syntactic relation connecting child to head.#[br]
    |  #[strong Head text]: The original text of the token head.#[br]
    |  #[strong Head POS]: The part-of-speech tag of the token head.#[br]
    |  #[strong Children]: The immediate syntactic dependents of the token.

| +table(["Text", "Dep", "Head text", "Head POS", "Children"])
 | ||
|     - var style = [0, 1, 0, 1, 0]
 | ||
|     +annotation-row(["Autonomous", "amod", "cars", "NOUN", ""], style)
 | ||
|     +annotation-row(["cars", "nsubj", "shift", "VERB", "Autonomous"], style)
 | ||
|     +annotation-row(["shift", "ROOT", "shift", "VERB", "cars, liability"], style)
 | ||
|     +annotation-row(["insurance", "compound", "liability", "NOUN", ""], style)
 | ||
|     +annotation-row(["liability", "dobj", "shift", "VERB", "insurance, toward"], style)
 | ||
|     +annotation-row(["toward", "prep", "liability", "NOUN", "manufacturers"], style)
 | ||
|     +annotation-row(["manufacturers", "pobj", "toward", "ADP", ""], style)
 | ||
| 
 | ||
| +codepen("dcf8d293367ca185b935ed2ca11ebedd", 370)
 | ||
| 
 | ||
p
    |  Because the syntactic relations form a tree, every word has
    |  #[strong exactly one head]. You can therefore iterate over the arcs in
    |  the tree by iterating over the words in the sentence. This is usually
    |  the best way to match an arc of interest — from below:

+code.
    from spacy.symbols import nsubj, VERB

    # Finding a verb with a subject from below — good
    verbs = set()
    for possible_subject in doc:
        if possible_subject.dep == nsubj and possible_subject.head.pos == VERB:
            verbs.add(possible_subject.head)

p
    |  If you try to match from above, you'll have to iterate twice: once for
    |  the head, and then again through the children:

+code.
    # Finding a verb with a subject from above — less good
    verbs = []
    for possible_verb in doc:
        if possible_verb.pos == VERB:
            for possible_subject in possible_verb.children:
                if possible_subject.dep == nsubj:
                    verbs.append(possible_verb)
                    break

p
    |  To iterate through the children, use the #[code token.children]
    |  attribute, which provides a sequence of #[+api("token") #[code Token]]
    |  objects.

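p
    |  For instance, a short sketch like the following (reusing the example
    |  sentence from above) collects the direct dependents of the sentence's
    |  root:

+code.
    doc = nlp(u'Autonomous cars shift insurance liability toward manufacturers')
    root = [token for token in doc if token.head is token][0]
    # The root's children are its immediate dependents, here the
    # subject and object: ['cars', 'liability']
    print([child.text for child in root.children])
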
+h(3, "navigating-around") Iterating around the local tree

p
    |  A few more convenience attributes are provided for iterating around the
    |  local tree from the token. The #[code .lefts] and #[code .rights]
    |  attributes provide sequences of syntactic children that occur before
    |  and after the token. Both sequences are in sentence order. There are
    |  also two integer-typed attributes, #[code .n_lefts] and
    |  #[code .n_rights], that give the number of left and right children.

+code.
    doc = nlp(u'bright red apples on the tree')
    assert [token.text for token in doc[2].lefts] == [u'bright', u'red']
    assert [token.text for token in doc[2].rights] == [u'on']
    assert doc[2].n_lefts == 2
    assert doc[2].n_rights == 1

p
    |  You can get a whole phrase by its syntactic head using the
    |  #[code .subtree] attribute. This returns an ordered sequence of tokens.
    |  You can walk up the tree with the #[code .ancestors] attribute, and
    |  check dominance with the
    |  #[+api("token#is_ancestor") #[code .is_ancestor()]] method.

| +aside("Projective vs. non-projective")
 | ||
|     |  For the #[+a("/docs/usage/models#available") default English model], the
 | ||
|     |  parse tree is #[strong projective], which means that there are no crossing
 | ||
|     |  brackets. The tokens returned by #[code .subtree] are therefore guaranteed
 | ||
|     |  to be contiguous. This is not true for the German model, which has many
 | ||
|     |  #[+a(COMPANY_URL + "/blog/german-model#word-order", true) non-projective dependencies].
 | ||
| 
 | ||
+code.
    doc = nlp(u'Credit and mortgage account holders must submit their requests')
    root = [token for token in doc if token.head is token][0]
    subject = list(root.lefts)[0]
    for descendant in subject.subtree:
        assert subject.is_ancestor(descendant)
        print(descendant.text, descendant.dep_, descendant.n_lefts,
              descendant.n_rights,
              [ancestor.text for ancestor in descendant.ancestors])

| +table(["Text", "Dep", "n_lefts", "n_rights", "ancestors"])
 | ||
|     - var style = [0, 1, 1, 1, 0]
 | ||
|     +annotation-row(["Credit", "nmod", 0, 2, "holders, submit"], style)
 | ||
|     +annotation-row(["and", "cc", 0, 0, "Credit, holders, submit"], style)
 | ||
|     +annotation-row(["mortgage", "compound", 0, 0, "account, Credit, holders, submit"], style)
 | ||
|     +annotation-row(["account", "conj", 1, 0, "Credit, holders, submit"], style)
 | ||
|     +annotation-row(["holders", "nsubj", 1, 0, "submit"], style)
 | ||
| 
 | ||
p
    |  Finally, the #[code .left_edge] and #[code .right_edge] attributes
    |  can be especially useful, because they give you the first and last
    |  token of the subtree. This is the easiest way to create a #[code Span]
    |  object for a syntactic phrase. Note that #[code .right_edge] gives a
    |  token #[strong within] the subtree — so if you use it as the end-point
    |  of a range, don't forget to #[code +1]!

+code.
    doc = nlp(u'Credit and mortgage account holders must submit their requests')
    span = doc[doc[4].left_edge.i : doc[4].right_edge.i+1]
    span.merge()
    for token in doc:
        print(token.text, token.pos_, token.dep_, token.head.text)

| +table(["Text", "POS", "Dep", "Head text"])
 | ||
|     - var style = [0, 1, 1, 0]
 | ||
|     +annotation-row(["Credit and mortgage account holders", "NOUN", "nsubj", "submit"], style)
 | ||
|     +annotation-row(["must", "VERB", "aux", "submit"], style)
 | ||
|     +annotation-row(["submit", "VERB", "ROOT", "submit"], style)
 | ||
|     +annotation-row(["their", "ADJ", "poss", "requests"], style)
 | ||
|     +annotation-row(["requests", "NOUN", "dobj", "submit"], style)
 | ||
| 
 | ||
| +h(2, "displacy") Visualizing dependencies
 | ||
| 
 | ||
p
    |  The best way to understand spaCy's dependency parser is to explore it
    |  interactively. To make this easier, spaCy v2.0+ comes with a
    |  visualization module. Simply pass a #[code Doc] or a list of
    |  #[code Doc] objects to displaCy and run
    |  #[+api("displacy#serve") #[code displacy.serve]] to run the web server,
    |  or #[+api("displacy#render") #[code displacy.render]] to generate the
    |  raw markup. If you want to know how to write rules that hook into some
    |  type of syntactic construction, just plug the sentence into the
    |  visualizer and see how spaCy annotates it.

+code.
    from spacy import displacy

    doc = nlp(u'Autonomous cars shift insurance liability toward manufacturers')
    displacy.serve(doc, style='dep')

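p
    |  If you'd rather get the markup as a string, for example to save it to
    |  a file, a sketch along these lines should work. Setting
    |  #[code page=True] wraps the output in a full HTML page:

+code.
    from spacy import displacy

    doc = nlp(u'Autonomous cars shift insurance liability toward manufacturers')
    # render returns the markup instead of starting a web server
    html = displacy.render(doc, style='dep', page=True)
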
+infobox
    |  For more details and examples, see the
    |  #[+a("/docs/usage/visualizers") usage guide on visualizing spaCy]. You
    |  can also test displaCy in our #[+a(DEMOS_URL + "/displacy", true) online demo].

| +h(2, "disabling") Disabling the parser
 | ||
| 
 | ||
p
    |  In the #[+a("/docs/usage/models#available") default models], the parser
    |  is loaded and enabled as part of the
    |  #[+a("/docs/usage/language-processing-pipeline") standard processing pipeline].
    |  If you don't need any of the syntactic information, you should disable
    |  the parser. Disabling the parser will make spaCy load and run much
    |  faster. If you want to load the parser, but need to disable it for
    |  specific documents, you can also control its use on the #[code nlp]
    |  object.

+code.
    nlp = spacy.load('en', disable=['parser'])
    nlp = English().from_disk('/model', disable=['parser'])
    doc = nlp(u"I don't want parsed", disable=['parser'])

| +infobox("Important note: disabling pipeline components")
 | ||
|     .o-block
 | ||
|         |  Since spaCy v2.0 comes with better support for customising the
 | ||
|         |  processing pipeline components, the #[code parser] keyword argument
 | ||
|         |  has been replaced with #[code disable], which takes a list of
 | ||
|         |  #[+a("/docs/usage/language-processing-pipeline") pipeline component names].
 | ||
|         |  This lets you disable both default and custom components when loading
 | ||
|         |  a model, or initialising a Language class via
 | ||
|         |  #[+api("language-from_disk") #[code from_disk]].
 | ||
|     +code-new.
 | ||
|         nlp = spacy.load('en', disable=['parser'])
 | ||
|         doc = nlp(u"I don't want parsed", disable=['parser'])
 | ||
|     +code-old.
 | ||
|         nlp = spacy.load('en', parser=False)
 | ||
|         doc = nlp(u"I don't want parsed", parse=False)
 |