Merge pull request #1956 from pktippa/web-doc-patches

Website documentation changes: Linguistic features, dependency parse
This commit is contained in:
Ines Montani 2018-02-15 12:06:26 +01:00 committed by GitHub
commit 2392ec9d8c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 4 additions and 4 deletions

View File

@@ -65,9 +65,9 @@ p
- var style = [0, 1, 0, 1, 0]
+annotation-row(["Autonomous", "amod", "cars", "NOUN", ""], style)
+annotation-row(["cars", "nsubj", "shift", "VERB", "Autonomous"], style)
+annotation-row(["shift", "ROOT", "shift", "VERB", "cars, liability"], style)
+annotation-row(["shift", "ROOT", "shift", "VERB", "cars, liability, toward"], style)
+annotation-row(["insurance", "compound", "liability", "NOUN", ""], style)
+annotation-row(["liability", "dobj", "shift", "VERB", "insurance, toward"], style)
+annotation-row(["liability", "dobj", "shift", "VERB", "insurance"], style)
+annotation-row(["toward", "prep", "liability", "NOUN", "manufacturers"], style)
+annotation-row(["manufacturers", "pobj", "toward", "ADP", ""], style)

View File

@@ -80,7 +80,7 @@ p
doc.ents = [netflix_ent]
ents = [(e.text, e.start_char, e.end_char, e.label_) for e in doc.ents]
assert ents = [(u'Netflix', 0, 7, u'ORG')]
assert ents == [(u'Netflix', 0, 7, u'ORG')]
p
| Keep in mind that you need to create a #[code Span] with the start and

View File

@@ -54,7 +54,7 @@ p
+code.
import spacy
from spacy.symbols import ORTH, LEMMA, POS
from spacy.symbols import ORTH, LEMMA, POS, TAG
nlp = spacy.load('en')
doc = nlp(u'gimme that') # phrase to tokenize