diff --git a/website/usage/_linguistic-features/_dependency-parse.jade b/website/usage/_linguistic-features/_dependency-parse.jade
index 188b7b8f3..d8d7cbce1 100644
--- a/website/usage/_linguistic-features/_dependency-parse.jade
+++ b/website/usage/_linguistic-features/_dependency-parse.jade
@@ -65,9 +65,9 @@ p
     - var style = [0, 1, 0, 1, 0]
     +annotation-row(["Autonomous", "amod", "cars", "NOUN", ""], style)
     +annotation-row(["cars", "nsubj", "shift", "VERB", "Autonomous"], style)
-    +annotation-row(["shift", "ROOT", "shift", "VERB", "cars, liability"], style)
+    +annotation-row(["shift", "ROOT", "shift", "VERB", "cars, liability, toward"], style)
     +annotation-row(["insurance", "compound", "liability", "NOUN", ""], style)
-    +annotation-row(["liability", "dobj", "shift", "VERB", "insurance, toward"], style)
+    +annotation-row(["liability", "dobj", "shift", "VERB", "insurance"], style)
     +annotation-row(["toward", "prep", "liability", "NOUN", "manufacturers"], style)
     +annotation-row(["manufacturers", "pobj", "toward", "ADP", ""], style)
diff --git a/website/usage/_linguistic-features/_named-entities.jade b/website/usage/_linguistic-features/_named-entities.jade
index 9e55ba84e..0f32d1da3 100644
--- a/website/usage/_linguistic-features/_named-entities.jade
+++ b/website/usage/_linguistic-features/_named-entities.jade
@@ -80,7 +80,7 @@ p
         doc.ents = [netflix_ent]
         ents = [(e.text, e.start_char, e.end_char, e.label_) for e in doc.ents]
-        assert ents = [(u'Netflix', 0, 7, u'ORG')]
+        assert ents == [(u'Netflix', 0, 7, u'ORG')]

 p
     | Keep in mind that you need to create a #[code Span] with the start and
diff --git a/website/usage/_linguistic-features/_tokenization.jade b/website/usage/_linguistic-features/_tokenization.jade
index f149556ce..2cd3a13de 100644
--- a/website/usage/_linguistic-features/_tokenization.jade
+++ b/website/usage/_linguistic-features/_tokenization.jade
@@ -54,7 +54,7 @@ p
 +code.
     import spacy
-    from spacy.symbols import ORTH, LEMMA, POS
+    from spacy.symbols import ORTH, LEMMA, POS, TAG

     nlp = spacy.load('en')
     doc = nlp(u'gimme that') # phrase to tokenize