diff --git a/spacy/gold.pyx b/spacy/gold.pyx
index e0ba26a04..569979a5f 100644
--- a/spacy/gold.pyx
+++ b/spacy/gold.pyx
@@ -532,7 +532,7 @@ cdef class GoldParse:
                         self.labels[i] = deps[i2j_multi[i]]
                     # Now set NER...This is annoying because if we've split
                     # got an entity word split into two, we need to adjust the
-                    # BILOU tags. We can't have BB or LL etc.
+                    # BILUO tags. We can't have BB or LL etc.
                     # Case 1: O -- easy.
                     ner_tag = entities[i2j_multi[i]]
                     if ner_tag == "O":
diff --git a/spacy/matcher/phrasematcher.pyx b/spacy/matcher/phrasematcher.pyx
index 22ce8831a..68821a085 100644
--- a/spacy/matcher/phrasematcher.pyx
+++ b/spacy/matcher/phrasematcher.pyx
@@ -127,7 +127,7 @@ cdef class PhraseMatcher:
                     and self.attr not in (DEP, POS, TAG, LEMMA):
                 string_attr = self.vocab.strings[self.attr]
                 user_warning(Warnings.W012.format(key=key, attr=string_attr))
-            tags = get_bilou(length)
+            tags = get_biluo(length)
             phrase_key = <attr_t*>mem.alloc(length, sizeof(attr_t))
             for i, tag in enumerate(tags):
                 attr_value = self.get_lex_value(doc, i)
@@ -230,7 +230,7 @@ cdef class PhraseMatcher:
         return "matcher:{}-{}".format(string_attr_name, string_attr_value)


-def get_bilou(length):
+def get_biluo(length):
     if length == 0:
         raise ValueError(Errors.E127)
     elif length == 1:
diff --git a/website/docs/api/annotation.md b/website/docs/api/annotation.md
index 366e15980..a5bb30b6f 100644
--- a/website/docs/api/annotation.md
+++ b/website/docs/api/annotation.md
@@ -510,7 +510,7 @@
 described in any single publication. The model is a greedy transition-based
 parser guided by a linear model whose weights are learned using the averaged
 perceptron loss, via the
 [dynamic oracle](http://www.aclweb.org/anthology/C12-1059) imitation learning
-strategy. The transition system is equivalent to the BILOU tagging scheme.
+strategy. The transition system is equivalent to the BILUO tagging scheme.

 ## Models and training data {#training}
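
Note for reviewers: BILUO stands for Begin, In, Last, Unit, Out, and this patch standardizes on that spelling over "BILOU". The sketch below is purely illustrative of how the scheme tags an entity span of a given token length; the helper name `biluo_tags` and the string labels are hypothetical and are not the `get_biluo` helper being renamed here.

```python
def biluo_tags(length, label="ENT"):
    """Illustrative only: BILUO tags for an entity spanning `length` tokens.

    B = first token of a multi-token entity, I = inside, L = last token,
    U = single-token (unit) entity, O = outside any entity.
    """
    if length <= 0:
        raise ValueError("An entity must span at least one token")
    if length == 1:
        return ["U-" + label]
    return ["B-" + label] + ["I-" + label] * (length - 2) + ["L-" + label]


print(biluo_tags(3, "GPE"))     # ['B-GPE', 'I-GPE', 'L-GPE']
print(biluo_tags(1, "PERSON"))  # ['U-PERSON']
```

This is also why the token-splitting branch in gold.pyx needs care: naively copying a tag onto both halves of a split token could yield sequences like B, B or L, L, which the comment above notes are invalid under the scheme.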