//- 💫 DOCS > API > ANNOTATION SPECS

include ../../_includes/_mixins

p This document describes the target annotations spaCy is trained to predict.

+h(2, "tokenization") Tokenization

p
    |  Tokenization standards are based on the
    |  #[+a("https://catalog.ldc.upenn.edu/LDC2013T19") OntoNotes 5] corpus.
    |  The tokenizer differs from most by including tokens for significant
    |  whitespace. Any sequence of whitespace characters beyond a single space
    |  (#[code ' ']) is included as a token.

+aside-code("Example").
    from spacy.en import English
    nlp = English(parser=False)
    tokens = nlp('Some\nspaces  and\ttab characters')
    print([t.orth_ for t in tokens])
    # ['Some', '\n', 'spaces', ' ', 'and', '\t', 'tab', 'characters']

p
    |  The whitespace tokens are useful for much the same reason punctuation is
    |  – it's often an important delimiter in the text. By preserving it in the
    |  token output, we are able to maintain a simple alignment between the
    |  tokens and the original string, and we ensure that no information is
    |  lost during processing.
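
p
    |  As an illustration of this alignment property, the sketch below
    |  reassembles the original string from the tokens. It assumes the
    |  #[code whitespace_] attribute, which holds the single trailing space
    |  (if any) attached to each token.

+code("Reconstructing the original string (sketch)").
    from spacy.en import English

    nlp = English(parser=False)
    text = 'Some\nspaces  and\ttab characters'
    tokens = nlp(text)
    # Each token's text plus its trailing whitespace, concatenated in
    # order, should reproduce the exact input string.
    rebuilt = ''.join(t.orth_ + t.whitespace_ for t in tokens)
    assert rebuilt == text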
+h(2, "sentence-boundary") Sentence boundary detection
 | 
						||
 | 
						||
p
 | 
						||
    |  Sentence boundaries are calculated from the syntactic parse tree, so
 | 
						||
    |  features such as punctuation and capitalisation play an important but
 | 
						||
    |  non-decisive role in determining the sentence boundaries. Usually this
 | 
						||
    |  means that the sentence boundaries will at least coincide with clause
 | 
						||
    |  boundaries, even given poorly punctuated text.
 | 
						||
 | 
						||
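
p
    |  Because the boundaries come from the parse, the parser has to be
    |  enabled before sentences can be iterated. A minimal sketch, assuming
    |  the #[code Doc.sents] iterator:

+code("Iterating over sentences (sketch)").
    from spacy.en import English

    nlp = English()  # keep the parser enabled
    tokens = nlp('This is one sentence. This is another one.')
    for sent in tokens.sents:
        print(sent.text)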
+h(2, "pos-tagging") Part-of-speech Tagging
 | 
						||
 | 
						||
include _annotation/_pos-tags
 | 
						||
 | 
						||
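
p
    |  A minimal sketch of reading the predicted tags, assuming the
    |  #[code tag_] attribute for the fine-grained part-of-speech tag:

+code("Reading part-of-speech tags (sketch)").
    from spacy.en import English

    nlp = English(parser=False)
    tokens = nlp('Apples are red')
    print([(t.orth_, t.tag_) for t in tokens])
    # Along the lines of: [('Apples', 'NNS'), ('are', 'VBP'), ('red', 'JJ')]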
+h(2, "lemmatization") Lemmatization
 | 
						||
 | 
						||
p A "lemma" is the uninflected form of a word. In English, this means:
 | 
						||
 | 
						||

+list
    +item #[strong Adjectives]: The form like "happy", not "happier" or "happiest"
    +item #[strong Adverbs]: The form like "badly", not "worse" or "worst"
    +item #[strong Nouns]: The form like "dog", not "dogs"; like "child", not "children"
    +item #[strong Verbs]: The form like "write", not "writes", "writing", "wrote" or "written"
+aside("About spaCy's custom pronoun lemma")
 | 
						||
    |  Unlike verbs and common nouns, there's no clear base form of a personal
 | 
						||
    |  pronoun. Should the lemma of "me" be "I", or should we normalize person
 | 
						||
    |  as well, giving "it" — or maybe "he"? spaCy's solution is to introduce a
 | 
						||
    |  novel symbol, #[code.u-nowrap -PRON-], which is used as the lemma for
 | 
						||
    |  all personal pronouns.
 | 
						||
 | 
						||
p
 | 
						||
    |  The lemmatization data is taken from
 | 
						||
    |  #[+a("https://wordnet.princeton.edu") WordNet]. However, we also add a
 | 
						||
    |  special case for pronouns: all pronouns are lemmatized to the special
 | 
						||
    |  token #[code -PRON-].
 | 
						||
 | 
						||
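
p
    |  A quick sketch of inspecting lemmas, assuming the #[code lemma_]
    |  attribute:

+code("Inspecting lemmas (sketch)").
    from spacy.en import English

    nlp = English(parser=False)
    tokens = nlp('I was reading the papers')
    print([t.lemma_ for t in tokens])
    # Along the lines of: ['-PRON-', 'be', 'read', 'the', 'paper']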
+h(2, "dependency-parsing") Syntactic Dependency Parsing
 | 
						||
 | 
						||
include _annotation/_dep-labels
 | 
						||
 | 
						||
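
p
    |  A sketch of reading the parse, assuming the #[code dep_] label and
    |  #[code head] reference on each token:

+code("Reading the dependency parse (sketch)").
    from spacy.en import English

    nlp = English()
    tokens = nlp('Apples are red')
    # Each token points to its syntactic head, with a typed label.
    print([(t.orth_, t.dep_, t.head.orth_) for t in tokens])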
+h(2, "named-entities") Named Entity Recognition
 | 
						||
 | 
						||
include _annotation/_named-entities
 | 
						||
 | 
						||
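
p
    |  A sketch of reading the recognized entities, assuming the
    |  #[code Doc.ents] property:

+code("Reading entity annotations (sketch)").
    from spacy.en import English

    nlp = English()
    tokens = nlp('Google was founded in California')
    print([(ent.label_, ent.text) for ent in tokens.ents])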
+h(3, "biluo") BILUO Scheme
 | 
						||
 | 
						||

p
    |  spaCy translates character offsets into the BILUO scheme, in order to
    |  decide the cost of each action given the current state of the entity
    |  recognizer. The costs are then used to calculate the gradient of the
    |  loss, to train the model.

+aside("Why BILUO, not IOB?")
    |  There are several coding schemes for encoding entity annotations as
    |  token tags. These coding schemes are equally expressive, but not
    |  necessarily equally learnable.
    |  #[+a("http://www.aclweb.org/anthology/W09-1119") Ratinov and Roth]
    |  showed that the minimal #[strong Begin], #[strong In], #[strong Out]
    |  scheme was more difficult to learn than the #[strong BILUO] scheme that
    |  we use, which explicitly marks boundary tokens.

+table([ "Tag", "Description" ])
    +row
        +cell #[code #[span.u-color-theme B] EGIN]
        +cell The first token of a multi-token entity.

    +row
        +cell #[code #[span.u-color-theme I] N]
        +cell An inner token of a multi-token entity.

    +row
        +cell #[code #[span.u-color-theme L] AST]
        +cell The final token of a multi-token entity.

    +row
        +cell #[code #[span.u-color-theme U] NIT]
        +cell A single-token entity.

    +row
        +cell #[code #[span.u-color-theme O] UT]
        +cell A non-entity token.
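
p
    |  To make the scheme concrete, the sketch below converts character-offset
    |  entity annotations into per-token BILUO tags. The helper is purely
    |  illustrative and not part of the spaCy API:

+code("Character offsets to BILUO tags (sketch)").
    def offsets_to_biluo(tokens, entities):
        # tokens: list of (start_char, end_char) pairs.
        # entities: list of (start_char, end_char, label) triples.
        tags = ['O'] * len(tokens)
        for ent_start, ent_end, label in entities:
            # Indices of tokens falling inside the entity span
            covered = [i for i, (ts, te) in enumerate(tokens)
                       if ts >= ent_start and te <= ent_end]
            if len(covered) == 1:
                tags[covered[0]] = 'U-' + label
            elif covered:
                tags[covered[0]] = 'B-' + label
                for i in covered[1:-1]:
                    tags[i] = 'I-' + label
                tags[covered[-1]] = 'L-' + label
        return tags

    # 'Mark Watney visited Mars', tokenized with character boundaries
    tokens = [(0, 4), (5, 11), (12, 19), (20, 24)]
    entities = [(0, 11, 'PERSON'), (20, 24, 'LOC')]
    print(offsets_to_biluo(tokens, entities))
    # ['B-PERSON', 'L-PERSON', 'O', 'U-LOC']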
+h(2, "json-input") JSON input format for training
 | 
						||
 | 
						||
p
 | 
						||
    |  spaCy takes training data in the following format:
 | 
						||
 | 
						||
+code("Example structure").
 | 
						||
    doc: {
 | 
						||
        id: string,
 | 
						||
        paragraphs: [{
 | 
						||
            raw: string,
 | 
						||
            sents: [int],
 | 
						||
            tokens: [{
 | 
						||
                start: int,
 | 
						||
                tag: string,
 | 
						||
                head: int,
 | 
						||
                dep: string
 | 
						||
            }],
 | 
						||
            ner: [{
 | 
						||
                start: int,
 | 
						||
                end: int,
 | 
						||
                label: string
 | 
						||
            }],
 | 
						||
            brackets: [{
 | 
						||
                start: int,
 | 
						||
                end: int,
 | 
						||
                label: string
 | 
						||
            }]
 | 
						||
        }]
 | 
						||
    }
 |
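
p
    |  For illustration, a minimal document in this shape might look like the
    |  following. The values are invented, and some field semantics (for
    |  instance, whether #[code head] is a relative offset or an absolute
    |  index) are assumptions to check against the training code:

+code("Minimal example document (sketch)").
    doc = {
        'id': 'example-1',
        'paragraphs': [{
            'raw': 'London is big.',
            'sents': [0],
            'tokens': [
                {'start': 0, 'tag': 'NNP', 'head': 1, 'dep': 'nsubj'},
                {'start': 7, 'tag': 'VBZ', 'head': 0, 'dep': 'ROOT'},
                {'start': 10, 'tag': 'JJ', 'head': -1, 'dep': 'acomp'},
                {'start': 13, 'tag': '.', 'head': -2, 'dep': 'punct'}
            ],
            'ner': [
                {'start': 0, 'end': 6, 'label': 'GPE'}
            ],
            'brackets': []
        }]
    }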