{
    "sidebar": {
        "Overview": {
            "Architecture": "./",
            "Annotation Specs": "annotation",
            "Functions": "top-level"
        },
        "Containers": {
            "Doc": "doc",
            "Token": "token",
            "Span": "span",
            "Lexeme": "lexeme"
        },
        "Pipeline": {
            "Language": "language",
            "Pipe": "pipe",
            "Tensorizer": "tensorizer",
            "Tagger": "tagger",
            "DependencyParser": "dependencyparser",
            "EntityRecognizer": "entityrecognizer",
            "TextCategorizer": "textcategorizer",
            "Tokenizer": "tokenizer",
            "Lemmatizer": "lemmatizer",
            "Matcher": "matcher",
            "PhraseMatcher": "phrasematcher"
        },
        "Other": {
            "Vocab": "vocab",
            "StringStore": "stringstore",
            "Vectors": "vectors",
            "GoldParse": "goldparse",
            "GoldCorpus": "goldcorpus",
            "Binder": "binder"
        }
    },

    "index": {
        "title": "Architecture",
        "next": "annotation",
        "menu": {
            "Basics": "basics",
            "Neural Network Model": "nn-model",
            "Cython Conventions": "cython"
        }
    },

    "top-level": {
        "title": "Top-level Functions",
        "menu": {
            "spacy": "spacy",
            "displacy": "displacy",
            "Utility Functions": "util",
            "Compatibility": "compat",
            "Command Line": "cli"
        }
    },

    "language": {
        "title": "Language",
        "tag": "class",
        "teaser": "A text-processing pipeline.",
        "source": "spacy/language.py"
    },

    "doc": {
        "title": "Doc",
        "tag": "class",
        "teaser": "A container for accessing linguistic annotations.",
        "source": "spacy/tokens/doc.pyx"
    },

    "token": {
        "title": "Token",
        "tag": "class",
        "source": "spacy/tokens/token.pyx"
    },

    "span": {
        "title": "Span",
        "tag": "class",
        "source": "spacy/tokens/span.pyx"
    },

    "lexeme": {
        "title": "Lexeme",
        "tag": "class",
        "source": "spacy/lexeme.pyx"
    },

    "vocab": {
        "title": "Vocab",
        "teaser": "A storage class for vocabulary and other data shared across a language.",
        "tag": "class",
        "source": "spacy/vocab.pyx"
    },

    "stringstore": {
        "title": "StringStore",
        "tag": "class",
        "source": "spacy/strings.pyx"
    },

    "matcher": {
        "title": "Matcher",
        "teaser": "Match sequences of tokens, based on pattern rules.",
        "tag": "class",
        "source": "spacy/matcher.pyx"
    },

    "phrasematcher": {
        "title": "PhraseMatcher",
        "teaser": "Match sequences of tokens, based on documents.",
        "tag": "class",
        "tag_new": 2,
        "source": "spacy/matcher.pyx"
    },

    "pipe": {
        "title": "Pipe",
        "teaser": "Abstract base class defining the API for pipeline components.",
        "tag": "class",
        "tag_new": 2,
        "source": "spacy/pipeline.pyx"
    },

    "entityrecognizer": {
        "title": "EntityRecognizer",
        "teaser": "Annotate named entities on documents.",
        "tag": "class",
        "source": "spacy/pipeline.pyx"
    },

    "textcategorizer": {
        "title": "TextCategorizer",
        "teaser": "Add text categorization models to spaCy pipelines.",
        "tag": "class",
        "tag_new": 2,
        "source": "spacy/pipeline.pyx"
    },

    "dependencyparser": {
        "title": "DependencyParser",
        "teaser": "Annotate syntactic dependencies on documents.",
        "tag": "class",
        "source": "spacy/pipeline.pyx"
    },

    "tokenizer": {
        "title": "Tokenizer",
        "tag": "class",
        "source": "spacy/tokenizer.pyx"
    },

    "lemmatizer": {
        "title": "Lemmatizer",
        "tag": "class"
    },

    "tagger": {
        "title": "Tagger",
        "teaser": "Annotate part-of-speech tags on documents.",
        "tag": "class",
        "source": "spacy/pipeline.pyx"
    },

    "tensorizer": {
        "title": "Tensorizer",
        "teaser": "Add a tensor with position-sensitive meaning representations to a document.",
        "tag": "class",
        "tag_new": 2,
        "source": "spacy/pipeline.pyx"
    },

    "goldparse": {
        "title": "GoldParse",
        "tag": "class",
        "source": "spacy/gold.pyx"
    },

    "goldcorpus": {
        "title": "GoldCorpus",
        "teaser": "An annotated corpus, using the JSON file format.",
        "tag": "class",
        "tag_new": 2,
        "source": "spacy/gold.pyx"
    },

    "binder": {
        "title": "Binder",
        "tag": "class",
        "tag_new": 2,
        "source": "spacy/tokens/binder.pyx"
    },

    "vectors": {
        "title": "Vectors",
        "teaser": "Store, save and load word vectors.",
        "tag": "class",
        "tag_new": 2,
        "source": "spacy/vectors.pyx"
    },

    "annotation": {
        "title": "Annotation Specifications",
        "teaser": "Schemes used for labels, tags and training data.",
        "menu": {
            "Tokenization": "tokenization",
            "Sentence Boundaries": "sbd",
            "POS Tagging": "pos-tagging",
            "Lemmatization": "lemmatization",
            "Dependencies": "dependency-parsing",
            "Named Entities": "named-entities",
            "Training Data": "training"
        }
    }
}