Update API docs and add "source" button to GH source

ines 2017-05-26 13:40:32 +02:00
parent 93ee5c4a52
commit 1b9c6ded71
5 changed files with 54 additions and 102 deletions

View File

@@ -6,9 +6,17 @@ include _sidebar
 
 main.o-main.o-main--sidebar.o-main--aside
     article.o-content
-        +h(1)=title
-        if tag
-            +tag=tag
+        +grid.o-no-block
+            +grid-col(source ? "two-thirds" : "full")
+                +h(1)=title
+                if tag
+                    +tag=tag
+
+            if source
+                +grid-col("third").u-text-right
+                    .o-inline-list
+                        +button(gh("spacy", source), false, "secondary").u-text-tag Source #[+icon("code", 14)]
 
         if ALPHA
             +infobox("⚠️ You are viewing the spaCy v2.0 alpha docs")

View File

@@ -24,7 +24,8 @@
         "Vocab": "vocab",
         "StringStore": "stringstore",
         "GoldParse": "goldparse",
-        "GoldCorpus": "goldcorpus"
+        "GoldCorpus": "goldcorpus",
+        "Binder": "binder"
     },
     "Other": {
         "Annotation Specs": "annotation"
@@ -47,62 +48,74 @@
     "spacy": {
         "title": "spaCy top-level functions",
+        "source": "spacy/__init__.py",
         "next": "displacy"
     },
 
     "displacy": {
         "title": "displaCy",
         "tag": "module",
+        "source": "spacy/displacy",
         "next": "util"
     },
 
     "util": {
         "title": "Utility Functions",
+        "source": "spacy/util.py",
         "next": "cli"
     },
 
     "cli": {
-        "title": "Command Line Interface"
+        "title": "Command Line Interface",
+        "source": "spacy/cli"
     },
 
     "language": {
         "title": "Language",
-        "tag": "class"
+        "tag": "class",
+        "source": "spacy/language.py"
     },
 
     "doc": {
         "title": "Doc",
-        "tag": "class"
+        "tag": "class",
+        "source": "spacy/tokens/doc.pyx"
     },
 
     "token": {
         "title": "Token",
-        "tag": "class"
+        "tag": "class",
+        "source": "spacy/tokens/token.pyx"
     },
 
     "span": {
         "title": "Span",
-        "tag": "class"
+        "tag": "class",
+        "source": "spacy/tokens/span.pyx"
     },
 
     "lexeme": {
         "title": "Lexeme",
-        "tag": "class"
+        "tag": "class",
+        "source": "spacy/lexeme.pyx"
     },
 
     "vocab": {
         "title": "Vocab",
-        "tag": "class"
+        "tag": "class",
+        "source": "spacy/vocab.pyx"
     },
 
     "stringstore": {
         "title": "StringStore",
-        "tag": "class"
+        "tag": "class",
+        "source": "spacy/strings.pyx"
     },
 
     "matcher": {
         "title": "Matcher",
-        "tag": "class"
+        "tag": "class",
+        "source": "spacy/matcher.pyx"
     },
 
     "dependenyparser": {
@@ -122,7 +135,8 @@
     "tokenizer": {
         "title": "Tokenizer",
-        "tag": "class"
+        "tag": "class",
+        "source": "spacy/tokenizer.pyx"
     },
 
     "tagger": {
@@ -132,11 +146,18 @@
     "goldparse": {
         "title": "GoldParse",
-        "tag": "class"
+        "tag": "class",
+        "source": "spacy/gold.pyx"
     },
 
     "goldcorpus": {
         "title": "GoldCorpus",
+        "tag": "class",
+        "source": "spacy/gold.pyx"
+    },
+
+    "binder": {
+        "title": "Binder",
         "tag": "class"
     },
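Every "source" value added above is a path relative to the spaCy repository root, naming either a single module (e.g. spacy/util.py) or a package directory (e.g. spacy/cli). A hypothetical sanity check, not part of this commit, that these paths resolve in a local checkout:

    import json
    import os

    # Assumed layout: run from a spaCy checkout that contains both
    # the spacy/ package and the website's _data.json.
    with open('website/docs/api/_data.json') as f:
        data = json.load(f)

    for page, meta in data.items():
        # Skip sidebar sections and entries without a "source" field.
        if isinstance(meta, dict) and 'source' in meta:
            if not os.path.exists(meta['source']):
                print('Broken "source" path for %s: %s' % (page, meta['source']))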

View File

@@ -0,0 +1,5 @@
+//- 💫 DOCS > API > BINDER
+
+include ../../_includes/_mixins
+
++under-construction

View File

@@ -382,6 +382,11 @@ p Load state from a binary string.
         +cell #[code Vocab]
         +cell A container for the lexical types.
 
+    +row
+        +cell #[code tokenizer]
+        +cell #[code Tokenizer]
+        +cell The tokenizer.
+
     +row
         +cell #[code make_doc]
         +cell #[code lambda text: Doc]
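The new row documents the tokenizer attribute on Language, which can be called directly to tokenize text without running the rest of the pipeline. A short usage sketch; the 'en' model name is an assumption and any installed model works:

    import spacy

    nlp = spacy.load('en')                  # model name assumed
    doc = nlp.tokenizer(u'Hello world!')    # tokenization only, no tagger/parser
    print([token.text for token in doc])    # ['Hello', 'world', '!']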

View File

@@ -198,93 +198,6 @@ p
     | attributes. The #[code ORTH] fields of the attributes must
     | exactly match the string when they are concatenated.
 
-+h(2, "to_disk") Tokenizer.to_disk
-    +tag method
-    +tag-new(2)
-
-p Save the current state to a directory.
-
-+aside-code("Example").
-    tokenizer.to_disk('/path/to/tokenizer')
-
-+table(["Name", "Type", "Description"])
-    +row
-        +cell #[code path]
-        +cell unicode or #[code Path]
-        +cell
-            | A path to a directory, which will be created if it doesn't exist.
-            | Paths may be either strings or #[code Path]-like objects.
-
-+h(2, "from_disk") Tokenizer.from_disk
-    +tag method
-    +tag-new(2)
-
-p Loads state from a directory. Modifies the object in place and returns it.
-
-+aside-code("Example").
-    from spacy.tokenizer import Tokenizer
-    tokenizer = Tokenizer(nlp.vocab)
-    tokenizer = tokenizer.from_disk('/path/to/tokenizer')
-
-+table(["Name", "Type", "Description"])
-    +row
-        +cell #[code path]
-        +cell unicode or #[code Path]
-        +cell
-            | A path to a directory. Paths may be either strings or
-            | #[code Path]-like objects.
-
-    +footrow
-        +cell returns
-        +cell #[code Tokenizer]
-        +cell The modified #[code Tokenizer] object.
-
-+h(2, "to_bytes") Tokenizer.to_bytes
-    +tag method
-
-p Serialize the current state to a binary string.
-
-+aside-code("Example").
-    tokenizer_bytes = tokenizer.to_bytes()
-
-+table(["Name", "Type", "Description"])
-    +row
-        +cell #[code **exclude]
-        +cell -
-        +cell Named attributes to prevent from being serialized.
-
-    +footrow
-        +cell returns
-        +cell bytes
-        +cell The serialized form of the #[code Tokenizer] object.
-
-+h(2, "from_bytes") Tokenizer.from_bytes
-    +tag method
-
-p Load state from a binary string.
-
-+aside-code("Example").
-    fron spacy.tokenizer import Tokenizer
-    tokenizer_bytes = tokenizer.to_bytes()
-    new_tokenizer = Tokenizer(nlp.vocab)
-    new_tokenizer.from_bytes(tokenizer_bytes)
-
-+table(["Name", "Type", "Description"])
-    +row
-        +cell #[code bytes_data]
-        +cell bytes
-        +cell The data to load from.
-
-    +row
-        +cell #[code **exclude]
-        +cell -
-        +cell Named attributes to prevent from being loaded.
-
-    +footrow
-        +cell returns
-        +cell #[code Tokenizer]
-        +cell The #[code Tokenizer] object.
-
 +h(2, "attributes") Attributes
 
 +table(["Name", "Type", "Description"])
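The removed sections above documented Tokenizer.to_disk, Tokenizer.from_disk, Tokenizer.to_bytes and Tokenizer.from_bytes. For reference, the removed +aside-code snippets combine into the round-trip below; note that the original from_bytes example begins with a typo ("fron" instead of "from"), corrected here. The spacy.load('en') line is an assumption added only to make the sketch self-contained:

    import spacy
    from spacy.tokenizer import Tokenizer

    nlp = spacy.load('en')      # model name assumed
    tokenizer = nlp.tokenizer

    # Disk round-trip (to_disk / from_disk, new in v2)
    tokenizer.to_disk('/path/to/tokenizer')
    tokenizer = tokenizer.from_disk('/path/to/tokenizer')

    # Bytes round-trip (to_bytes / from_bytes)
    tokenizer_bytes = tokenizer.to_bytes()
    new_tokenizer = Tokenizer(nlp.vocab)
    new_tokenizer.from_bytes(tokenizer_bytes)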