Mirror of https://github.com/explosion/spaCy.git
Update API docs and add "source" button to GH source
parent 93ee5c4a52
commit 1b9c6ded71
@@ -6,9 +6,17 @@ include _sidebar

 main.o-main.o-main--sidebar.o-main--aside
     article.o-content
-        +h(1)=title
-        if tag
-            +tag=tag
+        +grid.o-no-block
+            +grid-col(source ? "two-thirds" : "full")
+                +h(1)=title
+                if tag
+                    +tag=tag
+
+            if source
+                +grid-col("third").u-text-right
+                    .o-inline-list
+                        +button(gh("spacy", source), false, "secondary").u-text-tag Source #[+icon("code", 14)]

         if ALPHA
             +infobox("⚠️ You are viewing the spaCy v2.0 alpha docs")
@@ -24,7 +24,8 @@
         "Vocab": "vocab",
         "StringStore": "stringstore",
         "GoldParse": "goldparse",
-        "GoldCorpus": "goldcorpus"
+        "GoldCorpus": "goldcorpus",
+        "Binder": "binder"
     },
     "Other": {
         "Annotation Specs": "annotation"
@@ -47,62 +48,74 @@
     "spacy": {
         "title": "spaCy top-level functions",
+        "source": "spacy/__init__.py",
         "next": "displacy"
     },

     "displacy": {
         "title": "displaCy",
         "tag": "module",
+        "source": "spacy/displacy",
         "next": "util"
     },

     "util": {
         "title": "Utility Functions",
+        "source": "spacy/util.py",
         "next": "cli"
     },

     "cli": {
-        "title": "Command Line Interface"
+        "title": "Command Line Interface",
+        "source": "spacy/cli"
     },

     "language": {
         "title": "Language",
-        "tag": "class"
+        "tag": "class",
+        "source": "spacy/language.py"
     },

     "doc": {
         "title": "Doc",
-        "tag": "class"
+        "tag": "class",
+        "source": "spacy/tokens/doc.pyx"
     },

     "token": {
         "title": "Token",
-        "tag": "class"
+        "tag": "class",
+        "source": "spacy/tokens/token.pyx"
     },

     "span": {
         "title": "Span",
-        "tag": "class"
+        "tag": "class",
+        "source": "spacy/tokens/span.pyx"
     },

     "lexeme": {
         "title": "Lexeme",
-        "tag": "class"
+        "tag": "class",
+        "source": "spacy/lexeme.pyx"
     },

     "vocab": {
         "title": "Vocab",
-        "tag": "class"
+        "tag": "class",
+        "source": "spacy/vocab.pyx"
     },

     "stringstore": {
         "title": "StringStore",
-        "tag": "class"
+        "tag": "class",
+        "source": "spacy/strings.pyx"
     },

     "matcher": {
         "title": "Matcher",
-        "tag": "class"
+        "tag": "class",
+        "source": "spacy/matcher.pyx"
     },

     "dependenyparser": {
@@ -122,7 +135,8 @@
     "tokenizer": {
         "title": "Tokenizer",
-        "tag": "class"
+        "tag": "class",
+        "source": "spacy/tokenizer.pyx"
     },

     "tagger": {
@@ -132,11 +146,18 @@
     "goldparse": {
         "title": "GoldParse",
-        "tag": "class"
+        "tag": "class",
+        "source": "spacy/gold.pyx"
     },

+    "goldcorpus": {
+        "title": "GoldCorpus",
+        "tag": "class",
+        "source": "spacy/gold.pyx"
+    },
+
+    "binder": {
+        "title": "Binder",
+        "tag": "class"
+    },
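Each new "source" value is a repository-relative path, so every documented section can link straight to its implementation. A minimal sketch of how these entries could be resolved into links, reusing the hypothetical gh() helper above and assuming the sections live in website/docs/api/_data.json (the exact file name is an assumption, inferred from the binder.jade path below):

    import json

    # Minimal sketch: map each documented section to its GitHub source link.
    with open("website/docs/api/_data.json") as f:
        sections = json.load(f)

    for name, meta in sections.items():
        if isinstance(meta, dict) and "source" in meta:
            print("%s -> %s" % (name, gh("spacy", meta["source"])))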
website/docs/api/binder.jade (new file, +5 lines)
@@ -0,0 +1,5 @@
+//- 💫 DOCS > API > BINDER
+
+include ../../_includes/_mixins
+
++under-construction
@@ -382,6 +382,11 @@ p Load state from a binary string.
         +cell #[code Vocab]
         +cell A container for the lexical types.

+    +row
+        +cell #[code tokenizer]
+        +cell #[code Tokenizer]
+        +cell The tokenizer.
+
     +row
         +cell #[code make_doc]
         +cell #[code lambda text: Doc]
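The rows above document the vocab, tokenizer and make_doc attributes of the Language class. A minimal usage sketch, assuming a blank spaCy v2 Language (a model loaded via spacy.load would behave the same way):

    from spacy.language import Language

    nlp = Language()  # blank pipeline with a default Vocab

    # make_doc tokenizes without running the pipeline; it is roughly
    # equivalent to calling nlp.tokenizer(text) directly.
    doc = nlp.make_doc(u"Hello world")
    assert doc.vocab is nlp.vocab
    print([token.text for token in doc])  # ['Hello', 'world']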
@@ -198,93 +198,6 @@ p
             |  attributes. The #[code ORTH] fields of the attributes must
             |  exactly match the string when they are concatenated.

-+h(2, "to_disk") Tokenizer.to_disk
-    +tag method
-    +tag-new(2)
-
-p Save the current state to a directory.
-
-+aside-code("Example").
-    tokenizer.to_disk('/path/to/tokenizer')
-
-+table(["Name", "Type", "Description"])
-    +row
-        +cell #[code path]
-        +cell unicode or #[code Path]
-        +cell
-            |  A path to a directory, which will be created if it doesn't exist.
-            |  Paths may be either strings or #[code Path]-like objects.
-
-+h(2, "from_disk") Tokenizer.from_disk
-    +tag method
-    +tag-new(2)
-
-p Load state from a directory. Modifies the object in place and returns it.
-
-+aside-code("Example").
-    from spacy.tokenizer import Tokenizer
-    tokenizer = Tokenizer(nlp.vocab)
-    tokenizer = tokenizer.from_disk('/path/to/tokenizer')
-
-+table(["Name", "Type", "Description"])
-    +row
-        +cell #[code path]
-        +cell unicode or #[code Path]
-        +cell
-            |  A path to a directory. Paths may be either strings or
-            |  #[code Path]-like objects.
-
-    +footrow
-        +cell returns
-        +cell #[code Tokenizer]
-        +cell The modified #[code Tokenizer] object.
-
-+h(2, "to_bytes") Tokenizer.to_bytes
-    +tag method
-
-p Serialize the current state to a binary string.
-
-+aside-code("Example").
-    tokenizer_bytes = tokenizer.to_bytes()
-
-+table(["Name", "Type", "Description"])
-    +row
-        +cell #[code **exclude]
-        +cell -
-        +cell Named attributes to prevent from being serialized.
-
-    +footrow
-        +cell returns
-        +cell bytes
-        +cell The serialized form of the #[code Tokenizer] object.
-
-+h(2, "from_bytes") Tokenizer.from_bytes
-    +tag method
-
-p Load state from a binary string.
-
-+aside-code("Example").
-    from spacy.tokenizer import Tokenizer
-    tokenizer_bytes = tokenizer.to_bytes()
-    new_tokenizer = Tokenizer(nlp.vocab)
-    new_tokenizer.from_bytes(tokenizer_bytes)
-
-+table(["Name", "Type", "Description"])
-    +row
-        +cell #[code bytes_data]
-        +cell bytes
-        +cell The data to load from.
-
-    +row
-        +cell #[code **exclude]
-        +cell -
-        +cell Named attributes to prevent from being loaded.
-
-    +footrow
-        +cell returns
-        +cell #[code Tokenizer]
-        +cell The #[code Tokenizer] object.
-
 +h(2, "attributes") Attributes

 +table(["Name", "Type", "Description"])
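The removed sections documented the Tokenizer's serialization API (to_disk, from_disk, to_bytes, from_bytes). For reference, a combined round-trip sketch in the spirit of the removed examples, assuming spaCy v2 and a throwaway Vocab (the exact on-disk layout may differ between versions):

    from spacy.tokenizer import Tokenizer
    from spacy.vocab import Vocab

    tokenizer = Tokenizer(Vocab())

    # Disk round-trip: save, then load back in place.
    tokenizer.to_disk('/path/to/tokenizer')
    tokenizer = tokenizer.from_disk('/path/to/tokenizer')

    # Bytes round-trip: serialize, then restore into a fresh instance.
    tokenizer_bytes = tokenizer.to_bytes()
    new_tokenizer = Tokenizer(tokenizer.vocab).from_bytes(tokenizer_bytes)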