//- 💫 DOCS > API > LANGUAGE MODELS

include ../../_includes/_mixins

p
    | spaCy currently provides models for the following languages and
    | capabilities:

//- Shell commands for fetching the official models via the CLI downloader.
+aside-code("Download language models", "bash").
    python -m spacy download en
    python -m spacy download de
    python -m spacy download fr
//- Capability matrix: one row per shipped model language; the icon list maps
//- 1:1 onto the column headers after "Language" (pro = supported, con = not).
+table([ "Language", "Token", "SBD", "Lemma", "POS", "NER", "Dep", "Vector", "Sentiment"])
    +row
        +cell English #[code en]
        each icon in [ "pro", "pro", "pro", "pro", "pro", "pro", "pro", "con" ]
            +cell.u-text-center #[+procon(icon)]

    +row
        +cell German #[code de]
        each icon in [ "pro", "pro", "con", "pro", "pro", "pro", "pro", "con" ]
            +cell.u-text-center #[+procon(icon)]

    +row
        +cell French #[code fr]
        each icon in [ "pro", "con", "con", "pro", "con", "pro", "pro", "con" ]
            +cell.u-text-center #[+procon(icon)]

    +row
        +cell Spanish #[code es]
        each icon in [ "pro", "pro", "con", "pro", "pro", "pro", "pro", "con" ]
            +cell.u-text-center #[+procon(icon)]

p
    +button("/docs/usage/models", true, "primary") See available models
+h(2, "alpha-support") Alpha tokenization support

p
    | Work has started on the following languages. You can help by
    | #[+a("/docs/usage/adding-languages#language-data") improving the existing language data]
    | and extending the tokenization patterns.

+aside("Usage note")
    | Note that the alpha languages don't yet come with a language model. In
    | order to use them, you have to import them directly:

    //- NOTE(review): code sample nested inside the aside it illustrates —
    //- confirm against the rendered page, as the pasted source lost indentation.
    +code.o-no-block.
        from spacy.lang.fi import Finnish
        nlp = Finnish()
        doc = nlp(u'Ilmatyynyalukseni on täynnä ankeriaita')

+infobox("Dependencies")
    | Some language tokenizers require external dependencies. To use #[strong Chinese],
    | you need to have #[+a("https://github.com/fxsjy/jieba") Jieba] installed.
    | The #[strong Japanese] tokenizer requires
    | #[+a("https://github.com/mocobeta/janome") Janome].
//- Alpha languages table, generated from a code → name map.
//- Pug's `each value, key in object` binds the display name first, then the
//- ISO code, which is interpolated into the source link path.
+table([ "Language", "Code", "Source" ])
    each language, code in { it: "Italian", pt: "Portuguese", nl: "Dutch", sv: "Swedish", fi: "Finnish", nb: "Norwegian Bokmål", da: "Danish", hu: "Hungarian", pl: "Polish", bn: "Bengali", he: "Hebrew", zh: "Chinese", ja: "Japanese" }
        +row
            +cell #{language}
            +cell #[code=code]
            +cell
                +src(gh("spaCy", "spacy/lang/" + code)) lang/#{code}
+h(2, "multi-language") Multi-language support
    +tag-new(2)

p
    | As of v2.0, spaCy supports models trained on more than one language. This
    | is especially useful for named entity recognition. The language ID used
    | for multi-language or language-neutral models is #[code xx]. The
    | language class, a generic subclass containing only the base language data,
    | can be found in #[+src(gh("spaCy", "spacy/lang/xx")) lang/xx].

p
    | To load your model with the neutral, multi-language class, simply set
    | #[code "language": "xx"] in your
    | #[+a("/docs/usage/saving-loading#models-generating") model package]'s
    | meta.json. You can also import the class directly, or call
    | #[+api("util#get_lang_class") #[code util.get_lang_class()]] for
    | lazy-loading.

+code("Standard import").
    from spacy.lang.xx import MultiLanguage
    nlp = MultiLanguage()

+code("With lazy-loading").
    from spacy.util import get_lang_class
    nlp = get_lang_class('xx')