//- mirror of https://github.com/explosion/spaCy.git (synced 2024-11-14 21:57:15 +03:00)
//- 💫 DOCS > API > LANGUAGE MODELS

include ../../_includes/_mixins
p
    | spaCy currently provides models for the following languages and
    | capabilities:
+aside-code("Download language models", "bash").
    python -m spacy download en
    python -m spacy download de
    python -m spacy download fr
+table([ "Language", "Token", "SBD", "Lemma", "POS", "NER", "Dep", "Vector", "Sentiment"])
    +row
        +cell English #[code en]
        each icon in [ "pro", "pro", "pro", "pro", "pro", "pro", "pro", "con" ]
            +cell.u-text-center #[+procon(icon)]

    +row
        +cell German #[code de]
        each icon in [ "pro", "pro", "con", "pro", "pro", "pro", "pro", "con" ]
            +cell.u-text-center #[+procon(icon)]

    +row
        +cell French #[code fr]
        each icon in [ "pro", "con", "con", "pro", "con", "pro", "pro", "con" ]
            +cell.u-text-center #[+procon(icon)]

    +row
        +cell Spanish #[code es]
        each icon in [ "pro", "pro", "con", "pro", "pro", "pro", "pro", "con" ]
            +cell.u-text-center #[+procon(icon)]
p
    +button("/docs/usage/models", true, "primary") See available models
+h(2, "alpha-support") Alpha tokenization support

p
    | Work has started on the following languages. You can help by
    | #[+a("/docs/usage/adding-languages#language-data") improving the existing language data]
    | and extending the tokenization patterns.
+aside("Usage note")
    | Note that the alpha languages don't yet come with a language model. In
    | order to use them, you have to import them directly:

    +code.o-no-block.
        from spacy.lang.fi import Finnish
        nlp = Finnish()
        doc = nlp(u'Ilmatyynyalukseni on täynnä ankeriaita')
+infobox("Dependencies")
    | Some language tokenizers require external dependencies. To use #[strong Chinese],
    | you need to have #[+a("https://github.com/fxsjy/jieba") Jieba] installed.
    | The #[strong Japanese] tokenizer requires
    | #[+a("https://github.com/mocobeta/janome") Janome].
+table([ "Language", "Code", "Source" ])
    each language, code in { it: "Italian", pt: "Portuguese", nl: "Dutch", sv: "Swedish", fi: "Finnish", nb: "Norwegian Bokmål", da: "Danish", hu: "Hungarian", pl: "Polish", bn: "Bengali", he: "Hebrew", zh: "Chinese", ja: "Japanese" }
        +row
            +cell #{language}
            +cell #[code=code]
            +cell
                +src(gh("spaCy", "spacy/lang/" + code)) lang/#{code}
+h(2, "multi-language") Multi-language support
    +tag-new(2)

p
    | As of v2.0, spaCy supports models trained on more than one language. This
    | is especially useful for named entity recognition. The language ID used
    | for multi-language or language-neutral models is #[code xx]. The
    | language class, a generic subclass containing only the base language data,
    | can be found in #[+src(gh("spaCy", "spacy/lang/xx")) lang/xx].
p
    | To load your model with the neutral, multi-language class, simply set
    | #[code "language": "xx"] in your
    | #[+a("/docs/usage/saving-loading#models-generating") model package]'s
    | meta.json. You can also import the class directly, or call
    | #[+api("util#get_lang_class") #[code util.get_lang_class()]] for
    | lazy-loading.
+code("Standard import").
    from spacy.lang.xx import MultiLanguage
    nlp = MultiLanguage()

+code("With lazy-loading").
    from spacy.util import get_lang_class
    nlp = get_lang_class('xx')