Docs for thinc-apple-ops (#9549)

* Docs for thinc-apple-ops

* Ignore thinc-apple-ops in reqs tests

* Fix install quickstart

* Add cupy cuda 113, 114 extras

* Remove draft section

Co-authored-by: Ines Montani <ines@ines.io>
Adriane Boyd 2021-10-29 10:35:31 +02:00 committed by GitHub
parent 76173b0866
commit 5477453ea3
4 changed files with 32 additions and 8 deletions


@@ -100,6 +100,8 @@ cuda113 =
cupy-cuda113>=5.0.0b4,<10.0.0
cuda114 =
cupy-cuda114>=5.0.0b4,<10.0.0
+apple =
+thinc-apple-ops>=0.0.4,<1.0.0
# Language tokenizers with external dependencies
ja =
sudachipy>=0.4.9
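
With this extra defined in the extras section of `setup.cfg`, pip can resolve `spacy[apple]` to spaCy plus `thinc-apple-ops` within the pinned range. A minimal sketch of the resulting end-user command (quotes guard against shell globbing in zsh):

```bash
# pulls in thinc-apple-ops>=0.0.4,<1.0.0 alongside spaCy on Apple Silicon machines
$ pip install 'spacy[apple]'
```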


@@ -25,6 +25,7 @@ def test_build_dependencies():
"sudachipy",
"sudachidict_core",
"spacy-pkuseg",
"thinc-apple-ops",
]
# check requirements.txt


@@ -71,13 +71,14 @@ spaCy's [`setup.cfg`](%%GITHUB_SPACY/setup.cfg) for details on what's included.
> $ pip install %%SPACY_PKG_NAME[lookups,transformers]%%SPACY_PKG_FLAGS
> ```
-| Name | Description |
-| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
-| `lookups` | Install [`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data) for data tables for lemmatization and lexeme normalization. The data is serialized with trained pipelines, so you only need this package if you want to train your own models. |
-| `transformers` | Install [`spacy-transformers`](https://github.com/explosion/spacy-transformers). The package will be installed automatically when you install a transformer-based pipeline. |
-| `ray` | Install [`spacy-ray`](https://github.com/explosion/spacy-ray) to add CLI commands for [parallel training](/usage/training#parallel-training). |
-| `cuda`, ... | Install spaCy with GPU support provided by [CuPy](https://cupy.chainer.org) for your given CUDA version. See the GPU [installation instructions](#gpu) for details and options. |
-| `ja`, `ko`, `th`, `zh` | Install additional dependencies required for tokenization for the [languages](/usage/models#languages). |
+| Name | Description |
+| ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| `lookups` | Install [`spacy-lookups-data`](https://github.com/explosion/spacy-lookups-data) for data tables for lemmatization and lexeme normalization. The data is serialized with trained pipelines, so you only need this package if you want to train your own models. |
+| `transformers` | Install [`spacy-transformers`](https://github.com/explosion/spacy-transformers). The package will be installed automatically when you install a transformer-based pipeline. |
+| `ray` | Install [`spacy-ray`](https://github.com/explosion/spacy-ray) to add CLI commands for [parallel training](/usage/training#parallel-training). |
+| `cuda`, ... | Install spaCy with GPU support provided by [CuPy](https://cupy.chainer.org) for your given CUDA version. See the GPU [installation instructions](#gpu) for details and options. |
+| `apple` | Install [`thinc-apple-ops`](https://github.com/explosion/thinc-apple-ops) to improve performance on an Apple M1. |
+| `ja`, `ko`, `th` | Install additional dependencies required for tokenization for the [languages](/usage/models#languages). |
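
As with the example command above, extras can be combined in a single install. A hypothetical invocation for an Apple Silicon machine that also needs Japanese tokenization, using the same placeholder syntax as the snippet above:

```bash
$ pip install %%SPACY_PKG_NAME[apple,ja]%%SPACY_PKG_FLAGS
```
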
### conda {#conda}


@@ -4,10 +4,12 @@ import { StaticQuery, graphql } from 'gatsby'
import { Quickstart, QS } from '../components/quickstart'
import { repo, DEFAULT_BRANCH } from '../components/util'
+const DEFAULT_OS = 'mac'
+const DEFAULT_PLATFORM = 'x86'
const DEFAULT_MODELS = ['en']
const DEFAULT_OPT = 'efficiency'
const DEFAULT_HARDWARE = 'cpu'
-const DEFAULT_CUDA = 'cuda102'
+const DEFAULT_CUDA = 'cuda113'
const CUDA = {
'8.0': 'cuda80',
'9.0': 'cuda90',
@@ -19,11 +21,15 @@ const CUDA = {
'11.0': 'cuda110',
'11.1': 'cuda111',
'11.2': 'cuda112',
+'11.3': 'cuda113',
+'11.4': 'cuda114',
}
const LANG_EXTRAS = ['ja'] // only for languages with models
const QuickstartInstall = ({ id, title }) => {
const [train, setTrain] = useState(false)
+const [platform, setPlatform] = useState(DEFAULT_PLATFORM)
+const [os, setOs] = useState(DEFAULT_OS)
const [hardware, setHardware] = useState(DEFAULT_HARDWARE)
const [cuda, setCuda] = useState(DEFAULT_CUDA)
const [selectedModels, setModels] = useState(DEFAULT_MODELS)
@@ -33,15 +39,19 @@ const QuickstartInstall = ({ id, title }) => {
config: v => setTrain(v.includes('train')),
models: setModels,
optimize: v => setEfficiency(v.includes('efficiency')),
+platform: v => setPlatform(v[0]),
+os: v => setOs(v[0]),
}
const showDropdown = {
hardware: () => hardware === 'gpu',
}
const modelExtras = train ? selectedModels.filter(m => LANG_EXTRAS.includes(m)) : []
+const apple = os === 'mac' && platform === 'arm'
const pipExtras = [
hardware === 'gpu' && cuda,
train && 'transformers',
train && 'lookups',
+apple && 'apple',
...modelExtras,
]
.filter(e => e)
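
Since `.filter(e => e)` drops every entry whose condition is false, only the extras that apply to the current selection survive. For example, on an ARM Mac with training enabled (and CPU hardware) the list resolves to `transformers`, `lookups` and `apple`; the joining into the final command happens further down in the widget, so the exact flags below are an assumption:

```bash
# hypothetical quickstart output for os=mac, platform=arm, train=yes, hardware=cpu
$ pip install -U 'spacy[transformers,lookups,apple]'
```
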
@@ -62,6 +72,16 @@ const QuickstartInstall = ({ id, title }) => {
{ id: 'windows', title: 'Windows' },
{ id: 'linux', title: 'Linux' },
],
+defaultValue: DEFAULT_OS,
},
+{
+id: 'platform',
+title: 'Platform',
+options: [
+{ id: 'x86', title: 'x86', checked: true },
+{ id: 'arm', title: 'ARM / M1' },
+],
+defaultValue: DEFAULT_PLATFORM,
+},
{
id: 'package',